Columns (one row per repository):
  repo_name: string (lengths 6–130)
  hexsha: list
  file_path: list
  code: list
  apis: list
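Each row below pairs a repository with parallel per-file lists of commit hashes, paths, source code, and the torch/numpy APIs detected in that file. A minimal sketch of walking such rows, assuming they are already available as a list of Python dicts with these keys (the loading mechanism and any dataset name are assumptions, not given here):

```python
# Sketch: iterate rows shaped like the examples below and pair each file with its APIs.
# Assumes `rows` is an iterable of dicts with keys repo_name, hexsha, file_path, code, apis;
# hexsha/file_path/code/apis are parallel lists, one entry per file. How the rows are
# loaded (JSON lines, a datasets library, etc.) is an assumption, not specified here.
from typing import Dict, Iterable, Iterator


def iter_files(rows: Iterable[Dict]) -> Iterator[Dict]:
    for row in rows:
        for sha, path, code, apis in zip(
            row["hexsha"], row["file_path"], row["code"], row["apis"]
        ):
            yield {
                "repo_name": row["repo_name"],
                "hexsha": sha,
                "file_path": path,
                "code": code,
                "apis": apis,
            }


# Example usage with a single hand-built (hypothetical) row:
rows = [{
    "repo_name": "example/repo",
    "hexsha": ["deadbeef"],
    "file_path": ["pkg/module.py"],
    "code": ["import torch\n"],
    "apis": [["torch.no_grad"]],
}]
for rec in iter_files(rows):
    print(rec["repo_name"], rec["file_path"], rec["apis"])
```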
ReinaIshikawa/multimodal-vae-public
[ "2a358eb3593e9942e0846eb0095519acef462fa6" ]
[ "mnist/train.py" ]
[ "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport shutil\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\n\nfrom model import MVAE\n\n\ndef elbo_loss(recon_image, image, recon_text, text, mu, logvar,\n lambda_image=1.0, lambda_text=1.0, annealing_factor=1):\n \"\"\"Bimodal ELBO loss function. \n \n @param recon_image: torch.Tensor\n reconstructed image\n @param image: torch.Tensor\n input image\n @param recon_text: torch.Tensor\n reconstructed text probabilities\n @param text: torch.Tensor\n input text (one-hot)\n @param mu: torch.Tensor\n mean of latent distribution\n @param logvar: torch.Tensor\n log-variance of latent distribution\n @param lambda_image: float [default: 1.0]\n weight for image BCE\n @param lambda_text: float [default: 1.0]\n weight for text BCE\n @param annealing_factor: integer [default: 1]\n multiplier for KL divergence term\n @return ELBO: torch.Tensor\n evidence lower bound\n \"\"\"\n image_bce, text_bce = 0, 0 # default params\n if recon_image is not None and image is not None:\n image_bce = torch.sum(binary_cross_entropy_with_logits(\n recon_image.view(-1, 1 * 28 * 28), \n image.view(-1, 1 * 28 * 28)), dim=1)\n\n if recon_text is not None and text is not None:\n text_bce = torch.sum(cross_entropy(recon_text, text), dim=1)\n\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)\n ELBO = torch.mean(lambda_image * image_bce + lambda_text * text_bce \n + annealing_factor * KLD)\n return ELBO\n\n\ndef binary_cross_entropy_with_logits(input, target):\n \"\"\"Sigmoid Activation + Binary Cross Entropy\n\n @param input: torch.Tensor (size N)\n @param target: torch.Tensor (size N)\n @return loss: torch.Tensor (size N)\n \"\"\"\n if not (target.size() == input.size()):\n raise ValueError(\"Target size ({}) must be the same as input size ({})\".format(\n target.size(), input.size()))\n\n return (torch.clamp(input, 0) - input * target \n + torch.log(1 + torch.exp(-torch.abs(input))))\n\n\ndef cross_entropy(input, target, eps=1e-6):\n \"\"\"k-Class Cross Entropy (Log Softmax + Log Loss)\n \n @param input: torch.Tensor (size N x K)\n @param target: torch.Tensor (size N x K)\n @param eps: error to add (default: 1e-6)\n @return loss: torch.Tensor (size N)\n \"\"\"\n if not (target.size(0) == input.size(0)):\n raise ValueError(\n \"Target size ({}) must be the same as input size ({})\".format(\n target.size(0), input.size(0)))\n\n log_input = F.log_softmax(input + eps, dim=1)\n y_onehot = Variable(log_input.data.new(log_input.size()).zero_())\n y_onehot = y_onehot.scatter(1, target.unsqueeze(1), 1)\n loss = y_onehot * log_input\n return -loss\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):\n if not os.path.isdir(folder):\n os.mkdir(folder)\n torch.save(state, os.path.join(folder, filename))\n if is_best:\n 
shutil.copyfile(os.path.join(folder, filename),\n os.path.join(folder, 'model_best.pth.tar'))\n\n\ndef load_checkpoint(file_path, use_cuda=False):\n checkpoint = torch.load(file_path) if use_cuda else \\\n torch.load(file_path, map_location=lambda storage, location: storage)\n model = MVAE(checkpoint['n_latents'])\n model.load_state_dict(checkpoint['state_dict'])\n return model\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--n-latents', type=int, default=64,\n help='size of the latent embedding [default: 64]')\n parser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training [default: 100]')\n parser.add_argument('--epochs', type=int, default=500, metavar='N',\n help='number of epochs to train [default: 500]')\n parser.add_argument('--annealing-epochs', type=int, default=200, metavar='N',\n help='number of epochs to anneal KL for [default: 200]')\n parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',\n help='learning rate [default: 1e-3]')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status [default: 10]')\n parser.add_argument('--lambda-image', type=float, default=1.,\n help='multipler for image reconstruction [default: 1]')\n parser.add_argument('--lambda-text', type=float, default=10.,\n help='multipler for text reconstruction [default: 10]')\n parser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training [default: False]')\n args = parser.parse_args()\n args.cuda = args.cuda and torch.cuda.is_available()\n\n if not os.path.isdir('./trained_models'):\n os.makedirs('./trained_models')\n\n train_loader = torch.utils.data.DataLoader(\n MNIST('./data', train=True, download=True, transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True)\n N_mini_batches = len(train_loader)\n test_loader = torch.utils.data.DataLoader(\n MNIST('./data', train=False, download=True, transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=False)\n\n model = MVAE(args.n_latents)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n\n if args.cuda:\n model.cuda()\n\n\n def train(epoch):\n model.train()\n train_loss_meter = AverageMeter()\n\n # NOTE: is_paired is 1 if the example is paired\n for batch_idx, (image, text) in enumerate(train_loader):\n if epoch < args.annealing_epochs:\n # compute the KL annealing factor for the current mini-batch in the current epoch\n annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /\n float(args.annealing_epochs * N_mini_batches))\n else:\n # by default the KL annealing factor is unity\n annealing_factor = 1.0\n\n if args.cuda:\n image = image.cuda()\n text = text.cuda()\n\n image = Variable(image)\n text = Variable(text)\n batch_size = len(image)\n\n # refresh the optimizer\n optimizer.zero_grad()\n\n # pass data through model\n recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)\n recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)\n recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)\n \n # compute ELBO for each data combo\n joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1, \n lambda_image=args.lambda_image, lambda_text=args.lambda_text,\n annealing_factor=annealing_factor)\n image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2, \n lambda_image=args.lambda_image, 
lambda_text=args.lambda_text,\n annealing_factor=annealing_factor)\n text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3, \n lambda_image=args.lambda_image, lambda_text=args.lambda_text,\n annealing_factor=annealing_factor)\n train_loss = joint_loss + image_loss + text_loss\n train_loss_meter.update(train_loss.data[0], batch_size)\n \n # compute gradients and take step\n train_loss.backward()\n optimizer.step()\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tAnnealing-Factor: {:.3f}'.format(\n epoch, batch_idx * len(image), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))\n\n print('====> Epoch: {}\\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))\n\n\n def test(epoch):\n model.eval()\n test_loss_meter = AverageMeter()\n\n for batch_idx, (image, text) in enumerate(test_loader):\n if args.cuda:\n image = image.cuda()\n text = text.cuda()\n\n image = Variable(image, volatile=True)\n text = Variable(text, volatile=True)\n batch_size = len(image)\n\n recon_image_1, recon_text_1, mu_1, logvar_1 = model(image, text)\n recon_image_2, recon_text_2, mu_2, logvar_2 = model(image)\n recon_image_3, recon_text_3, mu_3, logvar_3 = model(text=text)\n\n joint_loss = elbo_loss(recon_image_1, image, recon_text_1, text, mu_1, logvar_1)\n image_loss = elbo_loss(recon_image_2, image, None, None, mu_2, logvar_2)\n text_loss = elbo_loss(None, None, recon_text_3, text, mu_3, logvar_3)\n test_loss = joint_loss + image_loss + text_loss\n test_loss_meter.update(test_loss.data[0], batch_size)\n\n print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))\n return test_loss_meter.avg\n\n \n best_loss = sys.maxint\n for epoch in range(1, args.epochs + 1):\n train(epoch)\n test_loss = test(epoch)\n is_best = test_loss < best_loss\n best_loss = min(test_loss, best_loss)\n # save the best model and current model\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'best_loss': best_loss,\n 'n_latents': args.n_latents,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, folder='./trained_models') \n" ]
[ [ "torch.autograd.Variable", "torch.clamp", "torch.nn.functional.log_softmax", "torch.abs", "torch.cuda.is_available", "torch.load", "torch.mean" ] ]
sami-ets/DeepNormalize
[ "5ed53280d98a201d45bb9973e79736136273eaea", "5ed53280d98a201d45bb9973e79736136273eaea" ]
[ "deepNormalize/training/sampler.py", "deepNormalize/training/unet.py" ]
[ "import math\nimport numpy as np\nimport torch\n\nfrom deepNormalize.utils.constants import AUGMENTED_INPUTS, NON_AUGMENTED_INPUTS, IMAGE_TARGET, DATASET_ID\n\n\nclass Sampler(object):\n\n def __init__(self, keep_augmented_prob: float):\n self._keep_augmented_prob = keep_augmented_prob\n\n def __call__(self, inputs, targets):\n augmented_choices = np.random.choice(np.arange(0, len(inputs[AUGMENTED_INPUTS])),\n math.ceil(len(inputs[AUGMENTED_INPUTS]) * self._keep_augmented_prob),\n replace=False)\n augmented_inputs = inputs[AUGMENTED_INPUTS][augmented_choices]\n augmented_targets = targets[IMAGE_TARGET][augmented_choices]\n augmented_target_ids = targets[DATASET_ID][augmented_choices]\n\n non_augmented_choices = np.setdiff1d(np.arange(0, len(inputs[NON_AUGMENTED_INPUTS])), augmented_choices)\n non_augmented_inputs = inputs[NON_AUGMENTED_INPUTS][non_augmented_choices]\n non_augmented_targets = targets[IMAGE_TARGET][non_augmented_choices]\n non_augmented_target_ids = targets[DATASET_ID][non_augmented_choices]\n\n new_inputs_ = torch.cat((augmented_inputs, non_augmented_inputs))\n new_image_targets = torch.cat((augmented_targets, non_augmented_targets))\n new_target_ids = torch.cat((augmented_target_ids, non_augmented_target_ids))\n\n new_targets_ = [new_image_targets, new_target_ids]\n\n new_inputs = [inputs[NON_AUGMENTED_INPUTS], new_inputs_]\n new_targets = [targets, new_targets_]\n\n return new_inputs, new_targets\n", "# -*- coding: utf-8 -*-\n# Copyright 2019 Pierre-Luc Delisle. All Rights Reserved.\n#\n# Licensed under the MIT License;\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/MIT\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport time\nfrom datetime import timedelta\nfrom typing import List\n\nimport numpy as np\nimport torch\nfrom ignite.metrics.confusion_matrix import ConfusionMatrix\nfrom kerosene.configs.configs import RunConfiguration\nfrom kerosene.metrics.gauges import AverageGauge\nfrom kerosene.training.trainers import ModelTrainer\nfrom kerosene.training.trainers import Trainer\nfrom kerosene.utils.tensors import flatten, to_onehot\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom deepNormalize.inputs.datasets import SliceDataset\nfrom deepNormalize.inputs.images import SliceType\nfrom deepNormalize.metrics.metrics import mean_hausdorff_distance\nfrom deepNormalize.training.sampler import Sampler\nfrom deepNormalize.utils.constants import IMAGE_TARGET, DATASET_ID, ABIDE_ID, \\\n NON_AUGMENTED_INPUTS, AUGMENTED_INPUTS, AUGMENTED_TARGETS\nfrom deepNormalize.utils.constants import ISEG_ID, MRBRAINS_ID\nfrom deepNormalize.utils.image_slicer import ImageSlicer, SegmentationSlicer, LabelMapper, ImageReconstructor\nfrom deepNormalize.utils.utils import to_html, to_html_per_dataset, to_html_time, get_all_patches, rebuild_image, \\\n save_rebuilt_image, save_augmented_rebuilt_images\n\n\nclass UNetTrainer(Trainer):\n\n def __init__(self, training_config, model_trainers: List[ModelTrainer],\n train_data_loader: DataLoader, valid_data_loader: DataLoader, test_data_loader: DataLoader,\n reconstruction_datasets: 
List[Dataset],\n input_reconstructor: ImageReconstructor,\n segmentation_reconstructor: ImageReconstructor,\n augmented_input_reconstructor: ImageReconstructor,\n gt_reconstructor: ImageReconstructor,\n run_config: RunConfiguration,\n dataset_config: dict, save_folder: str):\n super(UNetTrainer, self).__init__(\"UNetTrainer\", train_data_loader, valid_data_loader, test_data_loader,\n model_trainers, run_config)\n\n self._training_config = training_config\n self._run_config = run_config\n self._dataset_configs = dataset_config\n self._slicer = ImageSlicer()\n self._seg_slicer = SegmentationSlicer()\n self._label_mapper = LabelMapper()\n self._reconstruction_datasets = reconstruction_datasets\n self._input_reconstructor = input_reconstructor\n self._gt_reconstructor = gt_reconstructor\n self._segmentation_reconstructor = segmentation_reconstructor\n self._augmented_input_reconstructor = augmented_input_reconstructor\n self._num_datasets = len(list(dataset_config.keys()))\n self._class_hausdorff_distance_gauge = AverageGauge()\n self._mean_hausdorff_distance_gauge = AverageGauge()\n self._per_dataset_hausdorff_distance_gauge = AverageGauge()\n self._iSEG_dice_gauge = AverageGauge()\n self._MRBrainS_dice_gauge = AverageGauge()\n self._ABIDE_dice_gauge = AverageGauge()\n self._iSEG_hausdorff_gauge = AverageGauge()\n self._MRBrainS_hausdorff_gauge = AverageGauge()\n self._ABIDE_hausdorff_gauge = AverageGauge()\n self._valid_dice_gauge = AverageGauge()\n self._class_dice_gauge_on_patches = AverageGauge()\n self._class_dice_gauge_on_reconstructed_images = AverageGauge()\n self._class_dice_gauge_on_reconstructed_iseg_images = AverageGauge()\n self._class_dice_gauge_on_reconstructed_mrbrains_images = AverageGauge()\n self._class_dice_gauge_on_reconstructed_abide_images = AverageGauge()\n self._hausdorff_distance_gauge_on_reconstructed_iseg_images = AverageGauge()\n self._hausdorff_distance_gauge_on_reconstructed_mrbrains_images = AverageGauge()\n self._hausdorff_distance_gauge_on_reconstructed_abide_images = AverageGauge()\n self._general_confusion_matrix_gauge = ConfusionMatrix(num_classes=4)\n self._iSEG_confusion_matrix_gauge = ConfusionMatrix(num_classes=4)\n self._MRBrainS_confusion_matrix_gauge = ConfusionMatrix(num_classes=4)\n self._ABIDE_confusion_matrix_gauge = ConfusionMatrix(num_classes=4)\n self._previous_mean_dice = 0.0\n self._previous_per_dataset_table = \"\"\n self._start_time = time.time()\n self._sampler = Sampler(1.0)\n self._save_folder = save_folder\n self._is_sliced = True if isinstance(self._reconstruction_datasets[0], SliceDataset) else False\n print(\"Total number of parameters: {}\".format(sum(p.numel() for p in self._model_trainers[0].parameters())))\n\n def _train_s(self, S: ModelTrainer, inputs, target, backward=True):\n S.zero_grad()\n\n target_ohe = to_onehot(torch.squeeze(target, dim=1).long(), num_classes=4)\n target = torch.squeeze(target, dim=1).long()\n\n seg_pred = torch.nn.functional.softmax(S.forward(inputs), dim=1)\n\n loss_S = S.compute_loss(\"DiceLoss\", seg_pred, target_ohe)\n S.update_train_loss(\"DiceLoss\", loss_S.mean())\n\n metrics = S.compute_metrics(seg_pred, target)\n metrics[\"Dice\"] = metrics[\"Dice\"].mean()\n metrics[\"IoU\"] = metrics[\"IoU\"].mean()\n S.update_train_metrics(metrics)\n\n if backward:\n loss_S.mean().backward()\n S.step()\n\n return seg_pred, loss_S\n\n def _valid_s(self, S: ModelTrainer, inputs, target):\n target_ohe = to_onehot(torch.squeeze(target, dim=1).long(), num_classes=4)\n target = torch.squeeze(target, 
dim=1).long()\n\n seg_pred = torch.nn.functional.softmax(S.forward(inputs), dim=1)\n\n loss_S = S.compute_loss(\"DiceLoss\", seg_pred, target_ohe)\n S.update_valid_loss(\"DiceLoss\", loss_S.mean())\n\n metrics = S.compute_metrics(seg_pred, target)\n metrics[\"Dice\"] = metrics[\"Dice\"].mean()\n metrics[\"IoU\"] = metrics[\"IoU\"].mean()\n S.update_valid_metrics(metrics)\n\n return seg_pred, loss_S\n\n def _test_s(self, S: ModelTrainer, inputs, target, metric_gauge: AverageGauge):\n target_ohe = to_onehot(torch.squeeze(target, dim=1).long(), num_classes=4)\n target = torch.squeeze(target, dim=1).long()\n\n seg_pred = torch.nn.functional.softmax(S.forward(inputs), dim=1)\n\n loss_S = S.compute_loss(\"DiceLoss\", seg_pred, target_ohe)\n S.update_test_loss(\"DiceLoss\", loss_S.mean())\n\n metrics = S.compute_metrics(seg_pred, target)\n metric_gauge.update(np.array(metrics[\"Dice\"]))\n metrics[\"Dice\"] = metrics[\"Dice\"].mean()\n metrics[\"IoU\"] = metrics[\"IoU\"].mean()\n S.update_test_metrics(metrics)\n\n return seg_pred, loss_S\n\n def train_step(self, inputs, target):\n seg_pred, _ = self._train_s(self._model_trainers[0], inputs[AUGMENTED_INPUTS],\n target[IMAGE_TARGET])\n\n if self.current_train_step % 500 == 0:\n self._update_image_plots(self.phase, inputs[AUGMENTED_INPUTS].cpu().detach(),\n seg_pred.cpu().detach(),\n target[IMAGE_TARGET].cpu().detach(),\n target[DATASET_ID].cpu().detach())\n\n def validate_step(self, inputs, target):\n seg_pred, _ = self._valid_s(self._model_trainers[0], inputs[AUGMENTED_INPUTS],\n target[IMAGE_TARGET])\n\n if self.current_valid_step % 100 == 0:\n self._update_image_plots(self.phase, inputs[AUGMENTED_INPUTS].cpu().detach(),\n seg_pred.cpu().detach(),\n target[IMAGE_TARGET].cpu().detach(),\n target[DATASET_ID].cpu().detach())\n\n def test_step(self, inputs, target):\n inputs, target = self._sampler(inputs, target)\n target = target[AUGMENTED_TARGETS]\n\n seg_pred, _ = self._test_s(self._model_trainers[0], inputs[AUGMENTED_INPUTS], target[IMAGE_TARGET],\n self._class_dice_gauge_on_patches)\n\n if self.current_test_step % 100 == 0:\n self._update_histograms(inputs[AUGMENTED_INPUTS], target)\n self._update_image_plots(self.phase, inputs[AUGMENTED_INPUTS].cpu().detach(),\n seg_pred.cpu().detach(),\n target[IMAGE_TARGET].cpu().detach(),\n target[DATASET_ID].cpu().detach())\n\n if seg_pred[torch.where(target[DATASET_ID] == ISEG_ID)].shape[0] != 0:\n self._iSEG_dice_gauge.update(np.array(self._model_trainers[0].compute_metrics(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == ISEG_ID)], dim=1),\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == ISEG_ID)],\n dim=1).long())[\"Dice\"].numpy()))\n\n self._iSEG_hausdorff_gauge.update(mean_hausdorff_distance(\n to_onehot(\n torch.argmax(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == ISEG_ID)], dim=1),\n dim=1), num_classes=4),\n to_onehot(\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == ISEG_ID)], dim=1).long(),\n num_classes=4))[-3:])\n\n self._iSEG_confusion_matrix_gauge.update((\n to_onehot(\n torch.argmax(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == ISEG_ID)], dim=1),\n dim=1, keepdim=False),\n num_classes=4),\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == ISEG_ID)].long(), dim=1)))\n\n else:\n self._iSEG_dice_gauge.update(np.zeros((3,)))\n self._iSEG_hausdorff_gauge.update(np.zeros((3,)))\n\n if seg_pred[torch.where(target[DATASET_ID] == MRBRAINS_ID)].shape[0] != 
0:\n self._MRBrainS_dice_gauge.update(np.array(self._model_trainers[0].compute_metrics(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == MRBRAINS_ID)], dim=1),\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == MRBRAINS_ID)],\n dim=1).long())[\"Dice\"].numpy()))\n\n self._MRBrainS_hausdorff_gauge.update(mean_hausdorff_distance(\n to_onehot(\n torch.argmax(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == MRBRAINS_ID)],\n dim=1),\n dim=1), num_classes=4),\n to_onehot(\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == MRBRAINS_ID)],\n dim=1).long(),\n num_classes=4))[-3:])\n\n self._MRBrainS_confusion_matrix_gauge.update((\n to_onehot(\n torch.argmax(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == MRBRAINS_ID)],\n dim=1),\n dim=1, keepdim=False),\n num_classes=4),\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == MRBRAINS_ID)].long(), dim=1)))\n\n else:\n self._MRBrainS_dice_gauge.update(np.zeros((3,)))\n self._MRBrainS_hausdorff_gauge.update(np.zeros((3,)))\n\n if seg_pred[torch.where(target[DATASET_ID] == ABIDE_ID)].shape[0] != 0:\n self._ABIDE_dice_gauge.update(np.array(self._model_trainers[0].compute_metrics(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == ABIDE_ID)], dim=1),\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == ABIDE_ID)],\n dim=1).long())[\"Dice\"].numpy()))\n\n self._ABIDE_hausdorff_gauge.update(mean_hausdorff_distance(\n to_onehot(\n torch.argmax(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == ABIDE_ID)], dim=1),\n dim=1), num_classes=4),\n to_onehot(\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == ABIDE_ID)], dim=1).long(),\n num_classes=4))[-3:])\n\n self._ABIDE_confusion_matrix_gauge.update((\n to_onehot(\n torch.argmax(\n torch.nn.functional.softmax(seg_pred[torch.where(target[DATASET_ID] == ABIDE_ID)], dim=1),\n dim=1, keepdim=False),\n num_classes=4),\n torch.squeeze(target[IMAGE_TARGET][torch.where(target[DATASET_ID] == ABIDE_ID)].long(), dim=1)))\n\n self._class_hausdorff_distance_gauge.update(\n mean_hausdorff_distance(\n to_onehot(torch.argmax(torch.nn.functional.softmax(seg_pred, dim=1), dim=1), num_classes=4),\n to_onehot(torch.squeeze(target[IMAGE_TARGET], dim=1).long(), num_classes=4))[-3:])\n\n self._general_confusion_matrix_gauge.update((\n to_onehot(torch.argmax(torch.nn.functional.softmax(seg_pred, dim=1), dim=1, keepdim=False),\n num_classes=4),\n torch.squeeze(target[IMAGE_TARGET].long(), dim=1)))\n\n def scheduler_step(self):\n self._model_trainers[0].scheduler_step()\n\n def on_epoch_begin(self):\n self._class_hausdorff_distance_gauge.reset()\n self._mean_hausdorff_distance_gauge.reset()\n self._iSEG_dice_gauge.reset()\n self._MRBrainS_dice_gauge.reset()\n self._ABIDE_dice_gauge.reset()\n self._iSEG_hausdorff_gauge.reset()\n self._MRBrainS_hausdorff_gauge.reset()\n self._ABIDE_hausdorff_gauge.reset()\n self._class_dice_gauge_on_patches.reset()\n self._general_confusion_matrix_gauge.reset()\n self._iSEG_confusion_matrix_gauge.reset()\n self._MRBrainS_confusion_matrix_gauge.reset()\n self._ABIDE_confusion_matrix_gauge.reset()\n\n def on_test_epoch_end(self):\n if self.epoch % 10 == 0:\n self._per_dataset_hausdorff_distance_gauge.reset()\n self._class_dice_gauge_on_reconstructed_iseg_images.reset()\n self._class_dice_gauge_on_reconstructed_mrbrains_images.reset()\n 
self._class_dice_gauge_on_reconstructed_abide_images.reset()\n self._hausdorff_distance_gauge_on_reconstructed_iseg_images.reset()\n self._hausdorff_distance_gauge_on_reconstructed_mrbrains_images.reset()\n self._hausdorff_distance_gauge_on_reconstructed_abide_images.reset()\n\n img_input = self._input_reconstructor.reconstruct_from_patches_3d()\n img_gt = self._gt_reconstructor.reconstruct_from_patches_3d()\n img_seg = self._segmentation_reconstructor.reconstruct_from_patches_3d()\n\n save_rebuilt_image(self._current_epoch, self._save_folder, self._dataset_configs.keys(), img_input,\n \"Input\")\n save_rebuilt_image(self._current_epoch, self._save_folder, self._dataset_configs.keys(), img_gt,\n \"Ground_Truth\")\n save_rebuilt_image(self._current_epoch, self._save_folder, self._dataset_configs.keys(), img_seg,\n \"Segmented\")\n\n if self._training_config.build_augmented_images:\n img_augmented_input = self._augmented_input_reconstructor.reconstruct_from_patches_3d()\n img_augmented_normalized = self._augmented_normalized_reconstructor.reconstruct_from_patches_3d()\n save_augmented_rebuilt_images(self._current_epoch, self._save_folder, self._dataset_configs.keys(),\n img_augmented_input, img_augmented_normalized)\n\n mean_mhd = []\n for dataset in self._dataset_configs.keys():\n self.custom_variables[\n \"Reconstructed Segmented {} Image\".format(dataset)] = self._seg_slicer.get_colored_slice(\n SliceType.AXIAL, np.expand_dims(img_seg[dataset], 0), 160).squeeze(0)\n self.custom_variables[\n \"Reconstructed Ground Truth {} Image\".format(dataset)] = self._seg_slicer.get_colored_slice(\n SliceType.AXIAL, np.expand_dims(img_gt[dataset], 0), 160).squeeze(0)\n self.custom_variables[\n \"Reconstructed Input {} Image\".format(dataset)] = self._slicer.get_slice(\n SliceType.AXIAL, np.expand_dims(img_input[dataset], 0), 160)\n\n if self._training_config.build_augmented_images:\n self.custom_variables[\n \"Reconstructed Augmented Input {} Image\".format(dataset)] = self._slicer.get_slice(\n SliceType.AXIAL, np.expand_dims(np.expand_dims(img_augmented_input[dataset], 0), 0), 160)\n self.custom_variables[\n \"Reconstructed Augmented {} After Normalization\".format(\n dataset)] = self._seg_slicer.get_colored_slice(\n SliceType.AXIAL,\n np.expand_dims(np.expand_dims(img_augmented_normalized[dataset], 0), 0), 160).squeeze(0)\n else:\n self.custom_variables[\"Reconstructed Augmented Input {} Image\".format(\n dataset)] = np.zeros((224, 192))\n self.custom_variables[\n \"Reconstructed Initial Noise {} Image\".format(\n dataset)] = np.zeros((224, 192))\n self.custom_variables[\n \"Reconstructed Augmented {} After Normalization\".format(\n dataset)] = np.zeros((224, 192))\n\n mean_mhd.append(mean_hausdorff_distance(\n to_onehot(torch.tensor(img_gt[dataset], dtype=torch.long), num_classes=4),\n to_onehot(torch.tensor(img_seg[dataset], dtype=torch.long), num_classes=4))[-3:].mean())\n\n metric = self._model_trainers[0].compute_metrics(\n to_onehot(torch.tensor(img_seg[dataset]).unsqueeze(0).long(), num_classes=4),\n torch.tensor(img_gt[dataset]).unsqueeze(0).long())\n\n self._class_dice_gauge_on_reconstructed_images.update(np.array(metric[\"Dice\"]))\n\n self._per_dataset_hausdorff_distance_gauge.update(np.array(mean_mhd))\n\n if \"iSEG\" in img_seg:\n metric = self._model_trainers[0].compute_metrics(\n to_onehot(torch.tensor(img_seg[\"iSEG\"]).unsqueeze(0).long(), num_classes=4),\n torch.tensor(img_gt[\"iSEG\"]).unsqueeze(0).long())\n 
self._class_dice_gauge_on_reconstructed_iseg_images.update(np.array(metric[\"Dice\"]))\n self._hausdorff_distance_gauge_on_reconstructed_iseg_images.update(mean_hausdorff_distance(\n to_onehot(torch.tensor(img_gt[\"iSEG\"], dtype=torch.long), num_classes=4),\n to_onehot(torch.tensor(img_seg[\"iSEG\"], dtype=torch.long), num_classes=4))[-3:])\n else:\n self._class_dice_gauge_on_reconstructed_iseg_images.update(np.array([0.0, 0.0, 0.0]))\n self._hausdorff_distance_gauge_on_reconstructed_iseg_images.update(np.array([0.0, 0.0, 0.0]))\n if \"MRBrainS\" in img_seg:\n metric = self._model_trainers[0].compute_metrics(\n to_onehot(torch.tensor(img_seg[\"MRBrainS\"]).unsqueeze(0).long(), num_classes=4),\n torch.tensor(img_gt[\"MRBrainS\"]).unsqueeze(0).long())\n self._class_dice_gauge_on_reconstructed_mrbrains_images.update(np.array(metric[\"Dice\"]))\n self._hausdorff_distance_gauge_on_reconstructed_mrbrains_images.update(mean_hausdorff_distance(\n to_onehot(torch.tensor(img_gt[\"MRBrainS\"], dtype=torch.long), num_classes=4),\n to_onehot(torch.tensor(img_seg[\"MRBrainS\"], dtype=torch.long), num_classes=4))[-3:])\n else:\n self._class_dice_gauge_on_reconstructed_mrbrains_images.update(np.array([0.0, 0.0, 0.0]))\n self._hausdorff_distance_gauge_on_reconstructed_mrbrains_images.update(np.array([0.0, 0.0, 0.0]))\n if \"ABIDE\" in img_seg:\n metric = self._model_trainers[0].compute_metrics(\n to_onehot(torch.tensor(img_seg[\"ABIDE\"]).unsqueeze(0).long(), num_classes=4),\n torch.tensor(img_gt[\"ABIDE\"]).unsqueeze(0).long())\n self._class_dice_gauge_on_reconstructed_abide_images.update(np.array(metric[\"Dice\"]))\n self._hausdorff_distance_gauge_on_reconstructed_abide_images.update(mean_hausdorff_distance(\n to_onehot(torch.tensor(img_gt[\"ABIDE\"], dtype=torch.long), num_classes=4),\n to_onehot(torch.tensor(img_seg[\"ABIDE\"], dtype=torch.long), num_classes=4))[-3:])\n else:\n self._class_dice_gauge_on_reconstructed_abide_images.update(np.array([0.0, 0.0, 0.0]))\n self._hausdorff_distance_gauge_on_reconstructed_abide_images.update(np.array([0.0, 0.0, 0.0]))\n\n if \"ABIDE\" not in self._dataset_configs.keys():\n self.custom_variables[\"Reconstructed Segmented ABIDE Image\"] = np.zeros((224, 192))\n self.custom_variables[\"Reconstructed Ground Truth ABIDE Image\"] = np.zeros((224, 192))\n self.custom_variables[\"Reconstructed Input ABIDE Image\"] = np.zeros((224, 192))\n if \"iSEG\" not in self._dataset_configs.keys():\n self.custom_variables[\"Reconstructed Segmented iSEG Image\"] = np.zeros((224, 192))\n self.custom_variables[\"Reconstructed Ground Truth iSEG Image\"] = np.zeros((224, 192))\n self.custom_variables[\"Reconstructed Input iSEG Image\"] = np.zeros((224, 192))\n if \"MRBrainS\" not in self._dataset_configs.keys():\n self.custom_variables[\"Reconstructed Segmented MRBrainS Image\"] = np.zeros((224, 192))\n self.custom_variables[\"Reconstructed Ground Truth MRBrainS Image\"] = np.zeros((224, 192))\n self.custom_variables[\"Reconstructed Input MRBrainS Image\"] = np.zeros((224, 192))\n\n self.custom_variables[\"Runtime\"] = to_html_time(timedelta(seconds=time.time() - self._start_time))\n\n if self._general_confusion_matrix_gauge._num_examples != 0:\n self.custom_variables[\"Confusion Matrix\"] = np.array(\n np.fliplr(self._general_confusion_matrix_gauge.compute().cpu().detach().numpy()))\n else:\n self.custom_variables[\"Confusion Matrix\"] = np.zeros((4, 4))\n\n if self._iSEG_confusion_matrix_gauge._num_examples != 0:\n self.custom_variables[\"iSEG Confusion Matrix\"] = np.array(\n 
np.fliplr(self._iSEG_confusion_matrix_gauge.compute().cpu().detach().numpy()))\n else:\n self.custom_variables[\"iSEG Confusion Matrix\"] = np.zeros((4, 4))\n\n if self._MRBrainS_confusion_matrix_gauge._num_examples != 0:\n self.custom_variables[\"MRBrainS Confusion Matrix\"] = np.array(\n np.fliplr(self._MRBrainS_confusion_matrix_gauge.compute().cpu().detach().numpy()))\n else:\n self.custom_variables[\"MRBrainS Confusion Matrix\"] = np.zeros((4, 4))\n\n if self._ABIDE_confusion_matrix_gauge._num_examples != 0:\n self.custom_variables[\"ABIDE Confusion Matrix\"] = np.array(\n np.fliplr(self._ABIDE_confusion_matrix_gauge.compute().cpu().detach().numpy()))\n else:\n self.custom_variables[\"ABIDE Confusion Matrix\"] = np.zeros((4, 4))\n\n self.custom_variables[\"Metric Table\"] = to_html([\"CSF\", \"Grey Matter\", \"White Matter\"],\n [\"DSC\", \"HD\"],\n [\n self._class_dice_gauge_on_patches.compute() if self._class_dice_gauge_on_patches.has_been_updated() else np.array(\n [0.0, 0.0, 0.0]),\n self._class_hausdorff_distance_gauge.compute() if self._class_hausdorff_distance_gauge.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n ])\n\n self.custom_variables[\n \"Dice score per class per epoch\"] = self._class_dice_gauge_on_patches.compute() if self._class_dice_gauge_on_patches.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Dice score per class per epoch on reconstructed image\"] = self._class_dice_gauge_on_reconstructed_images.compute() if self._class_dice_gauge_on_reconstructed_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Dice score per class per epoch on reconstructed iSEG image\"] = self._class_dice_gauge_on_reconstructed_iseg_images.compute() if self._class_dice_gauge_on_reconstructed_iseg_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Dice score per class per epoch on reconstructed MRBrainS image\"] = self._class_dice_gauge_on_reconstructed_mrbrains_images.compute() if self._class_dice_gauge_on_reconstructed_mrbrains_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Dice score per class per epoch on reconstructed ABIDE image\"] = self._class_dice_gauge_on_reconstructed_abide_images.compute() if self._class_dice_gauge_on_reconstructed_abide_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Hausdorff Distance per class per epoch on reconstructed iSEG image\"] = self._hausdorff_distance_gauge_on_reconstructed_iseg_images.compute() if self._hausdorff_distance_gauge_on_reconstructed_iseg_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Hausdorff Distance per class per epoch on reconstructed MRBrainS image\"] = self._hausdorff_distance_gauge_on_reconstructed_mrbrains_images.compute() if self._hausdorff_distance_gauge_on_reconstructed_mrbrains_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n self.custom_variables[\n \"Hausdorff Distance per class per epoch on reconstructed ABIDE image\"] = self._hausdorff_distance_gauge_on_reconstructed_abide_images.compute() if self._hausdorff_distance_gauge_on_reconstructed_abide_images.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])\n\n if self._valid_dice_gauge.compute() > self._previous_mean_dice:\n new_table = to_html_per_dataset(\n [\"CSF\", \"Grey Matter\", \"White Matter\"],\n [\"DSC\", \"HD\"],\n [\n [\n self._iSEG_dice_gauge.compute() if self._iSEG_dice_gauge.has_been_updated() else 
np.array(\n [0.0, 0.0, 0.0]),\n self._iSEG_hausdorff_gauge.compute() if self._iSEG_hausdorff_gauge.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])],\n [\n self._MRBrainS_dice_gauge.compute() if self._MRBrainS_dice_gauge.has_been_updated() else np.array(\n [0.0, 0.0, 0.0]),\n self._MRBrainS_hausdorff_gauge.compute() if self._MRBrainS_hausdorff_gauge.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])],\n [\n self._ABIDE_dice_gauge.compute() if self._ABIDE_dice_gauge.has_been_updated() else np.array(\n [0.0, 0.0, 0.0]),\n self._ABIDE_hausdorff_gauge.compute() if self._ABIDE_hausdorff_gauge.has_been_updated() else np.array(\n [0.0, 0.0, 0.0])]],\n [\"iSEG\", \"MRBrainS\", \"ABIDE\"])\n\n self.custom_variables[\"Per-Dataset Metric Table\"] = new_table\n self._previous_mean_dice = self._valid_dice_gauge.compute()\n self._previous_per_dataset_table = new_table\n else:\n self.custom_variables[\"Per-Dataset Metric Table\"] = self._previous_per_dataset_table\n self._valid_dice_gauge.reset()\n\n self.custom_variables[\"Mean Hausdorff Distance\"] = [\n self._class_hausdorff_distance_gauge.compute().mean() if self._class_hausdorff_distance_gauge.has_been_updated() else np.array(\n [0.0])]\n\n self.custom_variables[\n \"Per Dataset Mean Hausdorff Distance\"] = self._per_dataset_hausdorff_distance_gauge.compute()\n\n def _update_image_plots(self, phase, inputs, segmenter_predictions, target, dataset_ids):\n inputs = torch.nn.functional.interpolate(inputs, scale_factor=5, mode=\"trilinear\",\n align_corners=True).numpy()\n segmenter_predictions = torch.nn.functional.interpolate(\n torch.argmax(torch.nn.functional.softmax(segmenter_predictions, dim=1), dim=1, keepdim=True).float(),\n scale_factor=5, mode=\"nearest\").numpy()\n\n target = torch.nn.functional.interpolate(target.float(), scale_factor=5, mode=\"nearest\").numpy()\n\n self.custom_variables[\n \"{} Input Batch Process {}\".format(phase, self._run_config.local_rank)] = self._slicer.get_slice(\n SliceType.AXIAL, inputs, inputs.shape[2] // 2)\n self.custom_variables[\n \"{} Segmented Batch Process {}\".format(phase,\n self._run_config.local_rank)] = self._seg_slicer.get_colored_slice(\n SliceType.AXIAL, segmenter_predictions, segmenter_predictions.shape[2] // 2)\n self.custom_variables[\n \"{} Segmentation Ground Truth Batch Process {}\".format(phase,\n self._run_config.local_rank)] = self._seg_slicer.get_colored_slice(\n SliceType.AXIAL, target, target.shape[2] // 2)\n self.custom_variables[\n \"{} Label Map Batch Process {}\".format(phase,\n self._run_config.local_rank)] = self._label_mapper.get_label_map(\n dataset_ids)\n\n def _update_histograms(self, inputs, target):\n self.custom_variables[\"Input Intensity Histogram\"] = flatten(inputs.cpu().detach())\n self.custom_variables[\"Background Input Intensity Histogram\"] = inputs[\n torch.where(target[IMAGE_TARGET] == 0)].cpu().detach()\n self.custom_variables[\"CSF Input Intensity Histogram\"] = inputs[\n torch.where(target[IMAGE_TARGET] == 1)].cpu().detach()\n self.custom_variables[\"GM Input Intensity Histogram\"] = inputs[\n torch.where(target[IMAGE_TARGET] == 2)].cpu().detach()\n self.custom_variables[\"WM Input Intensity Histogram\"] = inputs[\n torch.where(target[IMAGE_TARGET] == 3)].cpu().detach()\n" ]
[ [ "torch.cat" ], [ "numpy.array", "numpy.zeros", "torch.nn.functional.interpolate", "torch.where", "torch.squeeze", "torch.tensor", "torch.nn.functional.softmax", "numpy.expand_dims" ] ]
arthus701/gpytorch
[ "c7eb6fd732ed15d7578421d851a2c274ffdca100" ]
[ "gpytorch/optim/ngd.py" ]
[ "#!/usr/bin/env python3\n\nfrom typing import Iterable, Union\n\nimport torch\n\n\nclass NGD(torch.optim.Optimizer):\n r\"\"\"Implements a natural gradient descent step.\n It **can only** be used in conjunction with a :obj:`~gpytorch.variational._NaturalVariationalDistribution`.\n\n .. seealso::\n - :obj:`gpytorch.variational.NaturalVariationalDistribution`\n - :obj:`gpytorch.variational.TrilNaturalVariationalDistribution`\n - The `natural gradient descent tutorial\n <examples/04_Variational_and_Approximate_GPs/Natural_Gradient_Descent.ipynb>`_\n for use instructions.\n\n Example:\n >>> ngd_optimizer = torch.optim.NGD(model.variational_parameters(), lr=0.1, momentum=0.9)\n >>> ngd_optimizer.zero_grad()\n >>> mll(gp_model(input), target).backward()\n >>> ngd_optimizer.step()\n \"\"\"\n\n def __init__(self, params: Iterable[Union[torch.nn.Parameter, dict]], num_data: int, lr: float = 0.1):\n self.num_data = num_data\n super().__init__(params, defaults=dict(lr=lr))\n\n @torch.no_grad()\n def step(self) -> torch.Tensor:\n \"\"\"Performs a single optimization step.\n \"\"\"\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n p.add_(p.grad, alpha=(-group[\"lr\"] * self.num_data))\n\n return None\n" ]
[ [ "torch.no_grad" ] ]
westrayhao/fiftyone
[ "364aeb0566f650df4fb45743f991d6ea286a23e8" ]
[ "fiftyone/core/aggregations.py" ]
[ "\"\"\"\nAggregations.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport numpy as np\n\nimport eta.core.utils as etau\n\nfrom fiftyone.core.expressions import ViewField as F\nimport fiftyone.core.media as fom\nimport fiftyone.core.utils as fou\n\n\nclass Aggregation(object):\n \"\"\"Abstract base class for all aggregations.\n\n :class:`Aggregation` instances represent an aggregation or reduction\n of a :class:`fiftyone.core.collections.SampleCollection` instance.\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def __init__(self, field_name, expr=None):\n self._field_name = field_name\n self._expr = expr\n\n @property\n def field_name(self):\n \"\"\"The field name being computed on.\"\"\"\n return self._field_name\n\n @property\n def expr(self):\n \"\"\"The :class:`fiftyone.core.expressions.ViewExpression` or MongoDB\n expression that will be applied to the field before aggregating, if any.\n \"\"\"\n return self._expr\n\n def to_mongo(self, sample_collection):\n \"\"\"Returns the MongoDB aggregation pipeline for this aggregation.\n\n Args:\n sample_collection: the\n :class:`fiftyone.core.collections.SampleCollection` to which\n the aggregation is being applied\n\n Returns:\n a MongoDB aggregation pipeline (list of dicts)\n \"\"\"\n raise NotImplementedError(\"subclasses must implement to_mongo()\")\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the aggregation result\n \"\"\"\n raise NotImplementedError(\"subclasses must implement parse_result()\")\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n the aggregation result\n \"\"\"\n raise NotImplementedError(\"subclasses must implement default_result()\")\n\n def _parse_field_and_expr(\n self, sample_collection, auto_unwind=True, allow_missing=False\n ):\n return _parse_field_and_expr(\n sample_collection,\n self._field_name,\n self._expr,\n auto_unwind,\n allow_missing,\n )\n\n\nclass AggregationError(Exception):\n \"\"\"An error raised during the execution of an :class:`Aggregation`.\"\"\"\n\n pass\n\n\nclass Bounds(Aggregation):\n \"\"\"Computes the bounds of a numeric field of a collection.\n\n ``None``-valued fields are ignored.\n\n This aggregation is typically applied to *numeric* field types (or lists of\n such types):\n\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.FloatField`\n\n Examples::\n\n import fiftyone as fo\n from fiftyone import ViewField as F\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n numeric_field=1.0,\n numeric_list_field=[1, 2, 3],\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n numeric_field=4.0,\n numeric_list_field=[1, 2],\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n numeric_field=None,\n numeric_list_field=None,\n ),\n ]\n )\n\n #\n # Compute the bounds of a numeric field\n #\n\n aggregation = fo.Bounds(\"numeric_field\")\n bounds = dataset.aggregate(aggregation)\n print(bounds) # (min, max)\n\n #\n # Compute the a bounds of a numeric list field\n #\n\n aggregation = fo.Bounds(\"numeric_list_field\")\n bounds = dataset.aggregate(aggregation)\n print(bounds) # (min, 
max)\n\n #\n # Compute the bounds of a transformation of a numeric field\n #\n\n aggregation = fo.Bounds(\"numeric_field\", expr=2 * (F() + 1))\n bounds = dataset.aggregate(aggregation)\n print(bounds) # (min, max)\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``(None, None)``\n \"\"\"\n return None, None\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the ``(min, max)`` bounds\n \"\"\"\n return d[\"min\"], d[\"max\"]\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n pipeline.append(\n {\n \"$group\": {\n \"_id\": None,\n \"min\": {\"$min\": \"$\" + path},\n \"max\": {\"$max\": \"$\" + path},\n }\n }\n )\n\n return pipeline\n\n\nclass Count(Aggregation):\n \"\"\"Counts the number of field values in a collection.\n\n ``None``-valued fields are ignored.\n\n If no field is provided, the samples themselves are counted.\n\n Examples::\n\n import fiftyone as fo\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n predictions=fo.Detections(\n detections=[\n fo.Detection(label=\"cat\"),\n fo.Detection(label=\"dog\"),\n ]\n ),\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n predictions=fo.Detections(\n detections=[\n fo.Detection(label=\"cat\"),\n fo.Detection(label=\"rabbit\"),\n fo.Detection(label=\"squirrel\"),\n ]\n ),\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n predictions=None,\n ),\n ]\n )\n\n #\n # Count the number of samples in the dataset\n #\n\n aggregation = fo.Count()\n count = dataset.aggregate(aggregation)\n print(count) # the count\n\n #\n # Count the number of samples with `predictions`\n #\n\n aggregation = fo.Count(\"predictions\")\n count = dataset.aggregate(aggregation)\n print(count) # the count\n\n #\n # Count the number of objects in the `predictions` field\n #\n\n aggregation = fo.Count(\"predictions.detections\")\n count = dataset.aggregate(aggregation)\n print(count) # the count\n\n #\n # Count the number of samples with more than 2 predictions\n #\n\n expr = (F(\"detections\").length() > 2).if_else(F(\"detections\"), None)\n aggregation = fo.Count(\"predictions\", expr=expr)\n count = dataset.aggregate(aggregation)\n print(count) # the count\n\n Args:\n field_name (None): the name of the field to operate on. 
If none is\n provided, the samples themselves are counted\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def __init__(self, field_name=None, expr=None):\n super().__init__(field_name, expr=expr)\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``0``\n \"\"\"\n return 0\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the count\n \"\"\"\n return d[\"count\"]\n\n def to_mongo(self, sample_collection):\n if self._field_name is None:\n return [{\"$count\": \"count\"}]\n\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n if sample_collection.media_type != fom.VIDEO or path != \"frames\":\n pipeline.append({\"$match\": {\"$expr\": {\"$gt\": [\"$\" + path, None]}}})\n\n pipeline.append({\"$count\": \"count\"})\n\n return pipeline\n\n\nclass CountValues(Aggregation):\n \"\"\"Counts the occurrences of field values in a collection.\n\n This aggregation is typically applied to *countable* field types (or lists\n of such types):\n\n - :class:`fiftyone.core.fields.BooleanField`\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.StringField`\n\n Examples::\n\n import fiftyone as fo\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n tags=[\"sunny\"],\n predictions=fo.Detections(\n detections=[\n fo.Detection(label=\"cat\"),\n fo.Detection(label=\"dog\"),\n ]\n ),\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n tags=[\"cloudy\"],\n predictions=fo.Detections(\n detections=[\n fo.Detection(label=\"cat\"),\n fo.Detection(label=\"rabbit\"),\n ]\n ),\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n predictions=None,\n ),\n ]\n )\n\n #\n # Compute the tag counts in the dataset\n #\n\n aggregation = fo.CountValues(\"tags\")\n counts = dataset.aggregate(aggregation)\n print(counts) # dict mapping values to counts\n\n #\n # Compute the predicted label counts in the dataset\n #\n\n aggregation = fo.CountValues(\"predictions.detections.label\")\n counts = dataset.aggregate(aggregation)\n print(counts) # dict mapping values to counts\n\n #\n # Compute the predicted label counts after some normalization\n #\n\n expr = F().map_values({\"cat\": \"pet\", \"dog\": \"pet\"}).upper()\n aggregation = fo.CountValues(\"predictions.detections.label\", expr=expr)\n counts = dataset.aggregate(aggregation)\n print(counts) # dict mapping values to counts\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``{}``\n \"\"\"\n return {}\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n a dict mapping values to counts\n \"\"\"\n return {i[\"k\"]: i[\"count\"] for i in d[\"result\"]}\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n pipeline += [\n {\"$group\": {\"_id\": \"$\" + path, \"count\": {\"$sum\": 
1}}},\n {\n \"$group\": {\n \"_id\": None,\n \"result\": {\"$push\": {\"k\": \"$_id\", \"count\": \"$count\"}},\n }\n },\n ]\n\n return pipeline\n\n\nclass Distinct(Aggregation):\n \"\"\"Computes the distinct values of a field in a collection.\n\n ``None``-valued fields are ignored.\n\n This aggregation is typically applied to *countable* field types (or lists\n of such types):\n\n - :class:`fiftyone.core.fields.BooleanField`\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.StringField`\n\n Examples::\n\n import fiftyone as fo\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n tags=[\"sunny\"],\n predictions=fo.Detections(\n detections=[\n fo.Detection(label=\"cat\"),\n fo.Detection(label=\"dog\"),\n ]\n ),\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n tags=[\"sunny\", \"cloudy\"],\n predictions=fo.Detections(\n detections=[\n fo.Detection(label=\"cat\"),\n fo.Detection(label=\"rabbit\"),\n ]\n ),\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n predictions=None,\n ),\n ]\n )\n\n #\n # Get the distinct tags in a dataset\n #\n\n aggregation = fo.Distinct(\"tags\")\n values = dataset.aggregate(aggregation)\n print(values) # list of distinct values\n\n #\n # Get the distinct predicted labels in a dataset\n #\n\n aggregation = fo.Distinct(\"predictions.detections.label\")\n values = dataset.aggregate(aggregation)\n print(values) # list of distinct values\n\n #\n # Get the distinct predicted labels after some normalization\n #\n\n expr = F().map_values({\"cat\": \"pet\", \"dog\": \"pet\"}).upper()\n aggregation = fo.Distinct(\"predictions.detections.label\", expr=expr)\n values = dataset.aggregate(aggregation)\n print(values) # list of distinct values\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``[]``\n \"\"\"\n return []\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n a sorted list of distinct values\n \"\"\"\n return sorted(d[\"values\"])\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n pipeline += [\n {\"$match\": {\"$expr\": {\"$gt\": [\"$\" + path, None]}}},\n {\"$group\": {\"_id\": None, \"values\": {\"$addToSet\": \"$\" + path}}},\n ]\n\n return pipeline\n\n\nclass HistogramValues(Aggregation):\n \"\"\"Computes a histogram of the field values in a collection.\n\n This aggregation is typically applied to *numeric* field types (or\n lists of such types):\n\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.FloatField`\n\n Examples::\n\n import numpy as np\n import matplotlib.pyplot as plt\n\n import fiftyone as fo\n\n samples = []\n for idx in range(100):\n samples.append(\n fo.Sample(\n filepath=\"/path/to/image%d.png\" % idx,\n numeric_field=np.random.randn(),\n numeric_list_field=list(np.random.randn(10)),\n )\n )\n\n dataset = fo.Dataset()\n dataset.add_samples(samples)\n\n def plot_hist(counts, edges):\n counts = np.asarray(counts)\n edges = np.asarray(edges)\n left_edges = edges[:-1]\n widths = edges[1:] - edges[:-1]\n plt.bar(left_edges, counts, width=widths, 
align=\"edge\")\n\n #\n # Compute a histogram of a numeric field\n #\n\n aggregation = fo.HistogramValues(\"numeric_field\", bins=50)\n counts, edges, other = dataset.aggregate(aggregation)\n\n plot_hist(counts, edges)\n plt.show(block=False)\n\n #\n # Compute the histogram of a numeric list field\n #\n\n aggregation = fo.HistogramValues(\"numeric_list_field\", bins=50)\n counts, edges, other = dataset.aggregate(aggregation)\n\n plot_hist(counts, edges)\n plt.show(block=False)\n\n #\n # Compute the histogram of a transformation of a numeric field\n #\n\n aggregation = fo.HistogramValues(\n \"numeric_field\", expr=2 * (F() + 1), bins=50\n )\n counts, edges, other = dataset.aggregate(aggregation)\n\n plot_hist(counts, edges)\n plt.show(block=False)\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n bins (None): can be either an integer number of bins to generate or a\n monotonically increasing sequence specifying the bin edges to use.\n By default, 10 bins are created. If ``bins`` is an integer and no\n ``range`` is specified, bin edges are automatically computed from\n the bounds of the field\n range (None): a ``(lower, upper)`` tuple specifying a range in which to\n generate equal-width bins. Only applicable when ``bins`` is an\n integer or ``None``\n auto (False): whether to automatically choose bin edges in an attempt\n to evenly distribute the counts in each bin. If this option is\n chosen, ``bins`` will only be used if it is an integer, and the\n ``range`` parameter is ignored\n \"\"\"\n\n def __init__(\n self, field_name, expr=None, bins=None, range=None, auto=False\n ):\n super().__init__(field_name, expr=expr)\n self._bins = bins\n self._range = range\n self._auto = auto\n\n self._num_bins = None\n self._edges = None\n self._edges_last_used = None\n self._parse_args()\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n a tuple of\n\n - counts: ``[]``\n - edges: ``[]``\n - other: ``0``\n \"\"\"\n return [], [], 0\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n a tuple of\n\n - counts: a list of counts in each bin\n - edges: an increasing list of bin edges of length\n ``len(counts) + 1``. 
Note that each bin is treated as having an\n inclusive lower boundary and exclusive upper boundary,\n ``[lower, upper)``, including the rightmost bin\n - other: the number of items outside the bins\n \"\"\"\n if self._auto:\n return self._parse_result_auto(d)\n\n return self._parse_result_edges(d)\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n if self._auto:\n pipeline.append(\n {\n \"$bucketAuto\": {\n \"groupBy\": \"$\" + path,\n \"buckets\": self._num_bins,\n \"output\": {\"count\": {\"$sum\": 1}},\n }\n }\n )\n else:\n if self._edges is not None:\n edges = self._edges\n else:\n edges = self._compute_bin_edges(sample_collection)\n\n self._edges_last_used = edges\n pipeline.append(\n {\n \"$bucket\": {\n \"groupBy\": \"$\" + path,\n \"boundaries\": edges,\n \"default\": \"other\", # counts documents outside of bins\n \"output\": {\"count\": {\"$sum\": 1}},\n }\n }\n )\n\n pipeline.append({\"$group\": {\"_id\": None, \"bins\": {\"$push\": \"$$ROOT\"}}})\n\n return pipeline\n\n def _parse_args(self):\n if self._bins is None:\n bins = 10\n else:\n bins = self._bins\n\n if self._auto:\n if etau.is_numeric(bins):\n self._num_bins = bins\n else:\n self._num_bins = 10\n\n return\n\n if not etau.is_numeric(bins):\n # User-provided bin edges\n self._edges = list(bins)\n return\n\n if self._range is not None:\n # Linearly-spaced bins within `range`\n self._edges = list(\n np.linspace(self._range[0], self._range[1], bins + 1)\n )\n else:\n # Compute bin edges from bounds\n self._num_bins = bins\n\n def _compute_bin_edges(self, sample_collection):\n bounds = sample_collection.bounds(self._field_name, expr=self._expr)\n if any(b is None for b in bounds):\n bounds = (-1, -1)\n\n return list(\n np.linspace(bounds[0], bounds[1] + 1e-6, self._num_bins + 1)\n )\n\n def _parse_result_edges(self, d):\n _edges_array = np.array(self._edges_last_used)\n edges = list(_edges_array)\n counts = [0] * (len(edges) - 1)\n other = 0\n for di in d[\"bins\"]:\n left = di[\"_id\"]\n if left == \"other\":\n other = di[\"count\"]\n else:\n idx = np.abs(_edges_array - left).argmin()\n counts[idx] = di[\"count\"]\n\n return counts, edges, other\n\n def _parse_result_auto(self, d):\n counts = []\n edges = []\n for di in d[\"bins\"]:\n counts.append(di[\"count\"])\n edges.append(di[\"_id\"][\"min\"])\n\n edges.append(di[\"_id\"][\"max\"])\n\n return counts, edges, 0\n\n\nclass Mean(Aggregation):\n \"\"\"Computes the arithmetic mean of the field values of a collection.\n\n ``None``-valued fields are ignored.\n\n This aggregation is typically applied to *numeric* field types (or lists of\n such types):\n\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.FloatField`\n\n Examples::\n\n import fiftyone as fo\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n numeric_field=1.0,\n numeric_list_field=[1, 2, 3],\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n numeric_field=4.0,\n numeric_list_field=[1, 2],\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n numeric_field=None,\n numeric_list_field=None,\n ),\n ]\n )\n\n #\n # Compute the mean of a numeric field\n #\n\n aggregation = fo.Mean(\"numeric_field\")\n mean = dataset.aggregate(aggregation)\n print(mean) # the mean\n\n #\n # Compute the mean of a numeric list field\n #\n\n aggregation = fo.Mean(\"numeric_list_field\")\n mean = dataset.aggregate(aggregation)\n print(mean) # the mean\n\n #\n # Compute the mean of a 
transformation of a numeric field\n #\n\n aggregation = fo.Mean(\"numeric_field\", expr=2 * (F() + 1))\n mean = dataset.aggregate(aggregation)\n print(mean) # the mean\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``0``\n \"\"\"\n return 0\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the mean\n \"\"\"\n return d[\"mean\"]\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n pipeline.append(\n {\"$group\": {\"_id\": None, \"mean\": {\"$avg\": \"$\" + path}}}\n )\n\n return pipeline\n\n\nclass Std(Aggregation):\n \"\"\"Computes the standard deviation of the field values of a collection.\n\n ``None``-valued fields are ignored.\n\n This aggregation is typically applied to *numeric* field types (or lists of\n such types):\n\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.FloatField`\n\n Examples::\n\n import fiftyone as fo\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n numeric_field=1.0,\n numeric_list_field=[1, 2, 3],\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n numeric_field=4.0,\n numeric_list_field=[1, 2],\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n numeric_field=None,\n numeric_list_field=None,\n ),\n ]\n )\n\n #\n # Compute the standard deviation of a numeric field\n #\n\n aggregation = fo.Std(\"numeric_field\")\n std = dataset.aggregate(aggregation)\n print(std) # the standard deviation\n\n #\n # Compute the standard deviation of a numeric list field\n #\n\n aggregation = fo.Std(\"numeric_list_field\")\n std = dataset.aggregate(aggregation)\n print(std) # the standard deviation\n\n #\n # Compute the standard deviation of a transformation of a numeric field\n #\n\n aggregation = fo.Std(\"numeric_field\", expr=2 * (F() + 1))\n std = dataset.aggregate(aggregation)\n print(std) # the standard deviation\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n sample (False): whether to compute the sample standard deviation rather\n than the population standard deviation\n \"\"\"\n\n def __init__(self, field_name, expr=None, sample=False):\n super().__init__(field_name, expr=expr)\n self._sample = sample\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``0``\n \"\"\"\n return 0\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the standard deviation\n \"\"\"\n return d[\"std\"]\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n op = \"$stdDevSamp\" if self._sample else \"$stdDevPop\"\n pipeline.append({\"$group\": {\"_id\": None, \"std\": {op: \"$\" + path}}})\n\n return pipeline\n\n\nclass Sum(Aggregation):\n \"\"\"Computes the sum of the field values of a 
collection.\n\n ``None``-valued fields are ignored.\n\n This aggregation is typically applied to *numeric* field types (or lists of\n such types):\n\n - :class:`fiftyone.core.fields.IntField`\n - :class:`fiftyone.core.fields.FloatField`\n\n Examples::\n\n import fiftyone as fo\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n numeric_field=1.0,\n numeric_list_field=[1, 2, 3],\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n numeric_field=4.0,\n numeric_list_field=[1, 2],\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n numeric_field=None,\n numeric_list_field=None,\n ),\n ]\n )\n\n #\n # Compute the sum of a numeric field\n #\n\n aggregation = fo.Sum(\"numeric_field\")\n total = dataset.aggregate(aggregation)\n print(total) # the sum\n\n #\n # Compute the sum of a numeric list field\n #\n\n aggregation = fo.Sum(\"numeric_list_field\")\n total = dataset.aggregate(aggregation)\n print(total) # the sum\n\n #\n # Compute the sum of a transformation of a numeric field\n #\n\n aggregation = fo.Sum(\"numeric_field\", expr=2 * (F() + 1))\n total = dataset.aggregate(aggregation)\n print(total) # the sum\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n \"\"\"\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``0``\n \"\"\"\n return 0\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the sum\n \"\"\"\n return d[\"sum\"]\n\n def to_mongo(self, sample_collection):\n path, pipeline, _ = self._parse_field_and_expr(sample_collection)\n\n pipeline.append({\"$group\": {\"_id\": None, \"sum\": {\"$sum\": \"$\" + path}}})\n\n return pipeline\n\n\nclass Values(Aggregation):\n \"\"\"Extracts the values of the field from all samples in a collection.\n\n .. note::\n\n Unlike other aggregations, :class:`Values` does not automatically\n unwind list fields, which ensures that the returned values match the\n potentially-nested structure of the documents.\n\n You can opt-in to unwinding specific list fields using the ``[]``\n syntax, or you can pass the optional ``unwind=True`` parameter to\n unwind all supported list fields. 
See :ref:`aggregations-list-fields`\n for more information.\n\n Examples::\n\n import fiftyone as fo\n from fiftyone import ViewField as F\n\n dataset = fo.Dataset()\n dataset.add_samples(\n [\n fo.Sample(\n filepath=\"/path/to/image1.png\",\n numeric_field=1.0,\n numeric_list_field=[1, 2, 3],\n ),\n fo.Sample(\n filepath=\"/path/to/image2.png\",\n numeric_field=4.0,\n numeric_list_field=[1, 2],\n ),\n fo.Sample(\n filepath=\"/path/to/image3.png\",\n numeric_field=None,\n numeric_list_field=None,\n ),\n ]\n )\n\n #\n # Get all values of a field\n #\n\n aggregation = fo.Values(\"numeric_field\")\n values = dataset.aggregate(aggregation)\n print(values) # [1.0, 4.0, None]\n\n #\n # Get all values of a list field\n #\n\n aggregation = fo.Values(\"numeric_list_field\")\n values = dataset.aggregate(aggregation)\n print(values) # [[1, 2, 3], [1, 2], None]\n\n #\n # Get all values of transformed field\n #\n\n aggregation = fo.Values(\"numeric_field\", expr=2 * (F() + 1))\n values = dataset.aggregate(aggregation)\n print(values) # [4.0, 10.0, None]\n\n Args:\n field_name: the name of the field to operate on\n expr (None): an optional\n :class:`fiftyone.core.expressions.ViewExpression` or\n `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_\n to apply to the field before aggregating\n missing_value (None): a value to insert for missing or ``None``-valued\n fields\n unwind (False): whether to automatically unwind all recognized list\n fields\n \"\"\"\n\n def __init__(\n self,\n field_name,\n expr=None,\n missing_value=None,\n unwind=False,\n _allow_missing=False,\n ):\n field_name, found_id_field = _handle_id_fields(field_name)\n super().__init__(field_name, expr=expr)\n\n self._missing_value = missing_value\n self._unwind = unwind\n self._allow_missing = _allow_missing\n self._found_id_field = found_id_field\n self._found_array_field = None\n self._num_list_fields = None\n\n def default_result(self):\n \"\"\"Returns the default result for this aggregation.\n\n Returns:\n ``[]``\n \"\"\"\n return []\n\n def parse_result(self, d):\n \"\"\"Parses the output of :meth:`to_mongo`.\n\n Args:\n d: the result dict\n\n Returns:\n the list of field values\n \"\"\"\n values = d[\"values\"]\n\n if self._found_id_field:\n level = 1 + self._num_list_fields\n return _transform_values(values, str, level=level)\n\n if self._found_array_field:\n fcn = fou.deserialize_numpy_array\n level = 1 + self._num_list_fields\n return _transform_values(values, fcn, level=level)\n\n return values\n\n def to_mongo(self, sample_collection):\n path, pipeline, other_list_fields = self._parse_field_and_expr(\n sample_collection,\n auto_unwind=self._unwind,\n allow_missing=self._allow_missing,\n )\n\n self._found_array_field = sample_collection._is_array_field(path)\n self._num_list_fields = len(other_list_fields)\n\n pipeline += _make_extract_values_pipeline(\n path, other_list_fields, self._missing_value\n )\n\n return pipeline\n\n\ndef _handle_id_fields(field_name):\n if field_name == \"id\":\n field_name = \"_id\"\n found_id_field = True\n elif field_name.endswith(\".id\"):\n field_name = field_name[: -len(\".id\")] + \"._id\"\n found_id_field = True\n else:\n found_id_field = False\n\n return field_name, found_id_field\n\n\ndef _transform_values(values, fcn, level=1):\n if values is None:\n return None\n\n if level < 1:\n return fcn(values)\n\n return [_transform_values(v, fcn, level=level - 1) for v in values]\n\n\ndef _make_extract_values_pipeline(path, list_fields, 
missing_value):\n if not list_fields:\n root = path\n else:\n root = list_fields[0]\n\n expr = (F() != None).if_else(F(), missing_value)\n\n if list_fields:\n subfield = path[len(list_fields[-1]) + 1 :]\n expr = _extract_list_values(subfield, expr)\n\n if len(list_fields) > 1:\n for list_field1, list_field2 in zip(\n reversed(list_fields[:-1]), reversed(list_fields[1:])\n ):\n inner_list_field = list_field2[len(list_field1) + 1 :]\n expr = _extract_list_values(inner_list_field, expr)\n\n return [\n {\"$set\": {root: expr.to_mongo(prefix=\"$\" + root)}},\n {\"$group\": {\"_id\": None, \"values\": {\"$push\": \"$\" + root}}},\n ]\n\n\ndef _extract_list_values(subfield, expr):\n if subfield:\n map_expr = F(subfield).apply(expr)\n else:\n map_expr = expr\n\n return F().map(map_expr)\n\n\ndef _parse_field_and_expr(\n sample_collection, field_name, expr, auto_unwind, allow_missing\n):\n if expr is not None:\n pipeline, _ = sample_collection._make_set_field_pipeline(\n field_name, expr\n )\n else:\n pipeline = []\n\n (\n path,\n is_frame_field,\n unwind_list_fields,\n other_list_fields,\n ) = sample_collection._parse_field_name(\n field_name, auto_unwind=auto_unwind, allow_missing=allow_missing\n )\n\n if is_frame_field and auto_unwind:\n pipeline.extend(\n [{\"$unwind\": \"$frames\"}, {\"$replaceRoot\": {\"newRoot\": \"$frames\"}}]\n )\n\n for list_field in unwind_list_fields:\n pipeline.append({\"$unwind\": \"$\" + list_field})\n\n if other_list_fields:\n # Don't unroll terminal lists unless explicitly requested\n other_list_fields = [\n lf for lf in other_list_fields if lf != field_name\n ]\n\n if other_list_fields:\n root = other_list_fields[0]\n leaf = path[len(root) + 1 :]\n else:\n root = path\n leaf = None\n\n pipeline.append({\"$project\": {root: True}})\n\n return path, pipeline, other_list_fields\n" ]
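A quick illustration of the nested-value handling used by the Values aggregation in the code field above: the sketch below is a standalone copy of the _transform_values helper (renamed without the leading underscore; the sample input is invented for illustration), showing how a function is applied `level` lists deep while None entries pass through unchanged.

def transform_values(values, fcn, level=1):
    # Apply `fcn` to elements nested `level` lists deep; None passes through unchanged.
    if values is None:
        return None
    if level < 1:
        return fcn(values)
    return [transform_values(v, fcn, level=level - 1) for v in values]

print(transform_values([[1, 2], None, [3]], str, level=2))  # [['1', '2'], None, ['3']]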
[ [ "numpy.array", "numpy.abs", "numpy.linspace" ] ]
caron14/pycaret
[ "d0b079ba35fc211875b1abfe4b8753c367d8ace1" ]
[ "pycaret/internal/tabular.py" ]
[ "# Module: Classification\n# Author: Moez Ali <[email protected]>\n# License: MIT\n# Release: PyCaret 2.2\n# Last modified : 26/08/2020\n\nfrom enum import Enum, auto\nimport math\nfrom pycaret.internal.meta_estimators import (\n PowerTransformedTargetRegressor,\n get_estimator_from_meta_estimator,\n)\nfrom pycaret.internal.pipeline import (\n add_estimator_to_pipeline,\n get_pipeline_estimator_label,\n make_internal_pipeline,\n estimator_pipeline,\n merge_pipelines,\n Pipeline as InternalPipeline,\n)\nfrom pycaret.internal.utils import (\n color_df,\n normalize_custom_transformers,\n nullcontext,\n true_warm_start,\n can_early_stop,\n infer_ml_usecase,\n set_n_jobs,\n)\nimport pycaret.internal.patches.sklearn\nimport pycaret.internal.patches.yellowbrick\nfrom pycaret.internal.logging import get_logger\nfrom pycaret.internal.plots.yellowbrick import show_yellowbrick_plot\nfrom pycaret.internal.plots.helper import MatplotlibDefaultDPI\nfrom pycaret.internal.Display import Display, is_in_colab\nfrom pycaret.internal.distributions import *\nfrom pycaret.internal.validation import *\nfrom pycaret.internal.tunable import TunableMixin\nimport pycaret.containers.metrics.classification\nimport pycaret.containers.metrics.regression\nimport pycaret.containers.metrics.clustering\nimport pycaret.containers.metrics.anomaly\nimport pycaret.containers.models.classification\nimport pycaret.containers.models.regression\nimport pycaret.containers.models.clustering\nimport pycaret.containers.models.anomaly\nimport pycaret.internal.preprocess\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport datetime\nimport time\nimport random\nimport gc\nimport multiprocessing\nfrom copy import deepcopy\nfrom sklearn.base import clone\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.preprocessing import LabelEncoder\nfrom typing import List, Tuple, Any, Union, Optional, Dict\nfrom collections import Iterable\nimport warnings\nfrom IPython.utils import io\nimport traceback\nfrom unittest.mock import patch\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport scikitplot as skplt\nfrom packaging import version\n\nwarnings.filterwarnings(\"ignore\")\n\n_available_plots = {}\n\n\nclass MLUsecase(Enum):\n CLASSIFICATION = auto()\n REGRESSION = auto()\n CLUSTERING = auto()\n ANOMALY = auto()\n\n\ndef _is_unsupervised(ml_usecase: MLUsecase) -> bool:\n return ml_usecase == MLUsecase.CLUSTERING or ml_usecase == MLUsecase.ANOMALY\n\n\ndef setup(\n data: pd.DataFrame,\n target: str,\n ml_usecase: str,\n available_plots: dict,\n train_size: float = 0.7,\n test_data: Optional[pd.DataFrame] = None,\n preprocess: bool = True,\n imputation_type: str = \"simple\",\n iterative_imputation_iters: int = 5,\n categorical_features: Optional[List[str]] = None,\n categorical_imputation: str = \"mode\",\n categorical_iterative_imputer: Union[str, Any] = \"lightgbm\",\n ordinal_features: Optional[Dict[str, list]] = None,\n high_cardinality_features: Optional[List[str]] = None,\n high_cardinality_method: str = \"frequency\",\n numeric_features: Optional[List[str]] = None,\n numeric_imputation: str = \"mean\", # method 'zero' added in pycaret==2.1\n numeric_iterative_imputer: Union[str, Any] = \"lightgbm\",\n date_features: Optional[List[str]] = None,\n ignore_features: Optional[List[str]] = None,\n normalize: bool = False,\n normalize_method: str = \"zscore\",\n transformation: bool = False,\n transformation_method: str = \"yeo-johnson\",\n 
handle_unknown_categorical: bool = True,\n unknown_categorical_method: str = \"least_frequent\",\n pca: bool = False,\n pca_method: str = \"linear\",\n pca_components: Optional[float] = None,\n ignore_low_variance: bool = False,\n combine_rare_levels: bool = False,\n rare_level_threshold: float = 0.10,\n bin_numeric_features: Optional[List[str]] = None,\n remove_outliers: bool = False,\n outliers_threshold: float = 0.05,\n remove_multicollinearity: bool = False,\n multicollinearity_threshold: float = 0.9,\n remove_perfect_collinearity: bool = True,\n create_clusters: bool = False,\n cluster_iter: int = 20,\n polynomial_features: bool = False,\n polynomial_degree: int = 2,\n trigonometry_features: bool = False,\n polynomial_threshold: float = 0.1,\n group_features: Optional[List[str]] = None,\n group_names: Optional[List[str]] = None,\n feature_selection: bool = False,\n feature_selection_threshold: float = 0.8,\n feature_selection_method: str = \"classic\", # boruta algorithm added in pycaret==2.1\n feature_interaction: bool = False,\n feature_ratio: bool = False,\n interaction_threshold: float = 0.01,\n # classification specific\n fix_imbalance: bool = False,\n fix_imbalance_method: Optional[Any] = None,\n # regression specific\n transform_target=False,\n transform_target_method=\"box-cox\",\n data_split_shuffle: bool = True,\n data_split_stratify: Union[bool, List[str]] = False, # added in pycaret==2.2\n fold_strategy: Union[str, Any] = \"kfold\", # added in pycaret==2.2\n fold: int = 10, # added in pycaret==2.2\n fold_shuffle: bool = False,\n fold_groups: Optional[Union[str, pd.DataFrame]] = None,\n n_jobs: Optional[int] = -1,\n use_gpu: bool = False, # added in pycaret==2.1\n custom_pipeline: Union[\n Any, Tuple[str, Any], List[Any], List[Tuple[str, Any]]\n ] = None,\n html: bool = True,\n session_id: Optional[int] = None,\n log_experiment: bool = False,\n experiment_name: Optional[str] = None,\n log_plots: Union[bool, list] = False,\n log_profile: bool = False,\n log_data: bool = False,\n silent: bool = False,\n verbose: bool = True,\n profile: bool = False,\n profile_kwargs: Dict[str, Any] = None,\n display: Optional[Display] = None,\n):\n\n \"\"\"\n This function initializes the environment in pycaret and creates the transformation\n pipeline to prepare the data for modeling and deployment. setup() must called before\n executing any other function in pycaret. 
It takes two mandatory parameters:\n data and name of the target column.\n\n All other parameters are optional.\n\n \"\"\"\n\n function_params_str = \", \".join(\n [f\"{k}={v}\" for k, v in locals().items() if k != \"data\"]\n )\n\n global _available_plots\n\n _available_plots = available_plots\n\n warnings.filterwarnings(\"ignore\")\n\n from pycaret.utils import __version__\n\n ver = __version__\n\n # create logger\n global logger\n\n logger = get_logger()\n\n logger.info(\"PyCaret Supervised Module\")\n logger.info(f\"ML Usecase: {ml_usecase}\")\n logger.info(f\"version {ver}\")\n logger.info(\"Initializing setup()\")\n logger.info(f\"setup({function_params_str})\")\n\n # logging environment and libraries\n logger.info(\"Checking environment\")\n\n from platform import python_version, platform, python_build, machine\n\n logger.info(f\"python_version: {python_version()}\")\n logger.info(f\"python_build: {python_build()}\")\n logger.info(f\"machine: {machine()}\")\n logger.info(f\"platform: {platform()}\")\n\n try:\n import psutil\n\n logger.info(f\"Memory: {psutil.virtual_memory()}\")\n logger.info(f\"Physical Core: {psutil.cpu_count(logical=False)}\")\n logger.info(f\"Logical Core: {psutil.cpu_count(logical=True)}\")\n except:\n logger.warning(\n \"cannot find psutil installation. memory not traceable. Install psutil using pip to enable memory logging.\"\n )\n\n logger.info(\"Checking libraries\")\n\n try:\n from pandas import __version__\n\n logger.info(f\"pd=={__version__}\")\n except ImportError:\n logger.warning(\"pandas not found\")\n\n try:\n from numpy import __version__\n\n logger.info(f\"numpy=={__version__}\")\n except ImportError:\n logger.warning(\"numpy not found\")\n\n try:\n from sklearn import __version__\n\n logger.info(f\"sklearn=={__version__}\")\n except ImportError:\n logger.warning(\"sklearn not found\")\n\n try:\n from lightgbm import __version__\n\n logger.info(f\"lightgbm=={__version__}\")\n except ImportError:\n logger.warning(\"lightgbm not found\")\n\n try:\n from catboost import __version__\n\n logger.info(f\"catboost=={__version__}\")\n except ImportError:\n logger.warning(\"catboost not found\")\n\n try:\n from xgboost import __version__\n\n logger.info(f\"xgboost=={__version__}\")\n except ImportError:\n logger.warning(\"xgboost not found\")\n\n try:\n from mlflow.version import VERSION\n\n warnings.filterwarnings(\"ignore\")\n logger.info(f\"mlflow=={VERSION}\")\n except ImportError:\n logger.warning(\"mlflow not found\")\n\n # run_time\n runtime_start = time.time()\n\n logger.info(\"Checking Exceptions\")\n\n # checking data type\n if not isinstance(data, pd.DataFrame):\n raise TypeError(f\"data passed must be of type pandas.DataFrame\")\n if data.shape[0] == 0:\n raise ValueError(f\"data passed must be a positive dataframe\")\n\n # checking train size parameter\n if type(train_size) is not float:\n raise TypeError(\"train_size parameter only accepts float value.\")\n if train_size <= 0 or train_size > 1:\n raise ValueError(\"train_size parameter has to be positive and not above 1.\")\n\n possible_ml_usecases = [\"classification\", \"regression\", \"clustering\", \"anomaly\"]\n if ml_usecase not in possible_ml_usecases:\n raise ValueError(\n f\"ml_usecase must be one of {', '.join(possible_ml_usecases)}.\"\n )\n\n ml_usecase = MLUsecase[ml_usecase.upper()]\n\n # checking target parameter\n if not _is_unsupervised(ml_usecase) and target not in data.columns:\n raise ValueError(\n f\"Target parameter: {target} does not exist in the data provided.\"\n )\n\n 
# checking session_id\n if session_id is not None:\n if type(session_id) is not int:\n raise TypeError(\"session_id parameter must be an integer.\")\n\n # checking profile parameter\n if type(profile) is not bool:\n raise TypeError(\"profile parameter only accepts True or False.\")\n\n if profile_kwargs is not None:\n if type(profile_kwargs) is not dict:\n raise TypeError(\"profile_kwargs can only be a dict.\")\n else:\n profile_kwargs = {}\n\n # checking normalize parameter\n if type(normalize) is not bool:\n raise TypeError(\"normalize parameter only accepts True or False.\")\n\n # checking transformation parameter\n if type(transformation) is not bool:\n raise TypeError(\"transformation parameter only accepts True or False.\")\n\n all_cols = list(data.columns)\n if not _is_unsupervised(ml_usecase):\n all_cols.remove(target)\n\n # checking imputation type\n allowed_imputation_type = [\"simple\", \"iterative\"]\n if imputation_type not in allowed_imputation_type:\n raise ValueError(\n \"imputation_type parameter only accepts 'simple' or 'iterative'.\"\n )\n\n if type(iterative_imputation_iters) is not int or iterative_imputation_iters <= 0:\n raise TypeError(\n \"iterative_imputation_iters parameter must be an integer greater than 0.\"\n )\n\n # checking categorical imputation\n allowed_categorical_imputation = [\"constant\", \"mode\"]\n if categorical_imputation not in allowed_categorical_imputation:\n raise ValueError(\n f\"categorical_imputation param only accepts {', '.join(allowed_categorical_imputation)}.\"\n )\n\n # ordinal_features\n if ordinal_features is not None:\n if type(ordinal_features) is not dict:\n raise TypeError(\n \"ordinal_features must be of type dictionary with column name as key \"\n \"and ordered values as list.\"\n )\n\n # ordinal features check\n if ordinal_features is not None:\n ordinal_features = ordinal_features.copy()\n data_cols = data.columns.drop(target, errors=\"ignore\")\n ord_keys = ordinal_features.keys()\n\n for i in ord_keys:\n if i not in data_cols:\n raise ValueError(\n \"Column name passed as a key in ordinal_features param doesnt exist.\"\n )\n\n for k in ord_keys:\n if data[k].nunique() != len(ordinal_features[k]):\n raise ValueError(\n \"Levels passed in ordinal_features param doesnt match with levels in data.\"\n )\n\n for i in ord_keys:\n value_in_keys = ordinal_features.get(i)\n value_in_data = list(data[i].unique().astype(str))\n for j in value_in_keys:\n if str(j) not in value_in_data:\n raise ValueError(\n f\"Column name '{i}' doesn't contain any level named '{j}'.\"\n )\n\n # high_cardinality_features\n if high_cardinality_features is not None:\n if type(high_cardinality_features) is not list:\n raise TypeError(\n \"high_cardinality_features param only accepts name of columns as a list.\"\n )\n data_cols = data.columns.drop(target, errors=\"ignore\")\n for high_cardinality_feature in high_cardinality_features:\n if high_cardinality_feature not in data_cols:\n raise ValueError(\n f\"Item {high_cardinality_feature} in high_cardinality_features parameter is either target \"\n f\"column or doesn't exist in the dataset.\"\n )\n\n # stratify\n if data_split_stratify:\n if (\n type(data_split_stratify) is not list\n and type(data_split_stratify) is not bool\n ):\n raise TypeError(\n \"data_split_stratify parameter only accepts a bool or a list of strings.\"\n )\n\n if not data_split_shuffle:\n raise TypeError(\n \"data_split_stratify parameter requires data_split_shuffle to be set to True.\"\n )\n\n # high_cardinality_methods\n 
high_cardinality_allowed_methods = [\"frequency\", \"clustering\"]\n if high_cardinality_method not in high_cardinality_allowed_methods:\n raise ValueError(\n f\"high_cardinality_method parameter only accepts {', '.join(high_cardinality_allowed_methods)}.\"\n )\n\n # checking numeric imputation\n allowed_numeric_imputation = [\"mean\", \"median\", \"zero\"]\n if numeric_imputation not in allowed_numeric_imputation:\n raise ValueError(\n f\"numeric_imputation parameter only accepts {', '.join(allowed_numeric_imputation)}.\"\n )\n\n # checking normalize method\n allowed_normalize_method = [\"zscore\", \"minmax\", \"maxabs\", \"robust\"]\n if normalize_method not in allowed_normalize_method:\n raise ValueError(\n f\"normalize_method parameter only accepts {', '.join(allowed_normalize_method)}.\"\n )\n\n # checking transformation method\n allowed_transformation_method = [\"yeo-johnson\", \"quantile\"]\n if transformation_method not in allowed_transformation_method:\n raise ValueError(\n f\"transformation_method parameter only accepts {', '.join(allowed_transformation_method)}.\"\n )\n\n # handle unknown categorical\n if type(handle_unknown_categorical) is not bool:\n raise TypeError(\n \"handle_unknown_categorical parameter only accepts True or False.\"\n )\n\n # unknown categorical method\n unknown_categorical_method_available = [\"least_frequent\", \"most_frequent\"]\n\n if unknown_categorical_method not in unknown_categorical_method_available:\n raise TypeError(\n f\"unknown_categorical_method parameter only accepts {', '.join(unknown_categorical_method_available)}.\"\n )\n\n # check pca\n if type(pca) is not bool:\n raise TypeError(\"PCA parameter only accepts True or False.\")\n\n # pca method check\n allowed_pca_methods = [\"linear\", \"kernel\", \"incremental\"]\n if pca_method not in allowed_pca_methods:\n raise ValueError(\n f\"pca method parameter only accepts {', '.join(allowed_pca_methods)}.\"\n )\n\n # pca components check\n if pca is True:\n if pca_method != \"linear\":\n if pca_components is not None:\n if (type(pca_components)) is not int:\n raise TypeError(\n \"pca_components parameter must be integer when pca_method is not 'linear'.\"\n )\n\n # pca components check 2\n if pca is True:\n if pca_method != \"linear\":\n if pca_components is not None:\n if pca_components > len(data.columns) - 1:\n raise TypeError(\n \"pca_components parameter cannot be greater than original features space.\"\n )\n\n # pca components check 3\n if pca is True:\n if pca_method == \"linear\":\n if pca_components is not None:\n if type(pca_components) is not float:\n if pca_components > len(data.columns) - 1:\n raise TypeError(\n \"pca_components parameter cannot be greater than original features space or float between 0 - 1.\"\n )\n\n # check ignore_low_variance\n if type(ignore_low_variance) is not bool:\n raise TypeError(\"ignore_low_variance parameter only accepts True or False.\")\n\n # check ignore_low_variance\n if type(combine_rare_levels) is not bool:\n raise TypeError(\"combine_rare_levels parameter only accepts True or False.\")\n\n # check rare_level_threshold\n if (\n type(rare_level_threshold) is not float\n and rare_level_threshold < 0\n or rare_level_threshold > 1\n ):\n raise TypeError(\n \"rare_level_threshold parameter must be a float between 0 and 1.\"\n )\n\n # bin numeric features\n if bin_numeric_features is not None:\n if type(bin_numeric_features) is not list:\n raise TypeError(\"bin_numeric_features parameter must be a list.\")\n for bin_numeric_feature in 
bin_numeric_features:\n if type(bin_numeric_feature) is not str:\n raise TypeError(\"bin_numeric_features parameter item must be a string.\")\n if bin_numeric_feature not in all_cols:\n raise ValueError(\n f\"bin_numeric_feature: {bin_numeric_feature} is either target column or \"\n f\"does not exist in the dataset.\"\n )\n\n # remove_outliers\n if type(remove_outliers) is not bool:\n raise TypeError(\"remove_outliers parameter only accepts True or False.\")\n\n # outliers_threshold\n if type(outliers_threshold) is not float:\n raise TypeError(\"outliers_threshold must be a float between 0 and 1.\")\n\n # remove_multicollinearity\n if type(remove_multicollinearity) is not bool:\n raise TypeError(\n \"remove_multicollinearity parameter only accepts True or False.\"\n )\n\n # multicollinearity_threshold\n if type(multicollinearity_threshold) is not float:\n raise TypeError(\"multicollinearity_threshold must be a float between 0 and 1.\")\n\n # create_clusters\n if type(create_clusters) is not bool:\n raise TypeError(\"create_clusters parameter only accepts True or False.\")\n\n # cluster_iter\n if type(cluster_iter) is not int:\n raise TypeError(\"cluster_iter must be a integer greater than 1.\")\n\n # polynomial_features\n if type(polynomial_features) is not bool:\n raise TypeError(\"polynomial_features only accepts True or False.\")\n\n # polynomial_degree\n if type(polynomial_degree) is not int:\n raise TypeError(\"polynomial_degree must be an integer.\")\n\n # polynomial_features\n if type(trigonometry_features) is not bool:\n raise TypeError(\"trigonometry_features only accepts True or False.\")\n\n # polynomial threshold\n if type(polynomial_threshold) is not float:\n raise TypeError(\"polynomial_threshold must be a float between 0 and 1.\")\n\n # group features\n if group_features is not None:\n if type(group_features) is not list:\n raise TypeError(\"group_features must be of type list.\")\n\n if group_names is not None:\n if type(group_names) is not list:\n raise TypeError(\"group_names must be of type list.\")\n\n # cannot drop target\n if ignore_features is not None:\n if target in ignore_features:\n raise ValueError(\"cannot drop target column.\")\n\n # feature_selection\n if type(feature_selection) is not bool:\n raise TypeError(\"feature_selection only accepts True or False.\")\n\n # feature_selection_threshold\n if type(feature_selection_threshold) is not float:\n raise TypeError(\"feature_selection_threshold must be a float between 0 and 1.\")\n\n # feature_selection_method\n feature_selection_methods = [\"boruta\", \"classic\"]\n if feature_selection_method not in feature_selection_methods:\n raise TypeError(\n f\"feature_selection_method must be one of {', '.join(feature_selection_methods)}\"\n )\n\n # feature_interaction\n if type(feature_interaction) is not bool:\n raise TypeError(\"feature_interaction only accepts True or False.\")\n\n # feature_ratio\n if type(feature_ratio) is not bool:\n raise TypeError(\"feature_ratio only accepts True or False.\")\n\n # interaction_threshold\n if type(interaction_threshold) is not float:\n raise TypeError(\"interaction_threshold must be a float between 0 and 1.\")\n\n # categorical\n if categorical_features is not None:\n for i in categorical_features:\n if i not in all_cols:\n raise ValueError(\n \"Column type forced is either target column or doesn't exist in the dataset.\"\n )\n\n # numeric\n if numeric_features is not None:\n for i in numeric_features:\n if i not in all_cols:\n raise ValueError(\n \"Column type forced is either 
target column or doesn't exist in the dataset.\"\n )\n\n # date features\n if date_features is not None:\n for i in date_features:\n if i not in all_cols:\n raise ValueError(\n \"Column type forced is either target column or doesn't exist in the dataset.\"\n )\n\n # drop features\n if ignore_features is not None:\n for i in ignore_features:\n if i not in all_cols:\n raise ValueError(\n \"Feature ignored is either target column or doesn't exist in the dataset.\"\n )\n\n # log_experiment\n if type(log_experiment) is not bool:\n raise TypeError(\"log_experiment parameter only accepts True or False.\")\n\n # log_profile\n if type(log_profile) is not bool:\n raise TypeError(\"log_profile parameter only accepts True or False.\")\n\n # experiment_name\n if experiment_name is not None:\n if type(experiment_name) is not str:\n raise TypeError(\"experiment_name parameter must be str if not None.\")\n\n # silent\n if type(silent) is not bool:\n raise TypeError(\"silent parameter only accepts True or False.\")\n\n # remove_perfect_collinearity\n if type(remove_perfect_collinearity) is not bool:\n raise TypeError(\n \"remove_perfect_collinearity parameter only accepts True or False.\"\n )\n\n # html\n if type(html) is not bool:\n raise TypeError(\"html parameter only accepts True or False.\")\n\n # use_gpu\n if use_gpu != \"force\" and type(use_gpu) is not bool:\n raise TypeError(\"use_gpu parameter only accepts 'force', True or False.\")\n\n # data_split_shuffle\n if type(data_split_shuffle) is not bool:\n raise TypeError(\"data_split_shuffle parameter only accepts True or False.\")\n\n possible_fold_strategy = [\"kfold\", \"stratifiedkfold\", \"groupkfold\", \"timeseries\"]\n if not (\n fold_strategy in possible_fold_strategy\n or is_sklearn_cv_generator(fold_strategy)\n ):\n raise TypeError(\n f\"fold_strategy parameter must be either a scikit-learn compatible CV generator object or one of {', '.join(possible_fold_strategy)}.\"\n )\n\n if fold_strategy == \"groupkfold\" and (fold_groups is None or len(fold_groups) == 0):\n raise ValueError(\n \"'groupkfold' fold_strategy requires 'fold_groups' to be a non-empty array-like object.\"\n )\n\n if isinstance(fold_groups, str):\n if fold_groups not in all_cols:\n raise ValueError(\n f\"Column {fold_groups} used for fold_groups is not present in the dataset.\"\n )\n\n # checking fold parameter\n if type(fold) is not int:\n raise TypeError(\"fold parameter only accepts integer value.\")\n\n # fold_shuffle\n if type(fold_shuffle) is not bool:\n raise TypeError(\"fold_shuffle parameter only accepts True or False.\")\n\n # log_plots\n if isinstance(log_plots, list):\n for i in log_plots:\n if i not in _available_plots:\n raise ValueError(\n f\"Incorrect value for log_plots '{i}'. 
Possible values are: {', '.join(_available_plots.keys())}.\"\n )\n elif type(log_plots) is not bool:\n raise TypeError(\"log_plots parameter must be a bool or a list.\")\n\n # log_data\n if type(log_data) is not bool:\n raise TypeError(\"log_data parameter only accepts True or False.\")\n\n # fix_imbalance\n if type(fix_imbalance) is not bool:\n raise TypeError(\"fix_imbalance parameter only accepts True or False.\")\n\n # fix_imbalance_method\n if fix_imbalance:\n if fix_imbalance_method is not None:\n if hasattr(fix_imbalance_method, \"fit_resample\"):\n pass\n else:\n raise TypeError(\n \"fix_imbalance_method must contain resampler with fit_resample method.\"\n )\n\n # check transform_target\n if type(transform_target) is not bool:\n raise TypeError(\"transform_target parameter only accepts True or False.\")\n\n # transform_target_method\n allowed_transform_target_method = [\"box-cox\", \"yeo-johnson\"]\n if transform_target_method not in allowed_transform_target_method:\n raise ValueError(\n f\"transform_target_method param only accepts {', '.join(allowed_transform_target_method)}.\"\n )\n\n # pandas option\n pd.set_option(\"display.max_columns\", 500)\n pd.set_option(\"display.max_rows\", 500)\n\n # generate USI for mlflow tracking\n import secrets\n\n # declaring global variables to be accessed by other functions\n logger.info(\"Declaring global variables\")\n global _ml_usecase, USI, html_param, X, y, X_train, X_test, y_train, y_test, seed, prep_pipe, experiment__, fold_shuffle_param, n_jobs_param, _gpu_n_jobs_param, create_model_container, master_model_container, display_container, exp_name_log, logging_param, log_plots_param, fix_imbalance_param, fix_imbalance_method_param, transform_target_param, transform_target_method_param, data_before_preprocess, target_param, gpu_param, _all_models, _all_models_internal, _all_metrics, _internal_pipeline, stratify_param, fold_generator, fold_param, fold_groups_param, fold_groups_param_full, imputation_regressor, imputation_classifier, iterative_imputation_iters_param\n\n USI = secrets.token_hex(nbytes=2)\n logger.info(f\"USI: {USI}\")\n\n _ml_usecase = ml_usecase\n\n global pycaret_globals\n supervised_globals = {\n \"y\",\n \"X_train\",\n \"X_test\",\n \"y_train\",\n \"y_test\",\n }\n common_globals = {\n \"_ml_usecase\",\n \"_available_plots\",\n \"pycaret_globals\",\n \"USI\",\n \"html_param\",\n \"X\",\n \"seed\",\n \"prep_pipe\",\n \"experiment__\",\n \"n_jobs_param\",\n \"_gpu_n_jobs_param\",\n \"create_model_container\",\n \"master_model_container\",\n \"display_container\",\n \"exp_name_log\",\n \"logging_param\",\n \"log_plots_param\",\n \"transform_target_param\",\n \"transform_target_method_param\",\n \"data_before_preprocess\",\n \"target_param\",\n \"gpu_param\",\n \"_all_models\",\n \"_all_models_internal\",\n \"_all_metrics\",\n \"_internal_pipeline\",\n \"imputation_regressor\",\n \"imputation_classifier\",\n \"iterative_imputation_iters_param\",\n \"fold_shuffle_param\",\n \"fix_imbalance_param\",\n \"fix_imbalance_method_param\",\n \"stratify_param\",\n \"fold_generator\",\n \"fold_param\",\n \"fold_groups_param\",\n \"fold_groups_param_full\",\n }\n if not _is_unsupervised(_ml_usecase):\n pycaret_globals = common_globals.union(supervised_globals)\n else:\n pycaret_globals = common_globals\n\n logger.info(f\"pycaret_globals: {pycaret_globals}\")\n\n # create html_param\n html_param = html\n\n logger.info(\"Preparing display monitor\")\n\n if not display:\n # progress bar\n max_steps = 3\n\n progress_args = {\"max\": 
max_steps}\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n monitor_rows=monitor_rows,\n )\n\n display.display_progress()\n display.display_monitor()\n\n logger.info(\"Importing libraries\")\n\n # general dependencies\n\n from sklearn.model_selection import train_test_split\n\n # setting sklearn config to print all parameters including default\n import sklearn\n\n sklearn.set_config(print_changed_only=False)\n\n # define highlight function for function grid to display\n def highlight_max(s):\n is_max = s == True\n return [\"background-color: lightgreen\" if v else \"\" for v in is_max]\n\n logger.info(\"Copying data for preprocessing\")\n\n # copy original data for pandas profiler\n data_before_preprocess = data.copy()\n\n # generate seed to be used globally\n seed = random.randint(150, 9000) if session_id is None else session_id\n\n np.random.seed(seed)\n\n _internal_pipeline = []\n\n \"\"\"\n preprocessing starts here\n \"\"\"\n\n display.update_monitor(1, \"Preparing Data for Modeling\")\n display.display_monitor()\n\n # define parameters for preprocessor\n\n logger.info(\"Declaring preprocessing parameters\")\n\n # categorical features\n cat_features_pass = categorical_features or []\n\n # numeric features\n numeric_features_pass = numeric_features or []\n\n # drop features\n ignore_features_pass = ignore_features or []\n\n # date features\n date_features_pass = date_features or []\n\n # categorical imputation strategy\n cat_dict = {\"constant\": \"not_available\", \"mode\": \"most frequent\"}\n categorical_imputation_pass = cat_dict[categorical_imputation]\n\n # transformation method strategy\n trans_dict = {\"yeo-johnson\": \"yj\", \"quantile\": \"quantile\"}\n trans_method_pass = trans_dict[transformation_method]\n\n # pass method\n pca_dict = {\n \"linear\": \"pca_liner\",\n \"kernel\": \"pca_kernal\",\n \"incremental\": \"incremental\",\n \"pls\": \"pls\",\n }\n pca_method_pass = pca_dict[pca_method]\n\n # pca components\n if pca is True:\n if pca_components is None:\n if pca_method == \"linear\":\n pca_components_pass = 0.99\n else:\n pca_components_pass = int((len(data.columns) - 1) * 0.5)\n\n else:\n pca_components_pass = pca_components\n\n else:\n pca_components_pass = 0.99\n\n apply_binning_pass = False if bin_numeric_features is None else True\n features_to_bin_pass = bin_numeric_features or []\n\n # trignometry\n trigonometry_features_pass = [\"sin\", \"cos\", \"tan\"] if trigonometry_features else []\n\n # group features\n # =============#\n\n # apply grouping\n apply_grouping_pass = True if group_features is not None else False\n\n # group features listing\n if apply_grouping_pass is True:\n\n if type(group_features[0]) is str:\n group_features_pass = []\n group_features_pass.append(group_features)\n else:\n group_features_pass = group_features\n\n else:\n\n group_features_pass = [[]]\n\n # group names\n if apply_grouping_pass is True:\n\n if (group_names is None) or (len(group_names) != len(group_features_pass)):\n group_names_pass = list(np.arange(len(group_features_pass)))\n group_names_pass = [f\"group_{i}\" for i in group_names_pass]\n\n else:\n group_names_pass = group_names\n\n else:\n group_names_pass = []\n\n # feature interactions\n\n apply_feature_interactions_pass = (\n 
True if feature_interaction or feature_ratio else False\n )\n\n interactions_to_apply_pass = []\n\n if feature_interaction:\n interactions_to_apply_pass.append(\"multiply\")\n\n if feature_ratio:\n interactions_to_apply_pass.append(\"divide\")\n\n # unknown categorical\n unkn_dict = {\"least_frequent\": \"least frequent\", \"most_frequent\": \"most frequent\"}\n unknown_categorical_method_pass = unkn_dict[unknown_categorical_method]\n\n # ordinal_features\n apply_ordinal_encoding_pass = True if ordinal_features is not None else False\n\n ordinal_columns_and_categories_pass = (\n ordinal_features if apply_ordinal_encoding_pass else {}\n )\n\n apply_cardinality_reduction_pass = (\n True if high_cardinality_features is not None else False\n )\n\n hi_card_dict = {\"frequency\": \"count\", \"clustering\": \"cluster\"}\n cardinal_method_pass = hi_card_dict[high_cardinality_method]\n\n cardinal_features_pass = (\n high_cardinality_features if apply_cardinality_reduction_pass else []\n )\n\n display_dtypes_pass = False if silent else True\n\n # transform target method\n transform_target_param = transform_target\n transform_target_method_param = transform_target_method\n\n # create n_jobs_param\n n_jobs_param = n_jobs\n\n cuml_version = None\n if use_gpu:\n try:\n from cuml import __version__\n \n cuml_version = __version__\n logger.info(f\"cuml=={cuml_version}\")\n except:\n logger.warning(f\"cuML not found\")\n\n if cuml_version is None or not version.parse(cuml_version) >= version.parse(\"0.15\"):\n message = f\"cuML is outdated or not found. Required version is >=0.15, got {__version__}\"\n if use_gpu == \"force\":\n raise ImportError(message)\n else:\n logger.warning(message)\n\n # create _gpu_n_jobs_param\n _gpu_n_jobs_param = n_jobs if not use_gpu else 1\n\n # create gpu_param var\n gpu_param = use_gpu\n\n iterative_imputation_iters_param = iterative_imputation_iters\n\n # creating variables to be used later in the function\n train_data = data_before_preprocess.copy()\n if _is_unsupervised(_ml_usecase):\n target = \"UNSUPERVISED_DUMMY_TARGET\"\n train_data[target] = 2\n # just to add diversified values to target\n train_data[target][0:3] = 3\n X_before_preprocess = train_data.drop(target, axis=1)\n y_before_preprocess = train_data[target]\n\n imputation_regressor = numeric_iterative_imputer\n imputation_classifier = categorical_iterative_imputer\n imputation_regressor_name = \"Bayesian Ridge\" # todo change\n imputation_classifier_name = \"Random Forest Classifier\"\n\n if imputation_type == \"iterative\":\n logger.info(\"Setting up iterative imputation\")\n\n iterative_imputer_models_globals = globals().copy()\n iterative_imputer_models_globals[\"y_train\"] = y_before_preprocess\n iterative_imputer_models_globals[\"X_train\"] = X_before_preprocess\n iterative_imputer_classification_models = {\n k: v\n for k, v in pycaret.containers.models.classification.get_all_model_containers(\n iterative_imputer_models_globals, raise_errors=True\n ).items()\n if not v.is_special\n }\n iterative_imputer_regression_models = {\n k: v\n for k, v in pycaret.containers.models.regression.get_all_model_containers(\n iterative_imputer_models_globals, raise_errors=True\n ).items()\n if not v.is_special\n }\n\n if not (\n (\n isinstance(imputation_regressor, str)\n and imputation_regressor in iterative_imputer_regression_models\n )\n or hasattr(imputation_regressor, \"predict\")\n ):\n raise ValueError(\n f\"numeric_iterative_imputer param must be either a scikit-learn estimator or a string - one of {', 
'.join(iterative_imputer_regression_models.keys())}.\"\n )\n\n if not (\n (\n isinstance(imputation_classifier, str)\n and imputation_classifier in iterative_imputer_classification_models\n )\n or hasattr(imputation_classifier, \"predict\")\n ):\n raise ValueError(\n f\"categorical_iterative_imputer param must be either a scikit-learn estimator or a string - one of {', '.join(iterative_imputer_classification_models.keys())}.\"\n )\n\n if isinstance(imputation_regressor, str):\n imputation_regressor = iterative_imputer_regression_models[\n imputation_regressor\n ]\n imputation_regressor_name = imputation_regressor.name\n imputation_regressor = imputation_regressor.class_def(\n **imputation_regressor.args\n )\n else:\n imputation_regressor_name = type(imputation_regressor).__name__\n\n if isinstance(imputation_classifier, str):\n imputation_classifier = iterative_imputer_classification_models[\n imputation_classifier\n ]\n imputation_classifier_name = imputation_classifier.name\n imputation_classifier = imputation_classifier.class_def(\n **imputation_classifier.args\n )\n else:\n imputation_classifier_name = type(imputation_classifier).__name__\n\n logger.info(\"Creating preprocessing pipeline\")\n\n prep_pipe = pycaret.internal.preprocess.Preprocess_Path_One(\n train_data=train_data,\n ml_usecase=\"classification\"\n if _ml_usecase == MLUsecase.CLASSIFICATION\n else \"regression\",\n imputation_type=imputation_type,\n target_variable=target,\n imputation_regressor=imputation_regressor,\n imputation_classifier=imputation_classifier,\n imputation_max_iter=iterative_imputation_iters_param,\n categorical_features=cat_features_pass,\n apply_ordinal_encoding=apply_ordinal_encoding_pass,\n ordinal_columns_and_categories=ordinal_columns_and_categories_pass,\n apply_cardinality_reduction=apply_cardinality_reduction_pass,\n cardinal_method=cardinal_method_pass,\n cardinal_features=cardinal_features_pass,\n numerical_features=numeric_features_pass,\n time_features=date_features_pass,\n features_todrop=ignore_features_pass,\n numeric_imputation_strategy=numeric_imputation,\n categorical_imputation_strategy=categorical_imputation_pass,\n scale_data=normalize,\n scaling_method=normalize_method,\n Power_transform_data=transformation,\n Power_transform_method=trans_method_pass,\n apply_untrained_levels_treatment=handle_unknown_categorical,\n untrained_levels_treatment_method=unknown_categorical_method_pass,\n apply_pca=pca,\n pca_method=pca_method_pass,\n pca_variance_retained_or_number_of_components=pca_components_pass,\n apply_zero_nearZero_variance=ignore_low_variance,\n club_rare_levels=combine_rare_levels,\n rara_level_threshold_percentage=rare_level_threshold,\n apply_binning=apply_binning_pass,\n features_to_binn=features_to_bin_pass,\n remove_outliers=remove_outliers,\n outlier_contamination_percentage=outliers_threshold,\n outlier_methods=[\"pca\"],\n remove_multicollinearity=remove_multicollinearity,\n maximum_correlation_between_features=multicollinearity_threshold,\n remove_perfect_collinearity=remove_perfect_collinearity,\n cluster_entire_data=create_clusters,\n range_of_clusters_to_try=cluster_iter,\n apply_polynomial_trigonometry_features=polynomial_features,\n max_polynomial=polynomial_degree,\n trigonometry_calculations=trigonometry_features_pass,\n top_poly_trig_features_to_select_percentage=polynomial_threshold,\n apply_grouping=apply_grouping_pass,\n features_to_group_ListofList=group_features_pass,\n group_name=group_names_pass,\n apply_feature_selection=feature_selection,\n 
feature_selection_top_features_percentage=feature_selection_threshold,\n feature_selection_method=feature_selection_method,\n apply_feature_interactions=apply_feature_interactions_pass,\n feature_interactions_to_apply=interactions_to_apply_pass,\n feature_interactions_top_features_to_select_percentage=interaction_threshold,\n display_types=display_dtypes_pass, # this is for inferred input box\n random_state=seed,\n )\n\n dtypes = prep_pipe.named_steps[\"dtypes\"]\n\n display.move_progress()\n logger.info(\"Preprocessing pipeline created successfully\")\n\n try:\n res_type = [\"quit\", \"Quit\", \"exit\", \"EXIT\", \"q\", \"Q\", \"e\", \"E\", \"QUIT\", \"Exit\"]\n res = dtypes.response\n\n if res in res_type:\n sys.exit(\n \"(Process Exit): setup has been interupted with user command 'quit'. setup must rerun.\"\n )\n\n except:\n logger.error(\n \"(Process Exit): setup has been interupted with user command 'quit'. setup must rerun.\"\n )\n\n if not preprocess:\n prep_pipe.steps = prep_pipe.steps[:1]\n\n \"\"\"\n preprocessing ends here\n \"\"\"\n\n # reset pandas option\n pd.reset_option(\"display.max_rows\")\n pd.reset_option(\"display.max_columns\")\n\n logger.info(\"Creating global containers\")\n\n # create an empty list for pickling later.\n experiment__ = []\n\n # CV params\n fold_param = fold\n fold_groups_param = None\n fold_groups_param_full = None\n if fold_groups is not None:\n if isinstance(fold_groups, str):\n fold_groups_param = X_before_preprocess[fold_groups]\n else:\n fold_groups_param = fold_groups\n if pd.isnull(fold_groups_param).any():\n raise ValueError(f\"fold_groups cannot contain NaNs.\")\n fold_shuffle_param = fold_shuffle\n\n from sklearn.model_selection import (\n StratifiedKFold,\n KFold,\n GroupKFold,\n TimeSeriesSplit,\n )\n\n fold_seed = seed if fold_shuffle_param else None\n if fold_strategy == \"kfold\":\n fold_generator = KFold(\n fold_param, random_state=fold_seed, shuffle=fold_shuffle_param\n )\n elif fold_strategy == \"stratifiedkfold\":\n fold_generator = StratifiedKFold(\n fold_param, random_state=fold_seed, shuffle=fold_shuffle_param\n )\n elif fold_strategy == \"groupkfold\":\n fold_generator = GroupKFold(fold_param)\n elif fold_strategy == \"timeseries\":\n fold_generator = TimeSeriesSplit(fold_param)\n else:\n fold_generator = fold_strategy\n\n # create create_model_container\n create_model_container = []\n\n # create master_model_container\n master_model_container = []\n\n # create display container\n display_container = []\n\n # create logging parameter\n logging_param = log_experiment\n\n # create exp_name_log param incase logging is False\n exp_name_log = \"no_logging\"\n\n # create an empty log_plots_param\n if not log_plots:\n log_plots_param = False\n else:\n log_plots_param = log_plots\n\n # add custom transformers to prep pipe\n if custom_pipeline:\n custom_steps = normalize_custom_transformers(custom_pipeline)\n _internal_pipeline.extend(custom_steps)\n\n # create a fix_imbalance_param and fix_imbalance_method_param\n fix_imbalance_param = fix_imbalance and preprocess\n fix_imbalance_method_param = fix_imbalance_method\n\n if fix_imbalance_method_param is None:\n fix_imbalance_model_name = \"SMOTE\"\n\n if fix_imbalance_param:\n if fix_imbalance_method_param is None:\n import six\n\n sys.modules[\"sklearn.externals.six\"] = six\n from imblearn.over_sampling import SMOTE\n\n fix_imbalance_resampler = SMOTE(random_state=seed)\n else:\n fix_imbalance_model_name = str(fix_imbalance_method_param).split(\"(\")[0]\n fix_imbalance_resampler = 
fix_imbalance_method_param\n _internal_pipeline.append((\"fix_imbalance\", fix_imbalance_resampler))\n\n for x in _internal_pipeline:\n if x[0] in prep_pipe.named_steps:\n raise ValueError(f\"Step named {x[0]} already present in pipeline.\")\n\n _internal_pipeline = make_internal_pipeline(_internal_pipeline)\n\n logger.info(f\"Internal pipeline: {_internal_pipeline}\")\n\n # create target_param var\n target_param = target\n\n # create stratify_param var\n stratify_param = data_split_stratify\n\n display.move_progress()\n\n display.update_monitor(1, \"Preprocessing Data\")\n display.display_monitor()\n\n if not _is_unsupervised(_ml_usecase):\n _stratify_columns = _get_columns_to_stratify_by(\n X_before_preprocess, y_before_preprocess, stratify_param, target\n )\n if test_data is None:\n X_train, X_test, y_train, y_test = train_test_split(\n X_before_preprocess,\n y_before_preprocess,\n test_size=1 - train_size,\n stratify=_stratify_columns,\n random_state=seed,\n shuffle=data_split_shuffle,\n )\n train_data = pd.concat([X_train, y_train], axis=1)\n test_data = pd.concat([X_test, y_test], axis=1)\n\n train_data = prep_pipe.fit_transform(train_data)\n # workaround to also transform target\n dtypes.final_training_columns.append(target)\n test_data = prep_pipe.transform(test_data)\n\n X_train = train_data.drop(target, axis=1)\n y_train = train_data[target]\n\n X_test = test_data.drop(target, axis=1)\n y_test = test_data[target]\n\n if fold_groups_param is not None:\n fold_groups_param_full = fold_groups_param.copy()\n fold_groups_param = fold_groups_param[\n fold_groups_param.index.isin(X_train.index)\n ]\n\n display.move_progress()\n if not _is_unsupervised(_ml_usecase):\n _internal_pipeline.fit(train_data.drop(target, axis=1), train_data[target])\n data = prep_pipe.transform(data_before_preprocess.copy())\n X = data.drop(target, axis=1)\n y = data[target]\n else:\n X = prep_pipe.fit_transform(train_data).drop(target, axis=1)\n X_train = X\n\n # we do just the fitting so that it will be fitted when saved/deployed,\n # but we don't want to modify the data\n _internal_pipeline.fit(X, y=y if not _is_unsupervised(_ml_usecase) else None)\n\n prep_pipe.steps = prep_pipe.steps + [\n (step[0], deepcopy(step[1]))\n for step in _internal_pipeline.steps\n if hasattr(step[1], \"transform\")\n ]\n\n try:\n dtypes.final_training_columns.remove(target)\n except ValueError:\n pass\n\n # determining target type\n if _is_multiclass():\n target_type = \"Multiclass\"\n else:\n target_type = \"Binary\"\n\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.classification.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.classification.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.classification.get_all_metric_containers(\n globals(), raise_errors=True\n )\n elif _ml_usecase == MLUsecase.REGRESSION:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.regression.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.regression.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.regression.get_all_metric_containers(\n globals(), raise_errors=True\n )\n elif _ml_usecase == MLUsecase.CLUSTERING:\n _all_models = {\n k: v\n for k, v in 
pycaret.containers.models.clustering.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.clustering.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.clustering.get_all_metric_containers(\n globals(), raise_errors=True\n )\n elif _ml_usecase == MLUsecase.ANOMALY:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.anomaly.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.anomaly.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.anomaly.get_all_metric_containers(\n globals(), raise_errors=True\n )\n\n \"\"\"\n Final display Starts\n \"\"\"\n logger.info(\"Creating grid variables\")\n\n if hasattr(dtypes, \"replacement\"):\n label_encoded = dtypes.replacement\n label_encoded = (\n str(label_encoded).replace(\"'\", \"\").replace(\"{\", \"\").replace(\"}\", \"\")\n )\n\n else:\n label_encoded = \"None\"\n\n # generate values for grid show\n missing_values = data_before_preprocess.isna().sum().sum()\n missing_flag = True if missing_values > 0 else False\n\n normalize_grid = normalize_method if normalize else \"None\"\n\n transformation_grid = transformation_method if transformation else \"None\"\n\n pca_method_grid = pca_method if pca else \"None\"\n\n pca_components_grid = pca_components_pass if pca else \"None\"\n\n rare_level_threshold_grid = rare_level_threshold if combine_rare_levels else \"None\"\n\n numeric_bin_grid = False if bin_numeric_features is None else True\n\n outliers_threshold_grid = outliers_threshold if remove_outliers else None\n\n multicollinearity_threshold_grid = (\n multicollinearity_threshold if remove_multicollinearity else None\n )\n\n cluster_iter_grid = cluster_iter if create_clusters else None\n\n polynomial_degree_grid = polynomial_degree if polynomial_features else None\n\n polynomial_threshold_grid = (\n polynomial_threshold if polynomial_features or trigonometry_features else None\n )\n\n feature_selection_threshold_grid = (\n feature_selection_threshold if feature_selection else None\n )\n\n interaction_threshold_grid = (\n interaction_threshold if feature_interaction or feature_ratio else None\n )\n\n ordinal_features_grid = False if ordinal_features is None else True\n\n unknown_categorical_method_grid = (\n unknown_categorical_method if handle_unknown_categorical else None\n )\n\n group_features_grid = False if group_features is None else True\n\n high_cardinality_features_grid = (\n False if high_cardinality_features is None else True\n )\n\n high_cardinality_method_grid = (\n high_cardinality_method if high_cardinality_features_grid else None\n )\n\n learned_types = dtypes.learned_dtypes\n learned_types.drop(target, inplace=True)\n\n float_type = 0\n cat_type = 0\n\n for i in dtypes.learned_dtypes:\n if \"float\" in str(i):\n float_type += 1\n elif \"object\" in str(i):\n cat_type += 1\n elif \"int\" in str(i):\n float_type += 1\n\n if profile:\n print(\"Setup Succesfully Completed! Loading Profile Now... 
Please Wait!\")\n else:\n if verbose:\n print(\"Setup Succesfully Completed!\")\n\n exp_name_dict = {\n MLUsecase.CLASSIFICATION: \"clf-default-name\",\n MLUsecase.REGRESSION: \"reg-default-name\",\n MLUsecase.CLUSTERING: \"cluster-default-name\",\n MLUsecase.ANOMALY: \"anomaly-default-name\",\n }\n if experiment_name is None:\n exp_name_ = exp_name_dict[_ml_usecase]\n else:\n exp_name_ = experiment_name\n\n URI = secrets.token_hex(nbytes=4)\n exp_name_log = exp_name_\n\n functions = pd.DataFrame(\n [[\"session_id\", seed],]\n + ([[\"Target\", target]] if not _is_unsupervised(_ml_usecase) else [])\n + (\n [[\"Target Type\", target_type], [\"Label Encoded\", label_encoded],]\n if _ml_usecase == MLUsecase.CLASSIFICATION\n else []\n )\n + [\n [\"Original Data\", data_before_preprocess.shape],\n [\"Missing Values\", missing_flag],\n [\"Numeric Features\", str(float_type)],\n [\"Categorical Features\", str(cat_type)],\n ]\n + (\n [\n [\"Ordinal Features\", ordinal_features_grid],\n [\"High Cardinality Features\", high_cardinality_features_grid],\n [\"High Cardinality Method\", high_cardinality_method_grid],\n ]\n if preprocess\n else []\n )\n + (\n [\n [\"Transformed Train Set\", X_train.shape],\n [\"Transformed Test Set\", X_test.shape],\n [\"Shuffle Train-Test\", str(data_split_shuffle)],\n [\"Stratify Train-Test\", str(data_split_stratify)],\n [\"Fold Generator\", type(fold_generator).__name__],\n [\"Fold Number\", fold_param],\n ]\n if not _is_unsupervised(_ml_usecase)\n else [[\"Transformed Data\", X.shape]]\n )\n + [\n [\"CPU Jobs\", n_jobs_param],\n [\"Use GPU\", gpu_param],\n [\"Log Experiment\", logging_param],\n [\"Experiment Name\", exp_name_],\n [\"USI\", USI],\n ]\n + (\n [\n [\"Imputation Type\", imputation_type],\n [\n \"Iterative Imputation Iteration\",\n iterative_imputation_iters_param\n if imputation_type == \"iterative\"\n else \"None\",\n ],\n [\"Numeric Imputer\", numeric_imputation],\n [\n \"Iterative Imputation Numeric Model\",\n imputation_regressor_name\n if imputation_type == \"iterative\"\n else \"None\",\n ],\n [\"Categorical Imputer\", categorical_imputation],\n [\n \"Iterative Imputation Categorical Model\",\n imputation_classifier_name\n if imputation_type == \"iterative\"\n else \"None\",\n ],\n [\"Unknown Categoricals Handling\", unknown_categorical_method_grid],\n [\"Normalize\", normalize],\n [\"Normalize Method\", normalize_grid],\n [\"Transformation\", transformation],\n [\"Transformation Method\", transformation_grid],\n [\"PCA\", pca],\n [\"PCA Method\", pca_method_grid],\n [\"PCA Components\", pca_components_grid],\n [\"Ignore Low Variance\", ignore_low_variance],\n [\"Combine Rare Levels\", combine_rare_levels],\n [\"Rare Level Threshold\", rare_level_threshold_grid],\n [\"Numeric Binning\", numeric_bin_grid],\n [\"Remove Outliers\", remove_outliers],\n [\"Outliers Threshold\", outliers_threshold_grid],\n [\"Remove Multicollinearity\", remove_multicollinearity],\n [\"Multicollinearity Threshold\", multicollinearity_threshold_grid],\n [\"Remove Perfect Collinearity\", remove_perfect_collinearity],\n [\"Clustering\", create_clusters],\n [\"Clustering Iteration\", cluster_iter_grid],\n [\"Polynomial Features\", polynomial_features],\n [\"Polynomial Degree\", polynomial_degree_grid],\n [\"Trignometry Features\", trigonometry_features],\n [\"Polynomial Threshold\", polynomial_threshold_grid],\n [\"Group Features\", group_features_grid],\n [\"Feature Selection\", feature_selection],\n [\"Feature Selection Method\", feature_selection_method],\n [\"Features 
Selection Threshold\", feature_selection_threshold_grid],\n [\"Feature Interaction\", feature_interaction],\n [\"Feature Ratio\", feature_ratio],\n [\"Interaction Threshold\", interaction_threshold_grid],\n ]\n if preprocess\n else []\n )\n + (\n [\n [\"Fix Imbalance\", fix_imbalance_param],\n [\"Fix Imbalance Method\", fix_imbalance_model_name],\n ]\n if _ml_usecase == MLUsecase.CLASSIFICATION\n else []\n )\n + (\n [\n [\"Transform Target\", transform_target_param],\n [\"Transform Target Method\", transform_target_method_param],\n ]\n if _ml_usecase == MLUsecase.REGRESSION\n else []\n ),\n columns=[\"Description\", \"Value\"],\n )\n functions_ = functions.style.apply(highlight_max)\n\n display_container.append(functions_)\n\n display.display(functions_, clear=True)\n\n if profile:\n try:\n import pandas_profiling\n\n pf = pandas_profiling.ProfileReport(\n data_before_preprocess, **profile_kwargs\n )\n display.display(pf, clear=True)\n except:\n print(\n \"Data Profiler Failed. No output to show, please continue with Modeling.\"\n )\n logger.error(\n \"Data Profiler Failed. No output to show, please continue with Modeling.\"\n )\n\n \"\"\"\n Final display Ends\n \"\"\"\n\n # log into experiment\n experiment__.append((\"Setup Config\", functions))\n if not _is_unsupervised(_ml_usecase):\n experiment__.append((\"X_training Set\", X_train))\n experiment__.append((\"y_training Set\", y_train))\n experiment__.append((\"X_test Set\", X_test))\n experiment__.append((\"y_test Set\", y_test))\n else:\n experiment__.append((\"Transformed Data\", X))\n experiment__.append((\"Transformation Pipeline\", prep_pipe))\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n if logging_param:\n\n logger.info(\"Logging experiment in MLFlow\")\n\n import mlflow\n\n try:\n mlflow.create_experiment(exp_name_log)\n except:\n logger.warning(\"Couldn't create mlflow experiment. 
Exception:\")\n logger.warning(traceback.format_exc())\n\n # mlflow logging\n mlflow.set_experiment(exp_name_log)\n\n run_name_ = f\"Session Initialized {USI}\"\n\n with mlflow.start_run(run_name=run_name_) as run:\n\n # Get active run to log as tag\n RunID = mlflow.active_run().info.run_id\n\n k = functions.copy()\n k.set_index(\"Description\", drop=True, inplace=True)\n kdict = k.to_dict()\n params = kdict.get(\"Value\")\n mlflow.log_params(params)\n\n # set tag of compare_models\n mlflow.set_tag(\"Source\", \"setup\")\n\n import secrets\n\n URI = secrets.token_hex(nbytes=4)\n mlflow.set_tag(\"URI\", URI)\n mlflow.set_tag(\"USI\", USI)\n mlflow.set_tag(\"Run Time\", runtime)\n mlflow.set_tag(\"Run ID\", RunID)\n\n # Log the transformation pipeline\n logger.info(\n \"SubProcess save_model() called ==================================\"\n )\n save_model(prep_pipe, \"Transformation Pipeline\", verbose=False)\n logger.info(\n \"SubProcess save_model() end ==================================\"\n )\n mlflow.log_artifact(\"Transformation Pipeline.pkl\")\n os.remove(\"Transformation Pipeline.pkl\")\n\n # Log pandas profile\n if log_profile:\n import pandas_profiling\n\n pf = pandas_profiling.ProfileReport(\n data_before_preprocess, **profile_kwargs\n )\n pf.to_file(\"Data Profile.html\")\n mlflow.log_artifact(\"Data Profile.html\")\n os.remove(\"Data Profile.html\")\n display.display(functions_, clear=True)\n\n # Log training and testing set\n if log_data:\n if not _is_unsupervised(_ml_usecase):\n X_train.join(y_train).to_csv(\"Train.csv\")\n X_test.join(y_test).to_csv(\"Test.csv\")\n mlflow.log_artifact(\"Train.csv\")\n mlflow.log_artifact(\"Test.csv\")\n os.remove(\"Train.csv\")\n os.remove(\"Test.csv\")\n else:\n X.to_csv(\"Dataset.csv\")\n mlflow.log_artifact(\"Dataset.csv\")\n os.remove(\"Dataset.csv\")\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(prep_pipe))\n logger.info(\"setup() succesfully completed......................................\")\n\n gc.collect()\n\n return tuple([globals()[v] for v in pycaret_globals])\n\n\ndef compare_models(\n include: Optional[\n List[Union[str, Any]]\n ] = None, # changed whitelist to include in pycaret==2.1\n exclude: Optional[List[str]] = None, # changed blacklist to exclude in pycaret==2.1\n fold: Optional[Union[int, Any]] = None,\n round: int = 4,\n cross_validation: bool = True,\n sort: str = \"Accuracy\",\n n_select: int = 1,\n budget_time: Optional[float] = None, # added in pycaret==2.1.0\n turbo: bool = True,\n errors: str = \"ignore\",\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n verbose: bool = True,\n display: Optional[Display] = None,\n) -> List[Any]:\n\n \"\"\"\n This function train all the models available in the model library and scores them\n using Cross Validation. The output prints a score grid with Accuracy,\n AUC, Recall, Precision, F1, Kappa and MCC (averaged across folds).\n\n This function returns all of the models compared, sorted by the value of the selected metric.\n\n When turbo is set to True ('rbfsvm', 'gpc' and 'mlp') are excluded due to longer\n training time. 
By default turbo param is set to True.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> best_model = compare_models()\n\n This will return the averaged score grid of all the models except 'rbfsvm', 'gpc'\n and 'mlp'. When turbo param is set to False, all models including 'rbfsvm', 'gpc'\n and 'mlp' are used but this may result in longer training time.\n\n >>> best_model = compare_models( exclude = [ 'knn', 'gbc' ] , turbo = False)\n\n This will return a comparison of all models except K Nearest Neighbour and\n Gradient Boosting Classifier.\n\n >>> best_model = compare_models( exclude = [ 'knn', 'gbc' ] , turbo = True)\n\n This will return comparison of all models except K Nearest Neighbour,\n Gradient Boosting Classifier, SVM (RBF), Gaussian Process Classifier and\n Multi Level Perceptron.\n\n\n >>> tuned_model = tune_model(create_model('lr'))\n >>> best_model = compare_models( include = [ 'lr', tuned_model ])\n\n This will compare a tuned Linear Regression model with an untuned one.\n\n Parameters\n ----------\n exclude: list of strings, default = None\n In order to omit certain models from the comparison model ID's can be passed as\n a list of strings in exclude param.\n\n include: list of strings or objects, default = None\n In order to run only certain models for the comparison, the model ID's can be\n passed as a list of strings in include param. The list can also include estimator\n objects to be compared.\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n cross_validation: bool, default = True\n When cross_validation set to False fold parameter is ignored and models are trained\n on entire training dataset, returning metrics calculated using the train (holdout) set.\n\n sort: str, default = 'Accuracy'\n The scoring measure specified is used for sorting the average score grid\n Other options are 'AUC', 'Recall', 'Precision', 'F1', 'Kappa' and 'MCC'.\n\n n_select: int, default = 1\n Number of top_n models to return. use negative argument for bottom selection.\n for example, n_select = -3 means bottom 3 models.\n\n budget_time: int or float, default = None\n If not 0 or None, will terminate execution of the function after budget_time\n minutes have passed and return results up to that point.\n\n turbo: bool, default = True\n When turbo is set to True, it excludes estimators that have longer\n training time.\n\n errors: str, default = 'ignore'\n If 'ignore', will suppress model exceptions and continue.\n If 'raise', will allow exceptions to be raised.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model. The parameters will be applied to all models,\n therefore it is recommended to set errors parameter to 'ignore'.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. 
GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n verbose: bool, default = True\n Score grid is not printed when verbose is set to False.\n\n Returns\n -------\n score_grid\n A table containing the scores of the model across the kfolds.\n Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n Kappa and MCC. Mean and standard deviation of the scores across\n the folds are also returned.\n\n list\n List of fitted model objects that were compared.\n\n Warnings\n --------\n - compare_models() though attractive, might be time consuming with large\n datasets. By default turbo is set to True, which excludes models that\n have longer training times. Changing turbo parameter to False may result\n in very high training times with datasets where number of samples exceed\n 10,000.\n\n - If target variable is multiclass (more than 2 classes), AUC will be\n returned as zero (0.0)\n\n - If cross_validation param is set to False, no models will be logged with MLFlow.\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing compare_models()\")\n logger.info(f\"compare_models({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # checking error for exclude (string)\n available_estimators = _all_models\n\n if exclude != None:\n for i in exclude:\n if i not in available_estimators:\n raise ValueError(\n f\"Estimator Not Available {i}. Please see docstring for list of available estimators.\"\n )\n\n if include != None:\n for i in include:\n if isinstance(i, str):\n if i not in available_estimators:\n raise ValueError(\n f\"Estimator {i} Not Available. Please see docstring for list of available estimators.\"\n )\n elif not hasattr(i, \"fit\"):\n raise ValueError(\n f\"Estimator {i} does not have the required fit() method.\"\n )\n\n # include and exclude together check\n if include is not None and exclude is not None:\n raise TypeError(\n \"Cannot use exclude parameter when include is used to compare models.\"\n )\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking budget_time parameter\n if budget_time and type(budget_time) is not int and type(budget_time) is not float:\n raise TypeError(\"budget_time parameter only accepts integer or float values.\")\n\n # checking sort parameter\n if not (isinstance(sort, str) and (sort == \"TT\" or sort == \"TT (Sec)\")):\n sort = _get_metric(sort)\n if sort is None:\n raise ValueError(\n f\"Sort method not supported. See docstring for list of available parameters.\"\n )\n\n # checking errors parameter\n possible_errors = [\"ignore\", \"raise\"]\n if errors not in possible_errors:\n raise ValueError(\n f\"errors parameter must be one of: {', '.join(possible_errors)}.\"\n )\n\n # checking optimize parameter for multiclass\n if _is_multiclass():\n if not sort.is_multiclass:\n raise TypeError(\n f\"{sort} metric not supported for multiclass problems. 
See docstring for list of other optimization parameters.\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n pd.set_option(\"display.max_columns\", 500)\n\n logger.info(\"Preparing display monitor\")\n\n len_mod = (\n len({k: v for k, v in _all_models.items() if v.is_turbo})\n if turbo\n else len(_all_models)\n )\n\n if include:\n len_mod = len(include)\n elif exclude:\n len_mod -= len(exclude)\n\n if not display:\n progress_args = {\"max\": (4 * len_mod) + 4 + len_mod}\n master_display_columns = (\n [\"Model\"] + [v.display_name for k, v in _all_metrics.items()] + [\"TT (Sec)\"]\n )\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . .\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n greater_is_worse_columns = {\n v.display_name for k, v in _all_metrics.items() if not v.greater_is_better\n }\n greater_is_worse_columns.add(\"TT (Sec)\")\n\n np.random.seed(seed)\n\n display.move_progress()\n\n # defining sort parameter (making Precision equivalent to Prec. )\n\n if not (isinstance(sort, str) and (sort == \"TT\" or sort == \"TT (Sec)\")):\n sort_ascending = not sort.greater_is_better\n sort = sort.display_name\n else:\n sort_ascending = True\n sort = \"TT (Sec)\"\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(1, \"Loading Estimator\")\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n if include:\n model_library = include\n else:\n if turbo:\n model_library = _all_models\n model_library = [k for k, v in _all_models.items() if v.is_turbo]\n else:\n model_library = list(_all_models.keys())\n if exclude:\n model_library = [x for x in model_library if x not in exclude]\n\n display.move_progress()\n\n # create URI (before loop)\n import secrets\n\n URI = secrets.token_hex(nbytes=4)\n\n master_display = None\n master_display_ = None\n\n total_runtime_start = time.time()\n total_runtime = 0\n over_time_budget = False\n if budget_time and budget_time > 0:\n logger.info(f\"Time budget is {budget_time} minutes\")\n\n for i, model in enumerate(model_library):\n\n model_id = (\n model\n if (\n isinstance(model, str)\n and all(isinstance(m, str) for m in model_library)\n )\n else str(i)\n )\n model_name = _get_model_name(model)\n\n if isinstance(model, str):\n logger.info(f\"Initializing {model_name}\")\n else:\n logger.info(f\"Initializing custom model {model_name}\")\n\n # run_time\n runtime_start = time.time()\n total_runtime += (runtime_start - total_runtime_start) / 60\n logger.info(f\"Total runtime is {total_runtime} minutes\")\n over_time_budget = (\n budget_time and budget_time > 0 and total_runtime > budget_time\n )\n if over_time_budget:\n logger.info(\n f\"Total runtime {total_runtime} is over time budget by {total_runtime - budget_time}, breaking loop\"\n )\n break\n total_runtime_start = runtime_start\n\n display.move_progress()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(2, model_name)\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n 
\"\"\"\n display.replace_master_display(None)\n\n logger.info(\n \"SubProcess create_model() called ==================================\"\n )\n if errors == \"raise\":\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n verbose=False,\n display=display,\n fold=fold,\n round=round,\n cross_validation=cross_validation,\n fit_kwargs=fit_kwargs,\n groups=groups,\n refit=False,\n )\n model_results = pull(pop=True)\n else:\n try:\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n verbose=False,\n display=display,\n fold=fold,\n round=round,\n cross_validation=cross_validation,\n fit_kwargs=fit_kwargs,\n groups=groups,\n refit=False,\n )\n model_results = pull(pop=True)\n assert np.sum(model_results.iloc[0]) != 0.0\n except:\n logger.warning(\n f\"create_model() for {model} raised an exception or returned all 0.0, trying without fit_kwargs:\"\n )\n logger.warning(traceback.format_exc())\n try:\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n verbose=False,\n display=display,\n fold=fold,\n round=round,\n cross_validation=cross_validation,\n groups=groups,\n refit=False,\n )\n model_results = pull(pop=True)\n except:\n logger.error(f\"create_model() for {model} raised an exception:\")\n logger.error(traceback.format_exc())\n continue\n logger.info(\"SubProcess create_model() end ==================================\")\n\n if model is None:\n over_time_budget = True\n logger.info(f\"Time budged exceeded in create_model(), breaking loop\")\n break\n\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n logger.info(\"Creating metrics dataframe\")\n if cross_validation:\n compare_models_ = pd.DataFrame(model_results.loc[\"Mean\"]).T\n else:\n compare_models_ = pd.DataFrame(model_results.iloc[0]).T\n compare_models_.insert(len(compare_models_.columns), \"TT (Sec)\", model_fit_time)\n compare_models_.insert(0, \"Model\", model_name)\n compare_models_.insert(0, \"Object\", [model])\n compare_models_.insert(0, \"runtime\", runtime)\n compare_models_.index = [model_id]\n if master_display is None:\n master_display = compare_models_\n else:\n master_display = pd.concat(\n [master_display, compare_models_], ignore_index=False\n )\n master_display = master_display.round(round)\n master_display = master_display.sort_values(by=sort, ascending=sort_ascending)\n\n master_display_ = master_display.drop(\n [\"Object\", \"runtime\"], axis=1, errors=\"ignore\"\n ).style.set_precision(round)\n master_display_ = master_display_.set_properties(**{\"text-align\": \"left\"})\n master_display_ = master_display_.set_table_styles(\n [dict(selector=\"th\", props=[(\"text-align\", \"left\")])]\n )\n\n display.replace_master_display(master_display_)\n\n display.display_master_display()\n\n display.move_progress()\n\n def highlight_max(s):\n to_highlight = s == s.max()\n return [\"background-color: yellow\" if v else \"\" for v in to_highlight]\n\n def highlight_min(s):\n to_highlight = s == s.min()\n return [\"background-color: yellow\" if v else \"\" for v in to_highlight]\n\n def highlight_cols(s):\n color = \"lightgrey\"\n return f\"background-color: {color}\"\n\n if master_display_ is not None:\n compare_models_ = (\n master_display_.apply(\n highlight_max,\n subset=[\n x\n for x in master_display_.columns[1:]\n if x not in greater_is_worse_columns\n ],\n )\n .apply(\n highlight_min,\n subset=[\n x\n for x in master_display_.columns[1:]\n if x in greater_is_worse_columns\n ],\n 
)\n .applymap(highlight_cols, subset=[\"TT (Sec)\"])\n )\n else:\n compare_models_ = pd.DataFrame().style\n\n display.update_monitor(1, \"Compiling Final Models\")\n display.display_monitor()\n\n display.move_progress()\n\n sorted_models = []\n\n if master_display is not None:\n if n_select < 0:\n n_select_range = range(len(master_display) - n_select, len(master_display))\n else:\n n_select_range = range(0, n_select)\n\n for index, row in enumerate(master_display.iterrows()):\n loc, row = row\n model = row[\"Object\"]\n\n results = row.to_frame().T.drop(\n [\"Object\", \"Model\", \"runtime\", \"TT (Sec)\"], errors=\"ignore\", axis=1\n )\n\n avgs_dict_log = {k: v for k, v in results.iloc[0].items()}\n\n full_logging = False\n\n if index in n_select_range:\n display.update_monitor(2, _get_model_name(model))\n display.display_monitor()\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n verbose=False,\n fold=fold,\n round=round,\n cross_validation=False,\n predict=False,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n sorted_models.append(model)\n full_logging = True\n\n if logging_param and cross_validation:\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=results,\n score_dict=avgs_dict_log,\n source=\"compare_models\",\n runtime=row[\"runtime\"],\n model_fit_time=row[\"TT (Sec)\"],\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param if full_logging else False,\n log_holdout=full_logging,\n URI=URI,\n display=display,\n )\n except:\n logger.error(\n f\"_mlflow_log_model() for {model} raised an exception:\"\n )\n logger.error(traceback.format_exc())\n\n if len(sorted_models) == 1:\n sorted_models = sorted_models[0]\n\n display.display(compare_models_, clear=True)\n\n pd.reset_option(\"display.max_columns\")\n\n # store in display container\n display_container.append(compare_models_.data)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(sorted_models))\n logger.info(\n \"compare_models() succesfully completed......................................\"\n )\n\n return sorted_models\n\n\ndef create_model_unsupervised(\n estimator,\n num_clusters: int = 4,\n fraction: float = 0.05,\n ground_truth: Optional[str] = None,\n round: int = 4,\n fit_kwargs: Optional[dict] = None,\n verbose: bool = True,\n system: bool = True,\n raise_num_clusters: bool = False,\n X_data: Optional[pd.DataFrame] = None, # added in pycaret==2.2.0\n display: Optional[Display] = None, # added in pycaret==2.2.0\n **kwargs,\n) -> Any:\n\n \"\"\"\n This is an internal version of the create_model function.\n\n This function creates a model and scores it using Cross Validation.\n The output prints a score grid that shows Accuracy, AUC, Recall, Precision,\n F1, Kappa and MCC by fold (default = 10 Fold).\n\n This function returns a trained model object.\n\n setup() function must be called before using create_model()\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n\n This will create a trained Logistic Regression model.\n\n Parameters\n ----------\n model : string / object, default = None\n Enter ID of the models available in model library or pass an untrained model\n object consistent with fit / predict API to train and evaluate model. 
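As a minimal sketch of the custom-object path (assuming scikit-learn is\n        installed and a clustering experiment has been set up):\n\n        >>> from sklearn.cluster import KMeans\n        >>> kmeans_model = create_model(KMeans(n_clusters=4))\n\n        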
List of\n models available in model library (ID - Model):\n\n * 'kmeans' - K-Means Clustering\n * 'ap' - Affinity Propagation\n * 'meanshift' - Mean shift Clustering\n * 'sc' - Spectral Clustering\n * 'hclust' - Agglomerative Clustering\n * 'dbscan' - Density-Based Spatial Clustering\n * 'optics' - OPTICS Clustering\n * 'birch' - Birch Clustering\n * 'kmodes' - K-Modes Clustering\n\n num_clusters: int, default = 4\n Number of clusters to be generated with the dataset.\n\n ground_truth: string, default = None\n When ground_truth is provided, Homogeneity Score, Rand Index, and\n Completeness Score is evaluated and printer along with other metrics.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n verbose: bool, default = True\n Score grid is not printed when verbose is set to False.\n\n system: bool, default = True\n Must remain True all times. Only to be changed by internal functions.\n If False, method will return a tuple of model and the model fit time.\n\n **kwargs:\n Additional keyword arguments to pass to the estimator.\n\n Returns\n -------\n score_grid\n A table containing the Silhouette, Calinski-Harabasz,\n Davies-Bouldin, Homogeneity Score, Rand Index, and\n Completeness Score. Last 3 are only evaluated when\n ground_truth param is provided.\n\n model\n trained model object\n\n Warnings\n --------\n - num_clusters not required for Affinity Propagation ('ap'), Mean shift\n clustering ('meanshift'), Density-Based Spatial Clustering ('dbscan')\n and OPTICS Clustering ('optics'). num_clusters param for these models\n are automatically determined.\n\n - When fit doesn't converge in Affinity Propagation ('ap') model, all\n datapoints are labelled as -1.\n\n - Noisy samples are given the label -1, when using Density-Based Spatial\n ('dbscan') or OPTICS Clustering ('optics').\n\n - OPTICS ('optics') clustering may take longer training times on large\n datasets.\n\n \"\"\"\n\n function_params_str = \", \".join(\n [f\"{k}={v}\" for k, v in locals().items() if k not in (\"X_data\")]\n )\n\n logger = get_logger()\n\n logger.info(\"Initializing create_model()\")\n logger.info(f\"create_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n available_estimators = set(_all_models_internal.keys())\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # only raise exception of estimator is of type string.\n if isinstance(estimator, str):\n if estimator not in available_estimators:\n raise ValueError(\n f\"Estimator {estimator} not available. 
Please see docstring for list of available estimators.\"\n )\n elif not hasattr(estimator, \"fit\"):\n raise ValueError(\n f\"Estimator {estimator} does not have the required fit() method.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n # checking system parameter\n if type(system) is not bool:\n raise TypeError(\"System parameter can only take argument as True or False.\")\n\n # checking fraction type:\n if fraction <= 0 or fraction >= 1:\n raise TypeError(\n \"Fraction parameter can only take value as float between 0 to 1.\"\n )\n\n # checking num_clusters type:\n if num_clusters <= 1:\n raise TypeError(\n \"num_clusters parameter can only take value integer value greater than 1.\"\n )\n\n # check ground truth exist in data_\n if ground_truth is not None:\n if ground_truth not in data_before_preprocess.columns:\n raise ValueError(\n f\"ground_truth {ground_truth} doesn't exist in the dataset.\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n if not display:\n progress_args = {\"max\": 3}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . .\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n logger.info(\"Importing libraries\")\n\n # general dependencies\n\n np.random.seed(seed)\n\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X if X_data is None else X_data\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n display.update_monitor(1, \"Selecting Estimator\")\n display.display_monitor()\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n logger.info(\"Importing untrained model\")\n\n is_cblof = False\n\n if isinstance(estimator, str) and estimator in available_estimators:\n is_cblof = estimator == \"cluster\"\n model_definition = _all_models_internal[estimator]\n model_args = model_definition.args\n model_args = {**model_args, **kwargs}\n model = model_definition.class_def(**model_args)\n full_name = model_definition.name\n else:\n logger.info(\"Declaring custom model\")\n\n model = clone(estimator)\n model.set_params(**kwargs)\n\n full_name = _get_model_name(model)\n\n display.update_monitor(2, full_name)\n display.display_monitor()\n\n if _ml_usecase == MLUsecase.CLUSTERING:\n if raise_num_clusters:\n model.set_params(n_clusters=num_clusters)\n else:\n try:\n model.set_params(n_clusters=num_clusters)\n except:\n pass\n else:\n model.set_params(contamination=fraction)\n\n # workaround for an issue with set_params in cuML\n try:\n model = clone(model)\n except:\n logger.warning(\n f\"create_model_unsupervised() for {model} raised an exception when cloning:\"\n )\n logger.warning(traceback.format_exc())\n\n logger.info(f\"{full_name} Imported succesfully\")\n\n display.move_progress()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n if _ml_usecase == MLUsecase.CLUSTERING:\n 
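# Clustering runs show the number of clusters being fitted in the monitor;\n        # the anomaly-detection branch below shows the contamination fraction instead.\n        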
display.update_monitor(1, f\"Fitting {num_clusters} Clusters\")\n else:\n display.update_monitor(1, f\"Fitting {fraction} Fraction\")\n display.display_monitor()\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n with estimator_pipeline(_internal_pipeline, model) as pipeline_with_model:\n fit_kwargs = _get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)\n\n logger.info(\"Fitting Model\")\n model_fit_start = time.time()\n with io.capture_output():\n if is_cblof and \"n_clusters\" not in kwargs:\n try:\n pipeline_with_model.fit(data_X, **fit_kwargs)\n except:\n try:\n pipeline_with_model.set_params(actual_estimator__n_clusters=12)\n model_fit_start = time.time()\n pipeline_with_model.fit(data_X, **fit_kwargs)\n except:\n raise RuntimeError(\n \"Could not form valid cluster separation. Try a different dataset or model.\"\n )\n else:\n pipeline_with_model.fit(data_X, **fit_kwargs)\n model_fit_end = time.time()\n\n model_fit_time = np.array(model_fit_end - model_fit_start).round(2)\n\n display.move_progress()\n\n if ground_truth is not None:\n\n logger.info(f\"ground_truth parameter set to {ground_truth}\")\n\n gt = np.array(data_before_preprocess[ground_truth])\n else:\n gt = None\n\n if _ml_usecase == MLUsecase.CLUSTERING:\n metrics = _calculate_metrics_unsupervised(X, model.labels_, ground_truth=gt)\n else:\n metrics = {}\n\n logger.info(str(model))\n logger.info(\n \"create_models() succesfully completed......................................\"\n )\n\n runtime = time.time() - runtime_start\n\n # mlflow logging\n if logging_param and system:\n\n metrics_log = {k: v for k, v in metrics.items()}\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=None,\n score_dict=metrics_log,\n source=\"create_model\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n display.move_progress()\n\n logger.info(\"Uploading results into container\")\n\n model_results = pd.DataFrame(metrics, index=[0])\n model_results = model_results.round(round)\n\n # storing results in create_model_container\n create_model_container.append(model_results)\n display_container.append(model_results)\n\n # storing results in master_model_container\n logger.info(\"Uploading model into container now\")\n master_model_container.append(model)\n\n if _ml_usecase == MLUsecase.CLUSTERING:\n display.display(\n model_results, clear=system, override=False if not system else None\n )\n elif system:\n display.clear_output()\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model))\n logger.info(\n \"create_model() succesfully completed......................................\"\n )\n gc.collect()\n\n if not system:\n return (model, model_fit_time)\n\n return model\n\n\ndef create_model_supervised(\n estimator,\n fold: Optional[Union[int, Any]] = None,\n round: int = 4,\n cross_validation: bool = True,\n predict: bool = True,\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n refit: bool = True,\n verbose: bool = True,\n system: bool = True,\n X_train_data: Optional[pd.DataFrame] = None, # added in pycaret==2.2.0\n y_train_data: Optional[pd.DataFrame] = None, # added in pycaret==2.2.0\n metrics=None,\n display: Optional[Display] = 
None, # added in pycaret==2.2.0\n **kwargs,\n) -> Any:\n\n \"\"\"\n This is an internal version of the create_model function.\n\n This function creates a model and scores it using Cross Validation.\n The output prints a score grid that shows Accuracy, AUC, Recall, Precision,\n F1, Kappa and MCC by fold (default = 10 Fold).\n\n This function returns a trained model object.\n\n setup() function must be called before using create_model()\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n\n This will create a trained Logistic Regression model.\n\n Parameters\n ----------\n estimator : str / object, default = None\n Enter ID of the estimators available in model library or pass an untrained model\n object consistent with fit / predict API to train and evaluate model. All\n estimators support binary or multiclass problem. List of estimators in model\n library (ID - Name):\n\n * 'lr' - Logistic Regression\n * 'knn' - K Nearest Neighbour\n * 'nb' - Naive Bayes\n * 'dt' - Decision Tree Classifier\n * 'svm' - SVM - Linear Kernel\n * 'rbfsvm' - SVM - Radial Kernel\n * 'gpc' - Gaussian Process Classifier\n * 'mlp' - Multi Level Perceptron\n * 'ridge' - Ridge Classifier\n * 'rf' - Random Forest Classifier\n * 'qda' - Quadratic Discriminant Analysis\n * 'ada' - Ada Boost Classifier\n * 'gbc' - Gradient Boosting Classifier\n * 'lda' - Linear Discriminant Analysis\n * 'et' - Extra Trees Classifier\n * 'xgboost' - Extreme Gradient Boosting\n * 'lightgbm' - Light Gradient Boosting\n * 'catboost' - CatBoost Classifier\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n cross_validation: bool, default = True\n When cross_validation set to False fold parameter is ignored and model is trained\n on entire training dataset.\n\n predict: bool, default = True\n Whether to predict model on holdout if cross_validation == False.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n refit: bool, default = True\n Whether to refit the model on the entire dataset after CV. Ignored if cross_validation == False.\n\n verbose: bool, default = True\n Score grid is not printed when verbose is set to False.\n\n system: bool, default = True\n Must remain True all times. 
Only to be changed by internal functions.\n If False, method will return a tuple of model and the model fit time.\n\n X_train_data: pandas.DataFrame, default = None\n If not None, will use this dataframe as training features.\n Intended to be only changed by internal functions.\n\n y_train_data: pandas.DataFrame, default = None\n If not None, will use this dataframe as training target.\n Intended to be only changed by internal functions.\n\n **kwargs:\n Additional keyword arguments to pass to the estimator.\n\n Returns\n -------\n score_grid\n A table containing the scores of the model across the kfolds.\n Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n Kappa and MCC. Mean and standard deviation of the scores across\n the folds are highlighted in yellow.\n\n model\n trained model object\n\n Warnings\n --------\n - 'svm' and 'ridge' doesn't support predict_proba method. As such, AUC will be\n returned as zero (0.0)\n\n - If target variable is multiclass (more than 2 classes), AUC will be returned\n as zero (0.0)\n\n - 'rbfsvm' and 'gpc' uses non-linear kernel and hence the fit time complexity is\n more than quadratic. These estimators are hard to scale on datasets with more\n than 10,000 samples.\n\n - If cross_validation param is set to False, model will not be logged with MLFlow.\n\n \"\"\"\n\n function_params_str = \", \".join(\n [\n f\"{k}={v}\"\n for k, v in locals().items()\n if k not in (\"X_train_data\", \"y_train_data\")\n ]\n )\n\n logger = get_logger()\n\n logger.info(\"Initializing create_model()\")\n logger.info(f\"create_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n available_estimators = set(_all_models_internal.keys())\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # only raise exception of estimator is of type string.\n if isinstance(estimator, str):\n if estimator not in available_estimators:\n raise ValueError(\n f\"Estimator {estimator} not available. Please see docstring for list of available estimators.\"\n )\n elif not hasattr(estimator, \"fit\"):\n raise ValueError(\n f\"Estimator {estimator} does not have the required fit() method.\"\n )\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n # checking system parameter\n if type(system) is not bool:\n raise TypeError(\"System parameter can only take argument as True or False.\")\n\n # checking cross_validation parameter\n if type(cross_validation) is not bool:\n raise TypeError(\n \"cross_validation parameter can only take argument as True or False.\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n groups = _get_groups(groups, data=X_train_data)\n\n if not display:\n progress_args = {\"max\": 4}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . 
.\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n logger.info(\"Importing libraries\")\n\n # general dependencies\n\n np.random.seed(seed)\n\n logger.info(\"Copying training dataset\")\n\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X_train.copy() if X_train_data is None else X_train_data.copy()\n data_y = y_train.copy() if y_train_data is None else y_train_data.copy()\n\n # reset index\n data_X.reset_index(drop=True, inplace=True)\n data_y.reset_index(drop=True, inplace=True)\n\n if metrics is None:\n metrics = _all_metrics\n\n display.move_progress()\n\n logger.info(\"Defining folds\")\n\n # cross validation setup starts here\n cv = _get_cv_splitter(fold)\n\n logger.info(\"Declaring metric variables\")\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n display.update_monitor(1, \"Selecting Estimator\")\n display.display_monitor()\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n logger.info(\"Importing untrained model\")\n\n if isinstance(estimator, str) and estimator in available_estimators:\n model_definition = _all_models_internal[estimator]\n model_args = model_definition.args\n model_args = {**model_args, **kwargs}\n model = model_definition.class_def(**model_args)\n full_name = model_definition.name\n else:\n logger.info(\"Declaring custom model\")\n\n model = clone(estimator)\n model.set_params(**kwargs)\n\n full_name = _get_model_name(model)\n\n # workaround for an issue with set_params in cuML\n model = clone(model)\n\n display.update_monitor(2, full_name)\n display.display_monitor()\n\n if transform_target_param and not isinstance(model, TransformedTargetRegressor):\n model = PowerTransformedTargetRegressor(\n regressor=model, power_transformer_method=transform_target_method_param\n )\n\n logger.info(f\"{full_name} Imported succesfully\")\n\n display.move_progress()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n if not cross_validation:\n display.update_monitor(1, f\"Fitting {str(full_name)}\")\n else:\n display.update_monitor(1, \"Initializing CV\")\n\n display.display_monitor()\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n if not cross_validation:\n\n with estimator_pipeline(_internal_pipeline, model) as pipeline_with_model:\n fit_kwargs = _get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)\n logger.info(\"Cross validation set to False\")\n\n logger.info(\"Fitting Model\")\n model_fit_start = time.time()\n with io.capture_output():\n pipeline_with_model.fit(data_X, data_y, **fit_kwargs)\n model_fit_end = time.time()\n\n model_fit_time = np.array(model_fit_end - model_fit_start).round(2)\n\n display.move_progress()\n\n if predict:\n predict_model(pipeline_with_model, verbose=False)\n model_results = pull(pop=True).drop(\"Model\", axis=1)\n\n display_container.append(model_results)\n\n display.display(\n model_results, clear=system, override=False if not system else None\n )\n\n logger.info(f\"display_container: {len(display_container)}\")\n\n display.move_progress()\n\n logger.info(str(model))\n logger.info(\n \"create_models() succesfully completed......................................\"\n )\n\n gc.collect()\n\n if not system:\n return (model, model_fit_time)\n return model\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n display.update_monitor(\n 1, f\"Fitting {_get_cv_n_folds(fold, data_X, y=data_y, 
groups=groups)} Folds\"\n )\n display.display_monitor()\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n from sklearn.model_selection import cross_validate\n\n metrics_dict = dict([(k, v.scorer) for k, v in metrics.items()])\n\n logger.info(\"Starting cross validation\")\n\n n_jobs = _gpu_n_jobs_param\n from sklearn.gaussian_process import (\n GaussianProcessClassifier,\n GaussianProcessRegressor,\n )\n\n # special case to prevent running out of memory\n if isinstance(model, (GaussianProcessClassifier, GaussianProcessRegressor)):\n n_jobs = 1\n\n with estimator_pipeline(_internal_pipeline, model) as pipeline_with_model:\n fit_kwargs = _get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)\n logger.info(f\"Cross validating with {cv}, n_jobs={n_jobs}\")\n\n model_fit_start = time.time()\n scores = cross_validate(\n pipeline_with_model,\n data_X,\n data_y,\n cv=cv,\n groups=groups,\n scoring=metrics_dict,\n fit_params=fit_kwargs,\n n_jobs=n_jobs,\n return_train_score=False,\n error_score=0,\n )\n model_fit_end = time.time()\n model_fit_time = np.array(model_fit_end - model_fit_start).round(2)\n\n score_dict = {\n v.display_name: scores[f\"test_{k}\"] * (1 if v.greater_is_better else -1)\n for k, v in metrics.items()\n }\n\n logger.info(\"Calculating mean and std\")\n\n avgs_dict = {k: [np.mean(v), np.std(v)] for k, v in score_dict.items()}\n\n display.move_progress()\n\n logger.info(\"Creating metrics dataframe\")\n\n model_results = pd.DataFrame(score_dict)\n model_avgs = pd.DataFrame(avgs_dict, index=[\"Mean\", \"SD\"],)\n\n model_results = model_results.append(model_avgs)\n model_results = model_results.round(round)\n\n # yellow the mean\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n\n if refit:\n # refitting the model on complete X_train, y_train\n display.update_monitor(1, \"Finalizing Model\")\n display.display_monitor()\n model_fit_start = time.time()\n logger.info(\"Finalizing model\")\n with io.capture_output():\n pipeline_with_model.fit(data_X, data_y, **fit_kwargs)\n model_fit_end = time.time()\n\n model_fit_time = np.array(model_fit_end - model_fit_start).round(2)\n else:\n model_fit_time /= _get_cv_n_folds(cv, data_X, y=data_y, groups=groups)\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param and system and refit:\n\n avgs_dict_log = avgs_dict.copy()\n avgs_dict_log = {k: v[0] for k, v in avgs_dict_log.items()}\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"create_model\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n display.move_progress()\n\n logger.info(\"Uploading results into container\")\n\n # storing results in create_model_container\n create_model_container.append(model_results.data)\n display_container.append(model_results.data)\n\n # storing results in master_model_container\n logger.info(\"Uploading model into container now\")\n master_model_container.append(model)\n\n display.display(model_results, clear=system, override=False if not system else None)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n 
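# Container lengths are logged as a debugging aid: create_model_container holds the\n    # CV score grids, master_model_container the fitted models, and display_container\n    # the rendered result tables.\n    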
logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model))\n logger.info(\n \"create_model() succesfully completed......................................\"\n )\n gc.collect()\n\n if not system:\n return (model, model_fit_time)\n\n return model\n\n\ndef tune_model_unsupervised(\n model,\n supervised_target: str,\n supervised_type: Optional[str] = None,\n supervised_estimator: Union[str, Any] = \"lr\",\n optimize: Optional[str] = None,\n custom_grid: Optional[List[int]] = None,\n fold: Optional[Union[int, Any]] = None,\n groups: Optional[Union[str, Any]] = None,\n ground_truth: Optional[str] = None,\n method: str = \"drop\",\n fit_kwargs: Optional[dict] = None,\n round: int = 4,\n verbose: bool = True,\n display: Optional[Display] = None,\n **kwargs,\n):\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing tune_model()\")\n logger.info(f\"tune_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n if supervised_target not in data_before_preprocess.columns:\n raise ValueError(\n f\"{supervised_target} is not present as a column in the dataset.\"\n )\n\n warnings.filterwarnings(\"ignore\")\n\n np.random.seed(seed)\n\n cols_to_drop = [x for x in X.columns if x.startswith(supervised_target)]\n data_X = X.drop(cols_to_drop, axis=1)\n data_y = data_before_preprocess[[supervised_target]]\n if data_y.dtypes[0] not in [int, float, bool]:\n data_y[supervised_target] = LabelEncoder().fit_transform(\n data_y[supervised_target]\n )\n data_y = data_y[supervised_target]\n\n temp_globals = globals()\n temp_globals[\"y_train\"] = data_y\n\n if supervised_type is None:\n supervised_type, _ = infer_ml_usecase(data_y)\n logger.info(f\"supervised_type inferred as {supervised_type}\")\n\n if supervised_type == \"classification\":\n metrics = pycaret.containers.metrics.classification.get_all_metric_containers(\n temp_globals, raise_errors=True\n )\n available_estimators = pycaret.containers.models.classification.get_all_model_containers(\n temp_globals, raise_errors=True\n )\n ml_usecase = MLUsecase.CLASSIFICATION\n elif supervised_type == \"regression\":\n metrics = pycaret.containers.metrics.regression.get_all_metric_containers(\n temp_globals, raise_errors=True\n )\n available_estimators = pycaret.containers.models.regression.get_all_model_containers(\n temp_globals, raise_errors=True\n )\n ml_usecase = MLUsecase.REGRESSION\n else:\n raise ValueError(\n f\"supervised_type param must be either 'classification' or 'regression'.\"\n )\n\n fold = _get_cv_splitter(fold, ml_usecase)\n\n if isinstance(supervised_estimator, str):\n if supervised_estimator in available_estimators:\n estimator_definition = available_estimators[supervised_estimator]\n estimator_args = estimator_definition.args\n estimator_args = {**estimator_args}\n supervised_estimator = estimator_definition.class_def(**estimator_args)\n else:\n raise ValueError(f\"Unknown supervised_estimator {supervised_estimator}.\")\n else:\n logger.info(\"Declaring custom model\")\n\n supervised_estimator = clone(supervised_estimator)\n\n supervised_estimator_name = _get_model_name(\n supervised_estimator, models=available_estimators\n )\n\n if optimize is None:\n optimize = \"Accuracy\" if supervised_type == \"classification\" else \"R2\"\n optimize = _get_metric(optimize, metrics=metrics)\n if optimize is None:\n raise ValueError(\n \"Optimize 
method not supported. See docstring for list of available parameters.\"\n )\n\n if custom_grid is not None and not isinstance(custom_grid, list):\n raise ValueError(f\"custom_grid param must be a list.\")\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n if custom_grid is None:\n if _ml_usecase == MLUsecase.CLUSTERING:\n param_grid = [2, 4, 5, 6, 8, 10, 14, 18, 25, 30, 40]\n else:\n param_grid = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10]\n else:\n param_grid = custom_grid\n try:\n param_grid.remove(0)\n except ValueError:\n pass\n param_grid.sort()\n\n if not display:\n progress_args = {\"max\": len(param_grid) * 3 + (len(param_grid) + 1) * 4}\n master_display_columns = None\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . .\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n unsupervised_models = {}\n unsupervised_models_results = {}\n unsupervised_grids = {0: data_X}\n\n logger.info(\"Fitting unsupervised models\")\n\n for k in param_grid:\n if _ml_usecase == MLUsecase.CLUSTERING:\n try:\n new_model, _ = create_model_unsupervised(\n model,\n num_clusters=k,\n X_data=data_X,\n display=display,\n system=False,\n ground_truth=ground_truth,\n round=round,\n fit_kwargs=fit_kwargs,\n raise_num_clusters=True,\n **kwargs,\n )\n except ValueError:\n raise ValueError(\n f\"Model {model} cannot be used in this function as its number of clusters cannot be set (n_clusters param required).\"\n )\n else:\n new_model, _ = create_model_unsupervised(\n model,\n fraction=k,\n X_data=data_X,\n display=display,\n system=False,\n ground_truth=ground_truth,\n round=round,\n fit_kwargs=fit_kwargs,\n **kwargs,\n )\n unsupervised_models_results[k] = pull(pop=True)\n unsupervised_models[k] = new_model\n unsupervised_grids[k] = (\n assign_model(new_model, verbose=False, transformation=True)\n .reset_index(drop=True)\n .drop(cols_to_drop, axis=1)\n )\n if _ml_usecase == MLUsecase.CLUSTERING:\n unsupervised_grids[k] = pd.get_dummies(\n unsupervised_grids[k], columns=[\"Cluster\"],\n )\n elif method == \"drop\":\n unsupervised_grids[k] = unsupervised_grids[k][\n unsupervised_grids[k][\"Anomaly\"] == 0\n ].drop([\"Anomaly\", \"Anomaly_Score\"], axis=1)\n\n results = {}\n\n logger.info(\"Fitting supervised estimator\")\n\n for k, v in unsupervised_grids.items():\n create_model_supervised(\n supervised_estimator,\n fold=fold,\n display=display,\n system=False,\n X_train_data=v,\n y_train_data=data_y[data_y.index.isin(v.index)],\n metrics=metrics,\n groups=groups,\n round=round,\n refit=False,\n )\n results[k] = pull(pop=True).loc[\"Mean\"]\n display.move_progress()\n\n logger.info(\"Compiling results\")\n\n results = pd.DataFrame(results).T\n\n greater_is_worse_columns = {\n v.display_name for k, v in metrics.items() if not v.greater_is_better\n }\n\n best_model_idx = (\n results.drop(0)\n 
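# index 0 is the supervised baseline trained on the raw features (no cluster labels\n        # or anomaly filtering), so it is excluded before ranking by the optimize metric.\n        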
.sort_values(\n by=optimize.display_name, ascending=optimize in greater_is_worse_columns\n )\n .index[0]\n )\n\n def highlight_max(s):\n to_highlight = s == s.max()\n return [\"background-color: yellow\" if v else \"\" for v in to_highlight]\n\n def highlight_min(s):\n to_highlight = s == s.min()\n return [\"background-color: yellow\" if v else \"\" for v in to_highlight]\n\n results = results.style.apply(\n highlight_max,\n subset=[x for x in results.columns if x not in greater_is_worse_columns],\n ).apply(\n highlight_min,\n subset=[x for x in results.columns if x in greater_is_worse_columns],\n )\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n if _ml_usecase == MLUsecase.CLUSTERING:\n best_model, best_model_fit_time = create_model_unsupervised(\n unsupervised_models[best_model_idx],\n num_clusters=best_model_idx,\n system=False,\n round=round,\n ground_truth=ground_truth,\n fit_kwargs=fit_kwargs,\n display=display,\n **kwargs,\n )\n else:\n best_model, best_model_fit_time = create_model_unsupervised(\n unsupervised_models[best_model_idx],\n fraction=best_model_idx,\n system=False,\n round=round,\n fit_kwargs=fit_kwargs,\n display=display,\n **kwargs,\n )\n best_model_results = pull(pop=True)\n\n if logging_param:\n\n metrics_log = {k: v[0] for k, v in best_model_results.items()}\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=None,\n score_dict=metrics_log,\n source=\"tune_model\",\n runtime=runtime,\n model_fit_time=best_model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n results = results.set_precision(round)\n display_container.append(results)\n\n display.display(results, clear=True)\n\n if html_param and verbose:\n logger.info(\"Rendering Visual\")\n plot_df = results.data.drop(\n [x for x in results.columns if x != optimize.display_name], axis=1\n )\n\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n x=plot_df.index,\n y=plot_df[optimize.display_name],\n mode=\"lines+markers\",\n name=optimize.display_name,\n )\n )\n msg = (\n \"Number of Clusters\"\n if _ml_usecase == MLUsecase.CLUSTERING\n else \"Anomaly Fraction\"\n )\n title = f\"{supervised_estimator_name} Metrics and {msg} by {_get_model_name(best_model)}\"\n fig.update_layout(\n plot_bgcolor=\"rgb(245,245,245)\",\n title={\n \"text\": title,\n \"y\": 0.95,\n \"x\": 0.45,\n \"xanchor\": \"center\",\n \"yanchor\": \"top\",\n },\n xaxis_title=msg,\n yaxis_title=optimize.display_name,\n )\n fig.show()\n logger.info(\"Visual Rendered Successfully\")\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(best_model))\n logger.info(\n \"tune_model() succesfully completed......................................\"\n )\n\n gc.collect()\n\n return best_model\n\n\ndef tune_model_supervised(\n estimator,\n fold: Optional[Union[int, Any]] = None,\n round: int = 4,\n n_iter: int = 10,\n custom_grid: Optional[Union[Dict[str, list], Any]] = None,\n optimize: str = \"Accuracy\",\n custom_scorer=None, # added in pycaret==2.1 - depreciated\n search_library: str = \"scikit-learn\",\n search_algorithm: Optional[str] = None,\n early_stopping: Any = False,\n early_stopping_max_iters: int = 10,\n choose_better: bool = False,\n 
fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n return_tuner: bool = False,\n verbose: bool = True,\n tuner_verbose: Union[int, bool] = True,\n display: Optional[Display] = None,\n **kwargs,\n) -> Any:\n\n \"\"\"\n This function tunes the hyperparameters of a model and scores it using Cross Validation.\n The output prints a score grid that shows Accuracy, AUC, Recall\n Precision, F1, Kappa and MCC by fold (by default = 10 Folds).\n\n This function returns a trained model object.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> xgboost = create_model('xgboost')\n >>> tuned_xgboost = tune_model(xgboost)\n\n This will tune the hyperparameters of Extreme Gradient Boosting Classifier.\n\n\n Parameters\n ----------\n estimator : object, default = None\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n n_iter: integer, default = 10\n Number of iterations within the Random Grid Search. For every iteration,\n the model randomly selects one value from the pre-defined grid of\n hyperparameters.\n\n custom_grid: dictionary, default = None\n To use custom hyperparameters for tuning pass a dictionary with parameter name\n and values to be iterated. When set to None it uses pre-defined tuning grid.\n Custom grids must be in a format supported by the chosen search library.\n\n optimize: str, default = 'Accuracy'\n Measure used to select the best model through hyperparameter tuning.\n Can be either a string representing a metric or a custom scorer object\n created using sklearn.make_scorer.\n\n custom_scorer: object, default = None\n Will be eventually depreciated.\n custom_scorer can be passed to tune hyperparameters of the model. It must be\n created using sklearn.make_scorer.\n\n search_library: str, default = 'scikit-learn'\n The search library used to tune hyperparameters.\n Possible values:\n\n - 'scikit-learn' - default, requires no further installation\n - 'scikit-optimize' - scikit-optimize. ``pip install scikit-optimize`` https://scikit-optimize.github.io/stable/\n - 'tune-sklearn' - Ray Tune scikit API. Does not support GPU models.\n ``pip install tune-sklearn ray[tune]`` https://github.com/ray-project/tune-sklearn\n - 'optuna' - Optuna. 
``pip install optuna`` https://optuna.org/\n\n    search_algorithm: str, default = None\n        The search algorithm to be used for finding the best hyperparameters.\n        Selection of search algorithms depends on the search_library parameter.\n        Some search algorithms require additional libraries to be installed.\n        If None, will use search library-specific default algorithm.\n        'scikit-learn' possible values:\n\n        - 'random' - random grid search (default)\n        - 'grid' - grid search\n\n        'scikit-optimize' possible values:\n\n        - 'bayesian' - Bayesian search (default)\n\n        'tune-sklearn' possible values:\n\n        - 'random' - random grid search (default)\n        - 'grid' - grid search\n        - 'bayesian' - Bayesian search using scikit-optimize\n          ``pip install scikit-optimize``\n        - 'hyperopt' - Tree-structured Parzen Estimator search using Hyperopt\n          ``pip install hyperopt``\n        - 'optuna' - Tree-structured Parzen Estimator search using Optuna\n          ``pip install optuna``\n        - 'bohb' - Bayesian search using HpBandSter\n          ``pip install hpbandster ConfigSpace``\n\n        'optuna' possible values:\n\n        - 'random' - randomized search\n        - 'tpe' - Tree-structured Parzen Estimator search (default)\n\n    early_stopping: bool or str or object, default = False\n        Use early stopping to stop fitting to a hyperparameter configuration\n        if it performs poorly. Ignored if search_library is ``scikit-learn``, or\n        if the estimator doesn't have a partial_fit attribute.\n        If False or None, early stopping will not be used.\n        Can be either an object accepted by the search library or one of the\n        following:\n\n        - 'asha' for Asynchronous Successive Halving Algorithm\n        - 'hyperband' for Hyperband\n        - 'median' for median stopping rule\n\n        More info for Optuna - https://optuna.readthedocs.io/en/stable/reference/pruners.html\n        More info for Ray Tune (tune-sklearn) - https://docs.ray.io/en/master/tune/api_docs/schedulers.html\n\n    early_stopping_max_iters: int, default = 10\n        Maximum number of epochs to run for each sampled configuration.\n        Ignored if early_stopping is False or None.\n\n    choose_better: bool, default = False\n        When set to True, the base estimator is returned when the performance doesn't\n        improve by tune_model. This guarantees the returned object performs at least\n        as well as the base estimator created using create_model or the model returned by\n        compare_models.\n\n    fit_kwargs: dict, default = {} (empty dict)\n        Dictionary of arguments passed to the fit method of the tuner.\n\n    groups: str or array-like, with shape (n_samples,), default = None\n        Optional Group labels for the samples used while splitting the dataset into train/test set.\n        If string is passed, will use the data column with that name as the groups.\n        Only used if a group based cross-validation generator is used (eg. GroupKFold).\n        If None, will use the value set in fold_groups param in setup().\n\n    return_tuner: bool, default = False\n        If True, will return a tuple of (model, tuner_object). Otherwise,\n        will return just the best model.\n\n    verbose: bool, default = True\n        Score grid is not printed when verbose is set to False.\n\n    tuner_verbose: bool or int, default = True\n        If True or above 0, will print messages from the tuner. Higher values\n        print more messages. 
Ignored if verbose param is False.\n\n **kwargs:\n Additional keyword arguments to pass to the optimizer.\n\n Returns\n -------\n score_grid\n A table containing the scores of the model across the kfolds.\n Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n Kappa and MCC. Mean and standard deviation of the scores across\n the folds are also returned.\n\n model\n Trained and tuned model object.\n\n tuner_object\n Only if return_tuner param is True. The object used for tuning.\n\n Notes\n -----\n\n - If a StackingClassifier is passed, the hyperparameters of the meta model (final_estimator)\n will be tuned.\n\n - If a VotingClassifier is passed, the weights will be tuned.\n\n Warnings\n --------\n\n - Using 'Grid' search algorithm with default parameter grids may result in very\n long computation.\n\n\n \"\"\"\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing tune_model()\")\n logger.info(f\"tune_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # checking estimator if string\n if type(estimator) is str:\n raise TypeError(\n \"The behavior of tune_model in version 1.0.1 is changed. Please pass trained model object.\"\n )\n\n # Check for estimator\n if not hasattr(estimator, \"fit\"):\n raise ValueError(\n f\"Estimator {estimator} does not have the required fit() method.\"\n )\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking n_iter parameter\n if type(n_iter) is not int:\n raise TypeError(\"n_iter parameter only accepts integer value.\")\n\n # checking early_stopping parameter\n possible_early_stopping = [\"asha\", \"Hyperband\", \"Median\"]\n if (\n isinstance(early_stopping, str)\n and early_stopping not in possible_early_stopping\n ):\n raise TypeError(\n f\"early_stopping parameter must be one of {', '.join(possible_early_stopping)}\"\n )\n\n # checking early_stopping_max_iters parameter\n if type(early_stopping_max_iters) is not int:\n raise TypeError(\n \"early_stopping_max_iters parameter only accepts integer value.\"\n )\n\n # checking search_library parameter\n possible_search_libraries = [\n \"scikit-learn\",\n \"scikit-optimize\",\n \"tune-sklearn\",\n \"optuna\",\n ]\n search_library = search_library.lower()\n if search_library not in possible_search_libraries:\n raise ValueError(\n f\"search_library parameter must be one of {', '.join(possible_search_libraries)}\"\n )\n\n if search_library == \"scikit-optimize\":\n try:\n import skopt\n except ImportError:\n raise ImportError(\n \"'scikit-optimize' requires scikit-optimize package to be installed. 
Do: pip install scikit-optimize\"\n )\n\n if not search_algorithm:\n search_algorithm = \"bayesian\"\n\n possible_search_algorithms = [\"bayesian\"]\n if search_algorithm not in possible_search_algorithms:\n raise ValueError(\n f\"For 'scikit-optimize' search_algorithm parameter must be one of {', '.join(possible_search_algorithms)}\"\n )\n\n elif search_library == \"tune-sklearn\":\n try:\n import tune_sklearn\n except ImportError:\n raise ImportError(\n \"'tune-sklearn' requires tune_sklearn package to be installed. Do: pip install tune-sklearn ray[tune]\"\n )\n\n if not search_algorithm:\n search_algorithm = \"random\"\n\n possible_search_algorithms = [\n \"random\",\n \"grid\",\n \"bayesian\",\n \"hyperopt\",\n \"bohb\",\n \"optuna\",\n ]\n if search_algorithm not in possible_search_algorithms:\n raise ValueError(\n f\"For 'tune-sklearn' search_algorithm parameter must be one of {', '.join(possible_search_algorithms)}\"\n )\n\n if search_algorithm == \"bohb\":\n try:\n from ray.tune.suggest.bohb import TuneBOHB\n from ray.tune.schedulers import HyperBandForBOHB\n import ConfigSpace as CS\n import hpbandster\n except ImportError:\n raise ImportError(\n \"It appears that either HpBandSter or ConfigSpace is not installed. Do: pip install hpbandster ConfigSpace\"\n )\n elif search_algorithm == \"hyperopt\":\n try:\n from ray.tune.suggest.hyperopt import HyperOptSearch\n from hyperopt import hp\n except ImportError:\n raise ImportError(\n \"It appears that hyperopt is not installed. Do: pip install hyperopt\"\n )\n elif search_algorithm == \"bayesian\":\n try:\n import skopt\n except ImportError:\n raise ImportError(\n \"It appears that scikit-optimize is not installed. Do: pip install scikit-optimize\"\n )\n elif search_algorithm == \"optuna\":\n try:\n import optuna\n except ImportError:\n raise ImportError(\n \"'optuna' requires optuna package to be installed. Do: pip install optuna\"\n )\n\n elif search_library == \"optuna\":\n try:\n import optuna\n except ImportError:\n raise ImportError(\n \"'optuna' requires optuna package to be installed. Do: pip install optuna\"\n )\n\n if not search_algorithm:\n search_algorithm = \"tpe\"\n\n possible_search_algorithms = [\"random\", \"tpe\"]\n if search_algorithm not in possible_search_algorithms:\n raise ValueError(\n f\"For 'optuna' search_algorithm parameter must be one of {', '.join(possible_search_algorithms)}\"\n )\n else:\n if not search_algorithm:\n search_algorithm = \"random\"\n\n possible_search_algorithms = [\"random\", \"grid\"]\n if search_algorithm not in possible_search_algorithms:\n raise ValueError(\n f\"For 'scikit-learn' search_algorithm parameter must be one of {', '.join(possible_search_algorithms)}\"\n )\n\n if custom_scorer is not None:\n optimize = custom_scorer\n warnings.warn(\n \"custom_scorer parameter will be depreciated, use optimize instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if isinstance(optimize, str):\n # checking optimize parameter\n optimize = _get_metric(optimize)\n if optimize is None:\n raise ValueError(\n \"Optimize method not supported. See docstring for list of available parameters.\"\n )\n\n # checking optimize parameter for multiclass\n if _is_multiclass():\n if not optimize.is_multiclass:\n raise TypeError(\n \"Optimization metric not supported for multiclass problems. 
See docstring for list of other optimization parameters.\"\n )\n else:\n logger.info(f\"optimize set to user defined function {optimize}\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"verbose parameter can only take argument as True or False.\")\n\n # checking verbose parameter\n if type(return_tuner) is not bool:\n raise TypeError(\n \"return_tuner parameter can only take argument as True or False.\"\n )\n\n if not verbose:\n tuner_verbose = 0\n\n if type(tuner_verbose) not in (bool, int):\n raise TypeError(\"tuner_verbose parameter must be a bool or an int.\")\n\n tuner_verbose = int(tuner_verbose)\n\n if tuner_verbose < 0:\n tuner_verbose = 0\n elif tuner_verbose > 2:\n tuner_verbose = 2\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n if not display:\n progress_args = {\"max\": 3 + 4}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . .\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n # ignore warnings\n\n warnings.filterwarnings(\"ignore\")\n\n import logging\n\n np.random.seed(seed)\n\n logger.info(\"Copying training dataset\")\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X_train.copy()\n data_y = y_train.copy()\n\n # reset index\n data_X.reset_index(drop=True, inplace=True)\n data_y.reset_index(drop=True, inplace=True)\n\n display.move_progress()\n\n # setting optimize parameter\n\n compare_dimension = optimize.display_name\n optimize = optimize.scorer\n\n # convert trained estimator into string name for grids\n\n logger.info(\"Checking base model\")\n\n is_stacked_model = False\n\n if hasattr(estimator, \"final_estimator\"):\n logger.info(\"Model is stacked, using the definition of the meta-model\")\n is_stacked_model = True\n estimator_id = _get_model_id(estimator.final_estimator)\n else:\n estimator_id = _get_model_id(estimator)\n if estimator_id is None:\n if custom_grid is None:\n raise ValueError(\n \"When passing a model not in PyCaret's model library, the custom_grid parameter must be provided.\"\n )\n estimator_name = _get_model_name(estimator)\n estimator_definition = None\n logger.info(\"A custom model has been passed\")\n else:\n estimator_definition = _all_models_internal[estimator_id]\n estimator_name = estimator_definition.name\n logger.info(f\"Base model : {estimator_name}\")\n\n if estimator_definition is None or estimator_definition.tunable is None:\n model = clone(estimator)\n else:\n logger.info(\"Model has a special tunable class, using that\")\n if is_stacked_model:\n model = clone(estimator)\n model.set_params(\n final_estimator=estimator_definition.tunable(**estimator.get_params())\n )\n else:\n model = clone(estimator_definition.tunable(**estimator.get_params()))\n\n base_estimator = model\n\n if is_stacked_model:\n base_estimator = model.final_estimator\n\n display.update_monitor(2, estimator_name)\n display.display_monitor()\n\n 
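# Usage sketch (hypothetical names and values, for illustration only): a custom\n    # search space is passed as a plain dict keyed by the bare estimator parameter\n    # names; the keys are prefixed with the pipeline step label further below before\n    # being handed to the chosen search library, e.g.\n    # >>> tuned_dt = tune_model(dt, custom_grid={'max_depth': [2, 4, 6, 8]},\n    # ...                       search_library='scikit-learn', search_algorithm='random')\n\n    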
display.move_progress()\n\n logger.info(\"Declaring metric variables\")\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(1, \"Searching Hyperparameters\")\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n logger.info(\"Defining Hyperparameters\")\n\n from pycaret.internal.tunable import VotingClassifier, VotingRegressor\n\n def total_combintaions_in_grid(grid):\n nc = 1\n\n def get_iter(x):\n if isinstance(x, dict):\n return x.values()\n return x\n\n for v in get_iter(grid):\n if isinstance(v, dict):\n for v2 in get_iter(v):\n nc *= len(v2)\n else:\n nc *= len(v)\n return nc\n\n def get_ccp_alphas(estimator):\n path = estimator.cost_complexity_pruning_path(X_train, y_train)\n ccp_alphas, impurities = path.ccp_alphas, path.impurities\n return list(ccp_alphas[:-1])\n\n if custom_grid is not None:\n if not isinstance(custom_grid, dict):\n raise TypeError(f\"custom_grid must be a dict, got {type(custom_grid)}.\")\n param_grid = custom_grid\n if not (\n search_library == \"scikit-learn\"\n or (\n search_library == \"tune-sklearn\"\n and (search_algorithm == \"grid\" or search_algorithm == \"random\")\n )\n ):\n param_grid = {\n k: CategoricalDistribution(v) if isinstance(v, Iterable) else v\n for k, v in param_grid.items()\n }\n elif any(isinstance(v, Distribution) for k, v in param_grid.items()):\n raise TypeError(\n f\"For the combination of search_library {search_library} and search_algorithm {search_algorithm}, PyCaret Distribution objects are not supported. Pass a list or other object supported by the search library (in most cases, an object with a 'rvs' function).\"\n )\n elif search_library == \"scikit-learn\" or (\n search_library == \"tune-sklearn\"\n and (search_algorithm == \"grid\" or search_algorithm == \"random\")\n ):\n param_grid = estimator_definition.tune_grid\n if isinstance(base_estimator, (VotingClassifier, VotingRegressor)):\n # special case to handle VotingClassifier, as weights need to be\n # generated dynamically\n param_grid = {\n f\"weight_{i}\": np.arange(0.01, 1, 0.01)\n for i, e in enumerate(base_estimator.estimators)\n }\n # if hasattr(base_estimator, \"cost_complexity_pruning_path\"):\n # # special case for Tree-based models\n # param_grid[\"ccp_alpha\"] = get_ccp_alphas(base_estimator)\n # if \"min_impurity_decrease\" in param_grid:\n # param_grid.pop(\"min_impurity_decrease\")\n\n if search_algorithm != \"grid\":\n tc = total_combintaions_in_grid(param_grid)\n if tc <= n_iter:\n logger.info(\n f\"{n_iter} is bigger than total combinations {tc}, setting search algorithm to grid\"\n )\n search_algorithm = \"grid\"\n else:\n param_grid = estimator_definition.tune_distribution\n\n if isinstance(base_estimator, (VotingClassifier, VotingRegressor)):\n # special case to handle VotingClassifier, as weights need to be\n # generated dynamically\n param_grid = {\n f\"weight_{i}\": UniformDistribution(0.000000001, 1)\n for i, e in enumerate(base_estimator.estimators)\n }\n # if hasattr(base_estimator, \"cost_complexity_pruning_path\"):\n # # special case for Tree-based models\n # param_grid[\"ccp_alpha\"] = CategoricalDistribution(\n # get_ccp_alphas(base_estimator)\n # )\n # if \"min_impurity_decrease\" in param_grid:\n # param_grid.pop(\"min_impurity_decrease\")\n\n if not param_grid:\n raise ValueError(\n \"parameter grid for tuning is empty. If passing custom_grid, make sure that it is not empty. 
If not passing custom_grid, the passed estimator does not have a built-in tuning grid.\"\n )\n\n suffixes = []\n\n if is_stacked_model:\n logger.info(\"Stacked model passed, will tune meta model hyperparameters\")\n suffixes.append(\"final_estimator\")\n\n gc.collect()\n\n with estimator_pipeline(_internal_pipeline, model) as pipeline_with_model:\n extra_params = {}\n\n fit_kwargs = _get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)\n\n actual_estimator_label = get_pipeline_estimator_label(pipeline_with_model)\n\n suffixes.append(actual_estimator_label)\n\n suffixes = \"__\".join(reversed(suffixes))\n\n param_grid = {f\"{suffixes}__{k}\": v for k, v in param_grid.items()}\n\n if estimator_definition is not None:\n search_kwargs = {**estimator_definition.tune_args, **kwargs}\n n_jobs = (\n _gpu_n_jobs_param\n if estimator_definition.is_gpu_enabled\n else n_jobs_param\n )\n else:\n search_kwargs = {}\n n_jobs = n_jobs_param\n\n if custom_grid is not None:\n logger.info(f\"custom_grid: {param_grid}\")\n\n from sklearn.gaussian_process import GaussianProcessClassifier\n\n # special case to prevent running out of memory\n if isinstance(pipeline_with_model.steps[-1][1], GaussianProcessClassifier):\n n_jobs = 1\n\n logger.info(f\"Tuning with n_jobs={n_jobs}\")\n\n def get_optuna_tpe_sampler():\n try:\n tpe_sampler = optuna.samplers.TPESampler(\n seed=seed, multivariate=True, constant_liar=True\n )\n except TypeError:\n # constant_liar added in 2.8.0\n tpe_sampler = optuna.samplers.TPESampler(seed=seed, multivariate=True)\n return tpe_sampler\n\n if search_library == \"optuna\":\n # suppress output\n logging.getLogger(\"optuna\").setLevel(logging.WARNING)\n\n pruner_translator = {\n \"asha\": optuna.pruners.SuccessiveHalvingPruner(),\n \"hyperband\": optuna.pruners.HyperbandPruner(),\n \"median\": optuna.pruners.MedianPruner(),\n False: optuna.pruners.NopPruner(),\n None: optuna.pruners.NopPruner(),\n }\n pruner = early_stopping\n if pruner in pruner_translator:\n pruner = pruner_translator[early_stopping]\n\n sampler_translator = {\n \"tpe\": get_optuna_tpe_sampler(),\n \"random\": optuna.samplers.RandomSampler(seed=seed),\n }\n sampler = sampler_translator[search_algorithm]\n\n try:\n param_grid = get_optuna_distributions(param_grid)\n except:\n logger.warning(\n \"Couldn't convert param_grid to specific library distributions. 
Exception:\"\n )\n logger.warning(traceback.format_exc())\n\n study = optuna.create_study(\n direction=\"maximize\", sampler=sampler, pruner=pruner\n )\n\n logger.info(\"Initializing optuna.integration.OptunaSearchCV\")\n model_grid = optuna.integration.OptunaSearchCV(\n estimator=pipeline_with_model,\n param_distributions=param_grid,\n cv=fold,\n enable_pruning=early_stopping\n and can_early_stop(pipeline_with_model, True, False, False, param_grid),\n max_iter=early_stopping_max_iters,\n n_jobs=n_jobs,\n n_trials=n_iter,\n random_state=seed,\n scoring=optimize,\n study=study,\n refit=False,\n verbose=tuner_verbose,\n error_score=\"raise\",\n **search_kwargs,\n )\n\n elif search_library == \"tune-sklearn\":\n\n early_stopping_translator = {\n \"asha\": \"ASHAScheduler\",\n \"hyperband\": \"HyperBandScheduler\",\n \"median\": \"MedianStoppingRule\",\n }\n if early_stopping in early_stopping_translator:\n early_stopping = early_stopping_translator[early_stopping]\n\n do_early_stop = early_stopping and can_early_stop(\n pipeline_with_model, True, True, True, param_grid\n )\n\n if not do_early_stop and search_algorithm == \"bohb\":\n raise ValueError(\n \"'bohb' requires early_stopping = True and the estimator to support early stopping (has partial_fit, warm_start or is an XGBoost model).\"\n )\n\n elif early_stopping and can_early_stop(\n pipeline_with_model, False, True, False, param_grid\n ):\n if \"actual_estimator__n_estimators\" in param_grid:\n if custom_grid is None:\n extra_params[\n \"actual_estimator__n_estimators\"\n ] = pipeline_with_model.get_params()[\n \"actual_estimator__n_estimators\"\n ]\n param_grid.pop(\"actual_estimator__n_estimators\")\n else:\n raise ValueError(\n \"Param grid cannot contain n_estimators or max_iter if early_stopping is True and the model is warm started. Use early_stopping_max_iters params to set the upper bound of n_estimators or max_iter.\"\n )\n if \"actual_estimator__max_iter\" in param_grid:\n if custom_grid is None:\n param_grid.pop(\"actual_estimator__max_iter\")\n else:\n raise ValueError(\n \"Param grid cannot contain n_estimators or max_iter if early_stopping is True and the model is warm started. Use early_stopping_max_iters params to set the upper bound of n_estimators or max_iter.\"\n )\n\n from tune_sklearn import TuneSearchCV, TuneGridSearchCV\n\n with (\n true_warm_start(pipeline_with_model) if do_early_stop else nullcontext()\n ), set_n_jobs(pipeline_with_model, 1), (\n patch.dict(\"os.environ\", {\"TUNE_GLOBAL_CHECKPOINT_S\": \"1000000\"})\n if \"TUNE_GLOBAL_CHECKPOINT_S\" not in os.environ\n else nullcontext()\n ):\n if search_algorithm == \"grid\":\n\n logger.info(\"Initializing tune_sklearn.TuneGridSearchCV\")\n model_grid = TuneGridSearchCV(\n estimator=pipeline_with_model,\n param_grid=param_grid,\n early_stopping=do_early_stop,\n scoring=optimize,\n cv=fold,\n max_iters=early_stopping_max_iters,\n n_jobs=n_jobs,\n use_gpu=gpu_param,\n refit=False,\n verbose=tuner_verbose,\n pipeline_auto_early_stop=True,\n **search_kwargs,\n )\n else:\n if search_algorithm == \"hyperopt\":\n try:\n param_grid = get_hyperopt_distributions(param_grid)\n except:\n logger.warning(\n \"Couldn't convert param_grid to specific library distributions. Exception:\"\n )\n logger.warning(traceback.format_exc())\n elif search_algorithm == \"bayesian\":\n try:\n param_grid = get_skopt_distributions(param_grid)\n except:\n logger.warning(\n \"Couldn't convert param_grid to specific library distributions. 
Exception:\"\n )\n logger.warning(traceback.format_exc())\n elif search_algorithm == \"bohb\":\n try:\n param_grid = get_CS_distributions(param_grid)\n except:\n logger.warning(\n \"Couldn't convert param_grid to specific library distributions. Exception:\"\n )\n logger.warning(traceback.format_exc())\n elif search_algorithm != \"random\":\n try:\n param_grid = get_tune_distributions(param_grid)\n except:\n logger.warning(\n \"Couldn't convert param_grid to specific library distributions. Exception:\"\n )\n logger.warning(traceback.format_exc())\n if search_algorithm == \"optuna\" and not \"sampler\" in search_kwargs:\n import optuna\n\n search_kwargs[\"sampler\"] = get_optuna_tpe_sampler()\n logger.info(\n f\"Initializing tune_sklearn.TuneSearchCV, {search_algorithm}\"\n )\n model_grid = TuneSearchCV(\n estimator=pipeline_with_model,\n search_optimization=search_algorithm,\n param_distributions=param_grid,\n n_trials=n_iter,\n early_stopping=do_early_stop,\n scoring=optimize,\n cv=fold,\n random_state=seed,\n max_iters=early_stopping_max_iters,\n n_jobs=n_jobs,\n use_gpu=gpu_param,\n refit=True,\n verbose=tuner_verbose,\n pipeline_auto_early_stop=True,\n **search_kwargs,\n )\n elif search_library == \"scikit-optimize\":\n import skopt\n\n try:\n param_grid = get_skopt_distributions(param_grid)\n except:\n logger.warning(\n \"Couldn't convert param_grid to specific library distributions. Exception:\"\n )\n logger.warning(traceback.format_exc())\n\n logger.info(\"Initializing skopt.BayesSearchCV\")\n model_grid = skopt.BayesSearchCV(\n estimator=pipeline_with_model,\n search_spaces=param_grid,\n scoring=optimize,\n n_iter=n_iter,\n cv=fold,\n random_state=seed,\n refit=False,\n n_jobs=n_jobs,\n verbose=tuner_verbose,\n **search_kwargs,\n )\n else:\n # needs to be imported like that for the monkeypatch\n import sklearn.model_selection._search\n\n if search_algorithm == \"grid\":\n logger.info(\"Initializing GridSearchCV\")\n model_grid = sklearn.model_selection._search.GridSearchCV(\n estimator=pipeline_with_model,\n param_grid=param_grid,\n scoring=optimize,\n cv=fold,\n refit=False,\n n_jobs=n_jobs,\n verbose=tuner_verbose,\n **search_kwargs,\n )\n else:\n logger.info(\"Initializing RandomizedSearchCV\")\n model_grid = sklearn.model_selection._search.RandomizedSearchCV(\n estimator=pipeline_with_model,\n param_distributions=param_grid,\n scoring=optimize,\n n_iter=n_iter,\n cv=fold,\n random_state=seed,\n refit=False,\n n_jobs=n_jobs,\n verbose=tuner_verbose,\n **search_kwargs,\n )\n\n # with io.capture_output():\n if search_library == \"scikit-learn\":\n # monkey patching to fix overflows on Windows\n with patch(\n \"sklearn.model_selection._search.sample_without_replacement\",\n pycaret.internal.patches.sklearn._mp_sample_without_replacement,\n ), patch(\n \"sklearn.model_selection._search.ParameterGrid.__getitem__\",\n pycaret.internal.patches.sklearn._mp_ParameterGrid_getitem,\n ):\n model_grid.fit(X_train, y_train, groups=groups, **fit_kwargs)\n else:\n model_grid.fit(X_train, y_train, groups=groups, **fit_kwargs)\n best_params = model_grid.best_params_\n logger.info(f\"best_params: {best_params}\")\n best_params = {**best_params, **extra_params}\n best_params = {\n k.replace(f\"{actual_estimator_label}__\", \"\"): v\n for k, v in best_params.items()\n }\n cv_results = None\n try:\n cv_results = model_grid.cv_results_\n except:\n logger.warning(\"Couldn't get cv_results from model_grid. 
Exception:\")\n logger.warning(traceback.format_exc())\n\n display.move_progress()\n\n logger.info(\"Hyperparameter search completed\")\n\n if isinstance(model, TunableMixin):\n logger.info(\"Getting base sklearn object from tunable\")\n model.set_params(**best_params)\n best_params = {\n k: v\n for k, v in model.get_params().items()\n if k in model.get_base_sklearn_params().keys()\n }\n model = model.get_base_sklearn_object()\n\n logger.info(\"SubProcess create_model() called ==================================\")\n best_model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n display=display,\n fold=fold,\n round=round,\n groups=groups,\n fit_kwargs=fit_kwargs,\n **best_params,\n )\n model_results = pull()\n logger.info(\"SubProcess create_model() end ==================================\")\n\n if choose_better:\n best_model = _choose_better(\n [estimator, (best_model, model_results)],\n compare_dimension,\n fold,\n groups=groups,\n fit_kwargs=fit_kwargs,\n display=display,\n )\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param:\n\n avgs_dict_log = {k: v for k, v in model_results.loc[\"Mean\"].items()}\n\n try:\n _mlflow_log_model(\n model=best_model,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"tune_model\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n tune_cv_results=cv_results,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {best_model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n display.display(model_results, clear=True)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(best_model))\n logger.info(\n \"tune_model() succesfully completed......................................\"\n )\n\n gc.collect()\n if return_tuner:\n return (best_model, model_grid)\n return best_model\n\n\ndef ensemble_model(\n estimator,\n method: str = \"Bagging\",\n fold: Optional[Union[int, Any]] = None,\n n_estimators: int = 10,\n round: int = 4,\n choose_better: bool = False,\n optimize: str = \"Accuracy\",\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n verbose: bool = True,\n display: Optional[Display] = None, # added in pycaret==2.2.0\n) -> Any:\n \"\"\"\n This function ensembles the trained base estimator using the method defined in\n 'method' param (default = 'Bagging'). 
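Under the hood, 'Bagging' wraps the estimator in the internal 'Bagging'\n    meta-estimator definition and 'Boosting' wraps it in the internal 'ada'\n    (AdaBoost) meta-estimator definition. 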
The output prints a score grid that shows\n Accuracy, AUC, Recall, Precision, F1, Kappa and MCC by fold (default = 10 Fold).\n\n This function returns a trained model object.\n\n Model must be created using create_model() or tune_model().\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> dt = create_model('dt')\n >>> ensembled_dt = ensemble_model(dt)\n\n This will return an ensembled Decision Tree model using 'Bagging'.\n\n Parameters\n ----------\n estimator : object, default = None\n\n method: str, default = 'Bagging'\n Bagging method will create an ensemble meta-estimator that fits base\n classifiers each on random subsets of the original dataset. The other\n available method is 'Boosting' which will create a meta-estimators by\n fitting a classifier on the original dataset and then fits additional\n copies of the classifier on the same dataset but where the weights of\n incorrectly classified instances are adjusted such that subsequent\n classifiers focus more on difficult cases.\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n n_estimators: integer, default = 10\n The number of base estimators in the ensemble.\n In case of perfect fit, the learning procedure is stopped early.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n choose_better: bool, default = False\n When set to set to True, base estimator is returned when the metric doesn't\n improve by ensemble_model. This gurantees the returned object would perform\n atleast equivalent to base estimator created using create_model or model\n returned by compare_models.\n\n optimize: str, default = 'Accuracy'\n Only used when choose_better is set to True. optimize parameter is used\n to compare emsembled model with base estimator. Values accepted in\n optimize parameter are 'Accuracy', 'AUC', 'Recall', 'Precision', 'F1',\n 'Kappa', 'MCC'.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n verbose: bool, default = True\n Score grid is not printed when verbose is set to False.\n\n Returns\n -------\n score_grid\n A table containing the scores of the model across the kfolds.\n Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n Kappa and MCC. 
Mean and standard deviation of the scores across\n the folds are also returned.\n\n model\n Trained ensembled model object.\n\n Warnings\n --------\n - If target variable is multiclass (more than 2 classes), AUC will be returned\n as zero (0.0).\n\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing ensemble_model()\")\n logger.info(f\"ensemble_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # Check for estimator\n if not hasattr(estimator, \"fit\"):\n raise ValueError(\n f\"Estimator {estimator} does not have the required fit() method.\"\n )\n\n # Check for allowed method\n available_method = [\"Bagging\", \"Boosting\"]\n if method not in available_method:\n raise ValueError(\n \"Method parameter only accepts two values 'Bagging' or 'Boosting'.\"\n )\n\n # check boosting conflict\n if method == \"Boosting\":\n\n boosting_model_definition = _all_models_internal[\"ada\"]\n\n check_model = estimator\n\n try:\n check_model = boosting_model_definition.class_def(\n check_model,\n n_estimators=n_estimators,\n **boosting_model_definition.args,\n )\n with io.capture_output():\n check_model.fit(X_train, y_train)\n except:\n raise TypeError(\n \"Estimator not supported for the Boosting method. Change the estimator or method to 'Bagging'.\"\n )\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking n_estimators parameter\n if type(n_estimators) is not int:\n raise TypeError(\"n_estimators parameter only accepts integer value.\")\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n # checking optimize parameter\n optimize = _get_metric(optimize)\n if optimize is None:\n raise ValueError(\n f\"Optimize method not supported. See docstring for list of available parameters.\"\n )\n\n # checking optimize parameter for multiclass\n if _is_multiclass():\n if not optimize.is_multiclass:\n raise TypeError(\n f\"Optimization metric not supported for multiclass problems. See docstring for list of other optimization parameters.\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n if not display:\n progress_args = {\"max\": 2 + 4}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . 
.\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n logger.info(\"Importing libraries\")\n\n np.random.seed(seed)\n\n logger.info(\"Copying training dataset\")\n\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X_train.copy()\n data_y = y_train.copy()\n\n # reset index\n data_X.reset_index(drop=True, inplace=True)\n data_y.reset_index(drop=True, inplace=True)\n\n display.move_progress()\n\n # setting optimize parameter\n\n compare_dimension = optimize.display_name\n optimize = optimize.scorer\n\n logger.info(\"Checking base model\")\n\n _estimator_ = estimator\n\n estimator_id = _get_model_id(estimator)\n\n if estimator_id is None:\n estimator_name = _get_model_name(estimator)\n logger.info(\"A custom model has been passed\")\n else:\n estimator_definition = _all_models_internal[estimator_id]\n estimator_name = estimator_definition.name\n\n logger.info(f\"Base model : {estimator_name}\")\n\n display.update_monitor(2, estimator_name)\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(1, \"Selecting Estimator\")\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n model = get_estimator_from_meta_estimator(_estimator_)\n\n logger.info(\"Importing untrained ensembler\")\n\n if method == \"Bagging\":\n logger.info(\"Ensemble method set to Bagging\")\n bagging_model_definition = _all_models_internal[\"Bagging\"]\n\n model = bagging_model_definition.class_def(\n model,\n bootstrap=True,\n n_estimators=n_estimators,\n **bagging_model_definition.args,\n )\n\n else:\n logger.info(\"Ensemble method set to Boosting\")\n boosting_model_definition = _all_models_internal[\"ada\"]\n model = boosting_model_definition.class_def(\n model, n_estimators=n_estimators, **boosting_model_definition.args\n )\n\n display.move_progress()\n\n logger.info(\"SubProcess create_model() called ==================================\")\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n display=display,\n fold=fold,\n round=round,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n best_model = model\n model_results = pull()\n logger.info(\"SubProcess create_model() end ==================================\")\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param:\n\n avgs_dict_log = {k: v for k, v in model_results.loc[\"Mean\"].items()}\n\n try:\n _mlflow_log_model(\n model=best_model,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"ensemble_model\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {best_model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n if choose_better:\n model = _choose_better(\n [_estimator_, (best_model, model_results)],\n compare_dimension,\n fold,\n groups=groups,\n fit_kwargs=fit_kwargs,\n display=display,\n )\n\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n display.display(model_results, clear=True)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n 
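# Recap sketch of the 'Bagging' branch above (hedged, hypothetical names, not the\n    # exact internal call): for the classification case it is roughly equivalent to\n    # wrapping the base estimator yourself, e.g.\n    # >>> from sklearn.ensemble import BaggingClassifier\n    # >>> bagged_dt = BaggingClassifier(dt, n_estimators=10, bootstrap=True)\n    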
logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model))\n logger.info(\n \"ensemble_model() succesfully completed......................................\"\n )\n\n gc.collect()\n return model\n\n\ndef blend_models(\n estimator_list: list,\n fold: Optional[Union[int, Any]] = None,\n round: int = 4,\n choose_better: bool = False,\n optimize: str = \"Accuracy\",\n method: str = \"auto\",\n weights: Optional[List[float]] = None, # added in pycaret==2.2.0\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n verbose: bool = True,\n display: Optional[Display] = None, # added in pycaret==2.2.0\n) -> Any:\n\n \"\"\"\n This function creates a Soft Voting / Majority Rule classifier for all the\n estimators in the model library (excluding the few when turbo is True) or\n for specific trained estimators passed as a list in estimator_list param.\n It scores it using Cross Validation. The output prints a score\n grid that shows Accuracy, AUC, Recall, Precision, F1, Kappa and MCC by\n fold (default CV = 10 Folds).\n\n This function returns a trained model object.\n\n Example\n -------\n >>> lr = create_model('lr')\n >>> rf = create_model('rf')\n >>> knn = create_model('knn')\n >>> blend_three = blend_models(estimator_list = [lr,rf,knn])\n\n This will create a VotingClassifier of lr, rf and knn.\n\n Parameters\n ----------\n estimator_list : list of objects\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n choose_better: bool, default = False\n When set to set to True, base estimator is returned when the metric doesn't\n improve by ensemble_model. This gurantees the returned object would perform\n atleast equivalent to base estimator created using create_model or model\n returned by compare_models.\n\n optimize: str, default = 'Accuracy'\n Only used when choose_better is set to True. optimize parameter is used\n to compare emsembled model with base estimator. Values accepted in\n optimize parameter are 'Accuracy', 'AUC', 'Recall', 'Precision', 'F1',\n 'Kappa', 'MCC'.\n\n method: str, default = 'auto'\n 'hard' uses predicted class labels for majority rule voting. 'soft', predicts\n the class label based on the argmax of the sums of the predicted probabilities,\n which is recommended for an ensemble of well-calibrated classifiers. Default value,\n 'auto', will try to use 'soft' and fall back to 'hard' if the former is not supported.\n\n weights: list, default = None\n Sequence of weights (float or int) to weight the occurrences of predicted class labels (hard voting)\n or class probabilities before averaging (soft voting). Uses uniform weights if None.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. 
GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n verbose: bool, default = True\n Score grid is not printed when verbose is set to False.\n\n Returns\n -------\n score_grid\n A table containing the scores of the model across the kfolds.\n Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n Kappa and MCC. Mean and standard deviation of the scores across\n the folds are also returned.\n\n model\n Trained Voting Classifier model object.\n\n Warnings\n --------\n - When passing estimator_list with method set to 'soft'. All the models in the\n estimator_list must support predict_proba function. 'svm' and 'ridge' doesnt\n support the predict_proba and hence an exception will be raised.\n\n - When estimator_list is set to 'All' and method is forced to 'soft', estimators\n that doesnt support the predict_proba function will be dropped from the estimator\n list.\n\n - If target variable is multiclass (more than 2 classes), AUC will be returned as\n zero (0.0).\n\n\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing blend_models()\")\n logger.info(f\"blend_models({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # checking method parameter\n available_method = [\"auto\", \"soft\", \"hard\"]\n if method not in available_method:\n raise ValueError(\n \"Method parameter only accepts 'auto', 'soft' or 'hard' as a parameter. See Docstring for details.\"\n )\n\n # checking error for estimator_list\n for i in estimator_list:\n if not hasattr(i, \"fit\"):\n raise ValueError(f\"Estimator {i} does not have the required fit() method.\")\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n # checking method param with estimator list\n if method != \"hard\":\n\n for i in estimator_list:\n if not hasattr(i, \"predict_proba\"):\n if method != \"auto\":\n raise TypeError(\n f\"Estimator list contains estimator {i} that doesn't support probabilities and method is forced to 'soft'. Either change the method or drop the estimator.\"\n )\n else:\n logger.info(\n f\"Estimator {i} doesn't support probabilities, falling back to 'hard'.\"\n )\n method = \"hard\"\n break\n\n if method == \"auto\":\n method = \"soft\"\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n if weights is not None:\n num_estimators = len(estimator_list)\n # checking weights parameter\n if len(weights) != num_estimators:\n raise ValueError(\n \"weights parameter must have the same length as the estimator_list.\"\n )\n if not all((isinstance(x, int) or isinstance(x, float)) for x in weights):\n raise TypeError(\"weights must contain only ints or floats.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n # checking optimize parameter\n optimize = _get_metric(optimize)\n if optimize is None:\n raise ValueError(\n f\"Optimize method not supported. 
See docstring for list of available parameters.\"\n )\n\n # checking optimize parameter for multiclass\n if _is_multiclass():\n if not optimize.is_multiclass:\n raise TypeError(\n f\"Optimization metric not supported for multiclass problems. See docstring for list of other optimization parameters.\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n if not display:\n progress_args = {\"max\": 2 + 4}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . .\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n logger.info(\"Importing libraries\")\n\n np.random.seed(seed)\n\n logger.info(\"Copying training dataset\")\n\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X_train.copy()\n data_y = y_train.copy()\n\n # reset index\n data_X.reset_index(drop=True, inplace=True)\n data_y.reset_index(drop=True, inplace=True)\n\n # setting optimize parameter\n compare_dimension = optimize.display_name\n optimize = optimize.scorer\n\n display.move_progress()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(1, \"Compiling Estimators\")\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n logger.info(\"Getting model names\")\n estimator_dict = {}\n for x in estimator_list:\n x = get_estimator_from_meta_estimator(x)\n name = _get_model_id(x)\n suffix = 1\n original_name = name\n while name in estimator_dict:\n name = f\"{original_name}_{suffix}\"\n suffix += 1\n estimator_dict[name] = x\n\n estimator_list = list(estimator_dict.items())\n\n voting_model_definition = _all_models_internal[\"Voting\"]\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n model = voting_model_definition.class_def(\n estimators=estimator_list, voting=method, n_jobs=_gpu_n_jobs_param\n )\n else:\n model = voting_model_definition.class_def(\n estimators=estimator_list, n_jobs=_gpu_n_jobs_param\n )\n\n display.update_monitor(2, voting_model_definition.name)\n display.display_monitor()\n\n display.move_progress()\n\n logger.info(\"SubProcess create_model() called ==================================\")\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n display=display,\n fold=fold,\n round=round,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n model_results = pull()\n logger.info(\"SubProcess create_model() end ==================================\")\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param:\n\n avgs_dict_log = {k: v for k, v in model_results.loc[\"Mean\"].items()}\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"blend_models\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model} raised an exception:\")\n 
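# Log the full traceback below and continue; a failed MLflow log should not\n            # abort blend_models().\n            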
logger.error(traceback.format_exc())\n\n if choose_better:\n model = _choose_better(\n [(model, model_results)] + estimator_list,\n compare_dimension,\n fold,\n groups=groups,\n fit_kwargs=fit_kwargs,\n display=display,\n )\n\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n display.display(model_results, clear=True)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model))\n logger.info(\n \"blend_models() succesfully completed......................................\"\n )\n\n gc.collect()\n return model\n\n\ndef stack_models(\n estimator_list: list,\n meta_model=None,\n fold: Optional[Union[int, Any]] = None,\n round: int = 4,\n method: str = \"auto\",\n restack: bool = True,\n choose_better: bool = False,\n optimize: str = \"Accuracy\",\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n verbose: bool = True,\n display: Optional[Display] = None,\n) -> Any:\n\n \"\"\"\n This function trains a meta model and scores it using Cross Validation.\n The predictions from the base level models as passed in the estimator_list param\n are used as input features for the meta model. The restacking parameter controls\n the ability to expose raw features to the meta model when set to True\n (default = False).\n\n The output prints the score grid that shows Accuracy, AUC, Recall, Precision,\n F1, Kappa and MCC by fold (default = 10 Folds).\n\n This function returns a trained model object.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> dt = create_model('dt')\n >>> rf = create_model('rf')\n >>> ada = create_model('ada')\n >>> ridge = create_model('ridge')\n >>> knn = create_model('knn')\n >>> stacked_models = stack_models(estimator_list=[dt,rf,ada,ridge,knn])\n\n This will create a meta model that will use the predictions of all the\n models provided in estimator_list param. By default, the meta model is\n Logistic Regression but can be changed with meta_model param.\n\n Parameters\n ----------\n estimator_list : list of objects\n\n meta_model : object, default = None\n If set to None, Logistic Regression is used as a meta model.\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. 
If None, will use the CV generator defined in setup().\n        If integer, will use KFold CV with that many folds.\n        When cross_validation is False, this parameter is ignored.\n\n    round: integer, default = 4\n        Number of decimal places the metrics in the score grid will be rounded to.\n\n    method: string, default = 'auto'\n        - if 'auto', it will try to invoke, for each estimator, 'predict_proba',\n        'decision_function' or 'predict' in that order.\n        - otherwise, one of 'predict_proba', 'decision_function' or 'predict'.\n        If the method is not implemented by the estimator, it will raise an error.\n\n    restack: bool, default = True\n        When restack is set to True, raw data is exposed to the meta model when\n        making predictions; when False, only the predicted labels or\n        probabilities are passed to the meta model when making final predictions.\n\n    choose_better: bool, default = False\n        When set to True, the base estimator is returned when the metric doesn't\n        improve by stack_models. This guarantees the returned object performs\n        at least as well as the base estimator created using create_model or the\n        model returned by compare_models.\n\n    optimize: str, default = 'Accuracy'\n        Only used when choose_better is set to True. optimize parameter is used\n        to compare the stacked model with the base estimator. Values accepted in\n        optimize parameter are 'Accuracy', 'AUC', 'Recall', 'Precision', 'F1',\n        'Kappa', 'MCC'.\n\n    fit_kwargs: dict, default = {} (empty dict)\n        Dictionary of arguments passed to the fit method of the model.\n\n    groups: str or array-like, with shape (n_samples,), default = None\n        Optional group labels for the samples used while splitting the dataset into train/test set.\n        If a string is passed, will use the data column with that name as the groups.\n        Only used if a group based cross-validation generator is used (e.g. GroupKFold).\n        If None, will use the value set in fold_groups param in setup().\n\n    verbose: bool, default = True\n        Score grid is not printed when verbose is set to False.\n\n    Returns\n    -------\n    score_grid\n        A table containing the scores of the model across the kfolds.\n        Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n        Kappa and MCC. 
Mean and standard deviation of the scores across\n the folds are also returned.\n\n model\n Trained model object.\n\n Warnings\n --------\n - If target variable is multiclass (more than 2 classes), AUC will be returned\n as zero (0.0).\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing stack_models()\")\n logger.info(f\"stack_models({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # checking error for estimator_list\n for i in estimator_list:\n if not hasattr(i, \"fit\"):\n raise ValueError(f\"Estimator {i} does not have the required fit() method.\")\n\n # checking meta model\n if meta_model is not None:\n if not hasattr(meta_model, \"fit\"):\n raise ValueError(\n f\"Meta Model {meta_model} does not have the required fit() method.\"\n )\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking method parameter\n available_method = [\"auto\", \"predict_proba\", \"decision_function\", \"predict\"]\n if method not in available_method:\n raise ValueError(\n \"Method parameter not acceptable. It only accepts 'auto', 'predict_proba', 'decision_function', 'predict'.\"\n )\n\n # checking restack parameter\n if type(restack) is not bool:\n raise TypeError(\"Restack parameter can only take argument as True or False.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n # checking optimize parameter\n optimize = _get_metric(optimize)\n if optimize is None:\n raise ValueError(\n f\"Optimize method not supported. See docstring for list of available parameters.\"\n )\n\n # checking optimize parameter for multiclass\n if _is_multiclass():\n if not optimize.is_multiclass:\n raise TypeError(\n f\"Optimization metric not supported for multiclass problems. See docstring for list of other optimization parameters.\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n logger.info(\"Defining meta model\")\n # Defining meta model.\n if meta_model == None:\n estimator = \"lr\"\n meta_model_definition = _all_models_internal[estimator]\n meta_model_args = meta_model_definition.args\n meta_model = meta_model_definition.class_def(**meta_model_args)\n else:\n meta_model = clone(get_estimator_from_meta_estimator(meta_model))\n\n if not display:\n progress_args = {\"max\": 2 + 4}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . 
.\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n np.random.seed(seed)\n\n logger.info(\"Copying training dataset\")\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X_train.copy()\n data_y = y_train.copy()\n\n # reset index\n data_X.reset_index(drop=True, inplace=True)\n data_y.reset_index(drop=True, inplace=True)\n\n # setting optimize parameter\n compare_dimension = optimize.display_name\n optimize = optimize.scorer\n\n display.move_progress()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(1, \"Compiling Estimators\")\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n logger.info(\"Getting model names\")\n estimator_dict = {}\n for x in estimator_list:\n x = get_estimator_from_meta_estimator(x)\n name = _get_model_id(x)\n suffix = 1\n original_name = name\n while name in estimator_dict:\n name = f\"{original_name}_{suffix}\"\n suffix += 1\n estimator_dict[name] = x\n\n estimator_list = list(estimator_dict.items())\n\n logger.info(estimator_list)\n\n stacking_model_definition = _all_models_internal[\"Stacking\"]\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n model = stacking_model_definition.class_def(\n estimators=estimator_list,\n final_estimator=meta_model,\n cv=fold,\n stack_method=method,\n n_jobs=_gpu_n_jobs_param,\n passthrough=restack,\n )\n else:\n model = stacking_model_definition.class_def(\n estimators=estimator_list,\n final_estimator=meta_model,\n cv=fold,\n n_jobs=_gpu_n_jobs_param,\n passthrough=restack,\n )\n\n display.update_monitor(2, stacking_model_definition.name)\n display.display_monitor()\n\n display.move_progress()\n\n logger.info(\"SubProcess create_model() called ==================================\")\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n display=display,\n fold=fold,\n round=round,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n model_results = pull()\n logger.info(\"SubProcess create_model() end ==================================\")\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param:\n\n avgs_dict_log = {k: v for k, v in model_results.loc[\"Mean\"].items()}\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"stack_models\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n if choose_better:\n model = _choose_better(\n [(model, model_results)] + estimator_list,\n compare_dimension,\n fold,\n groups=groups,\n fit_kwargs=fit_kwargs,\n display=display,\n )\n\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n display.display(model_results, clear=True)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model))\n logger.info(\n \"stack_models() succesfully 
completed......................................\"\n )\n\n gc.collect()\n return model\n\n\ndef plot_model(\n estimator,\n plot: str = \"auc\",\n scale: float = 1, # added in pycaret==2.1.0\n save: bool = False,\n fold: Optional[Union[int, Any]] = None,\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n feature_name: Optional[str] = None,\n label: bool = False,\n use_train_data: bool = False,\n verbose: bool = True,\n system: bool = True,\n display: Optional[Display] = None, # added in pycaret==2.2.0\n display_format: Optional[str] = None,\n is_in_evaluate: bool = False,\n) -> str:\n\n \"\"\"\n This function takes a trained model object and returns a plot based on the\n test / hold-out set. The process may require the model to be re-trained in\n certain cases. See list of plots supported below.\n\n Model must be created using create_model() or tune_model().\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> plot_model(lr)\n\n This will return an AUC plot of a trained Logistic Regression model.\n\n Parameters\n ----------\n estimator : object, default = none\n A trained model object should be passed as an estimator.\n\n plot : str, default = auc\n Enter abbreviation of type of plot. The current list of plots supported are (Plot - Name):\n\n\n * 'residuals_interactive' - Interactive Residual plots\n * 'auc' - Area Under the Curve\n * 'threshold' - Discrimination Threshold \n * 'pr' - Precision Recall Curve \n * 'confusion_matrix' - Confusion Matrix \n * 'error' - Class Prediction Error \n * 'class_report' - Classification Report \n * 'boundary' - Decision Boundary \n * 'rfe' - Recursive Feature Selection \n * 'learning' - Learning Curve \n * 'manifold' - Manifold Learning \n * 'calibration' - Calibration Curve \n * 'vc' - Validation Curve \n * 'dimension' - Dimension Learning \n * 'feature' - Feature Importance \n * 'feature_all' - Feature Importance (All)\n * 'parameter' - Model Hyperparameter\n * 'lift' - Lift Curve\n * 'gain' - Gain Chart\n * 'ks' - KS Statistic Plot\n\n scale: float, default = 1\n The resolution scale of the figure.\n\n save: bool, default = False\n When set to True, Plot is saved as a 'png' file in current working directory.\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation used in certain plots. If None, will use the CV generator\n defined in setup(). If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n verbose: bool, default = True\n Progress bar not shown when verbose set to False.\n\n system: bool, default = True\n Must remain True all times. 
Only to be changed by internal functions.\n\n\n display_format: str, default = None\n To display plots in Streamlit (https://www.streamlit.io/), set this to 'streamlit'.\n Currently, not all plots are supported.\n\n Returns\n -------\n Visual_Plot\n Prints the visual plot.\n str:\n If save param is True, will return the name of the saved file.\n\n Warnings\n --------\n - 'svm' and 'ridge' doesn't support the predict_proba method. As such, AUC and\n calibration plots are not available for these estimators.\n\n - When the 'max_features' parameter of a trained model object is not equal to\n the number of samples in training set, the 'rfe' plot is not available.\n\n - 'calibration', 'threshold', 'manifold' and 'rfe' plots are not available for\n multiclass problems.\n\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing plot_model()\")\n logger.info(f\"plot_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n if not hasattr(estimator, \"fit\"):\n raise ValueError(\n f\"Estimator {estimator} does not have the required fit() method.\"\n )\n\n if plot not in _available_plots:\n raise ValueError(\n \"Plot Not Available. Please see docstring for list of available Plots.\"\n )\n\n # multiclass plot exceptions:\n multiclass_not_available = [\"calibration\", \"threshold\", \"manifold\", \"rfe\"]\n if _is_multiclass():\n if plot in multiclass_not_available:\n raise ValueError(\n \"Plot Not Available for multiclass problems. Please see docstring for list of available Plots.\"\n )\n\n # exception for CatBoost\n # if \"CatBoostClassifier\" in str(type(estimator)):\n # raise ValueError(\n # \"CatBoost estimator is not compatible with plot_model function, try using Catboost with interpret_model instead.\"\n # )\n\n # checking for auc plot\n if not hasattr(estimator, \"predict_proba\") and plot == \"auc\":\n raise TypeError(\n \"AUC plot not available for estimators with no predict_proba attribute.\"\n )\n\n # checking for auc plot\n if not hasattr(estimator, \"predict_proba\") and plot == \"auc\":\n raise TypeError(\n \"AUC plot not available for estimators with no predict_proba attribute.\"\n )\n\n # checking for calibration plot\n if not hasattr(estimator, \"predict_proba\") and plot == \"calibration\":\n raise TypeError(\n \"Calibration plot not available for estimators with no predict_proba attribute.\"\n )\n\n def is_tree(e):\n from sklearn.tree import BaseDecisionTree\n from sklearn.ensemble._forest import BaseForest\n\n if \"final_estimator\" in e.get_params():\n e = e.final_estimator\n if \"base_estimator\" in e.get_params():\n e = e.base_estimator\n if isinstance(e, BaseForest) or isinstance(e, BaseDecisionTree):\n return True\n\n # checking for calibration plot\n if plot == \"tree\" and not is_tree(estimator):\n raise TypeError(\n \"Decision Tree plot is only available for scikit-learn Decision Trees and Forests, Ensemble models using those or Stacked models using those as meta (final) estimators.\"\n )\n\n # checking for feature plot\n if not (\n hasattr(estimator, \"coef_\") or hasattr(estimator, \"feature_importances_\")\n ) and (plot == \"feature\" or plot == \"feature_all\" or plot == \"rfe\"):\n raise TypeError(\n \"Feature Importance and RFE plots not available for estimators that doesnt support coef_ or feature_importances_ attribute.\"\n )\n\n if plot == \"residuals_interactive\" and is_in_evaluate and is_in_colab():\n raise 
ValueError(\n \"Interactive Residuals plot not available in evaluate_model() in Google Colab. Do plot_model(model, plot='residuals_interactive') instead.\"\n )\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n if type(label) is not bool:\n raise TypeError(\"Label param only accepts True or False.\")\n\n if type(use_train_data) is not bool:\n raise TypeError(\"use_train_data param only accepts True or False.\")\n\n if feature_name is not None and type(feature_name) is not str:\n raise TypeError(\n \"feature parameter must be string containing column name of dataset.\"\n )\n\n # checking display_format parameter\n plot_formats = [None, \"streamlit\"]\n\n if display_format not in plot_formats:\n raise ValueError(\"display_format can only be None or 'streamlit'.\")\n\n if display_format == \"streamlit\":\n try:\n import streamlit as st\n except ImportError:\n raise ImportError(\n \"It appears that streamlit is not installed. Do: pip install streamlit\"\n )\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n cv = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n if not display:\n progress_args = {\"max\": 5}\n display = Display(\n verbose=verbose, html_param=html_param, progress_args=progress_args\n )\n display.display_progress()\n\n logger.info(\"Preloading libraries\")\n # pre-load libraries\n import matplotlib.pyplot as plt\n\n np.random.seed(seed)\n\n display.move_progress()\n\n # defining estimator as model locally\n # deepcopy instead of clone so we have a fitted estimator\n if isinstance(estimator, InternalPipeline):\n estimator = estimator.steps[-1][1]\n estimator = deepcopy(estimator)\n model = estimator\n\n display.move_progress()\n\n # plots used for logging (controlled through plots_log_param)\n # AUC, #Confusion Matrix and #Feature Importance\n\n logger.info(\"Copying training dataset\")\n\n # Storing X_train and y_train in data_X and data_y parameter\n data_X = X_train.copy()\n if not _is_unsupervised(_ml_usecase):\n data_y = y_train.copy()\n\n # reset index\n data_X.reset_index(drop=True, inplace=True)\n if not _is_unsupervised(_ml_usecase):\n data_y.reset_index(drop=True, inplace=True)\n\n logger.info(\"Copying test dataset\")\n\n # Storing X_train and y_train in data_X and data_y parameter\n test_X = X_train.copy() if use_train_data else X_test.copy()\n test_y = y_train.copy() if use_train_data else y_test.copy()\n\n # reset index\n test_X.reset_index(drop=True, inplace=True)\n test_y.reset_index(drop=True, inplace=True)\n\n logger.info(f\"Plot type: {plot}\")\n plot_name = _available_plots[plot]\n display.move_progress()\n\n # yellowbrick workaround start\n import yellowbrick.utils.types\n import yellowbrick.utils.helpers\n\n # yellowbrick workaround end\n\n model_name = _get_model_name(model)\n plot_filename = f\"{plot_name}.png\"\n with patch(\n \"yellowbrick.utils.types.is_estimator\",\n pycaret.internal.patches.yellowbrick.is_estimator,\n ), patch(\n \"yellowbrick.utils.helpers.is_estimator\",\n pycaret.internal.patches.yellowbrick.is_estimator,\n ), estimator_pipeline(\n _internal_pipeline, model\n ) as pipeline_with_model:\n fit_kwargs = _get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)\n\n _base_dpi = 100\n\n def residuals_interactive():\n from pycaret.internal.plots.residual_plots import InteractiveResidualsPlot\n\n resplots = InteractiveResidualsPlot(\n 
x=data_X,\n y=data_y,\n x_test=test_X,\n y_test=test_y,\n model=pipeline_with_model,\n display=display,\n )\n\n display.clear_output()\n if system:\n resplots.show()\n\n plot_filename = f\"{plot_name}.html\"\n\n if save:\n resplots.write_html(plot_filename)\n logger.info(f\"Saving '{plot_filename}' in current active directory\")\n\n logger.info(\"Visual Rendered Successfully\")\n return plot_filename\n\n def cluster():\n logger.info(\n \"SubProcess assign_model() called ==================================\"\n )\n b = assign_model(\n pipeline_with_model, verbose=False, transformation=True\n ).reset_index(drop=True)\n logger.info(\n \"SubProcess assign_model() end ==================================\"\n )\n cluster = b[\"Cluster\"].values\n b.drop(\"Cluster\", axis=1, inplace=True)\n b = pd.get_dummies(b) # casting categorical variable\n\n from sklearn.decomposition import PCA\n\n pca = PCA(n_components=2, random_state=seed)\n logger.info(\"Fitting PCA()\")\n pca_ = pca.fit_transform(b)\n pca_ = pd.DataFrame(pca_)\n pca_ = pca_.rename(columns={0: \"PCA1\", 1: \"PCA2\"})\n pca_[\"Cluster\"] = cluster\n\n if feature_name is not None:\n pca_[\"Feature\"] = data_before_preprocess[feature_name]\n else:\n pca_[\"Feature\"] = data_before_preprocess[\n data_before_preprocess.columns[0]\n ]\n\n if label:\n pca_[\"Label\"] = pca_[\"Feature\"]\n\n \"\"\"\n sorting\n \"\"\"\n\n logger.info(\"Sorting dataframe\")\n\n print(pca_[\"Cluster\"])\n\n clus_num = [int(i.split()[1]) for i in pca_[\"Cluster\"]]\n\n pca_[\"cnum\"] = clus_num\n pca_.sort_values(by=\"cnum\", inplace=True)\n\n \"\"\"\n sorting ends\n \"\"\"\n\n display.clear_output()\n\n logger.info(\"Rendering Visual\")\n\n if label:\n fig = px.scatter(\n pca_,\n x=\"PCA1\",\n y=\"PCA2\",\n text=\"Label\",\n color=\"Cluster\",\n opacity=0.5,\n )\n else:\n fig = px.scatter(\n pca_,\n x=\"PCA1\",\n y=\"PCA2\",\n hover_data=[\"Feature\"],\n color=\"Cluster\",\n opacity=0.5,\n )\n\n fig.update_traces(textposition=\"top center\")\n fig.update_layout(plot_bgcolor=\"rgb(240,240,240)\")\n\n fig.update_layout(height=600 * scale, title_text=\"2D Cluster PCA Plot\")\n\n plot_filename = f\"{plot_name}.html\"\n\n if save:\n fig.write_html(plot_filename)\n logger.info(f\"Saving '{plot_filename}' in current active directory\")\n\n elif system:\n if display_format == \"streamlit\":\n st.write(fig)\n else:\n fig.show()\n\n logger.info(\"Visual Rendered Successfully\")\n return plot_filename\n\n def umap():\n logger.info(\n \"SubProcess assign_model() called ==================================\"\n )\n b = assign_model(\n model, verbose=False, transformation=True, score=False\n ).reset_index(drop=True)\n logger.info(\n \"SubProcess assign_model() end ==================================\"\n )\n\n label = pd.DataFrame(b[\"Anomaly\"])\n b.dropna(axis=0, inplace=True) # droping rows with NA's\n b.drop([\"Anomaly\"], axis=1, inplace=True)\n\n import umap\n\n reducer = umap.UMAP()\n logger.info(\"Fitting UMAP()\")\n embedding = reducer.fit_transform(b)\n X = pd.DataFrame(embedding)\n\n import plotly.express as px\n\n df = X\n df[\"Anomaly\"] = label\n\n if feature_name is not None:\n df[\"Feature\"] = data_before_preprocess[feature_name]\n else:\n df[\"Feature\"] = data_before_preprocess[\n data_before_preprocess.columns[0]\n ]\n\n display.clear_output()\n\n logger.info(\"Rendering Visual\")\n\n fig = px.scatter(\n df,\n x=0,\n y=1,\n color=\"Anomaly\",\n title=\"uMAP Plot for Outliers\",\n hover_data=[\"Feature\"],\n opacity=0.7,\n width=900 * scale,\n height=800 * scale,\n 
)\n plot_filename = f\"{plot_name}.html\"\n\n if save:\n fig.write_html(f\"{plot_filename}\")\n logger.info(f\"Saving '{plot_filename}' in current active directory\")\n elif system:\n if display_format == \"streamlit\":\n st.write(fig)\n else:\n fig.show()\n\n logger.info(\"Visual Rendered Successfully\")\n return plot_filename\n\n def tsne():\n if _ml_usecase == MLUsecase.CLUSTERING:\n return _tsne_clustering()\n else:\n return _tsne_anomaly()\n\n def _tsne_anomaly():\n logger.info(\n \"SubProcess assign_model() called ==================================\"\n )\n b = assign_model(\n model, verbose=False, transformation=True, score=False\n ).reset_index(drop=True)\n logger.info(\n \"SubProcess assign_model() end ==================================\"\n )\n cluster = b[\"Anomaly\"].values\n b.dropna(axis=0, inplace=True) # droping rows with NA's\n b.drop(\"Anomaly\", axis=1, inplace=True)\n\n logger.info(\"Getting dummies to cast categorical variables\")\n\n from sklearn.manifold import TSNE\n\n logger.info(\"Fitting TSNE()\")\n X_embedded = TSNE(n_components=3).fit_transform(b)\n\n X = pd.DataFrame(X_embedded)\n X[\"Anomaly\"] = cluster\n if feature_name is not None:\n X[\"Feature\"] = data_before_preprocess[feature_name]\n else:\n X[\"Feature\"] = data_before_preprocess[data_before_preprocess.columns[0]]\n\n df = X\n\n display.clear_output()\n\n logger.info(\"Rendering Visual\")\n\n if label:\n fig = px.scatter_3d(\n df,\n x=0,\n y=1,\n z=2,\n text=\"Feature\",\n color=\"Anomaly\",\n title=\"3d TSNE Plot for Outliers\",\n opacity=0.7,\n width=900 * scale,\n height=800 * scale,\n )\n else:\n fig = px.scatter_3d(\n df,\n x=0,\n y=1,\n z=2,\n hover_data=[\"Feature\"],\n color=\"Anomaly\",\n title=\"3d TSNE Plot for Outliers\",\n opacity=0.7,\n width=900 * scale,\n height=800 * scale,\n )\n\n plot_filename = f\"{plot_name}.html\"\n\n if save:\n fig.write_html(f\"{plot_filename}\")\n logger.info(f\"Saving '{plot_filename}' in current active directory\")\n elif system:\n if display_format == \"streamlit\":\n st.write(fig)\n else:\n fig.show()\n\n logger.info(\"Visual Rendered Successfully\")\n return plot_filename\n\n def _tsne_clustering():\n logger.info(\n \"SubProcess assign_model() called ==================================\"\n )\n b = assign_model(\n pipeline_with_model, verbose=False, score=False, transformation=True,\n ).reset_index(drop=True)\n logger.info(\n \"SubProcess assign_model() end ==================================\"\n )\n\n cluster = b[\"Cluster\"].values\n b.drop(\"Cluster\", axis=1, inplace=True)\n\n from sklearn.manifold import TSNE\n\n logger.info(\"Fitting TSNE()\")\n X_embedded = TSNE(n_components=3, random_state=seed).fit_transform(b)\n X_embedded = pd.DataFrame(X_embedded)\n X_embedded[\"Cluster\"] = cluster\n\n if feature_name is not None:\n X_embedded[\"Feature\"] = data_before_preprocess[feature_name]\n else:\n X_embedded[\"Feature\"] = data_before_preprocess[data_X.columns[0]]\n\n if label:\n X_embedded[\"Label\"] = X_embedded[\"Feature\"]\n\n \"\"\"\n sorting\n \"\"\"\n logger.info(\"Sorting dataframe\")\n\n clus_num = [int(i.split()[1]) for i in X_embedded[\"Cluster\"]]\n\n X_embedded[\"cnum\"] = clus_num\n X_embedded.sort_values(by=\"cnum\", inplace=True)\n\n \"\"\"\n sorting ends\n \"\"\"\n\n df = X_embedded\n\n display.clear_output()\n\n logger.info(\"Rendering Visual\")\n\n if label:\n\n fig = px.scatter_3d(\n df,\n x=0,\n y=1,\n z=2,\n color=\"Cluster\",\n title=\"3d TSNE Plot for Clusters\",\n text=\"Label\",\n opacity=0.7,\n width=900 * scale,\n height=800 * 
scale,\n )\n\n else:\n fig = px.scatter_3d(\n df,\n x=0,\n y=1,\n z=2,\n color=\"Cluster\",\n title=\"3d TSNE Plot for Clusters\",\n hover_data=[\"Feature\"],\n opacity=0.7,\n width=900 * scale,\n height=800 * scale,\n )\n\n plot_filename = f\"{plot_name}.html\"\n\n if save:\n fig.write_html(f\"{plot_filename}\")\n logger.info(f\"Saving '{plot_filename}' in current active directory\")\n elif system:\n if display_format == \"streamlit\":\n st.write(fig)\n else:\n fig.show()\n\n logger.info(\"Visual Rendered Successfully\")\n return plot_filename\n\n def distribution():\n logger.info(\n \"SubProcess assign_model() called ==================================\"\n )\n d = assign_model(pipeline_with_model, verbose=False).reset_index(drop=True)\n logger.info(\n \"SubProcess assign_model() end ==================================\"\n )\n\n \"\"\"\n sorting\n \"\"\"\n logger.info(\"Sorting dataframe\")\n\n clus_num = []\n for i in d.Cluster:\n a = int(i.split()[1])\n clus_num.append(a)\n\n d[\"cnum\"] = clus_num\n d.sort_values(by=\"cnum\", inplace=True)\n d.reset_index(inplace=True, drop=True)\n\n clus_label = []\n for i in d.cnum:\n a = \"Cluster \" + str(i)\n clus_label.append(a)\n\n d.drop([\"Cluster\", \"cnum\"], inplace=True, axis=1)\n d[\"Cluster\"] = clus_label\n\n \"\"\"\n sorting ends\n \"\"\"\n\n if feature_name is None:\n x_col = \"Cluster\"\n else:\n x_col = feature_name\n\n display.clear_output()\n\n logger.info(\"Rendering Visual\")\n\n fig = px.histogram(\n d,\n x=x_col,\n color=\"Cluster\",\n marginal=\"box\",\n opacity=0.7,\n hover_data=d.columns,\n )\n\n fig.update_layout(height=600 * scale,)\n\n plot_filename = f\"{plot_name}.html\"\n\n if save:\n fig.write_html(f\"{plot_filename}\")\n logger.info(f\"Saving '{plot_filename}' in current active directory\")\n elif system:\n fig.show()\n\n logger.info(\"Visual Rendered Successfully\")\n return plot_filename\n\n def elbow():\n try:\n from yellowbrick.cluster import KElbowVisualizer\n\n visualizer = KElbowVisualizer(pipeline_with_model, timings=False)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=None,\n X_test=None,\n y_test=None,\n name=plot_name,\n handle_test=\"\",\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n except:\n logger.error(\"Elbow plot failed. Exception:\")\n logger.error(traceback.format_exc())\n raise TypeError(\"Plot Type not supported for this model.\")\n\n def silhouette():\n from yellowbrick.cluster import SilhouetteVisualizer\n\n try:\n visualizer = SilhouetteVisualizer(\n pipeline_with_model, colors=\"yellowbrick\"\n )\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=None,\n X_test=None,\n y_test=None,\n name=plot_name,\n handle_test=\"\",\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n except:\n logger.error(\"Silhouette plot failed. 
Exception:\")\n logger.error(traceback.format_exc())\n raise TypeError(\"Plot Type not supported for this model.\")\n\n def distance():\n from yellowbrick.cluster import InterclusterDistance\n\n try:\n visualizer = InterclusterDistance(pipeline_with_model)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=None,\n X_test=None,\n y_test=None,\n name=plot_name,\n handle_test=\"\",\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n except:\n logger.error(\"Distance plot failed. Exception:\")\n logger.error(traceback.format_exc())\n raise TypeError(\"Plot Type not supported for this model.\")\n\n def residuals():\n\n from yellowbrick.regressor import ResidualsPlot\n\n visualizer = ResidualsPlot(pipeline_with_model)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def auc():\n\n from yellowbrick.classifier import ROCAUC\n\n visualizer = ROCAUC(pipeline_with_model)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def threshold():\n\n from yellowbrick.classifier import DiscriminationThreshold\n\n visualizer = DiscriminationThreshold(pipeline_with_model, random_state=seed)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def pr():\n\n from yellowbrick.classifier import PrecisionRecallCurve\n\n visualizer = PrecisionRecallCurve(pipeline_with_model, random_state=seed)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def confusion_matrix():\n\n from yellowbrick.classifier import ConfusionMatrix\n\n visualizer = ConfusionMatrix(\n pipeline_with_model, random_state=seed, fontsize=15, cmap=\"Greens\",\n )\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def error():\n\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n from yellowbrick.classifier import ClassPredictionError\n\n visualizer = ClassPredictionError(\n pipeline_with_model, random_state=seed\n )\n\n elif _ml_usecase == MLUsecase.REGRESSION:\n from yellowbrick.regressor import PredictionError\n\n visualizer = PredictionError(pipeline_with_model, random_state=seed)\n\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def cooks():\n\n from yellowbrick.regressor import CooksDistance\n\n visualizer = CooksDistance()\n show_yellowbrick_plot(\n 
visualizer=visualizer,\n X_train=X,\n y_train=y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n handle_test=\"\",\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def class_report():\n\n from yellowbrick.classifier import ClassificationReport\n\n visualizer = ClassificationReport(\n pipeline_with_model, random_state=seed, support=True\n )\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def boundary():\n\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n from yellowbrick.contrib.classifier import DecisionViz\n\n data_X_transformed = data_X.select_dtypes(include=\"float32\")\n test_X_transformed = test_X.select_dtypes(include=\"float32\")\n logger.info(\"Fitting StandardScaler()\")\n data_X_transformed = StandardScaler().fit_transform(data_X_transformed)\n test_X_transformed = StandardScaler().fit_transform(test_X_transformed)\n pca = PCA(n_components=2, random_state=seed)\n logger.info(\"Fitting PCA()\")\n data_X_transformed = pca.fit_transform(data_X_transformed)\n test_X_transformed = pca.fit_transform(test_X_transformed)\n\n data_y_transformed = np.array(data_y)\n test_y_transformed = np.array(test_y)\n\n viz_ = DecisionViz(pipeline_with_model)\n show_yellowbrick_plot(\n visualizer=viz_,\n X_train=data_X_transformed,\n y_train=data_y_transformed,\n X_test=test_X_transformed,\n y_test=test_y_transformed,\n name=plot_name,\n scale=scale,\n handle_test=\"draw\",\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n features=[\"Feature One\", \"Feature Two\"],\n classes=[\"A\", \"B\"],\n display_format=display_format,\n )\n\n def rfe():\n\n from yellowbrick.model_selection import RFECV\n\n visualizer = RFECV(pipeline_with_model, cv=cv)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n handle_test=\"\",\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def learning():\n\n from yellowbrick.model_selection import LearningCurve\n\n sizes = np.linspace(0.3, 1.0, 10)\n visualizer = LearningCurve(\n pipeline_with_model,\n cv=cv,\n train_sizes=sizes,\n n_jobs=_gpu_n_jobs_param,\n random_state=seed,\n )\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n handle_test=\"\",\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def lift():\n\n display.move_progress()\n logger.info(\"Generating predictions / predict_proba on X_test\")\n with fit_if_not_fitted(\n pipeline_with_model, data_X, data_y, groups=groups, **fit_kwargs\n ) as fitted_pipeline_with_model:\n y_test__ = test_y #fitted_pipeline_with_model.predict(X_test)\n predict_proba__ = fitted_pipeline_with_model.predict_proba(X_test)\n display.move_progress()\n display.move_progress()\n display.clear_output()\n with MatplotlibDefaultDPI(base_dpi=_base_dpi, scale_to_set=scale):\n fig = skplt.metrics.plot_lift_curve(\n y_test__, predict_proba__, figsize=(10, 6)\n )\n if save:\n logger.info(f\"Saving '{plot_name}.png' in current active directory\")\n 
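                # scikit-plot draws the lift curve on the current matplotlib figure, so saving
                # the current figure below captures the rendered plot as '{plot_name}.png'.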
plt.savefig(f\"{plot_name}.png\", bbox_inches=\"tight\")\n elif system:\n plt.show()\n plt.close()\n\n logger.info(\"Visual Rendered Successfully\")\n\n def gain():\n\n display.move_progress()\n logger.info(\"Generating predictions / predict_proba on X_test\")\n with fit_if_not_fitted(\n pipeline_with_model, data_X, data_y, groups=groups, **fit_kwargs\n ) as fitted_pipeline_with_model:\n y_test__ = test_y #fitted_pipeline_with_model.predict(X_test)\n predict_proba__ = fitted_pipeline_with_model.predict_proba(X_test)\n display.move_progress()\n display.move_progress()\n display.clear_output()\n with MatplotlibDefaultDPI(base_dpi=_base_dpi, scale_to_set=scale):\n fig = skplt.metrics.plot_cumulative_gain(\n y_test__, predict_proba__, figsize=(10, 6)\n )\n if save:\n logger.info(f\"Saving '{plot_name}.png' in current active directory\")\n plt.savefig(f\"{plot_name}.png\", bbox_inches=\"tight\")\n elif system:\n plt.show()\n plt.close()\n\n logger.info(\"Visual Rendered Successfully\")\n\n def manifold():\n\n from yellowbrick.features import Manifold\n\n data_X_transformed = data_X.select_dtypes(include=\"float32\")\n visualizer = Manifold(manifold=\"tsne\", random_state=seed)\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X_transformed,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n handle_train=\"fit_transform\",\n handle_test=\"\",\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def tree():\n\n from sklearn.tree import plot_tree\n from sklearn.base import is_classifier\n from sklearn.model_selection import check_cv\n\n is_stacked_model = False\n is_ensemble_of_forests = False\n\n tree_estimator = pipeline_with_model.steps[-1][1]\n\n if \"final_estimator\" in tree_estimator.get_params():\n tree_estimator = tree_estimator.final_estimator\n is_stacked_model = True\n\n if (\n \"base_estimator\" in tree_estimator.get_params()\n and \"n_estimators\" in tree_estimator.base_estimator.get_params()\n ):\n n_estimators = (\n tree_estimator.get_params()[\"n_estimators\"]\n * tree_estimator.base_estimator.get_params()[\"n_estimators\"]\n )\n is_ensemble_of_forests = True\n elif \"n_estimators\" in tree_estimator.get_params():\n n_estimators = tree_estimator.get_params()[\"n_estimators\"]\n else:\n n_estimators = 1\n if n_estimators > 10:\n rows = (n_estimators // 10) + 1\n cols = 10\n else:\n rows = 1\n cols = n_estimators\n figsize = (cols * 20, rows * 16)\n fig, axes = plt.subplots(\n nrows=rows,\n ncols=cols,\n figsize=figsize,\n dpi=_base_dpi * scale,\n squeeze=False,\n )\n axes = list(axes.flatten())\n\n fig.suptitle(\"Decision Trees\")\n\n display.move_progress()\n logger.info(\"Plotting decision trees\")\n with fit_if_not_fitted(\n pipeline_with_model, data_X, data_y, groups=groups, **fit_kwargs\n ) as fitted_pipeline_with_model:\n trees = []\n feature_names = list(data_X.columns)\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n class_names = {\n v: k\n for k, v in prep_pipe.named_steps[\"dtypes\"].replacement.items()\n }\n else:\n class_names = None\n fitted_tree_estimator = fitted_pipeline_with_model.steps[-1][1]\n if is_stacked_model:\n stacked_feature_names = []\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n classes = list(data_y.unique())\n if len(classes) == 2:\n classes.pop()\n for c in classes:\n stacked_feature_names.extend(\n [\n f\"{k}_{class_names[c]}\"\n for k, v in fitted_tree_estimator.estimators\n ]\n )\n else:\n stacked_feature_names.extend(\n [f\"{k}\" for 
k, v in fitted_tree_estimator.estimators]\n )\n if not fitted_tree_estimator.passthrough:\n feature_names = stacked_feature_names\n else:\n feature_names = stacked_feature_names + feature_names\n fitted_tree_estimator = fitted_tree_estimator.final_estimator_\n if is_ensemble_of_forests:\n for estimator in fitted_tree_estimator.estimators_:\n trees.extend(estimator.estimators_)\n else:\n try:\n trees = fitted_tree_estimator.estimators_\n except:\n trees = [fitted_tree_estimator]\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n class_names = list(class_names.values())\n for i, tree in enumerate(trees):\n logger.info(f\"Plotting tree {i}\")\n plot_tree(\n tree,\n feature_names=feature_names,\n class_names=class_names,\n filled=True,\n rounded=True,\n precision=4,\n ax=axes[i],\n )\n axes[i].set_title(f\"Tree {i}\")\n for i in range(len(trees), len(axes)):\n axes[i].set_visible(False)\n display.move_progress()\n\n display.move_progress()\n display.clear_output()\n if save:\n logger.info(f\"Saving '{plot_name}.png' in current active directory\")\n plt.savefig(f\"{plot_name}.png\", bbox_inches=\"tight\")\n elif system:\n plt.show()\n plt.close()\n\n logger.info(\"Visual Rendered Successfully\")\n\n def calibration():\n\n from sklearn.calibration import calibration_curve\n\n plt.figure(figsize=(7, 6), dpi=_base_dpi * scale)\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n display.move_progress()\n logger.info(\"Scoring test/hold-out set\")\n with fit_if_not_fitted(\n pipeline_with_model, data_X, data_y, groups=groups, **fit_kwargs\n ) as fitted_pipeline_with_model:\n prob_pos = fitted_pipeline_with_model.predict_proba(test_X)[:, 1]\n prob_pos = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n fraction_of_positives, mean_predicted_value = calibration_curve(\n test_y, prob_pos, n_bins=10\n )\n display.move_progress()\n ax1.plot(\n mean_predicted_value,\n fraction_of_positives,\n \"s-\",\n label=f\"{model_name}\",\n )\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([0, 1])\n ax1.set_xlim([0, 1])\n ax1.legend(loc=\"lower right\")\n ax1.set_title(\"Calibration plots (reliability curve)\")\n ax1.set_facecolor(\"white\")\n ax1.grid(b=True, color=\"grey\", linewidth=0.5, linestyle=\"-\")\n plt.tight_layout()\n display.move_progress()\n display.clear_output()\n if save:\n logger.info(f\"Saving '{plot_name}.png' in current active directory\")\n plt.savefig(f\"{plot_name}.png\", bbox_inches=\"tight\")\n elif system:\n plt.show()\n plt.close()\n\n logger.info(\"Visual Rendered Successfully\")\n\n def vc():\n\n logger.info(\"Determining param_name\")\n\n actual_estimator_label = get_pipeline_estimator_label(pipeline_with_model)\n actual_estimator = pipeline_with_model.named_steps[actual_estimator_label]\n\n try:\n try:\n # catboost special case\n model_params = actual_estimator.get_all_params()\n except:\n model_params = pipeline_with_model.get_params()\n except:\n display.clear_output()\n logger.error(\"VC plot failed. Exception:\")\n logger.error(traceback.format_exc())\n raise TypeError(\n \"Plot not supported for this estimator. 
Try different estimator.\"\n )\n\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n\n # Catboost\n if \"depth\" in model_params:\n param_name = f\"{actual_estimator_label}__depth\"\n param_range = np.arange(1, 8 if gpu_param else 11)\n\n # SGD Classifier\n elif f\"{actual_estimator_label}__l1_ratio\" in model_params:\n param_name = f\"{actual_estimator_label}__l1_ratio\"\n param_range = np.arange(0, 1, 0.01)\n\n # tree based models\n elif f\"{actual_estimator_label}__max_depth\" in model_params:\n param_name = f\"{actual_estimator_label}__max_depth\"\n param_range = np.arange(1, 11)\n\n # knn\n elif f\"{actual_estimator_label}__n_neighbors\" in model_params:\n param_name = f\"{actual_estimator_label}__n_neighbors\"\n param_range = np.arange(1, 11)\n\n # MLP / Ridge\n elif f\"{actual_estimator_label}__alpha\" in model_params:\n param_name = f\"{actual_estimator_label}__alpha\"\n param_range = np.arange(0, 1, 0.1)\n\n # Logistic Regression\n elif f\"{actual_estimator_label}__C\" in model_params:\n param_name = f\"{actual_estimator_label}__C\"\n param_range = np.arange(1, 11)\n\n # Bagging / Boosting\n elif f\"{actual_estimator_label}__n_estimators\" in model_params:\n param_name = f\"{actual_estimator_label}__n_estimators\"\n param_range = np.arange(1, 1000, 10)\n\n # Naive Bayes\n elif f\"{actual_estimator_label}__var_smoothing\" in model_params:\n param_name = f\"{actual_estimator_label}__var_smoothing\"\n param_range = np.arange(0.1, 1, 0.01)\n\n # QDA\n elif f\"{actual_estimator_label}__reg_param\" in model_params:\n param_name = f\"{actual_estimator_label}__reg_param\"\n param_range = np.arange(0, 1, 0.1)\n\n # GPC\n elif f\"{actual_estimator_label}__max_iter_predict\" in model_params:\n param_name = f\"{actual_estimator_label}__max_iter_predict\"\n param_range = np.arange(100, 1000, 100)\n\n else:\n display.clear_output()\n raise TypeError(\n \"Plot not supported for this estimator. 
Try different estimator.\"\n )\n\n elif _ml_usecase == MLUsecase.REGRESSION:\n\n # Catboost\n if \"depth\" in model_params:\n param_name = f\"{actual_estimator_label}__depth\"\n param_range = np.arange(1, 8 if gpu_param else 11)\n\n # lasso/ridge/en/llar/huber/kr/mlp/br/ard\n elif f\"{actual_estimator_label}__alpha\" in model_params:\n param_name = f\"{actual_estimator_label}__alpha\"\n param_range = np.arange(0, 1, 0.1)\n\n elif f\"{actual_estimator_label}__alpha_1\" in model_params:\n param_name = f\"{actual_estimator_label}__alpha_1\"\n param_range = np.arange(0, 1, 0.1)\n\n # par/svm\n elif f\"{actual_estimator_label}__C\" in model_params:\n param_name = f\"{actual_estimator_label}__C\"\n param_range = np.arange(1, 11)\n\n # tree based models (dt/rf/et)\n elif f\"{actual_estimator_label}__max_depth\" in model_params:\n param_name = f\"{actual_estimator_label}__max_depth\"\n param_range = np.arange(1, 11)\n\n # knn\n elif f\"{actual_estimator_label}__n_neighbors\" in model_params:\n param_name = f\"{actual_estimator_label}__n_neighbors\"\n param_range = np.arange(1, 11)\n\n # Bagging / Boosting (ada/gbr)\n elif f\"{actual_estimator_label}__n_estimators\" in model_params:\n param_name = f\"{actual_estimator_label}__n_estimators\"\n param_range = np.arange(1, 1000, 10)\n\n # Bagging / Boosting (ada/gbr)\n elif f\"{actual_estimator_label}__n_nonzero_coefs\" in model_params:\n param_name = f\"{actual_estimator_label}__n_nonzero_coefs\"\n if len(X_train.columns) >= 10:\n param_max = 11\n else:\n param_max = len(X_train.columns) + 1\n param_range = np.arange(1, param_max, 1)\n\n elif f\"{actual_estimator_label}__eps\" in model_params:\n param_name = f\"{actual_estimator_label}__eps\"\n param_range = np.arange(0, 1, 0.1)\n\n elif f\"{actual_estimator_label}__max_subpopulation\" in model_params:\n param_name = f\"{actual_estimator_label}__max_subpopulation\"\n param_range = np.arange(1000, 100000, 2000)\n\n elif f\"{actual_estimator_label}__min_samples\" in model_params:\n param_name = f\"{actual_estimator_label}__max_subpopulation\"\n param_range = np.arange(0.01, 1, 0.1)\n\n else:\n display.clear_output()\n raise TypeError(\n \"Plot not supported for this estimator. 
Try different estimator.\"\n )\n\n logger.info(f\"param_name: {param_name}\")\n\n display.move_progress()\n\n from yellowbrick.model_selection import ValidationCurve\n\n viz = ValidationCurve(\n pipeline_with_model,\n param_name=param_name,\n param_range=param_range,\n cv=cv,\n random_state=seed,\n n_jobs=_gpu_n_jobs_param,\n )\n show_yellowbrick_plot(\n visualizer=viz,\n X_train=data_X,\n y_train=data_y,\n X_test=test_X,\n y_test=test_y,\n handle_train=\"fit\",\n handle_test=\"\",\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def dimension():\n\n from yellowbrick.features import RadViz\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n\n data_X_transformed = data_X.select_dtypes(include=\"float32\")\n logger.info(\"Fitting StandardScaler()\")\n data_X_transformed = StandardScaler().fit_transform(data_X_transformed)\n data_y_transformed = np.array(data_y)\n\n features = min(round(len(data_X.columns) * 0.3, 0), 5)\n features = int(features)\n\n pca = PCA(n_components=features, random_state=seed)\n logger.info(\"Fitting PCA()\")\n data_X_transformed = pca.fit_transform(data_X_transformed)\n display.move_progress()\n classes = data_y.unique().tolist()\n visualizer = RadViz(classes=classes, alpha=0.25)\n\n show_yellowbrick_plot(\n visualizer=visualizer,\n X_train=data_X_transformed,\n y_train=data_y_transformed,\n X_test=test_X,\n y_test=test_y,\n handle_train=\"fit_transform\",\n handle_test=\"\",\n name=plot_name,\n scale=scale,\n save=save,\n fit_kwargs=fit_kwargs,\n groups=groups,\n display=display,\n display_format=display_format,\n )\n\n def feature():\n _feature(10)\n\n def feature_all():\n _feature(len(data_X.columns))\n\n def _feature(n: int):\n variables = None\n temp_model = pipeline_with_model\n if hasattr(pipeline_with_model, \"steps\"):\n temp_model = pipeline_with_model.steps[-1][1]\n if hasattr(temp_model, \"coef_\"):\n try:\n coef = temp_model.coef_.flatten()\n if len(coef) > len(data_X.columns):\n coef = coef[: len(data_X.columns)]\n variables = abs(coef)\n except:\n pass\n if variables is None:\n logger.warning(\"No coef_ found. 
Trying feature_importances_\")\n variables = abs(temp_model.feature_importances_)\n coef_df = pd.DataFrame({\"Variable\": data_X.columns, \"Value\": variables})\n sorted_df = (\n coef_df.sort_values(by=\"Value\", ascending=False)\n .head(n)\n .sort_values(by=\"Value\")\n )\n my_range = range(1, len(sorted_df.index) + 1)\n display.move_progress()\n plt.figure(figsize=(8, 5 * (n // 10)), dpi=_base_dpi * scale)\n plt.hlines(y=my_range, xmin=0, xmax=sorted_df[\"Value\"], color=\"skyblue\")\n plt.plot(sorted_df[\"Value\"], my_range, \"o\")\n display.move_progress()\n plt.yticks(my_range, sorted_df[\"Variable\"])\n plt.title(\"Feature Importance Plot\")\n plt.xlabel(\"Variable Importance\")\n plt.ylabel(\"Features\")\n display.move_progress()\n display.clear_output()\n if save:\n logger.info(f\"Saving '{plot_name}.png' in current active directory\")\n plt.savefig(f\"{plot_name}.png\", bbox_inches=\"tight\")\n elif system:\n plt.show()\n plt.close()\n\n logger.info(\"Visual Rendered Successfully\")\n\n def parameter():\n\n try:\n params = estimator.get_all_params()\n except:\n params = estimator.get_params(deep=False)\n\n param_df = pd.DataFrame.from_dict(\n {str(k): str(v) for k, v in params.items()},\n orient=\"index\",\n columns=[\"Parameters\"],\n )\n display.display(param_df, clear=True)\n logger.info(\"Visual Rendered Successfully\")\n\n def ks():\n\n display.move_progress()\n logger.info(\"Generating predictions / predict_proba on X_test\")\n with fit_if_not_fitted(\n pipeline_with_model, data_X, data_y, groups=groups, **fit_kwargs\n ) as fitted_pipeline_with_model:\n predict_proba__ = fitted_pipeline_with_model.predict_proba(data_X)\n display.move_progress()\n display.move_progress()\n display.clear_output()\n with MatplotlibDefaultDPI(base_dpi=_base_dpi, scale_to_set=scale):\n fig = skplt.metrics.plot_ks_statistic(\n data_y, predict_proba__, figsize=(10, 6)\n )\n if save:\n logger.info(f\"Saving '{plot_name}.png' in current active directory\")\n plt.savefig(f\"{plot_name}.png\", bbox_inches=\"tight\")\n elif system:\n plt.show()\n plt.close()\n\n logger.info(\"Visual Rendered Successfully\")\n\n # execute the plot method\n ret = locals()[plot]()\n if ret:\n plot_filename = ret\n\n try:\n plt.close()\n except:\n pass\n\n gc.collect()\n\n logger.info(\n \"plot_model() succesfully completed......................................\"\n )\n\n if save:\n return plot_filename\n\n\ndef evaluate_model(\n estimator,\n fold: Optional[Union[int, Any]] = None,\n fit_kwargs: Optional[dict] = None,\n feature_name: Optional[str] = None,\n groups: Optional[Union[str, Any]] = None,\n use_train_data: bool = False,\n):\n\n \"\"\"\n This function displays a user interface for all of the available plots for\n a given estimator. It internally uses the plot_model() function.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> evaluate_model(lr)\n\n This will display the User Interface for all of the plots for a given\n estimator.\n\n Parameters\n ----------\n estimator : object, default = none\n A trained model object should be passed as an estimator.\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. 
If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n Returns\n -------\n User_Interface\n Displays the user interface for plotting.\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing evaluate_model()\")\n logger.info(f\"evaluate_model({function_params_str})\")\n\n from ipywidgets import widgets\n from ipywidgets.widgets import interact, fixed\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n a = widgets.ToggleButtons(\n options=[(v, k) for k, v in _available_plots.items()],\n description=\"Plot Type:\",\n disabled=False,\n button_style=\"\", # 'success', 'info', 'warning', 'danger' or ''\n icons=[\"\"],\n )\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n d = interact(\n plot_model,\n estimator=fixed(estimator),\n plot=a,\n save=fixed(False),\n verbose=fixed(True),\n scale=fixed(1),\n fold=fixed(fold),\n fit_kwargs=fixed(fit_kwargs),\n feature_name=fixed(feature_name),\n label=fixed(False),\n groups=fixed(groups),\n use_train_data=fixed(use_train_data),\n system=fixed(True),\n display=fixed(None),\n display_format=fixed(None),\n is_in_evaluate=fixed(True),\n )\n\n\ndef interpret_model(\n estimator,\n plot: str = \"summary\",\n feature: Optional[str] = None,\n observation: Optional[int] = None,\n use_train_data: Optional[bool] = False,\n X_new_sample: Optional[pd.DataFrame] = None,\n y_new_sample: Optional[pd.DataFrame] = None, # add for pfi explainer\n save: bool = False,\n **kwargs, # added in pycaret==2.1\n):\n\n \"\"\"\n This function takes a trained model object and returns an interpretation plot.\n Most plots in this function are implemented based on the SHAP (SHapley Additive\n exPlanations), which is a unified approach to explain the output of any machine\n learning model. SHAP connects game theory with local explanations.\n\n For more information : https://shap.readthedocs.io/en/latest/\n\n For Partial Dependence Plot : https://github.com/SauceCat/PDPbox\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> dt = create_model('dt')\n >>> interpret_model(dt)\n\n This will return a summary interpretation plot of Decision Tree model.\n\n Parameters\n ----------\n estimator : object, default = none\n A trained model object to be passed as an estimator. Only tree-based\n models are accepted when plot type is 'summary', 'correlation', or\n 'reason'. 'pdp' plot is model agnostic.\n\n plot : str, default = 'summary'\n Abbreviation of type of plot. 
The current list of plots supported\n are (Plot - Name):\n\n * 'summary' - Summary Plot using SHAP\n * 'correlation' - Dependence Plot using SHAP\n * 'reason' - Force Plot using SHAP\n * 'pdp' - Partial Dependence Plot\n * 'msa' - Morris Sensitivity Analysis\n * 'pfi' - Permutation Feature Importance\n\n feature: str, default = None\n This parameter is only needed when plot = 'correlation' or 'pdp'.\n By default feature is set to None which means the first column of the\n dataset will be used as a variable. A feature parameter must be passed\n to change this.\n\n observation: integer, default = None\n This parameter only comes into effect when plot is set to 'reason'. If no\n observation number is provided, it will return an analysis of all observations\n with the option to select the feature on x and y axes through drop down\n interactivity. For analysis at the sample level, an observation parameter must\n be passed with the index value of the observation in test / hold-out set.\n\n use_train_data: bool, default = False\n When set to true, train data will be used for plots, instead\n of test data.\n\n X_new_sample: pd.DataFrame, default = None\n Row from an out-of-sample dataframe (neither train nor test data) to be plotted.\n The sample must have the same columns as the raw input train data, and it is transformed\n by the preprocessing pipeline automatically before plotting.\n\n y_new_sample: pd.DataFrame, default = None\n Row from an out-of-sample dataframe (neither train nor test data) to be plotted.\n The sample must have the same columns as the raw input label data, and it is transformed\n by the preprocessing pipeline automatically before plotting.\n\n save: bool, default = False\n When set to True, Plot is saved as a 'png' file in current working directory.\n\n **kwargs:\n Additional keyword arguments to pass to the plot.\n\n Returns\n -------\n Visual_Plot\n Returns the visual plot.\n Returns the interactive JS plot when plot = 'reason'.\n\n Warnings\n --------\n - interpret_model doesn't support multiclass problems.\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing interpret_model()\")\n logger.info(f\"interpret_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n import matplotlib.pyplot as plt\n\n # checking if shap available\n if plot in [\"summary\", \"correlation\", \"reason\"]:\n try:\n import shap\n except ImportError:\n logger.error(\n \"shap library not found. pip install shap to use interpret_model function.\"\n )\n raise ImportError(\n \"shap library not found. pip install shap to use interpret_model function.\"\n )\n\n # checking if pdpbox is available\n if plot == \"pdp\":\n try:\n from interpret.blackbox import PartialDependence\n except ImportError:\n logger.error(\n \"interpretml library not found. pip install interpret to generate pdp plot in interpret_model function.\"\n )\n raise ImportError(\n \"interpretml library not found. pip install interpret to generate pdp plot in interpret_model function.\"\n )\n\n # checking interpret is available\n if plot == \"msa\":\n try:\n from interpret.blackbox import MorrisSensitivity\n except ImportError:\n logger.error(\n \"interpretml library not found. pip install interpret to generate msa plot in interpret_model function.\"\n )\n raise ImportError(\n \"interpretml library not found. 
pip install interpret to generate msa plot in interpret_model function.\"\n )\n\n # checking interpret-community is available\n if plot == \"pfi\":\n try:\n from interpret.ext.blackbox import PFIExplainer\n except ImportError:\n logger.error(\n \"interpret-community library not found. pip install interpret-community to generate pfi plot in interpret_model function.\"\n )\n raise ImportError(\n \"interpret-community library not found. pip install interpret-community to generate pfi plot in interpret_model function.\"\n )\n\n # get estimator from meta estimator\n estimator = get_estimator_from_meta_estimator(estimator)\n\n # allowed models\n model_id = _get_model_id(estimator)\n\n shap_models = {k: v for k, v in _all_models_internal.items() if v.shap}\n shap_models_ids = set(shap_models.keys())\n\n if plot in [\"summary\", \"correlation\", \"reason\"] and (\n model_id not in shap_models_ids\n ):\n raise TypeError(\n f\"This function only supports tree based models for binary classification: {', '.join(shap_models_ids)}.\"\n )\n\n # plot type\n allowed_types = [\"summary\", \"correlation\", \"reason\", \"pdp\", \"msa\", \"pfi\"]\n if plot not in allowed_types:\n raise ValueError(\n \"type parameter only accepts 'summary', 'correlation', 'reason', 'pdp', 'msa' or 'pfi'.\"\n )\n\n if X_new_sample is not None and (observation is not None or use_train_data):\n raise ValueError(\n \"Specifying 'X_new_sample' and ('observation' or 'use_train_data') is ambiguous.\"\n )\n \"\"\"\n Error Checking Ends here\n\n \"\"\"\n if X_new_sample is not None:\n test_X = prep_pipe.transform(X_new_sample)\n if plot == \"pfi\":\n test_y = prep_pipe.transform(y_new_sample) # add for pfi explainer\n else:\n # Storing X_train and y_train in data_X and data_y parameter\n test_X = X_train if use_train_data else X_test\n if plot == \"pfi\":\n test_y = y_train if use_train_data else y_test # add for pfi explainer\n\n np.random.seed(seed)\n\n # storing estimator in model variable\n model = estimator\n\n # defining type of classifier\n shap_models_type1 = {k for k, v in shap_models.items() if v.shap == \"type1\"}\n shap_models_type2 = {k for k, v in shap_models.items() if v.shap == \"type2\"}\n\n logger.info(f\"plot type: {plot}\")\n\n shap_plot = None\n\n def summary(show: bool = True):\n\n logger.info(\"Creating TreeExplainer\")\n explainer = shap.TreeExplainer(model)\n logger.info(\"Compiling shap values\")\n shap_values = explainer.shap_values(test_X)\n\n try:\n assert len(shap_values) == 2\n shap_plot = shap.summary_plot(shap_values[1], test_X, show=show, **kwargs)\n except Exception:\n shap_plot = shap.summary_plot(shap_values, test_X, show=show, **kwargs)\n\n if save:\n plt.savefig(f\"SHAP {plot}.png\", bbox_inches=\"tight\")\n return shap_plot\n\n def correlation(show: bool = True):\n\n if feature == None:\n\n logger.warning(\n f\"No feature passed. Default value of feature used for correlation plot: {test_X.columns[0]}\"\n )\n dependence = test_X.columns[0]\n\n else:\n\n logger.warning(\n f\"feature value passed. 
Feature used for correlation plot: {feature}\"\n )\n dependence = feature\n\n logger.info(\"Creating TreeExplainer\")\n explainer = shap.TreeExplainer(model)\n logger.info(\"Compiling shap values\")\n shap_values = explainer.shap_values(test_X)\n\n if model_id in shap_models_type1:\n logger.info(\"model type detected: type 1\")\n shap.dependence_plot(\n dependence, shap_values[1], test_X, show=show, **kwargs\n )\n elif model_id in shap_models_type2:\n logger.info(\"model type detected: type 2\")\n shap.dependence_plot(dependence, shap_values, test_X, show=show, **kwargs)\n if save:\n plt.savefig(f\"SHAP {plot}.png\", bbox_inches=\"tight\")\n return None\n\n def reason(show: bool = True):\n shap_plot = None\n if model_id in shap_models_type1:\n logger.info(\"model type detected: type 1\")\n\n logger.info(\"Creating TreeExplainer\")\n explainer = shap.TreeExplainer(model)\n logger.info(\"Compiling shap values\")\n\n if observation is None:\n logger.warning(\n \"Observation set to None. Model agnostic plot will be rendered.\"\n )\n shap_values = explainer.shap_values(test_X)\n shap.initjs()\n shap_plot = shap.force_plot(\n explainer.expected_value[1],\n shap_values[1],\n test_X,\n show=show,\n **kwargs,\n )\n\n else:\n row_to_show = observation\n data_for_prediction = test_X.iloc[row_to_show]\n\n if model_id == \"lightgbm\":\n logger.info(\"model type detected: LGBMClassifier\")\n shap_values = explainer.shap_values(test_X)\n shap.initjs()\n shap_plot = shap.force_plot(\n explainer.expected_value[1],\n shap_values[0][row_to_show],\n data_for_prediction,\n show=show,\n **kwargs,\n )\n\n else:\n logger.info(\"model type detected: Unknown\")\n\n shap_values = explainer.shap_values(data_for_prediction)\n shap.initjs()\n shap_plot = shap.force_plot(\n explainer.expected_value[1],\n shap_values[1],\n data_for_prediction,\n show=show,\n **kwargs,\n )\n\n elif model_id in shap_models_type2:\n logger.info(\"model type detected: type 2\")\n\n logger.info(\"Creating TreeExplainer\")\n explainer = shap.TreeExplainer(model)\n logger.info(\"Compiling shap values\")\n shap_values = explainer.shap_values(test_X)\n shap.initjs()\n\n if observation is None:\n logger.warning(\n \"Observation set to None. Model agnostic plot will be rendered.\"\n )\n\n shap_plot = shap.force_plot(\n explainer.expected_value, shap_values, test_X, show=show, **kwargs\n )\n\n else:\n\n row_to_show = observation\n data_for_prediction = test_X.iloc[row_to_show]\n\n shap_plot = shap.force_plot(\n explainer.expected_value,\n shap_values[row_to_show, :],\n test_X.iloc[row_to_show, :],\n show=show,\n **kwargs,\n )\n if save:\n shap.save_html(f\"SHAP {plot}.html\", shap_plot)\n return shap_plot\n\n def pdp(show: bool = True):\n logger.info(\"Checking feature parameter passed\")\n if feature == None:\n logger.warning(\n f\"No feature passed. Default value of feature used for pdp : {test_X.columns[0]}\"\n )\n pdp_feature = test_X.columns[0]\n else:\n logger.info(\n f\"feature value passed. 
Feature used for correlation plot: {feature}\"\n )\n pdp_feature = feature\n\n from interpret.blackbox import PartialDependence\n\n try:\n pdp = PartialDependence(\n predict_fn=model.predict_proba, data=test_X\n ) # classification\n except AttributeError:\n pdp = PartialDependence(predict_fn=model.predict, data=test_X) # regression\n\n pdp_global = pdp.explain_global()\n pdp_plot = pdp_global.visualize(list(test_X.columns).index(pdp_feature))\n if save:\n import plotly.io as pio\n\n pio.write_html(pdp_plot, f\"PDP {plot}.html\")\n return pdp_plot\n\n def msa(show: bool = True):\n from interpret.blackbox import MorrisSensitivity\n\n try:\n msa = MorrisSensitivity(\n predict_fn=model.predict_proba, data=test_X\n ) # classification\n except AttributeError:\n msa = MorrisSensitivity(predict_fn=model.predict, data=test_X) # regression\n msa_global = msa.explain_global()\n msa_plot = msa_global.visualize()\n if save:\n import plotly.io as pio\n\n pio.write_html(msa_plot, f\"MSA {plot}.html\")\n return msa_plot\n\n def pfi(show: bool = True):\n from interpret.ext.blackbox import PFIExplainer\n\n pfi = PFIExplainer(model)\n pfi_global = pfi.explain_global(test_X, true_labels=test_y)\n pfi_plot = pfi_global.visualize()\n if save:\n import plotly.io as pio\n\n pio.write_html(pfi_plot, f\"PFI {plot}.html\")\n return pfi_plot\n\n shap_plot = locals()[plot](show=not save)\n\n logger.info(\"Visual Rendered Successfully\")\n\n logger.info(\n \"interpret_model() succesfully completed......................................\"\n )\n\n gc.collect()\n return shap_plot\n\n\ndef calibrate_model(\n estimator,\n method: str = \"sigmoid\",\n fold: Optional[Union[int, Any]] = None,\n round: int = 4,\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n verbose: bool = True,\n display: Optional[Display] = None, # added in pycaret==2.2.0\n) -> Any:\n\n \"\"\"\n This function takes the input of trained estimator and performs probability\n calibration with sigmoid or isotonic regression. The output prints a score\n grid that shows Accuracy, AUC, Recall, Precision, F1, Kappa and MCC by fold\n (default = 10 Fold). The ouput of the original estimator and the calibrated\n estimator (created using this function) might not differ much. In order\n to see the calibration differences, use 'calibration' plot in plot_model to\n see the difference before and after.\n\n This function returns a trained model object.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> dt_boosted = create_model('dt', ensemble = True, method = 'Boosting')\n >>> calibrated_dt = calibrate_model(dt_boosted)\n\n This will return Calibrated Boosted Decision Tree Model.\n\n Parameters\n ----------\n estimator : object\n\n method : str, default = 'sigmoid'\n The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's\n method or 'isotonic' which is a non-parametric approach. It is not advised to use\n isotonic calibration with too few calibration samples\n\n fold: integer or scikit-learn compatible CV generator, default = None\n Controls cross-validation. 
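A custom value can also be passed directly, for example (an illustrative sketch reusing\n        'dt_boosted' from the Example above):\n\n        >>> calibrated_dt = calibrate_model(dt_boosted, fold = 5)\n\n        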
If None, will use the CV generator defined in setup().\n If integer, will use KFold CV with that many folds.\n When cross_validation is False, this parameter is ignored.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n verbose: bool, default = True\n Score grid is not printed when verbose is set to False.\n\n Returns\n -------\n score_grid\n A table containing the scores of the model across the kfolds.\n Scoring metrics used are Accuracy, AUC, Recall, Precision, F1,\n Kappa and MCC. Mean and standard deviation of the scores across\n the folds are also returned.\n\n model\n trained and calibrated model object.\n\n Warnings\n --------\n - Avoid isotonic calibration with too few calibration samples (<1000) since it\n tends to overfit.\n\n - calibration plot not available for multiclass problems.\n\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing calibrate_model()\")\n logger.info(f\"calibrate_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n # checking fold parameter\n if fold is not None and not (type(fold) is int or is_sklearn_cv_generator(fold)):\n raise TypeError(\n \"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object.\"\n )\n\n # checking round parameter\n if type(round) is not int:\n raise TypeError(\"Round parameter only accepts integer value.\")\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n \"\"\"\n\n ERROR HANDLING ENDS HERE\n\n \"\"\"\n\n fold = _get_cv_splitter(fold)\n\n groups = _get_groups(groups)\n\n logger.info(\"Preloading libraries\")\n\n # pre-load libraries\n\n logger.info(\"Preparing display monitor\")\n\n if not display:\n progress_args = {\"max\": 2 + 4}\n master_display_columns = [v.display_name for k, v in _all_metrics.items()]\n timestampStr = datetime.datetime.now().strftime(\"%H:%M:%S\")\n monitor_rows = [\n [\"Initiated\", \". . . . . . . . . . . . . . . . . .\", timestampStr],\n [\"Status\", \". . . . . . . . . . . . . . . . . .\", \"Loading Dependencies\"],\n [\"Estimator\", \". . . . . . . . . . . . . . . . . 
.\", \"Compiling Library\"],\n ]\n display = Display(\n verbose=verbose,\n html_param=html_param,\n progress_args=progress_args,\n master_display_columns=master_display_columns,\n monitor_rows=monitor_rows,\n )\n\n display.display_progress()\n display.display_monitor()\n display.display_master_display()\n\n np.random.seed(seed)\n\n logger.info(\"Getting model name\")\n\n full_name = _get_model_name(estimator)\n\n logger.info(f\"Base model : {full_name}\")\n\n display.update_monitor(2, full_name)\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE STARTS\n \"\"\"\n\n display.update_monitor(1, \"Selecting Estimator\")\n display.display_monitor()\n\n \"\"\"\n MONITOR UPDATE ENDS\n \"\"\"\n\n # calibrating estimator\n\n logger.info(\"Importing untrained CalibratedClassifierCV\")\n\n calibrated_model_definition = _all_models_internal[\"CalibratedCV\"]\n model = calibrated_model_definition.class_def(\n base_estimator=estimator,\n method=method,\n cv=fold,\n **calibrated_model_definition.args,\n )\n\n display.move_progress()\n\n logger.info(\"SubProcess create_model() called ==================================\")\n model, model_fit_time = create_model_supervised(\n estimator=model,\n system=False,\n display=display,\n fold=fold,\n round=round,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n model_results = pull()\n logger.info(\"SubProcess create_model() end ==================================\")\n\n model_results = model_results.round(round)\n\n display.move_progress()\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param:\n\n avgs_dict_log = {k: v for k, v in model_results.loc[\"Mean\"].items()}\n\n try:\n _mlflow_log_model(\n model=model,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"calibrate_models\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model} raised an exception:\")\n logger.error(traceback.format_exc())\n\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n display.display(model_results, clear=True)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model))\n logger.info(\n \"calibrate_model() succesfully completed......................................\"\n )\n\n gc.collect()\n return model\n\n\ndef optimize_threshold(\n estimator,\n true_positive: int = 0,\n true_negative: int = 0,\n false_positive: int = 0,\n false_negative: int = 0,\n):\n\n \"\"\"\n This function optimizes probability threshold for a trained model using custom cost\n function that can be defined using combination of True Positives, True Negatives,\n False Positives (also known as Type I error), and False Negatives (Type II error).\n\n This function returns a plot of optimized cost as a function of probability\n threshold between 0 to 100.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> optimize_threshold(lr, true_negative = 10, false_negative = -100)\n\n This will return a plot of optimized cost as a function of probability threshold.\n\n Parameters\n 
----------\n estimator : object\n A trained model object should be passed as an estimator.\n\n true_positive : int, default = 0\n Cost function or returns when prediction is true positive.\n\n true_negative : int, default = 0\n Cost function or returns when prediction is true negative.\n\n false_positive : int, default = 0\n Cost function or returns when prediction is false positive.\n\n false_negative : int, default = 0\n Cost function or returns when prediction is false negative.\n\n\n Returns\n -------\n Visual_Plot\n Prints the visual plot.\n\n Warnings\n --------\n - This function is not supported for multiclass problems.\n\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing optimize_threshold()\")\n logger.info(f\"optimize_threshold({function_params_str})\")\n\n logger.info(\"Importing libraries\")\n\n # import libraries\n\n np.random.seed(seed)\n\n \"\"\"\n ERROR HANDLING STARTS HERE\n \"\"\"\n\n logger.info(\"Checking exceptions\")\n\n # exception 1 for multi-class\n if _is_multiclass():\n raise TypeError(\n \"optimize_threshold() cannot be used when target is multi-class.\"\n )\n\n # check predict_proba value\n if type(estimator) is not list:\n if not hasattr(estimator, \"predict_proba\"):\n raise TypeError(\n \"Estimator doesn't support predict_proba function and cannot be used in optimize_threshold().\"\n )\n\n # check cost function type\n allowed_types = [int, float]\n\n if type(true_positive) not in allowed_types:\n raise TypeError(\"true_positive parameter only accepts float or integer value.\")\n\n if type(true_negative) not in allowed_types:\n raise TypeError(\"true_negative parameter only accepts float or integer value.\")\n\n if type(false_positive) not in allowed_types:\n raise TypeError(\"false_positive parameter only accepts float or integer value.\")\n\n if type(false_negative) not in allowed_types:\n raise TypeError(\"false_negative parameter only accepts float or integer value.\")\n\n \"\"\"\n ERROR HANDLING ENDS HERE\n \"\"\"\n\n # define model as estimator\n model = estimator\n\n model_name = _get_model_name(model)\n\n # generate predictions and store actual on y_test in numpy array\n actual = np.array(y_test)\n\n predicted = model.predict_proba(X_test)\n predicted = predicted[:, 1]\n\n \"\"\"\n internal function to calculate loss starts here\n \"\"\"\n\n logger.info(\"Defining loss function\")\n\n def calculate_loss(\n actual,\n predicted,\n tp_cost=true_positive,\n tn_cost=true_negative,\n fp_cost=false_positive,\n fn_cost=false_negative,\n ):\n\n # true positives\n tp = predicted + actual\n tp = np.where(tp == 2, 1, 0)\n tp = tp.sum()\n\n # true negative\n tn = predicted + actual\n tn = np.where(tn == 0, 1, 0)\n tn = tn.sum()\n\n # false positive\n fp = (predicted > actual).astype(int)\n fp = np.where(fp == 1, 1, 0)\n fp = fp.sum()\n\n # false negative\n fn = (predicted < actual).astype(int)\n fn = np.where(fn == 1, 1, 0)\n fn = fn.sum()\n\n total_cost = (tp_cost * tp) + (tn_cost * tn) + (fp_cost * fp) + (fn_cost * fn)\n\n return total_cost\n\n \"\"\"\n internal function to calculate loss ends here\n \"\"\"\n\n grid = np.arange(0, 1, 0.0001)\n\n # loop starts here\n\n cost = []\n # global optimize_results\n\n logger.info(\"Iteration starts at 0\")\n\n for i in grid:\n\n pred_prob = (predicted >= i).astype(int)\n cost.append(calculate_loss(actual, pred_prob))\n\n optimize_results = pd.DataFrame(\n {\"Probability Threshold\": grid, \"Cost Function\": cost}\n )\n fig 
= px.line(\n optimize_results,\n x=\"Probability Threshold\",\n y=\"Cost Function\",\n line_shape=\"linear\",\n )\n fig.update_layout(plot_bgcolor=\"rgb(245,245,245)\")\n title = f\"{model_name} Probability Threshold Optimization\"\n\n # calculate vertical line\n y0 = optimize_results[\"Cost Function\"].min()\n y1 = optimize_results[\"Cost Function\"].max()\n x0 = optimize_results.sort_values(by=\"Cost Function\", ascending=False).iloc[0][0]\n x1 = x0\n\n t = x0\n if html_param:\n\n fig.add_shape(\n dict(\n type=\"line\", x0=x0, y0=y0, x1=x1, y1=y1, line=dict(color=\"red\", width=2)\n )\n )\n fig.update_layout(\n title={\n \"text\": title,\n \"y\": 0.95,\n \"x\": 0.45,\n \"xanchor\": \"center\",\n \"yanchor\": \"top\",\n }\n )\n logger.info(\"Figure ready for render\")\n fig.show()\n print(f\"Optimized Probability Threshold: {t} | Optimized Cost Function: {y1}\")\n logger.info(\n \"optimize_threshold() succesfully completed......................................\"\n )\n\n return float(t)\n\n\ndef assign_model(\n model, transformation: bool = False, score: bool = True, verbose: bool = True\n) -> pd.DataFrame:\n\n \"\"\"\n This function assigns each of the data point in the dataset passed during setup\n stage to one of the clusters using trained model object passed as model param.\n create_model() function must be called before using assign_model().\n\n This function returns a pandas.DataFrame.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> jewellery = get_data('jewellery')\n >>> experiment_name = setup(data = jewellery, normalize = True)\n >>> kmeans = create_model('kmeans')\n >>> kmeans_df = assign_model(kmeans)\n\n This will return a pandas.DataFrame with inferred clusters using trained model.\n\n Parameters\n ----------\n model: trained model object, default = None\n\n transformation: bool, default = False\n When set to True, assigned clusters are returned on transformed dataset instead\n of original dataset passed during setup().\n\n verbose: Boolean, default = True\n Status update is not printed when verbose is set to False.\n\n Returns\n -------\n pandas.DataFrame\n Returns a DataFrame with assigned clusters using a trained model.\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing assign_model()\")\n logger.info(f\"assign_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n # checking transformation parameter\n if type(transformation) is not bool:\n raise TypeError(\n \"Transformation parameter can only take argument as True or False.\"\n )\n\n # checking verbose parameter\n if type(verbose) is not bool:\n raise TypeError(\"Verbose parameter can only take argument as True or False.\")\n\n \"\"\"\n error handling ends here\n \"\"\"\n\n if is_sklearn_pipeline(model):\n model = model.steps[-1][1]\n\n logger.info(\"Determining Trained Model\")\n\n name = _get_model_name(model)\n\n logger.info(f\"Trained Model : {name}\")\n\n logger.info(\"Copying data\")\n # copy data_\n if transformation:\n data = X.copy()\n logger.info(\n \"Transformation param set to True. 
Assigned clusters are attached on transformed dataset.\"\n )\n else:\n data = data_before_preprocess.copy()\n\n # calculation labels and attaching to dataframe\n\n if _ml_usecase == MLUsecase.CLUSTERING:\n labels = [f\"Cluster {i}\" for i in model.labels_]\n data[\"Cluster\"] = labels\n else:\n data[\"Anomaly\"] = model.labels_\n if score:\n data[\"Anomaly_Score\"] = model.decision_scores_\n\n logger.info(data.shape)\n logger.info(\n \"assign_model() succesfully completed......................................\"\n )\n\n return data\n\n\ndef predict_model_unsupervised(\n estimator, data: pd.DataFrame, ml_usecase: Optional[MLUsecase] = None,\n) -> pd.DataFrame:\n function_params_str = \", \".join(\n [f\"{k}={v}\" for k, v in locals().items() if k != \"data\"]\n )\n\n logger = get_logger()\n\n logger.info(\"Initializing predict_model()\")\n logger.info(f\"predict_model({function_params_str})\")\n\n if ml_usecase is None:\n ml_usecase = _ml_usecase\n\n # copy data and model\n data_transformed = data.copy()\n\n # exception checking for predict param\n if hasattr(estimator, \"predict\"):\n pass\n else:\n raise TypeError(\"Model doesn't support predict parameter.\")\n\n pred_score = None\n\n # predictions start here\n if is_sklearn_pipeline(estimator):\n pred = estimator.predict(data_transformed)\n if ml_usecase == MLUsecase.ANOMALY:\n pred_score = estimator.decision_function(data_transformed)\n else:\n pred = estimator.predict(prep_pipe.transform(data_transformed))\n if ml_usecase == MLUsecase.ANOMALY:\n pred_score = estimator.decision_function(\n prep_pipe.transform(data_transformed)\n )\n\n if ml_usecase == MLUsecase.CLUSTERING:\n pred_list = [f\"Cluster {i}\" for i in pred]\n\n data_transformed[\"Cluster\"] = pred_list\n else:\n data_transformed[\"Anomaly\"] = pred\n data_transformed[\"Anomaly_Score\"] = pred_score\n\n return data_transformed\n\n\ndef predict_model(\n estimator,\n data: Optional[pd.DataFrame] = None,\n probability_threshold: Optional[float] = None,\n encoded_labels: bool = False, # added in pycaret==2.1.0\n raw_score: bool = False,\n round: int = 4, # added in pycaret==2.2.0\n verbose: bool = True,\n ml_usecase: Optional[MLUsecase] = None,\n display: Optional[Display] = None, # added in pycaret==2.2.0\n) -> pd.DataFrame:\n\n \"\"\"\n This function is used to predict label and probability score on the new dataset\n using a trained estimator. New unseen data can be passed to data param as pandas\n Dataframe. If data is not passed, the test / hold-out set separated at the time of\n setup() is used to generate predictions.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> lr_predictions_holdout = predict_model(lr)\n\n Parameters\n ----------\n estimator : object, default = none\n A trained model object / pipeline should be passed as an estimator.\n\n data : pandas.DataFrame\n Shape (n_samples, n_features) where n_samples is the number of samples\n and n_features is the number of features. All features used during training\n must be present in the new dataset.\n\n probability_threshold : float, default = None\n Threshold used to convert probability values into binary outcome. By default\n the probability threshold for all binary classifiers is 0.5 (50%). 
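For example (an illustrative sketch; 'lr' refers to the model from the Example above and\n        the 0.25 cut-off is arbitrary):\n\n        >>> lr_predictions_holdout = predict_model(lr, probability_threshold = 0.25)\n\n        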
This can be\n changed using probability_threshold param.\n\n encoded_labels: Boolean, default = False\n If True, will return labels encoded as an integer.\n\n raw_score: bool, default = False\n When set to True, scores for all labels will be returned.\n\n round: integer, default = 4\n Number of decimal places the metrics in the score grid will be rounded to.\n\n verbose: bool, default = True\n Holdout score grid is not printed when verbose is set to False.\n\n Returns\n -------\n Predictions\n Predictions (Label and Score) column attached to the original dataset\n and returned as pandas dataframe.\n\n score_grid\n A table containing the scoring metrics on hold-out / test set.\n\n Warnings\n --------\n - The behavior of the predict_model is changed in version 2.1 without backward compatibility.\n As such, the pipelines trained using the version (<= 2.0), may not work for inference\n with version >= 2.1. You can either retrain your models with a newer version or downgrade\n the version for inference.\n\n\n \"\"\"\n\n function_params_str = \", \".join(\n [f\"{k}={v}\" for k, v in locals().items() if k != \"data\"]\n )\n\n logger = get_logger()\n\n logger.info(\"Initializing predict_model()\")\n logger.info(f\"predict_model({function_params_str})\")\n\n logger.info(\"Checking exceptions\")\n\n \"\"\"\n exception checking starts here\n \"\"\"\n\n if ml_usecase is None:\n ml_usecase = _ml_usecase\n\n if data is None and \"pycaret_globals\" not in globals():\n raise ValueError(\n \"data parameter may not be None without running setup() first.\"\n )\n\n if probability_threshold is not None:\n # probability_threshold allowed types\n allowed_types = [int, float]\n if type(probability_threshold) not in allowed_types:\n raise TypeError(\n \"probability_threshold parameter only accepts value between 0 to 1.\"\n )\n\n if probability_threshold > 1:\n raise TypeError(\n \"probability_threshold parameter only accepts value between 0 to 1.\"\n )\n\n if probability_threshold < 0:\n raise TypeError(\n \"probability_threshold parameter only accepts value between 0 to 1.\"\n )\n\n \"\"\"\n exception checking ends here\n \"\"\"\n\n logger.info(\"Preloading libraries\")\n\n # general dependencies\n from sklearn import metrics\n\n try:\n np.random.seed(seed)\n if not display:\n display = Display(verbose=verbose, html_param=html_param,)\n except:\n display = Display(verbose=False, html_param=False,)\n\n dtypes = None\n\n # dataset\n if data is None:\n\n if is_sklearn_pipeline(estimator):\n estimator = estimator.steps[-1][1]\n\n X_test_ = X_test.copy()\n y_test_ = y_test.copy()\n\n dtypes = prep_pipe.named_steps[\"dtypes\"]\n\n X_test_.reset_index(drop=True, inplace=True)\n y_test_.reset_index(drop=True, inplace=True)\n\n else:\n\n if is_sklearn_pipeline(estimator) and hasattr(estimator, \"predict\"):\n dtypes = estimator.named_steps[\"dtypes\"]\n else:\n try:\n dtypes = prep_pipe.named_steps[\"dtypes\"]\n\n estimator_ = deepcopy(prep_pipe)\n if is_sklearn_pipeline(estimator):\n merge_pipelines(estimator_, estimator)\n estimator_.steps[-1] = (\"trained_model\", estimator_.steps[-1][1])\n else:\n add_estimator_to_pipeline(\n estimator_, estimator, name=\"trained_model\"\n )\n estimator = estimator_\n\n except:\n logger.error(\"Pipeline not found. 
Exception:\")\n logger.error(traceback.format_exc())\n raise ValueError(\"Pipeline not found\")\n\n X_test_ = data.copy()\n\n # function to replace encoded labels with their original values\n # will not run if categorical_labels is false\n def replace_lables_in_column(label_column):\n if dtypes and hasattr(dtypes, \"replacement\"):\n replacement_mapper = {int(v): k for k, v in dtypes.replacement.items()}\n label_column.replace(replacement_mapper, inplace=True)\n\n # prediction starts here\n\n pred = np.nan_to_num(estimator.predict(X_test_))\n\n try:\n score = estimator.predict_proba(X_test_)\n\n if len(np.unique(pred)) <= 2:\n pred_prob = score[:, 1]\n else:\n pred_prob = score\n\n except:\n score = None\n pred_prob = None\n\n if probability_threshold is not None and pred_prob is not None:\n try:\n pred = (pred_prob >= probability_threshold).astype(int)\n except:\n pass\n\n if pred_prob is None:\n pred_prob = pred\n\n df_score = None\n\n if data is None:\n # model name\n full_name = _get_model_name(estimator)\n metrics = _calculate_metrics_supervised(y_test_, pred, pred_prob)\n df_score = pd.DataFrame(metrics, index=[0])\n df_score.insert(0, \"Model\", full_name)\n df_score = df_score.round(round)\n display.display(df_score.style.set_precision(round), clear=False)\n\n label = pd.DataFrame(pred)\n label.columns = [\"Label\"]\n if not encoded_labels:\n replace_lables_in_column(label[\"Label\"])\n if ml_usecase == MLUsecase.CLASSIFICATION:\n try:\n label[\"Label\"] = label[\"Label\"].astype(int)\n except:\n pass\n\n if data is None:\n if not encoded_labels:\n replace_lables_in_column(y_test_)\n X_test_ = pd.concat([X_test_, y_test_, label], axis=1)\n else:\n X_test_ = data.copy()\n X_test_[\"Label\"] = label[\"Label\"].values\n\n if score is not None:\n pred = pred.astype(int)\n if not raw_score:\n score = [s[pred[i]] for i, s in enumerate(score)]\n try:\n score = pd.DataFrame(score)\n if raw_score:\n score_columns = pd.Series(range(score.shape[1]))\n if not encoded_labels:\n replace_lables_in_column(score_columns)\n score.columns = [f\"Score_{label}\" for label in score_columns]\n else:\n score.columns = [\"Score\"]\n score = score.round(round)\n score.index = X_test_.index\n X_test_ = pd.concat((X_test_, score), axis=1)\n except:\n pass\n\n # store predictions on hold-out in display_container\n if df_score is not None:\n display_container.append(df_score)\n\n gc.collect()\n return X_test_\n\n\ndef finalize_model(\n estimator,\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n model_only: bool = True,\n display: Optional[Display] = None,\n) -> Any: # added in pycaret==2.2.0\n\n \"\"\"\n This function fits the estimator onto the complete dataset passed during the\n setup() stage. 
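In addition to returning the trained model alone, passing model_only = False returns the\n    full preprocessing pipeline with the trained model appended as its final step, e.g.\n    (an illustrative sketch; 'lr' follows the Example below):\n\n    >>> final_lr_pipeline = finalize_model(lr, model_only = False)\n\n    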
The purpose of this function is to prepare for final model\n deployment after experimentation.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> final_lr = finalize_model(lr)\n\n This will return the final model object fitted to complete dataset.\n\n Parameters\n ----------\n estimator : object, default = none\n A trained model object should be passed as an estimator.\n\n fit_kwargs: dict, default = {} (empty dict)\n Dictionary of arguments passed to the fit method of the model.\n\n groups: str or array-like, with shape (n_samples,), default = None\n Optional Group labels for the samples used while splitting the dataset into train/test set.\n If string is passed, will use the data column with that name as the groups.\n Only used if a group based cross-validation generator is used (eg. GroupKFold).\n If None, will use the value set in fold_groups param in setup().\n\n model_only : bool, default = True\n When set to True, only trained model object is saved and all the\n transformations are ignored.\n\n Returns\n -------\n model\n Trained model object fitted on complete dataset.\n\n Warnings\n --------\n - If the model returned by finalize_model(), is used on predict_model() without\n passing a new unseen dataset, then the information grid printed is misleading\n as the model is trained on the complete dataset including test / hold-out sample.\n Once finalize_model() is used, the model is considered ready for deployment and\n should be used on new unseens dataset only.\n\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing finalize_model()\")\n logger.info(f\"finalize_model({function_params_str})\")\n\n # run_time\n runtime_start = time.time()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n groups = _get_groups(groups, data=X, fold_groups=fold_groups_param_full)\n\n if not display:\n display = Display(verbose=False, html_param=html_param,)\n\n np.random.seed(seed)\n\n logger.info(f\"Finalizing {estimator}\")\n display.clear_output()\n model_final, model_fit_time = create_model_supervised(\n estimator=estimator,\n verbose=False,\n system=False,\n X_train_data=X,\n y_train_data=y,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n model_results = pull(pop=True)\n\n # end runtime\n runtime_end = time.time()\n runtime = np.array(runtime_end - runtime_start).round(2)\n\n # mlflow logging\n if logging_param:\n\n avgs_dict_log = {k: v for k, v in model_results.loc[\"Mean\"].items()}\n\n try:\n _mlflow_log_model(\n model=model_final,\n model_results=model_results,\n score_dict=avgs_dict_log,\n source=\"finalize_model\",\n runtime=runtime,\n model_fit_time=model_fit_time,\n _prep_pipe=prep_pipe,\n log_plots=log_plots_param,\n display=display,\n )\n except:\n logger.error(f\"_mlflow_log_model() for {model_final} raised an exception:\")\n logger.error(traceback.format_exc())\n\n model_results = color_df(model_results, \"yellow\", [\"Mean\"], axis=1)\n model_results = model_results.set_precision(round)\n display.display(model_results, clear=True)\n\n logger.info(f\"create_model_container: {len(create_model_container)}\")\n logger.info(f\"master_model_container: {len(master_model_container)}\")\n logger.info(f\"display_container: {len(display_container)}\")\n\n logger.info(str(model_final))\n logger.info(\n \"finalize_model() succesfully 
completed......................................\"\n )\n\n gc.collect()\n if not model_only:\n pipeline_final = deepcopy(prep_pipe)\n pipeline_final.steps.append([\"trained_model\", model_final])\n return pipeline_final\n\n return model_final\n\n\ndef deploy_model(\n model,\n model_name: str,\n authentication: dict,\n platform: str = \"aws\", # added gcp and azure support in pycaret==2.1\n):\n\n \"\"\"\n (In Preview)\n\n This function deploys the transformation pipeline and trained model object for\n production use. The platform of deployment can be defined under the platform\n param along with the applicable authentication tokens which are passed as a\n dictionary to the authentication param.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> deploy_model(model = lr, model_name = 'deploy_lr', platform = 'aws', authentication = {'bucket' : 'pycaret-test'})\n\n This will deploy the model on an AWS S3 account under bucket 'pycaret-test'\n\n Notes\n -----\n For AWS users:\n Before deploying a model to an AWS S3 ('aws'), environment variables must be\n configured using the command line interface. To configure AWS env. variables,\n type aws configure in your python command line. The following information is\n required which can be generated using the Identity and Access Management (IAM)\n portal of your amazon console account:\n\n - AWS Access Key ID\n - AWS Secret Key Access\n - Default Region Name (can be seen under Global settings on your AWS console)\n - Default output format (must be left blank)\n\n For GCP users:\n --------------\n Before deploying a model to Google Cloud Platform (GCP), project must be created\n either using command line or GCP console. Once project is created, you must create\n a service account and download the service account key as a JSON file, which is\n then used to set environment variable.\n\n https://cloud.google.com/docs/authentication/production\n\n - Google Cloud Project\n - Service Account Authetication\n\n For Azure users:\n ---------------\n Before deploying a model to Microsoft's Azure (Azure), environment variables\n for connection string must be set. In order to get connection string, user has\n to create account of Azure. Once it is done, create a Storage account. In the settings\n section of storage account, user can get the connection string.\n\n Read below link for more details.\n https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?toc=%2Fpython%2Fazure%2FTOC.json\n\n - Azure Storage Account\n\n Parameters\n ----------\n model : object\n A trained model object should be passed as an estimator.\n\n model_name : str\n Name of model to be passed as a str.\n\n authentication : dict\n Dictionary of applicable authentication tokens.\n\n When platform = 'aws':\n {'bucket' : 'Name of Bucket on S3'}\n\n When platform = 'gcp':\n {'project': 'gcp_pycaret', 'bucket' : 'pycaret-test'}\n\n When platform = 'azure':\n {'container': 'pycaret-test'}\n\n platform: str, default = 'aws'\n Name of platform for deployment. Current available options are: 'aws', 'gcp' and 'azure'\n\n Returns\n -------\n Success_Message\n\n Warnings\n --------\n - This function uses file storage services to deploy the model on cloud platform.\n As such, this is efficient for batch-use. 
Where the production objective is to\n obtain prediction at an instance level, this may not be the efficient choice as\n it transmits the binary pickle file between your local python environment and\n the platform.\n\n \"\"\"\n import pycaret.internal.persistence\n\n return pycaret.internal.persistence.deploy_model(\n model, model_name, authentication, platform, prep_pipe\n )\n\n\ndef create_webservice(model, model_endopoint, api_key=True, pydantic_payload=None):\n \"\"\"\n (In Preview)\n\n This function deploys the transformation pipeline and trained model object as api. Rest api base on FastAPI and could run on localhost, it uses\n the model name as a path to POST endpoint. The endpoint can be protected by api key generated by pycaret and return for the user.\n Create_webservice uses pydantic style input/output model.\n Parameters\n ----------\n model : object\n A trained model object should be passed as an estimator.\n\n model_endopoint : string\n Name of model to be passed as a string.\n\n api_key: bool, default = True\n Security for API, if True Pycaret generates api key and print in console,\n else user can post data without header but it not safe if application will\n expose external.\n\n pydantic_payload: pydantic.main.ModelMetaclass, default = None\n Pycaret allows us to automatically generate a schema for the input model,\n thanks to which can prevent incorrect requests. User can generate own pydantic model and use it as an input model.\n\n Returns\n -------\n Dictionary with api_key: FastAPI application class which is ready to run with console\n if api_key is set to False, the dictionary key is set to 'Not_exist'.\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing create_service()\")\n logger.info(f\"create_service({function_params_str})\")\n\n try:\n from fastapi import FastAPI\n except ImportError:\n logger.error(\n \"fastapi library not found. pip install fastapi to use create_service function.\"\n )\n raise ImportError(\n \"fastapi library not found. pip install fastapi to use create_service function.\"\n )\n # try initialize predict before add it to endpoint (cold start)\n try:\n _ = predict_model(estimator=model, verbose=False)\n except:\n raise ValueError(\n \"Cannot predict on cold start check probability_threshold or model\"\n )\n\n # check pydantic style\n try:\n from pydantic import create_model, BaseModel, Json\n from pydantic.main import ModelMetaclass\n except ImportError:\n logger.error(\n \"pydantic library not found. pip install fastapi to use create_service function.\"\n )\n ImportError(\n \"pydantic library not found. 
pip install fastapi to use create_service function.\"\n )\n if pydantic_payload is not None:\n assert isinstance(\n pydantic_payload, ModelMetaclass\n ), \"pydantic_payload must be ModelMetaClass type\"\n else:\n # automatically create pydantic payload model\n import json\n from typing import Optional\n\n print(\n \"You are using an automatic data validation model it could fail in some cases\"\n )\n print(\n \"To be sure the model works properly create pydantic model in your own (pydantic_payload)\"\n )\n fields = {\n name: (Optional[type(t)], ...)\n for name, t in json.loads(\n data_before_preprocess.drop(columns=[target_param])\n .convert_dtypes()\n .sample(1)\n .to_json(orient=\"records\")\n )[0].items()\n }\n print(fields)\n pydantic_payload = create_model(\"DefaultModel\", __base__=BaseModel, **fields)\n logger.info(\n \"Generated json schema: {}\".format(pydantic_payload.schema_json(indent=2))\n )\n\n # generate apikey\n import secrets\n from typing import Optional\n from fastapi.security.api_key import APIKeyHeader\n from fastapi import HTTPException, Security\n\n api_key_handler = APIKeyHeader(name=\"token\", auto_error=False)\n if api_key:\n # generate key and log into console\n key = secrets.token_urlsafe(30)\n\n def validate_request(header: Optional[str] = Security(api_key_handler)):\n if header is None:\n raise HTTPException(status_code=400, detail=\"No api key\", headers={})\n if not secrets.compare_digest(header, str(key)):\n raise HTTPException(\n status_code=401, detail=\"Unauthorized request\", headers={}\n )\n return True\n\n else:\n key = \"Not_exist\"\n print(\"API will be working without security\")\n\n def validate_request(header: Optional[str] = Security(api_key_handler)):\n return True\n\n # validate request functionality\n validation = validate_request\n\n # creating response model\n from typing import Any, Optional\n from pycaret.utils import __version__\n\n class PredictionResult(BaseModel):\n prediction: Any\n author: str = \"pycaret\"\n lib_version: str = __version__()\n input_data: pydantic_payload\n processed_input_data: Json = None\n time_utc: Optional[str] = None\n\n class Config:\n schema_extra = {\n \"example\": {\n \"prediction\": 1,\n \"autohor\": \"pycaret\",\n \"lib_version\": \"2.0.0\",\n \"input_data\": pydantic_payload,\n \"processed_input_data\": {\"col1\": 1, \"col2\": \"string\"},\n \"time\": \"2020-09-10 20:00\",\n }\n }\n\n app = FastAPI(\n title=\"REST API for ML prediction created by Pycaret\",\n description=\"This is the REST API for the ML model generated\"\n \"by the Pycaret library: https://pycaret.org. \"\n \"All endpoints should run asynchronously, please validate\"\n \"the Pydantic model and read api documentation. 
\"\n \"In case of trouble, please add issuesto github: https://github.com/pycaret/pycaret/issues\",\n version=\"pycaret: {}\".format(__version__()),\n externalDocs={\"Pycaret\": \"https://pycaret.org/\"},\n )\n\n # import additionals from fastAPI\n import pandas as pd\n import time\n from fastapi.middleware.cors import CORSMiddleware\n from fastapi import Depends\n from fastapi.encoders import jsonable_encoder\n\n # enable CORS\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n @app.post(\"/predict/{}\".format(model_endopoint))\n def post_predict(\n block_data: pydantic_payload, authenticated: bool = Depends(validation)\n ):\n # encode input data\n try:\n encoded_data_df = pd.DataFrame(jsonable_encoder(block_data), index=[0])\n except Exception as e:\n raise HTTPException(status_code=404, detail=\"Wrong json format\")\n # predict values\n unseen_predictions = predict_model(model, data=encoded_data_df)\n # change format to dictionary to be sure that python types used\n unseen_predictions = unseen_predictions.to_dict(orient=\"records\")[0]\n label = unseen_predictions[\"Label\"]\n del unseen_predictions[\"Label\"]\n\n # creating return object\n predict_schema = PredictionResult(\n prediction=label,\n input_data=block_data,\n processed_input_data=json.dumps(unseen_predictions),\n time_utc=time.strftime(\"%Y-%m-%d %H:%M\", time.gmtime(time.time())),\n )\n return predict_schema\n\n return {key: app}\n\n\ndef save_model(\n model, model_name: str, model_only: bool = False, verbose: bool = True, **kwargs\n):\n\n \"\"\"\n This function saves the transformation pipeline and trained model object\n into the current active directory as a pickle file for later use.\n\n Example\n -------\n >>> from pycaret.datasets import get_data\n >>> juice = get_data('juice')\n >>> experiment_name = setup(data = juice, target = 'Purchase')\n >>> lr = create_model('lr')\n >>> save_model(lr, 'lr_model_23122019')\n\n This will save the transformation pipeline and model as a binary pickle\n file in the current active directory.\n\n Parameters\n ----------\n model : object, default = none\n A trained model object should be passed as an estimator.\n\n model_name : str, default = none\n Name of pickle file to be passed as a string.\n\n model_only : bool, default = False\n When set to True, only trained model object is saved and all the\n transformations are ignored.\n\n **kwargs: \n Additional keyword arguments to pass to joblib.dump().\n\n verbose: bool, default = True\n Success message is not printed when verbose is set to False.\n\n Returns\n -------\n Success_Message\n\n\n \"\"\"\n\n import pycaret.internal.persistence\n\n return pycaret.internal.persistence.save_model(\n model, model_name, None if model_only else prep_pipe, verbose, **kwargs\n )\n\n\ndef load_model(\n model_name,\n platform: Optional[str] = None,\n authentication: Optional[Dict[str, str]] = None,\n verbose: bool = True,\n):\n\n \"\"\"\n This function loads a previously saved transformation pipeline and model\n from the current active directory into the current python environment.\n Load object must be a pickle file.\n\n Example\n -------\n >>> saved_lr = load_model('lr_model_23122019')\n\n This will load the previously saved model in saved_lr variable. 
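As a further illustrative sketch (the platform and bucket values mirror those used in the\n    deploy_model example and are placeholders), a model stored on the cloud can be loaded by\n    passing the platform and authentication parameters documented below:\n\n    >>> saved_lr = load_model('lr_model_23122019', platform = 'aws', authentication = {'bucket' : 'pycaret-test'})\n\n    For the plain local load shown in the Example, note:\n    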
The file\n must be in the current directory.\n\n Parameters\n ----------\n model_name : str, default = none\n Name of pickle file to be passed as a string.\n\n platform: str, default = None\n Name of platform, if loading model from cloud. Current available options are:\n 'aws', 'gcp' and 'azure'.\n\n authentication : dict\n dictionary of applicable authentication tokens.\n\n When platform = 'aws':\n {'bucket' : 'Name of Bucket on S3'}\n\n When platform = 'gcp':\n {'project': 'gcp_pycaret', 'bucket' : 'pycaret-test'}\n\n When platform = 'azure':\n {'container': 'pycaret-test'}\n\n verbose: bool, default = True\n Success message is not printed when verbose is set to False.\n\n Returns\n -------\n Model Object\n\n \"\"\"\n\n import pycaret.internal.persistence\n\n return pycaret.internal.persistence.load_model(\n model_name, platform, authentication, verbose\n )\n\n\ndef automl(optimize: str = \"Accuracy\", use_holdout: bool = False) -> Any:\n\n \"\"\"\n This function returns the best model out of all models created in\n current active environment based on metric defined in optimize parameter.\n\n Parameters\n ----------\n optimize : str, default = 'Accuracy'\n Other values you can pass in optimize param are 'AUC', 'Recall', 'Precision',\n 'F1', 'Kappa', and 'MCC'.\n\n use_holdout: bool, default = False\n When set to True, metrics are evaluated on holdout set instead of CV.\n\n \"\"\"\n\n function_params_str = \", \".join([f\"{k}={v}\" for k, v in locals().items()])\n\n logger = get_logger()\n\n logger.info(\"Initializing automl()\")\n logger.info(f\"automl({function_params_str})\")\n\n # checking optimize parameter\n optimize = _get_metric(optimize)\n if optimize is None:\n raise ValueError(\n f\"Optimize method not supported. See docstring for list of available parameters.\"\n )\n\n # checking optimize parameter for multiclass\n if _is_multiclass():\n if not optimize.is_multiclass:\n raise TypeError(\n f\"Optimization metric not supported for multiclass problems. 
See docstring for list of other optimization parameters.\"\n )\n\n compare_dimension = optimize.display_name\n greater_is_better = optimize.greater_is_better\n optimize = optimize.scorer\n\n scorer = []\n\n if use_holdout:\n logger.info(\"Model Selection Basis : Holdout set\")\n for i in master_model_container:\n try:\n pred_holdout = predict_model(i, verbose=False)\n except:\n logger.warning(f\"Model {i} is not fitted, running create_model\")\n i, _ = create_model_supervised(\n estimator=i,\n system=False,\n verbose=False,\n cross_validation=False,\n predict=False,\n groups=fold_groups_param,\n )\n pull(pop=True)\n pred_holdout = predict_model(i, verbose=False)\n\n p = pull(pop=True)\n p = p[compare_dimension][0]\n scorer.append(p)\n\n else:\n logger.info(\"Model Selection Basis : CV Results on Training set\")\n for i in create_model_container:\n r = i[compare_dimension][-2:][0]\n scorer.append(r)\n\n # returning better model\n if greater_is_better:\n index_scorer = scorer.index(max(scorer))\n else:\n index_scorer = scorer.index(min(scorer))\n\n automl_result = master_model_container[index_scorer]\n\n automl_model, _ = create_model_supervised(\n estimator=automl_result,\n system=False,\n verbose=False,\n cross_validation=False,\n predict=False,\n groups=fold_groups_param,\n )\n\n logger.info(str(automl_model))\n logger.info(\"automl() succesfully completed......................................\")\n\n return automl_model\n\n\ndef pull(pop=False) -> pd.DataFrame: # added in pycaret==2.2.0\n \"\"\"\n Returns latest displayed table.\n\n Parameters\n ----------\n pop : bool, default = False\n If true, will pop (remove) the returned dataframe from the\n display container.\n\n Returns\n -------\n pandas.DataFrame\n Equivalent to get_config('display_container')[-1]\n\n \"\"\"\n if not display_container:\n return None\n return display_container.pop(-1) if pop else display_container[-1]\n\n\ndef models(\n type: Optional[str] = None, internal: bool = False, raise_errors: bool = True,\n) -> pd.DataFrame:\n\n \"\"\"\n Returns table of models available in model library.\n\n Example\n -------\n >>> _all_models = models()\n\n This will return pandas dataframe with all available\n models and their metadata.\n\n Parameters\n ----------\n type : str, default = None\n - linear : filters and only return linear models\n - tree : filters and only return tree based models\n - ensemble : filters and only return ensemble models\n\n internal: bool, default = False\n If True, will return extra columns and rows used internally.\n\n raise_errors: bool, default = True\n If False, will suppress all exceptions, ignoring models\n that couldn't be created.\n\n Returns\n -------\n pandas.DataFrame\n\n \"\"\"\n\n model_type = {\n \"linear\": [\n \"lr\",\n \"ridge\",\n \"svm\",\n \"lasso\",\n \"en\",\n \"lar\",\n \"llar\",\n \"omp\",\n \"br\",\n \"ard\",\n \"par\",\n \"ransac\",\n \"tr\",\n \"huber\",\n \"kr\",\n ],\n \"tree\": [\"dt\"],\n \"ensemble\": [\n \"rf\",\n \"et\",\n \"gbc\",\n \"gbr\",\n \"xgboost\",\n \"lightgbm\",\n \"catboost\",\n \"ada\",\n ],\n }\n\n def filter_model_df_by_type(df):\n if not type:\n return df\n return df[df.index.isin(model_type[type])]\n\n # Check if type is valid\n if type not in list(model_type) + [None]:\n raise ValueError(\n f\"type param only accepts {', '.join(list(model_type) + str(None))}.\"\n )\n\n logger.info(f\"gpu_param set to {gpu_param}\")\n\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n model_containers = pycaret.containers.models.classification.get_all_model_containers(\n 
globals(), raise_errors\n )\n elif _ml_usecase == MLUsecase.REGRESSION:\n model_containers = pycaret.containers.models.regression.get_all_model_containers(\n globals(), raise_errors\n )\n elif _ml_usecase == MLUsecase.CLUSTERING:\n model_containers = pycaret.containers.models.clustering.get_all_model_containers(\n globals(), raise_errors\n )\n elif _ml_usecase == MLUsecase.ANOMALY:\n model_containers = pycaret.containers.models.anomaly.get_all_model_containers(\n globals(), raise_errors\n )\n rows = [\n v.get_dict(internal)\n for k, v in model_containers.items()\n if (internal or not v.is_special)\n ]\n\n df = pd.DataFrame(rows)\n df.set_index(\"ID\", inplace=True, drop=True)\n\n return filter_model_df_by_type(df)\n\n\ndef get_metrics(\n reset: bool = False, include_custom: bool = True, raise_errors: bool = True,\n) -> pd.DataFrame:\n \"\"\"\n Returns table of metrics available.\n\n Example\n -------\n >>> metrics = get_metrics()\n\n This will return pandas dataframe with all available\n metrics and their metadata.\n\n Parameters\n ----------\n reset: bool, default = False\n If True, will reset all changes made using add_metric() and get_metric().\n include_custom: bool, default = True\n Whether to include user added (custom) metrics or not.\n raise_errors: bool, default = True\n If False, will suppress all exceptions, ignoring models\n that couldn't be created.\n\n Returns\n -------\n pandas.DataFrame\n\n \"\"\"\n\n if reset and not \"_all_metrics\" in globals():\n raise ValueError(\"setup() needs to be ran first.\")\n\n global _all_metrics\n\n np.random.seed(seed)\n\n if reset:\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n _all_metrics = pycaret.containers.metrics.classification.get_all_metric_containers(\n globals(), raise_errors\n )\n elif _ml_usecase == MLUsecase.REGRESSION:\n _all_metrics = pycaret.containers.metrics.regression.get_all_metric_containers(\n globals(), raise_errors\n )\n\n metric_containers = _all_metrics\n rows = [v.get_dict() for k, v in metric_containers.items()]\n\n df = pd.DataFrame(rows)\n df.set_index(\"ID\", inplace=True, drop=True)\n\n if not include_custom:\n df = df[df[\"Custom\"] == False]\n\n return df\n\n\ndef _get_metric(name_or_id: str, metrics: Optional[Any] = None):\n \"\"\"\n Gets a metric from get_metrics() by name or index.\n \"\"\"\n if metrics is None:\n metrics = _all_metrics\n metric = None\n try:\n metric = metrics[name_or_id]\n return metric\n except:\n pass\n\n try:\n metric = next(\n v for k, v in metrics.items() if name_or_id in (v.display_name, v.name)\n )\n return metric\n except:\n pass\n\n return metric\n\n\ndef add_metric(\n id: str,\n name: str,\n score_func: type,\n target: str = \"pred\",\n greater_is_better: bool = True,\n multiclass: bool = True,\n **kwargs,\n) -> pd.Series:\n \"\"\"\n Adds a custom metric to be used in all functions.\n\n Parameters\n ----------\n id: str\n Unique id for the metric.\n\n name: str\n Display name of the metric.\n\n score_func: type\n Score function (or loss function) with signature score_func(y, y_pred, **kwargs).\n\n target: str, default = 'pred'\n The target of the score function.\n - 'pred' for the prediction table\n - 'pred_proba' for pred_proba\n - 'threshold' for decision_function or predict_proba\n\n greater_is_better: bool, default = True\n Whether score_func is a score function (default), meaning high is good,\n or a loss function, meaning low is good. 
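For instance, a loss metric can be registered as follows (an illustrative sketch using a\n        scikit-learn loss function):\n\n        >>> from sklearn.metrics import log_loss\n        >>> add_metric('logloss', 'Log Loss', log_loss, target = 'pred_proba', greater_is_better = False)\n\n        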
In the latter case, the\n        scorer object will sign-flip the outcome of the score_func.\n\n    multiclass: bool, default = True\n        Whether the metric supports multiclass problems.\n\n    **kwargs:\n        Arguments to be passed to the score function.\n\n    Returns\n    -------\n    pandas.Series\n        The created row as Series.\n\n    \"\"\"\n\n    if not \"_all_metrics\" in globals():\n        raise ValueError(\"setup() needs to be run first.\")\n\n    global _all_metrics\n\n    if id in _all_metrics:\n        raise ValueError(\"id already present in metrics dataframe.\")\n\n    if _ml_usecase == MLUsecase.CLASSIFICATION:\n        new_metric = pycaret.containers.metrics.classification.ClassificationMetricContainer(\n            id=id,\n            name=name,\n            score_func=score_func,\n            target=target,\n            args=kwargs,\n            display_name=name,\n            greater_is_better=greater_is_better,\n            is_multiclass=bool(multiclass),\n            is_custom=True,\n        )\n    else:\n        new_metric = pycaret.containers.metrics.regression.RegressionMetricContainer(\n            id=id,\n            name=name,\n            score_func=score_func,\n            args=kwargs,\n            display_name=name,\n            greater_is_better=greater_is_better,\n            is_custom=True,\n        )\n\n    _all_metrics[id] = new_metric\n\n    new_metric = new_metric.get_dict()\n\n    new_metric = pd.Series(new_metric, name=id.replace(\" \", \"_\")).drop(\"ID\")\n\n    return new_metric\n\n\ndef remove_metric(name_or_id: str):\n    \"\"\"\n    Removes a metric used in all functions.\n\n    Parameters\n    ----------\n    name_or_id: str\n        Display name or ID of the metric.\n\n    \"\"\"\n    if not \"_all_metrics\" in globals():\n        raise ValueError(\"setup() needs to be run first.\")\n\n    try:\n        _all_metrics.pop(name_or_id)\n        return\n    except:\n        pass\n\n    try:\n        k_to_remove = next(k for k, v in _all_metrics.items() if v.name == name_or_id)\n        _all_metrics.pop(k_to_remove)\n        return\n    except:\n        pass\n\n    raise ValueError(\n        f\"No metric 'Display Name' or 'ID' (index) {name_or_id} present in the metrics repository.\"\n    )\n\n\ndef get_logs(experiment_name: Optional[str] = None, save: bool = False) -> pd.DataFrame:\n\n    \"\"\"\n    Returns a table with experiment logs consisting of\n    run details, parameters, metrics and tags.\n\n    Example\n    -------\n    >>> logs = get_logs()\n\n    This will return a pandas dataframe.\n\n    Parameters\n    ----------\n    experiment_name : str, default = None\n        When set to None, the current active experiment is used.\n\n    save : bool, default = False\n        When set to True, a csv file is saved in the current working directory.\n\n    Returns\n    -------\n    pandas.DataFrame\n\n    \"\"\"\n\n    if experiment_name is None:\n        exp_name_log_ = exp_name_log\n    else:\n        exp_name_log_ = experiment_name\n\n    import mlflow\n    from mlflow.tracking import MlflowClient\n\n    client = MlflowClient()\n\n    if client.get_experiment_by_name(exp_name_log_) is None:\n        raise ValueError(\n            \"No active run found. 
Check logging parameter in setup or to get logs for inactive run pass experiment_name.\"\n )\n\n exp_id = client.get_experiment_by_name(exp_name_log_).experiment_id\n runs = mlflow.search_runs(exp_id)\n\n if save:\n file_name = f\"{exp_name_log_}_logs.csv\"\n runs.to_csv(file_name, index=False)\n\n return runs\n\n\ndef get_config(variable: str):\n\n \"\"\"\n This function is used to access global environment variables.\n Following variables can be accessed:\n\n - X: Transformed dataset (X)\n - y: Transformed dataset (y)\n - X_train: Transformed train dataset (X)\n - X_test: Transformed test/holdout dataset (X)\n - y_train: Transformed train dataset (y)\n - y_test: Transformed test/holdout dataset (y)\n - seed: random state set through session_id\n - prep_pipe: Transformation pipeline configured through setup\n - fold_shuffle_param: shuffle parameter used in Kfolds\n - n_jobs_param: n_jobs parameter used in model training\n - html_param: html_param configured through setup\n - create_model_container: results grid storage container\n - master_model_container: model storage container\n - display_container: results display container\n - exp_name_log: Name of experiment set through setup\n - logging_param: log_experiment param set through setup\n - log_plots_param: log_plots param set through setup\n - USI: Unique session ID parameter set through setup\n - fix_imbalance_param: fix_imbalance param set through setup\n - fix_imbalance_method_param: fix_imbalance_method param set through setup\n - data_before_preprocess: data before preprocessing\n - target_param: name of target variable\n - gpu_param: use_gpu param configured through setup\n\n Example\n -------\n >>> X_train = get_config('X_train')\n\n This will return X_train transformed dataset.\n\n Returns\n -------\n variable\n\n \"\"\"\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.get_config(variable, globals())\n\n\ndef set_config(variable: str, value):\n\n \"\"\"\n This function is used to reset global environment variables.\n Following variables can be accessed:\n\n - X: Transformed dataset (X)\n - y: Transformed dataset (y)\n - X_train: Transformed train dataset (X)\n - X_test: Transformed test/holdout dataset (X)\n - y_train: Transformed train dataset (y)\n - y_test: Transformed test/holdout dataset (y)\n - seed: random state set through session_id\n - prep_pipe: Transformation pipeline configured through setup\n - fold_shuffle_param: shuffle parameter used in Kfolds\n - n_jobs_param: n_jobs parameter used in model training\n - html_param: html_param configured through setup\n - create_model_container: results grid storage container\n - master_model_container: model storage container\n - display_container: results display container\n - exp_name_log: Name of experiment set through setup\n - logging_param: log_experiment param set through setup\n - log_plots_param: log_plots param set through setup\n - USI: Unique session ID parameter set through setup\n - fix_imbalance_param: fix_imbalance param set through setup\n - fix_imbalance_method_param: fix_imbalance_method param set through setup\n - data_before_preprocess: data before preprocessing\n\n Example\n -------\n >>> set_config('seed', 123)\n\n This will set the global seed to '123'.\n\n \"\"\"\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.set_config(variable, value, globals())\n\n\ndef save_config(file_name: str):\n\n \"\"\"\n This function is used to save all enviroment variables to file,\n allowing to later resume modeling without rerunning 
setup().\n\n Example\n -------\n >>> save_config('myvars.pkl')\n\n This will save all enviroment variables to 'myvars.pkl'.\n\n \"\"\"\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.save_config(file_name, globals())\n\n\ndef load_config(file_name: str):\n\n \"\"\"\n This function is used to load enviroment variables from file created with save_config(),\n allowing to later resume modeling without rerunning setup().\n\n\n Example\n -------\n >>> load_config('myvars.pkl')\n\n This will load all enviroment variables from 'myvars.pkl'.\n\n \"\"\"\n\n global _all_models, _all_models_internal, _all_metrics, X_train, create_model_container, master_model_container, display_container\n\n import pycaret.internal.utils\n\n r = pycaret.internal.utils.load_config(file_name, globals())\n\n if _ml_usecase == MLUsecase.CLASSIFICATION:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.classification.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.classification.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.classification.get_all_metric_containers(\n globals(), raise_errors=True\n )\n elif _ml_usecase == MLUsecase.REGRESSION:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.regression.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.regression.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.regression.get_all_metric_containers(\n globals(), raise_errors=True\n )\n elif _ml_usecase == MLUsecase.CLUSTERING:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.clustering.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.clustering.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.clustering.get_all_metric_containers(\n globals(), raise_errors=True\n )\n X_train = X\n elif _ml_usecase == MLUsecase.ANOMALY:\n _all_models = {\n k: v\n for k, v in pycaret.containers.models.anomaly.get_all_model_containers(\n globals(), raise_errors=True\n ).items()\n if not v.is_special\n }\n _all_models_internal = pycaret.containers.models.anomaly.get_all_model_containers(\n globals(), raise_errors=True\n )\n _all_metrics = pycaret.containers.metrics.anomaly.get_all_metric_containers(\n globals(), raise_errors=True\n )\n X_train = X\n\n create_model_container = []\n master_model_container = []\n display_container = []\n\n return r\n\n\ndef _choose_better(\n models_and_results: list,\n compare_dimension: str,\n fold: int,\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n display: Optional[Display] = None,\n):\n \"\"\"\n When choose_better is set to True, optimize metric in scoregrid is\n compared with base model created using create_model so that the\n functions return the model with better score only. 
This will ensure\n model performance is at least equivalent to what is seen in compare_models\n \"\"\"\n\n logger = get_logger()\n logger.info(\"choose_better activated\")\n display.update_monitor(1, \"Compiling Final Results\")\n display.display_monitor()\n\n if not fit_kwargs:\n fit_kwargs = {}\n\n for i, x in enumerate(models_and_results):\n if not isinstance(x, tuple):\n models_and_results[i] = (x, None)\n elif isinstance(x[0], str):\n models_and_results[i] = (x[1], None)\n elif len(x) != 2:\n raise ValueError(f\"{x} must have lenght 2 but has {len(x)}\")\n\n metric = _get_metric(compare_dimension)\n\n best_result = None\n best_model = None\n for model, result in models_and_results:\n if result is not None and is_fitted(model):\n result = result.loc[\"Mean\"][compare_dimension]\n else:\n logger.info(\n \"SubProcess create_model() called ==================================\"\n )\n model, _ = create_model_supervised(\n model,\n verbose=False,\n system=False,\n fold=fold,\n fit_kwargs=fit_kwargs,\n groups=groups,\n )\n logger.info(\n \"SubProcess create_model() end ==================================\"\n )\n result = pull(pop=True).loc[\"Mean\"][compare_dimension]\n logger.info(f\"{model} result for {compare_dimension} is {result}\")\n if not metric.greater_is_better:\n result *= -1\n if best_result is None or best_result < result:\n best_result = result\n best_model = model\n\n logger.info(f\"{best_model} is best model\")\n\n logger.info(\"choose_better completed\")\n return best_model\n\n\ndef _is_multiclass() -> bool:\n \"\"\"\n Method to check if the problem is multiclass.\n \"\"\"\n try:\n return _ml_usecase == MLUsecase.CLASSIFICATION and y.value_counts().count() > 2\n except:\n return False\n\n\ndef _get_model_id(e, models=None) -> str:\n \"\"\"\n Get model id.\n \"\"\"\n if models is None:\n models = _all_models_internal\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.get_model_id(e, models)\n\n\ndef _get_model_name(e, deep: bool = True, models=None) -> str:\n \"\"\"\n Get model name.\n \"\"\"\n if models is None:\n models = _all_models_internal\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.get_model_name(e, models, deep=deep)\n\n\ndef _is_special_model(e, models=None) -> bool:\n \"\"\"\n Is the model special (eg. 
VotingClassifier).\n \"\"\"\n if models is None:\n models = _all_models_internal\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.is_special_model(e, models)\n\n\ndef _calculate_metrics_supervised(\n y_test, pred, pred_prob, weights: Optional[list] = None,\n) -> dict:\n \"\"\"\n Calculate all metrics in _all_metrics.\n \"\"\"\n from pycaret.internal.utils import calculate_metrics\n\n try:\n return calculate_metrics(\n metrics=_all_metrics,\n y_test=y_test,\n pred=pred,\n pred_proba=pred_prob,\n weights=weights,\n )\n except:\n ml_usecase = get_ml_task(y_test)\n if ml_usecase == MLUsecase.CLASSIFICATION:\n metrics = pycaret.containers.metrics.classification.get_all_metric_containers(\n globals(), True\n )\n elif ml_usecase == MLUsecase.REGRESSION:\n metrics = pycaret.containers.metrics.regression.get_all_metric_containers(\n globals(), True\n )\n return calculate_metrics(\n metrics=metrics,\n y_test=y_test,\n pred=pred,\n pred_proba=pred_prob,\n weights=weights,\n )\n\n\ndef _calculate_metrics_unsupervised(\n X, labels, ground_truth=None, ml_usecase=None\n) -> dict:\n \"\"\"\n Calculate all metrics in _all_metrics.\n \"\"\"\n from pycaret.internal.utils import calculate_unsupervised_metrics\n\n if ml_usecase is None:\n ml_usecase = _ml_usecase\n\n try:\n return calculate_unsupervised_metrics(\n metrics=_all_metrics, X=X, labels=labels, ground_truth=ground_truth\n )\n except:\n if ml_usecase == MLUsecase.CLUSTERING:\n metrics = pycaret.containers.metrics.clustering.get_all_metric_containers(\n globals(), True\n )\n return calculate_unsupervised_metrics(\n metrics=metrics, X=X, labels=labels, ground_truth=ground_truth\n )\n\n\ndef get_ml_task(y):\n c1 = y.dtype == \"int64\"\n c2 = y.nunique() <= 20\n c3 = y.dtype.name in [\"object\", \"bool\", \"category\"]\n if ((c1) & (c2)) | (c3):\n ml_usecase = MLUsecase.CLASSIFICATION\n else:\n ml_usecase = MLUsecase.REGRESSION\n return ml_usecase\n\n\ndef _mlflow_log_model(\n model,\n model_results,\n score_dict: dict,\n source: str,\n runtime: float,\n model_fit_time: float,\n _prep_pipe,\n log_holdout: bool = True,\n log_plots: bool = False,\n tune_cv_results=None,\n URI=None,\n display: Optional[Display] = None,\n):\n logger = get_logger()\n\n logger.info(\"Creating MLFlow logs\")\n\n # Creating Logs message monitor\n if display:\n display.update_monitor(1, \"Creating Logs\")\n display.display_monitor()\n\n # import mlflow\n import mlflow\n import mlflow.sklearn\n\n mlflow.set_experiment(exp_name_log)\n\n full_name = _get_model_name(model)\n logger.info(f\"Model: {full_name}\")\n\n with mlflow.start_run(run_name=full_name) as run:\n\n # Get active run to log as tag\n RunID = mlflow.active_run().info.run_id\n\n # Log model parameters\n pipeline_estimator_name = get_pipeline_estimator_label(model)\n if pipeline_estimator_name:\n params = model.named_steps[pipeline_estimator_name]\n else:\n params = model\n\n # get regressor from meta estimator\n params = get_estimator_from_meta_estimator(params)\n\n try:\n try:\n params = params.get_all_params()\n except:\n params = params.get_params()\n except:\n logger.warning(\"Couldn't get params for model. 
Exception:\")\n logger.warning(traceback.format_exc())\n params = {}\n\n for i in list(params):\n v = params.get(i)\n if len(str(v)) > 250:\n params.pop(i)\n\n logger.info(f\"logged params: {params}\")\n mlflow.log_params(params)\n\n # Log metrics\n mlflow.log_metrics(score_dict)\n\n # set tag of compare_models\n mlflow.set_tag(\"Source\", source)\n\n if not URI:\n import secrets\n\n URI = secrets.token_hex(nbytes=4)\n mlflow.set_tag(\"URI\", URI)\n mlflow.set_tag(\"USI\", USI)\n mlflow.set_tag(\"Run Time\", runtime)\n mlflow.set_tag(\"Run ID\", RunID)\n\n # Log training time in seconds\n mlflow.log_metric(\"TT\", model_fit_time)\n\n # Log the CV results as model_results.html artifact\n if not _is_unsupervised(_ml_usecase):\n try:\n model_results.data.to_html(\"Results.html\", col_space=65, justify=\"left\")\n except:\n model_results.to_html(\"Results.html\", col_space=65, justify=\"left\")\n mlflow.log_artifact(\"Results.html\")\n os.remove(\"Results.html\")\n\n if log_holdout:\n # Generate hold-out predictions and save as html\n try:\n holdout = predict_model(model, verbose=False)\n holdout_score = pull(pop=True)\n del holdout\n holdout_score.to_html(\"Holdout.html\", col_space=65, justify=\"left\")\n mlflow.log_artifact(\"Holdout.html\")\n os.remove(\"Holdout.html\")\n except:\n logger.warning(\n \"Couldn't create holdout prediction for model, exception below:\"\n )\n logger.warning(traceback.format_exc())\n\n # Log AUC and Confusion Matrix plot\n\n if log_plots:\n\n logger.info(\n \"SubProcess plot_model() called ==================================\"\n )\n\n def _log_plot(plot):\n try:\n plot_name = plot_model(\n model, plot=plot, verbose=False, save=True, system=False\n )\n mlflow.log_artifact(plot_name)\n os.remove(plot_name)\n except Exception as e:\n logger.warning(e)\n\n for plot in log_plots:\n _log_plot(plot)\n\n logger.info(\n \"SubProcess plot_model() end ==================================\"\n )\n\n # Log hyperparameter tuning grid\n if tune_cv_results:\n d1 = tune_cv_results.get(\"params\")\n dd = pd.DataFrame.from_dict(d1)\n dd[\"Score\"] = tune_cv_results.get(\"mean_test_score\")\n dd.to_html(\"Iterations.html\", col_space=75, justify=\"left\")\n mlflow.log_artifact(\"Iterations.html\")\n os.remove(\"Iterations.html\")\n\n # get default conda env\n from mlflow.sklearn import get_default_conda_env\n\n default_conda_env = get_default_conda_env()\n default_conda_env[\"name\"] = f\"{exp_name_log}-env\"\n default_conda_env.get(\"dependencies\").pop(-3)\n dependencies = default_conda_env.get(\"dependencies\")[-1]\n from pycaret.utils import __version__\n\n dep = f\"pycaret=={__version__}\"\n dependencies[\"pip\"] = [dep]\n\n # define model signature\n from mlflow.models.signature import infer_signature\n\n try:\n signature = infer_signature(\n data_before_preprocess.drop([target_param], axis=1)\n )\n except:\n logger.warning(\"Couldn't infer MLFlow signature.\")\n signature = None\n if not _is_unsupervised(_ml_usecase):\n input_example = (\n data_before_preprocess.drop([target_param], axis=1).iloc[0].to_dict()\n )\n else:\n input_example = data_before_preprocess.iloc[0].to_dict()\n\n # log model as sklearn flavor\n prep_pipe_temp = deepcopy(_prep_pipe)\n prep_pipe_temp.steps.append([\"trained_model\", model])\n mlflow.sklearn.log_model(\n prep_pipe_temp,\n \"model\",\n conda_env=default_conda_env,\n # signature=signature,\n # input_example=input_example,\n )\n del prep_pipe_temp\n gc.collect()\n\n\ndef _get_columns_to_stratify_by(\n X: pd.DataFrame, y: pd.DataFrame, stratify: 
Union[bool, List[str]], target: str\n) -> pd.DataFrame:\n if not stratify:\n stratify = None\n else:\n if isinstance(stratify, list):\n data = pd.concat([X, y], axis=1)\n if not all(col in data.columns for col in stratify):\n raise ValueError(\"Column to stratify by does not exist in the dataset.\")\n stratify = data[stratify]\n else:\n stratify = y\n return stratify\n\n\ndef _get_cv_splitter(fold, ml_usecase: Optional[MLUsecase] = None):\n if not ml_usecase:\n ml_usecase = _ml_usecase\n\n import pycaret.internal.utils\n\n return pycaret.internal.utils.get_cv_splitter(\n fold,\n default=fold_generator,\n seed=seed,\n shuffle=fold_shuffle_param,\n int_default=\"stratifiedkfold\"\n if ml_usecase == MLUsecase.CLASSIFICATION\n else \"kfold\",\n )\n\n\ndef _get_cv_n_folds(fold, X, y=None, groups=None):\n import pycaret.internal.utils\n\n return pycaret.internal.utils.get_cv_n_folds(\n fold, default=fold_generator, X=X, y=y, groups=groups\n )\n\n\ndef _get_pipeline_fit_kwargs(pipeline, fit_kwargs: dict) -> dict:\n import pycaret.internal.pipeline\n\n return pycaret.internal.pipeline.get_pipeline_fit_kwargs(pipeline, fit_kwargs)\n\n\ndef _get_groups(\n groups,\n data: Optional[pd.DataFrame] = None,\n fold_groups=None,\n ml_usecase: Optional[MLUsecase] = None,\n):\n import pycaret.internal.utils\n\n data = data if data is not None else X_train\n fold_groups = fold_groups if fold_groups is not None else fold_groups_param\n\n return pycaret.internal.utils.get_groups(groups, data, fold_groups)\n" ]
[ [ "pandas.reset_option", "sklearn.tree.plot_tree", "sklearn.model_selection.cross_validate", "numpy.mean", "numpy.where", "numpy.unique", "pandas.concat", "sklearn.model_selection._search.GridSearchCV", "sklearn.model_selection.StratifiedKFold", "pandas.set_option", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.tight_layout", "sklearn.base.clone", "sklearn.model_selection._search.RandomizedSearchCV", "sklearn.decomposition.PCA", "matplotlib.pyplot.hlines", "numpy.array", "sklearn.preprocessing.LabelEncoder", "matplotlib.pyplot.title", "sklearn.model_selection.GroupKFold", "matplotlib.pyplot.close", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "sklearn.manifold.TSNE", "numpy.std", "sklearn.model_selection.train_test_split", "sklearn.model_selection.KFold", "matplotlib.pyplot.show", "pandas.get_dummies", "pandas.isnull", "sklearn.calibration.calibration_curve", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "matplotlib.pyplot.xlabel", "pandas.DataFrame.from_dict", "matplotlib.pyplot.plot", "numpy.sum", "matplotlib.pyplot.subplot2grid", "matplotlib.pyplot.ylabel", "sklearn.model_selection.TimeSeriesSplit", "numpy.linspace", "sklearn.set_config" ] ]
chrwolff/udacityMLCapstone
[ "44c11d34defd90c6ff1180376ecc764d82ed09af" ]
[ "webapp/server.py" ]
[ "from flask import Flask, request, jsonify, send_from_directory\nimport pandas as pd\nimport numpy as np\nimport json\nimport xgboost as xgb\n\napp = Flask(__name__)\n\nfeatures = ['latitude_pca', 'longitude_pca',\n 'apparentTemperature', 'dewPoint', 'humidity', 'precipIntensity',\n 'precipProbability', 'pressure', 'temperature',\n 'visibility', 'windBearing', 'windSpeed', 'hour',\n 'weekday', 'is_holiday', 'is_weekend', 'is_weekend_or_holiday']\n\n#load stations\nprint(\"Loading Station data...\")\nstations_df = pd.read_csv('../data/modelInput/stations_201505_201611.csv')\nstations_df = stations_df.rename(columns={\n 'station_name': 'name',\n 'station_id': 'id'\n})\nstations_df['first_used'] = stations_df['first_used'].apply(pd.Timestamp)\nstations_df['last_used'] = stations_df['last_used'].apply(pd.Timestamp)\nstation_ids = stations_df['id'].unique()\n\n#load models\nprint(\"Loading models...\")\ndepartures_model = xgb.Booster(model_file='../models/boosterDepartures.xgbm')\narrivals_model = xgb.Booster(model_file='../models/boosterArrivals.xgbm')\n\n#load additional features\nprint(\"Loading additional features...\")\nadditional_features_df = pd.read_csv('../data/modelInput/additionalFeatures.csv')\nadditional_features_df['date_hour'] = additional_features_df['date_hour'].apply(pd.Timestamp)\nadditional_features_df = additional_features_df.set_index('date_hour')\nadditional_features_df = additional_features_df.tz_localize(None)\n\n#create empty dataframe to cache predictions\npredictions_cache = pd.DataFrame([], columns=['date_hour',\n 'station_id',\n 'arrivals',\n 'departures',\n 'flow'])\npredictions_cache = predictions_cache.set_index('date_hour')\n\n#load the actual flow data for comparison\nprint(\"Loading historical data...\")\nstation_history_df = pd.read_csv('../data/modelInput/flowPerHourAndStation.csv')\nstation_history_df['date_hour'] = station_history_df['date_hour'].apply(pd.Timestamp)\nstation_history_df = station_history_df.set_index('date_hour')\n\ndef get_predictions(timestamp_start, timestamp_end):\n global predictions_cache\n \n #if possible, use predictions from cache \n trips = predictions_cache[timestamp_start:timestamp_end]\n if (trips.shape[0] > 0):\n return trips\n\n #create index from the Cartesian product of station ids and timestamps for the 24 hours of the requested day\n date_hours = pd.date_range(timestamp_start, timestamp_end, freq='H')\n station_time_index = pd.MultiIndex.from_product([date_hours, station_ids], names=['date_hour', 'station_id'])\n station_time_df = pd.DataFrame({'arrivals': 0, 'departures': 0}, index=station_time_index)\n station_time_df = station_time_df.reset_index()\n station_time_df = station_time_df.set_index('date_hour')\n\n #join weather and holiday data\n model_data = station_time_df.merge(additional_features_df, how='left', left_index=True, right_index=True)\n\n #join latitude and longitude from station data\n model_data = model_data.reset_index()\n model_data = model_data.merge(stations_df[['id', 'latitude_pca', 'longitude_pca']], how='left', left_on='station_id', right_on='id')\n model_data = model_data.set_index('date_hour')\n\n #make predictions\n x_model_data = xgb.DMatrix(model_data[features])\n model_data['departures'] = departures_model.predict(x_model_data)\n model_data['arrivals'] = arrivals_model.predict(x_model_data)\n model_data = model_data.reset_index()\n\n #add predicted arrivals and departures to cache and return them \n trips = model_data[['date_hour', 'station_id', 'arrivals', 'departures']]\n trips = 
trips.round({'arrivals': 0, 'departures': 0})\n trips['flow'] = trips['arrivals'] - trips['departures']\n trips = trips.set_index('date_hour')\n\n predictions_cache = predictions_cache.append(trips)\n predictions_cache = predictions_cache.sort_index()\n return trips\n\[email protected](\"/\")\ndef index():\n return app.send_static_file('index.html')\n\[email protected]('/resources/<path:path>')\ndef send_resource(path):\n return send_from_directory('static/dist', path)\n\[email protected]('/packages/<path:path>')\ndef send_package(path):\n return send_from_directory('node_modules', path)\n\n#fetch numbers for a single station and date\[email protected]('/stationData', methods=[\"POST\"])\ndef station_data():\n if request.method == \"POST\":\n json_dict = request.get_json()\n\n #retrieve requested station id and date\n stationId = int(json_dict['stationId'])\n timestamp = pd.Timestamp(json_dict['timestamp'], tz='UTC')\n\n timestamp_start = pd.Timestamp(timestamp.year, timestamp.month, timestamp.day, 0)\n timestamp_end = pd.Timestamp(timestamp.year, timestamp.month, timestamp.day, 23)\n \n #real historical data\n real_df = station_history_df[timestamp_start:timestamp_end]\n real_df = real_df.reset_index()\n real_df = real_df[real_df['station_id'] == stationId]\n real_df['hour'] = real_df['date_hour'].dt.hour\n real_df = real_df.set_index('hour')\n\n #get predictions\n predictions_df = get_predictions(timestamp_start, timestamp_end)\n predictions_df = predictions_df.reset_index()\n predictions_df = predictions_df[predictions_df['station_id'] == stationId]\n predictions_df['hour'] = predictions_df['date_hour'].dt.hour\n predictions_df = predictions_df.set_index('hour')\n\n #return real and predicted data as json\n return_json = {\n 'predictionValues': predictions_df.to_json(orient='index'),\n 'realValues': real_df.to_json(orient='index'),\n 'maximum': {\n 'arrivals': int(real_df['arrivals'].append(predictions_df['arrivals']).max()),\n 'departures': int(real_df['departures'].append(predictions_df['departures']).max())\n }\n }\n\n return json.dumps(return_json)\n else:\n return \"\"\n\n#fetch aggregated numbers for all stations for a given date\[email protected]('/aggregatedData', methods=[\"POST\"])\ndef aggregatedData():\n if request.method == \"POST\":\n json_dict = request.get_json()\n\n #retrievethe requested date and the event threshold \n timestamp = pd.Timestamp(json_dict['timestamp'], tz='UTC')\n event_threshold = int(json_dict['eventThreshold'])\n\n timestamp_start = pd.Timestamp(timestamp.year, timestamp.month, timestamp.day, 0)\n timestamp_end = pd.Timestamp(timestamp.year, timestamp.month, timestamp.day, 23)\n \n #real historical data\n real_df = station_history_df[timestamp_start:timestamp_end]\n real_df = real_df.reset_index()\n real_df['hour'] = real_df['date_hour'].dt.hour\n\n #get predicted data\n predictions_df = get_predictions(timestamp_start, timestamp_end)\n predictions_df = predictions_df.reset_index()\n predictions_df['hour'] = predictions_df['date_hour'].dt.hour\n\n #aggregate real and predicted data\n aggregated_real_df = real_df.drop(['date_hour', 'station_id'], axis=1)\n aggregated_real_df = aggregated_real_df.groupby('hour').sum()\n\n aggregated_predictions_df = predictions_df.drop(['date_hour', 'station_id'], axis=1)\n aggregated_predictions_df = aggregated_predictions_df.groupby('hour').sum()\n\n #get all rows with absolute flow above the event threshold\n events_df = predictions_df[abs(predictions_df['flow']) > event_threshold]\n\n return_json = {\n 
'aggregatedRealValues': aggregated_real_df.to_json(orient='index'),\n 'aggregatedPredictionValues': aggregated_predictions_df.to_json(orient='index'),\n 'events': events_df.to_json(orient='records'),\n 'aggregatedMaximum': {\n 'arrivals': int(aggregated_predictions_df['arrivals'].append(aggregated_real_df['arrivals']).max()),\n 'departures': int(aggregated_predictions_df['departures'].append(aggregated_real_df['departures']).max())\n }\n }\n\n return json.dumps(return_json)\n else:\n return \"\"\n\[email protected]('/weather', methods=[\"POST\"])\ndef weather():\n if request.method == \"POST\":\n json_dict = request.get_json()\n\n timestamp = pd.Timestamp(json_dict['timestamp'], tz='UTC')\n\n timestamp_start = pd.Timestamp(timestamp.year, timestamp.month, timestamp.day, 0)\n timestamp_end = pd.Timestamp(timestamp.year, timestamp.month, timestamp.day, 23)\n slice_df = additional_features_df[timestamp_start:timestamp_end]\n slice_df = slice_df.reset_index()\n slice_df = slice_df.set_index('hour')\n\n return slice_df.to_json(orient='index')\n else:\n return \"\"\n\[email protected]('/stations', methods=[\"GET\"])\ndef stations():\n return stations_df.set_index('id').to_json(orient='index')\n\nif __name__ == '__main__':\n app.run()\n" ]
[ [ "pandas.DataFrame", "pandas.date_range", "pandas.Timestamp", "pandas.MultiIndex.from_product", "pandas.read_csv" ] ]
LiFaytheGoblin/aequitas
[ "e5690baa955c94ea6459af5064cf1c741a345646" ]
[ "examples/compas_data_for_aequitas.py" ]
[ "# The purpose of this script is to transform raw data to \n# the format expected by Aequitas.\n#\n# SOURCE: ProPublica \n# Data: https://github.com/propublica/compas-analysis/raw/master/compas-scores-two-years.csv\n# ProPublica's methodology: https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm\n# Ari Anisfeld\n\nimport pandas as pd\ndf = pd.read_csv('./raw_data/compas-scores-two-years.csv')\n\n\n# rename for tool\ndf = df.rename(columns={'id':'entity_id', \n 'two_year_recid':'label_value'})\n\n# score_text is 'High', 'Medium' or 'Low' and reflects level of assessed risk of recidivism\n# \"High\" and \"Medium\" are considered prediction that the defendant is charged with a felony \n# or misdemenor in the two years after administation of COMPAS assessment. \"Low\" is considered \n# a prediction of non-recidivism. This is based on ProPublica's interpretation of Northpointe's\n# practioner guide.\n#\n# \"According to Northpointe’s practitioners guide, COMPAS “scores in the medium and high range \n# garner more interest from supervision agencies than low scores, as a low score would suggest\n# there is little risk of general recidivism,” so we considered scores any higher than “low” to \n# indicate a risk of recidivism.\"\n# (https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm)\n\ndf.loc[df['score_text'] == 'Low', 'score'] = str(0.0)\ndf.loc[df['score_text'] != 'Low', 'score'] = str(1.0)\n\n\ndf = df[['entity_id', 'score', 'label_value', 'race', 'sex', 'age_cat']]\n\n\ndf.to_csv('./data/compas_for_aequitas.csv', index=False)" ]
[ [ "pandas.read_csv" ] ]
dannis999/RE-MIMO
[ "199ddec7f142ba5bd87e76e0b5f7790c64e69b0c" ]
[ "iid_channels/qam_64/oamp_net/test_oampnet.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn as nn\nimport pickle\nimport os\nimport math\nimport torch.nn.functional as F\n\nfrom collections import defaultdict\nfrom sample_generator import sample_generator\nfrom oampnet import oampnet\n\n# Parameters\nNT = 32\nNR = 64\n\nmod_n = 64\nnum_layers = 10\n\n# Batch sizes for training and validation sets\nvalidtn_batch_size = 5000\nvalidtn_iter = 1000\n\nM = int(np.sqrt(mod_n))\nsigConst = np.linspace(-M+1, M-1, M)\nsigConst /= np.sqrt((sigConst ** 2).mean())\nsigConst /= np.sqrt(2.) #Each complex transmitted signal will have two parts\n\nvalidtn_NT_list = np.asarray([NT])\nsnrdb_classical_list = {16:np.arange(16.0, 22.0), 32:np.arange(18.0, 24.0)}\n\nmodel_filename = './validtn_results/oampnet_q' + str(mod_n) + '_' + str(NT) + '_' + str(NR) + '.pth'\noampnet_validtn_filename = './final_results/oampnet_' + str(NT) + '_validtn_results.pickle'\n\ndef bit_indices(indices, mod_n):\n\treal_indices = (indices//np.sqrt(mod_n)).to(dtype=torch.int32)\n\timag_indices = (indices%np.sqrt(mod_n)).to(dtype=torch.int32)\n\tjoint_bit_indices = torch.cat((real_indices, imag_indices), dim=-1)\n\treturn joint_bit_indices\n\ndef sym_accuracy(out, j_indices):\n\taccuracy = (out == j_indices).sum().to(dtype=torch.float32)\n\treturn accuracy.item()/out.numel()\n\ndef bit_accuracy(out, j_indices):\n\tbit_out_indices = bit_indices(out, mod_n)\n\tbit_j_indices = bit_indices(j_indices, mod_n)\n\treturn sym_accuracy(bit_out_indices, bit_j_indices)\n\ndef sym_detection(x_hat, j_indices, real_QAM_const, imag_QAM_const):\n\tx_real, x_imag = torch.chunk(x_hat, 2, dim=-1)\n\tx_real = x_real.unsqueeze(dim=-1).expand(-1,-1, real_QAM_const.numel())\n\tx_imag = x_imag.unsqueeze(dim=-1).expand(-1, -1, imag_QAM_const.numel())\n\n\tx_real = torch.pow(x_real - real_QAM_const, 2)\n\tx_imag = torch.pow(x_imag - imag_QAM_const, 2)\n\tx_dist = x_real + x_imag\n\tx_indices = torch.argmin(x_dist, dim=-1)\n\n\treturn x_indices\n\ndef generate_big_validtn_data(generator, batch_size):\n\tvalidtn_data_dict = {int(NT):{} for NT in validtn_NT_list}\n\tfor NT in validtn_NT_list:\n\t\tfor snr in snrdb_classical_list[NT]:\n\t\t\tbig_validtn_H, big_validtn_y, _, big_validtn_j_indices, big_noise_sigma = generator.give_batch_data(int(NT), snr_db_min=snr, snr_db_max=snr, batch_size=batch_size)\n\t\t\tvalidtn_data_dict[int(NT)][snr] = (big_validtn_H, big_validtn_y , big_validtn_j_indices, big_noise_sigma)\n\treturn validtn_data_dict\n\n\ndef validate_model_given_data(model, validtn_H, validtn_y, validtn_j_indices, big_noise_sigma, real_QAM_const, imag_QAM_const, device):\n\t\n\twith torch.no_grad():\n\t\tH = validtn_H.to(device=device)\n\t\ty = validtn_y.to(device=device)\n\t\tnoise_sigma = big_noise_sigma.to(device=device)\n\n\t\tlist_batch_x_predicted = model.forward(H, y, noise_sigma)\n\t\tvalidtn_out = list_batch_x_predicted[-1].to(device='cpu')\n\t\tindices_oampnet = sym_detection(validtn_out, validtn_j_indices, real_QAM_const, imag_QAM_const)\n\t\taccr = sym_accuracy(indices_oampnet, validtn_j_indices)\n\n\t\tdel H, y, noise_sigma, list_batch_x_predicted\n\n\treturn accr\n\n\ndef validate_oampnet(model, generator, device, real_QAM_const, imag_QAM_const, save_result=True):\n\tresult_dict = {int(NT):defaultdict(float) for NT in validtn_NT_list}\n\tfor iter in range(validtn_iter):\n\t\tvalidtn_data_dict = generate_big_validtn_data(generator, validtn_batch_size)\n\t\tfor NT in validtn_NT_list:\n\t\t\tfor snr in snrdb_classical_list[NT]:\n\t\t\t\tbig_validtn_H, big_validtn_y, 
big_validtn_j_indices, big_noise_sigma = validtn_data_dict[NT][snr]\n\t\t\t\taccr = validate_model_given_data(model, big_validtn_H, big_validtn_y, big_validtn_j_indices, big_noise_sigma, real_QAM_const, imag_QAM_const, device)\n\t\t\t\tresult_dict[NT][snr] = result_dict[NT][snr] + (accr-result_dict[NT][snr])/float(iter+1.0)\n\n\t\tif (save_result):\n\t\t\twith open(oampnet_validtn_filename, 'wb') as handle:\n\t\t\t\tpickle.dump(result_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\t\t\tprint('Intermediate Test results saved at directory : ', oampnet_validtn_filename)\n\t\tprint('Big Validation resut, Accr for ' + str(NT) + ' : ', result_dict[NT])\n\n\ndef test(model, generator, device):\n\tmodel.eval()\n\n\t# Testing Trained Network\n\tvalidate_oampnet(model, generator, device, generator.real_QAM_const, generator.imag_QAM_const, True)\n\ndef main():\n\tgenerator = sample_generator(validtn_batch_size, mod_n, NR)\n\tdevice = 'cuda'\n\tmodel = oampnet(num_layers, generator.constellation, generator.real_QAM_const, generator.imag_QAM_const, device=device)\n\tmodel = model.to(device=device)\n\tmodel.load_state_dict(torch.load(model_filename))\n\tprint('*******Successfully loaded pre-trained model*********** from directory : ', model_filename)\n\n\ttest(model, generator, device)\n\tprint('******************************** Now Testing **********************************************')\n\nif __name__ == '__main__':\n\tmain()\n" ]
[ [ "torch.cat", "numpy.asarray", "torch.no_grad", "torch.argmin", "numpy.arange", "numpy.sqrt", "torch.load", "numpy.linspace", "torch.chunk", "torch.pow" ] ]
glavrentiadis/pygmm
[ "0ad5c870a102ec483a725d1f7f87507cd419c378" ]
[ "pygmm/idriss_2014.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Idriss (2014, :cite:`idriss14`) model.\"\"\"\n\nimport numpy as np\n\nfrom . import model\n\n__author__ = 'Albert Kottke'\n\n\nclass Idriss2014(model.GroundMotionModel):\n \"\"\"Idriss (2014, :cite:`idriss14`) model.\n\n This model was developed for active tectonic regions as part of the\n NGA-West2 effort.\n\n Parameters\n ----------\n scenario : :class:`pygmm.model.Scenario`\n earthquake scenario\n\n \"\"\"\n\n NAME = 'Idriss (2014)'\n ABBREV = 'I14'\n\n # Reference velocity (m/s)\n V_REF = 1200.\n\n # Load the coefficients for the model\n COEFF = dict(\n small=model.load_data_file('idriss_2014-small.csv', 2),\n large=model.load_data_file('idriss_2014-large.csv', 2), )\n PERIODS = COEFF['small']['period']\n\n INDEX_PGA = 0\n INDICES_PSA = np.arange(22)\n\n PARAMS = [\n model.NumericParameter('dist_rup', True, None, 150),\n model.NumericParameter('mag', True, 5, None),\n model.NumericParameter('v_s30', True, 450, 1200),\n model.CategoricalParameter('mechanism', True, ['SS', 'RS'], 'SS'),\n ]\n\n def __init__(self, scenario: model.Scenario):\n \"\"\"Initialize the model.\"\"\"\n super().__init__(scenario)\n self._ln_resp = self._calc_ln_resp()\n self._ln_std = self._calc_ln_std()\n\n def _calc_ln_resp(self) -> np.ndarray:\n \"\"\"Calculate the natural logarithm of the response.\n\n Returns\n -------\n ln_resp : class:`np.array`:\n natural log of the response\n\n \"\"\"\n s = self._scenario\n c = self.COEFF['small'] if s.mag <= 6.75 else self.COEFF['large']\n\n if s.mechanism == 'RS':\n flag_mech = 1\n else:\n # SS/RS/U\n flag_mech = 0\n\n f_mag = (c.alpha_1 + c.alpha_2 * s.mag + c.alpha_3 *\n (8.5 - s.mag) ** 2)\n f_dst = (-(c.beta_1 + c.beta_2 * s.mag) * np.log(s.dist_rup + 10) +\n c.gamma * s.dist_rup)\n f_ste = c.epsilon * np.log(s.v_s30)\n f_mec = c.phi * flag_mech\n\n ln_resp = f_mag + f_dst + f_ste + f_mec\n\n return ln_resp\n\n def _calc_ln_std(self) -> np.ndarray:\n \"\"\"Calculate the logarithmic standard deviation.\n\n Returns\n -------\n ln_std : class:`np.array`:\n natural log standard deviation\n\n \"\"\"\n s = self._scenario\n ln_std = (1.18 + 0.035 * np.log(np.clip(self.PERIODS, 0.05, 3.0)) -\n 0.06 * np.clip(s.mag, 5.0, 7.5))\n return ln_std\n" ]
[ [ "numpy.arange", "numpy.log", "numpy.clip" ] ]
rguerrettaz/client
[ "06a8759ad9c3c407e815cecbd789c3a2d44e4a2b" ]
[ "tests/test_history.py" ]
[ "import pytest\nimport os\nimport json\nimport six\nimport numpy as np\nfrom click.testing import CliRunner\n\nfrom wandb.history import History\nfrom wandb import media\nfrom wandb import data_types\nimport torch\n\n\[email protected]\ndef history():\n with CliRunner().isolated_filesystem():\n yield History(\"wandb-history.jsonl\")\n\n\ndef di(row):\n \"\"\"Returns a dict_items object for easier comparison\"\"\"\n return six.viewitems(row)\n\n\ndef disk_history():\n \"\"\"Reads history from disk and returns an array of dicts\"\"\"\n return History(\"wandb-history.jsonl\").rows\n\n\ndef test_history_default(history):\n history.add({\"loss\": 0.5})\n h = disk_history()\n assert di({\"loss\": 0.5, \"_step\": 0}) <= di(h[0])\n assert \"_runtime\" in h[0].keys()\n\n\ndef test_history_multi_write(history):\n history.row.update({\"epoch\": 1, \"val_loss\": 1})\n history.add({\"loss\": 0.5})\n h = disk_history()\n assert di({\"loss\": 0.5, \"val_loss\": 1, \"epoch\": 1}) <= di(h[0])\n\n\ndef test_history_explicit_write(history):\n history.add({\"loss\": 0.5})\n history.add({\"loss\": 0.6})\n h = disk_history()\n assert h[0][\"loss\"] == 0.5\n assert h[-1][\"loss\"] == 0.6\n\n\ndef test_step_context(history):\n with history.step() as h:\n h.add({\"loss\": 0.2})\n h.row[\"epoch\"] = 1\n h = disk_history()\n assert di({\"loss\": 0.2, \"epoch\": 1}) <= di(h[0])\n\n\ndef test_step_context_no_compute(history):\n with history.step(compute=False) as h:\n h.add({\"loss\": 0.2})\n h.row[\"epoch\"] = 1\n if h.compute:\n raise ValueError()\n h = disk_history()\n assert len(h) == 0\n\n\ndef test_step_context_global(history):\n with history.step():\n history.add({\"foo\": \"bar\"})\n h = disk_history()\n assert di({\"foo\": \"bar\"}) <= di(h[0])\n\n\ndef test_stream_step(history):\n with history.stream(\"batch\").step() as h:\n h.add({\"foo\": \"bar\"})\n h = disk_history()\n assert di({\"_stream\": \"batch\", \"foo\": \"bar\"}) <= di(h[0])\n\n\ndef test_list_of_images(history):\n image = np.random.randint(255, size=(28, 28))\n history.add({\"images\": [media.Image(image)]})\n h = disk_history()\n assert h[0][\"images\"] == {'_type': 'images',\n 'count': 1, 'height': 28, 'width': 28}\n\n\ndef test_single_image(history):\n image = np.random.randint(255, size=(28, 28))\n history.add({\"images\": media.Image(image)})\n h = disk_history()\n assert h[0][\"images\"] == {'_type': 'images',\n 'count': 1, 'height': 28, 'width': 28}\n\n\ndef test_histogram(history):\n data = np.random.randint(255, size=500)\n history.add({\"hist\": data_types.Histogram(data)})\n h = disk_history()\n assert h[0][\"hist\"]['_type'] == 'histogram'\n assert len(h[0][\"hist\"]['values']) == 64\n\n\ndef test_stream(history):\n history.stream(\"foo\").add({\"acc\": 1})\n h = disk_history()\n assert di({\"_stream\": \"foo\", \"acc\": 1}) <= di(h[0])\n\n\[email protected](\"This stopped working for reasons not clear to me...\")\ndef test_torch(history):\n with history.step():\n history.torch.log_stats(\n torch.autograd.Variable(torch.randn(\n 2, 2).type(torch.FloatTensor), requires_grad=True), \"layer1\")\n h = disk_history()\n assert \"_layer1-0.50\" in h[0].keys()\n\n\ndef test_torch_no_compute(history):\n with history.step(False):\n history.torch.log_stats(\n torch.autograd.Variable(torch.randn(\n 2, 2).type(torch.FloatTensor), requires_grad=True), \"layer1\")\n h = disk_history()\n assert len(h) == 0\n" ]
[ [ "numpy.random.randint", "torch.randn" ] ]
mftorres/seqcap_processor
[ "ce8ff01b3918bd29105db7b3d91b1d12572f014e" ]
[ "secapr/assemble_reads.py" ]
[ "#author: Tobias Andermann, [email protected]\n\n'''\nAssemble trimmed Illumina read files (fastq)\n'''\n\nimport os\nimport sys\nimport re\nimport glob\nimport shutil\nimport argparse\nimport subprocess\nimport pandas as pd\nimport numpy as np\nfrom Bio import SeqIO\nimport time\n\n# Complete path function\nclass CompletePath(argparse.Action):\n \"\"\"give the full path of an input file/folder\"\"\"\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))\n\ndef add_arguments(parser):\n parser.add_argument(\n '--input',\n required=True,\n action=CompletePath,\n default=None,\n help='Call the folder that contains the trimmed reads, organized in a separate subfolder for each sample. The name of the subfolder has to start with the sample name, delimited with an underscore [_] (default output of secapr clean_reads function)'\n )\n parser.add_argument(\n '--output',\n required=True,\n action=CompletePath,\n default=None,\n help='The output directory where results will be saved'\n )\n parser.add_argument(\n '--assembler',\n choices=[\"spades\",\"abyss\"],# trinity support discontinued \n default=\"spades\",\n help=\"\"\"The assembler to use (default = spades).\"\"\"\n )\n parser.add_argument(\n '--kmer',\n type=str,\n help='Set the kmer value (only available for Abyss and Spades). Provide single value for Abyss, or list of kmers for Spades, e.g. \"--kmer 21,33,55\". Default for Abyss is 35, and for spades it is 21,33,55,77,99,127. Note that Spades only accepts uneven kmer values.'\n )\n parser.add_argument(\n '--contig_length',\n type=int,\n default=200,\n help='Set the minimum contig length for the assembly. Contigs that are shorter than this threshold will be discarded.'\n )\n parser.add_argument(\n '--max_memory',\n type=str,\n help='Set the maximum memory to be used during assembly in GB (only available for Spades). 
This can be necessary when working with computing nodes with limited memory or to avoid over-allocation of computing resources on clusters which can in some cases cause your assembly to be stopped or interrupted.'\n )\n parser.add_argument(\n '--single_reads',\n action='store_true',\n default=False,\n help='Use this flag if you additionally want to use single reads for the assembly'\n )\n parser.add_argument(\n '--cores',\n type=int,\n default=1,\n help='For parallel processing you can set the number of cores you want to run the assembly on.'\n )\n\n\ndef assembly_trinity(forw,backw,output_folder,id_sample,cores,min_length,max_memory):\n print((\"De-novo assembly with Trinity of sample %s:\" %id_sample))\n #print(output_folder)\n if not max_memory:\n max_memory = '8G'\n else:\n max_memory = '%sG'%max_memory\n command = [\n \"Trinity\",\n \"--seqType\",\n \"fq\",\n \"--left\",\n forw,\n \"--right\",\n backw,\n \"--CPU\",\n str(cores),\n \"--min_contig_length\",\n str(min_length),\n #\"--JM\",\n #\"8G\",\n \"--max_memory\",\n max_memory, \n #\"--bypass_java_version_check\",\n #\"--normalize_reads\",\n \"--output\",\n output_folder\n ]\n print (\"Building contigs........\")\n with open(os.path.join(output_folder, \"%s_trinity_screen_out.txt\" %id_sample), 'w') as log_err_file:\n p = subprocess.Popen(command, stdout=log_err_file, stderr=log_err_file)\n p.communicate()\n filename = os.path.join(output_folder, \"%s_trinity_screen_out.txt\" %id_sample)\n file_object = open(filename, 'r')\n for line in file_object:\n if line.startswith('Error'):\n print(line)\n print ('SECAPR NOTE:\\nTrinity is currently only functional in the Linux distribution of SECAPR due to Java incompatibilities.\\n')\n #'However, the environment on MacOS machines can be easily altered by hand in order to properly run Trinity.\\n',\n #'This might however compromise the functionality of other parts of the SECAPR pipeline, therefore we recommend to undo the changes made in the envrionment after using Trinity by following the instructions below.\\n\\n',\n #'In order to run the Trinity assembly on MacOS do the following:\\n',\n #'1. within the SECAPR conda envrionment type: \"conda install openjdk=7\"\\n',\n #'2. run the secapr assemble_reads function with Trinity (using the \"--assembler trinity\" flag)\\n',\n #'3. after assembly rebuild the SECAPR default environment by typing \"conda install trimmomatic=0.33\"\\n'\n sys.exit()\n elif line.startswith('Trinity run failed.'):\n print(filename)\n print ('SECAPR NOTE:\\nTrinity is currently only functional in the Linux distribution of SECAPR.\\n')\n sys.exit()\n\n print((\"%s assembled. Trinity-stats are printed into %s\" %(id_sample, os.path.join(output_folder, \"%s_trinity_screen_out.txt\" %id_sample))))\n #except:\n # print (\"Trinity failed, maybe due to limited stack-size. Try increase stacksize with command 'zsh | ulimit -s unlimited | sh' and run again.\")\n\ndef assembly_abyss(forw,backw,singlef,singleb,output_folder,id_sample,kmer,cores,args):\n print(\"WARNING: Abyss is very memory heavy and depending on the size of your read files may throw an error because it's running out of memory. If running on a cluster, ask your system administrator how to allocate more memory to your abyss job.\")\n if cores > 1:\n print('WARNING: You chose to run Abyss on more than 1 core. This can cause problems on some systems and will make the script crash. 
In that case try running Abyss on a sinlge core instead.')\n print((\"De-novo assembly with abyss of sample %s:\" %id_sample))\n try:\n kmer = int(kmer)\n except:\n quit('\\n\\nError: Provided kmer value could not be formatted as integer. Please provide single numeric kmer value when choosing the Abyss assembler.')\n command = [\n \"abyss-pe\",\n \"--directory={}\".format(output_folder),\n \"k={}\".format(kmer),\n \"j={}\".format(cores),\n 'name={}'.format(id_sample),\n 'in={} {}'.format(forw,backw)\n ]\n if args.single_reads:\n command.append('se={} {}'.format(singlef,singleb))\n try:\n print (\"Building contigs........\")\n with open(os.path.join(output_folder, \"%s_abyss_screen_out.txt\" %id_sample), 'w') as log_err_file:\n p = subprocess.Popen(command, stdout=log_err_file)\n p.communicate()\n p.wait()\n print((\"%s assembled. Statistics are printed into %s\" %(id_sample, os.path.join(output_folder, \"%s_abyss_screen_out.txt\" %id_sample))))\n except:\n print((\"Could not assemble %s\" %id_sample))\n\ndef assembly_spades(forw,backw,singlef,singleb,output_folder,id_sample,kmer,cores,max_memory,args):\n print((\"De-novo assembly with spades of sample %s:\" %id_sample))\n kmer = str(kmer) \n command = [\n \"spades.py\",\n \"-k\",\n kmer,\n \"--only-assembler\",\n \"--pe1-1\",\n forw,\n \"--pe1-2\",\n backw,\n \"-o\",\n output_folder\n ]\n if args.single_reads:\n command+=[\"--pe1-s\", singlef, \"--pe1-s\",singleb]\n if args.cores > 1:\n command+=[\"--threads\", str(args.cores)]\n if args.max_memory:\n command+=[\"--memory\", str(args.max_memory)]\n # try:\n print (\"Building contigs........\")\n with open(os.path.join(output_folder, \"%s_spades_screen_out.txt\" %id_sample), 'w') as log_err_file:\n p = subprocess.Popen(command, stdout=log_err_file)\n p.communicate()\n p.wait()\n print((\"%s assembled. 
Statistics are printed into %s\" %(id_sample, os.path.join(output_folder, \"%s_spades_screen_out.txt\" %id_sample))))\n # except:\n # print((\"Could not assemble %s\" %id_sample))\n\ndef get_trinity_stats(sample_output_folder,sample_id,sample_contig_count_dict):\n print((\"Extracting statistics for %s\" %str(sample_id)))\n contig_file = \"%s/Trinity.fasta\" %sample_output_folder\n new_contig_file = \"%s/Trinity_formatted.fasta\" %sample_output_folder\n edit_trinity_headers(contig_file,new_contig_file)\n contig_count = count_contigs(new_contig_file)\n sample_contig_count_dict.setdefault(sample_id,contig_count)\n stats_df=pd.DataFrame.from_dict(sample_contig_count_dict, orient='index').reset_index()\n stats_df.columns = ['sample', 'total_contig_count']\n print(('#'*50))\n print(stats_df)\n return(stats_df)\n\ndef edit_trinity_headers(contig_file,new_contig_file):\n fasta = open(contig_file,'r')\n new_fasta = open(new_contig_file,'w')\n counter = 0\n for line in fasta:\n if line.startswith('>'):\n readcount = int(re.sub(r'.*len=([0-9]*).*','\\\\1',line).strip())\n new_header = '>%i %i XXX\\n' %(counter,readcount)\n counter += 1\n new_fasta.write(new_header)\n else:\n new_fasta.write(line)\n new_fasta.close()\n\ndef count_contigs(contig_file):\n \"\"\"Return a count of contigs from a fasta file\"\"\"\n return sum([1 for line in open(contig_file, 'r').readlines() if line.startswith('>')])\n\ndef remove_short_contigs(contig_file,min_length):\n fasta_content = list(SeqIO.parse(open(contig_file),'fasta'))\n new_fasta_content = []\n for record in fasta_content:\n contig_length = len(str(record.seq))\n if contig_length < min_length:\n pass\n else:\n new_fasta_content.append(record)\n SeqIO.write(new_fasta_content,contig_file, 'fasta-2line')\n \n\ndef get_stats_abyss(sample_output_folder,sample_id,sample_contig_count_dict):\n #contig_count_cmd = subprocess.Popen([\"tail\", \"-n\", \"2\", \"%s/%s.fa\" %('/'.join(sample_output_folder.split('/')[:-2]),sample_id)], stdout=subprocess.PIPE)\n #contig_count_pre = contig_count_cmd.communicate()[0]\n contig_file = \"%s/%s.fa\" %('/'.join(sample_output_folder.split('/')[:-2]),sample_id)\n contig_count = count_contigs(contig_file)\n #contig_count = contig_count_pre.split(' ')[0].replace('>','')\n sample_contig_count_dict.setdefault(sample_id,contig_count)\n stats_df=pd.DataFrame.from_dict(sample_contig_count_dict, orient='index').reset_index()\n stats_df.columns = ['sample', 'total_contig_count']\n print(('#'*50))\n print(stats_df)\n return(stats_df,contig_file)\n #contig_count, header, percent, sequence = contig_count_pre.split(\"\\t\") \n\ndef get_stats_spades(contig_file,sample_id,sample_contig_count_dict):\n #contig_count_cmd = subprocess.Popen([\"tail\", \"-n\", \"2\", \"%s/%s.fa\" %('/'.join(sample_output_folder.split('/')[:-2]),sample_id)], stdout=subprocess.PIPE)\n #contig_count_pre = contig_count_cmd.communicate()[0]\n contig_count = count_contigs(contig_file)\n #contig_count = contig_count_pre.split(' ')[0].replace('>','')\n sample_contig_count_dict.setdefault(sample_id,contig_count)\n stats_df=pd.DataFrame.from_dict(sample_contig_count_dict, orient='index').reset_index()\n stats_df.columns = ['sample', 'total_contig_count']\n print(('#'*50))\n print(stats_df)\n return(stats_df,contig_file)\n #contig_count, header, percent, sequence = contig_count_pre.split(\"\\t\") \n\ndef cleanup_trinity_assembly_folder(sample_output_folder, sample_id):\n# This function is copied (and slightly modified) from phyluce, written by Brant Faircloth\n print((\"Removing 
unnecessary files from the Trinity folder for %s\" %sample_id))\n files = glob.glob(os.path.join(sample_output_folder, '*'))\n # check the names to make sure we're not deleting something improperly\n names = [os.path.basename(f) for f in files]\n try:\n assert \"Trinity.fasta\" in names\n assert \"%s_trinity_screen_out.txt\" %sample_id in names\n except:\n raise IOError(\"Neither Trinity.fasta nor %s_trinity_screen_out.txt were found in output.\" %sample_id)\n for file in files:\n if not os.path.basename(file) in (\"Trinity.fasta\",\"Trinity_formatted.fasta\", \"%s_trinity_screen_out.txt\" %sample_id, \"%s_stats.txt\" %sample_id):\n if os.path.isfile(file) or os.path.islink(file):\n os.remove(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n\n\ndef main(args):\n # Set working directory\n out_folder = args.output\n out_dir = \"%s/stats\" %out_folder\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n # Get all the other input variables\n input_folder = args.input\n min_length = args.contig_length\n #trinity = args.trinity\n cores = args.cores\n assembler = args.assembler\n\n if args.kmer:\n kmer = str(args.kmer)\n else:\n if assembler == 'spades':\n kmer = '21,33,55,77,99,127'\n else:\n kmer = 35\n if args.max_memory:\n max_memory = args.max_memory\n else:\n max_memory = None\n #home_dir = os.getcwd()\n sample_contig_count_dict = {}\n if cores > 1:\n print((\"Running %s parallel on %d cores\" %(assembler, cores)))\n for subfolder, dirs, files in os.walk(input_folder):\n subfolder_path_elements = re.split(\"%s/\" %input_folder, subfolder)\n if subfolder_path_elements[-1] != input_folder:\n sample_folder = subfolder_path_elements[-1]\n sample_id = re.split(\"_clean\", sample_folder)[0]\n # Loop through each sample-folder and find read-files\n sample_output_folder = \"%s/%s\" %(out_dir, sample_id)\n if assembler == \"trinity\":\n sample_output_folder = '%s_trinity'%sample_output_folder \n if not os.path.exists(sample_output_folder):\n os.makedirs(sample_output_folder)\n for misc1, misc2, fastq in os.walk(subfolder):\n forward = \"\"\n backward = \"\"\n single_f = \"\"\n single_b = \"\"\n for element in fastq:\n if sample_id in element and element.endswith(\"READ1.fastq\"):\n forward = \"%s/%s\" %(subfolder,element)\n if sample_id in element and element.endswith(\"READ2.fastq\"):\n backward = \"%s/%s\" %(subfolder,element)\n if sample_id in element and element.endswith(\"READ1-single.fastq\"):\n single_f = \"%s/%s\" %(subfolder,element)\n if sample_id in element and element.endswith(\"READ2-single.fastq\"):\n single_b = \"%s/%s\" %(subfolder,element)\n if forward != \"\" and backward != \"\":\n print(('#' * 50))\n print((\"Processing sample %s\" %sample_id))\n start = time.time()\n if assembler == \"trinity\":\n assembly_trinity(forward,backward,sample_output_folder,sample_id,cores,min_length,max_memory)\n contig_count_df = get_trinity_stats(sample_output_folder,sample_id,sample_contig_count_dict)\n cleanup_trinity_assembly_folder(sample_output_folder,sample_id)\n print((\"#\" * 50))\n mv_cmd = \"mv %s/Trinity_formatted.fasta %s/%s.fasta\" %(sample_output_folder,out_folder,sample_id)\n os.system(mv_cmd)\n elif assembler == \"abyss\":\n assembly_abyss(forward,backward,single_f,single_b,sample_output_folder,sample_id,kmer,cores,args)\n files = glob.glob(os.path.join(sample_output_folder,'*'))\n links = [f for f in files if os.path.islink(f)]\n for l in links:\n if l.endswith(\"-contigs.fa\"):\n contig_file = os.path.realpath(l)\n mv_contig = \"mv %s %s/../../%s.fa\" 
%(contig_file,sample_output_folder,sample_id)\n os.system(mv_contig)\n #mv_cmd1 = \"mv %s/%s* %s\" %(home_dir,sample_id,sample_output_folder)\n #os.system(mv_cmd1)\n #mv_cmd2 = \"mv %s/coverage.hist %s\" %(home_dir,sample_output_folder)\n #os.system(mv_cmd2)\n contig_count_df,contig_file = get_stats_abyss(sample_output_folder,sample_id,sample_contig_count_dict)\n remove_short_contigs(contig_file,min_length)\n elif assembler == 'spades':\n assembly_spades(forward,backward,single_f,single_b,sample_output_folder,sample_id,kmer,cores,max_memory,args)\n contig_file = os.path.join(sample_output_folder,'contigs.fasta')\n new_contig_file = '%s/../../%s.fa'%(sample_output_folder,sample_id)\n mv_contig = \"cp %s %s\" %(contig_file,new_contig_file)\n os.system(mv_contig)\n contig_count_df,contig_file = get_stats_spades(new_contig_file,sample_id,sample_contig_count_dict)\n remove_short_contigs(new_contig_file,min_length)\n end = time.time()\n print('Assembled contigs for sample %s in %i minutes' %(sample_id,int(np.round((end-start)/60))))\n else:\n print((\"Error: Read-files for sample %s could not be found.Please check if fastq file names end with 'READ1.fastq' and 'READ2.fastq' respectively and if all files are unzipped.\" %sample_id))\n raise SystemExit\n\n try:\n previous_stats_df = pd.read_csv(os.path.join(input_folder,'sample_stats.txt'),sep='\\t')\n counter = 0\n for index,row in previous_stats_df.iterrows():\n sample_name = str(row['sample'])\n if sample_name in list(contig_count_df['sample']):\n new_info = contig_count_df[contig_count_df['sample']==sample_name]['total_contig_count']\n new_value = new_info.values[0]\n new_name = new_info.name\n headers = np.array(row.index)\n old_values = row.values\n new_index = np.append(headers,new_name)\n new_values = np.append(old_values,new_value)\n if counter == 0:\n new_values_previous = new_values\n else:\n new_values_previous = np.vstack([new_values_previous, new_values])\n counter += 1\n new_stats_df = pd.DataFrame(data=new_values_previous,columns=new_index)\n\n except:\n print('No previous stats file found, creating new stats file.')\n new_stats_df = contig_count_df\n\n new_stats_df.to_csv(os.path.join(out_folder,'sample_stats.txt'),sep=\"\\t\",index=False)\n\n\n\n" ]
[ [ "numpy.array", "pandas.DataFrame.from_dict", "pandas.DataFrame", "numpy.round", "numpy.append", "numpy.vstack" ] ]
lstasiak/ml-django-app
[ "23547c7eaa3ef6a80ae9b39f2f84430c4e56280d" ]
[ "backend/apps/ml/income_classifier/random_forest.py" ]
[ "from pathlib import Path\n\nimport joblib\nimport pandas as pd\n\n\nclass RandomForestClassifier:\n def __init__(self):\n path_to_artifacts = \"./research/\"\n self.values_fill_missing = joblib.load(path_to_artifacts + \"train_mode.joblib\")\n self.encoders = joblib.load(path_to_artifacts + \"encoders.joblib\")\n self.model = joblib.load(path_to_artifacts + \"random_forest.joblib\")\n\n def preprocessing(self, input_data):\n # JSON to pandas DataFrame\n input_data = pd.DataFrame(input_data, index=[0])\n # fill missing values\n input_data.fillna(self.values_fill_missing)\n # convert categoricals\n for column in [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]:\n categorical_convert = self.encoders[column]\n input_data[column] = categorical_convert.transform(input_data[column])\n\n return input_data\n\n def predict(self, input_data):\n return self.model.predict_proba(input_data)\n\n def postprocessing(self, input_data):\n label = \"<=50K\"\n if input_data[1] > 0.5:\n label = \">50K\"\n return {\"probability\": input_data[1], \"label\": label, \"status\": \"OK\"}\n\n def compute_prediction(self, input_data):\n try:\n input_data = self.preprocessing(input_data)\n prediction = self.predict(input_data)[0] # only one sample\n prediction = self.postprocessing(prediction)\n except Exception as e:\n return {\"status\": \"Error\", \"message\": str(e)}\n\n return prediction\n" ]
[ [ "pandas.DataFrame" ] ]
psaks/auto-sklearn
[ "e21047aa7b52e762a58992e33ffcebb420586e67" ]
[ "test/test_pipeline/test_regression.py" ]
[ "import copy\nimport itertools\nimport resource\nimport sys\nimport tempfile\nimport traceback\nimport unittest\nimport unittest.mock\n\nfrom joblib import Memory\nimport numpy as np\nimport sklearn.datasets\nimport sklearn.decomposition\nfrom sklearn.base import clone\nimport sklearn.ensemble\nimport sklearn.svm\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter\n\nfrom autosklearn.pipeline.regression import SimpleRegressionPipeline\nfrom autosklearn.pipeline.components.base import \\\n AutoSklearnPreprocessingAlgorithm, AutoSklearnRegressionAlgorithm\nimport autosklearn.pipeline.components.regression as regression_components\nfrom autosklearn.pipeline.components.base import AutoSklearnComponent, AutoSklearnChoice\nimport autosklearn.pipeline.components.feature_preprocessing as preprocessing_components\nfrom autosklearn.pipeline.util import get_dataset\nfrom autosklearn.pipeline.constants import SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA, PREDICTIONS\n\n\nclass SimpleRegressionPipelineTest(unittest.TestCase):\n _multiprocess_can_split_ = True\n\n def test_io_dict(self):\n regressors = regression_components._regressors\n for r in regressors:\n if regressors[r] == regression_components.RegressorChoice:\n continue\n props = regressors[r].get_properties()\n self.assertIn('input', props)\n self.assertIn('output', props)\n inp = props['input']\n output = props['output']\n\n self.assertIsInstance(inp, tuple)\n self.assertIsInstance(output, tuple)\n for i in inp:\n self.assertIn(i, (SPARSE, DENSE, SIGNED_DATA, UNSIGNED_DATA))\n self.assertEqual(output, (PREDICTIONS,))\n self.assertIn('handles_regression', props)\n self.assertTrue(props['handles_regression'])\n self.assertIn('handles_classification', props)\n self.assertIn('handles_multiclass', props)\n self.assertIn('handles_multilabel', props)\n self.assertIn('handles_multioutput', props)\n self.assertFalse(props['handles_classification'])\n self.assertFalse(props['handles_multiclass'])\n self.assertFalse(props['handles_multilabel'])\n\n def test_find_regressors(self):\n regressors = regression_components._regressors\n self.assertGreaterEqual(len(regressors), 1)\n for key in regressors:\n if hasattr(regressors[key], 'get_components'):\n continue\n self.assertIn(AutoSklearnRegressionAlgorithm, regressors[key].__bases__)\n\n def test_find_preprocessors(self):\n preprocessors = preprocessing_components._preprocessors\n self.assertGreaterEqual(len(preprocessors), 1)\n for key in preprocessors:\n if hasattr(preprocessors[key], 'get_components'):\n continue\n self.assertIn(AutoSklearnPreprocessingAlgorithm, preprocessors[key].__bases__)\n\n def test_configurations(self):\n cs = SimpleRegressionPipeline().get_hyperparameter_search_space()\n\n self._test_configurations(cs)\n\n def test_configurations_signed_data(self):\n dataset_properties = {'signed': True}\n cs = SimpleRegressionPipeline(dataset_properties=dataset_properties).\\\n get_hyperparameter_search_space()\n\n self._test_configurations(configurations_space=cs,\n dataset_properties=dataset_properties)\n\n def test_configurations_sparse(self):\n dataset_properties = {'sparse': True}\n cs = SimpleRegressionPipeline(\n dataset_properties=dataset_properties\n ).get_hyperparameter_search_space()\n\n self._test_configurations(cs, make_sparse=True,\n dataset_properties=dataset_properties)\n\n def test_multioutput(self):\n cache = 
Memory(location=tempfile.gettempdir())\n cached_func = cache.cache(\n sklearn.datasets.make_regression\n )\n X, Y = cached_func(\n n_samples=250,\n n_features=20,\n n_informative=9,\n n_targets=4,\n bias=0.5,\n effective_rank=10,\n tail_strength=0.4,\n noise=0.3,\n shuffle=True,\n coef=False,\n random_state=1\n )\n X_train = X[:200, :]\n Y_train = Y[:200, :]\n X_test = X[200:, :]\n Y_test = Y[200:, :]\n\n data = {'X_train': X_train, 'Y_train': Y_train,\n 'X_test': X_test, 'Y_test': Y_test}\n\n dataset_properties = {'multioutput': True}\n cs = SimpleRegressionPipeline(dataset_properties=dataset_properties).\\\n get_hyperparameter_search_space()\n self._test_configurations(cs, data=data,\n dataset_properties=dataset_properties)\n\n def _test_configurations(self, configurations_space, make_sparse=False,\n data=None, dataset_properties=None):\n # Use a limit of ~4GiB\n limit = 3072 * 1024 * 1024\n resource.setrlimit(resource.RLIMIT_AS, (limit, limit))\n\n configurations_space.seed(1)\n\n for i in range(10):\n config = configurations_space.sample_configuration()\n config._populate_values()\n\n # Restrict configurations which could take too long on travis-ci\n restrictions = {'regressor:adaboost:n_estimators': 50,\n 'regressor:adaboost:max_depth': 1,\n 'feature_preprocessor:kernel_pca:n_components': 10,\n 'feature_preprocessor:kitchen_sinks:n_components': 50,\n 'regressor:libsvm_svc:degree': 2,\n 'regressor:libsvm_svr:degree': 2,\n 'regressor:libsvm_svr:C': 1.,\n 'feature_preprocessor:truncatedSVD:target_dim': 10,\n 'feature_preprocessor:polynomial:degree': 2,\n 'regressor:lda:n_components': 10}\n\n for restrict_parameter in restrictions:\n restrict_to = restrictions[restrict_parameter]\n if restrict_parameter in config and config[restrict_parameter] is not None:\n config._values[restrict_parameter] = restrict_to\n\n if data is None:\n X_train, Y_train, X_test, Y_test = get_dataset(\n dataset='boston', make_sparse=make_sparse, add_NaNs=True)\n else:\n X_train = data['X_train'].copy()\n Y_train = data['Y_train'].copy()\n X_test = data['X_test'].copy()\n data['Y_test'].copy()\n\n cls = SimpleRegressionPipeline(\n random_state=1,\n dataset_properties=dataset_properties\n )\n cls.set_hyperparameters(config)\n\n # First make sure that for this configuration, setting the parameters\n # does not mistakenly set the estimator as fitted\n for name, step in cls.named_steps.items():\n with self.assertRaisesRegex(sklearn.exceptions.NotFittedError,\n \"instance is not fitted yet\"):\n check_is_fitted(step)\n\n try:\n cls.fit(X_train, Y_train)\n # After fit, all components should be tagged as fitted\n # by sklearn. Check is fitted raises an exception if that\n # is not the case\n try:\n for name, step in cls.named_steps.items():\n check_is_fitted(step)\n except sklearn.exceptions.NotFittedError:\n self.fail(\"config={} raised NotFittedError unexpectedly!\".format(\n config\n ))\n\n cls.predict(X_test)\n except MemoryError:\n continue\n except np.linalg.LinAlgError:\n continue\n except ValueError as e:\n if \"Floating-point under-/overflow occurred at epoch\" in \\\n e.args[0]:\n continue\n elif \"removed all features\" in e.args[0]:\n continue\n elif \"all features are discarded\" in e.args[0]:\n continue\n elif \"Numerical problems in QDA\" in e.args[0]:\n continue\n elif 'Bug in scikit-learn' in e.args[0]:\n continue\n elif 'The condensed distance matrix must contain only finite ' \\\n 'values.' 
in e.args[0]:\n continue\n else:\n print(config)\n print(traceback.format_exc())\n raise e\n except RuntimeWarning as e:\n if \"invalid value encountered in sqrt\" in e.args[0]:\n continue\n elif \"divide by zero encountered in\" in e.args[0]:\n continue\n elif \"invalid value encountered in divide\" in e.args[0]:\n continue\n elif \"invalid value encountered in true_divide\" in e.args[0]:\n continue\n elif \"invalid value encountered in multiply\" in e.args[0]:\n continue\n else:\n print(config)\n traceback.print_tb(sys.exc_info()[2])\n raise e\n except UserWarning as e:\n if \"FastICA did not converge\" in e.args[0]:\n continue\n else:\n print(config)\n traceback.print_tb(sys.exc_info()[2])\n raise e\n except Exception as e:\n if \"Multiple input features cannot have the same target value\" in e.args[0]:\n continue\n else:\n print(config)\n traceback.print_tb(sys.exc_info()[2])\n raise e\n\n def test_default_configuration(self):\n for i in range(2):\n X_train, Y_train, X_test, Y_test = get_dataset(dataset='diabetes')\n auto = SimpleRegressionPipeline(random_state=1)\n auto = auto.fit(X_train, Y_train)\n predictions = auto.predict(copy.deepcopy(X_test))\n # The lower the worse\n r2_score = sklearn.metrics.r2_score(Y_test, predictions)\n self.assertAlmostEqual(0.3458397471855429, r2_score, places=2)\n model_score = auto.score(copy.deepcopy(X_test), Y_test)\n self.assertAlmostEqual(model_score, r2_score, places=5)\n\n def test_default_configuration_iterative_fit(self):\n regressor = SimpleRegressionPipeline(\n random_state=1,\n include={\n 'regressor': ['random_forest'],\n 'feature_preprocessor': ['no_preprocessing']\n }\n )\n X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston')\n regressor.fit_transformer(X_train, Y_train)\n for i in range(1, 11):\n regressor.iterative_fit(X_train, Y_train)\n self.assertEqual(regressor.steps[-1][-1].choice.estimator.n_estimators,\n i)\n\n def test_repr(self):\n representation = repr(SimpleRegressionPipeline())\n cls = eval(representation)\n self.assertIsInstance(cls, SimpleRegressionPipeline)\n\n def test_get_hyperparameter_search_space(self):\n cs = SimpleRegressionPipeline().get_hyperparameter_search_space()\n self.assertIsInstance(cs, ConfigurationSpace)\n conditions = cs.get_conditions()\n hyperparameters = cs.get_hyperparameters()\n forbiddens = cs.get_forbiddens()\n self.assertEqual(156, len(hyperparameters))\n self.assertEqual(len(hyperparameters) - 3, len(conditions))\n self.assertEqual(len(forbiddens), 35)\n\n def test_get_hyperparameter_search_space_include_exclude_models(self):\n regressor = SimpleRegressionPipeline(\n include={'regressor': ['random_forest']}\n )\n cs = regressor.get_hyperparameter_search_space()\n self.assertEqual(\n cs.get_hyperparameter('regressor:__choice__'),\n CategoricalHyperparameter('regressor:__choice__', ['random_forest']),\n )\n\n # TODO add this test when more than one regressor is present\n regressor = SimpleRegressionPipeline(\n exclude={'regressor': ['random_forest']}\n )\n cs = regressor.get_hyperparameter_search_space()\n self.assertNotIn('random_forest', str(cs))\n\n regressor = SimpleRegressionPipeline(\n include={'feature_preprocessor': ['pca']}\n )\n cs = regressor.get_hyperparameter_search_space()\n self.assertEqual(cs.get_hyperparameter(\n 'feature_preprocessor:__choice__'),\n CategoricalHyperparameter('feature_preprocessor:__choice__', ['pca']))\n\n regressor = SimpleRegressionPipeline(\n exclude={'feature_preprocessor': ['no_preprocessing']}\n )\n cs = 
regressor.get_hyperparameter_search_space()\n self.assertNotIn('no_preprocessing', str(cs))\n\n def test_get_hyperparameter_search_space_preprocessor_contradicts_default_classifier(\n self\n ):\n regressor = SimpleRegressionPipeline(\n include={'feature_preprocessor': ['densifier']},\n dataset_properties={'sparse': True}\n )\n cs = regressor.get_hyperparameter_search_space()\n self.assertEqual(\n cs.get_hyperparameter('regressor:__choice__').default_value,\n 'gradient_boosting'\n )\n\n regressor = SimpleRegressionPipeline(\n include={'feature_preprocessor': ['nystroem_sampler']}\n )\n cs = regressor.get_hyperparameter_search_space()\n self.assertEqual(\n cs.get_hyperparameter('regressor:__choice__').default_value,\n 'sgd'\n )\n\n def test_get_hyperparameter_search_space_only_forbidden_combinations(self):\n self.assertRaisesRegex(\n ValueError,\n \"Cannot find a legal default configuration.\",\n SimpleRegressionPipeline,\n include={\n 'regressor': ['random_forest'],\n 'feature_preprocessor': ['kitchen_sinks']\n }\n )\n\n # It must also be catched that no classifiers which can handle sparse\n # data are located behind the densifier\n self.assertRaisesRegex(\n ValueError,\n \"Cannot find a legal default configuration\",\n SimpleRegressionPipeline,\n include={\n 'regressor': ['extra_trees'],\n 'feature_preprocessor': ['densifier']\n },\n dataset_properties={'sparse': True}\n )\n\n @unittest.skip(\"test_get_hyperparameter_search_space_dataset_properties\" +\n \" Not yet Implemented\")\n def test_get_hyperparameter_search_space_dataset_properties(self):\n # TODO: We do not have any dataset properties for regression, so this\n # test is somewhat stupid\n pass\n \"\"\"\n full_cs = SimpleRegressionPipeline.get_hyperparameter_search_space()\n cs_mc = SimpleRegressionPipeline.get_hyperparameter_search_space()\n self.assertEqual(full_cs, cs_mc)\n\n cs_ml = SimpleRegressionPipeline.get_hyperparameter_search_space()\n self.assertNotIn('k_nearest_neighbors', str(cs_ml))\n self.assertNotIn('liblinear', str(cs_ml))\n self.assertNotIn('libsvm_svc', str(cs_ml))\n self.assertNotIn('sgd', str(cs_ml))\n\n cs_sp = SimpleRegressionPipeline.get_hyperparameter_search_space(\n sparse=True)\n self.assertNotIn('extra_trees', str(cs_sp))\n self.assertNotIn('gradient_boosting', str(cs_sp))\n self.assertNotIn('random_forest', str(cs_sp))\n\n cs_mc_ml = SimpleRegressionPipeline.get_hyperparameter_search_space()\n self.assertEqual(cs_ml, cs_mc_ml)\n\n self.assertRaisesRegex(ValueError,\n \"No regressor to build a configuration space \"\n \"for...\", SimpleRegressionPipeline.\n get_hyperparameter_search_space,\n multiclass=True, multilabel=True, sparse=True)\n \"\"\"\n\n def test_predict_batched(self):\n include = {'regressor': ['decision_tree']}\n cs = SimpleRegressionPipeline(include=include).get_hyperparameter_search_space()\n default = cs.get_default_configuration()\n regressor = SimpleRegressionPipeline(\n config=default,\n random_state=1,\n include=include\n )\n\n X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston')\n regressor.fit(X_train, Y_train)\n X_test_ = X_test.copy()\n prediction_ = regressor.predict(X_test_)\n mock_predict = unittest.mock.Mock(wraps=regressor.steps[-1][-1].predict)\n regressor.steps[-1][-1].predict = mock_predict\n prediction = regressor.predict(X_test, batch_size=20)\n self.assertEqual((356,), prediction.shape)\n self.assertEqual(18, mock_predict.call_count)\n np.testing.assert_array_almost_equal(prediction_, prediction)\n\n def test_predict_batched_sparse(self):\n 
dataset_properties = {'sparse': True}\n include = {'regressor': ['decision_tree']}\n\n cs = SimpleRegressionPipeline(\n dataset_properties=dataset_properties,\n include=include\n ).get_hyperparameter_search_space()\n\n default = cs.get_default_configuration()\n regressor = SimpleRegressionPipeline(\n config=default,\n random_state=1,\n dataset_properties=dataset_properties,\n include=include\n )\n\n X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston',\n make_sparse=True)\n regressor.fit(X_train, Y_train)\n X_test_ = X_test.copy()\n prediction_ = regressor.predict(X_test_)\n mock_predict = unittest.mock.Mock(wraps=regressor.steps[-1][-1].predict)\n regressor.steps[-1][-1].predict = mock_predict\n prediction = regressor.predict(X_test, batch_size=20)\n self.assertEqual((356,), prediction.shape)\n self.assertEqual(18, mock_predict.call_count)\n np.testing.assert_array_almost_equal(prediction_, prediction)\n\n @unittest.skip(\"test_check_random_state Not yet Implemented\")\n def test_check_random_state(self):\n raise NotImplementedError()\n\n @unittest.skip(\"test_validate_input_X Not yet Implemented\")\n def test_validate_input_X(self):\n raise NotImplementedError()\n\n @unittest.skip(\"test_validate_input_Y Not yet Implemented\")\n def test_validate_input_Y(self):\n raise NotImplementedError()\n\n def test_pipeline_clonability(self):\n X_train, Y_train, X_test, Y_test = get_dataset(dataset='boston')\n auto = SimpleRegressionPipeline(random_state=1)\n auto = auto.fit(X_train, Y_train)\n auto_clone = clone(auto)\n auto_clone_params = auto_clone.get_params()\n\n # Make sure all keys are copied properly\n for k, v in auto.get_params().items():\n self.assertIn(k, auto_clone_params)\n\n # Make sure the params getter of estimator are honored\n klass = auto.__class__\n new_object_params = auto.get_params(deep=False)\n for name, param in new_object_params.items():\n new_object_params[name] = clone(param, safe=False)\n new_object = klass(**new_object_params)\n params_set = new_object.get_params(deep=False)\n\n for name in new_object_params:\n param1 = new_object_params[name]\n param2 = params_set[name]\n self.assertEqual(param1, param2)\n\n def test_set_params(self):\n pass\n\n def test_get_params(self):\n pass\n\n def _test_set_hyperparameter_choice(self, expected_key, implementation, config_dict):\n \"\"\"\n Given a configuration in config, this procedure makes sure that\n the given implementation, which should be a Choice component, honors\n the type of the object, and any hyperparameter associated to it\n \"\"\"\n keys_checked = [expected_key]\n implementation_type = config_dict[expected_key]\n expected_type = implementation.get_components()[implementation_type]\n self.assertIsInstance(implementation.choice, expected_type)\n\n # Are there further hyperparams?\n # A choice component might have attribute requirements that we need to check\n expected_sub_key = expected_key.replace(':__choice__', ':') + implementation_type\n expected_attributes = {}\n if 'data_preprocessor:__choice__' in expected_key:\n # We have to check both the numerical and categorical\n to_check = {\n 'numerical_transformer': implementation.choice.numer_ppl.named_steps,\n 'categorical_transformer': implementation.choice.categ_ppl.named_steps,\n }\n\n for data_type, pipeline in to_check.items():\n for sub_name, sub_step in pipeline.items():\n # If it is a Choice, make sure it is the correct one!\n if isinstance(sub_step, AutoSklearnChoice):\n key = \"data_preprocessor:feature_type:{}:{}:__choice__\".format(\n 
data_type,\n sub_name\n )\n keys_checked.extend(\n self._test_set_hyperparameter_choice(\n key, sub_step, config_dict\n )\n )\n # If it is a component, make sure it has the correct hyperparams\n elif isinstance(sub_step, AutoSklearnComponent):\n keys_checked.extend(\n self._test_set_hyperparameter_component(\n \"data_preprocessor:feature_type:{}:{}\".format(\n data_type,\n sub_name\n ),\n sub_step, config_dict\n )\n )\n else:\n raise ValueError(\"New type of pipeline component!\")\n return keys_checked\n else:\n for key, value in config_dict.items():\n if key != expected_key and expected_sub_key in key:\n expected_attributes[key.split(':')[-1]] = value\n keys_checked.append(key)\n if expected_attributes:\n attributes = vars(implementation.choice)\n # Cannot check the whole dictionary, just names, as some\n # classes map the text hyperparameter directly to a function!\n for expected_attribute in expected_attributes.keys():\n self.assertIn(expected_attribute, attributes.keys())\n return keys_checked\n\n def _test_set_hyperparameter_component(self, expected_key, implementation, config_dict):\n \"\"\"\n Given a configuration in config, this procedure makes sure that\n the given implementation, which should be a autosklearn component, honors\n is created with the desired hyperparameters stated in config_dict\n \"\"\"\n keys_checked = []\n attributes = vars(implementation)\n expected_attributes = {}\n for key, value in config_dict.items():\n if expected_key in key:\n keys_checked.append(key)\n key = key.replace(expected_key + ':', '')\n if ':' in key:\n raise ValueError(\"This utility should only be called with a \"\n \"matching string that produces leaf configurations, \"\n \"that is no further colons are expected, yet key={}\"\n \"\".format(\n key\n )\n )\n expected_attributes[key] = value\n # Cannot check the whole dictionary, just names, as some\n # classes map the text hyperparameter directly to a function!\n for expected_attribute in expected_attributes.keys():\n self.assertIn(expected_attribute, attributes.keys())\n return keys_checked\n\n def test_set_hyperparameters_honors_configuration(self):\n \"\"\"Makes sure that a given configuration is honored in practice.\n\n This method tests that the set hyperparameters actually create objects\n that comply with the given configuration. 
It iterates trough the pipeline to\n make sure we did not miss a step, but also checks at the end that every\n configuration from Config was checked\n\n Also considers random_state and ensures pipeline steps correctly recieve\n the right random_state\n \"\"\"\n\n all_combinations = list(itertools.product([True, False], repeat=4))\n for sparse, multilabel, signed, multiclass, in all_combinations:\n dataset_properties = {\n 'sparse': sparse,\n 'multilabel': multilabel,\n 'multiclass': multiclass,\n 'signed': signed,\n }\n random_state = 1\n auto = SimpleRegressionPipeline(\n random_state=random_state,\n dataset_properties=dataset_properties,\n )\n cs = auto.get_hyperparameter_search_space()\n config = cs.sample_configuration()\n\n # Set hyperparameters takes a given config and translate\n # a config to an actual implementation\n auto.set_hyperparameters(config)\n config_dict = config.get_dictionary()\n\n # keys to check is our mechanism to ensure that every\n # every config key is checked\n keys_checked = []\n\n for name, step in auto.named_steps.items():\n if name == 'data_preprocessor':\n keys_checked.extend(\n self._test_set_hyperparameter_choice(\n 'data_preprocessor:__choice__', step, config_dict\n )\n )\n self.assertEqual(step.random_state, random_state)\n elif name == 'feature_preprocessor':\n keys_checked.extend(\n self._test_set_hyperparameter_choice(\n 'feature_preprocessor:__choice__', step, config_dict\n )\n )\n self.assertEqual(step.random_state, random_state)\n elif name == 'regressor':\n keys_checked.extend(\n self._test_set_hyperparameter_choice(\n 'regressor:__choice__', step, config_dict\n )\n )\n self.assertEqual(step.random_state, random_state)\n else:\n raise ValueError(\"Found another type of step! Need to update this check\"\n \" {}. \".format(name)\n )\n\n # Make sure we checked the whole configuration\n self.assertSetEqual(set(config_dict.keys()), set(keys_checked))\n" ]
[ [ "numpy.testing.assert_array_almost_equal", "sklearn.base.clone", "sklearn.utils.validation.check_is_fitted" ] ]
griffincalme/MicroDeconvolution
[ "447af89e4db9a9874a475cba4e58b6d9dee502c3" ]
[ "ScriptsUsedInPaper/IHCRandomWalkTimed.py" ]
[ "'''\nTo Do\nShow speed comparison between my hand annotation and\nthe computer output with 6600k @ 4.5 GHz\nand 6200U as well\nwith and without library speedup cg_mg\n\nfor human counting time for just a count and also time\nfor calculations of percent coverage. using pixels? or ratio of inches?\nhand annotate in paint\n\nChange title for publication\n\"Color Deconvolution and Random Walker Segmentation\nfor Automating Immunohistochemical Characterization in Breast Cancer\"\n\nDo bland-altman plot comparing mean and differences for computer vs human\n\nIf the regression of counts to patient outcome doesnt work explain why not\n\n'''\n\n\n\nimport numpy as np\nfrom numpy import linalg\nimport matplotlib.pyplot as plt\n\nfrom skimage.io import imread\nfrom skimage.color import separate_stains, rgb2grey\nfrom skimage.exposure import rescale_intensity\nfrom skimage.segmentation import random_walker\nfrom skimage import morphology\nfrom skimage.filters import sobel\n\n#from pyamg import *\nimport time\n\n\ndef RunScript():\n\n # Color deconvolution\n # Normalized optical density matrix\n # see Ruifrok AC, Johnston DA. Quantification of histological staining by color deconvolution.\n # R G B\n # X X X Hematoxylin(0)\n # X X X Red(1)\n # X X X DAB(2)\n # Hematoxylin(0), Red(1), DAB(2)\n rgb_from_hrd = np.array([[0.644, 0.710, 0.285],\n [0.0326, 0.873, 0.487],\n [0.270, 0.562, 0.781]])\n # conv_matrix\n hrd_from_rgb = linalg.inv(rgb_from_hrd)\n\n # Import picture\n #ihc_rgb = imread(r'TestImage.jpg')\n ihc_rgb = imread(r'TimedRunImage.jpg')\n\n # Rescale signals so that intensity ranges from 0 to 1\n # ihc_hrd[:, :, (0,1, or 2 -- is the color channel)]\n def stainspace_to_2d_array(ihc_xyz, channel):\n rescale = rescale_intensity(ihc_xyz[:, :, channel], out_range=(0, 1))\n stain_array = np.dstack((np.zeros_like(rescale), rescale, rescale))\n grey_array = rgb2grey(stain_array)\n\n return grey_array\n\n # Stain space conversion\n ihc_hrd = separate_stains(ihc_rgb, hrd_from_rgb)\n\n DAB_Grey_Array = stainspace_to_2d_array(ihc_hrd, 2)\n Hema_Gray_Array = stainspace_to_2d_array(ihc_hrd, 0)\n permRed_Gray_Array = stainspace_to_2d_array(ihc_hrd, 1)\n\n # Get markers for random walk\n def get_markers(grey_array, bottom_thresh, top_thresh):\n markers = np.zeros_like(grey_array)\n markers[grey_array < bottom_thresh] = 1\n markers[grey_array > top_thresh] = 2\n\n return markers\n\n # perform Random Walker, fills in positive regions\n DAB_segmentation = random_walker(DAB_Grey_Array, get_markers(DAB_Grey_Array, .3, .5), beta=130, mode='cg')\n Hema_segmentation = random_walker(Hema_Gray_Array, get_markers(Hema_Gray_Array, .2, .4), beta=130, mode='cg')\n permRed_segmentation = random_walker(permRed_Gray_Array, get_markers(permRed_Gray_Array, .4, .5), beta=130, mode='cg')\n\n \"\"\"PRINTING OUTPUT\"\"\"\n\n print(20 *'-')\n print(' ')\n\n '''Compute and Output'''\n # Compute and output percentages of pixels stained by each chromagen\n pic_dimensions = np.shape(DAB_segmentation) # both arrays same shape\n total_pixels = pic_dimensions[0] * pic_dimensions[1]\n\n # change negative pixel values from 1 -> 0, positives 2 -> 1\n subtrahend_array = np.ones_like(DAB_segmentation)\n DAB_segmentation = np.subtract(DAB_segmentation, subtrahend_array)\n Hema_segmentation = np.subtract(Hema_segmentation, subtrahend_array)\n permRed_segmentation = np.subtract(permRed_segmentation, subtrahend_array)\n\n # count positive pixels\n DAB_pixels = np.count_nonzero(DAB_segmentation)\n Hema_pixels = 
np.count_nonzero(Hema_segmentation)\n red_pixels = np.count_nonzero(permRed_segmentation)\n\n DAB_coverage_percent = (round((DAB_pixels / total_pixels * 100), 1))\n print(\"The percentage of the image covered by DAB is: \" + str(DAB_coverage_percent) + \"%\")\n\n Hema_coverage_percent = (round((Hema_pixels / total_pixels * 100), 1))\n print(\"The percentage of the image covered by Hematoxylin is: \" + str(Hema_coverage_percent) + \"%\")\n\n total_cell_array = np.add(DAB_segmentation, Hema_segmentation)\n total_cell_pixels = np.count_nonzero(total_cell_array)\n\n total_cell_percent = (round((total_cell_pixels / total_pixels * 100), 1))\n print(\"The percentage of the image covered by DAB & Hematoxylin is: \" + str(total_cell_percent) + \"%\")\n\n percent_pos_cells = (round((DAB_pixels / total_cell_pixels * 100), 1))\n print(\"The percentage of CD3+ cells out of the total number of cells is: \" + str(percent_pos_cells) + \"%\")\n\n PercentPos = percent_pos_cells\n\n \"\"\"\n if PercentPos >= 81:\n print('Proportion Score: 5')\n proportion_score = 5\n elif PercentPos >= 61:\n print('Proportion Score: 4')\n proportion_score = 4\n elif PercentPos >= 41:\n print('Proportion Score: 3')\n proportion_score = 3\n elif PercentPos >= 21:\n print('Proportion Score: 2')\n proportion_score = 2\n elif PercentPos >= 5:\n print('Proportion Score: 1')\n proportion_score = 1\n elif PercentPos >= 0:\n print('Proportion Score: 0')\n proportion_score = 0\n else:\n print('error, proportion score below zero?')\n proportion_score = -1\n \"\"\"\n\n # Cytokines\n print(\" \")\n\n Red_coverage_percent = (round((red_pixels / total_pixels * 100), 1))\n print(\"The percentage of the image covered by cytokines is: \" + str(Red_coverage_percent) + \"%\")\n\n red_plus_total_array = np.add(total_cell_array, permRed_segmentation)\n red_plus_total_pixels = np.count_nonzero(red_plus_total_array)\n\n adjusted_red_coverage_percent = (round((red_pixels / red_plus_total_pixels * 100), 1))\n\n print(\"The percentage of the area covered by cytokines, with non-cellular regions subtracted is: \" + str(\n adjusted_red_coverage_percent) + \"%\")\n\n \"\"\"PLOTTING IMAGES\"\"\"\n\n # Plot images\n fig, axes = plt.subplots(2, 2, figsize=(12, 11))\n # ax0 = axes.ravel()\n ax0, ax1, ax2, ax3 = axes.ravel()\n\n ax0.imshow(ihc_rgb, cmap=plt.cm.gray, interpolation='nearest')\n ax0.set_title(\"Original\")\n\n ax1.imshow(DAB_segmentation, cmap=plt.cm.gray, interpolation='nearest')\n ax1.set_title(\"DAB\")\n\n ax2.imshow(permRed_segmentation, cmap=plt.cm.gray)\n ax2.set_title(\"Permanent Red\")\n\n ax3.imshow(Hema_segmentation, cmap=plt.cm.gray)\n ax3.set_title(\"Hematoxylin\")\n\n for ax in axes.ravel():\n ax.axis('on')\n\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)\n\n #plt.show()\n\n\n\n\ndef timed_run(trials):\n summed_duration = 0\n min_duration = 9999999\n max_duration = 0\n\n for i in range(trials):\n start = time.clock()\n RunScript()\n end = time.clock()\n duration = end - start\n print('\\n This process took ' + str(round((duration), 2)) + ' seconds to complete')\n summed_duration = duration + summed_duration\n\n if max_duration < duration:\n max_duration = duration\n\n if min_duration > duration:\n min_duration = duration\n\n average_duration = summed_duration / trials\n print(20 * '-')\n print('\\nAverage runtime over ' + str(trials) + ' trials was ' + str(round(average_duration, 2)))\n\n print('Max duration: ' + str(round(max_duration,2)))\n print('Min duration: ' + 
str(round(min_duration, 2)))\n\ntimed_run(trials=3)\n" ]
[ [ "numpy.array", "numpy.ones_like", "numpy.count_nonzero", "numpy.add", "numpy.zeros_like", "numpy.shape", "matplotlib.pyplot.subplots", "numpy.subtract", "numpy.linalg.inv" ] ]
NivekNey/tensorflow
[ "3e21fe5faedab3a8258d344c8ad1cec2612a8aa8" ]
[ "tensorflow/python/framework/convert_to_constants_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for convert_to_constants.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import convert_to_constants\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model.load import load\nfrom tensorflow.python.saved_model.save import save\nfrom tensorflow.python.training.tracking import tracking\n\n\nclass VariablesToConstantsTest(test.TestCase):\n\n def _hasStatefulPartitionedCallOp(self, graph_def):\n \"\"\"Determines if a StatefulPartitionedCall op exists in the graph.\"\"\"\n for node in graph_def.node:\n if node.op == \"StatefulPartitionedCall\":\n return True\n return False\n\n def _getNumVariables(self, graph_def):\n \"\"\"Returns the number of ReadVariableOp in the graph.\"\"\"\n return sum(node.op == \"ReadVariableOp\" for node in graph_def.node)\n\n def _getTensors(self, sess, tensor_list):\n \"\"\"Returns a list of Tensor objects from the Session.\"\"\"\n return [\n sess.graph.get_tensor_by_name(tensor.name) for tensor in tensor_list\n ]\n\n def _evaluateGraphDef(self, graph_def, func, input_data):\n \"\"\"Evaluates the GraphDef using Sessions.\"\"\"\n with ops.Graph().as_default() as graph:\n importer.import_graph_def(graph_def, name=\"\")\n func.add_to_graph(graph)\n sess = session.Session(graph=graph)\n\n input_tensors = self._getTensors(sess, func.inputs)\n output_tensors = self._getTensors(sess, func.outputs)\n return sess.run(\n output_tensors, feed_dict=dict(zip(input_tensors, input_data)))\n\n @test_util.run_v2_only\n def testConstSavedModel(self):\n \"\"\"Test a basic model with functions to make sure functions are inlined.\"\"\"\n input_data = constant_op.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.f = def_function.function(lambda x: 2. 
* x)\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save(root, save_dir, to_save)\n saved_model = load(save_dir)\n concrete_func = saved_model.signatures[\"serving_default\"]\n\n variable_graph_def = concrete_func.graph.as_graph_def()\n self.assertEqual(0, self._getNumVariables(variable_graph_def))\n self.assertTrue(variable_graph_def.library.function)\n\n constant_graph_def = convert_to_constants.convert_variables_to_constants_v2(\n concrete_func)\n self.assertEqual(0, self._getNumVariables(constant_graph_def))\n self.assertFalse(constant_graph_def.library.function)\n\n # Check value.\n expected_value = root.f(input_data)\n actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func,\n [input_data.numpy()])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testVariableModel(self):\n \"\"\"Test a basic model with Variables.\"\"\"\n input_data = constant_op.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.v1 = variables.Variable(3.)\n root.v2 = variables.Variable(2.)\n root.f = def_function.function(lambda x: root.v1 * root.v2 * x)\n concrete_func = root.f.get_concrete_function(input_data)\n\n variable_graph_def = concrete_func.graph.as_graph_def()\n self.assertEqual(2, self._getNumVariables(variable_graph_def))\n\n constant_graph_def = convert_to_constants.convert_variables_to_constants_v2(\n concrete_func)\n self.assertEqual(0, self._getNumVariables(constant_graph_def))\n self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))\n\n # Check value.\n expected_value = root.f(input_data)\n actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func,\n [input_data.numpy()])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testVariableSavedModel(self):\n \"\"\"Test a basic model with Variables with saving/loading the SavedModel.\"\"\"\n input_data = constant_op.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.v1 = variables.Variable(3.)\n root.v2 = variables.Variable(2.)\n root.f = def_function.function(lambda x: root.v1 * root.v2 * x)\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), \"saved_model\")\n save(root, save_dir, to_save)\n saved_model = load(save_dir)\n concrete_func = saved_model.signatures[\"serving_default\"]\n\n variable_graph_def = concrete_func.graph.as_graph_def()\n self.assertTrue(self._hasStatefulPartitionedCallOp(variable_graph_def))\n\n constant_graph_def = convert_to_constants.convert_variables_to_constants_v2(\n concrete_func)\n self.assertEqual(0, self._getNumVariables(constant_graph_def))\n self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))\n\n # Check value.\n expected_value = root.f(input_data)\n actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func,\n [input_data.numpy()])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testMultiFunctionModel(self):\n \"\"\"Test a basic model with Variables.\"\"\"\n\n class BasicModel(tracking.AutoTrackable):\n\n def __init__(self):\n self.y = None\n self.z = None\n\n @def_function.function\n def add(self, x):\n if self.y is None:\n self.y = variables.Variable(2.)\n return x + self.y\n\n @def_function.function\n def sub(self, x):\n if self.z is None:\n self.z = variables.Variable(3.)\n return x - self.z\n\n input_data = constant_op.constant(1., shape=[1])\n root = 
BasicModel()\n concrete_func = root.add.get_concrete_function(input_data)\n\n variable_graph_def = concrete_func.graph.as_graph_def()\n self.assertEqual(1, self._getNumVariables(variable_graph_def))\n\n constant_graph_def = convert_to_constants.convert_variables_to_constants_v2(\n concrete_func)\n self.assertEqual(0, self._getNumVariables(constant_graph_def))\n self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))\n\n # Check value.\n expected_value = root.add(input_data)\n actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func,\n [input_data.numpy()])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testKerasModel(self):\n input_data = constant_op.constant(1., shape=[1, 1])\n\n # Create a simple Keras model.\n x = [-1, 0, 1, 2, 3, 4]\n y = [-3, -1, 1, 3, 5, 7]\n\n model = keras.models.Sequential(\n [keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer=\"sgd\", loss=\"mean_squared_error\")\n model.fit(x, y, epochs=1)\n\n # Get the concrete function from the Keras model.\n @def_function.function\n def to_save(x):\n return model(x)\n\n concrete_func = to_save.get_concrete_function(input_data)\n\n variable_graph_def = concrete_func.graph.as_graph_def()\n self.assertEqual(2, self._getNumVariables(variable_graph_def))\n\n constant_graph_def = convert_to_constants.convert_variables_to_constants_v2(\n concrete_func)\n self.assertEqual(0, self._getNumVariables(constant_graph_def))\n self.assertFalse(self._hasStatefulPartitionedCallOp(constant_graph_def))\n\n # Check value.\n expected_value = to_save(input_data)\n actual_value = self._evaluateGraphDef(constant_graph_def, concrete_func,\n [input_data.numpy()])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.training.tracking.tracking.AutoTrackable", "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.ops.Graph", "tensorflow.python.saved_model.save.save", "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2", "tensorflow.python.eager.def_function.function", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.saved_model.load.load", "tensorflow.python.client.session.Session", "tensorflow.python.keras.layers.Dense", "tensorflow.python.platform.test.main", "tensorflow.python.framework.importer.import_graph_def" ] ]
thatch/BitSwanPump
[ "98a5b8d09f9b59d5361611cee0bd45e7b4c69e3f" ]
[ "bspump/matrix/namedmatrix.py" ]
[ "import logging\nimport os\nimport numpy as np\n\nimport asab\nfrom .utils.index import Index, PersistentIndex\nfrom .matrix import Matrix, PersistentMatrix\n\n###\n\nL = logging.getLogger(__name__)\n\n###\n\n\n\nclass NamedMatrix(Matrix):\n\n\tdef __init__(self, app, dtype='float_', id=None, config=None):\n\t\tsuper().__init__(app, dtype=dtype, id=id, config=config)\n\t\tself.PubSub = asab.PubSub(app)\n\n\n\tdef zeros(self):\n\t\tsuper().zeros()\n\t\tself.Index = Index()\n\n\n\tdef serialize(self):\n\t\tserialized = {}\n\t\tserialized['Index'] = self.Index.serialize()\n\t\tserialized['ClosedRows'] = self.ClosedRows.serialize()\n\t\tserialized['DType'] = self.DType\n\t\tserialized['Array'] = self.Array.tolist()\n\t\treturn serialized\n\n\n\tdef deserialize(self, data):\n\t\tself.Index.deserialize(data['Index'])\n\t\tself.ClosedRows.deserialize(data['ClosedRows'])\n\n\t\tif isinstance(data['DType'], str):\n\t\t\tself.DType = data['DType']\n\t\telse:\n\t\t\tself.DType = []\n\t\t\tfor member in data['DType']:\n\t\t\t\tself.DType.append(tuple(member))\n\n\t\tarray = []\n\t\tfor member in data['Array']:\n\t\t\tarray.append(tuple(member))\n\n\t\tself.Array = np.array(array, dtype=self.DType)\n\n\n\tdef _grow_rows(self, rows=1):\n\t\tsuper()._grow_rows(rows)\n\t\tself.Index.extend(self.Array.shape[0])\n\n\n\tdef flush(self):\n\t\t'''\n\t\tThe matrix will be recreated without rows from `ClosedRows`.\n\t\t'''\n\t\tclosed_indexes, saved_indexes = super().flush()\n\t\tself.Index.flush(closed_indexes)\n\t\treturn closed_indexes, saved_indexes\n\n\n\tdef add_row(self, row_name: str):\n\t\tassert(row_name is not None)\n\n\t\trow_index = super().add_row()\n\t\tself.Index.add_row(row_name, row_index)\n\t\tself.PubSub.publish(\"Matrix changed!\")\n\t\treturn row_index\n\n\n\tdef close_row(self, row_name, clear=True):\n\t\trow_index = self.Index.get_row_index(row_name)\n\t\tif row_index in self.ClosedRows:\n\t\t\treturn False\n\n\t\tif row_index is None:\n\t\t\treturn False\n\n\t\tself.Index.pop_index(row_index)\n\t\tself.PubSub.publish(\"Matrix changed!\")\n\n\t\tif clear:\n\t\t\tself.Array[row_index] = np.zeros(1, dtype=self.DType)\n\n\t\tself.ClosedRows.add(row_index)\n\t\tif (len(self.ClosedRows) >= self.MaxClosedRowsCapacity * self.Array.shape[0]):\n\t\t\tself.flush()\n\n\t\tcrc = len(self.ClosedRows)\n\t\tself.Gauge.set(\"rows.active\", self.Array.shape[0] - crc)\n\t\tself.Gauge.set(\"rows.closed\", crc)\n\t\treturn True\n\n\n\tdef close_rows(self, row_names, clear=True):\n\t\tfor name in row_names:\n\t\t\tself.close_row(name, clear=clear)\n\n\n\tdef get_row_index(self, row_name: str):\n\t\treturn self.Index.get_row_index(row_name)\n\n\n\tdef get_row_name(self, row_index: int):\n\t\treturn self.Index.get_row_name(row_index)\n\n\nclass PersistentNamedMatrix(PersistentMatrix):\n\n\tdef __init__(self, app, dtype='float_', id=None, config=None):\n\t\tsuper().__init__(app, dtype=dtype, id=id, config=config)\n\t\tself.PubSub = asab.PubSub(app)\n\n\tdef zeros(self):\n\t\tsuper().zeros()\n\t\tpath = os.path.join(self.Path, 'map.dat')\n\t\tself.Index = PersistentIndex(path, self.Array.shape[0])\n\n\tdef _grow_rows(self, rows=1):\n\t\tsuper()._grow_rows(rows)\n\t\tself.Index.extend(self.Array.shape[0])\n\n\n\tdef flush(self):\n\t\t'''\n\t\tThe matrix will be recreated without rows from `ClosedRows`.\n\t\t'''\n\t\tclosed_indexes, saved_indexes = super().flush()\n\t\tself.Index.flush(closed_indexes)\n\t\treturn closed_indexes, saved_indexes\n\n\n\tdef add_row(self, row_name: str):\n\t\tassert(row_name is not 
None)\n\n\t\trow_index = super().add_row()\n\t\tself.Index.add_row(row_name, row_index)\n\t\tself.PubSub.publish(\"Matrix changed!\")\n\t\treturn row_index\n\n\n\tdef close_row(self, row_name, clear=True):\n\t\trow_index = self.Index.get_row_index(row_name)\n\t\tif row_index in self.ClosedRows:\n\t\t\treturn False\n\n\t\tif row_index is None:\n\t\t\treturn False\n\n\t\tself.Index.pop_index(row_index)\n\t\tself.PubSub.publish(\"Matrix changed!\")\n\n\t\tif clear:\n\t\t\tself.Array[row_index] = np.zeros(1, dtype=self.DType)\n\n\t\tself.ClosedRows.add(row_index)\n\t\tif (len(self.ClosedRows) >= self.MaxClosedRowsCapacity * self.Array.shape[0]):\n\t\t\tself.flush()\n\n\t\tcrc = len(self.ClosedRows)\n\t\tself.Gauge.set(\"rows.active\", self.Array.shape[0] - crc)\n\t\tself.Gauge.set(\"rows.closed\", crc)\n\t\treturn True\n\n\n\tdef close_rows(self, row_names, clear=True):\n\t\tfor name in row_names:\n\t\t\tself.close_row(name, clear=clear)\n\n\n\tdef get_row_index(self, row_name: str):\n\t\treturn self.Index.get_row_index(row_name)\n\n\n\tdef get_row_name(self, row_index: int):\n\t\treturn self.Index.get_row_name(row_index)\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
TIan1874/PCA-Net
[ "fe4e4d00380ec477e6e6b28175f99750b58ddc47" ]
[ "main.py" ]
[ "import logging\nimport argparse\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\n\nfrom dataset import config, Dataset, collate_fn\nfrom utils import *\nfrom train import train, test\nfrom model import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\ncudnn.benchmark = False\ndef init_seeds(seed=0):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if seed == 0:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PCA-Net parameters')\n parser.add_argument('--dataset', metavar='DIR', default='bird', help='bird car aircraft')\n parser.add_argument('--lr', '--learning-rate', default=0.001, type=float, metavar='LR', help='initial learning rate')\n parser.add_argument('--model-name', default='resnet50', type=str, help='model name')\n parser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\n parser.add_argument('--epochs', default=300, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('--decay-step', default=2, type=int, metavar='N',\n help='learning rate decay step')\n parser.add_argument('--gamma', default=0.9, type=float, metavar='M',\n help='gamma')\n parser.add_argument('-b', '--batch-size', default=16, type=int,\n metavar='N', help='mini-batch size (default: 16)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint')\n parser.add_argument('--checkpoint-path', default='./checkpoint_bird', type=str, metavar='checkpoint_path',\n help='path to save checkpoint')\n\n args = parser.parse_args()\n return args\n\n\nargs = parse_args()\nprint(args)\ninit_seeds(seed=0)\nbest_acc1 = 0.\n\ntry:\n os.stat(args.checkpoint_path)\nexcept:\n os.makedirs(args.checkpoint_path)\n\nlogging.info(\"OPENING \" + args.checkpoint_path + '/results_train.csv')\nlogging.info(\"OPENING \" + args.checkpoint_path + '/results_test.csv')\n\nresults_train_file = open(args.checkpoint_path + '/results_train.csv', 'w')\nresults_train_file.write('epoch, train_acc,train_loss\\n')\nresults_train_file.flush()\n\nresults_test_file = open(args.checkpoint_path + '/results_test.csv', 'w')\nresults_test_file.write('epoch, test_acc,test_loss\\n')\nresults_test_file.flush()\n\n# dataset\ntrain_root, test_root, train_pd, test_pd, cls_num = config(data=args.dataset)\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.Resize((512, 512)),\n transforms.RandomCrop((448, 448)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]),\n 'test': transforms.Compose([\n transforms.Resize((512, 512)),\n transforms.CenterCrop((448, 448)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]),\n}\ntrain_dataset = Dataset(train_root, train_pd, train=True, transform=data_transforms['train'], num_positive=1)\ntest_dataset = Dataset(test_root, test_pd, train=False, transform=data_transforms['test'])\n\ntrain_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn)\ntest_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4)\n\n\nmodel = resnet50(pretrained=True, use_bp=True)\nin_features = 
model.classifier.in_features\nmodel.classifier = torch.nn.Linear(in_features=in_features, out_features=cls_num)\n\nmodel = model.cuda()\nmodel = torch.nn.DataParallel(model)\n\n# feature center\nfeature_len = 512\ncenter_dict = {'center': torch.zeros(cls_num, feature_len * 32)}\ncenter = center_dict['center'].cuda()\n\ncriterion = torch.nn.CrossEntropyLoss()\ncriterion = criterion.cuda()\n\noptimizer = torch.optim.SGD(\n model.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-5)\ncudnn.benchmark = True\n\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.decay_step, gamma=args.gamma)\n\nif args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\nfor epoch in range(args.start_epoch, args.epochs):\n scheduler.step()\n for param_group in optimizer.param_groups:\n lr_val = float(param_group['lr'])\n print(\"Start epoch %d, lr=%f\" % (epoch, lr_val))\n\n train_acc, train_loss = train(train_loader, model, criterion, optimizer, center)\n logging.info('Iteration %d, train_acc = %.4f,train_loss = %.4f' % (epoch, train_acc, train_loss))\n results_train_file.write('%d, %.4f,%.4f\\n' % (epoch, train_acc, train_loss))\n results_train_file.flush()\n\n val_acc, val_loss = test(test_loader, model, criterion, center)\n is_best = val_acc > best_acc1\n best_acc1 = max(val_acc, best_acc1)\n logging.info('Iteration %d, test_acc = %.4f,test_loss = %.4f' % (epoch, val_acc, val_loss))\n results_test_file.write('%d, %.4f,%.4f\\n' % (epoch, val_acc, val_loss))\n results_test_file.flush()\n\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n 'center': center\n }, is_best, args.checkpoint_path)\n\n\n\n\n\n\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.cuda.manual_seed", "torch.optim.lr_scheduler.StepLR", "torch.cuda.manual_seed_all", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.load", "torch.nn.CrossEntropyLoss", "torch.nn.DataParallel" ] ]
raviriley/stocks_data
[ "a884bd5ce04dff7e8f0f38fe97e3a3b67ffbbb33" ]
[ "stocks_data/functions.py" ]
[ "import pandas as pd\nfrom stocks_data.stock import stock\n\ndef getCorrelatedPairsFromStocks(list_of_stocks, pearson_threshold):\n threshold = pearson_threshold\n dic_pearsons={}\n #stocks_tickers = []\n for i in range(len(list_of_stocks)):\n ticker = list_of_stocks[i].ticker\n #stocks_tickers.append(ticker)\n dic_pearsons[ticker] = list_of_stocks[i].percent_change\n # df=pd.DataFrame(dic_pearsons, columns = stocks_tickers)\n df=pd.DataFrame.from_dict(dic_pearsons, orient='index').transpose().dropna(1)\n corrMatrix=df.corr()\n best_pairs = []\n stocks_tickers = list(corrMatrix.columns)\n for i in range(len(stocks_tickers)):\n for j in range(len(stocks_tickers)):\n if (corrMatrix.iloc[i][j] >= threshold and stocks_tickers[i] != stocks_tickers[j] and [stocks_tickers[j], stocks_tickers[i], corrMatrix.iloc[i][j]] not in best_pairs):\n best_pairs.append([stocks_tickers[i], stocks_tickers[j], corrMatrix.iloc[i][j]])\n # for ticker in stocks_tickers:\n # for i in range(len(stocks_tickers)):\n # if (corrMatrix.iloc[i][ticker])>threshold and ticker!=stocks_tickers[i] and ([stocks_tickers[i],ticker] not in best_pairs):\n # best_pairs.append([ticker, stocks_tickers[i]])\n return best_pairs\n\ndef getCorrelatedPairsFromTickers(stocks_tickers, pearson_threshold):\n threshold = pearson_threshold\n dic_pearsons={}\n length_list = [] # number of datapoints in percent_change for each ticker\n #removed_keys = []\n for ticker in stocks_tickers:\n dic_pearsons[ticker]=stock(ticker).percent_change\n #length_list.append(len(dic_pearsons[ticker]))\n # mode_length = max(length_list, key=length_list.count)\n # for key in list(dic_pearsons):\n # if (len(dic_pearsons[key]) != mode_length):\n # remove = dic_pearsons.pop(key)\n # removed_keys.append(remove)\n # print(removed_keys)\n\n df=pd.DataFrame(dic_pearsons, columns = stocks_tickers)\n corrMatrix=df.corr()\n #sb.heatmap(corrMatrix, annot=True, cmap='RdBu_r')\n #plt.show()\n best_pairs = [] \n for ticker in stocks_tickers:\n for i in range(len(stocks_tickers)):\n if (corrMatrix.iloc[i][ticker])>threshold and ticker!=stocks_tickers[i] and ([stocks_tickers[i],ticker] not in best_pairs ):\n best_pairs.append([ticker, stocks_tickers[i]])\n return best_pairs\n\ndef getCorrelatedPairsFromCSV(csv_file, pearson_threshold):\n stocks_tickers = pd.read_csv(csv_file, delimiter=\",\")\n \n threshold = pearson_threshold\n dic_pearsons={}\n for ticker in stocks_tickers:\n dic_pearsons[ticker]=stock(ticker).percent_change\n df=pd.DataFrame(dic_pearsons, columns = stocks_tickers)\n corrMatrix=df.corr()\n #sb.heatmap(corrMatrix, annot=True, cmap='RdBu_r')\n #plt.show()\n best_pairs = [] \n for ticker in stocks_tickers:\n for i in range(len(stocks_tickers)):\n if (corrMatrix.iloc[i][ticker])>threshold and ticker!=stocks_tickers[i] and ([stocks_tickers[i],ticker] not in best_pairs ):\n best_pairs.append([ticker, stocks_tickers[i]])\n return best_pairs\n\n# test speed of making my own pearson \n\n# def timePeriod_to_start_end(time_period):\n# if (time_period == \"5y\"):\n# end = \"today\"\n# start = today - 5 years\n\n# def generateAllTickers():\n\n# def getMode(input_list):\n# mode_length = max(input_list, key=length_list.count)\n# print(mode_length)\n# # check percent_change (if stock IPOs after start date this should fix it)\n# for item in dic_pearsons:\n# if (len(item) != mode_length):\n# remove_values.append(dic_pearsons.pop(item))\n# # for i in range(len(length_list)):\n# # if (length_list[i] != mode_length):\n# # dic_pearsons remove i\n\n# def getIPO(ticker):\n# # get 
ipo\n# return (ticker + \"test\")\n\n\n# getValidTickers() #get \n\n\n\n\"\"\" JOHANSEN LIBRARY + METHOD \"\"\"\ndef johansenTest(pair, percent_confidence):\n import numpy as np\n from numpy.linalg import inv, eig, cholesky as chol\n from numpy import zeros, ones, flipud, log\n import statsmodels.tsa.tsatools as tsat\n from statsmodels.regression.linear_model import OLS\n\n\n tdiff = np.diff\n\n class Holder(object):\n pass\n\n def rows(x):\n return x.shape[0]\n\n def trimr(x, front, end):\n if end > 0:\n return x[front:-end]\n else:\n return x[front:]\n\n mlag = tsat.lagmat\n\n def mlag_(x, maxlag):\n '''return all lags up to maxlag\n '''\n return x[:-lag]\n\n def lag(x, lag):\n return x[:-lag]\n\n def detrend(y, order):\n if order == -1:\n return y\n return OLS(y, np.vander(np.linspace(-1, 1, len(y)), order + 1)).fit().resid\n\n def resid(y, x):\n r = y - np.dot(x, np.dot(np.linalg.pinv(x), y))\n return r\n\n def coint_johansen(x, p, k, print_on_console=True):\n # % error checking on inputs\n # if (nargin ~= 3)\n # error('Wrong # of inputs to johansen')\n # end\n nobs, m = x.shape\n\n # why this? f is detrend transformed series, p is detrend data\n if (p > -1):\n f = 0\n else:\n f = p\n\n x = detrend(x, p)\n dx = tdiff(x, 1, axis=0)\n # dx = trimr(dx,1,0)\n z = mlag(dx, k) # [k-1:]\n # print z.shape\n z = trimr(z, k, 0)\n z = detrend(z, f)\n # print dx.shape\n dx = trimr(dx, k, 0)\n\n dx = detrend(dx, f)\n # r0t = dx - z*(z\\dx)\n r0t = resid(dx, z) # diff on lagged diffs\n # lx = trimr(lag(x,k),k,0)\n lx = lag(x, k)\n lx = trimr(lx, 1, 0)\n dx = detrend(lx, f)\n # print 'rkt', dx.shape, z.shape\n # rkt = dx - z*(z\\dx)\n rkt = resid(dx, z) # level on lagged diffs\n skk = np.dot(rkt.T, rkt) / rows(rkt)\n sk0 = np.dot(rkt.T, r0t) / rows(rkt)\n s00 = np.dot(r0t.T, r0t) / rows(r0t)\n sig = np.dot(sk0, np.dot(inv(s00), (sk0.T)))\n tmp = inv(skk)\n # du, au = eig(np.dot(tmp, sig))\n au, du = eig(np.dot(tmp, sig)) # au is eval, du is evec\n # orig = np.dot(tmp, sig)\n\n # % Normalize the eigen vectors such that (du'skk*du) = I\n temp = inv(chol(np.dot(du.T, np.dot(skk, du))))\n dt = np.dot(du, temp)\n\n # JP: the next part can be done much easier\n\n # % NOTE: At this point, the eigenvectors are aligned by column. To\n # % physically move the column elements using the MATLAB sort,\n # % take the transpose to put the eigenvectors across the row\n\n # dt = transpose(dt)\n\n # % sort eigenvalues and vectors\n\n # au, auind = np.sort(diag(au))\n auind = np.argsort(au)\n # a = flipud(au)\n aind = flipud(auind)\n a = au[aind]\n # d = dt[aind,:]\n d = dt[:, aind]\n\n # %NOTE: The eigenvectors have been sorted by row based on auind and moved to array \"d\".\n # % Put the eigenvectors back in column format after the sort by taking the\n # % transpose of \"d\". Since the eigenvectors have been physically moved, there is\n # % no need for aind at all. To preserve existing programming, aind is reset back to\n # % 1, 2, 3, ....\n\n # d = transpose(d)\n # test = np.dot(transpose(d), np.dot(skk, d))\n\n # %EXPLANATION: The MATLAB sort function sorts from low to high. The flip realigns\n # %auind to go from the largest to the smallest eigenvalue (now aind). The original procedure\n # %physically moved the rows of dt (to d) based on the alignment in aind and then used\n # %aind as a column index to address the eigenvectors from high to low. This is a double\n # %sort. 
If you wanted to extract the eigenvector corresponding to the largest eigenvalue by,\n # %using aind as a reference, you would get the correct eigenvector, but with sorted\n # %coefficients and, therefore, any follow-on calculation would seem to be in error.\n # %If alternative programming methods are used to evaluate the eigenvalues, e.g. Frame method\n # %followed by a root extraction on the characteristic equation, then the roots can be\n # %quickly sorted. One by one, the corresponding eigenvectors can be generated. The resultant\n # %array can be operated on using the Cholesky transformation, which enables a unit\n # %diagonalization of skk. But nowhere along the way are the coefficients within the\n # %eigenvector array ever changed. The final value of the \"beta\" array using either method\n # %should be the same.\n\n # % Compute the trace and max eigenvalue statistics */\n lr1 = zeros(m)\n lr2 = zeros(m)\n cvm = zeros((m, 3))\n cvt = zeros((m, 3))\n iota = ones(m)\n t, junk = rkt.shape\n for i in range(0, m):\n tmp = trimr(log(iota - a), i , 0)\n lr1[i] = -t * np.sum(tmp, 0) # columnsum ?\n # tmp = np.log(1-a)\n # lr1[i] = -t * np.sum(tmp[i:])\n lr2[i] = -t * log(1 - a[i])\n cvm[i, :] = c_sja(m - i, p)\n cvt[i, :] = c_sjt(m - i, p)\n aind[i] = i\n # end\n\n result = Holder()\n # % set up results structure\n # estimation results, residuals\n result.rkt = rkt\n result.r0t = r0t\n result.eig = a\n result.evec = d # transposed compared to matlab ?\n result.lr1 = lr1\n result.lr2 = lr2\n result.cvt = cvt\n result.cvm = cvm\n result.ind = aind\n result.meth = 'johansen'\n\n if print_on_console == True:\n print ('--------------------------------------------------')\n print ('--> Trace Statistics')\n print ('variable statistic Crit-90% Crit-95% Crit-99%')\n for i in range(len(result.lr1)):\n print ('r =', i, '\\t', round(result.lr1[i], 4), result.cvt[i, 0], result.cvt[i, 1], result.cvt[i, 2])\n print ('--------------------------------------------------')\n print ('--> Eigen Statistics')\n print ('variable statistic Crit-90% Crit-95% Crit-99%')\n for i in range(len(result.lr2)):\n print ('r =', i, '\\t', round(result.lr2[i], 4), result.cvm[i, 0], result.cvm[i, 1], result.cvm[i, 2])\n print ('--------------------------------------------------')\n print ('eigenvectors:\\n', result.evec)\n print ('--------------------------------------------------')\n print ('eigenvalues:\\n', result.eig)\n print ('--------------------------------------------------')\n \n \n results = [result.lr1[0], result.lr2[0]]\n return results\n return result\n \n def c_sjt(n, p):\n # PURPOSE: find critical values for Johansen trace statistic\n # ------------------------------------------------------------\n # USAGE: jc = c_sjt(n,p)\n # where: n = dimension of the VAR system\n # NOTE: routine doesn't work for n > 12\n # p = order of time polynomial in the null-hypothesis\n # p = -1, no deterministic part\n # p = 0, for constant term\n # p = 1, for constant plus time-trend\n # p > 1 returns no critical values\n # ------------------------------------------------------------\n # RETURNS: a (3x1) vector of percentiles for the trace\n # statistic for [90# 95# 99#]\n # ------------------------------------------------------------\n # NOTES: for n > 12, the function returns a (3x1) vector of zeros.\n # The values returned by the function were generated using\n # a method described in MacKinnon (1996), using his FORTRAN\n # program johdist.f\n # ------------------------------------------------------------\n # SEE ALSO: 
johansen()\n # ------------------------------------------------------------\n # # References: MacKinnon, Haug, Michelis (1996) 'Numerical distribution\n # functions of likelihood ratio tests for cointegration',\n # Queen's University Institute for Economic Research Discussion paper.\n # -------------------------------------------------------\n\n # written by:\n # James P. LeSage, Dept of Economics\n # University of Toledo\n # 2801 W. Bancroft St,\n # Toledo, OH 43606\n # [email protected]\n #\n # Ported to Python by Javier Garcia\n # [email protected]\n\n # these are the values from Johansen's 1995 book\n # for comparison to the MacKinnon values\n # jcp0 = [ 2.98 4.14 7.02\n # 10.35 12.21 16.16\n # 21.58 24.08 29.19\n # 36.58 39.71 46.00\n # 55.54 59.24 66.71\n # 78.30 86.36 91.12\n # 104.93 109.93 119.58\n # 135.16 140.74 151.70\n # 169.30 175.47 187.82\n # 207.21 214.07 226.95\n # 248.77 256.23 270.47\n # 293.83 301.95 318.14];\n\n jcp0 = ((2.9762, 4.1296, 6.9406),\n (10.4741, 12.3212, 16.3640),\n (21.7781, 24.2761, 29.5147),\n (37.0339, 40.1749, 46.5716),\n (56.2839, 60.0627, 67.6367),\n (79.5329, 83.9383, 92.7136),\n (106.7351, 111.7797, 121.7375),\n (137.9954, 143.6691, 154.7977),\n (173.2292, 179.5199, 191.8122),\n (212.4721, 219.4051, 232.8291),\n (255.6732, 263.2603, 277.9962),\n (302.9054, 311.1288, 326.9716))\n\n jcp1 = ((2.7055, 3.8415, 6.6349),\n (13.4294, 15.4943, 19.9349),\n (27.0669, 29.7961, 35.4628),\n (44.4929, 47.8545, 54.6815),\n (65.8202, 69.8189, 77.8202),\n (91.1090, 95.7542, 104.9637),\n (120.3673, 125.6185, 135.9825),\n (153.6341, 159.5290, 171.0905),\n (190.8714, 197.3772, 210.0366),\n (232.1030, 239.2468, 253.2526),\n (277.3740, 285.1402, 300.2821),\n (326.5354, 334.9795, 351.2150))\n\n jcp2 = ((2.7055, 3.8415, 6.6349),\n (16.1619, 18.3985, 23.1485),\n (32.0645, 35.0116, 41.0815),\n (51.6492, 55.2459, 62.5202),\n (75.1027, 79.3422, 87.7748),\n (102.4674, 107.3429, 116.9829),\n (133.7852, 139.2780, 150.0778),\n (169.0618, 175.1584, 187.1891),\n (208.3582, 215.1268, 228.2226),\n (251.6293, 259.0267, 273.3838),\n (298.8836, 306.8988, 322.4264),\n (350.1125, 358.7190, 375.3203))\n\n if (p > 1) or (p < -1):\n jc = (0, 0, 0)\n elif (n > 12) or (n < 1):\n jc = (0, 0, 0)\n elif p == -1:\n jc = jcp0[n - 1]\n elif p == 0:\n jc = jcp1[n - 1]\n elif p == 1:\n jc = jcp2[n - 1]\n\n return jc\n\n def c_sja(n, p):\n # PURPOSE: find critical values for Johansen maximum eigenvalue statistic\n # ------------------------------------------------------------\n # USAGE: jc = c_sja(n,p)\n # where: n = dimension of the VAR system\n # p = order of time polynomial in the null-hypothesis\n # p = -1, no deterministic part\n # p = 0, for constant term\n # p = 1, for constant plus time-trend\n # p > 1 returns no critical values\n # ------------------------------------------------------------\n # RETURNS: a (3x1) vector of percentiles for the maximum eigenvalue\n # statistic for: [90# 95# 99#]\n # ------------------------------------------------------------\n # NOTES: for n > 12, the function returns a (3x1) vector of zeros.\n # The values returned by the function were generated using\n # a method described in MacKinnon (1996), using his FORTRAN\n # program johdist.f\n # ------------------------------------------------------------\n # SEE ALSO: johansen()\n # ------------------------------------------------------------\n # References: MacKinnon, Haug, Michelis (1996) 'Numerical distribution\n # functions of likelihood ratio tests for cointegration',\n # Queen's University Institute for Economic 
Research Discussion paper.\n # -------------------------------------------------------\n\n # written by:\n # James P. LeSage, Dept of Economics\n # University of Toledo\n # 2801 W. Bancroft St,\n # Toledo, OH 43606\n # [email protected]\n # Ported to Python by Javier Garcia\n # [email protected]\n jcp0 = ((2.9762, 4.1296, 6.9406),\n (9.4748, 11.2246, 15.0923),\n (15.7175, 17.7961, 22.2519),\n (21.8370, 24.1592, 29.0609),\n (27.9160, 30.4428, 35.7359),\n (33.9271, 36.6301, 42.2333),\n (39.9085, 42.7679, 48.6606),\n (45.8930, 48.8795, 55.0335),\n (51.8528, 54.9629, 61.3449),\n (57.7954, 61.0404, 67.6415),\n (63.7248, 67.0756, 73.8856),\n (69.6513, 73.0946, 80.0937))\n\n jcp1 = ((2.7055, 3.8415, 6.6349),\n (12.2971, 14.2639, 18.5200),\n (18.8928, 21.1314, 25.8650),\n (25.1236, 27.5858, 32.7172),\n (31.2379, 33.8777, 39.3693),\n (37.2786, 40.0763, 45.8662),\n (43.2947, 46.2299, 52.3069),\n (49.2855, 52.3622, 58.6634),\n (55.2412, 58.4332, 64.9960),\n (61.2041, 64.5040, 71.2525),\n (67.1307, 70.5392, 77.4877),\n (73.0563, 76.5734, 83.7105))\n\n jcp2 = ((2.7055, 3.8415, 6.6349),\n (15.0006, 17.1481, 21.7465),\n (21.8731, 24.2522, 29.2631),\n (28.2398, 30.8151, 36.1930),\n (34.4202, 37.1646, 42.8612),\n (40.5244, 43.4183, 49.4095),\n (46.5583, 49.5875, 55.8171),\n (52.5858, 55.7302, 62.1741),\n (58.5316, 61.8051, 68.5030),\n (64.5292, 67.9040, 74.7434),\n (70.4630, 73.9355, 81.0678),\n (76.4081, 79.9878, 87.2395))\n\n if (p > 1) or (p < -1):\n jc = (0, 0, 0)\n elif (n > 12) or (n < 1):\n jc = (0, 0, 0)\n elif p == -1:\n jc = jcp0[n - 1]\n elif p == 0:\n jc = jcp1[n - 1]\n elif p == 1:\n jc = jcp2[n - 1]\n\n return jc\n\n df=pd.DataFrame({'x':pair[0].values_close,'y':pair[1].values_close})\n result = coint_johansen(df,0,1)\n print(\"\\n\",result)\n if (percent_confidence == 90):\n if (result[0] > 13.4294):\n return [True, result[0]]\n else:\n return [False]\n elif (percent_confidence == 95):\n if (result[0] > 15.4943):\n return [True, result[0]]\n else:\n return [False]\n elif (percent_confidence == 99):\n if (result[0] > 19.9349):\n return [True, result[0]]\n else:\n return [False]\n else:\n raise ValueError(\"Invalid percent confidence. Must be 90, 95, or 99.\")\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.log", "pandas.DataFrame.from_dict", "pandas.DataFrame", "numpy.sum", "numpy.ones", "numpy.linalg.pinv", "numpy.flipud", "numpy.argsort", "pandas.read_csv", "numpy.linalg.inv" ] ]
ChantalTax/dipy
[ "da656ca630934a79e5eabd4aee64f8f0ae05bf95" ]
[ "dipy/reconst/tests/test_dti.py" ]
[ "\"\"\" Testing DTI\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom nose.tools import (assert_true, assert_equal,\n assert_almost_equal, assert_raises)\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal, assert_\nimport nibabel as nib\n\nimport scipy.optimize as opt\n\nimport dipy.reconst.dti as dti\nfrom dipy.reconst.dti import (axial_diffusivity, color_fa,\n fractional_anisotropy, from_lower_triangular,\n lower_triangular, mean_diffusivity,\n radial_diffusivity, TensorModel, trace,\n linearity, planarity, sphericity)\n\nfrom dipy.io.bvectxt import read_bvec_file\nfrom dipy.data import get_data, dsi_voxels, get_sphere\n\nfrom dipy.core.subdivide_octahedron import create_unit_sphere\nfrom dipy.reconst.odf import gfa\nimport dipy.core.gradients as grad\nimport dipy.core.sphere as dps\n\nfrom dipy.sims.voxel import single_tensor\n\n\ndef test_tensor_algebra():\n \"\"\"\n Test that the computation of tensor determinant and norm is correct\n \"\"\"\n test_arr = np.random.rand(10, 3, 3)\n t_det = dti.determinant(test_arr)\n t_norm = dti.norm(test_arr)\n for i, x in enumerate(test_arr):\n assert_almost_equal(np.linalg.det(x), t_det[i])\n assert_almost_equal(np.linalg.norm(x), t_norm[i])\n\n\ndef test_TensorModel():\n data, gtab = dsi_voxels()\n dm = dti.TensorModel(gtab, 'LS')\n dtifit = dm.fit(data[0, 0, 0])\n assert_equal(dtifit.fa < 0.5, True)\n dm = dti.TensorModel(gtab, 'WLS')\n dtifit = dm.fit(data[0, 0, 0])\n assert_equal(dtifit.fa < 0.5, True)\n sphere = create_unit_sphere(4)\n assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))\n assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)\n\n # Check that the multivoxel case works:\n dtifit = dm.fit(data)\n # And smoke-test that all these operations return sensibly-shaped arrays:\n assert_equal(dtifit.fa.shape, data.shape[:3])\n assert_equal(dtifit.ad.shape, data.shape[:3])\n assert_equal(dtifit.md.shape, data.shape[:3])\n assert_equal(dtifit.rd.shape, data.shape[:3])\n assert_equal(dtifit.trace.shape, data.shape[:3])\n assert_equal(dtifit.mode.shape, data.shape[:3])\n assert_equal(dtifit.linearity.shape, data.shape[:3])\n assert_equal(dtifit.planarity.shape, data.shape[:3])\n assert_equal(dtifit.sphericity.shape, data.shape[:3])\n \n # Make some synthetic data\n b0 = 1000.\n bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)\n # The first b value is 0., so we take the second one:\n B = bvals[1]\n #Scale the eigenvalues and tensor by the B value so the units match\n D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B\n evals = np.array([2., 1., 0.]) / B\n md = evals.mean()\n tensor = from_lower_triangular(D)\n A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)\n mode = 3 * np.sqrt(6) * np.linalg.det(A_squiggle / np.linalg.norm(A_squiggle))\n evecs = np.linalg.eigh(tensor)[1]\n #Design Matrix\n X = dti.design_matrix(gtab)\n #Signals\n Y = np.exp(np.dot(X, D))\n assert_almost_equal(Y[0], b0)\n Y.shape = (-1,) + Y.shape\n\n # Test fitting with different methods:\n for fit_method in ['OLS', 'WLS', 'NLLS']:\n tensor_model = dti.TensorModel(gtab,\n fit_method=fit_method)\n\n tensor_fit = tensor_model.fit(Y)\n assert_true(tensor_fit.model is tensor_model)\n assert_equal(tensor_fit.shape, Y.shape[:-1])\n assert_array_almost_equal(tensor_fit.evals[0], evals)\n\n assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,\n err_msg=\\\n \"Calculation of tensor from Y 
does not compare to analytical solution\")\n\n assert_almost_equal(tensor_fit.md[0], md)\n assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)\n assert_equal(tensor_fit.directions.shape[-2], 1)\n assert_equal(tensor_fit.directions.shape[-1], 3)\n\n # Test error-handling:\n assert_raises(ValueError,\n dti.TensorModel,\n gtab,\n fit_method='crazy_method')\n\n\ndef test_indexing_on_TensorFit():\n params = np.zeros([2, 3, 4, 12])\n fit = dti.TensorFit(None, params)\n\n # Should return a TensorFit of appropriate shape\n assert_equal(fit.shape, (2, 3, 4))\n fit1 = fit[0]\n assert_equal(fit1.shape, (3, 4))\n assert_equal(type(fit1), dti.TensorFit)\n fit1 = fit[0, 0, 0]\n assert_equal(fit1.shape, ())\n assert_equal(type(fit1), dti.TensorFit)\n fit1 = fit[[0], slice(None)]\n assert_equal(fit1.shape, (1, 3, 4))\n assert_equal(type(fit1), dti.TensorFit)\n\n # Should raise an index error if too many indices are passed\n assert_raises(IndexError, fit.__getitem__, (0, 0, 0, 0))\n\n\ndef test_fa_of_zero():\n evals = np.zeros((4, 3))\n fa = fractional_anisotropy(evals)\n assert_array_equal(fa, 0)\n\n\ndef test_diffusivities():\n psphere = get_sphere('symmetric362')\n bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))\n bvals = np.zeros(len(bvecs)) + 1000\n bvals[0] = 0\n gtab = grad.gradient_table(bvals, bvecs)\n mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))\n mevecs = [ np.array( [ [1, 0, 0], [0, 1, 0], [0, 0, 1] ] ),\n np.array( [ [0, 0, 1], [0, 1, 0], [1, 0, 0] ] ) ]\n S = single_tensor( gtab, 100, mevals[0], mevecs[0], snr=None )\n\n dm = dti.TensorModel(gtab, 'LS')\n dmfit = dm.fit(S)\n\n md = mean_diffusivity(dmfit.evals)\n Trace = trace(dmfit.evals)\n rd = radial_diffusivity(dmfit.evals)\n ad = axial_diffusivity(dmfit.evals)\n lin = linearity(dmfit.evals)\n plan = planarity(dmfit.evals)\n spher = sphericity(dmfit.evals)\n \n assert_almost_equal(md, (0.0015 + 0.0003 + 0.0001) / 3)\n assert_almost_equal(Trace, (0.0015 + 0.0003 + 0.0001))\n assert_almost_equal(ad, 0.0015)\n assert_almost_equal(rd, (0.0003 + 0.0001) / 2)\n assert_almost_equal(lin, (0.0015 - 0.0003)/Trace)\n assert_almost_equal(plan, 2 * (0.0003 - 0.0001)/Trace)\n assert_almost_equal(spher, (3 * 0.0001)/Trace)\n\n\ndef test_color_fa():\n data, gtab = dsi_voxels()\n dm = dti.TensorModel(gtab, 'LS')\n dmfit = dm.fit(data)\n fa = fractional_anisotropy(dmfit.evals)\n cfa = color_fa(fa, dmfit.evecs)\n\n # evecs should be of shape (fa, 3, 3)\n fa = np.ones((3, 3, 3))\n evecs = np.zeros(fa.shape + (3, 3))\n evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n\n assert_equal(fa.shape, evecs[..., 0, 0].shape)\n assert_equal((3, 3), evecs.shape[-2:])\n\n # 3D test case\n fa = np.ones((3, 3, 3))\n evecs = np.zeros(fa.shape + (3, 3))\n evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n cfa = color_fa(fa, evecs)\n cfa_truth = np.array([1, 0, 0])\n true_cfa = np.reshape(np.tile(cfa_truth, 27), [3, 3, 3, 3])\n\n assert_array_equal(cfa, true_cfa)\n\n # 2D test case\n fa = np.ones((3, 3))\n evecs = np.zeros(fa.shape + (3, 3))\n evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n cfa = color_fa(fa, evecs)\n cfa_truth = np.array([1, 0, 0])\n true_cfa = np.reshape(np.tile(cfa_truth, 9), [3, 3, 3])\n\n assert_array_equal(cfa, true_cfa)\n\n # 1D test case\n fa = np.ones((3))\n evecs = np.zeros(fa.shape + (3, 3))\n evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n cfa = color_fa(fa, evecs)\n cfa_truth = np.array([1, 0, 0])\n true_cfa = np.reshape(np.tile(cfa_truth, 
3), [3, 3])\n\n assert_array_equal(cfa, true_cfa)\n\n\ndef test_WLS_and_LS_fit():\n \"\"\"\n Tests the WLS and LS fitting functions to see if they returns the correct\n eigenvalues and eigenvectors.\n\n Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii\n as the data.\n\n \"\"\"\n\n ### Defining Test Voxel (avoid nibabel dependency) ###\n\n #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s\n b0 = 1000.\n bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))\n B = bval[1]\n #Scale the eigenvalues and tensor by the B value so the units match\n D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B\n evals = np.array([2., 1., 0.]) / B\n md = evals.mean()\n tensor = from_lower_triangular(D)\n #Design Matrix\n gtab = grad.gradient_table(bval, bvec)\n X = dti.design_matrix(gtab)\n #Signals\n Y = np.exp(np.dot(X, D))\n assert_almost_equal(Y[0], b0)\n Y.shape = (-1,) + Y.shape\n\n\n ### Testing WLS Fit on Single Voxel ###\n #Estimate tensor from test signals\n model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS')\n tensor_est = model.fit(Y)\n assert_equal(tensor_est.shape, Y.shape[:-1])\n assert_array_almost_equal(tensor_est.evals[0], evals)\n assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,\n err_msg=\"Calculation of tensor from Y does not \"\n \"compare to analytical solution\")\n assert_almost_equal(tensor_est.md[0], md)\n\n # Test that we can fit a single voxel's worth of data (a 1d array)\n y = Y[0]\n tensor_est = model.fit(y)\n assert_equal(tensor_est.shape, tuple())\n assert_array_almost_equal(tensor_est.evals, evals)\n assert_array_almost_equal(tensor_est.quadratic_form, tensor)\n assert_almost_equal(tensor_est.md, md)\n assert_array_almost_equal(tensor_est.lower_triangular(b0), D)\n\n # Test using fit_method='LS'\n model = TensorModel(gtab, min_signal=1e-8, fit_method='LS')\n tensor_est = model.fit(y)\n assert_equal(tensor_est.shape, tuple())\n assert_array_almost_equal(tensor_est.evals, evals)\n assert_array_almost_equal(tensor_est.quadratic_form, tensor)\n assert_almost_equal(tensor_est.md, md)\n assert_array_almost_equal(tensor_est.lower_triangular(b0), D)\n assert_array_almost_equal(tensor_est.linearity, linearity(evals))\n assert_array_almost_equal(tensor_est.planarity, planarity(evals))\n assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))\n\n\n\ndef test_masked_array_with_Tensor():\n data = np.ones((2, 4, 56))\n mask = np.array([[True, False, False, True],\n [True, False, True, False]])\n\n bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)\n\n tensor_model = TensorModel(gtab, min_signal=1e-9)\n tensor = tensor_model.fit(data, mask=mask)\n assert_equal(tensor.shape, (2, 4))\n assert_equal(tensor.fa.shape, (2, 4))\n assert_equal(tensor.evals.shape, (2, 4, 3))\n assert_equal(tensor.evecs.shape, (2, 4, 3, 3))\n\n tensor = tensor[0]\n assert_equal(tensor.shape, (4,))\n assert_equal(tensor.fa.shape, (4,))\n assert_equal(tensor.evals.shape, (4, 3))\n assert_equal(tensor.evecs.shape, (4, 3, 3))\n\n tensor = tensor[0]\n assert_equal(tensor.shape, tuple())\n assert_equal(tensor.fa.shape, tuple())\n assert_equal(tensor.evals.shape, (3,))\n assert_equal(tensor.evecs.shape, (3, 3))\n assert_equal(type(tensor.model_params), np.ndarray)\n\n\ndef test_fit_method_error():\n bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)\n\n # This should work\n tensor_model = TensorModel(gtab, 
fit_method='WLS')\n\n # This should raise an error because there is no such fit_method\n assert_raises(ValueError, TensorModel, gtab, min_signal=1e-9,\n fit_method='s')\n\n\ndef test_lower_triangular():\n tensor = np.arange(9).reshape((3, 3))\n D = lower_triangular(tensor)\n assert_array_equal(D, [0, 3, 4, 6, 7, 8])\n D = lower_triangular(tensor, 1)\n assert_array_equal(D, [0, 3, 4, 6, 7, 8, 0])\n assert_raises(ValueError, lower_triangular, np.zeros((2, 3)))\n shape = (4, 5, 6)\n many_tensors = np.empty(shape + (3, 3))\n many_tensors[:] = tensor\n result = np.empty(shape + (6,))\n result[:] = [0, 3, 4, 6, 7, 8]\n D = lower_triangular(many_tensors)\n assert_array_equal(D, result)\n D = lower_triangular(many_tensors, 1)\n result = np.empty(shape + (7,))\n result[:] = [0, 3, 4, 6, 7, 8, 0]\n assert_array_equal(D, result)\n\n\ndef test_from_lower_triangular():\n result = np.array([[0, 1, 3],\n [1, 2, 4],\n [3, 4, 5]])\n D = np.arange(7)\n tensor = from_lower_triangular(D)\n assert_array_equal(tensor, result)\n result = result * np.ones((5, 4, 1, 1))\n D = D * np.ones((5, 4, 1))\n tensor = from_lower_triangular(D)\n assert_array_equal(tensor, result)\n\n\ndef test_all_constant():\n \"\"\"\n\n \"\"\"\n bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)\n fit_methods = ['LS', 'OLS', 'NNLS']\n for fit_method in fit_methods:\n dm = dti.TensorModel(gtab)\n assert_almost_equal(dm.fit(np.zeros(bvals.shape[0])).fa, 0)\n assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)\n\n\ndef test_mask():\n data, gtab = dsi_voxels()\n dm = dti.TensorModel(gtab, 'LS')\n mask = np.zeros(data.shape[:-1], dtype=bool)\n mask[0, 0, 0] = True\n dtifit = dm.fit(data)\n dtifit_w_mask = dm.fit(data, mask=mask)\n # Without a mask it has some value\n assert_(not np.isnan(dtifit.fa[0, 0, 0]))\n # Where mask is False, evals, evecs and fa should all be 0\n assert_array_equal(dtifit_w_mask.evals[~mask], 0)\n assert_array_equal(dtifit_w_mask.evecs[~mask], 0)\n assert_array_equal(dtifit_w_mask.fa[~mask], 0)\n # Except for the one voxel that was selected by the mask:\n assert_almost_equal(dtifit_w_mask.fa[0, 0, 0], dtifit.fa[0, 0, 0])\n\ndef test_nnls_jacobian_fucn():\n b0 = 1000.\n bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table(bval, bvecs)\n B = bval[1]\n\n #Scale the eigenvalues and tensor by the B value so the units match\n D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B\n evals = np.array([2., 1., 0.]) / B\n\n #Design Matrix\n X = dti.design_matrix(gtab)\n\n #Signals\n Y = np.exp(np.dot(X,D))\n\n # Test Jacobian at D\n args = [X, Y]\n analytical = dti._nlls_jacobian_func(D, *args)\n for i in range(len(X)):\n args = [X[i], Y[i]]\n approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)\n assert_true(np.allclose(approx, analytical[i]))\n\n # Test Jacobian at zero\n D = np.zeros_like(D)\n args = [X, Y]\n analytical = dti._nlls_jacobian_func(D, *args)\n for i in range(len(X)):\n args = [X[i], Y[i]]\n approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)\n assert_true(np.allclose(approx, analytical[i]))\n\ndef test_nlls_fit_tensor():\n \"\"\"\n Test the implementation of NLLS and RESTORE\n \"\"\"\n\n b0 = 1000.\n bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table(bval, bvecs)\n B = bval[1]\n\n #Scale the eigenvalues and tensor by the B value so the units match\n D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B\n evals = np.array([2., 1., 
0.]) / B\n md = evals.mean()\n tensor = from_lower_triangular(D)\n\n #Design Matrix\n X = dti.design_matrix(gtab)\n\n #Signals\n Y = np.exp(np.dot(X,D))\n Y.shape = (-1,) + Y.shape\n\n #Estimate tensor from test signals and compare against expected result\n #using non-linear least squares:\n tensor_model = dti.TensorModel(gtab, fit_method='NLLS')\n tensor_est = tensor_model.fit(Y)\n assert_equal(tensor_est.shape, Y.shape[:-1])\n assert_array_almost_equal(tensor_est.evals[0], evals)\n assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)\n assert_almost_equal(tensor_est.md[0], md)\n\n # Using the gmm weighting scheme:\n tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')\n assert_equal(tensor_est.shape, Y.shape[:-1])\n assert_array_almost_equal(tensor_est.evals[0], evals)\n assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)\n assert_almost_equal(tensor_est.md[0], md)\n\n # Use NLLS with some actual 4D data:\n data, bvals, bvecs = get_data('small_25')\n gtab = grad.gradient_table(bvals, bvecs)\n tm1 = dti.TensorModel(gtab, fit_method='NLLS')\n dd = nib.load(data).get_data()\n tf1 = tm1.fit(dd)\n tm2 = dti.TensorModel(gtab)\n tf2 = tm2.fit(dd)\n\n assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)\n\ndef test_restore():\n \"\"\"\n Test the implementation of the RESTORE algorithm\n \"\"\"\n b0 = 1000.\n bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))\n gtab = grad.gradient_table(bval, bvecs)\n B = bval[1]\n\n #Scale the eigenvalues and tensor by the B value so the units match\n D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B\n evals = np.array([2., 1., 0.]) / B\n md = evals.mean()\n tensor = from_lower_triangular(D)\n\n #Design Matrix\n X = dti.design_matrix(gtab)\n\n #Signals\n Y = np.exp(np.dot(X,D))\n Y.shape = (-1,) + Y.shape\n for drop_this in range(1, Y.shape[-1]):\n # RESTORE estimates should be robust to dropping\n this_y = Y.copy()\n this_y[:, drop_this] = 1.0\n tensor_model = dti.TensorModel(gtab, fit_method='restore',\n sigma=67.0)\n\n tensor_est = tensor_model.fit(this_y)\n assert_array_almost_equal(tensor_est.evals[0], evals, decimal=3)\n assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,\n decimal=3)\n\ndef test_adc():\n \"\"\"\n Test the implementation of the calculation of apparent diffusion coefficient\n \"\"\"\n data, gtab = dsi_voxels()\n dm = dti.TensorModel(gtab, 'LS')\n mask = np.zeros(data.shape[:-1], dtype=bool)\n mask[0, 0, 0] = True\n dtifit = dm.fit(data)\n sphere = create_unit_sphere(4)\n \n # The ADC in the principal diffusion direction should be equal to the AD in\n # each voxel:\n\n pdd0 = dtifit.evecs[0,0,0,0]\n sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])\n assert_array_almost_equal(dtifit.adc(sphere_pdd0)[0,0,0],\n dtifit.ad[0,0,0], decimal=5)\n \n \n # Test that it works for cases in which the data is 1D\n dtifit = dm.fit(data[0,0,0])\n sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])\n assert_array_almost_equal(dtifit.adc(sphere_pdd0),\n dtifit.ad, decimal=5)\n\ndef test_predict():\n \"\"\"\n Test model prediction API\n \"\"\"\n psphere = get_sphere('symmetric362')\n bvecs = np.concatenate(([[1, 0, 0]], psphere.vertices))\n bvals = np.zeros(len(bvecs)) + 1000\n bvals[0] = 0\n gtab = grad.gradient_table(bvals, bvecs)\n mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))\n mevecs = [ np.array( [ [1, 0, 0], [0, 1, 0], [0, 0, 1] ] ),\n np.array( [ [0, 0, 1], [0, 1, 0], [1, 0, 0] ] ) ]\n S = single_tensor( gtab, 100, mevals[0], mevecs[0], 
snr=None )\n\n dm = dti.TensorModel(gtab, 'LS')\n dmfit = dm.fit(S)\n \n assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)\n\n data, gtab = dsi_voxels()\n dtim = dti.TensorModel(gtab)\n dtif = dtim.fit(data)\n S0 = np.mean(data[...,gtab.b0s_mask], -1)\n p = dtif.predict(gtab, S0)\n\n" ]
[ [ "numpy.dot", "numpy.random.rand", "numpy.tile", "numpy.mean", "numpy.concatenate", "numpy.zeros_like", "numpy.linalg.norm", "numpy.empty", "numpy.log", "numpy.linalg.eigh", "numpy.eye", "numpy.testing.assert_array_almost_equal", "numpy.arange", "numpy.sqrt", "numpy.array", "numpy.zeros", "numpy.linalg.det", "numpy.allclose", "numpy.isnan", "numpy.trace", "numpy.testing.assert_array_equal", "numpy.ones", "scipy.optimize.approx_fprime" ] ]
changliao1025/pyswat
[ "cdcda1375be8c0f71459a78438b1e9f8a22a77bc" ]
[ "swaty/swaty_read_model_configuration_file.py" ]
[ "from collections import _OrderedDictKeysView\nimport os\nfrom pprint import pp \nimport sys #used to add system path\n\nimport datetime\nimport json\nimport numpy as np\nimport pyearth.toolbox.date.julian as julian\nfrom swaty.auxiliary.text_reader_string import text_reader_string\nfrom swaty.classes.pycase import swatcase\n\npDate = datetime.datetime.today()\nsDate_default = \"{:04d}\".format(pDate.year) + \"{:02d}\".format(pDate.month) + \"{:02d}\".format(pDate.day)\n\ndef swaty_read_model_configuration_file(sFilename_configuration_in , \\\n iFlag_read_discretization_in = None,\\\n iFlag_standalone_in=None, \\\n iCase_index_in = None, sDate_in = None,\\\n iYear_start_in = None,\\\n iMonth_start_in = None,\\\n iDay_start_in = None, \\\n iYear_end_in = None,\\\n iMonth_end_in = None,\\\n iDay_end_in = None, \\\n sWorkspace_input_in =None, \\\n sWorkspace_output_in =None ,\\\n aParameter_in=None ):\n\n if not os.path.isfile(sFilename_configuration_in):\n print(sFilename_configuration_in + ' does not exist')\n return\n \n # Opening JSON file\n with open(sFilename_configuration_in) as json_file:\n aConfig = json.load(json_file) \n\n sModel = aConfig['sModel'] \n sRegion = aConfig['sRegion']\n\n if sWorkspace_input_in is not None:\n sWorkspace_input = sWorkspace_input_in\n else:\n sWorkspace_input = aConfig['sWorkspace_input']\n pass\n if sWorkspace_output_in is not None:\n sWorkspace_output = sWorkspace_output_in\n else:\n sWorkspace_output = aConfig['sWorkspace_output']\n pass\n\n if iFlag_read_discretization_in is not None:\n iFlag_read_discretization=1\n else:\n iFlag_read_discretization=0\n pass\n \n if iFlag_standalone_in is not None: \n iFlag_standalone = iFlag_standalone_in\n else: \n iFlag_standalone = int( aConfig['iFlag_standalone'])\n if sDate_in is not None:\n sDate = sDate_in\n else:\n sDate = aConfig['sDate']\n pass\n if iCase_index_in is not None: \n iCase_index = iCase_index_in\n else: \n iCase_index = int( aConfig['iCase_index'])\n \n \n if iYear_start_in is not None: \n iYear_start = iYear_start_in\n else: \n iYear_start = int( aConfig['iYear_start'])\n\n if iMonth_start_in is not None: \n iMonth_start = iYear_end_in\n else: \n iMonth_start = int( aConfig['iMonth_start'])\n\n if iDay_start_in is not None: \n iDay_start = iDay_start_in\n else: \n iDay_start = int( aConfig['iDay_start'])\n \n if iYear_end_in is not None: \n iYear_end = iYear_end_in\n else: \n iYear_end = int( aConfig['iYear_end'])\n \n if iMonth_end_in is not None: \n iMonth_end = iMonth_end_in\n else: \n iMonth_end = int( aConfig['iMonth_end'])\n\n if iDay_end_in is not None:\n iDay_end = iDay_end_in\n else: \n iDay_end = int( aConfig['iDay_end'])\n\n if aParameter_in is not None:\n iFlag_paramter = 1\n aParameter = aParameter_in\n else: \n iFlag_paramter = 0\n \n\n #by default, this system is used to prepare inputs for modflow simulation.\n #however, it can also be used to prepare gsflow simulation inputs.\n\n #based on global variable, a few variables are calculate once\n #calculate the modflow simulation period\n #https://docs.python.org/3/library/datetime.html#datetime-objects\n \n aConfig['iFlag_standalone'] = iFlag_standalone\n\n aConfig['iFlag_read_discretization'] = iFlag_read_discretization\n aConfig['iCase_index'] = iCase_index\n aConfig['sDate'] = sDate\n aConfig['sWorkspace_input'] = sWorkspace_input\n aConfig['sWorkspace_output'] = sWorkspace_output\n dummy1 = datetime.datetime(iYear_start, iMonth_start, iDay_start)\n dummy2 = datetime.datetime(iYear_end, iMonth_end, iDay_end)\n julian1 = 
julian.to_jd(dummy1, fmt='jd')\n julian2 = julian.to_jd(dummy2, fmt='jd')\n\n nstress =int( julian2 - julian1 + 1 ) \n aConfig['lJulian_start'] = julian1\n aConfig['lJulian_end'] = julian2\n aConfig['nstress'] = nstress \n \n sFilename_swat = aConfig['sFilename_swat'] \n \n #data\n oSwat = swatcase(aConfig)\n \n\n if iFlag_paramter ==1:\n #oSwat.nParameter_watershed = 0\n #oSwat.nParameter_subbasin = 0\n #oSwat.nParameter_hru = 0\n #oSwat.nParameter_soil = 0\n #oSwat.aParameter_watershed = list()\n #oSwat.aParameter_subbasin = list()\n #oSwat.aParameter_hru = list()\n #oSwat.aParameter_soil = list()\n for i in range(len(aParameter)):\n pParameter = aParameter[i]\n sName = pParameter.sName\n iType = pParameter.iParameter_type\n lIndex_subbasin = pParameter.lIndex_subbasin\n lIndex_hru = pParameter.lIndex_hru\n lIndex_soil_layer = pParameter.lIndex_soil_layer\n dValue = pParameter.dValue_current\n iFlag_found = 0\n if iType == 1: \n \n for j in range(oSwat.pWatershed.nParameter_watershed):\n pPara = oSwat.pWatershed.aParameter_watershed[j]\n sName1 = pPara.sName\n if sName.lower() == sName1.lower():\n #replace\n oSwat.pWatershed.aParameter_watershed[j].dValue_current = dValue\n iFlag_found = 1\n break\n \n #if iFlag_found == 0:\n # #this one is not in the list yet\n # pass\n pass \n else:\n if iType == 2: #subbasin level\n #get name index\n \n for j in np.arange(oSwat.nsubbasin ):\n iIndex_name = oSwat.aSubbasin[j].aParameter_subbasin_name.index(sName) \n pPara = oSwat.aSubbasin[j].aParameter_subbasin[iIndex_name]\n sName1 = pPara.sName\n iIndex1 = pPara.lIndex_subbasin\n if lIndex_subbasin == iIndex1:\n #replace\n oSwat.aSubbasin[j].aParameter_subbasin[iIndex_name].dValue_current = dValue\n iFlag_found = 1\n break\n pass\n else: #hru level\n if iType == 3:\n for j in np.arange(oSwat.nhru_combination ):\n iIndex_name = oSwat.aHru_combination[j].aParameter_hru_name.index(sName) \n pPara = oSwat.aHru_combination[j].aParameter_hru[iIndex_name]\n sName1 = pPara.sName\n iIndex1 = pPara.lIndex_hru\n if lIndex_hru == iIndex1:\n #replace\n oSwat.aHru_combination[j].aParameter_hru[iIndex_name].dValue_current = dValue\n iFlag_found = 1\n break\n pass\n else: #soil layer\n for j in np.arange(oSwat.nhru_combination ):\n for k in np.arange(oSwat.aHru_combination[j].nSoil_layer):\n iIndex_name = oSwat.aHru_combination[j].aSoil[k].aParameter_soil_name.index(sName) \n pPara = oSwat.aHru_combination[j].aSoil[k].aParameter_soil[iIndex_name]\n sName1 = pPara.sName\n iIndex0 = pPara.lIndex_hru\n iIndex1 = pPara.lIndex_soil_layer\n if lIndex_hru ==iIndex0 and lIndex_soil_layer == iIndex1:\n #replace\n oSwat.aHru_combination[j].aSoil[k].aParameter_soil[iIndex_name].dValue_current = dValue\n iFlag_found = 1\n break\n pass\n pass #\n\n \n pass\n \n \n \n\n\n \n \n return oSwat" ]
[ [ "numpy.arange" ] ]
dschaub95/FINDER-TSP
[ "d69ee6a954aac317b717622a9c0744e3f2827c64" ]
[ "py_utils/FINDER_test_utils.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport tsplib95\nimport networkx as nx\nfrom tqdm import tqdm\nimport sys\nimport re\n\ndef prepare_testset_FINDER(data_dir, scale_factor=0.000001):\n graph_list = []\n\n atoi = lambda text : int(text) if text.isdigit() else text\n natural_keys = lambda text : [atoi(c) for c in re.split('(\\d+)', text)]\n\n fnames = os.listdir(data_dir)\n fnames.sort(key=natural_keys)\n print(\"Loading test graphs...\")\n for fname in tqdm(fnames):\n try:\n if not '.tsp' in fname or '.sol' in fname:\n continue\n problem = tsplib95.load(data_dir + fname)\n g = problem.get_graph()\n \n except:\n print('Error, while loading file {}'.format(fname))\n # remove edges from one node to itself\n ebunch=[(k,k) for k in g.nodes]\n g.remove_edges_from(ebunch)\n # reset node index to start at zero\n mapping = {k:i for i,k in enumerate(g.nodes)}\n g = nx.relabel_nodes(g, mapping)\n # scale size of the graphs such that it fits into 0,1 square\n for node in g.nodes:\n g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor\n for edge in g.edges:\n g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor\n graph_list.append(g)\n print(\"Number of loaded test graphs:\",len(graph_list))\n return graph_list, fnames\n\ndef prepare_testset_S2VDQN(folder, scale_factor=0.000001):\n if folder[-1] == '/':\n folder = folder[0:-1]\n graph_list = []\n fnames = []\n print(\"Loading test graphs...\")\n with open(f'{folder}/paths.txt', 'r') as f:\n for line in tqdm(f):\n fname = line.split('/')[-1].strip()\n file_path = '%s/%s' % (folder, fname)\n try:\n if not '.tsp' in fname or '.sol' in fname:\n continue\n problem = tsplib95.load(file_path)\n g = problem.get_graph()\n \n except:\n print('Error, while loading file {}'.format(fname))\n \n # remove edges from one node to itself\n ebunch=[(k,k) for k in g.nodes]\n g.remove_edges_from(ebunch)\n # reset node index to start at zero\n mapping = {k:i for i,k in enumerate(g.nodes)}\n g = nx.relabel_nodes(g, mapping)\n # scale size of the graphs such that it fits into 0,1 square\n for node in g.nodes:\n g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor\n for edge in g.edges:\n g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor\n graph_list.append(g)\n fnames.append(fname)\n # print(\"Number of loaded test graphs:\",len(graph_list))\n return graph_list, fnames\n\ndef get_approx_ratios(data_dir, test_lengths):\n fnames = get_fnames(data_dir)\n true_lengths = []\n len_dict = get_len_dict(data_dir)\n for fname in fnames:\n true_lengths.append(len_dict[fname])\n approx_ratios = [length[0]/length[1] for length in zip(test_lengths, true_lengths)]\n mean_approx_ratio = np.mean([length[0]/length[1] for length in zip(test_lengths, true_lengths)])\n return approx_ratios, mean_approx_ratio\n\ndef get_fnames(dir, search_phrase='tsp'):\n atoi = lambda text : int(text) if text.isdigit() else text\n natural_keys = lambda text : [atoi(c) for c in re.split('(\\d+)', text)]\n try:\n fnames = [f for f in os.listdir(dir) if os.path.isfile(f'{dir}/{f}')]\n fnames.sort(key=natural_keys)\n except:\n print('\\nBad directory!')\n fnames = [fname for fname in fnames if search_phrase in fname]\n return fnames\n\ndef get_len_dict(folder):\n # get lengths\n with open(f'{folder}/lengths.txt', 'r') as f:\n lines = f.readlines()\n file_names = [line.split(':')[0].strip() for k, line in enumerate(lines)]\n test_lens = [float(line.split(':')[-1].strip()) for k, line in enumerate(lines)]\n len_dict = 
dict(zip(file_names, test_lens))\n return len_dict\n\ndef save_solutions(data_dir, solutions, model_name, suffix=''):\n fnames = get_fnames(data_dir)\n sol_df = pd.DataFrame()\n idx = 0\n tqdm.write(\"Saving solutions...\")\n for fname in tqdm(fnames):\n if not '.tsp' in fname or '.sol' in fname:\n continue\n tmp_df = pd.DataFrame()\n tmp_df[fname] = solutions[idx]\n sol_df = pd.concat([sol_df,tmp_df.astype(int)], ignore_index=False, axis=1)\n idx += 1\n test_set_folder = data_dir.split(\"/\")[-2]\n test_set_name = data_dir.split(\"/\")[-1]\n result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'\n model_name_short = '_'.join(model_name.split('_')[0:-4])\n create_dir(result_path)\n if suffix:\n sol_df.to_csv(f'{result_path}/solutions_{model_name_short}_{suffix}.csv')\n else:\n sol_df.to_csv(f'{result_path}/solutions_{model_name_short}.csv')\n\ndef save_lengths(data_dir, lengths, model_name, suffix=''):\n fnames = get_fnames(data_dir)\n lens_df = pd.DataFrame()\n idx = 0\n tqdm.write(\"Saving solution lengths...\")\n for fname in tqdm(fnames):\n if not '.tsp' in fname or '.sol' in fname:\n continue\n tmp_df = pd.DataFrame()\n tmp_df[fname] = [lengths[idx]]\n lens_df = pd.concat([lens_df,tmp_df], ignore_index=False, axis=1)\n idx += 1\n test_set_folder = data_dir.split(\"/\")[-2]\n test_set_name = data_dir.split(\"/\")[-1]\n result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'\n model_name_short = '_'.join(model_name.split('_')[0:-4])\n create_dir(result_path)\n if suffix:\n lens_df.to_csv(f'{result_path}/tour_lengths_{model_name_short}_{suffix}.csv')\n else:\n lens_df.to_csv(f'{result_path}/tour_lengths_{model_name_short}.csv')\n\ndef save_approx_ratios(data_dir, approx_ratios, model_name, suffix=''):\n fnames = get_fnames(data_dir)\n approx_df = pd.DataFrame()\n idx = 0\n tqdm.write(\"Saving approximation ratios...\")\n for fname in tqdm(fnames):\n if not '.tsp' in fname or '.sol' in fname:\n continue\n tmp_df = pd.DataFrame()\n tmp_df[fname] = [approx_ratios[idx]]\n approx_df = pd.concat([approx_df,tmp_df], ignore_index=False, axis=1)\n idx += 1\n test_set_folder = data_dir.split(\"/\")[-2]\n test_set_name = data_dir.split(\"/\")[-1]\n result_path = f'results/{model_name}/{test_set_folder}/{test_set_name}'\n model_name_short = '_'.join(model_name.split('_')[0:-4])\n create_dir(result_path)\n if suffix:\n approx_df.to_csv(f'{result_path}/approx_ratios_{model_name_short}_{suffix}.csv')\n else:\n approx_df.to_csv(f'{result_path}/approx_ratios_{model_name_short}.csv')\n\ndef create_dir(save_dir):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\ndef get_test_approx_ratios_for_model(test_set_names, model_name, search_strategy='greedy'):\n mean_approx_ratios = []\n std_approx_ratios = []\n for test_set in test_set_names:\n result_dir = f'../results/{model_name}/test_sets/{test_set}'\n try:\n fnames, approx_ratios, test_lengths, solutions = get_data_from_result_files(result_dir, search_strategy=search_strategy)\n except: \n # print(search_strategy)\n # print('Using placeholders!')\n approx_ratios = [np.nan]\n mean_approx_ratios.append(np.mean(approx_ratios))\n std_approx_ratios.append(np.std(approx_ratios))\n return mean_approx_ratios, std_approx_ratios\n\ndef get_data_from_result_files(result_dir, search_strategy='greedy'):\n # print(result_dir)\n for f in os.listdir(result_dir):\n if not search_strategy in f:\n continue\n if 'solution' in f:\n sol_df = pd.read_csv(f'{result_dir}/{f}', index_col=0)\n elif 'tour' in f:\n len_df = 
pd.read_csv(f'{result_dir}/{f}', index_col=0)\n elif 'approx' in f:\n approx_df = pd.read_csv(f'{result_dir}/{f}', index_col=0)\n\n approx_ratios = list(approx_df.iloc[0])\n fnames = [f'{fname}.tsp' if not '.tsp' in fname else fname for fname in len_df.columns ]\n test_lengths = list(len_df.iloc[0])\n\n solutions = []\n for column in sol_df.columns:\n raw_list = list(sol_df[column])\n processed_list = [int(k) for k in raw_list if not np.isnan(k)]\n solutions.append(processed_list)\n return fnames, approx_ratios, test_lengths, solutions\n\ndef get_best_ckpt(model_path, rank=1):\n best_ckpt_path = f'{model_path}/best_checkpoint'\n fnames = get_fnames(best_ckpt_path, search_phrase='ckpt')\n for fname in fnames:\n if 'rank' in fname:\n if f'rank_{rank}.' in fname:\n best_ckpt_file = '.'.join(fname.split('.')[0:-1])\n break\n else:\n best_ckpt_file = '.'.join(fnames[0].split('.')[0:-1])\n break\n best_ckpt_file_path = f'{best_ckpt_path}/{best_ckpt_file}'\n return best_ckpt_file_path\n\ndef get_model_file(model_path):\n k = 0\n for f in os.listdir(model_path):\n if not 'ckpt' in f:# or (nrange_str not in f):\n continue\n # print(f)\n f_len = f.split('_')[-1].split('.')[0]\n tour_length = float(f_len)/(10**(len(f_len)-1))\n if f_len[0] != '1':\n continue\n # norm_tour_length = tour_length/float(config['valid_sol'])\n \n else:\n k += 1\n model_file = '.'.join(f.split('.')[0:-1])\n model_base_path = model_path\n if k > 0:\n print(\"Best model file:\", model_file)\n else:\n print(\"Could not find any checkpoint file in the specified folder!\")\n return model_file, model_base_path, tour_length\n\ndef prepare_real_samples(data_dir):\n if not data_dir[-1] == '/':\n data_dir = data_dir + '/'\n prepared_graphs = []\n raw_graphs = []\n fnames = []\n for fname in os.listdir(data_dir):\n if not '.tsp' in fname:\n continue\n try:\n problem = tsplib95.load(data_dir + fname)\n g = problem.get_graph()\n except:\n print('Error loading tsp file!')\n continue\n #try:\n # remove edges from nodes to itself\n ebunch=[(k,k) for k in g.nodes()]\n g.remove_edges_from(ebunch)\n # mapping\n mapping = {k:i for i,k in enumerate(g.nodes)}\n g = nx.relabel_nodes(g, mapping)\n # save raw graph\n raw_graphs.append(g.copy())\n # make sure every coordinate is positive\n min_x = np.inf\n max_x = -np.inf\n min_y = np.inf\n max_y = -np.inf\n for node in g.nodes:\n x = g.nodes[node]['coord'][0]\n y = g.nodes[node]['coord'][1]\n if x > max_x:\n max_x = x\n if x < min_x:\n min_x = x\n if y > max_y:\n max_y = y\n if y < min_y:\n min_y = y\n if min_x <= 0:\n x_offset = -min_x + 1\n else:\n x_offset = 0\n if min_y <= 0:\n y_offset = -min_y + 1\n else:\n y_offset = 0\n # change node positions into 0,1 square\n for node in g.nodes():\n g.nodes[node]['coord'] = np.array(g.nodes[node]['coord'])\n g.nodes[node]['coord'][0] += x_offset\n g.nodes[node]['coord'][1] += y_offset\n if max_x > max_y:\n scale_factor = 1 / (1.05 * max_x)\n else:\n scale_factor = 1 / (1.05 * max_y)\n g.nodes[node]['coord'] = np.array(g.nodes[node]['coord']) * scale_factor\n for edge in g.edges:\n g.edges[edge]['weight'] = g.edges[edge]['weight'] * scale_factor\n #except:\n # g = nx.Graph()\n # print(\"Error altering graph!\")\n # continue\n prepared_graphs.append(g)\n \n fnames.append(fname)\n return raw_graphs, prepared_graphs, fnames\n\n" ]
[ [ "numpy.array", "numpy.isnan", "pandas.DataFrame", "numpy.mean", "numpy.std", "pandas.concat", "pandas.read_csv" ] ]
eelgin/BigDataUsingPython
[ "c4a25a7db37c5fe0b12a180b850a8900772f1f64" ]
[ "final_project/defunct/data_formatter.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\n\ndirectory = 'data/individual_stocks_5yr'\nfiles = os.listdir(directory)\n\nsp_data = pd.DataFrame(columns=['date'])\n\nfor file in files:\n\n\ttmp = pd.read_csv(directory+'/'+file)\n\n\tname = file.rpartition('_')[0]\n\n\tprint (name)\n\n\ttmp[name] = (tmp[['high','low']].sum(axis=1) / 2.0)\n\ttmp = tmp.drop(columns=['high','low','open','close','volume','Name'])\n\n\t#tmp[]\n\n\tsp_data = pd.merge(sp_data, tmp, how='outer', on='date')\n\n\tprint (sp_data.head(5))\n\n\tdel tmp\n\nindx_data = sp_data[list(sp_data.columns)].sum(axis=1)\n\nsp_data.insert(loc=1, column='S&P500', value=indx_data)\nprint (sp_data.head(5))\n\nsp_data.to_csv('data/formatted_data.csv')\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.merge" ] ]
ZiyiDuan/LewPeaLab_EEG_pipeline
[ "1f923585d18edfe6b43637bd1cb11db1a1740698" ]
[ "EEG_pipeline_MVPA/MVPA_multi-subjects_8_posBins.py" ]
[ "# !/usr/bin/env python\n# -*-coding:utf-8 -*-\n\n\"\"\"\n# File : MVPA_multi-subjects_8_posBins.py\n# Time :2021/4/16 10:40\n# Author :ZoeDuan\n# version :python 3.7\n# Description: The pipeline of decoding 8-class alpha-band power from Foster et al(2017)\n# Data: Experiment 2a, 11 Subjects, preprocessed data (epochs)\n\"\"\"\n\nimport os\nimport mne\nimport numpy as np\nimport time\nimport pandas as pd\nimport h5py\nfrom tqdm import trange\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score, cross_val_predict, StratifiedShuffleSplit\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,\n cross_val_multiscore, Vectorizer)\n\nfrom mini_block_function_MVPA import *\n\n# detect how many subjects you have\ndir = os.getcwd()\ndata_path = dir + '/Preprocessed_data'\nfiles = os.listdir(data_path)\nsub_num = len(files) # the number of subjects in total\nprint('You have %d participants in total' % sub_num)\n\n# read all participants' data into all_data\nall_data = {}\nsubIDs = np.array([])\nfor file in files:\n if not os.path.isdir(file):\n # find subID from file name\n afterInd = file.find('_')\n subID = file[0:afterInd]\n subIDs = np.append(subIDs, subID)\n data = mne.read_epochs(data_path + '/' + file, preload=False)\n all_data[subID] = data\n\n\n# create file folder to save prepared data for decoding\nprepared_data_dir = dir + '/prepared_data_8_posBins/'\nexist = os.path.exists(prepared_data_dir)\nif not exist:\n os.makedirs(prepared_data_dir)\n\n# create file folder to save classification results\nclassification_results_dir = dir + '/classification_results_8_posBins/'\nexist = os.path.exists(classification_results_dir)\nif not exist:\n os.makedirs(classification_results_dir)\n\n\n# define variables\n# define band-pass frequency\nl_freq, h_freq = 8, 12\n# define decoding timeWindow -300ms ~ 1250ms\ntimeWindow = [-0.3, 1.25]\n# define resample numbers for each time point\nn_resample = 5\n\n# create time string\ntime_string = str(time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime()))\n\n# prepare data for each subject\nX_all = {}\ny_all = {}\ntime_points_resample_all = {}\nfor subID in subIDs:\n # read current data\n epochs = all_data[subID]\n # load current data\n epochs.load_data()\n # pick eeg channels only\n epochs.pick_types(eeg=True)\n # get sample frequency\n sfreq = epochs.info['sfreq']\n\n # get block_index and labels\n events_id = epochs.events[:, 2]\n blocks = events_id // 10\n labels = events_id % 10\n\n # get alpha-band power\n epochs_filt = epochs.copy().filter(l_freq=l_freq, h_freq=h_freq, phase='zero-double')\n epochs_filt.apply_hilbert(picks=['eeg'], envelope=True, n_fft='auto')\n power = epochs_filt.get_data()\n\n # extract sub-data within timeWindow\n all_time_points = epochs.times\n left_timepoint_index = int((timeWindow[0] - all_time_points[0])*sfreq)\n right_timepoint_index = int(len(all_time_points) - (all_time_points[-1] - timeWindow[1])*sfreq)\n sub_time_points = all_time_points[left_timepoint_index:right_timepoint_index]\n assert sub_time_points[0] == timeWindow[0]\n assert sub_time_points[-1] == timeWindow[1]\n # extract sub-power within timeWindow\n sub_power = power[:, :, left_timepoint_index:right_timepoint_index]\n\n n_trial = sub_power.shape[0]\n n_channel = sub_power.shape[1]\n n_times = sub_power.shape[2]\n\n # delete 
epochs to save memory\n del epochs, epochs_filt\n\n # down-sampling data based on n_resample\n n_times_resample = round(n_times / n_resample)\n time_points_resample = np.linspace(timeWindow[0], timeWindow[1], n_times_resample)\n sub_power_resample = np.zeros([n_trial, n_channel, n_times_resample], dtype=np.float)\n # down-sampling data by combining every n_resample data\n for t in range(n_times_resample):\n sub_power_resample[:, :, t] = np.average(sub_power[:, :, t * n_resample: (t + 1) * n_resample], axis=-1)\n assert sub_power_resample.shape == (n_trial, n_channel, n_times_resample)\n\n # define data for decoding\n X = np.array(sub_power_resample)\n y = np.array(labels)\n\n del sub_power_resample, labels\n\n # save each participants' alpha_band data\n f = h5py.File(prepared_data_dir + '/' + subID + '_prepared_data.txt', 'w')\n f['X'] = X\n f['y'] = y\n f['time_points_resample'] = time_points_resample\n f.close()\n # read each participants' alpha_band data\n # f = h5py.File(prepared_data_dir + '/' + subID + '_prepared_data.txt', 'r')\n # X = f['X'][()]\n # y = f['y'][()]\n # time_points_resample = f['time_points_resample'][()]\n\n X_all[subID] = X\n y_all[subID] = y\n time_points_resample_all[subID] = time_points_resample\n\n\n# run MVPA decoding for each subject\n\n# define classifier\nclf = svm.SVC(kernel='linear', C=1, decision_function_shape='ovr')\n# define cross-validation\ncv = StratifiedShuffleSplit(n_splits=1, test_size=1/3)\n# define scoring\nscoring = 'accuracy'\n# define iteration times\nn_iteration = 10\n# define trial numbers for mini-block\ntrials_mini_block = 5\n# define the Temporal decoding object\n# time_decod = SlidingEstimator(clf, n_jobs=1, scoring=scoring, verbose=True)\n# define the Temporal generalization object\n# time_gen = GeneralizingEstimator(clf, n_jobs=1, scoring=scoring, verbose=True)\n\n\n# record results for all subjects\nscores_all = np.empty([0])\nsub_all = np.empty([0])\ntimePoints_all = np.empty([0])\nweights_all = {}\nfor subID in subIDs:\n # read each subject's data\n X = X_all[subID]\n y = y_all[subID]\n # find information for current data\n n_trial = X.shape[0]\n n_channel = X.shape[1]\n n_times_resample = X.shape[2]\n\n # decode across time\n # pre-allocate space for scores and channel weights for each iteration\n scores_itr = np.zeros((n_iteration, n_times_resample))\n weights_itr = np.zeros((n_iteration, n_times_resample, n_channel))\n\n # Loop through each iteration\n time_start = time.time()\n for i in trange(n_iteration):\n # find the minimal trial number among labels\n min_trials = min_trials_calculation(y)\n # average single trials into mini blocks\n X_mini, y_mini = mini_block_transform(min_trials, trials_mini_block, X, y)\n # temporal decoding for each iteration\n for train_ind, test_ind in cv.split(X_mini, y_mini):\n X_train = X_mini[train_ind]\n y_train = y_mini[train_ind]\n X_test = X_mini[test_ind]\n y_test = y_mini[test_ind]\n for time_ind in range(n_times_resample):\n X_train_t = X_train[:, :, time_ind]\n X_test_t = X_test[:, :, time_ind]\n scaler = StandardScaler().fit(X_train_t)\n X_train_transformed = scaler.transform(X_train_t)\n X_test_transformed = scaler.transform(X_test_t)\n\n clf.fit(X_train_transformed, y_train)\n y_pred = clf.predict(X_test_transformed)\n score = accuracy_score(y_test, y_pred)\n scores_itr[i, time_ind] = score\n # calculate weight for each channel\n channel_weight = clf.coef_\n weights_itr[i, time_ind, :] = np.mean(channel_weight, axis=0)\n\n\n time_end = time.time()\n print('Time cost for temporal 
decoding for sub %s: %f s' % (subID, time_end - time_start))\n\n scores_final = np.mean(scores_itr, axis=0)\n weights_final = np.mean(weights_itr, axis=0)\n\n\n # save decoding scores for each subject into disk\n np.savetxt(classification_results_dir + '/' + subID + '_' + time_string + '_decoding_results.txt', scores_final, fmt='%f')\n np.savetxt(classification_results_dir + '/' + subID + '_' + time_string + '_channel_weights.txt', weights_final, fmt='%f')\n\n # save all decoding scores, subject index, and time points\n subs = np.repeat(subID, scores_final.size)\n sub_all = np.append(sub_all, subs, axis=0)\n scores_all = np.append(scores_all, scores_final, axis=0)\n timePoints_all = np.append(timePoints_all, time_points_resample_all[subID], axis=0)\n # calculate averaged weights across time span for each channel\n weights_mean = np.mean(abs(weights_final), axis=0)\n weights_mean = weights_mean[None, :]\n weights_all[subID] = weights_mean\n\n\n# statistical analysis\nfrom scipy import stats\nfrom mne.stats import (fdr_correction)\n\n# change data into dataframe\ndata = {'subject': sub_all, 'time': timePoints_all, 'scores': scores_all}\ndf = pd.DataFrame(data=data)\n\n# one-sample t-test with chance level (0.125)\nt_all = np.zeros(n_times_resample)\np_all = np.zeros(n_times_resample)\nsig_all = np.zeros(n_times_resample)\nfor i in range(n_times_resample):\n current_time = time_points_resample[i]\n current_scores = df[df['time'].isin([current_time])]\n\n t, p_twoTail = stats.ttest_1samp(current_scores['scores'], 0.125)\n p_FDR = fdr_correction(p_twoTail)[1]\n\n if p_FDR <= .05:\n sig = 1\n else:\n sig = 0\n\n t_all[i] = t\n p_all[i] = p_FDR\n sig_all[i] = sig\n\n# record significant time points for plot\nx_sig = timePoints_all[np.nonzero(sig_all)]\ny_sig = np.repeat(0.45, len(x_sig))\n# s_sig = t_all[np.nonzero(sig_all)]\n\n# plot subject-wise results\nfig, ax = plt.subplots()\nsns.lineplot(data=df, x='time', y='scores', ci=95, n_boot=1000, err_style='band')\nsns.scatterplot(x=x_sig, y=y_sig, c=['r'], s=20, marker='s')\nax.axhline(.125, color='k', linestyle='-')\nax.set_xlabel('Times')\nax.set_ylabel('Accuracy')\nax.set_title('temporal decoding for %d subjects_eight_posBin' % sub_num)\nax.text(0.8, 0.4, 'p < .05, 95% CI', fontsize=12)\nax.text(0.8, 0.36, 'FDR correction', fontsize=12)\n# add events\nax.axvline(.0, color='k', linestyle='--', label='stimuli onset')\nax.axvline(.1, color='b', linestyle='--', label='stimuli offset')\nax.axvline(x_sig[0], ymax=0.95, color='r', linestyle='--', label='First significant point')\nax.legend(loc='best', fontsize='xx-small')\n\nplt.savefig('decoding.png')" ]
[ [ "numpy.array", "numpy.empty", "numpy.savetxt", "numpy.zeros", "sklearn.preprocessing.StandardScaler", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "sklearn.svm.SVC", "numpy.mean", "scipy.stats.ttest_1samp", "numpy.nonzero", "sklearn.metrics.accuracy_score", "numpy.append", "numpy.repeat", "sklearn.model_selection.StratifiedShuffleSplit", "numpy.linspace", "numpy.average" ] ]
MSchnei/MRI_segmentation_preparation_scripts
[ "02f65b584e09908247202fff57714b63ef44e7dd" ]
[ "plotting/01u_plot_learning.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Compare classic, dense, denseExt U-Net at dr 0.05 with unweighted loss.\"\"\"\n\nimport os\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom load_tfevents import func_load_event\n\n# %% Set input parameters\n\n# Set path to log directtory\nstr_log_path = '/media/sf_D_DRIVE/Unet/MPRAGEsingle/results/'\n\n# List project names\nlst_prj = ['project50_32strides_maxpool_tranposed_classic_dr_0p05_weighted',\n 'project51_32strides_maxpool_tranposed_dense_dr_0p05_weighted',\n 'project52_32strides_maxpool_tranposed_denseExt_dr_0p05_weighted']\n\n# list project names for plotting\nlst_names = ['_classic_weighted',\n '_dense_weighted',\n '_denseExt_weighted']\n\n# Set subfolder to training logs\nlst_evnt_trn = ['events.out.tfevents.1576583873.bi-node1.bi.31630.2779.v2',\n 'events.out.tfevents.1578004967.bi-node1.bi.9960.7922.v2',\n 'events.out.tfevents.1578040711.bi-node1.bi.10803.11288.v2']\n\n# Set subfolder to validation logs\nlst_evnt_val = ['events.out.tfevents.1576584907.bi-node1.bi.31630.33672.v2',\n 'events.out.tfevents.1578005918.bi-node1.bi.9960.64050.v2',\n 'events.out.tfevents.1578043231.bi-node1.bi.10803.83834.v2']\n\n# Set color\nlst_colors = ['#1b9e77', '#1b9e77',\n '#d95f02', '#d95f02',\n '#7570b3', '#7570b3']\n\n# Set dashes\nlst_dashes = [(''), (2, 2), (''), (2, 2), (''), (2, 2)]\n\n# define size guidance for loading data\ntf_size_guidance = {\n 'compressedHistograms': 10,\n 'images': 0,\n 'scalars': 100,\n 'histograms': 1}\n\n# Initialize lists for collecting the results\nlst_trn_lss = [None] * len(lst_evnt_trn)\nlst_val_lss = [None] * len(lst_evnt_trn)\nlst_trn_acc = [None] * len(lst_evnt_trn)\nlst_val_acc = [None] * len(lst_evnt_trn)\n\n# %% Load data\n\nfor ind in range(len(lst_evnt_trn)):\n # Derive paths to training and validation data\n str_evnt_trn = os.path.join(str_log_path, lst_prj[ind], 'logs', 'train',\n lst_evnt_trn[ind])\n str_evnt_val = os.path.join(str_log_path, lst_prj[ind], 'logs',\n 'validation', lst_evnt_val[ind])\n\n # Load training and validation loss\n lst_trn_lss[ind] = func_load_event(str_evnt_trn,\n tf_size_guidance=tf_size_guidance,\n name_scalar='epoch_loss')\n lst_val_lss[ind] = func_load_event(str_evnt_val,\n tf_size_guidance=tf_size_guidance,\n name_scalar='epoch_loss')\n # Load training and validation accuracy\n lst_trn_acc[ind] = func_load_event(str_evnt_trn,\n tf_size_guidance=tf_size_guidance,\n name_scalar='epoch_sparse_categorical_accuracy')\n lst_val_acc[ind] = func_load_event(str_evnt_val,\n tf_size_guidance=tf_size_guidance,\n name_scalar='epoch_sparse_categorical_accuracy')\n\n# %% Plot\n\n# increase font size\nsns.set(font_scale=2)\n# get number of epochs\nvar_num_steps = len(lst_trn_lss[0])\nary_epochs = np.arange(var_num_steps)\n# initialize data frames\ndf_loss = pd.DataFrame(index=ary_epochs)\ndf_acc = pd.DataFrame(index=ary_epochs)\n\nfor ind in range(len(lst_evnt_trn)):\n\n # add data to pandas loss data frame\n df_loss['trn_loss'+lst_names[ind]] = lst_trn_lss[ind]\n df_loss['val_loss'+lst_names[ind]] = lst_val_lss[ind]\n # add data to pandas loss data frame\n df_acc['trn_acc'+lst_names[ind]] = lst_trn_acc[ind]\n df_acc['val_acc'+lst_names[ind]] = lst_val_acc[ind]\n\n# plot losses\nfig, ax = plt.subplots()\nfig.set_size_inches(17.5, 12.5)\nsns.lineplot(data=df_loss, palette=lst_colors, dashes=lst_dashes,\n linewidth=2.5)\nplt.xlabel(\"Number of 
Epochs\")\nplt.ylabel(\"Loss\")\nfig.savefig(\"/media/sf_D_DRIVE/Unet/presentation/results/plots/loss_model_weighted.svg\")\nfig.savefig(\"/media/sf_D_DRIVE/Unet/presentation/results/plots/loss_model_weighted.png\")\n\n# plot accuracies\nfig, ax = plt.subplots()\nfig.set_size_inches(17.5, 12.5)\nsns.lineplot(data=df_acc, palette=lst_colors, dashes=lst_dashes,\n linewidth=2.5)\nplt.xlabel(\"Number of Epochs\")\nplt.ylabel(\"Accuracy\")\n\nfig.savefig(\"/media/sf_D_DRIVE/Unet/presentation/results/plots/accuracy_model_weighted.svg\")\nfig.savefig(\"/media/sf_D_DRIVE/Unet/presentation/results/plots/accuracy_model_weighted.png\")\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.ylabel" ] ]
garrettkatz/directional-fibers
[ "3cd5262d80f684184ef12e273ad4bd3c3ce60b82" ]
[ "readme.py" ]
[ "# Code from README.md\n\nimport numpy as np\nN = 2\nW = 1.2*np.eye(N) + 0.1*np.random.randn(N,N)\nf = lambda v: np.tanh(W.dot(v)) - v\n\nI = np.eye(W.shape[0])\ndef Df(V):\n D = 1-np.tanh(W.dot(V))**2\n return D.T[:,:,np.newaxis]*W[np.newaxis,:,:] - I[np.newaxis,:,:]\n\nef = lambda v: 10**-10\n\nimport dfibers.traversal as tv\n# help(tv.FiberTrace)\n\nterminate = lambda trace: (np.fabs(trace.x) > 10**6).any()\n\ncompute_step_amount = lambda trace: (10**-3, None, False)\n\nimport dfibers.solvers as sv\n# help(tv.traverse_fiber)\n# help(sv.fiber_solver)\nsolution = sv.fiber_solver(\n f,\n ef,\n Df,\n compute_step_amount,\n v = np.zeros((N,1)),\n terminate=terminate,\n max_traverse_steps=10**3,\n max_solve_iterations=2**5,\n )\n\nduplicates = lambda U, v: (np.fabs(U - v) < 2**-21).all(axis=0)\n\nimport dfibers.fixed_points as fx\nV = solution[\"Fixed points\"]\nV = fx.sanitize_points(V, f, ef, Df, duplicates)\n\nprint(\"Fixed points:\")\nprint(V)\nprint(\"Fixed point residuals:\")\nprint(f(V))\nassert((f(V) < ef(V)).all())\n" ]
[ [ "numpy.fabs", "numpy.random.randn", "numpy.zeros", "numpy.eye" ] ]
willpatera/cvd_pupillometry
[ "fd015d9221112dfa43a3b512b51324733d5b73b0" ]
[ "build/lib/pyplr/preproc.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\npyplr.preproc\n=============\n\nScripting tools for preprocessing pupil data.\n\n@author: jtm\n\n'''\n\nimport numpy as np\nimport scipy.signal as signal\n\n\ndef even_samples(samples, sample_rate, fields=['diameter'], zero_index=False):\n '''Resample data in `fields` to a new index with evenly spaced timepoints.\n\n Pupil Core data samples are unevenly spaced. \n \n Parameters\n ----------\n samples : pandas.DataFrame\n The samples.\n sample_rate : int\n Sampling rate of the data.\n fields : list, optional\n The columns to interpolate to new index. The default is ['diameter'].\n\n Returns\n -------\n samps : pandas.DataFrame\n DataFrame with evenly spaced index and interpolated data.\n\n '''\n # TODO: When is the best time to do this?\n samps = samples.copy(deep=True)\n x = samps.index.to_numpy()\n if zero_index:\n x = x - x[0]\n xnew = np.linspace(x.min(), x.max(), len(x))\n for f in fields:\n y = samps[f].to_numpy()\n samps[f] = np.interp(xnew, x, y)\n samps.index = xnew\n return samps\n\ndef even_range_samples(rangs, sample_rate, fields=[]):\n for idx, df in rangs.groupby(level=['event']):\n for f in fields:\n x = df.orig_idx.to_numpy()\n x = x - x[0]\n xnew = np.linspace(x.min(), x.max(), len(x))\n y = df.loc[idx, f]\n rangs.loc[idx, f] = np.interp(xnew, x, y)\n rangs.loc[idx, 'even_idx'] = xnew\n return rangs\n\ndef mask_pupil_first_derivative(samples, \n threshold=3.0,\n mask_cols=['diameter']):\n '''Apply a masking threshold on the first derivative of pupil data.\n \n Use a statistical criterion on the first derivative of pupil data to mask \n poor quality data. Helpful for dealing with blinks. \n\n Parameters\n ----------\n samples : pandas.DataFrame\n Samples containing the data to be masked.\n threshold : float, optional\n Number of standard deviations from the mean of the first derivative \n to use as the threshold for masking. The default is 3.0.\n mask_cols : list, optional\n Columns to mask. The default is ``['diameter']``.\n\n Returns\n -------\n samps : pandas.DataFrame\n masked data\n\n '''\n samps = samples.copy(deep=True)\n for col in mask_cols:\n d = samples[col].diff()\n m = samples[col].diff().mean()\n s = samples[col].diff().std() * threshold\n #TODO: check this works properly\n samps[col] = samps[col].mask((d < (m-s)) | (d > (m+s)))\n samps[samps[col] == 0] = np.nan\n return samps\n\ndef mask_pupil_confidence(samples, threshold=.8, mask_cols=['diameter']):\n '''\n Sets data in mask_cols to NaN where the corresponding confidence metric is\n below threshold. Pupil Labs reccommend a threshold of 0.8. Helpful for\n dealing with blinks. \n\n Parameters\n ----------\n samples : pandas.DataFrame\n Samples containing the data to be masked.\n threshold : float, optional\n Confidence threshold for masking. The default is 0.8.\n mask_cols : list, optional\n Columns to mask. The default is ['diameter'].\n\n Returns\n -------\n samps : pandas.DataFrame\n masked data\n\n '''\n samps = samples.copy(deep=True)\n samps[mask_cols] = samps[mask_cols].mask(samps.confidence < threshold)\n return samps\n \ndef pupil_confidence_filter(samples, threshold=.8, mask_cols=['diameter']):\n '''\n Sets data in mask_cols to NaN where the corresponding confidence metric is\n below threshold. Pupil Labs reccommend a threshold of .8. An alterntive\n to interpolating blinks. 
\n\n Parameters\n ----------\n samples : pandas.DataFrame\n The samples from which to pull indices.\n threshold : float, optional\n Threshold to use for filtering by confidence. The default is .8.\n mask_cols : list, optional\n Columns to mask. The default is ['diameter'].\n\n Returns\n -------\n samps : pandas.DataFrame\n Masked data.\n \n '''\n \n samps = samples.copy(deep=True)\n indices = samples[samples.confidence<threshold].index\n samps.loc[indices, mask_cols] = float('nan')\n samps['interpolated'] = 0\n samps.loc[indices, 'interpolated'] = 1\n return samps\n\ndef interpolate_pupil(samples, interp_cols=['diameter'], \n method='linear', order=None):\n '''\n Use linear interpolation to reconstruct nan values in interp_cols.\n\n Parameters\n ----------\n samples : pandas.DataFrame\n Samples containing the data to be interpolated.\n interp_cols : list, optional\n Columns to interpolate. The default is ['diameter'].\n method : string\n Linear or polynomial. If polynomial, requires 'order' to be specified.\n\n Returns\n -------\n samps : pandas.DataFrame\n Masked data\n\n '''\n if method=='polynomial' and not order:\n raise ValueError('Must specify order for polynomial')\n samps = samples.copy(deep=True)\n samps['interpolated'] = 0\n samps.loc[samps[interp_cols].isna().any(axis=1), 'interpolated'] = 1\n samps[interp_cols] = samps[interp_cols].interpolate(\n method=method, order=order, axis=0, inplace=False)\n return samps\n\ndef percent_signal_change(ranges, baselines, pupil_cols=[]):\n '''Add new columns expressing pupil_cols as percent change from baseline.\n \n Parameters\n ----------\n ranges : pandas.DataFrame\n DataFrame with MultiIndex. Usually the output from ``utils.extract(...)``\n baselines : TYPE\n DESCRIPTION.\n pupil_cols : TYPE, optional\n DESCRIPTION. The default is [].\n\n Returns\n -------\n ranges : pandas.DataFrame\n Original DataFrame with new columns for percent-signal change.\n\n '''\n for col in pupil_cols:\n new_col = col + '_pc'\n ranges[new_col] = (ranges[col] / baselines[col] - 1).values * 100\n return ranges\n \n \ndef ev_row_idxs(samples, blinks):\n ''' \n Returns the indices in 'samples' contained in events from 'events'.\n \n Parameters\n ----------\n samples : pandas.DataFrame\n The samples from which to pull indices.\n events : pandas.DataFrame\n The events whose indices should be pulled from 'samples'.\n \n Returns\n -------\n samps : pandas.DataFrame\n Masked data\n \n '''\n idxs = []\n for start, end in zip(blinks['start_timestamp'], blinks['end_timestamp']):\n idxs.extend(list(samples.loc[start:end].index))\n idxs = np.unique(idxs)\n idxs = np.intersect1d(idxs, samples.index.tolist())\n return idxs\n\ndef get_mask_idxs(samples, blinks):\n '''\n Finds indices from 'samples' within the returned events.\n \n '''\n blidxs = ev_row_idxs(samples, blinks)\n return blidxs\n\ndef mask_blinks(samples, blinks, mask_cols=['diameter']):\n '''\n Sets untrustworthy pupil data to NaN.\n \n Parameters\n ----------\n samples : pandas.DataFrame\n Must contain at least 'pupil_timestamp' and 'diameter' columns\n blinks : pandas.DataFrame\n Must contain 'start_timestamp' and 'end_timestamp' columns\n mask_cols : list, optional\n Columns to mask. 
The default is ['diameter'].\n Returns\n -------\n samps : pandas.DataFrame\n Masked data\n \n '''\n samps = samples.copy(deep=True)\n indices = get_mask_idxs(samps, blinks)\n samps.loc[indices, mask_cols] = float('nan')\n samps['interpolated'] = 0\n samps.loc[indices, 'interpolated'] = 1\n return samps\n\ndef interpolate_blinks(samples, blinks, fields=['diameter']):\n '''Reconstructs Pupil Labs eye blinks with linear interpolation.\n \n Parameters\n ----------\n samples : pandas.DataFrame\n Must contain at least 'pupil_timestamp' and 'diameter' columns\n blinks : pandas.DataFrame\n Must contain 'start_timestamp' and 'end_timestamp' columns\n interp_cols : list, optional\n Columns to interpolate. The default is ['diameter'].\n \n Returns\n -------\n samps : pandas.DataFrame\n Blink-interpolated data\n \n '''\n #TODO: fix this pipeline\n samps = mask_blinks(samples, blinks, mask_cols=fields)\n n = samps[fields].isna().sum().max()\n samps = samps.interpolate(method='linear', axis=0, inplace=False)\n #breakpoint()\n print('{} samples ({:.3f} %) reconstructed with linear interpolation'.format(\n len(samps.loc[samps['interpolated']==1]), n))\n return samps\n\ndef mask_zeros(samples, mask_cols=['diameter']):\n '''Sets any 0 values in columns in mask_cols to NaN.\n \n Parameters\n ----------\n samples : pandas.DataFrame\n The samples to search for 0 values.\n mask_fields (list of strings)\n The columns to search for 0 values.\n \n '''\n samps = samples.copy(deep=True)\n for f in mask_cols:\n samps[samps[f] == 0] = float('nan')\n return samps\n\ndef interpolate_zeros(samples, fields=['diameter']):\n '''Replace 0s in \"samples\" with linearly interpolated data.\n \n Parameters\n ----------\n samples : pandas.DataFrame\n The samples in which you'd like to replace 0s\n interp_cols : list\n The column names from samples in which you'd like to replace 0s.\n \n '''\n samps = mask_zeros(samples, mask_cols=fields)\n samps = samps.interpolate(method='linear', axis=0, inplace=False)\n # since interpolate doesn't handle the start/finish, bfill the ffill to\n # take care of NaN's at the start/finish samps.\n samps.fillna(method='bfill', inplace=True)\n samps.fillna(method='ffill', inplace=True)\n return samps \n\ndef butterworth_series(samples,\n fields=['diameter'], \n filt_order=3,\n cutoff_freq=.01,\n inplace=False):\n '''Applies a Butterworth filter to the given fields. \n\n Parameters\n ----------\n samples : `pandas.DataFrame`\n DataFrame of samples containing the pupil data.\n fields : list, optional\n List of columns to be filtered. The default is ['diameter'].\n filt_order : int, optional\n Order of the filter. The default is 3.\n cutoff_freq : float\n Normalised cut-off frequency in hertz. For 4 Hz cut-off, this should \n 4/(sample_rate/2). The default is .01.\n inplace : bool, optional\n Whether to modify `samples` in place. 
The default is False.\n\n Returns\n -------\n samps : \n The samples.\n\n '''\n samps = samples if inplace else samples.copy(deep=True)\n B, A = signal.butter(filt_order, cutoff_freq, output='BA')\n samps[fields] = samps[fields].apply(\n lambda x: signal.filtfilt(B, A, x), axis=0)\n return samps\n\ndef rolling_mean_series(samples,\n window_size,\n fields=['diameter'],\n inplace=False):\n samps = samples if inplace else samples.copy(deep=True)\n for f in fields:\n samps[f] = samps[f].rolling(window_size).mean()\n return samps\n\n \n \ndef savgol_series(samples, \n fields=['diameter'], \n window_length=51, \n filt_order=7,\n inplace=False): \n '''\n Applies a savitsky-golay filter to the given fields\n See documentation on scipys savgol_filter method FMI.\n '''\n samps = samples if inplace else samples.copy(deep=True)\n samps[fields] = samps[fields].apply(\n lambda x: signal.savgol_filter(x, window_length, filt_order), axis=0)\n return samps\n" ]
[ [ "scipy.signal.savgol_filter", "scipy.signal.butter", "numpy.interp", "scipy.signal.filtfilt", "numpy.unique" ] ]
ndcuong91/mmocr
[ "46e6faad9bf268af2d8e68ce279fcb328269c504" ]
[ "mmocr/datasets/kie_dataset.py" ]
[ "import copy\nfrom os import path as osp\n\nimport numpy as np\nimport torch\n\nimport mmocr.utils as utils\nfrom mmdet.datasets.builder import DATASETS\nfrom mmocr.core import compute_f1_score\nfrom mmocr.datasets.base_dataset import BaseDataset\nfrom mmocr.datasets.pipelines.crop import sort_vertex\n\n\[email protected]_module()\nclass KIEDataset(BaseDataset):\n \"\"\"\n Args:\n ann_file (str): Annotation file path.\n pipeline (list[dict]): Processing pipeline.\n loader (dict): Dictionary to construct loader\n to load annotation infos.\n img_prefix (str, optional): Image prefix to generate full\n image path.\n test_mode (bool, optional): If True, try...except will\n be turned off in __getitem__.\n dict_file (str): Character dict file path.\n norm (float): Norm to map value from one range to another.\n \"\"\"\n\n def __init__(self,\n ann_file,\n loader,\n dict_file,\n img_prefix='',\n pipeline=None,\n norm=10.,\n directed=False,\n test_mode=True,\n **kwargs):\n super().__init__(\n ann_file,\n loader,\n pipeline,\n img_prefix=img_prefix,\n test_mode=test_mode)\n assert osp.exists(dict_file)\n\n self.norm = norm\n self.directed = directed\n\n self.dict = dict({'': 0})\n with open(dict_file, 'r') as fr:\n idx = 1\n for line in fr:\n char = line.strip()\n self.dict[char] = idx\n idx += 1\n\n def pre_pipeline(self, results):\n results['img_prefix'] = self.img_prefix\n results['bbox_fields'] = []\n\n def _parse_anno_info(self, annotations):\n \"\"\"Parse annotations of boxes, texts and labels for one image.\n Args:\n annotations (list[dict]): Annotations of one image, where\n each dict is for one character.\n\n Returns:\n dict: A dict containing the following keys:\n\n - bboxes (np.ndarray): Bbox in one image with shape:\n box_num * 4.\n - relations (np.ndarray): Relations between bbox with shape:\n box_num * box_num * D.\n - texts (np.ndarray): Text index with shape:\n box_num * text_max_len.\n - labels (np.ndarray): Box Labels with shape:\n box_num * (box_num + 1).\n \"\"\"\n\n assert utils.is_type_list(annotations, dict)\n assert 'box' in annotations[0]\n assert 'text' in annotations[0]\n assert 'label' in annotations[0]\n\n boxes, texts, text_inds, labels, edges = [], [], [], [], []\n for ann in annotations:\n box = ann['box']\n x_list, y_list = box[0:8:2], box[1:9:2]\n sorted_x_list, sorted_y_list = sort_vertex(x_list, y_list)\n sorted_box = []\n for x, y in zip(sorted_x_list, sorted_y_list):\n sorted_box.append(x)\n sorted_box.append(y)\n boxes.append(sorted_box)\n text = ann['text']\n texts.append(ann['text'])\n text_ind = [self.dict[c] for c in text if c in self.dict]\n text_inds.append(text_ind)\n labels.append(ann['label'])\n edges.append(ann.get('edge', 0))\n\n ann_infos = dict(\n boxes=boxes,\n texts=texts,\n text_inds=text_inds,\n edges=edges,\n labels=labels)\n\n return self.list_to_numpy(ann_infos)\n\n def prepare_train_img(self, index):\n \"\"\"Get training data and annotations from pipeline.\n\n Args:\n index (int): Index of data.\n\n Returns:\n dict: Training data and annotation after pipeline with new keys\n introduced by pipeline.\n \"\"\"\n img_ann_info = self.data_infos[index]\n img_info = {\n 'filename': img_ann_info['file_name'],\n 'height': img_ann_info['height'],\n 'width': img_ann_info['width']\n }\n ann_info = self._parse_anno_info(img_ann_info['annotations'])\n results = dict(img_info=img_info, ann_info=ann_info)\n\n self.pre_pipeline(results)\n\n return self.pipeline(results)\n\n def evaluate(self,\n results,\n metric='macro_f1',\n 
metric_options=dict(macro_f1=dict(ignores=[])),\n **kwargs):\n # allow some kwargs to pass through\n # assert set(kwargs).issubset(['logger'])\n\n # Protect ``metric_options`` since it uses mutable value as default\n metric_options = copy.deepcopy(metric_options)\n\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['macro_f1']\n for m in metrics:\n if m not in allowed_metrics:\n raise KeyError(f'metric {m} is not supported')\n\n return self.compute_macro_f1(results, **metric_options['macro_f1'])\n\n def compute_macro_f1(self, results, ignores=[]):\n node_preds = []\n node_gts = []\n for idx, result in enumerate(results):\n node_preds.append(result['nodes'])\n box_ann_infos = self.data_infos[idx]['annotations']\n node_gt = [box_ann_info['label'] for box_ann_info in box_ann_infos]\n node_gts.append(torch.Tensor(node_gt))\n\n node_preds = torch.cat(node_preds)\n node_gts = torch.cat(node_gts).int().to(node_preds.device)\n\n node_f1s = compute_f1_score(node_preds, node_gts, ignores)\n\n return {\n 'macro_f1': node_f1s.mean(),\n }\n\n def list_to_numpy(self, ann_infos):\n \"\"\"Convert bboxes, relations, texts and labels to ndarray.\"\"\"\n boxes, text_inds = ann_infos['boxes'], ann_infos['text_inds']\n boxes = np.array(boxes, np.int32)\n relations, bboxes = self.compute_relation(boxes)\n\n labels = ann_infos.get('labels', None)\n if labels is not None:\n labels = np.array(labels, np.int32)\n edges = ann_infos.get('edges', None)\n if edges is not None:\n labels = labels[:, None]\n edges = np.array(edges)\n edges = (edges[:, None] == edges[None, :]).astype(np.int32)\n if self.directed:\n edges = (edges & labels == 1).astype(np.int32)\n np.fill_diagonal(edges, -1)\n labels = np.concatenate([labels, edges], -1)\n padded_text_inds = self.pad_text_indices(text_inds)\n\n return dict(\n bboxes=bboxes,\n relations=relations,\n texts=padded_text_inds,\n labels=labels)\n\n def pad_text_indices(self, text_inds):\n \"\"\"Pad text index to same length.\"\"\"\n max_len = max([len(text_ind) for text_ind in text_inds])\n padded_text_inds = -np.ones((len(text_inds), max_len), np.int32)\n for idx, text_ind in enumerate(text_inds):\n padded_text_inds[idx, :len(text_ind)] = np.array(text_ind)\n return padded_text_inds\n\n def compute_relation(self, boxes):\n \"\"\"Compute relation between every two boxes.\"\"\"\n x1s, y1s = boxes[:, 0:1], boxes[:, 1:2]\n x2s, y2s = boxes[:, 4:5], boxes[:, 5:6]\n ws, hs = x2s - x1s + 1, np.maximum(y2s - y1s + 1, 1)\n dxs = (x1s[:, 0][None] - x1s) / self.norm\n dys = (y1s[:, 0][None] - y1s) / self.norm\n xhhs, xwhs = hs[:, 0][None] / hs, ws[:, 0][None] / hs\n whs = ws / hs + np.zeros_like(xhhs)\n relations = np.stack([dxs, dys, whs, xhhs, xwhs], -1)\n bboxes = np.concatenate([x1s, y1s, x2s, y2s], -1).astype(np.float32)\n return relations, bboxes\n" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.array", "torch.cat", "numpy.fill_diagonal", "numpy.stack", "torch.Tensor", "numpy.maximum" ] ]
LuisRondoCuevas/schainpy
[ "ef41efe03993a6ae56e587334a1bfc529fccc2df" ]
[ "schainpy/model/io/jroIO_hf.py" ]
[ "'''\nCreated on Jul 3, 2014\n\n@author: roj-com0419\n'''\n\nimport os,sys\nimport time,datetime\nimport h5py\nimport numpy\nimport fnmatch\nimport re\n\nfrom schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader\nfrom schainpy.model.data.jrodata import Voltage\nfrom schainpy.model.proc.jroproc_base import ProcessingUnit, Operation\n\n\ndef isNumber(str):\n \"\"\"\n Chequea si el conjunto de caracteres que componen un string puede ser convertidos a un numero.\n\n Excepciones:\n Si un determinado string no puede ser convertido a numero\n Input:\n str, string al cual se le analiza para determinar si convertible a un numero o no\n\n Return:\n True : si el string es uno numerico\n False : no es un string numerico\n \"\"\"\n try:\n float( str )\n return True\n except:\n return False\n\ndef getFileFromSet(path, ext, set=None):\n validFilelist = []\n fileList = os.listdir(path)\n\n\n if len(fileList) < 1:\n return None\n\n # 0 1234 567 89A BCDE\n # H YYYY DDD SSS .ext\n\n for thisFile in fileList:\n try:\n number= int(thisFile[6:16])\n\n # year = int(thisFile[1:5])\n # doy = int(thisFile[5:8])\n except:\n continue\n\n if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):\n continue\n\n validFilelist.append(thisFile)\n\n if len(validFilelist) < 1:\n return None\n\n validFilelist = sorted( validFilelist, key=str.lower )\n\n if set == None:\n return validFilelist[-1]\n\n print(\"set =\" ,set)\n for thisFile in validFilelist:\n if set <= int(thisFile[6:16]):\n print(thisFile,int(thisFile[6:16]))\n return thisFile\n\n return validFilelist[-1]\n\n myfile = fnmatch.filter(validFilelist,'*%10d*'%(set))\n #myfile = fnmatch.filter(validFilelist,'*%4.4d%3.3d%3.3d*'%(year,doy,set))\n\n if len(myfile)!= 0:\n return myfile[0]\n else:\n filename = '*%10.10d%s'%(set,ext.lower())\n print('the filename %s does not exist'%filename)\n print('...going to the last file: ')\n\n if validFilelist:\n validFilelist = sorted( validFilelist, key=str.lower )\n return validFilelist[-1]\n\n return None\n\ndef getlastFileFromPath(path, ext):\n \"\"\"\nDepura el fileList dejando solo los que cumplan el formato de \"res-xxxxxx.ext\"\n al final de la depuracion devuelve el ultimo file de la lista que quedo.\n\n Input:\n fileList : lista conteniendo todos los files (sin path) que componen una determinada carpeta\n ext : extension de los files contenidos en una carpeta\n\n Return:\n El ultimo file de una determinada carpeta, no se considera el path.\n \"\"\"\n validFilelist = []\n fileList = os.listdir(path)\n\n # 0 1234 567 89A BCDE\n # H YYYY DDD SSS .ext\n\n for thisFile in fileList:\n\n try:\n number= int(thisFile[6:16])\n except:\n print(\"There is a file or folder with different format\")\n if not isNumber(number):\n continue\n\n# year = thisFile[1:5]\n# if not isNumber(year):\n# continue\n\n# doy = thisFile[5:8]\n# if not isNumber(doy):\n# continue\n\n number= int(number)\n# year = int(year)\n# doy = int(doy)\n\n if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):\n continue\n\n\n validFilelist.append(thisFile)\n\n\n if validFilelist:\n validFilelist = sorted( validFilelist, key=str.lower )\n return validFilelist[-1]\n\n return None\n\n\n\nclass HFReader(ProcessingUnit):\n '''\n classdocs\n '''\n path = None\n startDate= None\n endDate = None\n startTime= None\n endTime = None\n walk = None\n isConfig = False\n dataOut=None\n nTries = 3\n ext = \".hdf5\"\n\n def __init__(self, **kwargs):\n '''\n Constructor\n '''\n ProcessingUnit.__init__(self, **kwargs)\n\n self.isConfig =False\n\n 
self.datablock = None\n\n self.filename_current=None\n\n self.utc = 0\n\n self.ext='.hdf5'\n\n self.flagIsNewFile = 1\n\n #-------------------------------------------------\n self.fileIndex=None\n\n self.profileIndex_offset=None\n\n self.filenameList=[]\n\n self.hfFilePointer= None\n\n self.filename_online = None\n\n self.status=True\n\n self.flagNoMoreFiles= False\n\n self.__waitForNewFile = 20\n\n\n #--------------------------------------------------\n\n self.dataOut = self.createObjByDefault()\n\n\n def createObjByDefault(self):\n\n dataObj = Voltage()\n\n return dataObj\n\n def setObjProperties(self):\n\n pass\n\n def getBlockDimension(self):\n \"\"\"\n Obtiene la cantidad de puntos a leer por cada bloque de datos\n\n Affected:\n self.blocksize\n\n Return:\n None\n \"\"\"\n pts2read =self.nChannels*self.nHeights*self.nProfiles\n self.blocksize = pts2read\n\n def __readHeader(self):\n\n self.nProfiles = 100\n self.nHeights = 1000\n self.nChannels = 2\n self.__firstHeigth=0\n self.__nSamples=1000\n self.__deltaHeigth=1.5\n self.__sample_rate=1e5\n #self.__frequency=2.72e6\n #self.__frequency=3.64e6\n self.__frequency=None\n self.__online = False\n self.filename_next_set=None\n\n #print \"Frequency of Operation:\", self.__frequency\n\n\n def __setParameters(self,path='', startDate='',endDate='',startTime='', endTime='', walk=''):\n self.path = path\n self.startDate = startDate\n self.endDate = endDate\n self.startTime = startTime\n self.endTime = endTime\n self.walk = walk\n\n def __checkPath(self):\n if os.path.exists(self.path):\n self.status=1\n else:\n self.status=0\n print('Path %s does not exits'%self.path)\n return\n return\n\n def __selDates(self, hf_dirname_format):\n try:\n dir_hf_filename= self.path+\"/\"+hf_dirname_format\n fp= h5py.File(dir_hf_filename,'r')\n hipoc=fp['t'].value\n fp.close()\n date_time=datetime.datetime.utcfromtimestamp(hipoc)\n year =int(date_time[0:4])\n month=int(date_time[5:7])\n dom =int(date_time[8:10])\n thisDate= datetime.date(year,month,dom)\n if (thisDate>=self.startDate and thisDate <= self.endDate):\n return hf_dirname_format\n except:\n return None\n\n def __findDataForDates(self,online=False):\n if not(self.status):\n return None\n\n pat = '\\d+.\\d+'\n dirnameList = [re.search(pat,x) for x in os.listdir(self.path)]\n dirnameList = [x for x in dirnameList if x!=None]\n dirnameList = [x.string for x in dirnameList]\n if not(online):\n\n dirnameList = [self.__selDates(x) for x in dirnameList]\n dirnameList = [x for x in dirnameList if x!=None]\n\n if len(dirnameList)>0:\n self.status = 1\n self.dirnameList = dirnameList\n self.dirnameList.sort()\n\n else:\n self.status = 0\n return None\n\n def __getTimeFromData(self):\n startDateTime_Reader = datetime.datetime.combine(self.startDate,self.startTime)\n endDateTime_Reader = datetime.datetime.combine(self.endDate,self.endTime)\n print('Filtering Files from %s to %s'%(startDateTime_Reader, endDateTime_Reader))\n print('........................................')\n filter_filenameList=[]\n self.filenameList.sort()\n for i in range(len(self.filenameList)-1):\n filename=self.filenameList[i]\n dir_hf_filename= filename\n fp= h5py.File(dir_hf_filename,'r')\n hipoc=fp['t'].value\n hipoc=hipoc+self.timezone\n date_time=datetime.datetime.utcfromtimestamp(hipoc)\n fp.close()\n year =int(date_time[0:4])\n month=int(date_time[5:7])\n dom =int(date_time[8:10])\n hour =int(date_time[11:13])\n min =int(date_time[14:16])\n sec =int(date_time[17:19])\n this_time=datetime.datetime(year,month,dom,hour,min,sec)\n if 
(this_time>=startDateTime_Reader and this_time <= endDateTime_Reader):\n filter_filenameList.append(filename)\n filter_filenameList.sort()\n self.filenameList = filter_filenameList\n return 1\n\n def __getFilenameList(self):\n #print \"hola\"\n #print self.dirnameList\n dirList = [os.path.join(self.path,x) for x in self.dirnameList]\n self.filenameList= dirList\n #print self.filenameList\n #print \"pase\",len(self.filenameList)\n\n def __selectDataForTimes(self, online=False):\n\n if not(self.status):\n return None\n #----------------\n self.__getFilenameList()\n #----------------\n if not(online):\n if not(self.all):\n self.__getTimeFromData()\n if len(self.filenameList)>0:\n self.status=1\n self.filenameList.sort()\n else:\n self.status=0\n return None\n else:\n if self.set != None:\n\n filename=getFileFromSet(self.path,self.ext,self.set)\n\n if self.flag_nextfile==True:\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[fullfilename]\n self.filename_next_set=int(filename[6:16])+10\n\n self.flag_nextfile=False\n else:\n print(filename)\n print(\"PRIMERA CONDICION\")\n #if self.filename_next_set== int(filename[6:16]):\n print(\"TODO BIEN\")\n\n if filename == None:\n raise ValueError(\"corregir\")\n\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[fullfilename]\n self.filename_next_set=int(filename[6:16])+10\n print(\"Setting next file\",self.filename_next_set)\n self.set=int(filename[6:16])\n if True:\n pass\n else:\n print(\"ESTOY AQUI PORQUE NO EXISTE EL SIGUIENTE ARCHIVO\")\n\n else:\n filename =getlastFileFromPath(self.path,self.ext)\n\n if self.flag_nextfile==True:\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[self.filenameList[-1]]\n self.filename_next_set=int(filename[6:16])+10\n\n self.flag_nextfile=False\n else:\n filename=getFileFromSet(self.path,self.ext,self.set)\n print(filename)\n print(\"PRIMERA CONDICION\")\n #if self.filename_next_set== int(filename[6:16]):\n print(\"TODO BIEN\")\n\n if filename == None:\n raise ValueError(\"corregir\")\n\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[fullfilename]\n self.filename_next_set=int(filename[6:16])+10\n print(\"Setting next file\",self.filename_next_set)\n self.set=int(filename[6:16])\n if True:\n pass\n else:\n print(\"ESTOY AQUI PORQUE NO EXISTE EL SIGUIENTE ARCHIVO\")\n\n\n\n def searchFilesOffLine(self,\n path,\n startDate,\n endDate,\n ext,\n startTime=datetime.time(0,0,0),\n endTime=datetime.time(23,59,59),\n walk=True):\n\n self.__setParameters(path, startDate, endDate, startTime, endTime, walk)\n\n self.__checkPath()\n\n self.__findDataForDates()\n #print self.dirnameList\n\n self.__selectDataForTimes()\n\n for i in range(len(self.filenameList)):\n print(\"%s\"% (self.filenameList[i]))\n\n return\n\n def searchFilesOnLine(self,\n path,\n expLabel= \"\",\n ext=None,\n startDate=None,\n endDate=None,\n walk=True,\n set=None):\n\n\n startDate = datetime.datetime.utcnow().date()\n endDate = datetime.datetime.utcnow().date()\n\n self.__setParameters(path=path,startDate=startDate,endDate=endDate,walk=walk)\n\n self.__checkPath()\n\n fullpath=path\n print(\"%s folder was found: \" %(fullpath ))\n\n if set == None:\n self.set=None\n filename =getlastFileFromPath(fullpath,ext)\n startDate= datetime.datetime.utcnow().date\n endDate= datetime.datetime.utcnow().date()\n#\n else:\n filename= getFileFromSet(fullpath,ext,set)\n startDate=None\n endDate=None\n#\n if 
not (filename):\n return None,None,None,None,None\n #print \"%s file was found\" %(filename)\n\n#\n# dir_hf_filename= self.path+\"/\"+filename\n# fp= h5py.File(dir_hf_filename,'r')\n# hipoc=fp['t'].value\n# fp.close()\n# date_time=datetime.datetime.utcfromtimestamp(hipoc)\n#\n# year =int(date_time[0:4])\n# month=int(date_time[5:7])\n# dom =int(date_time[8:10])\n# set= int(filename[4:10])\n# self.set=set-1\n #self.dirnameList=[filename]\n filenameList= fullpath+\"/\"+filename\n self.dirnameList=[filename]\n self.filenameList=[filenameList]\n self.flag_nextfile=True\n\n #self.__findDataForDates(online=True)\n #self.dirnameList=[self.dirnameList[-1]]\n #print self.dirnameList\n #self.__selectDataForTimes(online=True)\n #return fullpath,filename,year,month,dom,set\n return\n\n def __setNextFile(self,online=False):\n \"\"\"\n \"\"\"\n if not(online):\n newFile = self.__setNextFileOffline()\n else:\n newFile = self.__setNextFileOnline()\n\n if not(newFile):\n return 0\n return 1\n\n def __setNextFileOffline(self):\n \"\"\"\n \"\"\"\n idFile= self.fileIndex\n while(True):\n idFile += 1\n if not (idFile < len(self.filenameList)):\n self.flagNoMoreFiles = 1\n print(\"No more Files\")\n return 0\n filename = self.filenameList[idFile]\n hfFilePointer =h5py.File(filename,'r')\n\n epoc=hfFilePointer['t'].value\n #this_time=datetime.datetime(year,month,dom,hour,min,sec)\n break\n\n self.flagIsNewFile = 1\n self.fileIndex = idFile\n self.filename = filename\n\n self.hfFilePointer = hfFilePointer\n hfFilePointer.close()\n self.__t0=epoc\n print(\"Setting the file: %s\"%self.filename)\n\n return 1\n\n def __setNextFileOnline(self):\n \"\"\"\n \"\"\"\n print(\"SOY NONE\",self.set)\n if self.set==None:\n pass\n else:\n self.set +=10\n\n filename = self.filenameList[0]#fullfilename\n if self.filename_online != None:\n self.__selectDataForTimes(online=True)\n filename = self.filenameList[0]\n while self.filename_online == filename:\n print('waiting %d seconds to get a new file...'%(self.__waitForNewFile))\n time.sleep(self.__waitForNewFile)\n #self.__findDataForDates(online=True)\n self.set=self.filename_next_set\n self.__selectDataForTimes(online=True)\n filename = self.filenameList[0]\n sizeoffile=os.path.getsize(filename)\n\n #print filename\n sizeoffile=os.path.getsize(filename)\n if sizeoffile<1670240:\n print(\"%s is not the rigth size\"%filename)\n delay=50\n print('waiting %d seconds for delay...'%(delay))\n time.sleep(delay)\n sizeoffile=os.path.getsize(filename)\n if sizeoffile<1670240:\n delay=50\n print('waiting %d more seconds for delay...'%(delay))\n time.sleep(delay)\n\n sizeoffile=os.path.getsize(filename)\n if sizeoffile<1670240:\n delay=50\n print('waiting %d more seconds for delay...'%(delay))\n time.sleep(delay)\n\n try:\n hfFilePointer=h5py.File(filename,'r')\n\n except:\n print(\"Error reading file %s\"%filename)\n\n self.filename_online=filename\n epoc=hfFilePointer['t'].value\n\n self.hfFilePointer=hfFilePointer\n hfFilePointer.close()\n self.__t0=epoc\n\n\n self.flagIsNewFile = 1\n self.filename = filename\n\n print(\"Setting the file: %s\"%self.filename)\n return 1\n\n def __getExpParameters(self):\n if not(self.status):\n return None\n\n def setup(self,\n path = None,\n startDate = None,\n endDate = None,\n startTime = datetime.time(0,0,0),\n endTime = datetime.time(23,59,59),\n set = None,\n expLabel = \"\",\n ext = None,\n all=0,\n timezone=0,\n online = False,\n delay = 60,\n walk = True):\n '''\n In this method we should set all initial parameters.\n\n '''\n if path==None:\n 
raise ValueError(\"The path is not valid\")\n\n if ext==None:\n ext = self.ext\n\n self.timezone= timezone\n self.online= online\n self.all=all\n #if set==None:\n\n #print set\n if not(online):\n print(\"Searching files in offline mode...\")\n\n self.searchFilesOffLine(path, startDate, endDate, ext, startTime, endTime, walk)\n else:\n print(\"Searching files in online mode...\")\n self.searchFilesOnLine(path, walk,ext,set=set)\n if set==None:\n pass\n else:\n self.set=set-10\n\n# for nTries in range(self.nTries):\n#\n# fullpath,file,year,month,day,set = self.searchFilesOnLine(path=path,expLabel=expLabel,ext=ext, walk=walk,set=set)\n#\n# if fullpath:\n# break\n# print '\\tWaiting %0.2f sec for an valid file in %s: try %02d ...' % (self.delay, path, nTries+1)\n# time.sleep(self.delay)\n# if not(fullpath):\n# print \"There ins't valid files in %s\" % path\n# return None\n\n\n if not(self.filenameList):\n print(\"There is no files into the folder: %s\"%(path))\n sys.exit(-1)\n\n self.__getExpParameters()\n\n\n self.fileIndex = -1\n\n self.__setNextFile(online)\n\n self.__readMetadata()\n\n self.__setLocalVariables()\n\n self.__setHeaderDO()\n #self.profileIndex_offset= 0\n\n #self.profileIndex = self.profileIndex_offset\n\n self.isConfig = True\n\n def __readMetadata(self):\n self.__readHeader()\n\n\n def __setLocalVariables(self):\n\n self.datablock = numpy.zeros((self.nChannels, self.nHeights,self.nProfiles), dtype = numpy.complex)\n #\n\n\n\n self.profileIndex = 9999\n\n\n def __setHeaderDO(self):\n\n\n self.dataOut.radarControllerHeaderObj = RadarControllerHeader()\n\n self.dataOut.systemHeaderObj = SystemHeader()\n\n\n #---------------------------------------------------------\n self.dataOut.systemHeaderObj.nProfiles=100\n self.dataOut.systemHeaderObj.nSamples=1000\n\n\n SAMPLING_STRUCTURE=[('h0', '<f4'), ('dh', '<f4'), ('nsa', '<u4')]\n self.dataOut.radarControllerHeaderObj.samplingWindow=numpy.zeros((1,),SAMPLING_STRUCTURE)\n self.dataOut.radarControllerHeaderObj.samplingWindow['h0']=0\n self.dataOut.radarControllerHeaderObj.samplingWindow['dh']=1.5\n self.dataOut.radarControllerHeaderObj.samplingWindow['nsa']=1000\n self.dataOut.radarControllerHeaderObj.nHeights=int(self.dataOut.radarControllerHeaderObj.samplingWindow['nsa'])\n self.dataOut.radarControllerHeaderObj.firstHeight = self.dataOut.radarControllerHeaderObj.samplingWindow['h0']\n self.dataOut.radarControllerHeaderObj.deltaHeight = self.dataOut.radarControllerHeaderObj.samplingWindow['dh']\n self.dataOut.radarControllerHeaderObj.samplesWin = self.dataOut.radarControllerHeaderObj.samplingWindow['nsa']\n\n self.dataOut.radarControllerHeaderObj.nWindows=1\n self.dataOut.radarControllerHeaderObj.codetype=0\n self.dataOut.radarControllerHeaderObj.numTaus=0\n #self.dataOut.radarControllerHeaderObj.Taus = numpy.zeros((1,),'<f4')\n\n\n #self.dataOut.radarControllerHeaderObj.nCode=numpy.zeros((1,), '<u4')\n #self.dataOut.radarControllerHeaderObj.nBaud=numpy.zeros((1,), '<u4')\n #self.dataOut.radarControllerHeaderObj.code=numpy.zeros(0)\n\n self.dataOut.radarControllerHeaderObj.code_size=0\n self.dataOut.nBaud=0\n self.dataOut.nCode=0\n self.dataOut.nPairs=0\n\n\n #---------------------------------------------------------\n\n self.dataOut.type = \"Voltage\"\n\n self.dataOut.data = None\n\n self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])\n\n self.dataOut.nProfiles = 1\n\n self.dataOut.heightList = self.__firstHeigth + numpy.arange(self.__nSamples, dtype = numpy.float)*self.__deltaHeigth\n\n self.dataOut.channelList = 
list(range(self.nChannels))\n\n #self.dataOut.channelIndexList = None\n\n self.dataOut.flagNoData = True\n\n #Set to TRUE if the data is discontinuous\n self.dataOut.flagDiscontinuousBlock = False\n\n self.dataOut.utctime = None\n\n self.dataOut.timeZone = self.timezone\n\n self.dataOut.dstFlag = 0\n\n self.dataOut.errorCount = 0\n\n self.dataOut.nCohInt = 1\n\n self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights\n\n self.dataOut.flagDecodeData = False #asumo que la data esta decodificada\n\n self.dataOut.flagDeflipData = False #asumo que la data esta sin flip\n\n self.dataOut.flagShiftFFT = False\n\n self.dataOut.ippSeconds = 1.0*self.__nSamples/self.__sample_rate\n\n #Time interval between profiles\n #self.dataOut.timeInterval =self.dataOut.ippSeconds * self.dataOut.nCohInt\n\n\n self.dataOut.frequency = self.__frequency\n\n self.dataOut.realtime = self.__online\n\n def __hasNotDataInBuffer(self):\n\n if self.profileIndex >= self.nProfiles:\n return 1\n\n return 0\n\n def readNextBlock(self):\n if not(self.__setNewBlock()):\n return 0\n\n if not(self.readBlock()):\n return 0\n\n return 1\n\n def __setNewBlock(self):\n\n if self.hfFilePointer==None:\n return 0\n\n if self.flagIsNewFile:\n return 1\n\n if self.profileIndex < self.nProfiles:\n return 1\n\n self.__setNextFile(self.online)\n\n return 1\n\n\n\n def readBlock(self):\n fp=h5py.File(self.filename,'r')\n #Puntero que apunta al archivo hdf5\n ch0=(fp['ch0']).value #Primer canal (100,1000)--(perfiles,alturas)\n ch1=(fp['ch1']).value #Segundo canal (100,1000)--(perfiles,alturas)\n fp.close()\n ch0= ch0.swapaxes(0,1) #Primer canal (100,1000)--(alturas,perfiles)\n ch1= ch1.swapaxes(0,1) #Segundo canal (100,1000)--(alturas,perfiles)\n self.datablock = numpy.array([ch0,ch1])\n self.flagIsNewFile=0\n\n self.profileIndex=0\n\n return 1\n\n def getData(self):\n if self.flagNoMoreFiles:\n self.dataOut.flagNoData = True\n return 0\n\n if self.__hasNotDataInBuffer():\n if not(self.readNextBlock()):\n self.dataOut.flagNodata=True\n return 0\n\n ##############################\n ##############################\n self.dataOut.data = self.datablock[:,:,self.profileIndex]\n self.dataOut.utctime = self.__t0 + self.dataOut.ippSeconds*self.profileIndex\n self.dataOut.profileIndex= self.profileIndex\n self.dataOut.flagNoData=False\n self.profileIndex +=1\n\n return self.dataOut.data\n\n\n def run(self, **kwargs):\n '''\n This method will be called many times so here you should put all your code\n '''\n\n if not self.isConfig:\n self.setup(**kwargs)\n self.isConfig = True\n self.getData()" ]
[ [ "numpy.array", "numpy.arange", "numpy.dtype", "numpy.zeros" ] ]
rkasher/OG-USA
[ "220651fe7f444e66d838971288b10f6932f405fb" ]
[ "ogusa/TPI.py" ]
[ "# imports\nimport numpy as np\nimport pickle\nimport scipy.optimize as opt\nfrom dask.distributed import Client\nfrom dask import compute, delayed\nimport dask.multiprocessing\nfrom ogusa import tax, utils, household, firm, fiscal\nfrom ogusa import aggregates as aggr\nimport os\n\n\n'''\nSet minimizer tolerance\n'''\nMINIMIZER_TOL = 1e-13\n\n'''\nSet flag for enforcement of solution check\n'''\nENFORCE_SOLUTION_CHECKS = True\n\n\ndef get_initial_SS_values(p):\n '''\n Get values of variables for the initial period and the steady state\n equlibrium values.\n\n Args:\n p (OG-USA Specifications object): model parameters\n\n Returns:\n (tuple): initial period and steady state values:\n\n * initial_values (tuple): initial period variable values,\n (b_sinit, b_splus1init, factor, initial_b, initial_n, D0)\n * ss_vars (dictionary): dictionary with steady state\n solution results\n * theta (Numpy array): steady-state retirement replacement\n rates, length J\n * baseline_values (tuple): (TRbaseline, Gbaseline), lump sum\n transfer and government spending amounts from the\n baseline model run\n\n '''\n baseline_ss = os.path.join(p.baseline_dir, \"SS\", \"SS_vars.pkl\")\n ss_baseline_vars = utils.safe_read_pickle(baseline_ss)\n factor = ss_baseline_vars['factor_ss']\n initial_b = ss_baseline_vars['bssmat_splus1']\n initial_n = ss_baseline_vars['nssmat']\n TRbaseline = None\n Gbaseline = None\n if p.baseline_spending:\n baseline_tpi = os.path.join(\n p.baseline_dir, \"TPI\", \"TPI_vars.pkl\")\n tpi_baseline_vars = utils.safe_read_pickle(baseline_tpi)\n TRbaseline = tpi_baseline_vars['TR']\n Gbaseline = tpi_baseline_vars['G']\n\n baseline_values = (TRbaseline, Gbaseline)\n\n if p.baseline:\n ss_vars = ss_baseline_vars\n else:\n reform_ss_path = os.path.join(p.output_base, \"SS/SS_vars.pkl\")\n ss_vars = utils.safe_read_pickle(reform_ss_path)\n theta = ss_vars['theta']\n # What is going on here? Whatever it is, why not done in parameters.py???\n N_tilde = p.omega.sum(1) # this should equal one in\n # each year given how we've constructed omega\n p.omega = p.omega / N_tilde.reshape(p.T + p.S, 1)\n\n '''\n ------------------------------------------------------------------------\n Set other parameters and initial values\n ------------------------------------------------------------------------\n '''\n # Get an initial distribution of wealth with the initial population\n # distribution. When small_open=True, the value of K0 is used as a\n # placeholder for first-period wealth\n B0 = aggr.get_B(initial_b, p, 'SS', True)\n\n b_sinit = np.array(list(np.zeros(p.J).reshape(1, p.J)) +\n list(initial_b[:-1]))\n b_splus1init = initial_b\n\n # Intial gov't debt must match that in the baseline\n if not p.baseline:\n baseline_tpi = os.path.join(\n p.baseline_dir, \"TPI\", \"TPI_vars.pkl\")\n tpi_baseline_vars = utils.safe_read_pickle(baseline_tpi)\n D0 = tpi_baseline_vars['D'][0]\n else:\n D0 = 0.0\n\n initial_values = (B0, b_sinit, b_splus1init, factor, initial_b,\n initial_n, D0)\n\n return initial_values, ss_vars, theta, baseline_values\n\n\ndef firstdoughnutring(guesses, r, w, bq, tr, theta, factor, j,\n initial_b, p):\n '''\n Solves the first entries of the upper triangle of the twist\n doughnut. 
This is separate from the main TPI function because the\n values of b and n are scalars, so it is easier to just have a\n separate function for these cases.\n\n Args:\n guesses (Numpy array): initial guesses for b and n, length 2\n r (scalar): real interest rate\n w (scalar): real wage rate\n bq (scalar): bequest amounts by age\n tr (scalar): government transfer amount\n theta (Numpy array): retirement replacement rates, length J\n factor (scalar): scaling factor converting model units to dollars\n j (int): index of ability type\n initial_b (Numpy array): savings of agents alive at T=0,\n size = SxJ\n p (OG-USA Specifications object): model parameters\n\n Returns:\n euler errors (Numpy array): errors from first order conditions,\n length 2\n\n '''\n b_splus1 = float(guesses[0])\n n = float(guesses[1])\n b_s = float(initial_b[-2, j])\n\n # Find errors from FOC for savings and FOC for labor supply\n error1 = household.FOC_savings(np.array([r]), np.array([w]), b_s,\n np.array([b_splus1]), np.array([n]),\n np.array([bq]), factor,\n np.array([tr]), theta[j],\n p.e[-1, j], p.rho[-1],\n np.array([p.tau_c[0, -1, j]]),\n p.etr_params[0, -1, :],\n p.mtry_params[0, -1, :], None, j, p,\n 'TPI_scalar')\n\n error2 = household.FOC_labor(\n np.array([r]), np.array([w]), b_s, b_splus1, np.array([n]),\n np.array([bq]), factor, np.array([tr]), theta[j], p.chi_n[-1],\n p.e[-1, j], np.array([p.tau_c[0, -1, j]]), p.etr_params[0, -1, :],\n p.mtrx_params[0, -1, :], None, j, p, 'TPI_scalar')\n\n if n <= 0 or n >= 1:\n error2 += 1e12\n if b_splus1 <= 0:\n error1 += 1e12\n return [np.squeeze(error1)] + [np.squeeze(error2)]\n\n\ndef twist_doughnut(guesses, r, w, bq, tr, theta, factor, j, s, t,\n tau_c, etr_params, mtrx_params, mtry_params,\n initial_b, p):\n '''\n Solves the upper triangle of time path iterations. 
These are the\n agents who are alive at time T=0 so that we do not solve for their\n full lifetime (so of their life was before the model begins).\n\n Args:\n guesses (Numpy array): initial guesses for b and n, length 2s\n r (scalar): real interest rate\n w (scalar): real wage rate\n bq (Numpy array): bequest amounts by age, length s\n tr (scalar): government transfer amount\n theta (Numpy array): retirement replacement rates, length J\n factor (scalar): scaling factor converting model units to dollars\n j (int): index of ability type\n s (int): years of life remaining\n t (int): model period\n tau_c (Numpy array): consumption tax rates, size = sxJ\n etr_params (Numpy array): ETR function parameters,\n size = sxsxnum_params\n mtrx_params (Numpy array): labor income MTR function parameters,\n size = sxsxnum_params\n mtry_params (Numpy array): capital income MTR function\n parameters, size = sxsxnum_params\n initial_b (Numpy array): savings of agents alive at T=0,\n size = SxJ\n p (OG-USA Specifications object): model parameters\n\n Returns:\n euler errors (Numpy array): errors from first order conditions,\n length 2s\n\n '''\n length = int(len(guesses) / 2)\n b_guess = np.array(guesses[:length])\n n_guess = np.array(guesses[length:])\n\n if length == p.S:\n b_s = np.array([0] + list(b_guess[:-1]))\n else:\n b_s = np.array([(initial_b[-(s + 3), j])] + list(b_guess[:-1]))\n\n b_splus1 = b_guess\n w_s = w[t:t + length]\n r_s = r[t:t + length]\n n_s = n_guess\n chi_n_s = p.chi_n[-length:]\n e_s = p.e[-length:, j]\n rho_s = p.rho[-length:]\n\n error1 = household.FOC_savings(r_s, w_s, b_s, b_splus1, n_s, bq,\n factor, tr, theta, e_s, rho_s,\n tau_c, etr_params, mtry_params, t,\n j, p, 'TPI')\n\n error2 = household.FOC_labor(r_s, w_s, b_s, b_splus1, n_s, bq,\n factor, tr, theta, chi_n_s, e_s,\n tau_c, etr_params, mtrx_params, t, j,\n p, 'TPI')\n\n # Check and punish constraint violations\n mask1 = n_guess < 0\n error2[mask1] += 1e12\n mask2 = n_guess > p.ltilde\n error2[mask2] += 1e12\n mask4 = b_guess <= 0\n error2[mask4] += 1e12\n mask5 = b_splus1 < 0\n error2[mask5] += 1e12\n return list(error1.flatten()) + list(error2.flatten())\n\n\ndef inner_loop(guesses, outer_loop_vars, initial_values, j, ind, p):\n '''\n Given path of economic aggregates and factor prices, solves\n household problem. 
This has been termed the inner-loop (in\n constrast to the outer fixed point loop that soves for GE factor\n prices and economic aggregates).\n\n Args:\n guesses (tuple): initial guesses for b and n, (guesses_b,\n guesses_n)\n outer_loop_vars (tuple): values for factor prices and economic\n aggregates used in household problem (r, w, r_hh, BQ, TR,\n theta)\n r (Numpy array): real interest rate on private capital\n w (Numpy array): real wage rate\n r (Numpy array): real interest rate on household portfolio\n BQ (array_like): aggregate bequest amounts\n TR (Numpy array): lump sum transfer amount\n theta (Numpy array): retirement replacement rates, length J\n initial_values (tuple): initial period variable values,\n (b_sinit, b_splus1init, factor, initial_b, initial_n, D0)\n j (int): index of ability type\n ind (Numpy array): integers from 0 to S-1\n p (OG-USA Specifications object): model parameters\n\n Returns:\n (tuple): household solution results:\n\n * euler_errors (Numpy array): errors from FOCs, size = Tx2S\n * b_mat (Numpy array): savings amounts, size = TxS\n * n_mat (Numpy array): labor supply amounts, size = TxS\n\n '''\n # unpack variables and parameters pass to function\n (K0, b_sinit, b_splus1init, factor, initial_b, initial_n,\n D0) = initial_values\n guesses_b, guesses_n = guesses\n r, w, r_hh, BQ, TR, theta = outer_loop_vars\n\n # compute w\n w[:p.T] = firm.get_w_from_r(r[:p.T], p, 'TPI')\n # compute bq\n bq = household.get_bq(BQ, None, p, 'TPI')\n # compute tr\n tr = household.get_tr(TR, None, p, 'TPI')\n\n # initialize arrays\n b_mat = np.zeros((p.T + p.S, p.S))\n n_mat = np.zeros((p.T + p.S, p.S))\n euler_errors = np.zeros((p.T, 2 * p.S))\n\n b_mat[0, -1], n_mat[0, -1] =\\\n np.array(opt.fsolve(firstdoughnutring, [guesses_b[0, -1],\n guesses_n[0, -1]],\n args=(r_hh[0], w[0], bq[0, -1, j],\n tr[0, -1, j],\n theta * p.replacement_rate_adjust[0],\n factor, j, initial_b, p),\n xtol=MINIMIZER_TOL))\n\n for s in range(p.S - 2): # Upper triangle\n ind2 = np.arange(s + 2)\n b_guesses_to_use = np.diag(guesses_b[:p.S, :], p.S - (s + 2))\n n_guesses_to_use = np.diag(guesses_n[:p.S, :], p.S - (s + 2))\n theta_to_use = theta[j] * p.replacement_rate_adjust[:p.S]\n bq_to_use = np.diag(bq[:p.S, :, j], p.S - (s + 2))\n tr_to_use = np.diag(tr[:p.S, :, j], p.S - (s + 2))\n tau_c_to_use = np.diag(p.tau_c[:p.S, :, j], p.S - (s + 2))\n\n length_diag =\\\n np.diag(p.etr_params[:p.S, :, 0], p.S-(s + 2)).shape[0]\n etr_params_to_use = np.zeros((length_diag, p.etr_params.shape[2]))\n mtrx_params_to_use = np.zeros((length_diag, p.mtrx_params.shape[2]))\n mtry_params_to_use = np.zeros((length_diag, p.mtry_params.shape[2]))\n for i in range(p.etr_params.shape[2]):\n etr_params_to_use[:, i] =\\\n np.diag(p.etr_params[:p.S, :, i], p.S - (s + 2))\n mtrx_params_to_use[:, i] =\\\n np.diag(p.mtrx_params[:p.S, :, i], p.S - (s + 2))\n mtry_params_to_use[:, i] =\\\n np.diag(p.mtry_params[:p.S, :, i], p.S - (s + 2))\n\n solutions = opt.fsolve(twist_doughnut,\n list(b_guesses_to_use) +\n list(n_guesses_to_use),\n args=(r_hh, w, bq_to_use, tr_to_use,\n theta_to_use, factor, j, s, 0,\n tau_c_to_use,\n etr_params_to_use,\n mtrx_params_to_use,\n mtry_params_to_use, initial_b, p),\n xtol=MINIMIZER_TOL)\n\n b_vec = solutions[:int(len(solutions) / 2)]\n b_mat[ind2, p.S - (s + 2) + ind2] = b_vec\n n_vec = solutions[int(len(solutions) / 2):]\n n_mat[ind2, p.S - (s + 2) + ind2] = n_vec\n\n for t in range(0, p.T):\n b_guesses_to_use = .75 * \\\n np.diag(guesses_b[t:t + p.S, :])\n n_guesses_to_use = np.diag(guesses_n[t:t + 
p.S, :])\n theta_to_use = theta[j] * p.replacement_rate_adjust[t:t + p.S]\n bq_to_use = np.diag(bq[t:t + p.S, :, j])\n tr_to_use = np.diag(tr[t:t + p.S, :, j])\n tau_c_to_use = np.diag(p.tau_c[t:t + p.S, :, j])\n\n # initialize array of diagonal elements\n etr_params_TP = np.zeros((p.T + p.S, p.S, p.etr_params.shape[2]))\n etr_params_TP[:p.T, :, :] = p.etr_params\n etr_params_TP[p.T:, :, :] = p.etr_params[-1, :, :]\n\n mtrx_params_TP = np.zeros((p.T + p.S, p.S, p.mtrx_params.shape[2]))\n mtrx_params_TP[:p.T, :, :] = p.mtrx_params\n mtrx_params_TP[p.T:, :, :] = p.mtrx_params[-1, :, :]\n\n mtry_params_TP = np.zeros((p.T + p.S, p.S, p.mtry_params.shape[2]))\n mtry_params_TP[:p.T, :, :] = p.mtry_params\n mtry_params_TP[p.T:, :, :] = p.mtry_params[-1, :, :]\n\n length_diag =\\\n np.diag(etr_params_TP[t:t + p.S, :, 0]).shape[0]\n etr_params_to_use = np.zeros((length_diag, p.etr_params.shape[2]))\n mtrx_params_to_use = np.zeros((length_diag, p.mtrx_params.shape[2]))\n mtry_params_to_use = np.zeros((length_diag, p.mtry_params.shape[2]))\n\n for i in range(p.etr_params.shape[2]):\n etr_params_to_use[:, i] = np.diag(etr_params_TP[t:t + p.S, :, i])\n mtrx_params_to_use[:, i] = np.diag(mtrx_params_TP[t:t + p.S, :, i])\n mtry_params_to_use[:, i] = np.diag(mtry_params_TP[t:t + p.S, :, i])\n #\n # TPI_solver_params = (inc_tax_params_TP, tpi_params, None)\n [solutions, infodict, ier, message] =\\\n opt.fsolve(twist_doughnut, list(b_guesses_to_use) +\n list(n_guesses_to_use),\n args=(r_hh, w, bq_to_use, tr_to_use,\n theta_to_use, factor,\n j, None, t, tau_c_to_use,\n etr_params_to_use, mtrx_params_to_use,\n mtry_params_to_use, initial_b, p),\n xtol=MINIMIZER_TOL, full_output=True)\n euler_errors[t, :] = infodict['fvec']\n\n b_vec = solutions[:p.S]\n b_mat[t + ind, ind] = b_vec\n n_vec = solutions[p.S:]\n n_mat[t + ind, ind] = n_vec\n\n print('Type ', j, ' max euler error = ', euler_errors.max())\n\n return euler_errors, b_mat, n_mat\n\n\ndef run_TPI(p, client=None):\n '''\n Solve for transition path equilibrium of OG-USA.\n\n Args:\n p (OG-USA Specifications object): model parameters\n client (Dask client object): client\n\n Returns:\n output (dictionary): dictionary with transition path solution\n results\n\n '''\n # unpack tuples of parameters\n initial_values, ss_vars, theta, baseline_values = get_initial_SS_values(p)\n (B0, b_sinit, b_splus1init, factor, initial_b, initial_n,\n D0) = initial_values\n (TRbaseline, Gbaseline) = baseline_values\n\n print('Government spending breakpoints are tG1: ', p.tG1,\n '; and tG2:', p.tG2)\n\n # Initialize guesses at time paths\n # Make array of initial guesses for labor supply and savings\n guesses_b = utils.get_initial_path(\n initial_b, ss_vars['bssmat_splus1'], p, 'ratio')\n guesses_n = utils.get_initial_path(\n initial_n, ss_vars['nssmat'], p, 'ratio')\n b_mat = guesses_b\n n_mat = guesses_n\n ind = np.arange(p.S)\n\n # Get path for aggregate savings and labor supply`\n L_init = np.ones((p.T + p.S,)) * ss_vars['Lss']\n B_init = np.ones((p.T + p.S,)) * ss_vars['Bss']\n L_init[:p.T] = aggr.get_L(n_mat[:p.T], p, 'TPI')\n B_init[1:p.T] = aggr.get_B(b_mat[:p.T], p, 'TPI', False)[:p.T - 1]\n B_init[0] = B0\n\n if not p.small_open:\n if p.budget_balance:\n K_init = B_init\n else:\n K_init = B_init * ss_vars['Kss'] / ss_vars['Bss']\n else:\n K_init = firm.get_B(L_init, p.firm_r, p, 'TPI')\n\n K = K_init\n K_d = K_init * ss_vars['K_d_ss'] / ss_vars['Kss']\n K_f = K_init * ss_vars['K_f_ss'] / ss_vars['Kss']\n\n L = L_init\n B = B_init\n Y = np.zeros_like(K)\n Y[:p.T] = 
firm.get_Y(K[:p.T], L[:p.T], p, 'TPI')\n Y[p.T:] = ss_vars['Yss']\n r = np.zeros_like(Y)\n if not p.small_open:\n r[:p.T] = firm.get_r(Y[:p.T], K[:p.T], p, 'TPI')\n r[p.T:] = ss_vars['rss']\n else:\n r = p.firm_r\n # compute w\n w = np.zeros_like(r)\n w[:p.T] = firm.get_w_from_r(r[:p.T], p, 'TPI')\n w[p.T:] = ss_vars['wss']\n r_gov = fiscal.get_r_gov(r, p)\n if p.budget_balance:\n r_hh = r\n else:\n r_hh = aggr.get_r_hh(r, r_gov, K, ss_vars['Dss'])\n if p.small_open:\n r_hh = p.hh_r\n\n BQ0 = aggr.get_BQ(r[0], initial_b, None, p, 'SS', True)\n if not p.use_zeta:\n BQ = np.zeros((p.T + p.S, p.J))\n for j in range(p.J):\n BQ[:, j] = (list(np.linspace(BQ0[j],\n ss_vars['BQss'][j], p.T)) +\n [ss_vars['BQss'][j]] * p.S)\n BQ = np.array(BQ)\n else:\n BQ = (list(np.linspace(BQ0, ss_vars['BQss'], p.T)) +\n [ss_vars['BQss']] * p.S)\n BQ = np.array(BQ)\n if p.budget_balance:\n if np.abs(ss_vars['TR_ss']) < 1e-13:\n TR_ss2 = 0.0 # sometimes SS is very small but not zero,\n # even if taxes are zero, this get's rid of the approximation\n # error, which affects the perc changes below\n else:\n TR_ss2 = ss_vars['TR_ss']\n TR = np.ones(p.T + p.S) * TR_ss2\n total_revenue = TR\n G = np.zeros(p.T + p.S)\n elif not p.baseline_spending:\n TR = p.alpha_T * Y\n G = np.ones(p.T + p.S) * ss_vars['Gss']\n elif p.baseline_spending:\n TR = TRbaseline\n TR_new = p.TR # Need to set TR_new for later reference\n G = Gbaseline\n G_0 = Gbaseline[0]\n\n # Initialize some starting values\n if p.budget_balance:\n D = np.zeros(p.T + p.S)\n else:\n D = np.ones(p.T + p.S) * ss_vars['Dss']\n if ss_vars['Dss'] == 0:\n D_d = np.zeros(p.T + p.S)\n D_f = np.zeros(p.T + p.S)\n else:\n D_d = D * ss_vars['D_d_ss'] / ss_vars['Dss']\n D_f = D * ss_vars['D_f_ss'] / ss_vars['Dss']\n total_revenue = np.ones(p.T + p.S) * ss_vars['total_revenue_ss']\n\n TPIiter = 0\n TPIdist = 10\n euler_errors = np.zeros((p.T, 2 * p.S, p.J))\n TPIdist_vec = np.zeros(p.maxiter)\n\n # TPI loop\n while (TPIiter < p.maxiter) and (TPIdist >= p.mindist_TPI):\n r_gov[:p.T] = fiscal.get_r_gov(r[:p.T], p)\n if p.budget_balance:\n r_hh[:p.T] = r[:p.T]\n else:\n K[:p.T] = firm.get_K_from_Y(Y[:p.T], r[:p.T], p, 'TPI')\n r_hh[:p.T] = aggr.get_r_hh(r[:p.T], r_gov[:p.T], K[:p.T], D[:p.T])\n if p.small_open:\n r_hh[:p.T] = p.hh_r[:p.T]\n\n outer_loop_vars = (r, w, r_hh, BQ, TR, theta)\n\n euler_errors = np.zeros((p.T, 2 * p.S, p.J))\n lazy_values = []\n for j in range(p.J):\n guesses = (guesses_b[:, :, j], guesses_n[:, :, j])\n lazy_values.append(\n delayed(inner_loop)(guesses, outer_loop_vars,\n initial_values, j, ind, p))\n results = compute(*lazy_values, scheduler=dask.multiprocessing.get,\n num_workers=p.num_workers)\n for j, result in enumerate(results):\n euler_errors[:, :, j], b_mat[:, :, j], n_mat[:, :, j] = result\n\n bmat_s = np.zeros((p.T, p.S, p.J))\n bmat_s[0, 1:, :] = initial_b[:-1, :]\n bmat_s[1:, 1:, :] = b_mat[:p.T-1, :-1, :]\n bmat_splus1 = np.zeros((p.T, p.S, p.J))\n bmat_splus1[:, :, :] = b_mat[:p.T, :, :]\n\n etr_params_4D = np.tile(\n p.etr_params.reshape(p.T, p.S, 1, p.etr_params.shape[2]),\n (1, 1, p.J, 1))\n bqmat = household.get_bq(BQ, None, p, 'TPI')\n trmat = household.get_tr(TR, None, p, 'TPI')\n tax_mat = tax.total_taxes(r_hh[:p.T], w[:p.T], bmat_s,\n n_mat[:p.T, :, :], bqmat[:p.T, :, :],\n factor, trmat[:p.T, :, :], theta, 0,\n None, False, 'TPI', p.e,\n etr_params_4D, p)\n r_hh_path = utils.to_timepath_shape(r_hh)\n wpath = utils.to_timepath_shape(w)\n c_mat = household.get_cons(r_hh_path[:p.T, :, :], wpath[:p.T, :, :],\n bmat_s, 
bmat_splus1,\n n_mat[:p.T, :, :], bqmat[:p.T, :, :],\n tax_mat, p.e, p.tau_c[:p.T, :, :], p)\n y_before_tax_mat = (r_hh_path[:p.T, :, :] * bmat_s[:p.T, :, :] +\n wpath[:p.T, :, :] * p.e * n_mat[:p.T, :, :])\n\n if not p.baseline_spending and not p.budget_balance:\n Y[:p.T] = TR[:p.T] / p.alpha_T[:p.T] # maybe unecessary\n\n (total_rev, T_Ipath, T_Ppath, T_BQpath, T_Wpath,\n T_Cpath, business_revenue) = aggr.revenue(\n r_hh[:p.T], w[:p.T], bmat_s, n_mat[:p.T, :, :],\n bqmat[:p.T, :, :], c_mat[:p.T, :, :], Y[:p.T],\n L[:p.T], K[:p.T], factor, theta, etr_params_4D,\n p, 'TPI')\n total_revenue[:p.T] = total_rev\n # set intial debt value\n if p.baseline:\n D0 = p.initial_debt_ratio * Y[0]\n if not p.baseline_spending:\n G_0 = p.alpha_G[0] * Y[0]\n dg_fixed_values = (Y, total_revenue, TR, D0, G_0)\n Dnew, G[:p.T] = fiscal.D_G_path(r_gov, dg_fixed_values,\n Gbaseline, p)\n # Fix initial amount of foreign debt holding\n D_f[0] = p.initial_foreign_debt_ratio * Dnew[0]\n for t in range(1, p.T):\n D_f[t + 1] = (D_f[t] / (np.exp(p.g_y) * (1 + p.g_n[t + 1]))\n + p.zeta_D[t] * (Dnew[t + 1] -\n (Dnew[t] /\n (np.exp(p.g_y) *\n (1 + p.g_n[t + 1])))))\n D_d[:p.T] = Dnew[:p.T] - D_f[:p.T]\n else: # if budget balance\n Dnew = np.zeros(p.T + 1)\n G[:p.T] = np.zeros(p.T)\n D_f[:p.T] = np.zeros(p.T)\n D_d[:p.T] = np.zeros(p.T)\n\n L[:p.T] = aggr.get_L(n_mat[:p.T], p, 'TPI')\n B[1:p.T] = aggr.get_B(bmat_splus1[:p.T], p, 'TPI',\n False)[:p.T - 1]\n K_demand_open = firm.get_K(L[:p.T], p.firm_r[:p.T], p, 'TPI')\n K_d[:p.T] = B[:p.T] - D_d[:p.T]\n if np.any(K_d < 0):\n print('K_d has negative elements. Setting them ' +\n 'positive to prevent NAN.')\n K_d[:p.T] = np.fmax(K_d[:p.T], 0.05 * B[:p.T])\n K_f[:p.T] = p.zeta_K[:p.T] * (K_demand_open - B[:p.T] + D_d[:p.T])\n K = K_f + K_d\n if np.any(B) < 0:\n print('B has negative elements. 
B[0:9]:', B[0:9])\n print('B[T-2:T]:', B[p.T - 2, p.T])\n if p.small_open:\n K[:p.T] = K_demand_open\n Ynew = firm.get_Y(K[:p.T], L[:p.T], p, 'TPI')\n rnew = r.copy()\n if not p.small_open:\n rnew[:p.T] = firm.get_r(Ynew[:p.T], K[:p.T], p, 'TPI')\n else:\n rnew[:p.T] = r[:p.T].copy()\n r_gov_new = fiscal.get_r_gov(rnew, p)\n if p.budget_balance:\n r_hh_new = rnew[:p.T]\n else:\n r_hh_new = aggr.get_r_hh(rnew[:p.T], r_gov_new[:p.T], K[:p.T],\n Dnew[:p.T])\n if p.small_open:\n r_hh_new = p.hh_r[:p.T]\n # compute w\n wnew = firm.get_w_from_r(rnew[:p.T], p, 'TPI')\n\n b_mat_shift = np.append(np.reshape(initial_b, (1, p.S, p.J)),\n b_mat[:p.T - 1, :, :], axis=0)\n BQnew = aggr.get_BQ(r_hh_new[:p.T], b_mat_shift, None, p,\n 'TPI', False)\n bqmat_new = household.get_bq(BQnew, None, p, 'TPI')\n (total_rev, T_Ipath, T_Ppath, T_BQpath, T_Wpath, T_Cpath,\n business_revenue) = aggr.revenue(\n r_hh_new[:p.T], wnew[:p.T], bmat_s, n_mat[:p.T, :, :],\n bqmat_new[:p.T, :, :], c_mat[:p.T, :, :], Ynew[:p.T],\n L[:p.T], K[:p.T], factor, theta, etr_params_4D, p, 'TPI')\n total_revenue[:p.T] = total_rev\n\n if p.budget_balance:\n TR_new = total_revenue\n elif not p.baseline_spending:\n TR_new = p.alpha_T[:p.T] * Ynew[:p.T]\n # If baseline_spending==True, no need to update TR, it's fixed\n\n # update vars for next iteration\n w[:p.T] = wnew[:p.T]\n r[:p.T] = utils.convex_combo(rnew[:p.T], r[:p.T], p.nu)\n BQ[:p.T] = utils.convex_combo(BQnew[:p.T], BQ[:p.T], p.nu)\n D[:p.T] = Dnew[:p.T]\n Y[:p.T] = utils.convex_combo(Ynew[:p.T], Y[:p.T], p.nu)\n if not p.baseline_spending:\n TR[:p.T] = utils.convex_combo(TR_new[:p.T], TR[:p.T], p.nu)\n guesses_b = utils.convex_combo(b_mat, guesses_b, p.nu)\n guesses_n = utils.convex_combo(n_mat, guesses_n, p.nu)\n print('r diff: ', (rnew[:p.T] - r[:p.T]).max(),\n (rnew[:p.T] - r[:p.T]).min())\n print('BQ diff: ', (BQnew[:p.T] - BQ[:p.T]).max(),\n (BQnew[:p.T] - BQ[:p.T]).min())\n print('TR diff: ', (TR_new[:p.T]-TR[:p.T]).max(),\n (TR_new[:p.T] - TR[:p.T]).min())\n print('Y diff: ', (Ynew[:p.T]-Y[:p.T]).max(),\n (Ynew[:p.T] - Y[:p.T]).min())\n if not p.baseline_spending:\n if TR.all() != 0:\n TPIdist = np.array(\n list(utils.pct_diff_func(rnew[:p.T], r[:p.T])) +\n list(utils.pct_diff_func(BQnew[:p.T],\n BQ[:p.T]).flatten()) +\n list(utils.pct_diff_func(TR_new[:p.T],\n TR[:p.T]))).max()\n else:\n TPIdist = np.array(\n list(utils.pct_diff_func(rnew[:p.T], r[:p.T])) +\n list(utils.pct_diff_func(BQnew[:p.T],\n BQ[:p.T]).flatten()) +\n list(np.abs(TR[:p.T]))).max()\n else:\n TPIdist = np.array(\n list(utils.pct_diff_func(rnew[:p.T], r[:p.T])) +\n list(utils.pct_diff_func(BQnew[:p.T], BQ[:p.T]).flatten())\n + list(utils.pct_diff_func(Ynew[:p.T], Y[:p.T]))).max()\n\n TPIdist_vec[TPIiter] = TPIdist\n # After T=10, if cycling occurs, drop the value of nu\n # wait til after T=10 or so, because sometimes there is a jump up\n # in the first couple iterations\n # if TPIiter > 10:\n # if TPIdist_vec[TPIiter] - TPIdist_vec[TPIiter - 1] > 0:\n # nu /= 2\n # print 'New Value of nu:', nu\n TPIiter += 1\n print('Iteration:', TPIiter)\n print('\\tDistance:', TPIdist)\n\n # Compute effective and marginal tax rates for all agents\n mtrx_params_4D = np.tile(\n p.mtrx_params.reshape(p.T, p.S, 1, p.mtrx_params.shape[2]),\n (1, 1, p.J, 1))\n mtry_params_4D = np.tile(\n p.mtry_params.reshape(p.T, p.S, 1, p.mtry_params.shape[2]),\n (1, 1, p.J, 1))\n\n e_3D = np.tile(p.e.reshape(1, p.S, p.J), (p.T, 1, 1))\n mtry_path = tax.MTR_income(r_hh_path[:p.T], wpath[:p.T],\n bmat_s[:p.T, :, :],\n n_mat[:p.T, :, :], 
factor, True,\n e_3D, etr_params_4D, mtry_params_4D, p)\n mtrx_path = tax.MTR_income(r_hh_path[:p.T], wpath[:p.T],\n bmat_s[:p.T, :, :],\n n_mat[:p.T, :, :], factor, False,\n e_3D, etr_params_4D, mtrx_params_4D, p)\n etr_path = tax.ETR_income(r_hh_path[:p.T], wpath[:p.T],\n bmat_s[:p.T, :, :],\n n_mat[:p.T, :, :], factor, e_3D,\n etr_params_4D, p)\n\n C = aggr.get_C(c_mat, p, 'TPI')\n # Note that implicity in this computation is that immigrants'\n # wealth is all in the form of private capital\n I_d = aggr.get_I(bmat_splus1[:p.T], K_d[1:p.T + 1], K_d[:p.T], p,\n 'TPI')\n I = aggr.get_I(bmat_splus1[:p.T], K[1:p.T + 1], K[:p.T], p, 'TPI')\n # solve resource constraint\n # net foreign borrowing\n new_borrowing_f = (D_f[1:p.T + 1] * np.exp(p.g_y) *\n (1 + p.g_n[1:p.T + 1]) - D_f[:p.T])\n debt_service_f = D_f * r_hh\n RC_error = aggr.resource_constraint(Y[:p.T - 1], C[:p.T - 1],\n G[:p.T - 1], I_d[:p.T - 1],\n K_f[:p.T - 1],\n new_borrowing_f[:p.T - 1],\n debt_service_f[:p.T - 1],\n r_hh[:p.T - 1], p)\n\n # Compute total investment (not just domestic)\n I_total = ((1 + p.g_n[:p.T]) * np.exp(p.g_y) * K[1:p.T + 1] -\n (1.0 - p.delta) * K[:p.T])\n\n # Compute income tax revenues\n tax_rev = aggr.get_L(T_Ipath, p, 'TPI')\n payroll_tax_revenue = p.frac_tax_payroll[:p.T] * tax_rev[:p.T]\n iit_revenue = tax_rev[:p.T] - payroll_tax_revenue\n\n # Compute resource constraint error\n rce_max = np.amax(np.abs(RC_error))\n print('Max absolute value resource constraint error:', rce_max)\n\n print('Checking time path for violations of constraints.')\n for t in range(p.T):\n household.constraint_checker_TPI(\n b_mat[t], n_mat[t], c_mat[t], t, p.ltilde)\n\n eul_savings = euler_errors[:, :p.S, :].max(1).max(1)\n eul_laborleisure = euler_errors[:, p.S:, :].max(1).max(1)\n\n print('Max Euler error, savings: ', eul_savings)\n print('Max Euler error labor supply: ', eul_laborleisure)\n\n '''\n ------------------------------------------------------------------------\n Save variables/values so they can be used in other modules\n ------------------------------------------------------------------------\n '''\n\n output = {'Y': Y[:p.T], 'B': B, 'K': K, 'K_f': K_f, 'K_d': K_d,\n 'L': L, 'C': C, 'I': I,\n 'I_total': I_total, 'I_d': I_d, 'BQ': BQ,\n 'total_revenue': total_revenue,\n 'business_revenue': business_revenue,\n 'IITpayroll_revenue': T_Ipath, 'iit_revenue': iit_revenue,\n 'payroll_tax_revenue': payroll_tax_revenue, 'TR': TR,\n 'T_P': T_Ppath, 'T_BQ': T_BQpath, 'T_W': T_Wpath,\n 'T_C': T_Cpath, 'G': G, 'D': D, 'D_f': D_f, 'D_d': D_d,\n 'r': r, 'r_gov': r_gov,\n 'r_hh': r_hh, 'w': w, 'bmat_splus1': bmat_splus1,\n 'bmat_s': bmat_s[:p.T, :, :], 'n_mat': n_mat[:p.T, :, :],\n 'c_path': c_mat, 'bq_path': bqmat, 'tr_path': trmat,\n 'y_before_tax_mat': y_before_tax_mat,\n 'tax_path': tax_mat, 'eul_savings': eul_savings,\n 'eul_laborleisure': eul_laborleisure,\n 'resource_constraint_error': RC_error,\n 'new_borrowing_f': new_borrowing_f,\n 'debt_service_f': debt_service_f,\n 'etr_path': etr_path, 'mtrx_path': mtrx_path,\n 'mtry_path': mtry_path}\n\n tpi_dir = os.path.join(p.output_base, \"TPI\")\n utils.mkdirs(tpi_dir)\n tpi_vars = os.path.join(tpi_dir, \"TPI_vars.pkl\")\n with open(tpi_vars, \"wb\") as f:\n pickle.dump(output, f)\n\n if np.any(G) < 0:\n print('Government spending is negative along transition path' +\n ' to satisfy budget')\n\n if (((TPIiter >= p.maxiter) or\n (np.absolute(TPIdist) > p.mindist_TPI)) and\n ENFORCE_SOLUTION_CHECKS):\n raise RuntimeError('Transition path equlibrium not found' +\n ' 
(TPIdist)')\n\n    if ((np.any(np.absolute(RC_error) >= p.mindist_TPI * 10)) and\n            ENFORCE_SOLUTION_CHECKS):\n        raise RuntimeError('Transition path equilibrium not found ' +\n                           '(RC_error)')\n\n    if ((np.any(np.absolute(eul_savings) >= p.mindist_TPI) or\n            (np.any(np.absolute(eul_laborleisure) > p.mindist_TPI))) and\n            ENFORCE_SOLUTION_CHECKS):\n        raise RuntimeError('Transition path equilibrium not found ' +\n                           '(eulers)')\n\n    return output\n" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.squeeze", "numpy.fmax", "numpy.ones", "scipy.optimize.fsolve", "numpy.exp", "numpy.any", "numpy.arange", "numpy.abs", "numpy.absolute", "numpy.linspace", "numpy.diag" ] ]
clonker/PyEMMA
[ "a36534ce2ec6a799428dfbdef0465c979e6c68aa", "a36534ce2ec6a799428dfbdef0465c979e6c68aa" ]
[ "pyemma/_base/model.py", "pyemma/coordinates/data/util/reader_utils.py" ]
[ "import copy\nimport numpy as _np\nimport inspect\nimport warnings\n\nfrom pyemma._ext import six\nfrom pyemma._ext.sklearn.base import _pprint\nfrom pyemma.util.statistics import confidence_interval\nfrom pyemma.util.reflection import call_member\n\n__author__ = 'noe'\n\n\nclass Model(object):\n \"\"\" Base class for pyEMMA models\n\n This class is inspired by sklearn's BaseEstimator class. However, we define parameter names not by the\n current class' __init__ but have to announce them. This allows us to also remember the parameters of model\n superclasses. This class can be mixed with pyEMMA and sklearn Estimators.\n\n \"\"\"\n\n def _get_model_param_names(self):\n \"\"\"Get parameter names for the estimator\"\"\"\n # fetch model parameters\n if hasattr(self, 'set_model_params'):\n set_model_param_method = getattr(self, 'set_model_params')\n # introspect the constructor arguments to find the model parameters\n # to represent\n args, varargs, kw, default = inspect.getargspec(set_model_param_method)\n if varargs is not None:\n raise RuntimeError(\"pyEMMA models should always specify their parameters in the signature\"\n \" of their set_model_params (no varargs). %s doesn't follow this convention.\"\n % (self, ))\n # Remove 'self'\n # XXX: This is going to fail if the init is a staticmethod, but\n # who would do this?\n args.pop(0)\n args.sort()\n return args\n else:\n # No parameters known\n return []\n\n def update_model_params(self, **params):\n \"\"\"Update given model parameter if they are set to specific values\"\"\"\n for key, value in params.iteritems():\n if not hasattr(self, key):\n setattr(self, key, value) # set parameter for the first time.\n elif getattr(self, key) is None:\n setattr(self, key, value) # update because this parameter is still None.\n elif value is not None:\n setattr(self, key, value) # only overwrite if set to a specific value (None does not overwrite).\n\n def get_model_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n Parameters\n ----------\n deep: boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n out = dict()\n for key in self._get_model_param_names():\n # We need deprecation warnings to always be on in order to\n # catch deprecated param values.\n # This is set in utils/__init__.py but it gets overwritten\n # when running under python3 somehow.\n warnings.simplefilter(\"always\", DeprecationWarning)\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n if len(w) and w[0].category == DeprecationWarning:\n # if the parameter is deprecated, don't show it\n continue\n finally:\n warnings.filters.pop(0)\n\n # XXX: should we rather test if instance of estimator?\n if deep and hasattr(value, 'get_params'):\n deep_items = value.get_params().items()\n out.update((key + '__' + k, val) for k, val in deep_items)\n out[key] = value\n return out\n\n # def set_model_params(self, **params):\n # \"\"\"Set the parameters of this estimator.\n # The method works on simple estimators as well as on nested objects\n # (such as pipelines). 
The former have parameters of the form\n # ``<component>__<parameter>`` so that it's possible to update each\n # component of a nested object.\n # Returns\n # -------\n # self\n # \"\"\"\n # if not params:\n # # Simple optimisation to gain speed (inspect is slow)\n # return self\n # valid_params = self.get_model_params(deep=True)\n # for key, value in six.iteritems(params):\n # split = key.split('__', 1)\n # if len(split) > 1:\n # # nested objects case\n # name, sub_name = split\n # if name not in valid_params:\n # raise ValueError('Invalid parameter %s for estimator %s' %\n # (name, self))\n # sub_object = valid_params[name]\n # sub_object.set_params(**{sub_name: value})\n # else:\n # # simple objects case\n # if key not in valid_params:\n # raise ValueError('Invalid parameter %s ' 'for estimator %s'\n # % (key, self.__class__.__name__))\n # setattr(self, key, value)\n # return self\n\n # FIXME: __repr__ is incompatible with Estimator __repr__. Need a general fix for a nice representation\n# def __repr__(self):\n# class_name = self.__class__.__name__\n# return '%s(%s)' % (class_name, _pprint(self.get_model_params(deep=False),\n# offset=len(class_name),),)\n\n\nclass SampledModel(Model):\n\n def __init__(self, samples, conf=0.95):\n self.set_model_params(samples=samples, conf=conf)\n\n # TODO: maybe rename to parametrize in order to avoid confusion with set_params that has a different behavior?\n def set_model_params(self, samples=None, conf=0.95):\n self.update_model_params(samples=samples, conf=conf)\n if samples is not None:\n self.nsamples = len(samples)\n\n def _check_samples_available(self):\n if self.samples is None:\n raise AttributeError('Model samples not available in '+str(self)+'. Call set_model_params with samples.')\n\n# def mean_model(self):\n# \"\"\"Computes the mean model from the given samples\"\"\"\n# raise NotImplementedError('mean_model is not implemented in class '+str(self.__class__))\n\n def sample_f(self, f, *args, **kw):\n self._check_samples_available()\n return [call_member(M, f, *args, **kw) for M in self.samples]\n\n def sample_mean(self, f, *args, **kw):\n vals = self.sample_f(f, *args, **kw)\n return _np.mean(vals, axis=0)\n\n def sample_std(self, f, *args, **kw):\n vals = self.sample_f(f, *args, **kw)\n return _np.std(vals, axis=0)\n\n def sample_conf(self, f, *args, **kw):\n vals = self.sample_f(f, *args, **kw)\n return confidence_interval(vals, conf=self.conf)\n", "\n# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University\n# Berlin, 14195 Berlin, Germany.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom numpy import vstack\nimport mdtraj as md\nimport numpy as np\nimport os\n\n\ndef create_file_reader(input_files, topology, featurizer, chunk_size=100):\n r\"\"\"\n Creates a (possibly featured) file reader by a number of input files and either a topology file or a featurizer.\n Parameters\n ----------\n :param input_files:\n A single input file or a list of input files.\n :param topology:\n A topology file. If given, the featurizer argument can be None.\n :param featurizer:\n A featurizer. If given, the topology file can be None.\n :param chunk_size:\n The chunk size with which the corresponding reader gets initialized.\n :return: Returns the reader.\n \"\"\"\n from pyemma.coordinates.data.numpy_filereader import NumPyFileReader as _NumPyFileReader\n from pyemma.coordinates.data.py_csv_reader import PyCSVReader as _CSVReader\n from pyemma.coordinates.data import FeatureReader as _FeatureReader\n\n if isinstance(input_files, basestring) \\\n or (isinstance(input_files, (list, tuple))\n and (any(isinstance(item, basestring) for item in input_files) or len(input_files) is 0)):\n reader = None\n # check: if single string create a one-element list\n if isinstance(input_files, basestring):\n input_list = [input_files]\n elif len(input_files) > 0 and all(isinstance(item, basestring) for item in input_files):\n input_list = input_files\n else:\n if len(input_files) is 0:\n raise ValueError(\"The passed input list should not be empty.\")\n else:\n raise ValueError(\"The passed list did not exclusively contain strings.\")\n\n _, suffix = os.path.splitext(input_list[0])\n\n # check: do all files have the same file type? If not: raise ValueError.\n if all(item.endswith(suffix) for item in input_list):\n\n # do all the files exist? If not: Raise value error\n all_exist = True\n err_msg = \"\"\n for item in input_list:\n if not os.path.isfile(item):\n err_msg += \"\\n\" if len(err_msg) > 0 else \"\"\n err_msg += \"File %s did not exist or was no file\" % item\n all_exist = False\n if not all_exist:\n raise ValueError(\"Some of the given input files were directories\"\n \" or did not exist:\\n%s\" % err_msg)\n\n if all_exist:\n from mdtraj.formats.registry import _FormatRegistry\n\n # CASE 1.1: file types are MD files\n if suffix in _FormatRegistry.loaders.keys():\n # check: do we either have a featurizer or a topology file name? 
If not: raise ValueError.\n # create a MD reader with file names and topology\n if not featurizer and not topology:\n raise ValueError(\"The input files were MD files which makes it mandatory to have either a \"\n \"featurizer or a topology file.\")\n\n reader = _FeatureReader(input_list, featurizer=featurizer, topologyfile=topology,\n chunksize=chunk_size)\n else:\n if suffix in ['.npy', '.npz']:\n reader = _NumPyFileReader(input_list, chunksize=chunk_size)\n # otherwise we assume that given files are ascii tabulated data\n else:\n reader = _CSVReader(input_list, chunksize=chunk_size)\n else:\n raise ValueError(\"Not all elements in the input list were of the type %s!\" % suffix)\n else:\n raise ValueError(\"Input \\\"%s\\\" was no string or list of strings.\" % input)\n return reader\n\n\ndef single_traj_from_n_files(file_list, top):\n \"\"\" Creates a single trajectory object from a list of files\n\n \"\"\"\n traj = None\n for ff in file_list:\n if traj is None:\n traj = md.load(ff, top=top)\n else:\n traj = traj.join(md.load(ff, top=top))\n\n return traj\n\n\ndef copy_traj_attributes(target, origin, start):\n \"\"\" Inserts certain attributes of origin into target\n :param target: target trajectory object\n :param origin: origin trajectory object\n :param start: :py:obj:`origin` attributes will be inserted in :py:obj:`target` starting at this index\n :return: target: the md trajectory with the attributes of :py:obj:`origin` inserted\n \"\"\"\n\n # The list of copied attributes can be extended here with time\n # Or perhaps ask the mdtraj guys to implement something similar?\n\n target._xyz[start:start+origin.n_frames] = origin._xyz\n target._unitcell_lengths[start:start+origin.n_frames] = origin._unitcell_lengths\n target._unitcell_angles[start:start+origin.n_frames] = origin._unitcell_angles\n target._time[start:start+origin.n_frames] = origin._time\n\n return target\n\n\ndef preallocate_empty_trajectory(top, n_frames=1):\n \"\"\"\n\n :param top: md.Topology object to be mimicked in shape\n :param n_frames: desired number of frames of the empty trajectory\n :return: empty_traj: empty md.Trajectory object with n_frames\n \"\"\"\n return md.Trajectory(np.zeros((n_frames,top.n_atoms,3)),\n top,\n time=np.zeros(n_frames),\n unitcell_lengths=np.zeros((n_frames,3)),\n unitcell_angles=np.zeros((n_frames ,3))\n )\n\n\ndef enforce_top(top):\n if isinstance(top,str):\n top = md.load(top).top\n elif isinstance(top, md.Trajectory):\n top = top.top\n elif isinstance(top, md.Topology):\n pass\n else:\n raise TypeError('element %u of the reference list is not of type str, md.Trajectory, or md.Topology, but %s'%\n type(top))\n return top\n\n\ndef save_traj_w_md_load_frame(reader, sets):\n # Creates a single trajectory object from a \"sets\" array via md.load_frames\n traj = None\n for file_idx, frame_idx in vstack(sets):\n if traj is None:\n traj = md.load_frame(reader.trajfiles[file_idx], frame_idx, reader.topfile)\n else:\n traj = traj.join(md.load_frame(reader.trajfiles[file_idx], frame_idx, reader.topfile))\n return traj\n\n\ndef compare_coords_md_trajectory_objects(traj1, traj2, atom = None, eps = 1e-6, mess = False ):\n # Compares the coordinates of \"atom\" for all frames in traj1 and traj2\n # Returns a boolean found_diff and an errmsg informing where\n assert isinstance(traj1, md.Trajectory)\n assert isinstance(traj2, md.Trajectory)\n assert traj1.n_frames == traj2.n_frames\n assert traj2.n_atoms == traj2.n_atoms\n\n R = np.zeros((2, traj1.n_frames, 3))\n if atom is None:\n atom_index = 
np.random.randint(0, high = traj1.n_atoms)\n else:\n atom_index = atom\n\n # Artificially mess the the coordinates\n if mess:\n traj1.xyz [0, atom_index, 2] += 10*eps\n\n for ii, traj in enumerate([traj1, traj2]):\n R[ii, :] = traj.xyz[:, atom_index]\n\n # Compare the R-trajectories among themselves\n found_diff = False\n first_diff = None\n errmsg = ''\n\n for ii, iR in enumerate(R):\n # Norm of the difference vector\n norm_diff = np.sqrt(((iR - R) ** 2).sum(2))\n\n # Any differences?\n if (norm_diff > eps).any():\n first_diff = np.argwhere(norm_diff > eps)[0]\n found_diff = True\n errmsg = \"Delta R_%u at frame %u: [%2.1e, %2.1e]\" % (atom_index, first_diff[1],\n norm_diff[0, first_diff[1]],\n norm_diff[1, first_diff[1]])\n errmsg2 = \"\\nThe position of atom %u differs by > %2.1e for the same frame between trajectories\" % (\n atom_index, eps)\n errmsg += errmsg2\n break\n\n return found_diff, errmsg\n" ]
[ [ "numpy.std", "numpy.mean" ], [ "numpy.random.randint", "numpy.zeros", "numpy.vstack", "numpy.argwhere" ] ]
brooks-anderson/pytorch
[ "dd928097938b6368fc7e2dc67721550d50ab08ea" ]
[ "torch/testing/_internal/common_quantization.py" ]
[ "r\"\"\"Importing this file includes common utility methods and base clases for\nchecking quantization api and properties of resulting modules.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.quantized as nnq\nimport torch.nn.quantized.dynamic as nnqd\nfrom torch.nn.intrinsic import _FusedModule\nimport torch.distributed as dist\n\nfrom torch.testing._internal.common_utils import TestCase\nfrom torch.quantization import QuantWrapper, QuantStub, DeQuantStub, \\\n default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \\\n propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \\\n get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, QConfigDynamic, QuantType\nfrom torch.quantization.quantization_mappings import (\n get_default_dynamic_quant_module_mappings,\n get_default_qconfig_propagation_list,\n get_default_qat_module_mappings,\n)\n\ntry:\n # graph mode quantization based on fx\n from torch.quantization.quantize_fx import (\n prepare_fx,\n prepare_qat_fx,\n convert_fx,\n )\n from torch.quantization.ns.ns_types import NSSingleResultValuesType, NSSubgraph\n from torch.fx.graph import Node\n from torch.fx import GraphModule\n HAS_FX = True\nexcept ImportError:\n HAS_FX = False\n\nimport copy\nimport io\nimport functools\nimport time\nimport os\n\nimport unittest\nimport numpy as np\nfrom torch.testing import FileCheck\nfrom typing import Callable, Tuple, Dict, Any, Union\n\nclass NodeSpec:\n ''' Used for checking GraphModule Node\n '''\n def __init__(self, op, target):\n '''\n op: call_function | call_module\n target:\n for call_function, target would be a function\n for call_module, target would be the type of PyTorch module\n '''\n self.op = op\n self.target = target\n\n @classmethod\n def call_function(cls, target):\n return NodeSpec('call_function', target)\n\n @classmethod\n def call_method(cls, target):\n return NodeSpec('call_method', target)\n\n @classmethod\n def call_module(cls, target):\n return NodeSpec('call_module', target)\n\n def __hash__(self):\n return hash((self.op, self.target))\n\n def __eq__(self, other):\n if not isinstance(other, NodeSpec):\n return NotImplemented\n\n return self.op == other.op and self.target == other.target\n\n def __repr__(self):\n return repr(self.op) + \" \" + repr(self.target)\n\ndef test_only_eval_fn(model, calib_data):\n r\"\"\"\n Default evaluation function takes a torch.utils.data.Dataset or a list of\n input Tensors and run the model on the dataset\n \"\"\"\n for inp in calib_data:\n output = model(*inp)\n\n_default_loss_fn = torch.nn.CrossEntropyLoss()\ndef test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):\n r\"\"\"\n Default train function takes a torch.utils.data.Dataset and train the model\n on the dataset\n \"\"\"\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n train_loss, correct, total = 0, 0, 0\n for i in range(10):\n model.train()\n for data, target in train_data:\n optimizer.zero_grad()\n output = model(data)\n loss = loss_fn(output, target)\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n _, predicted = torch.max(output, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n return train_loss, correct, total\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n 
self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches):\n model.train()\n cnt = 0\n for image, target in data_loader:\n start_time = time.time()\n print('.', end='')\n cnt += 1\n image, target = image.to(device), target.to(device)\n output = model(image)\n loss = criterion(output, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n if cnt >= ntrain_batches:\n return\n return\n\ndef ddp_setup(rank, world_size):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12355'\n\n # initialize the process group\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n\ndef ddp_cleanup():\n dist.destroy_process_group()\n\ndef run_ddp(rank, world_size, prepared):\n ddp_setup(rank, world_size)\n prepared.cuda()\n prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank])\n prepared.to(rank)\n model_with_ddp = prepared\n optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001)\n train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1)\n ddp_cleanup()\n\n\ndef convert_dynamic(module):\n convert(module, get_default_dynamic_quant_module_mappings(), inplace=True)\n\ndef prepare_dynamic(model, qconfig_dict=None):\n propagate_qconfig_(model, qconfig_dict)\n\ndef _make_conv_test_input(\n batch_size, in_channels_per_group, input_feature_map_size,\n out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale,\n W_zero_point, use_bias, use_channelwise,\n):\n in_channels = in_channels_per_group * groups\n out_channels = out_channels_per_group * groups\n\n (X_value_min, X_value_max) = (0, 4)\n X_init = torch.randint(\n X_value_min, X_value_max,\n (batch_size, in_channels,) + input_feature_map_size)\n X = X_scale * (X_init - X_zero_point).float()\n X_q = torch.quantize_per_tensor(\n X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8)\n\n W_scale = W_scale * out_channels\n W_zero_point = W_zero_point * out_channels\n # Resize W_scale and W_zero_points arrays equal to out_channels\n W_scale = W_scale[:out_channels]\n W_zero_point = W_zero_point[:out_channels]\n # For testing, we use small values for weights and for activations so that\n # no overflow occurs in vpmaddubsw instruction. 
If the overflow occurs in\n # qconv implementation and if there is no overflow.\n # In reference we can't exactly match the results with reference.\n # Please see the comment in qconv implementation file\n # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.\n (W_value_min, W_value_max) = (-5, 5)\n # The operator expects them in the format\n # (out_channels, in_channels/groups,) + kernel_size\n W_init = torch.randint(\n W_value_min, W_value_max,\n (out_channels, in_channels_per_group,) + kernel_size)\n b_init = torch.randint(0, 10, (out_channels,))\n\n if use_channelwise:\n W_shape = (-1, 1) + (1,) * len(kernel_size)\n W_scales_tensor = torch.tensor(W_scale, dtype=torch.float)\n W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float)\n W = W_scales_tensor.reshape(*W_shape) * (\n W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()\n b = X_scale * W_scales_tensor * b_init.float()\n W_q = torch.quantize_per_channel(\n W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0,\n dtype=torch.qint8)\n else:\n W = W_scale[0] * (W_init - W_zero_point[0]).float()\n b = X_scale * W_scale[0] * b_init.float()\n W_q = torch.quantize_per_tensor(\n W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8)\n\n return (X, X_q, W, W_q, b if use_bias else None)\n\ndef skipIfNoFBGEMM(fn):\n reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.'\n if isinstance(fn, type):\n if 'fbgemm' not in torch.backends.quantized.supported_engines:\n fn.__unittest_skip__ = True\n fn.__unittest_skip_why__ = reason\n return fn\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n if 'fbgemm' not in torch.backends.quantized.supported_engines:\n raise unittest.SkipTest(reason)\n else:\n fn(*args, **kwargs)\n return wrapper\n\ntry:\n import torchvision # noqa: F401\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\nskip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\n\ndef get_script_module(model, tracing, data):\n return torch.jit.trace(model, data) if tracing else torch.jit.script(model)\n\ndef lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True):\n \"\"\"\n Convert lengths to offsets for embedding_bag\n \"\"\"\n tt = np.zeros((t.shape[0] + 1,), dtype=offset_type)\n tt[1:] = t\n tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type))\n if use_begin_offset:\n return tt[:-1]\n return tt[1:]\n\n# QuantizationTestCase used as a base class for testing quantization on modules\nclass QuantizationTestCase(TestCase):\n def setUp(self):\n super().setUp()\n self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)]\n self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)]\n self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)]\n for _ in range(2)]\n self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)]\n for _ in range(2)]\n self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)]\n for _ in range(2)]\n self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float),\n torch.randint(0, 1, (1,), dtype=torch.long)]\n for _ in range(2)]\n self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float),\n torch.randint(0, 1, (1,), dtype=torch.long)]\n for _ in range(2)]\n self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float),\n torch.randint(0, 1, (1,), dtype=torch.long)]\n for _ in range(2)]\n\n self.img_data_dict 
= {1 : self.img_data_1d,\n 2 : self.img_data_2d,\n 3 : self.img_data_3d}\n\n # Quant types that produce statically quantized ops\n self.static_quant_types = [QuantType.STATIC, QuantType.QAT]\n # All quant types for (fx based) graph mode quantization\n self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT]\n\n def checkNoPrepModules(self, module):\n r\"\"\"Checks the module does not contain child\n modules for quantization prepration, e.g.\n quant, dequant and observer\n \"\"\"\n self.assertFalse(hasattr(module, 'quant'))\n self.assertFalse(hasattr(module, 'dequant'))\n\n def checkNoQconfig(self, module):\n r\"\"\"Checks the module does not contain qconfig\n \"\"\"\n self.assertFalse(hasattr(module, 'qconfig'))\n\n for child in module.children():\n self.checkNoQconfig(child)\n\n def checkHasPrepModules(self, module):\n r\"\"\"Checks the module contains child\n modules for quantization prepration, e.g.\n quant, dequant and observer\n \"\"\"\n self.assertTrue(hasattr(module, 'module'))\n self.assertTrue(hasattr(module, 'quant'))\n self.assertTrue(hasattr(module, 'dequant'))\n\n def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None):\n r\"\"\"Checks the module or module's leaf descendants\n have observers in preperation for quantization\n \"\"\"\n if propagate_qconfig_list is None:\n propagate_qconfig_list = get_default_qconfig_propagation_list()\n if prepare_custom_config_dict is None:\n prepare_custom_config_dict = {}\n float_to_observed_module_class_mapping = prepare_custom_config_dict.get(\"float_to_observed_custom_module_class\", {})\n\n # check if a module is a leaf module, ignoring activation_post_process attribute\n def is_leaf_module(module):\n submodule_name_count = 0\n for name, _ in module.named_children():\n if name != 'activation_post_process':\n submodule_name_count += 1\n return submodule_name_count == 0\n\n if hasattr(module, 'qconfig') and module.qconfig is not None and \\\n ((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential)\n and type(module) in propagate_qconfig_list) or\n type(module) in float_to_observed_module_class_mapping.keys()) and \\\n not isinstance(module, torch.quantization.DeQuantStub):\n self.assertTrue(hasattr(module, 'activation_post_process'),\n 'module: ' + str(type(module)) + ' do not have observer')\n # we don't need to check observers for child modules of the\n # qat modules\n if type(module) not in get_default_qat_module_mappings().values() and \\\n type(module) not in float_to_observed_module_class_mapping.values() and \\\n not isinstance(module, _FusedModule):\n for child in module.children():\n self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict)\n\n def checkQuantDequant(self, mod):\n r\"\"\"Checks that mod has nn.Quantize and\n nn.DeQuantize submodules inserted\n \"\"\"\n self.assertEqual(type(mod.quant), nnq.Quantize)\n self.assertEqual(type(mod.dequant), nnq.DeQuantize)\n\n def checkWrappedQuantizedLinear(self, mod):\n r\"\"\"Checks that mod has been swapped for an nnq.Linear\n module, the bias is qint32, and that the module\n has Quantize and DeQuantize submodules\n \"\"\"\n self.assertEqual(type(mod.module), nnq.Linear)\n self.checkQuantDequant(mod)\n\n def checkQuantizedLinear(self, mod):\n self.assertEqual(type(mod), nnq.Linear)\n\n def checkDynamicQuantizedLinear(self, mod, dtype):\n r\"\"\"Checks that mod has been swapped for an nnqd.Linear\n module, the bias is float.\n \"\"\"\n self.assertEqual(type(mod), nnqd.Linear)\n 
self.assertEqual(mod._packed_params.dtype, dtype)\n\n def check_eager_serialization(self, ref_model, loaded_model, x):\n # Check state dict serialization and torch.save APIs\n model_dict = ref_model.state_dict()\n b = io.BytesIO()\n torch.save(model_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n loaded_model.load_state_dict(loaded_dict)\n ref_out = ref_model(*x)\n load_out = loaded_model(*x)\n\n def check_outputs(ref_out, load_out):\n self.assertEqual(ref_out[0], load_out[0])\n if isinstance(ref_out[1], tuple):\n self.assertEqual(ref_out[1][0], load_out[1][0])\n self.assertEqual(ref_out[1][1], load_out[1][1])\n else:\n self.assertEqual(ref_out[1], load_out[1])\n\n check_outputs(ref_out, load_out)\n b = io.BytesIO()\n torch.save(ref_model, b)\n b.seek(0)\n loaded = torch.load(b)\n load_out = loaded(*x)\n check_outputs(ref_out, load_out)\n\n def check_weight_bias_api(self, ref_model, weight_keys, bias_keys):\n weight = ref_model.get_weight()\n bias = ref_model.get_bias()\n self.assertEqual(weight_keys ^ weight.keys(), set())\n self.assertEqual(bias_keys ^ bias.keys(), set())\n\n def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype):\n r\"\"\"Checks that mod has been swapped for an nnqd.LSTM type\n module, the bias is float.\n \"\"\"\n wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}\n self.assertEqual(type(mod), reference_module_type)\n for packed_params in mod._all_weight_values:\n self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])\n\n def checkLinear(self, mod):\n self.assertEqual(type(mod), torch.nn.Linear)\n\n def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype):\n r\"\"\"Checks that mod has been swapped for an nnqd.Linear\n module, the bias is float.\n \"\"\"\n wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'}\n self.assertEqual(type(mod), reference_module_type)\n if hasattr(mod, '_all_weight_values'):\n for packed_params in mod._all_weight_values:\n self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype])\n\n def checkScriptable(self, orig_mod, calib_data, check_save_load=False):\n scripted = torch.jit.script(orig_mod)\n self._checkScriptable(orig_mod, scripted, calib_data, check_save_load)\n\n # Use first calib_data entry as trace input\n traced = torch.jit.trace(orig_mod, calib_data[0])\n self._checkScriptable(orig_mod, traced, calib_data, check_save_load)\n\n # Call this twice: once for a scripted module and once for a traced module\n def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load):\n self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data)\n\n # Test save/load\n buffer = io.BytesIO()\n torch.jit.save(script_mod, buffer)\n\n buffer.seek(0)\n loaded_mod = torch.jit.load(buffer)\n # Pending __get_state_ and __set_state__ support\n # See tracking task https://github.com/pytorch/pytorch/issues/23984\n if check_save_load:\n self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data)\n\n def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data):\n for inp in calib_data:\n ref_output = orig_mod(*inp)\n scripted_output = test_mod(*inp)\n self.assertEqual(scripted_output, ref_output)\n\n\n def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False,\n check=True, eval_mode=True, dynamic=False, qconfig=None):\n if debug:\n print('Testing:', str(module))\n qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)}\n\n 
if eval_mode:\n module = module.eval()\n if dynamic:\n qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig}\n model = get_script_module(module, tracing, inputs[0]).eval()\n if debug:\n print('input graph:', model.graph)\n models = {}\n outputs = {}\n for debug in [True, False]:\n if dynamic:\n models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug)\n # make sure it runs\n outputs[debug] = models[debug](inputs)\n else:\n # module under test can contain in-place ops, and we depend on\n # input data staying constant for comparisons\n inputs_copy = copy.deepcopy(inputs)\n models[debug] = quantize_jit(\n model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False,\n debug=debug)\n # make sure it runs\n outputs[debug] = models[debug](*inputs[0])\n\n if debug:\n print('debug graph:', models[True].graph)\n print('non debug graph:', models[False].graph)\n\n if check:\n # debug and non-debug option should have the same numerics\n self.assertEqual(outputs[True], outputs[False])\n\n # non debug graph should produce quantized op\n FileCheck().check(quantized_op) \\\n .run(models[False].graph)\n\n return models[False]\n\n def checkGraphModuleNodes(\n self, graph_module,\n expected_node=None,\n expected_node_occurrence=None,\n expected_node_list=None):\n \"\"\" Check if GraphModule contains the target node\n Args:\n graph_module: the GraphModule instance we want to check\n expected_node, expected_node_occurrence, expected_node_list:\n see docs for checkGraphModeFxOp\n \"\"\"\n nodes_in_graph = dict()\n node_list = []\n modules = dict(graph_module.named_modules(remove_duplicate=False))\n for node in graph_module.graph.nodes:\n n = None\n if node.op == 'call_function' or node.op == 'call_method':\n n = NodeSpec(node.op, node.target)\n elif node.op == 'call_module':\n n = NodeSpec(node.op, type(modules[node.target]))\n\n if n is not None:\n node_list.append(n)\n if n in nodes_in_graph:\n nodes_in_graph[n] += 1\n else:\n nodes_in_graph[n] = 1\n\n if expected_node is not None:\n self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) +\n ' not found in the graph module')\n\n if expected_node_occurrence is not None:\n for expected_node, occurrence in expected_node_occurrence.items():\n if occurrence != 0:\n self.assertTrue(\n expected_node in nodes_in_graph,\n 'Check failed for node:' + str(expected_node) +\n ' not found')\n self.assertTrue(\n nodes_in_graph[expected_node] == occurrence,\n 'Check failed for node:' + str(expected_node) +\n ' Expected occurrence:' + str(occurrence) +\n ' Found occurrence:' + str(nodes_in_graph[expected_node]))\n else:\n self.assertTrue(\n expected_node not in nodes_in_graph,\n 'Check failed for node:' + str(expected_node) +\n ' expected no occurrence but found')\n\n if expected_node_list is not None:\n cur_index = 0\n for n in node_list:\n if cur_index == len(expected_node_list):\n return\n if n == expected_node_list[cur_index]:\n cur_index += 1\n self.assertTrue(\n cur_index == len(expected_node_list),\n \"Check failed for graph:\" +\n self.printGraphModule(graph_module, print_str=False) +\n \"Expected ordered list:\" +\n str(expected_node_list))\n\n def printGraphModule(self, graph_module, print_str=True):\n modules = dict(graph_module.named_modules())\n node_infos = []\n for n in graph_module.graph.nodes:\n node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs]))\n if n.op == 'call_module':\n node_info += ' module type: ' + repr(type(modules[n.target]))\n node_infos.append(node_info)\n 
str_to_print = '\\n'.join(node_infos)\n if print_str:\n print(str_to_print)\n return str_to_print\n\n if HAS_FX:\n\n def assert_types_for_matched_subgraph_pairs(\n self,\n matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],\n expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]],\n gm_a: GraphModule,\n gm_b: GraphModule,\n ) -> None:\n \"\"\"\n Verifies that the types specified in expected_types match\n the underlying objects pointed to by the nodes in matched_subgraph_pairs.\n\n An example successful test case:\n\n matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)}\n expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)}\n\n The function tests for key equivalence, and verifies types with\n instance checks.\n \"\"\"\n\n def _get_underlying_op_type(\n node: Node, gm: GraphModule\n ) -> Union[Callable, str]:\n if node.op == 'call_module':\n mod = getattr(gm, node.target)\n return type(mod)\n else:\n assert node.op in ('call_function', 'call_method')\n return node.target\n\n self.assertTrue(\n len(matched_subgraph_pairs) == len(expected_types),\n 'Expected length of results to match, but got %d and %d' %\n (len(matched_subgraph_pairs), len(expected_types))\n )\n for k, v in expected_types.items():\n expected_types_a, expected_types_b = v\n exp_type_start_a, exp_type_end_a = expected_types_a\n exp_type_start_b, exp_type_end_b = expected_types_b\n subgraph_a, subgraph_b = matched_subgraph_pairs[k]\n\n act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a)\n act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b)\n act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a)\n act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b)\n types_match = (exp_type_start_a is act_type_start_a) and \\\n (exp_type_end_a is act_type_end_a) and \\\n (exp_type_start_b is act_type_start_b) and \\\n (exp_type_end_b is act_type_end_b)\n self.assertTrue(\n types_match,\n 'Type mismatch at %s: expected %s, got %s' %\n (k, (exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b),\n (act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b))\n )\n\n def assert_ns_compare_dict_valid(\n self,\n act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]],\n ) -> None:\n \"\"\"\n Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid:\n 1. for each layer, results are recorded for two models\n 2. number of seen tensors match\n 3. shapes of each pair of seen tensors match\n \"\"\"\n for layer_name, result_type_to_data in act_compare_dict.items():\n for result_type, layer_data in result_type_to_data.items():\n self.assertTrue(\n len(layer_data) == 2,\n f\"Layer {layer_name} does not have exactly two model results.\")\n model_name_0, model_name_1 = layer_data.keys()\n for res_idx in range(len(layer_data[model_name_0])):\n layer_data_0 = layer_data[model_name_0][res_idx]\n layer_data_1 = layer_data[model_name_1][res_idx]\n self.assertTrue(\n layer_data_0['type'] == layer_data_0['type'],\n f\"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.\")\n\n self.assertTrue(\n len(layer_data_0['values']) ==\n len(layer_data_1['values']),\n f\"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.\")\n\n # F.conv1d weight has rank 3, and toq.conv1d unpacked weight\n # has rank 4. 
For now, skip the length check for conv1d only.\n is_weight_functional_conv1d = (\n result_type == NSSingleResultValuesType.WEIGHT.value and\n (\n 'conv1d' in layer_data_0['prev_node_target_type'] or\n 'conv1d' in layer_data_1['prev_node_target_type']\n )\n )\n if not is_weight_functional_conv1d:\n for idx in range(len(layer_data_0['values'])):\n values_0 = layer_data_0['values'][idx]\n values_1 = layer_data_1['values'][idx]\n if isinstance(values_0, torch.Tensor):\n self.assertTrue(\n values_0.shape == values_1.shape,\n f\"Layer {layer_name}, {model_name_0} and {model_name_1} \" +\n f\"have a shape mismatch at idx {idx}.\")\n else:\n assert isinstance(values_0, tuple), \\\n f\"unhandled type {type(values_0)}\"\n assert len(values_0) == 2\n assert len(values_0[1]) == 2\n assert values_0[0].shape == values_1[0].shape\n assert values_0[1][0].shape == values_1[1][0].shape\n assert values_0[1][1].shape == values_1[1][1].shape\n\n # verify that ref_node_name is valid\n ref_node_name_0 = layer_data_0['ref_node_name']\n ref_node_name_1 = layer_data_1['ref_node_name']\n prev_node_name_0 = layer_data_0['prev_node_name']\n prev_node_name_1 = layer_data_1['prev_node_name']\n if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value:\n self.assertTrue(ref_node_name_0 == prev_node_name_0)\n self.assertTrue(ref_node_name_1 == prev_node_name_1)\n elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value:\n self.assertTrue(ref_node_name_0 != prev_node_name_0)\n self.assertTrue(ref_node_name_1 != prev_node_name_1)\n\n def checkGraphModeFxOp(self, model, inputs, quant_type,\n expected_node=None,\n expected_node_occurrence=None,\n expected_node_list=None,\n is_reference=False,\n print_debug_info=False,\n custom_qconfig_dict=None,\n prepare_expected_node=None,\n prepare_expected_node_occurrence=None,\n prepare_expected_node_list=None,\n prepare_custom_config_dict=None):\n \"\"\" Quantizes model with graph mode quantization on fx and check if the\n quantized model contains the quantized_node\n\n Args:\n model: floating point torch.nn.Module\n inputs: one positional sample input arguments for model\n expected_node: NodeSpec\n e.g. NodeSpec.call_function(torch.quantize_per_tensor)\n expected_node_occurrence: a dict from NodeSpec to\n expected number of occurences (int)\n e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1,\n NodeSpec.call_method('dequantize'): 1}\n expected_node_list: a list of NodeSpec, used to check the order\n of the occurrence of Node\n e.g. 
[NodeSpec.call_function(torch.quantize_per_tensor),\n NodeSpec.call_module(nnq.Conv2d),\n NodeSpec.call_function(F.hardtanh_),\n NodeSpec.call_method('dequantize')]\n is_reference: if True, enables reference mode\n print_debug_info: if True, prints debug info\n custom_qconfig_dict: overrides default qconfig_dict\n prepare_expected_node: same as expected_node, but for prepare\n prepare_expected_node_occurrence: same as\n expected_node_occurrence, but for prepare\n prepare_expected_node_list: same as expected_node_list, but\n for prepare\n \"\"\"\n # TODO: make img_data a single example instead of a list\n if type(inputs) == list:\n inputs = inputs[0]\n\n if quant_type == QuantType.QAT:\n qconfig = get_default_qat_qconfig(torch.backends.quantized.engine)\n model.train()\n elif quant_type == QuantType.STATIC:\n qconfig = get_default_qconfig(torch.backends.quantized.engine)\n model.eval()\n else:\n qconfig = default_dynamic_qconfig\n model.eval()\n\n if quant_type == QuantType.QAT:\n prepare = prepare_qat_fx\n else:\n prepare = prepare_fx\n\n qconfig_dict = {\"\": qconfig}\n # overwrite qconfig_dict with custom_qconfig_dict\n if custom_qconfig_dict is not None:\n qconfig_dict = custom_qconfig_dict\n prepared = prepare(\n model, qconfig_dict,\n prepare_custom_config_dict=prepare_custom_config_dict)\n if not quant_type == QuantType.DYNAMIC:\n prepared(*inputs)\n\n if print_debug_info:\n print()\n print('quant type:\\n', quant_type)\n print('original model:\\n', model)\n print()\n print('prepared model:\\n', prepared)\n\n self.checkGraphModuleNodes(\n prepared, prepare_expected_node,\n prepare_expected_node_occurrence, prepare_expected_node_list)\n\n prepared_copy = copy.deepcopy(prepared)\n qgraph = convert_fx(prepared)\n qgraph_reference = convert_fx(prepared_copy, is_reference=True)\n result = qgraph(*inputs)\n result_reference = qgraph_reference(*inputs)\n\n qgraph_to_check = qgraph_reference if is_reference else qgraph\n if print_debug_info:\n print()\n print('quantized model:\\n', qgraph_to_check)\n self.printGraphModule(qgraph_to_check)\n print()\n self.checkGraphModuleNodes(\n qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list)\n return result\n\n\n def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets,\n set_qconfig, is_emb_bag, dtype=torch.quint8):\n # Test serialization of dynamic EmbeddingBag module using state_dict\n if is_emb_bag:\n inputs = [indices, offsets]\n else:\n inputs = [indices]\n emb_dict = qemb.state_dict()\n b = io.BytesIO()\n torch.save(emb_dict, b)\n b.seek(0)\n loaded_dict = torch.load(b)\n embedding_unpack = torch.ops.quantized.embedding_bag_unpack\n # Check unpacked weight values explicitly\n for key in emb_dict:\n if isinstance(emb_dict[key], torch._C.ScriptObject):\n assert isinstance(loaded_dict[key], torch._C.ScriptObject)\n emb_weight = embedding_unpack(emb_dict[key])\n loaded_weight = embedding_unpack(loaded_dict[key])\n self.assertEqual(emb_weight, loaded_weight)\n\n # Check state dict serialization and torch.save APIs\n if is_emb_bag:\n loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,\n include_last_offset=True, mode='sum', dtype=dtype)\n else:\n loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype)\n self.check_eager_serialization(qemb, loaded_qemb, inputs)\n\n loaded_qemb.load_state_dict(loaded_dict)\n self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight),\n 
embedding_unpack(loaded_qemb._packed_params._packed_weight))\n\n\n # Test JIT serialization\n self.checkScriptable(qemb, [inputs], check_save_load=True)\n\n # Test from_float call\n if is_emb_bag:\n float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim,\n include_last_offset=True, scale_grad_by_freq=False, mode='sum')\n else:\n float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n\n if set_qconfig:\n float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype,\n qscheme=torch.per_channel_affine_float_qparams,\n ch_axis=0)\n float_embedding.qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,\n weight=float_qparams_observer)\n\n prepare_dynamic(float_embedding)\n\n float_embedding(*inputs)\n if is_emb_bag:\n q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding)\n expected_name = \"QuantizedEmbeddingBag\"\n else:\n q_embeddingbag = nnq.Embedding.from_float(float_embedding)\n expected_name = \"QuantizedEmbedding\"\n\n q_embeddingbag(*inputs)\n\n self.assertTrue(expected_name in str(q_embeddingbag))\n\n\n# Below are a series of toy models to use in testing quantization\n\nclass SingleLayerLinearModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.fc1(x)\n return x\n\nclass AnnotatedSingleLayerLinearModel(torch.nn.Module):\n def __init__(self, qengine='fbgemm'):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))\n\n def forward(self, x):\n x = self.fc1(x)\n return x\n\nclass SingleLayerLinearDynamicModel(torch.nn.Module):\n def __init__(self, qengine='fbgemm'):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.fc1(x)\n return x\n\nclass RNNDynamicModel(torch.nn.Module):\n def __init__(self, mod_type):\n super().__init__()\n self.qconfig = default_dynamic_qconfig\n if mod_type == 'GRU':\n self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float)\n if mod_type == 'LSTM':\n self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.mod(x)\n return x\n\nclass RNNCellDynamicModel(torch.nn.Module):\n def __init__(self, mod_type):\n super().__init__()\n self.qconfig = default_dynamic_qconfig\n if mod_type == 'GRUCell':\n self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float)\n if mod_type == 'LSTMCell':\n self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float)\n if mod_type == 'RNNReLU':\n self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float)\n if mod_type == 'RNNTanh':\n self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float)\n\n def forward(self, x):\n x = self.mod(x)\n return x\n\nclass LSTMwithHiddenDynamicModel(torch.nn.Module):\n def __init__(self, qengine='fbgemm'):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float)\n\n def forward(self, x, hid):\n x, hid = self.lstm(x, hid)\n return x, hid\n\nclass ConvModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\nclass ConvTransposeModel(torch.nn.Module):\n def __init__(self):\n 
super().__init__()\n self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\nclass AnnotatedConvModel(torch.nn.Module):\n def __init__(self, qengine):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv(x)\n x = self.dequant(x)\n return x\n\nclass AnnotatedConvTransposeModel(torch.nn.Module):\n def __init__(self, qengine):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv(x)\n x = self.dequant(x)\n return x\n\nclass ConvBnModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)\n self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\nclass AnnotatedConvBnModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.qconfig = default_qconfig\n self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)\n self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv(x)\n x = self.bn(x)\n x = self.dequant(x)\n return x\n\nclass ConvBnReLUModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)\n self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\nclass AnnotatedConvBnReLUModel(torch.nn.Module):\n def __init__(self, qengine='fbgemm'):\n super(AnnotatedConvBnReLUModel, self).__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)\n self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float)\n self.relu = nn.ReLU(inplace=True)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.dequant(x)\n return x\n\n def fuse_model(self):\n torch.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True)\n\nclass TwoLayerLinearModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)\n self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\nclass LinearModelWithSubmodule(nn.Module):\n def __init__(self):\n super(LinearModelWithSubmodule, self).__init__()\n self.subm = TwoLayerLinearModel()\n self.fc = nn.Linear(5, 5)\n\n def forward(self, x):\n x = self.subm(x)\n x = self.fc(x)\n return x\n\nclass AnnotatedTwoLayerLinearModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)\n self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float))\n self.fc2.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n\n def forward(self, x):\n x = 
self.fc1(x)\n x = self.fc2(x)\n return x\n\nclass ActivationsTestModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(\"fbgemm\")\n self.quant = torch.quantization.QuantStub()\n self.hardswish = torch.nn.Hardswish().to(dtype=torch.float)\n self.elu = torch.nn.ELU().to(dtype=torch.float)\n self.dequant = torch.quantization.DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.hardswish(x)\n x = self.elu(x)\n x = self.dequant(x)\n return x\n\nclass LinearReluModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc(x))\n return x\n\nclass NormalizationTestModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.quant = torch.quantization.QuantStub()\n self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)\n self.layer_norm = torch.nn.LayerNorm((8))\n self.group_norm = torch.nn.GroupNorm(2, 8)\n self.instance_norm1d = torch.nn.InstanceNorm1d(8)\n self.instance_norm2d = torch.nn.InstanceNorm2d(8)\n self.instance_norm3d = torch.nn.InstanceNorm3d(8)\n\n def forward(self, x):\n x = self.quant(x)\n x = self.fc1(x)\n x = self.layer_norm(x)\n x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3))\n x = self.instance_norm1d(x)\n x = self.instance_norm2d(x.unsqueeze(-1))\n x = self.instance_norm3d(x.unsqueeze(-1))\n return x\n\nclass NestedModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sub1 = LinearReluModel()\n self.sub2 = TwoLayerLinearModel()\n self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.sub1(x)\n x = self.sub2(x)\n x = self.fc3(x)\n return x\n\nclass AnnotatedNestedModel(torch.nn.Module):\n def __init__(self, qengine):\n super().__init__()\n self.sub1 = LinearReluModel()\n self.sub2 = TwoLayerLinearModel()\n self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))\n self.fc3.qconfig = default_qconfig\n self.sub2.fc1 = QuantWrapper(self.sub2.fc1)\n if qengine == 'fbgemm':\n self.sub2.fc1.qconfig = default_per_channel_qconfig\n else:\n self.sub2.fc1.qconfig = default_qconfig\n\n def forward(self, x):\n x = self.sub1(x)\n x = self.sub2(x)\n x = self.fc3(x)\n return x\n\nclass AnnotatedSubNestedModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sub1 = LinearReluModel()\n self.sub2 = QuantWrapper(TwoLayerLinearModel())\n self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))\n self.fc3.qconfig = default_qconfig\n self.sub2.qconfig = default_qconfig\n\n def forward(self, x):\n x = self.sub1(x)\n x = self.sub2(x)\n x = self.fc3(x)\n return x\n\nclass AnnotatedCustomConfigNestedModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sub1 = LinearReluModel()\n self.sub2 = TwoLayerLinearModel()\n self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))\n self.fc3.qconfig = default_qconfig\n self.sub2.qconfig = default_qconfig\n\n custom_options = {\n 'dtype': torch.quint8,\n 'qscheme': torch.per_tensor_affine\n }\n custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options),\n weight=default_weight_observer)\n self.sub2.fc1.qconfig = custom_qconfig\n\n self.sub2.fc1 = QuantWrapper(self.sub2.fc1)\n self.sub2.fc2 = QuantWrapper(self.sub2.fc2)\n\n def forward(self, x):\n x = self.sub1(x)\n x = self.sub2(x)\n x = self.fc3(x)\n return x\n\nclass QuantSubModel(torch.nn.Module):\n def 
__init__(self):\n super().__init__()\n self.sub1 = LinearReluModel()\n self.sub2 = QuantWrapper(TwoLayerLinearModel())\n self.sub2.qconfig = default_qconfig\n self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)\n self.fc3.qconfig = default_qconfig\n\n def forward(self, x):\n x = self.sub1(x)\n x = self.sub2(x)\n x = self.fc3(x)\n return x\n\nclass InnerModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float)\n self.relu1 = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float)\n self.relu2 = torch.nn.ReLU()\n\n def forward(self, x):\n return self.relu2(self.fc2(self.relu1(self.fc1(x))))\n\n def fuse_modules(self):\n fusable_layers = []\n named_children = list(self.named_children())\n for idx, (current_name, layer) in enumerate(named_children):\n if isinstance(layer, torch.nn.Linear):\n if idx >= len(named_children) - 1:\n break\n if isinstance(named_children[idx + 1][1], torch.nn.ReLU):\n fusable_layers.append([current_name,\n named_children[idx + 1][0]])\n torch.quantization.fuse_modules(self, fusable_layers, inplace=True)\n\nclass SkipQuantModel(torch.nn.Module):\n r\"\"\"We can skip quantization by explicitly\n setting qconfig of a submodule to None\n \"\"\"\n def __init__(self):\n super().__init__()\n self.sub = InnerModule()\n self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)\n\n def forward(self, x):\n return self.fc(self.sub(x))\n\n def fuse_modules(self):\n self.sub.fuse_modules()\n\nclass AnnotatedSkipQuantModel(torch.nn.Module):\n r\"\"\"We can skip quantization by explicitly\n setting qconfig of a submodule to None\n \"\"\"\n def __init__(self, qengine):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(qengine)\n self.sub = QuantWrapper(InnerModule())\n self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)\n # don't quantize this fc\n self.fc.qconfig = None\n\n def forward(self, x):\n return self.fc(self.sub(x))\n\n def fuse_modules(self):\n self.sub.module.fuse_modules()\n\nclass QuantStubModel(torch.nn.Module):\n r\"\"\"A Module with manually inserted `QuantStub` and `DeQuantStub`\n \"\"\"\n def __init__(self):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qconfig(\"qnnpack\")\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.quant(x)\n x = self.fc(x)\n return self.dequant(x)\n\nclass ManualLinearQATModel(torch.nn.Module):\n r\"\"\"A Module with manually inserted `QuantStub` and `DeQuantStub`\n \"\"\"\n def __init__(self, qengine):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qat_qconfig(qengine)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float)\n self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.quant(x)\n x = self.fc1(x)\n x = self.fc2(x)\n return self.dequant(x)\n\nclass ManualConvLinearQATModel(torch.nn.Module):\n r\"\"\"A module with manually inserted `QuantStub` and `DeQuantStub`\n and contains both linear and conv modules\n \"\"\"\n def __init__(self):\n super().__init__()\n self.qconfig = torch.quantization.get_default_qat_qconfig(\"qnnpack\")\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)\n self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)\n self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)\n\n def 
forward(self, x):\n x = self.quant(x)\n x = self.conv(x)\n x = x.view(-1, 64).contiguous()\n x = self.fc1(x)\n x = self.fc2(x)\n return self.dequant(x)\n\n\nclass SubModelForFusion(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)\n self.bn = nn.BatchNorm2d(2).to(dtype=torch.float)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\nclass SubModelWithoutFusion(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float)\n self.relu = nn.ReLU(inplace=False).to(dtype=torch.float)\n\n def forward(self, x):\n return self.relu(self.conv(x))\n\nclass ModelForFusion(nn.Module):\n def __init__(self, qconfig):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float)\n self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)\n self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)\n self.sub1 = SubModelForFusion()\n self.sub2 = SubModelWithoutFusion()\n self.fc = nn.Linear(36, 10).to(dtype=torch.float)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n self.qconfig = qconfig\n self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float)\n self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)\n self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float)\n self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float)\n self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float)\n self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float)\n self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float)\n # don't quantize sub2\n self.sub2.qconfig = None\n self.fc.qconfig = None\n\n def forward(self, x):\n x = x.squeeze(2)\n x = self.quant(x)\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu4(x)\n x = x.unsqueeze(2)\n y = x.unsqueeze(2)\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.sub1(x)\n x = self.dequant(x)\n x = self.sub2(x)\n x = x.view(-1, 36).contiguous()\n x = self.fc(x)\n y = self.conv2(y)\n y = self.relu2(y)\n y = self.bn2(y)\n y = self.relu3(y)\n y = self.dequant(y)\n return x\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self):\n super().__init__(\n nn.Conv2d(3, 3, 1, 1, bias=False),\n nn.BatchNorm2d(3),\n nn.ReLU(inplace=False)\n )\n\nclass ModelWithSequentialFusion(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 3, 1)\n self.relu1 = nn.ReLU(inplace=False)\n layers = []\n for i in range(3):\n layers.append(ConvBNReLU())\n self.features = nn.Sequential(*layers)\n head = [nn.Linear(300, 10), nn.ReLU(inplace=False)]\n self.classifier = nn.Sequential(*head)\n self.seq = nn.Sequential()\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv1(x)\n x = self.relu1(x)\n x = self.features(x)\n x = torch.reshape(x, (-1, 3 * 10 * 10))\n x = self.classifier(x)\n x = self.seq(x)\n x = self.dequant(x)\n return x\n\nclass ModelForFusionWithBias(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float)\n self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)\n self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)\n self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float)\n self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float)\n self.quant = QuantStub()\n self.dequant = DeQuantStub()\n\n def forward(self, x):\n x = self.quant(x)\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = 
self.dequant(x)\n return x\n\nclass ModelForLinearBNFusion(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc = nn.Linear(20, 10)\n self.bn = nn.BatchNorm1d(10)\n nn.init.uniform_(self.bn.weight)\n nn.init.uniform_(self.bn.bias)\n\n def forward(self, x):\n return self.bn(self.fc(x))\n\nclass DummyObserver(torch.nn.Module):\n def calculate_qparams(self):\n return 1.0, 0\n\n def forward(self, x):\n return x\n\n\nclass ModelWithFunctionals(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.mycat = nnq.FloatFunctional()\n self.myadd = nnq.FloatFunctional()\n self.myadd_relu = nnq.FloatFunctional()\n # Tracing doesnt work yet for c10 ops with scalar inputs\n # https://github.com/pytorch/pytorch/issues/27097\n # self.my_scalar_add = nnq.FloatFunctional()\n # self.my_scalar_mul = nnq.FloatFunctional()\n\n def forward(self, x):\n y = self.mycat.cat([x, x, x])\n z = self.myadd.add(y, y)\n w = self.myadd_relu.add_relu(z, z)\n # Tracing doesnt work yet for c10 ops with scalar inputs\n # https://github.com/pytorch/pytorch/issues/27097\n # w = self.my_scalar_add.add_scalar(w, -0.5)\n # w = self.my_scalar_mul.mul_scalar(w, 0.5)\n return w\n\n\nclass ResNetBase(torch.nn.Module):\n def __init__(self):\n super().__init__()\n norm_layer = nn.BatchNorm2d\n inplanes = 3\n self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)\n self.bn1 = norm_layer(inplanes)\n self.relu1 = nn.ReLU()\n self.relu2 = nn.ReLU()\n self.downsample = torch.nn.Identity()\n self.myop = nn.quantized.FloatFunctional()\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = torch.nn.Linear(inplanes, 1)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n identity = self.downsample(x)\n out = self.myop.add(out, identity)\n out = self.relu2(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out\n\n def fuse_model(self):\n torch.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True)\n\nclass ModelMultipleOps(torch.nn.Module):\n def __init__(self):\n super().__init__()\n norm_layer = nn.BatchNorm2d\n inplanes = 3\n self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)\n self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)\n self.bn1 = norm_layer(inplanes)\n self.relu1 = nn.ReLU()\n self.relu2 = nn.ReLU()\n self.downsample = torch.nn.Identity()\n self.skip_add = nn.quantized.FloatFunctional()\n self.cat = nn.quantized.FloatFunctional()\n self.avgpool = nn.AdaptiveAvgPool2d((4, 4))\n self.fc = nn.Linear(12, 6)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n identity = self.downsample(x)\n out = self.skip_add.add(out, identity)\n out = self.relu2(out)\n out = self.avgpool(out)\n out = self.conv2(out)\n out = torch.nn.functional.max_pool2d(out, 2, 2)\n out = self.cat.cat([out, out])\n out = out.reshape(-1, 3 * 2 * 2)\n out = self.fc(out)\n return out\n\n# Model to ensure consistency of fake quant with true quant\n# Average pooling and mean operations are not modelled\n# accurately with fake-quant so this model does not\n# contain those operations\nclass ModelMultipleOpsNoAvgPool(torch.nn.Module):\n def __init__(self):\n super().__init__()\n norm_layer = nn.BatchNorm2d\n inplanes = 3\n self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)\n self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False)\n self.bn1 = norm_layer(inplanes)\n self.relu1 = nn.ReLU()\n self.relu2 = nn.ReLU()\n self.skip_add = 
nn.quantized.FloatFunctional()\n self.cat = nn.quantized.FloatFunctional()\n self.maxpool = nn.MaxPool2d((4, 4))\n self.fc = nn.Linear(12, 6)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu1(out)\n skip = self.conv2(x)\n out = self.skip_add.add(out, skip)\n out = self.relu2(out)\n out = self.maxpool(out)\n out = self.conv2(out)\n out = torch.nn.functional.max_pool2d(out, 2, 2)\n out = self.cat.cat([out, out])\n out = out.reshape(-1, 3 * 2 * 2)\n out = self.fc(out)\n return out\n\nclass EmbeddingBagModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12,\n include_last_offset=True, scale_grad_by_freq=False, mode='sum')\n\n def forward(self, indices, offsets, per_sample_weights):\n return self.emb(indices, offsets, per_sample_weights)\n\nclass EmbeddingModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)\n\n def forward(self, indices):\n return self.emb(indices)\n\nclass EmbeddingWithLinear(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)\n self.fc = torch.nn.Linear(5, 5)\n self.emb.qconfig = float_qparams_weight_only_qconfig\n self.qconfig = default_qconfig\n\n def forward(self, indices, linear_in):\n return self.emb(indices), self.fc(linear_in)\n\nclass DenseTopMLP(nn.Module):\n\n def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None:\n super(DenseTopMLP, self).__init__()\n\n self.dense_mlp = nn.Sequential(\n nn.Linear(dense_dim, dense_out),\n )\n self.top_mlp = nn.Sequential(\n nn.Linear(dense_out + embedding_dim, top_out_in),\n nn.Linear(top_out_in, top_out_out),\n )\n\n def forward(\n self,\n sparse_feature: torch.Tensor,\n dense: torch.Tensor,\n ) -> torch.Tensor:\n dense_feature = self.dense_mlp(dense)\n features = torch.cat([dense_feature] + [sparse_feature], dim=1)\n\n out = self.top_mlp(features)\n return out\n\n# thin wrapper around embedding bag, because tracing inside nn.Embedding\n# bag is not supported at the moment and this is top level\nclass EmbBagWrapper(nn.Module):\n def __init__(self, num_embeddings, embedding_dim):\n super().__init__()\n self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum')\n\n def forward(self, indices, offsets):\n return self.emb_bag(indices, offsets)\n\nclass SparseNNModel(nn.Module):\n _NUM_EMBEDDINGS = 10\n _EMBEDDING_DIM = 5\n _DENSE_DIM = 4\n _DENSE_OUTPUT = 2\n _TOP_OUT_IN = 2\n _TOP_OUT_OUT = 2\n _TOP_MLP_DIM = 1\n\n def __init__(self) -> None:\n super(SparseNNModel, self).__init__()\n\n self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM)\n self.dense_top = DenseTopMLP(\n self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN,\n self._TOP_OUT_OUT)\n\n def forward(\n self,\n sparse_indices: torch.Tensor,\n sparse_offsets: torch.Tensor,\n dense: torch.Tensor,\n ) -> torch.Tensor:\n\n sparse_feature = self.model_sparse(sparse_indices, sparse_offsets)\n out = self.dense_top(sparse_feature, dense)\n\n return out\n" ]
[ [ "torch.nn.Linear", "torch.nn.EmbeddingBag", "torch.cat", "torch.nn.LSTM", "torch.quantization.QuantStub", "torch.nn.GRU", "torch.nn.LSTMCell", "torch.nn.BatchNorm2d", "torch.testing.FileCheck", "torch.quantization.quantize_jit", "torch.nn.Hardswish", "torch.jit.trace", "numpy.cumsum", "torch.load", "torch.quantization.get_default_qconfig", "torch.nn.quantized.EmbeddingBag", "torch.nn.CrossEntropyLoss", "torch.nn.BatchNorm3d", "torch.reshape", "torch.nn.LayerNorm", "torch.nn.MaxPool2d", "torch.distributed.init_process_group", "torch.nn.Conv1d", "torch.quantization.DeQuantStub", "torch.quantization.quantization_mappings.get_default_qconfig_propagation_list", "torch.nn.ConvTranspose2d", "torch.quantization.quantization_mappings.get_default_qat_module_mappings", "torch.jit.load", "torch.randint", "torch.jit.save", "torch.tensor", "torch.nn.Conv3d", "torch.jit.script", "torch.nn.quantized.FloatFunctional", "torch.quantization.fuse_modules", "torch.nn.Embedding", "torch.nn.quantized.Embedding.from_float", "torch.nn.Identity", "torch.nn.quantized.EmbeddingBag.from_float", "torch.nn.AdaptiveAvgPool2d", "numpy.zeros", "torch.flatten", "torch.nn.Sequential", "torch.max", "torch.save", "torch.quantization.PerChannelMinMaxObserver.with_args", "torch.quantization.QuantWrapper", "torch.nn.parallel.DistributedDataParallel", "torch.nn.GroupNorm", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.quantization.quantize_dynamic_jit", "torch.nn.InstanceNorm2d", "torch.nn.init.uniform_", "torch.quantization.default_observer.with_args", "torch.nn.GRUCell", "torch.nn.InstanceNorm1d", "torch.rand", "torch.quantization.quantize_fx.convert_fx", "torch.nn.RNNCell", "torch.distributed.destroy_process_group", "torch.no_grad", "torch.quantization.quantization_mappings.get_default_dynamic_quant_module_mappings", "torch.quantization.get_default_qat_qconfig", "torch.quantization.QConfigDynamic", "torch.nn.BatchNorm1d", "torch.nn.InstanceNorm3d", "torch.nn.functional.max_pool2d", "torch.quantization.propagate_qconfig_", "torch.quantize_per_tensor", "torch.nn.quantized.Embedding", "torch.nn.ELU" ] ]
patrikkj/algorithms
[ "25799fb57807eca1784202c499fda8a5a94acea3" ]
[ "common/cost.py" ]
[ "import numpy as np\n\nfrom .activations import sigmoid\n\n\n# Regularization\ndef l2_reg(params, l=0.01):\n return l * np.sum(np.square(params))\n\ndef l2_reg_grad(params, l=0.01):\n return 2 * l * params\n\n\n# Sum of squared errors\ndef reduce_mean_sse(W, b, X, y):\n return np.mean(np.square((np.dot(X, W) + b) - y)) / 2\n\ndef reduce_mean_sse_grad(W, b, X, y):\n err = (np.dot(X, W) + b) - y\n dW = np.mean(err * X, axis=0, keepdims=True).T\n db = np.mean(err, keepdims=True)\n return dW, db\n\n\n# Sum of squared errors (Regularized)\ndef reduce_mean_sse_reg(W, b, X, y, l=0.01):\n return reduce_mean_sse(W, b, X, y) + l2_reg(W, l) / (2*X.shape[0])\n\ndef reduce_mean_sse_reg_grad(W, b, X, y, l=0.01):\n dW, db = reduce_mean_sse_grad(W, b, X, y)\n dW = dW + l2_reg_grad(W, l) / (2*X.shape[0])\n return dW, db\n\n\n# Sigmoid\ndef sigmoid_cross_entropy(W, b, X, y):\n a = sigmoid(np.dot(X, W) + b)\n return -np.mean(y * np.log(a) + (1 - y) * np.log(1 - a))\n\ndef sigmoid_cross_entropy_grad(W, b, X, y):\n err = sigmoid(np.dot(X, W) + b) - y\n dW = np.mean(err * X, axis=0, keepdims=True).T\n db = np.mean(err, keepdims=True)\n return dW, db\n\n\n# Sigmoid (Regularized)\ndef sigmoid_cross_entropy_reg(W, b, X, y, l=0.01):\n return sigmoid_cross_entropy(W, b, X, y) + l2_reg(W, l) / (2*X.shape[0])\n\ndef sigmoid_cross_entropy_reg_grad(W, b, X, y, l=0.01):\n dW, db = sigmoid_cross_entropy_grad(W, b, X, y)\n dW = dW + l2_reg_grad(W, l) / (2*X.shape[0])\n return dW, db\n" ]
[ [ "numpy.square", "numpy.log", "numpy.dot", "numpy.mean" ] ]
ml-research/PyTorch-BayesianCNN
[ "7933d6d6523be7d54e2347ba1497f63317f04af6" ]
[ "attacks/pytorch_bayesian.py" ]
[ "from art.estimators.classification import PyTorchClassifier\nimport numpy as np\nfrom typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING\n\nclass PyTorchBaysianClassifier(PyTorchClassifier):\n\n def predict( # pylint: disable=W0221\n self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, no_kl=True, **kwargs\n ) -> np.ndarray:\n \"\"\"\n Perform prediction for a batch of inputs.\n :param x: Input samples.\n :param batch_size: Size of batches.\n :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.\n :return: Array of predictions of shape `(nb_inputs, nb_classes)`.\n \"\"\"\n import torch # lgtm [py/repeated-import]\n\n # Set model mode\n self._model.train(mode=training_mode)\n\n # Apply preprocessing\n x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)\n\n results_list = []\n\n # Run prediction with batch processing\n num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))\n for m in range(num_batch):\n # Batch indexes\n begin, end = (\n m * batch_size,\n min((m + 1) * batch_size, x_preprocessed.shape[0]),\n )\n\n with torch.no_grad():\n model_outputs = self._model(torch.from_numpy(x_preprocessed[begin:end]).to(self._device)) # TODO: get no_kl into this\n output = model_outputs[-1][0] # added [0] hack for bayesian nets returning a tuple\n output = output.detach().cpu().numpy().astype(np.float32)\n if len(output.shape) == 1:\n output = np.expand_dims(output.detach().cpu().numpy(), axis=1).astype(np.float32)\n\n results_list.append(output)\n\n results = np.vstack(results_list)\n\n # Apply postprocessing\n predictions = self._apply_postprocessing(preds=results, fit=False)\n\n return predictions\n\n def loss_gradient( # pylint: disable=W0221\n self,\n x: Union[np.ndarray, \"torch.Tensor\"],\n y: Union[np.ndarray, \"torch.Tensor\"],\n training_mode: bool = False,\n **kwargs\n ) -> Union[np.ndarray, \"torch.Tensor\"]:\n \"\"\"\n Compute the gradient of the loss function w.r.t. `x`.\n\n :param x: Sample input with shape as expected by the model.\n :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape\n `(nb_samples,)`.\n :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.\n Note on RNN-like models: Backpropagation through RNN modules in eval mode raises\n RuntimeError due to cudnn issues and require training mode, i.e. RuntimeError: cudnn RNN\n backward can only be called in training mode. Therefore, if the model is an RNN type we\n always use training mode but freeze batch-norm and dropout layers if\n `training_mode=False.`\n :return: Array of gradients of the same shape as `x`.\n \"\"\"\n import torch # lgtm [py/repeated-import]\n\n self._model.train(mode=training_mode)\n\n # Backpropagation through RNN modules in eval mode raises RuntimeError due to cudnn issues and require training\n # mode, i.e. RuntimeError: cudnn RNN backward can only be called in training mode. 
Therefore, if the model is\n # an RNN type we always use training mode but freeze batch-norm and dropout layers if training_mode=False.\n if self.is_rnn:\n self._model.train(mode=True)\n if not training_mode:\n logger.debug(\n \"Freezing batch-norm and dropout layers for gradient calculation in train mode with eval parameters\"\n \"of batch-norm and dropout.\"\n )\n self.set_batchnorm(train=False)\n self.set_dropout(train=False)\n\n # Apply preprocessing\n if self.all_framework_preprocessing:\n if isinstance(x, torch.Tensor):\n x_grad = x.clone().detach().requires_grad_(True)\n else:\n x_grad = torch.tensor(x).to(self._device)\n x_grad.requires_grad = True\n if isinstance(y, torch.Tensor):\n y_grad = y.clone().detach()\n else:\n y_grad = torch.tensor(y).to(self._device)\n inputs_t, y_preprocessed = self._apply_preprocessing(x_grad, y=y_grad, fit=False, no_grad=False)\n elif isinstance(x, np.ndarray):\n x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y=y, fit=False, no_grad=True)\n x_grad = torch.from_numpy(x_preprocessed).to(self._device)\n x_grad.requires_grad = True\n inputs_t = x_grad\n else:\n raise NotImplementedError(\"Combination of inputs and preprocessing not supported.\")\n\n # Check label shape\n y_preprocessed = self.reduce_labels(y_preprocessed)\n\n if isinstance(y_preprocessed, np.ndarray):\n labels_t = torch.from_numpy(y_preprocessed).to(self._device)\n else:\n labels_t = y_preprocessed\n\n # Compute the gradient and return\n model_outputs = self._model(inputs_t)[-1][0] # added [-1][0] hack for bayesian nets returning a tuple\n loss = self._loss(model_outputs, labels_t) # removed [-1]# lgtm [py/call-to-non-callable]\n\n # Clean gradients\n self._model.zero_grad()\n\n # Compute gradients\n if self._use_amp:\n from apex import amp # pylint: disable=E0611\n\n with amp.scale_loss(loss, self._optimizer) as scaled_loss:\n scaled_loss.backward()\n\n else:\n loss.backward()\n\n if isinstance(x, torch.Tensor):\n grads = x_grad.grad\n else:\n grads = x_grad.grad.cpu().numpy().copy() # type: ignore\n\n if not self.all_framework_preprocessing:\n grads = self._apply_preprocessing_gradient(x, grads)\n\n assert grads.shape == x.shape\n\n return grads" ]
[ [ "torch.no_grad", "torch.tensor", "numpy.vstack", "torch.from_numpy" ] ]
Project-Ellie/tutorials
[ "9090cc7669d3e59889b15139724e662ce11be1ee" ]
[ "other_stuff/DeepGomoku/GomokuTools_deprecated.py" ]
[ "import numpy as np\n\n\nclass GomokuTools:\n\n \n @staticmethod \n def str_base(number, base, width=8):\n def _str_base(number,base):\n (d, m) = divmod(number, base)\n if d > 0:\n return _str_base(d, base) + str(m)\n return str(m)\n s = _str_base(number, base)\n return '0'*(width-len(s))+s\n\n \n @staticmethod \n def base2_to_xo(number):\n return GomokuTools.str_base(number, 3).replace('2', 'o').replace('1', 'x').replace('0', '.') \n\n \n @staticmethod \n def mask(offensive, defensive):\n n = defensive\n l = n & 0xF0\n l = (l | l<<1 | l<<2 | l<<3) & 0xF0\n\n r = n & 0x0F\n r = (r | r>>1 | r>>2 | r>>3) & 0x0F\n\n mask=(~(l | r)) & 0xFF\n free_stones=mask & offensive\n\n return free_stones, mask\n\n\n @staticmethod \n def num_offensive(o, d):\n s, l, offset = GomokuTools.mask2(o, d)\n m2o_bits = GomokuTools.as_bit_array(s)[:l]\n max_count = 0\n for w in [2,1,0]:\n i = 0\n while i <= len(m2o_bits) - 2 - w:\n count = sum(m2o_bits[i:i+w+2])\n count = 3*count - (w+2)\n if count > max_count:\n max_count = count\n i+=1\n if m2o_bits[0] == 0:\n max_count += 1\n if m2o_bits[-1] == 0:\n max_count += 1\n\n # Criticality correction for the fatal double-open 3\n if max_count == 8:\n max_count=13\n return max_count \n \n \n @staticmethod \n def mask2(offensive, defensive):\n n = defensive\n l = n & 0xF0\n l = (l | l<<1 | l<<2 | l<<3) & 0xF0\n\n r = n & 0x0F\n r = (r | r>>1 | r>>2 | r>>3) & 0x0F\n\n mask=(~(l | r))\n free_stones=mask & offensive\n\n free_length=np.sum([(mask>>i)&1 for i in range(8)], axis=0)\n l_offset = np.sum([(l>>i)&1 for i in range(8)], axis=0)\n #free_length = (free_length > 5) * 5 + (free_length <= 5) * free_length\n return free_stones << l_offset, free_length, l_offset \n\n \n @staticmethod\n def dirs():\n return {\n 'e' : (0, [0, 1]),\n 'ne': (1, [-1, 1]),\n 'n' : (2, [-1, 0]),\n 'nw': (3, [-1, -1]),\n 'w' : (4, [0, -1]),\n 'sw': (5, [1, -1]),\n 's' : (6, [1, 0]),\n 'se': (7, [1, 1])}\n\n @staticmethod\n def int_for_dir(dirstr):\n return GomokuTools.dirs()[dirstr][0]\n \n @staticmethod\n def as_bit(direction, distance):\n return (1<<(distance+3) if direction//4 else 1<<(4-distance))# << (8*(direction%4))\n \n @staticmethod\n def m2b(m, size):\n \"\"\"matrix index to board position\"\"\"\n r, c = m\n return np.array([c+1, size-r])\n\n @staticmethod\n def b2m(p, size):\n \"\"\"board position to matrix index\"\"\"\n x, y = p\n return np.array([size-y, x-1])\n \n \n \n @staticmethod\n def as_bit_array(n):\n \"\"\"\n Returns an array of int 0 or 1 \n \"\"\"\n assert(n >= 0 and n <= 255)\n return [np.sign(n & (1<<i)) for i in range(7, -1, -1)]\n\n @staticmethod\n def line_for_xo(xo_string):\n \"\"\"\n return a 2x8 int array representing the 'x..o..' xo_string \n \"\"\"\n powers=np.array([2**i for i in range(7, -1, -1)])\n return [sum([1 if (ch=='x' and c==0) \n or (ch=='o' and c==1) \n else 0 for ch in xo_string] * powers) for c in [0,1]]\n\n\n\nclass NH9x9:\n \n \"\"\"\n 9-by-9 neighbourhood of an empty Gomoku position. 
Provides 12 8 bit integers representing \n what is visibile from that field in a particular direction as input for a valuation function.\n Example: the six stones seen from '*' in south-east/north-west:\n\n - - - - - - - - x\n - - - - - - - x - \n - - - - - - - - - \n - - - - - x - - -\n - - - - * - - - - \n - - - - - - - - - \n - - o - - - - - - \n - o - - - - - - -\n o - - - - - - - - \n \n will be represented by the following bytes encoded as two int32\n\n Black:\n e : 0 0 0 0 0 0 0 0\n ne : 0 0 0 0 1 0 1 1\n n : 0 0 0 0 0 0 0 0\n nw : 0 0 0 0 0 0 0 0\n\n White:\n e : 0 0 0 0 0 0 0 0\n ne : 1 1 1 0 0 0 0 0\n n : 0 0 0 0 0 0 0 0\n nw : 0 0 0 0 0 0 0 0\n \n \"\"\"\n def __init__(self, black=[0,0,0,0], white=[0,0,0,0], edges=[0,0,0,0]):\n for n in [black, white, edges]:\n for i in range(4):\n assert(n[i] >= 0 and n[i] < 256)\n self.b = black\n self.w = white\n self.e = edges\n \n \n def register(self, color, direction, distance):\n assert(color==0 or color==1)\n assert(direction>=0 and direction<8)\n assert(distance>=1 and distance <=4)\n \n if color==0:\n self.b[direction%4] |= GomokuTools.as_bit(direction, distance)\n else:\n self.w[direction%4] |= GomokuTools.as_bit(direction, distance)\n return self\n \n def unregister(self, color, direction, distance):\n assert(color==0 or color==1)\n assert(direction>=0 and direction<8)\n assert(distance>=1 and distance <=4)\n \n bit = GomokuTools.as_bit(direction, distance)\n if color==0:\n self.b[direction] &= (0xFF ^ bit)\n else:\n self.w[direction] &= (0xFF ^ bit)\n return self\n\n \n def get_line(self, direction):\n \"\"\"\n Return two arrays of 8 integers representing black and white stones on a line\n of length 9. The middle position is not represented in the array\n Args:\n direction: either one of 'e', 'ne', 'n', 'nw' or their integer representations\n \"\"\"\n assert(direction>=0 and direction <= 3)\n \n return [\n GomokuTools.as_bit_array(self.b[direction]),\n GomokuTools.as_bit_array(self.w[direction]),\n GomokuTools.as_bit_array(self.e[direction])]\n \n\n def as_bits(self):\n return [self.get_line(d) for d in range(4)]\n\n \n def __repr__(self):\n dirs=[[0, 1], [-1, 1], [-1, 0], [-1, -1]]\n field = [[' ' for i in range(9)] for j in range(9)]\n field[4][4]='*'\n for h in range(4): \n step = dirs[h]\n pos0 = np.array([4,4]) - 4 * np.array(step)\n bits = self.get_line(h)\n for x in range(8):\n row, col = pos0 + (x + x//4) * np.array(step)\n field[row][col]='x' if bits[0][x] == 1 \\\n else 'o' if bits[1][x] == 1 \\\n else '+' if bits[2][x] == 1 \\\n else ' ' \n return \"\\n\".join([('|' + ' '.join(field[r]) + '|') for r in range(9)])\n\n \n \n \n \nclass Heuristics:\n \n def __init__(self):\n\n self.color_scheme = [ # visualize the offensive/defensive score\n ['#F0F0F0', '#FFC0C0', '#FF9090', '#FF6060', '#FF0000'],\n ['#A0FFA0', '#E8D088', '#FFA080', '#F86040', '#F01808'],\n ['#00FF00', '#B0D818', '#EFB060', '#F07040', '#E03010'],\n ['#00CF00', '#80B014', '#C0A048', '#E08050', '#D04820'],\n ['#00A000', '#307810', '#607020', '#907828', '#C06030']\n ]\n # map cscore to threat value\n self.c2t={\n (1,1): 1,\n (1,2): 2,\n (2,1): 3,\n (2,2): 4,\n (3,1): 5,\n (3,2): 7,\n (4,1): 8,\n (4,2): 9\n }\n \n self.compute_line_scores()\n \n def nhcombine(self, score_or_count, kappa=1.2):\n \"\"\"\n The neighbourhood score or count.\n Heuristic function of the 4 line scores or counts\n score_or_count: a board of line evaluations: shape = (N,N,4)\n \"\"\"\n e,ne,n,nw = np.rollaxis(score_or_count,2,0)\n return np.power(e**kappa + ne**kappa + n**kappa + 
nw**kappa, 1/kappa)\n \n \n def compute_line_scores(self):\n gt = GomokuTools()\n self._all_scores = np.zeros(256*256, dtype=int)\n self._all_counts = np.zeros(256*256, dtype=int)\n self._all_scores_and_more = [0 for _ in range(256*256)]\n for n in range(81*81):\n xo = gt.base2_to_xo(n)\n o,d = gt.line_for_xo(xo)\n m = gt.mask(o,d)\n m2 = gt.mask2(o,d)\n if m2[1] >= 4 and sum(gt.as_bit_array(m2[0])) >= 1:\n densities = np.multiply(gt.as_bit_array(o), [0,1,2,3,3,2,1,0])\n density = sum(densities)\n no = gt.num_offensive(o,d)\n no = max(no - 2, 0)\n nf = min(sum(gt.as_bit_array(m[1])),5) \n score = 256*no+16*nf+density\n self._all_scores_and_more[256*o+d]=(xo, score, no, nf, density)\n self._all_scores[256*o+d]=score\n self._all_counts[256*o+d]=no\n\n \n def lookup_score(self,o,d):\n return self._all_scores[256*o+d]\n\n \n def lookup_count(self,o,d):\n return self._all_counts[256*o+d]\n\n \n def lookup_score_and_more(self,o,d):\n return self._all_scores_and_more[256*o+d]\n\n \n def criticality(self, h, l):\n if h == 9: \n return ('lost', 1)\n elif h == 8:\n return ('move or lose in 1', 2)\n elif h == 7: \n return ('move or lose in 2', 3)\n elif (h, l) in [(5,5), (5,4)]:\n return ('move or lose in 2', 4)\n elif (h, l) == (4,4):\n return ('move or lose in 3', 5)\n else:\n return ('defendable', 6)\n \n \n def classify(self, b, w, edges=(None, None)):\n \"\"\"\n Computes a criticality score for the neighbourhood represented by the two int32 \n b for black and w for white stones\n \n Returns:\n A criticality score: that's two triples of ints, one for black and the other for white.\n The triple consists of the largest and the second-larges single-line treats, and the \n total criticality, a number between 1 and 6, 1 for immediate loss and 6 for defendable.\n \"\"\"\n return self.classify_nh(NH9x9(b, w), edges=edges) \n \n \n \n def classify_nh(self, nh, all_edges=None, score_for=0):\n if all_edges==None:\n all_edges=[(None, None) for i in range(4)]\n res = []\n for color in [score_for, 1-score_for]:\n classes=[self.classify_line(nh.get_line(direction, color), all_edges[direction]) \n for direction in range(4)]\n \n l, h = sorted(classes)[-2:]\n c = self.criticality(h, l)\n res.append((h, l, c[1]))\n return res\n \n \n \n def soft_values(self, nh, viewpoint=0):\n classification = self.classify_nh(nh)\n values=[]\n for color in [0,1]:\n h, l, c = classification[color]\n values.append(16*(16*(16*(6-c)+h)+l)+8*(viewpoint==color))\n return values\n \n \n \n def classify_line(self, line, edges=(None, None)):\n cscore = self.cscore(line=line, cap=2, edges=edges)\n return 0 if cscore[0] == 0 else self.c2t[cscore]\n\n \n def describe(self, classification):\n descriptions = {\n 1: \"double open 4 - done.\",\n 2: \"single open 4 - must move\",\n 3: \"double open 3 - move or lose in 2\",\n 4: \"threat to double open 3 - move or lose in 3\",\n 5: \"2 double open 2s - move or lose in 3\",\n 6: \"defendable\" \n }\n return descriptions[classification[2]] \n \n def describe_both(self, classifications):\n return [ (\"White: \" if c==1 else \"Black: \") + describe(classifications[c]) for c in [0,1]]\n \n \n def f_range(self, line, c=0, edges=(None, None)):\n \"\"\"\n The largest adversary-free range within a given line\n \n Args:\n line: 8x2 integer array that represents the stones\n c: 0 to look at black, 1 to consider white\n \"\"\"\n\n i=3\n while i >= 0 and line[1-c][i] == 0 and i != edges[0]:\n i-=1\n left = i + 1\n i=4\n while i <= 7 and line[1-c][i] == 0 and i != edges[1]:\n i+=1\n right = i-1\n return 
np.array(line[c][left:right+1])\n\n \n def cscore(self, line, c=0, edges=(None, None), cap=2):\n \"\"\"\n count how many sub-lines of 5 come with the max number of stones\n Example: \"oo.x*xx..\" : The max num of blacks if obviously 3. And there are\n two different adversary-free sub-lines counting three, namely '.x*xx' and 'x*xx.'.\n Thus the cscore would be (3,2)\n\n Args:\n line: 8x2 integer array that represents the stones \n c: color: 0 to look at black, 1 to consider white\n \"\"\"\n\n fr = self.f_range(line, c, edges)\n counts = []\n for i in range(len(fr)-3):\n counts.append(sum(fr[i:i+4])) \n m = max(counts) if counts else 0\n c_ = sum(np.array(counts) == max(counts)) if counts else 0\n c_ = min(c_,cap)\n return (m, c_)\n\n \n def color_for_triple(self, h, l, c):\n \"\"\"\n \"\"\"\n if c <= 2:\n return 4\n elif c <= 5:\n return 6-c\n elif h == 6:\n return 1\n elif h >= 4:\n return 0\n else:\n return None\n \n def threat_color(self, offensive, defensive):\n \"\"\"\n return appropriate color for given pair of threat triples\n \"\"\"\n o, d = [self.color_for_triple(*triple) for triple in [offensive, defensive]]\n if o is None and d is None:\n return None\n o, d = 0 if o is None else o, 0 if d is None else d\n return self.color_scheme[int(o)][int(d)]\n \n \n \n \nclass Reasoner():\n def __init__(self, topn, my_color):\n self.topn = topn\n self.my_color = my_color\n self.other_color = 'b' if my_color=='w' else 'w'\n \n def list_by_level(self, level_or_higher, color):\n return [s for s in self.topn if s[2][0] >= level_or_higher and s[0] == color] \n \n def list_by_criticality(self, criticalities, color):\n return [s for s in self.topn if s[2][2] in criticalities and s[0] == color] \n \n def i_can_win_now(self):\n options = self.list_by_level(8, self.my_color)\n return len(options) > 0, options\n \n def i_will_lose(self):\n return not self.i_can_win_now()[0] and self.two_fours_against_me()\n \n def two_fours_against_me(self):\n return (len(self.list_by_level(9, self.other_color)) > 0 or \n len(self.list_by_level(8, self.other_color)) > 1)\n \n def is_winning_attack(self):\n options = self.list_by_criticality([5, 4, 3, 2], self.my_color)\n return options != [], options\n \n def is_urgent_defense(self):\n options = self.list_by_criticality([5, 4, 3, 2], self.other_color)\n return options != [], options\n \n def suggest(self):\n i_can, move = self.i_can_win_now()\n if i_can:\n return move\n if self.i_will_lose():\n return \"Giving up\"\n\n is_winning, move = self.is_winning_attack()\n if is_winning:\n return move\n \n is_urgent, move = self.is_urgent_defense()\n if is_urgent:\n return move\n \n return \"Treesearch\"\n \n \n \n \n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.rollaxis", "numpy.sign", "numpy.power" ] ]
so2liu/CNNArt
[ "9d91bf08a044e7d5068f8446663726411d2236dd" ]
[ "utils/scaling.py" ]
[ "\nimport numpy as np\nfrom scipy import interpolate\nimport math\nimport time\n\ndef fScaleOnePatch(dPatch, randPatchSize, PatchSize):\n xaxis = np.linspace(0, PatchSize[0], randPatchSize[0])\n yaxis = np.linspace(0, PatchSize[1], randPatchSize[1])\n zaxis = np.linspace(0, PatchSize[2], randPatchSize[2])\n inter_train0 = np.mgrid[0:PatchSize[0], 0:PatchSize[1], 0:PatchSize[2]]\n inter_train1 = np.rollaxis(inter_train0, 0, 4)\n inter_train = np.reshape(inter_train1, [inter_train0.size // 3, 3])\n scaleddPatch = interpolate.interpn((xaxis, yaxis, zaxis), dPatch, inter_train, method='linear', bounds_error=False, fill_value=0)\n reshdPatch = np.reshape(scaleddPatch, [PatchSize[0], PatchSize[1], PatchSize[2]])\n return reshdPatch\n\ndef fscaling(X_train, X_test, scpatchSize, iscalefactor) :\n if len(scpatchSize) == 3:\n scX_train, scX_test, afterSize = fscaling3D(X_train, X_test, scpatchSize, iscalefactor)\n else:\n scX_train, scX_test, afterSize = fscaling2D(X_train, X_test, scpatchSize, iscalefactor)\n\n return scX_train, scX_test, afterSize\n\ndef fscaling2D(X_train, X_test, scpatchSize, iscalefactor) :\n start = time.clock()\n afterSize = np.round(np.multiply(scpatchSize, iscalefactor)).astype(int)\n\n # Prepare for the using of scipy.interpolation: create the coordinates of grid\n if iscalefactor == 1:\n return X_train, X_test, scpatchSize\n else:\n xaxis = np.linspace(0, afterSize[0], scpatchSize[0])\n yaxis = np.linspace(0, afterSize[1], scpatchSize[1])\n\n dAllx_train = None\n dAllx_test = None\n\n for ifold in range(len(X_train)):\n lenTrain = X_train[ifold].shape[0]\n lenTest = X_test[ifold].shape[0]\n\n # in batches\n BatchTrain = BatchTest = 20\n for icand in range(15,26):\n if lenTrain % icand == 0:\n BatchTrain = icand\n if lenTest % icand == 0:\n BatchTest = icand\n dx_Train = None\n dx_Test = None\n stepTrain = -((0 - lenTrain) // BatchTrain)\n stepTest = -((0 - lenTest) // BatchTest)\n\n for ibatch in range(BatchTrain):\n indTrain = int(stepTrain*ibatch)\n if (indTrain+stepTrain) < lenTrain:\n inter_train0=np.mgrid[0:stepTrain, 0:afterSize[0], 0:afterSize[1]]\n values_train = X_train[ifold][indTrain:(indTrain+stepTrain)]\n zaxis_train = np.arange(stepTrain)\n else:\n inter_train0 = np.mgrid[0:(lenTrain-indTrain), 0:afterSize[0], 0:afterSize[1]]\n values_train = X_train[ifold][indTrain:lenTrain]\n zaxis_train = np.arange(lenTrain-indTrain)\n inter_train1=np.rollaxis(inter_train0, 0, 4)\n inter_train=np.reshape(inter_train1, [inter_train0.size//3, 3]) # 3 for the dimension of coordinates\n upedTrain=interpolate.interpn((zaxis_train, xaxis, yaxis), values_train, inter_train, method='linear',bounds_error=False, fill_value=0)\n if dx_Train is None:\n dx_Train = upedTrain\n else:\n dx_Train = np.concatenate((dx_Train,upedTrain), axis=0)\n\n dFoldx_train=np.reshape(dx_Train,[1, lenTrain, afterSize[0], afterSize[1]])\n\n if dAllx_train is None:\n dAllx_train = dFoldx_train\n else:\n dAllx_train = np.concatenate((dAllx_train, dFoldx_train), axis=0)\n\n for ibatch in range(BatchTest):\n indTest = int(stepTest * ibatch)\n if (indTest + stepTest) < lenTest:\n inter_test0 = np.mgrid[0:stepTest, 0:afterSize[0], 0:afterSize[1]]\n values_test = X_train[ifold][indTest:(indTest + stepTest)]\n zaxis_test = np.arange(stepTest)\n else:\n inter_test0 = np.mgrid[0:(lenTest - indTest), 0:afterSize[0], 0:afterSize[1]]\n values_test = X_train[ifold][indTest:lenTest]\n zaxis_test = np.arange(lenTest - indTest)\n inter_test1=np.rollaxis(inter_test0, 0, 4)\n inter_test=np.reshape(inter_test1, 
[inter_test0.size//3, 3]) # 3 for the dimension of coordinates\n upedTest=interpolate.interpn((zaxis_test, xaxis, yaxis), values_test, inter_test, method='linear',bounds_error=False, fill_value=0)\n if dx_Test is None:\n dx_Test = upedTest\n else:\n dx_Test = np.concatenate((dx_Test,upedTest), axis=0)\n\n dFoldx_test = np.reshape(dx_Test, [1, lenTest, afterSize[0], afterSize[1]])\n\n if dAllx_test is None:\n dAllx_test = dFoldx_test\n else:\n dAllx_test = np.concatenate((dAllx_test, dFoldx_test), axis=0)\n stop = time.clock()\n print(stop - start)\n return dAllx_train, dAllx_test, afterSize\n\n\ndef fscaling3D(X_train, X_test, scpatchSize, iscalefactor):\n afterSize = np.ceil(np.multiply(scpatchSize, iscalefactor)).astype(int)\n\n # Prepare for the using of scipy.interpolation: create the coordinates of grid\n if iscalefactor == 1:\n return X_train, X_test, scpatchSize\n else:\n xaxis = np.linspace(0, afterSize[0], scpatchSize[0])\n yaxis = np.linspace(0, afterSize[1], scpatchSize[1])\n zaxis = np.linspace(0, afterSize[2], scpatchSize[2])\n\n dAllx_train = None\n dAllx_test = None\n\n for ifold in range(len(X_train)):\n lenTrain = X_train[ifold].shape[0]\n lenTest = X_test[ifold].shape[0]\n\n start = time.clock()\n\n # no batch\n inter_train0 = np.mgrid[0:lenTrain, 0:afterSize[0], 0:afterSize[1], 0:afterSize[2]]\n inter_train1 = np.rollaxis(inter_train0, 0, 5)\n inter_train = np.reshape(inter_train1, [inter_train0.size // 4, 4]) # 4 for the dimension of coordinates\n\n zaxis_train = np.arange(lenTrain)\n\n upedTrain = interpolate.interpn((zaxis_train, xaxis, yaxis, zaxis),\n X_train[ifold],\n inter_train, method='linear', bounds_error=False, fill_value=0)\n dFoldx_train = np.reshape(upedTrain, [1, lenTrain, afterSize[0], afterSize[1], afterSize[2]])\n\n\n inter_test0 = np.mgrid[0:lenTest, 0:afterSize[0], 0:afterSize[1], 0:afterSize[2]]\n inter_test1 = np.rollaxis(inter_test0, 0, 5)\n inter_test = np.reshape(inter_test1, [inter_test0.size // 4, 4]) # 4 for the dimension of coordinates\n\n zaxis_test = np.arange(lenTest)\n\n upedTest = interpolate.interpn((zaxis_test, xaxis, yaxis, zaxis),\n X_test[ifold],\n inter_test, method='linear', bounds_error=False, fill_value=0)\n dFoldx_test = np.reshape(upedTest, [1, lenTest, afterSize[0], afterSize[1], afterSize[2]])\n\n stop = time.clock()\n print(stop-start)\n\n if dAllx_train is None:\n dAllx_train = dFoldx_train\n else:\n dAllx_train = np.concatenate((dAllx_train, dFoldx_train), axis=0)\n\n if dAllx_test is None:\n dAllx_test = dFoldx_test\n else:\n dAllx_test = np.concatenate((dAllx_test, dFoldx_test), axis=0)\n\n return dAllx_train, dAllx_test, afterSize\n\ndef fcutMiddelPartOfPatch(X_train_sp, X_test_sp, scpatchSize, patchSize):\n cropStart = [(scpatchSize[idim]-patchSize[idim])//2 for idim in range(len(patchSize))]\n if np.array(X_train_sp).ndim == 4:\n if len(patchSize) == 2:\n X_train = np.array(X_train_sp)[:, cropStart[0]:cropStart[0] + patchSize[0], cropStart[1]:cropStart[1] + patchSize[1]]\n else:\n X_train = np.array(X_train_sp)[:, cropStart[0]:cropStart[0] + patchSize[0], cropStart[1]:cropStart[1] + patchSize[1], cropStart[2]:cropStart[2] + patchSize[2]]\n return X_train\n else:\n if len(patchSize) == 2:\n X_train = np.array(X_train_sp)[:, :, cropStart[0]:cropStart[0] + patchSize[0], cropStart[1]:cropStart[1] + patchSize[1]]\n X_test = np.array(X_test_sp)[:, :, cropStart[0]:cropStart[0] + patchSize[0], cropStart[1]:cropStart[1] + patchSize[1]]\n else:\n X_train = np.array(X_train_sp)[:, :, cropStart[0]:cropStart[0] + patchSize[0], 
cropStart[1]:cropStart[1] + patchSize[1], cropStart[2]:cropStart[2] + patchSize[2]]\n X_test = np.array(X_test_sp)[:, :, cropStart[0]:cropStart[0] + patchSize[0], cropStart[1]:cropStart[1] + patchSize[1], cropStart[2]:cropStart[2] + patchSize[2]]\n return X_train, X_test\n\ndef fcutMiddelPartOfOnePatch(Patch, fromPatchSize, toPatchSize):\n cropStart = [(fromPatchSize[idim]-toPatchSize[idim])//2 for idim in range(len(toPatchSize))]\n if len(toPatchSize) == 2:\n toPatch = np.array(Patch)[cropStart[0]:cropStart[0] + toPatchSize[0], cropStart[1]:cropStart[1] + toPatchSize[1]]\n else:\n toPatch = np.array(Patch)[cropStart[0]:cropStart[0] + toPatchSize[0], cropStart[1]:cropStart[1] + toPatchSize[1], cropStart[2]:cropStart[2] + toPatchSize[2]]\n return toPatch\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.reshape", "numpy.rollaxis", "scipy.interpolate.interpn", "numpy.multiply", "numpy.arange", "numpy.linspace" ] ]
km-t/dcpython
[ "c0fcd5557691004d7d9d22a662d90e52ecc5f34f" ]
[ "digital-curling/named/network/train.py" ]
[ "import numpy as np\nfrom keras import metrics, callbacks\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, BatchNormalization\nfrom keras.optimizers import rmsprop\nfrom tqdm import tqdm\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass Train:\n x = None\n y = None\n df = None\n key = None\n out_dim = None\n\n def getInputData(self):\n inputSize = 103\n x = np.empty((0, inputSize), dtype=np.float32)\n for i in tqdm(range(len(self.df))):\n vec = str(self.df.iloc[i, 0])\n v = np.zeros(inputSize, dtype=np.float32)\n for j in range(inputSize):\n v[j] = float(vec[j])\n IN = np.array([v], dtype=np.float32)\n x = np.append(x, IN, axis=0)\n return x\n\n def getOutputData(self):\n if self.key == 0: # w\n y = np.empty((0, 3), dtype=np.float32)\n self.out_dim = 3\n if self.key == 1: # a\n y = np.empty((0, 2), dtype=np.float32)\n self.out_dim = 2\n if self.key == 2: # p\n y = np.empty((0, 5), dtype=np.float32)\n self.out_dim = 5\n if self.key == 3: # s\n y = np.empty((0, 8), dtype=np.float32)\n self.out_dim = 8\n\n for i in tqdm(range(len(self.df))):\n ans = int(self.df.iloc[i, self.key+1])\n if self.key == 0:\n o = np.zeros(3, dtype=np.float32)\n o[ans] = 1\n if self.key == 1:\n o = np.zeros(2, dtype=np.float32)\n o[ans] = 1\n if self.key == 2:\n o = np.zeros(5, dtype=np.float32)\n pData = [4, 6, 8, 10, 12]\n for i in range(len(pData)):\n if pData[i] == ans:\n ans = i\n break\n o[ans] = 1\n if self.key == 3:\n o = np.zeros(8, dtype=np.float32)\n o[ans] = 1\n OUT = np.array([o], dtype=np.float32)\n y = np.append(y, OUT, axis=0)\n return y\n\n def train(self):\n # ネットワーク定義\n model = Sequential()\n model.add(Dense(128, input_dim=103, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.2))\n for _ in range(3):\n model.add(Dense(128, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(64, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(32, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(self.out_dim, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='RMSProp',\n metrics=[metrics.categorical_accuracy])\n history = model.fit(self.x, self.y,\n epochs=200,\n batch_size=256,\n validation_split=0.3)\n return model, history\n\n def __init__(self, _df, _key):\n self.df = _df\n self.key = _key\n self.x = self.getInputData()\n self.y = self.getOutputData()\n\n\ndef plotScore(histories):\n fig, (axw, axa, axp, axs) = plt.subplots(\n ncols=4, figsize=(10, 4), sharex=True)\n\n axs.plot(histories[3].history['loss'], label=\"loss\")\n axs.plot(histories[3].history['categorical_accuracy'], label=\"acc\")\n axs.set_title('shot')\n axs.set_xlabel('epochs')\n axs.set_ylim(-0.5, 4)\n axs.grid(True)\n\n axw.plot(histories[0].history['loss'], label=\"loss\")\n axw.plot(histories[0].history['categorical_accuracy'], label=\"acc\")\n axw.set_title('power')\n axw.set_xlabel('epochs')\n axw.set_ylim(-0.5, 4)\n axw.grid(True)\n\n axa.plot(histories[1].history['loss'], label=\"loss\")\n axa.plot(histories[1].history['categorical_accuracy'], label=\"acc\")\n axa.set_title('where')\n axa.set_xlabel('epochs')\n axa.set_ylim(-0.5, 4)\n axa.grid(True)\n\n axp.plot(histories[2].history['loss'], label=\"loss\")\n axp.plot(histories[2].history['categorical_accuracy'], label=\"acc\")\n axp.set_title('angle')\n axp.set_xlabel('epochs')\n axp.set_ylim(-0.5, 4)\n axp.grid(True)\n\n fig.show()\n 
plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n models = []\n histories = []\n for i in range(4):\n file = '../logs/log'+str(i)+'.csv'\n print('train by {}'.format(file))\n df = pd.read_csv(file, sep=',', header=None, names=(\n 'vector', 'where', 'angle', 'power', 'shot', 'reward'))\n df = df.drop_duplicates()\n df = df.sample(frac=1)\n trainer = Train(df, i)\n model, history = trainer.train()\n models.append(model)\n histories.append(history)\n model.save('model'+str(i)+'.h5')\n plotScore(histories)\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.zeros", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "numpy.append", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
xinpingwang/tf-faster-rcnn
[ "b70382b3787906c7f7e46bfd372f6894d58d78fd" ]
[ "tools/convert_from_depre.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\n\"\"\"\nConvert depreciated VGG16 snapshots to the ones that support tensorflow format\n\nIt will check the specific snapshot at the vgg16_depre folder, and copy it to the same location at vgg16 folder\nSee experimental/scripts/convert_vgg16.sh for how to use it.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.train_val import filter_roidb, get_training_roidb\nfrom datasets.factory import get_imdb\nimport datasets.imdb\nimport argparse\nimport pprint\nimport numpy as np\nimport sys\nimport os\nimport os.path as osp\nimport shutil\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport tensorflow as tf\nfrom tensorflow.python import pywrap_tensorflow\n\nfrom nets.vgg16 import vgg16\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Convert an old VGG16 snapshot to new format')\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default=None, type=str)\n parser.add_argument('--snapshot', dest='snapshot',\n help='vgg snapshot prefix',\n type=str)\n parser.add_argument('--imdb', dest='imdb_name',\n help='dataset to train on',\n default='voc_2007_trainval', type=str)\n parser.add_argument('--iters', dest='max_iters',\n help='number of iterations to train',\n default=70000, type=int)\n parser.add_argument('--tag', dest='tag',\n help='tag of the model',\n default=None, type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args\n\n\ndef combined_roidb(imdb_names):\n \"\"\"\n Combine multiple roidbs\n \"\"\"\n\n def get_roidb(imdb_name):\n imdb = get_imdb(imdb_name)\n print('Loaded dataset `{:s}` for training'.format(imdb.name))\n imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)\n print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))\n roidb = get_training_roidb(imdb)\n return roidb\n\n roidbs = [get_roidb(s) for s in imdb_names.split('+')]\n roidb = roidbs[0]\n if len(roidbs) > 1:\n for r in roidbs[1:]:\n roidb.extend(r)\n tmp = get_imdb(imdb_names.split('+')[1])\n imdb = datasets.imdb.imdb(imdb_names, tmp.classes)\n else:\n imdb = get_imdb(imdb_names)\n return imdb, roidb\n\n\ndef get_variables_in_checkpoint_file(file_name):\n try:\n reader = pywrap_tensorflow.NewCheckpointReader(file_name)\n var_to_shape_map = reader.get_variable_to_shape_map()\n return var_to_shape_map\n except Exception as e: # pylint: disable=broad-except\n print(str(e))\n if \"corrupted compressed block contents\" in str(e):\n print(\"It's likely that your checkpoint file has been compressed \"\n \"with SNAPPY.\")\n\n\ndef convert_names(name):\n # removing :0\n name = name.replace(':0', '')\n # replace\n name = name.replace('vgg_16/', 'vgg16_default/')\n name = name.replace('/biases', '/bias')\n name = name.replace('/weights', '/weight')\n name = name.replace('/conv1/', '/')\n name = name.replace('/conv2/', '/')\n name = name.replace('/conv3/', '/')\n name = name.replace('/conv4/', '/')\n name = 
name.replace('/conv5/', '/')\n\n return name\n\n\n# Just build the graph, load the weights/statistics, and save them\ndef convert_from_depre(net, imdb, input_dir, output_dir, snapshot, max_iters):\n if not osp.exists(output_dir):\n os.makedirs(output_dir)\n\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n sess = tf.Session(config=tfconfig)\n\n num_classes = imdb.num_classes\n with sess.graph.as_default():\n tf.set_random_seed(cfg.RNG_SEED)\n layers = net.create_architecture(sess, 'TRAIN', num_classes, tag='default',\n anchor_scales=cfg.ANCHOR_SCALES,\n anchor_ratios=cfg.ANCHOR_RATIOS)\n loss = layers['total_loss']\n # Learning rate should be reduced already\n lr = tf.Variable(cfg.TRAIN.LEARNING_RATE * cfg.TRAIN.GAMMA, trainable=False)\n momentum = cfg.TRAIN.MOMENTUM\n optimizer = tf.train.MomentumOptimizer(lr, momentum)\n gvs = optimizer.compute_gradients(loss)\n if cfg.TRAIN.DOUBLE_BIAS:\n final_gvs = []\n with tf.variable_scope('Gradient_Mult') as scope:\n for grad, var in gvs:\n scale = 1.\n if cfg.TRAIN.DOUBLE_BIAS and '/biases:' in var.name:\n scale *= 2.\n if not np.allclose(scale, 1.0):\n grad = tf.multiply(grad, scale)\n final_gvs.append((grad, var))\n train_op = optimizer.apply_gradients(final_gvs)\n else:\n train_op = optimizer.apply_gradients(gvs)\n\n checkpoint = osp.join(input_dir, snapshot + '.ckpt')\n variables = tf.global_variables()\n name2var = {convert_names(v.name): v for v in variables}\n target_names = get_variables_in_checkpoint_file(checkpoint)\n restorer = tf.train.Saver(name2var)\n saver = tf.train.Saver()\n\n print('Importing...')\n restorer.restore(sess, checkpoint)\n checkpoint = osp.join(output_dir, snapshot + '.ckpt')\n print('Exporting...')\n saver.save(sess, checkpoint)\n\n # also copy the pkl file\n index = osp.join(input_dir, snapshot + '.pkl')\n outdex = osp.join(output_dir, snapshot + '.pkl')\n shutil.copy(index, outdex)\n\n sess.close()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n print('Using config:')\n pprint.pprint(cfg)\n\n np.random.seed(cfg.RNG_SEED)\n\n # train set\n imdb, _ = combined_roidb(args.imdb_name)\n\n # output directory where the snapshot will be exported\n output_dir = get_output_dir(imdb, args.tag)\n print('Output will be exported to `{:s}`'.format(output_dir))\n\n # input directory where the snapshot will be imported\n input_dir = output_dir.replace('/vgg16/', '/vgg16_depre/')\n print('Input will be imported from `{:s}`'.format(input_dir))\n\n net = vgg16()\n\n convert_from_depre(net, imdb, input_dir, output_dir, args.snapshot, args.max_iters)\n" ]
[ [ "tensorflow.set_random_seed", "tensorflow.multiply", "numpy.random.seed", "tensorflow.train.MomentumOptimizer", "tensorflow.Session", "tensorflow.Variable", "tensorflow.global_variables", "tensorflow.train.Saver", "tensorflow.ConfigProto", "tensorflow.variable_scope", "numpy.allclose", "tensorflow.python.pywrap_tensorflow.NewCheckpointReader" ] ]
zhjpqq/scaledensenet
[ "5ae56786c7f628b8320b76d559ecaa6fa1d2ac0e" ]
[ "xmodels/msnet.py" ]
[ "# -*- coding: utf-8 -*-\n__author__ = 'ooo'\n__date__ = '2019/6/9 12:17'\n\n\"\"\"\nMulti-Resolution Net\n\"\"\"\nfrom collections import OrderedDict\nimport math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom xmodules.classifier import AdaPoolView, ReturnX, ViewLayer\nimport xtils\nfrom xtils import GCU\n\n\nclass HSigmoid(nn.Module):\n\n def __init__(self, inplace=True):\n super(HSigmoid, self).__init__()\n self.inplace = inplace\n\n def forward(self, x):\n out = F.relu6(x + 3, self.inplace) / 6\n return out\n\n\ndef hsigmoid(x):\n out = F.relu6(x + 3, inplace=True) / 6\n return out\n\n\nclass SeModule(nn.Module):\n _ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid, 'relu6': nn.ReLU6}\n\n def __init__(self, indepth, reduction=4, active='hsig'):\n super(SeModule, self).__init__()\n \"\"\"\n Squeeze-> x ->Expand, x => [batch, channels, 1, 1]\n \"\"\"\n assert active in self._ActiveFuc.keys()\n Active = self._ActiveFuc[active]\n\n self.se = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(indepth, indepth // reduction, 1, 1, 0, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(indepth // reduction, indepth, 1, 1, 0, bias=False),\n Active(inplace=True)\n )\n\n def forward(self, x):\n return x * self.se(x)\n\n\nclass BranchDownsize(nn.Module):\n def __init__(self, factor=None, size=None, mode='nearest', align_corners=False):\n super(BranchDownsize, self).__init__()\n self.downsize = nn.Upsample(size, factor, mode, align_corners)\n\n def forward(self, x):\n if isinstance(x, (tuple, list)):\n x3, x2, x1 = x\n x3 = self.downsize(x3)\n x2 = self.downsize(x2)\n x1 = self.downsize(x1)\n x = (x3, x2, x1)\n else:\n x = self.downsize(x)\n # print('---->', x[0].size())\n return x\n\n\ndef drop_connect(inputs, p, training):\n \"\"\" Drop connect. 
\"\"\"\n if not training or p == 0:\n return inputs\n batch_size = inputs.shape[0]\n keep_prob = 1 - p\n random_tensor = keep_prob\n random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype) # uniform [0,1)\n binary_tensor = torch.floor(random_tensor)\n output = inputs / keep_prob * binary_tensor\n return output\n\n\nclass PreProc(nn.Module):\n\n def __init__(self, indepth=3, outdepth=16, outnums=1, stride_dilate='1/4-1'):\n super(PreProc, self).__init__()\n assert outnums in [1, 2, 3]\n assert stride_dilate in ['1/1-1', '1/2-1', '1/2-2', '1/4-1', '1/4-2']\n stride, dilate = stride_dilate.split('-')\n self.stride = stride\n self.outnums = outnums\n\n if stride == '1/1': # for cifar10\n self.conv1 = nn.Conv2d(indepth, outdepth, 3, 1, 1, dilation=1, bias=False)\n self.bn1 = nn.BatchNorm2d(outdepth)\n self.act1 = nn.ReLU()\n\n elif stride == '1/2':\n if dilate == '1':\n self.conv1 = nn.Conv2d(indepth, outdepth, 3, 2, 1, dilation=1, bias=False)\n else:\n self.conv1 = nn.Conv2d(indepth, outdepth, 3, 2, 2, dilation=2, bias=False)\n self.bn1 = nn.BatchNorm2d(outdepth)\n self.act1 = nn.ReLU()\n\n elif stride == '1/4':\n if dilate == '1':\n self.conv2 = nn.Conv2d(outdepth, outdepth, 3, 2, 1, dilation=1, bias=False)\n else:\n self.conv2 = nn.Conv2d(outdepth, outdepth, 3, 2, 2, dilation=2, bias=False)\n self.bn2 = nn.BatchNorm2d(outdepth)\n self.act2 = nn.ReLU()\n\n def forward(self, x):\n # stride = '1/1' or '1/2'\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.act1(x)\n if self.stride == '1/4':\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.act2(x)\n if self.outnums == 1:\n return x\n elif self.outnums == 2:\n return x, None\n elif self.outnums == 3:\n return x, None, None\n\n\nclass MoBlock(nn.Module):\n _ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid, 'relu6': nn.ReLU6}\n\n def __init__(self, indepth, outdepth, growth, pre_ind_grow, ksp='3.1.1', pre_ksp_half=False, groups='auto',\n skgroups='gcu', active='relu', dropout=0.0, isse=1, seactive='hsig', first=False, idx=1):\n \"\"\"\n - indepth: 当前block的输入通道数.\n - outdepth: 当前block的输出通道数.\n - growth: 当前block的通道增长数.\n - pre_ind_grow: 上一个block内, 输入通道数indepth + 通道增长数growth 的值.\n - ksp: kernel_size, stride, padding in Depth-Wise Convolution. cur_ksp_half, 当前block内是否将特征图尺寸减半.\n - pre_ksp_half: 上一个block内, 是否对特征图进行了尺寸减半.\n - groups: groups 值 in Depth-Wise Convolution.\n - skgroups: 所有 skip 连接的groups值, skip-groups\n - active:\n - dropout:\n - isse: 是否包含SeModule. =1: no-SeModule ; >1: has-SeModule(reduction=isse) 默认值4\n - first:\n - idx:\n \"\"\"\n super(MoBlock, self).__init__()\n Active = self._ActiveFuc[active]\n ksp = [int(x) for x in ksp.split(sep='.')]\n assert len(ksp) == 3\n cur_ksp_half = bool(ksp[1] == 2)\n self.ksp = ksp\n assert dropout * (0.5 - dropout) >= 0, '<dropout> must be in [0, 0.5], but get %s .' % dropout\n self.dropout = dropout\n assert isse >= 1 and isinstance(isse, int), '<isse> must be a int >=1, but get %s .' 
% isse\n self.isse = isse\n self.first = first\n self.idx = idx\n\n if groups == '1x':\n groups = 1\n elif groups == 'auto':\n groups = indepth + growth\n\n curr_ind_grow = indepth + growth\n\n self.conv1 = nn.Conv2d(indepth, curr_ind_grow, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1)\n self.act1 = Active(inplace=True)\n\n # depth-wise conv\n self.conv2 = nn.Conv2d(curr_ind_grow, curr_ind_grow, ksp[0], ksp[1], ksp[2], groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1)\n self.act2 = Active(inplace=True)\n self.segate2 = [ReturnX(), SeModule(curr_ind_grow, isse, active=seactive)][isse != 1]\n\n # 计算 skip1 & skip2\n if self.first:\n self.skip1 = ReturnX()\n self.skip2 = ReturnX()\n else:\n if curr_ind_grow == pre_ind_grow:\n skip_group = GCU(pre_ind_grow, curr_ind_grow) if skgroups == 'gcu' else 1\n if not pre_ksp_half:\n # print('init---> idx %s .' % idx)\n self.skip1 = ReturnX()\n else:\n skip1_ksp = (2, 2, 0)\n self.skip1 = nn.Sequential(\n nn.Conv2d(pre_ind_grow, curr_ind_grow, skip1_ksp[0], skip1_ksp[1], skip1_ksp[2],\n bias=False, groups=skip_group),\n nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))\n if not cur_ksp_half:\n self.skip2 = ReturnX()\n else:\n skip2_ksp = (2, 2, 0)\n self.skip2 = nn.Sequential(\n nn.Conv2d(pre_ind_grow, curr_ind_grow, skip2_ksp[0], skip2_ksp[1], skip2_ksp[2],\n bias=False, groups=skip_group),\n nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))\n\n elif curr_ind_grow != pre_ind_grow:\n skip_group = GCU(pre_ind_grow, curr_ind_grow) if skgroups == 'gcu' else 1\n skip1_ksp = (2, 2, 0) if pre_ksp_half else (1, 1, 0)\n skip2_ksp = (2, 2, 0) if cur_ksp_half else (1, 1, 0)\n self.skip1 = nn.Sequential(\n nn.Conv2d(pre_ind_grow, curr_ind_grow, skip1_ksp[0], skip1_ksp[1], skip1_ksp[2],\n bias=False, groups=skip_group),\n nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))\n self.skip2 = nn.Sequential(nn.Conv2d(pre_ind_grow, curr_ind_grow, skip2_ksp[0], skip2_ksp[1],\n skip2_ksp[2], bias=False, groups=skip_group),\n nn.BatchNorm2d(curr_ind_grow, eps=1e-05, momentum=0.1))\n\n # 计算skip3\n if outdepth == indepth and not cur_ksp_half:\n self.skip3 = ReturnX()\n else:\n skip3_ksp = (2, 2, 0) if cur_ksp_half else (1, 1, 0)\n skip_group = GCU(indepth, outdepth) if skgroups == 'gcu' else 1\n self.skip3 = nn.Sequential(nn.Conv2d(indepth, outdepth, skip3_ksp[0], skip3_ksp[1], skip3_ksp[2],\n bias=False, groups=skip_group),\n nn.BatchNorm2d(outdepth, eps=1e-05, momentum=0.1))\n\n # point-wise conv\n self.conv3 = nn.Conv2d(curr_ind_grow, outdepth, kernel_size=1, stride=1, padding=0, groups=1, bias=False)\n self.bn3 = nn.BatchNorm2d(outdepth, eps=1e-05, momentum=0.1)\n self.act3 = Active(inplace=True)\n self.drop3 = nn.Dropout2d(p=dropout, inplace=False)\n\n def forward(self, x):\n # print('\\n-----> %s' % self.idx)\n assert isinstance(x, (list, tuple)) and len(x) == 3\n x3, x2, x1 = x # c3, c2, c1\n if self.first:\n c1 = self.act1(self.bn1(self.conv1(x3)))\n c2 = self.act2(self.bn2(self.conv2(c1)))\n c2 = self.segate2(c2)\n c3 = self.act3(self.bn3(self.conv3(c2)))\n c3 = self.drop3(c3)\n c3 = c3 + self.skip3(x3)\n # return c3, c2, c1\n else:\n c1 = self.act1(self.bn1(self.conv1(x3)))\n c2 = self.act2(self.bn2(self.conv2(c1 + self.skip1(x1))))\n c2 = self.segate2(c2)\n c3 = self.act3(self.bn3(self.conv3(c2 + self.skip2(x2))))\n c3 = self.drop3(c3)\n c3 = c3 + self.skip3(x3)\n # return c3, c2, c1\n # xtils.print_size(c3)\n return c3, c2, c1\n\n\nclass 
AdaConvView(nn.Module):\n _ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid,\n 'relu6': nn.ReLU6, 'none': ReturnX}\n\n def __init__(self, indepth, outdepth, ksize=1, stride=1, padding=0,\n dilation=1, groups='gcu||1', active='relu', isview=True, which=0):\n \"\"\"\n 用conv_KxK提升fmap通道,并转换为特征向量fvector。ksize为当前特征图的平面尺寸。\n self.view ==> AdaPoolView(). 比如224x224训练,ksize=28x28,可获得1x1的特征图;\n 但当改变输入尺寸为320x320时,ksize却不能随之而变为40x40,仍然是固定的28x28,\n 因而获得的fmap不是1x1,需要AdaPoolView()。\n \"\"\"\n super(AdaConvView, self).__init__()\n self.which = which\n if groups == 'gcu':\n groups = xtils.GCU(indepth, outdepth)\n self.conv = nn.Conv2d(indepth, outdepth, ksize, stride, padding, dilation, groups, bias=False)\n # can not work on 1x1-fmap\n # self.bn = nn.BatchNorm1d(outdepth)\n active = self._ActiveFuc[active]\n self.active = active(inplace=True)\n self.view = [ReturnX(), AdaPoolView('avg', -1, 0)][isview]\n\n def forward(self, x):\n if isinstance(x, tuple):\n x = x[self.which]\n x = self.conv(x)\n # x = self.bn(x)\n x = self.active(x)\n x = self.view(x)\n return x\n\n\nclass Clssifier(nn.Module):\n _ActiveFuc = {'relu': nn.ReLU, 'hsig': HSigmoid, 'relu6': nn.ReLU6}\n\n def __init__(self, indepth, middepth=0, outdepth=1000, dropout=(0,), active='relu'):\n super(Clssifier, self).__init__()\n assert isinstance(dropout, (list, tuple))\n\n self.dropout = dropout\n self.middepth = middepth\n\n if middepth == 0:\n assert len(self.dropout) >= 1\n self.drop = nn.Dropout(p=self.dropout[0], inplace=False)\n self.fc = nn.Linear(indepth, outdepth)\n elif middepth > 0:\n assert len(self.dropout) == 2\n self.drop1 = nn.Dropout(p=self.dropout[0], inplace=False)\n self.fc1 = nn.Linear(indepth, middepth)\n self.drop2 = nn.Dropout(p=self.dropout[1], inplace=False)\n self.fc2 = nn.Linear(middepth, outdepth)\n\n def forward(self, x):\n if self.middepth == 0:\n x = self.drop(x)\n x = self.fc(x)\n elif self.middepth > 0:\n x = self.drop1(x)\n x = self.fc1(x)\n x = self.drop2(x)\n x = self.fc2(x)\n return x\n\n\nclass Summary(nn.Module):\n\n def __init__(self, active_me=True):\n super(Summary, self).__init__()\n self.active_me = active_me\n\n\nclass RetxSummary(Summary):\n \"\"\"\n 将输入直接返回,作为输出.\n \"\"\"\n\n def __init__(self, active_me=True):\n super(RetxSummary, self).__init__(active_me=active_me)\n self.classifier = ReturnX()\n\n def forward(self, x):\n x = self.classifier(x)\n return x\n\n def __repr__(self):\n strme = self.__class__.__name__ + '(\\n' + \\\n ' (classifier): ' + self.classifier.__repr__() + '\\n)'\n return strme\n\n\nclass ConcatSummary(Summary):\n \"\"\"\n 汇总多个xfc的输出到一个fc; 或 汇总多个squeeze的输出到一个fc.\n \"\"\"\n\n def __init__(self, indepth, middepth=0, outdepth=1000, dropout=(0, 0),\n active='relu', with_fc=True, active_me=True):\n \"\"\"\n - indepth: 对所有输入x, 进行拼接后的输入通道数\n - middepth: fc 层的中间隐藏层,=0 则无隐藏层\n - outdepth: 输出通道数 => nlabels\n - dropout: fc 层的辍学率\n - active: fc 层的激活函数\n - withfc: when indepth==outdepth, False => 不添加fc层,直接输出拼接向量进行分类.\n - active_me: 是否激活当前模块,不激活则计算时绕过此模块\n \"\"\"\n super(ConcatSummary, self).__init__(active_me)\n\n if not with_fc:\n assert indepth == outdepth, '<withfc> can be False only under <indepth>==<outdepth>.'\n self.classifier = ReturnX()\n else:\n self.classifier = Clssifier(indepth, middepth, outdepth, dropout, active)\n\n def forward(self, x):\n # assert isinstance(x, (tuple, list))\n x = torch.cat(x, dim=1)\n x = self.classifier(x)\n return x\n\n def __repr__(self):\n strme = self.__class__.__name__ + '(\\n (concat): torch.cat(dim=1)()\\n' + \\\n ' (classifier): ' + 
self.classifier.__repr__() + ')'\n return strme\n\n\nclass PollSummary(Summary):\n \"\"\"\n 汇总多个xfc的输出, 进行投票 ==> 平均投票法 & 最大投票法.\n 投票前可选择是否先进行归一化 F.softmax() or F.normalize().\n \"\"\"\n\n def __init__(self, method='avg', isnorm='none', active_me=True):\n super(PollSummary, self).__init__(active_me)\n assert isnorm in ['none', 'softmax', 'normal', 'minmax']\n self.isnorm = isnorm\n self.method = method\n\n if isnorm == 'none':\n self.normalize = None\n elif isnorm == 'softmax':\n self.normalize = F.softmax\n elif isnorm == 'normal':\n self.normalize = F.normalize\n elif isnorm == 'minmax':\n self.normalize = self.minmax\n else:\n raise NotImplementedError\n\n if method == 'avg':\n self.reduce = torch.mean\n elif method == 'max':\n self.reduce = torch.max\n elif method == 'sum':\n self.reduce = torch.sum\n else:\n raise NotImplementedError\n\n def minmax(self, x, dim=-1):\n assert x.ndimension() == 2\n min_x, max_x = x.min(dim)[0], x.max(dim)[0]\n factor = (max_x - min_x).unsqueeze(dim)\n x = (x - min_x.unsqueeze(dim)) / factor\n return x\n\n def forward(self, x):\n # assert isinstance(x, (tuple, list))\n if self.isnorm != 'none':\n x = [self.normalize(z, dim=-1) for z in x]\n x = [z.unsqueeze_(dim=-1) for z in x]\n x = torch.cat(x, dim=-1)\n x = self.reduce(x, dim=-1)\n return x\n\n def __repr__(self):\n strme = self.__class__.__name__ + \\\n '(method={}, isnorm={})'.format(self.method, self.isnorm)\n return strme\n\n\nclass Features(nn.ModuleList):\n\n def __init__(self):\n super(Features, self).__init__()\n\n\nclass MoHead(nn.ModuleList):\n\n def __init__(self, active_me=True, active_fc=True, with_fc=True, main_aux='main'):\n \"\"\"\n - active_me: 是否激活整个head, MoBlock + fc.\n - active_fc: 是否去激活Head中的fc,否则截断计算,只输出squeeze()后的特征向量.\n - with_fc: head头中是否带有fc,否则直接输出squeeze()后的特征向量. must be False when with_fc=False.\n - main_aux: 是否是主分类头 or 辅分类头.\n \"\"\"\n super(MoHead, self).__init__()\n self.active_me = active_me\n self.active_fc = active_fc\n self.with_fc = with_fc\n self.main_aux = main_aux\n if not with_fc:\n assert not active_fc, '不存在fc时,无法截断fc.'\n else:\n assert active_fc or not active_fc, 'head中有fc时,可用/可不用fc.'\n\n\nclass MultiScaleNet(nn.Module):\n\n def __init__(self, depth, growth, blocks=9,\n expand=((2, 30), (3, 50), (5, 70)),\n exksp=((2, '3.2.1'), (3, '3.2.1'), (5, '3.2.1')),\n exisse='ft-0:7', seactive='hsig', groups='auto', skgroups='gcu', prestride='1/4-2',\n conv_drop='all-0.2', conv_act='relu', dataset='imagenet',\n summar='concat', sum_active=True, sfc_poll=('avg', 'minmax'),\n sfc_with=True, sfc_indep=256, sfc_middep=512, sfc_drop=(0.3, 0.3), sfc_active='relu',\n head_key_cfg={'2-4-4': {}, '3-8-8': {}, '3-8-12': {}, '9-32-32': {}}):\n \"\"\"\n - depth: => 起始通道数\n - growth: => channel-growth可正/可负\n - blocks:\n - expand: => feature-size & feature-channels 之间; 可同步调节 & 可异步调节\n - exksp: => 任意Bottleneck-Block的核尺寸可任意指定,'3.1.1' or '3.2.1' or '5.2.2'\n - exisse: => 任意MoBlock上可附加一个SeModule.\n - groups:\n - skgroups:\n - prestride:\n - conv_drop: 卷积骨架层中的辍学率,'all-0.2': 全部block都为0.2;\n 'bet-0.1-0.2': between 0.1 & 0.2, 逐block递增从0.1到0.2\n - conv_act: 卷积骨架层网络的激活函数.\n - dataset:\n\n - summar: Summary 中执行Summary操作的方式.\n - sum_active: Summary 模块的计算过程是否被激活.\n - sfc_poll: Summary 中按投票方式操作时,具体的投票规则. only useful when summar == 'fcpoll'.\n - sfc_with: Summary 中按拼接方式操作时,当输入/输出通道数相等,是否还需要fc层. 
若不需要fc即直接squeeze出lables.\n - sfc_indep: Summary 中Linear层的输入通道数.\n - sfc_middep: Summary 中Linear层的中间隐藏层,为0则无隐藏层.\n - sfc_drop: Summary 中Linear层的辍学率.\n - sfc_active: Summary 中Linear层的激活函数.\n - head_key_cfg: {pos-scale-key: cfg-dict, ...} => {'block_id-scale1-scale2': {}, ...}.\n \"\"\"\n\n super(MultiScaleNet, self).__init__()\n assert groups in ['auto', '1x'], '<groups> must be in [\"1x\", \"auto\"], but get %s .' % groups\n assert skgroups in ['gcu', '1x'], '<skgroups> must be in [\"gcu\", \"1x\"], but get %s.' % skgroups\n assert (sfc_middep == 0 and len(sfc_drop) >= 1) or (sfc_middep != 0 and len(sfc_drop) == 2)\n assert conv_drop[:3] in ['all', 'bet'], '<conv_drop> must be \"all-a\" or \"bet-a-b\".'\n assert summar in ['concat', 'independ', 'fcpoll']\n\n if dataset == 'imagenet':\n nlabels = 1000\n elif dataset == 'cifar10':\n nlabels = 10\n elif dataset == 'cifar100':\n nlabels = 100\n else:\n raise NotImplementedError('Unknown <dataset: %s>' % dataset)\n self.nlabels = nlabels\n self.dataset = dataset\n self.summar = summar\n self.sum_active = sum_active\n\n expand = OrderedDict(expand)\n exksp = OrderedDict(exksp)\n exisse = self.get_semodule(exisse, blocks)\n conv_drop = self.get_dropout(conv_drop, blocks)\n sfc_drop = sfc_drop\n\n # 预处理\n self.preproc = PreProc(indepth=3, outdepth=depth, outnums=3, stride_dilate=prestride)\n\n # 骨架特征提取 backbone feature-extractor\n self.features = Features()\n indepth = depth\n pre_ind_grow = 0\n pre_ksp_half = False\n for i in range(1, blocks + 1):\n outdepth = indepth + expand.get(i, 0)\n curr_ksp = exksp.get(i, '3.1.1')\n curr_se = exisse.get(i, 1)\n curr_drop = conv_drop.get(i, 0)\n block = MoBlock(indepth, outdepth, growth, pre_ind_grow, ksp=curr_ksp, pre_ksp_half=pre_ksp_half,\n groups=groups, skgroups=skgroups, active=conv_act, isse=curr_se, seactive=seactive,\n dropout=curr_drop, first=bool(i == 1), idx=i)\n pre_ind_grow = indepth + growth\n pre_ksp_half = curr_ksp.split('.')[1] == '2'\n indepth = outdepth\n self.features.add_module('mo_%s' % i, block)\n\n # 添加分类头 fc-head.\n # 方法:用key衔接骨架fmap和分类head (key->pos.fmap & key->model.head)\n # 运行前将head以key为属性注册到网络中, head=model.key.\n # 运行前将fmap以{key, pos}进行标记, 运行时缓存对应pos上的fmap即可\n self.bone_feat_maps = {} # 用于缓存骨架特征图 backbone-fmaps\n self.head_key_pos = {} # 用于关联特征图和分类头 fcHead-pose\n for key, cfg in head_key_cfg.items():\n print('**** -> head-%s' % key)\n head = self.get_fc_head(key=key, **cfg)\n setattr(self, 'head-%s' % key, head) # 注册head\n moid = key.split('-')[0] # 标记fmap\n self.head_key_pos.setdefault('head-%s' % key, moid)\n print(self.head_key_pos)\n\n # 汇总各个分类头的输出\n if summar == 'independ':\n self.summary = RetxSummary(active_me=sum_active)\n elif summar == 'concat':\n self.summary = ConcatSummary(sfc_indep, sfc_middep, nlabels, sfc_drop, sfc_active, sfc_with, sum_active)\n elif summar == 'fcpoll':\n self.summary = PollSummary(method=sfc_poll[0], isnorm=sfc_poll[1], active_me=sum_active)\n\n self.train_which_now = {'bone+mhead': False, 'auxhead': False, 'summary': False}\n self.eval_which_now = {'bone+mhead': False, 'bone+mhead+auxhead': False, 'bone+mhead+auxhead+summary': False}\n\n self._init_params()\n\n def get_semodule(self, exisse, blocks):\n \"\"\"\n 以多种方式添加SeModule到MoBlock上,默认reduction=4\n \"\"\"\n if exisse == () or isinstance(exisse[0], tuple):\n # 指定要添加的block-id 和 reduce-val\n exisse = OrderedDict(exisse)\n elif isinstance(exisse, (list, tuple)):\n # 指定要添加的block-id\n exisse_dict = OrderedDict()\n for id in exisse:\n exisse_dict.setdefault(id, 4)\n exisse = exisse_dict\n elif 
exisse == 'all':\n # 所有block都添加SeModule\n exisse = OrderedDict()\n for i in range(1, blocks + 1):\n exisse.setdefault(i, 4)\n elif exisse.startswith('ft-'):\n # 指定block添加SeModule\n # ft-3:5-7:10-15:18, from 3-5, 7-10, 15-18.\n # ft-0:0 不添加\n ft = exisse[3:].split('-')\n exisse = []\n for idx in ft:\n idx = [int(x) for x in idx.split(':')]\n for i in range(idx[0], idx[1] + 1):\n exisse.append((i, 4))\n exisse = OrderedDict(exisse)\n return exisse\n\n def get_dropout(self, conv_drop='', blocks=0):\n if conv_drop.startswith('all-'):\n dropout = float(conv_drop.split('-')[1])\n conv_drop = OrderedDict()\n for i in range(1, blocks + 1):\n conv_drop.setdefault(i, dropout)\n return conv_drop\n elif conv_drop.startswith('bet-'):\n import numpy as np\n dropout = conv_drop.split('-')\n dropout = [float(dropout[1]), float(dropout[2])]\n dropout = np.arange(dropout[0], dropout[1],\n (dropout[1] - dropout[0]) / blocks)\n conv_drop = OrderedDict()\n for i in range(1, blocks + 1):\n conv_drop.setdefault(i, round(dropout[i - 1], 3))\n return conv_drop\n\n def get_fc_head(self, key, blocks, indepth, expand, exksp, exisse, growth, pre_ind_grow,\n pre_ksp_half, groups='auto', skgroups='gcu', conv_drop='all-0', conv_active='relu',\n seactive='hsig', fc_middep=0, fc_drop=0, fc_active='relu', with_fc=True,\n active_fc=True, active_me=True, main_aux='main',\n squeeze='pool', sq_outdep=1, sq_ksize=1, sq_groups='gcu', sq_active='relu', sq_isview=True):\n # fc_head内的各MoBlock中不进行尺寸削减stride==1, 以固定的分辨率对接到fc层.\n\n # key=\"moid-scale1-scale2\". # \".\" 不能出现在model.state_dict.key()中,否则model.load_state_dict()出错.\n # moid=> 使用骨架中的第moid个MoBlock的特征图作为当前fchead的输入.\n # scale1=> 骨架挂载点(moid)处的MoBlock相对原图的尺寸系数.\n # scale2=> 当前fc_head内,MoBlock相对原图的尺寸系数.\n # scale1=> scale2, 当需要更精细的分辨率时,将输入特征图的分辨率(scale1)降采样到当前head的分辨率(scale2).\n # (moid=5, scale1=4, scale2=6): 在骨架中第5个MoBlock所产生的1/4特征图上,接一个1/6的分支头,需要先对其特征图降采样(4/6).\n # active_me: 是否激活当前分类头的计算过程\n # active_fc: 是否激活当前分类头中fc的计算过程,否则输出squeeze()的计算.\n # squeeze: ==pool时,sq_xxx参数全不不使用,仅在==conv时使用。\n # main_aux: 是否是主分类头 或 辅助分类头\n key = key.replace('@', '.')\n moid, scale1, scale2 = [float(x) for x in key.split('-')]\n expand = OrderedDict(expand)\n exksp = OrderedDict(exksp)\n exisse = self.get_semodule(exisse, blocks)\n conv_drop = self.get_dropout(conv_drop, blocks)\n\n head = MoHead(active_me, active_fc, with_fc, main_aux)\n\n if scale2 != scale1:\n head.add_module('downsize',\n BranchDownsize(factor=round(scale1 / scale2, 7), mode='bilinear', align_corners=True))\n for i in range(1, blocks + 1):\n outdepth = indepth + expand.get(i, 0)\n curr_ksp = exksp.get(i, '3.1.1')\n curr_se = exisse.get(i, 1)\n curr_drop = conv_drop.get(i, 0)\n block = MoBlock(indepth, outdepth, growth, pre_ind_grow, ksp=curr_ksp, pre_ksp_half=pre_ksp_half,\n groups=groups, skgroups=skgroups, active=conv_active, isse=curr_se, seactive=seactive,\n dropout=curr_drop, first=False, idx=i)\n pre_ind_grow = indepth + growth\n pre_ksp_half = curr_ksp.split('.')[1] == '2'\n indepth = outdepth\n head.add_module('mo_%s' % i, block)\n\n if squeeze == 'pool':\n squeeze = AdaPoolView(pool='avg', dim=-1, which=0)\n elif squeeze == 'conv':\n squeeze = AdaConvView(indepth, sq_outdep, sq_ksize, stride=1, padding=0,\n groups=sq_groups, active=sq_active, isview=sq_isview, which=0)\n indepth = sq_outdep\n else:\n raise NotImplementedError('<squeeze> must be <pool || conv>, but get %s.' 
% squeeze)\n head.add_module('squeeze', squeeze)\n\n if with_fc:\n head.add_module('classifier', Clssifier(indepth, fc_middep, self.nlabels, fc_drop, fc_active))\n return head\n\n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight.data)\n if getattr(m, 'bias', None) is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n # nn.init.kaiming_normal_(m.weight)\n # nn.init.normal_(m.weight, mean=0, std=1)\n # nn.init.xavier_normal_(m.weight, gain=1)\n if getattr(m, 'bias', None) is not None:\n m.bias.data.zero_()\n\n def train_mode(self, ite, cfg):\n # 当迭代次数 ite 超过设定值,开启对应的训练阶段\n which = None\n for key in sorted(cfg.train_which.keys())[::-1]:\n if ite >= key:\n which = cfg.train_which[key]\n break\n self.set_train_which(part=which, name_which=cfg.name_which)\n\n def eval_mode(self, ite, cfg):\n # 当迭代次数 ite 超过设定值,开启对应的测试阶段\n which = None\n for key in sorted(cfg.eval_which.keys())[::-1]:\n if ite >= key:\n which = cfg.eval_which[key]\n break\n self.set_eval_which(part=which, name_which=cfg.name_which)\n\n def reset_mode(self, mode='train'):\n if mode == 'train':\n for k, v in self.train_which_now.items():\n self.train_which_now[k] = False\n elif mode == 'val':\n for k, v in self.eval_which_now.items():\n self.eval_which_now[k] = False\n\n def set_train_which(self, part, name_which=None):\n \"\"\"\n -part: 基于Module类的控制, eg. PreProc, Features, MoHead\n -name_which: 基于Block实例的控制, eg. MoHead.head-3-2-2, MoHead.head-3-2-2\n 要控制哪一类Module下的哪一个模块Block => eg. MoHead 下的 MoHead.head-3-2-2\n \"\"\"\n assert part in self.train_which_now, '设定超出可选项范围--> %s' % part\n self.reset_mode(mode='val')\n if self.train_which_now[part]:\n return\n else:\n self.reset_mode(mode='train')\n self.train_which_now[part] = True\n\n if part == 'bone+mhead':\n for name, module in self.named_modules():\n if isinstance(module, (PreProc, Features)):\n module.train()\n for p in module.parameters():\n p.requires_grad = True\n if isinstance(module, MoHead):\n if module.main_aux == 'main':\n module.active_me = True\n module.active_fc = True\n module.train()\n for p in module.parameters():\n p.requires_grad = True\n elif module.main_aux == 'aux':\n module.active_me = False\n module.active_fc = False\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n if isinstance(module, Summary):\n module.active_me = False\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n elif part == 'auxhead':\n for name, module in self.named_modules():\n if isinstance(module, (PreProc, Features)):\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n if isinstance(module, MoHead):\n if module.main_aux == 'main':\n # 其实可以输出,但不能反传梯度\n module.active_me = True\n module.active_fc = True\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n elif module.main_aux == 'aux':\n if name == name_which:\n module.active_me = True\n module.active_fc = True\n module.train()\n for p in module.parameters():\n p.requires_grad = True\n else:\n module.active_me = False\n module.active_fc = False\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n if isinstance(module, Summary):\n module.active_me = False\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n elif part == 'summary':\n for name, module in self.named_modules():\n if isinstance(module, (PreProc, Features)):\n module.eval()\n for 
p in module.parameters():\n p.requires_grad = False\n if isinstance(module, MoHead):\n module.active_me = True\n module.active_fc = False\n module.eval()\n for p in module.parameters():\n p.requires_grad = False\n if isinstance(module, Summary):\n module.active_me = True\n module.train()\n for p in module.parameters():\n p.requires_grad = True\n\n def set_eval_which(self, part, name_which=None):\n assert part in self.eval_which_now, '设定超出可选项范围--> %s' % part\n self.reset_mode(mode='train')\n if self.eval_which_now[part]:\n return\n else:\n self.reset_mode(mode='val')\n self.eval_which_now[part] = True\n\n if part == 'bone+mhead':\n self.eval()\n for name, module in self.named_modules():\n if isinstance(module, (PreProc, Features)):\n pass\n elif isinstance(module, MoHead):\n if module.main_aux == 'main':\n module.active_me = True\n module.active_fc = True\n elif module.main_aux == 'aux':\n module.active_me = False\n module.active_fc = False\n if isinstance(module, Summary):\n module.active_me = False\n elif part == 'bone+mhead+auxhead':\n self.eval()\n for name, module in self.named_modules():\n if isinstance(module, (PreProc, Features)):\n pass\n elif isinstance(module, MoHead):\n if module.main_aux == 'main':\n module.active_me = True\n module.active_fc = True\n elif module.main_aux == 'aux':\n if name == name_which:\n module.active_me = True\n module.active_fc = True\n else:\n module.active_me = False\n module.active_fc = False\n elif isinstance(module, Summary):\n module.active_me = False\n elif part == 'bone+mhead+auxhead+summary':\n self.eval()\n for name, module in self.named_modules():\n if isinstance(module, (PreProc, Features)):\n pass\n elif isinstance(module, MoHead):\n module.active_me = True\n module.active_fc = False\n elif isinstance(module, Summary):\n module.active_me = True\n\n def forward(self, x):\n bone_feat_maps = {}\n x = self.preproc(x)\n # print('pre->', x[0].size(), len(x))\n for id, mo in enumerate(self.features):\n x = mo(x)\n if str(id + 1) in self.head_key_pos.values():\n bone_feat_maps.setdefault(str(id + 1), x)\n # print('id->%s' % (id + 1), x[0].size(), len(x), x[1].size(), x[2].size())\n logits = []\n for key, pos in self.head_key_pos.items():\n # print('*** --> ', key)\n head = getattr(self, key, None)\n # assert head is not None\n if not head.active_me: # 跳过未激活头head\n continue\n if head.with_fc and not head.active_fc:\n head = head[:-1] # 去掉head中未激活的分类器\n x = bone_feat_maps.get(pos, None)\n # assert x is not None\n for id, mo in enumerate(head):\n x = mo(x)\n logits.append(x)\n if self.summary.active_me:\n logits = [self.summary(logits)]\n return logits\n\n\nif __name__ == '__main__':\n # imagenet\n\n # cifar10\n ms6_hkc = {\n '8-2-2': dict(blocks=3, indepth=32, growth=10, pre_ind_grow=48,\n expand=((1, 10), (2, 10), (3, 10)), exksp=(), exisse='ft-0:0', pre_ksp_half=False,\n groups='auto', skgroups='gcu', conv_drop='all-0.', conv_active='relu',\n fc_middep=0, fc_drop=(0.0, 0.0), fc_active='relu', with_fc=True,\n active_fc=False, active_me=True, main_aux='aux',\n squeeze='conv', sq_outdep=62*2, sq_ksize=16, sq_groups='gcu', sq_active='relu', sq_isview=True),\n '8-2-2@6': dict(blocks=3, indepth=32, growth=10, pre_ind_grow=48,\n expand=((1, 10), (2, 10), (3, 10)), exksp=(), exisse='ft-0:0', pre_ksp_half=False,\n groups='auto', skgroups='gcu', conv_drop='all-0.', conv_active='relu',\n fc_middep=0, fc_drop=(0.0, 0.0), fc_active='relu', with_fc=True,\n active_fc=False, active_me=True, main_aux='aux',\n squeeze='conv', sq_outdep=62*2, sq_ksize=12, 
sq_groups='gcu', sq_active='relu', sq_isview=True),\n '15-4-4': dict(blocks=0, indepth=96, growth=0, pre_ind_grow=0,\n expand=(), exksp=(), exisse='ft-0:0', pre_ksp_half=False,\n groups='auto', skgroups='gcu', conv_drop='all-0.', conv_active='relu',\n fc_middep=0, fc_drop=(0.0, 0.0), fc_active='relu', with_fc=True,\n active_fc=False, active_me=True, main_aux='main',\n squeeze='conv', sq_outdep=96*2, sq_ksize=8, sq_groups='gcu', sq_active='relu', sq_isview=True),\n } # 97L 0.298M 0.031G 440fc 0.08s\n ms6 = OrderedDict(depth=16, growth=16, blocks=15, expand=((4, 16), (9, 32), (9, 64)),\n exksp=((4, '3.2.1'), (9, '3.2.1')), exisse='ft-0:0',\n groups='auto', skgroups='gcu', prestride='1/1-1', conv_drop='all-0.0', conv_act='relu',\n summar='concat', sfc_with=True, sfc_poll=('avg', 'minmax'), sfc_indep=440, sfc_middep=0,\n sfc_drop=(0.0, 0.0), sfc_active='relu', seactive='hsig', head_key_cfg=ms6_hkc, dataset='cifar10')\n\n model = MultiScaleNet(**ms6)\n print(model)\n\n model.set_train_which(part=['bone+mhead', 'auxhead', 'summary'][2], name_which='head-8-2-2@6')\n # model.set_eval_which(part=['bone+mhead', 'bone+mhead+auxhead', 'bone+mhead+auxhead+summary'][1])\n # paramsx = [p for p in model.parameters() if p.requires_grad]\n #\n # model.set_train_which(part=['bone+mhead', 'auxhead', 'summary'][0])\n # paramsy = [p for p in model.parameters() if p.requires_grad]\n #\n # share = list(set(paramsx).intersection(set(paramsy)))\n\n insize = [32, 224][model.dataset == 'imagenet']\n x = torch.randn(1, 3, insize, insize)\n z = model(x)\n print('z-->', len(z), '\\n')\n\n xtils.calculate_layers_num(model)\n xtils.calculate_FLOPs_scale(model, input_size=insize, use_gpu=False, multiply_adds=False)\n xtils.calculate_params_scale(model, format='million')\n xtils.calculate_time_cost(model, insize=insize, toc=1, pritout=True, use_gpu=False)\n" ]
[ [ "torch.nn.Linear", "torch.rand", "torch.nn.functional.relu6", "torch.cat", "torch.nn.Dropout", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.init.kaiming_normal_", "torch.nn.Upsample", "torch.nn.ReLU", "torch.nn.Conv2d", "numpy.arange", "torch.floor", "torch.nn.Dropout2d", "torch.randn" ] ]
53X/asteroid
[ "69e82fed49bab84975592ae868aaf6dceb91d6cd" ]
[ "asteroid/dsp/overlap_add.py" ]
[ "import torch\nfrom scipy.signal import get_window\nfrom asteroid.losses import PITLossWrapper\nfrom torch import nn\n\n\nclass LambdaOverlapAdd(torch.nn.Module):\n \"\"\"Segment signal, apply func, combine with OLA.\n\n Args:\n nnet (callable): function to apply to each segment.\n n_src (int): Number of sources in the output of nnet.\n window_size (int): Size of segmenting window.\n hop_size (int): segmentation hop size.\n window (str): Name of the window (see scipy.signal.get_window)\n reorder_chunks (bool): whether to reorder each consecutive segment.\n\n Examples:\n >>> from asteroid import ConvTasNet\n >>> nnet = ConvTasNet(n_src=2)\n >>> continuous_nnet = LambdaOverlapAdd(\n >>> nnet=nnet,\n >>> n_src=2,\n >>> window_size=64000,\n >>> hop_size=None,\n >>> window=\"hanning\",\n >>> reorder_chunks=True,\n >>> enable_grad=False,\n >>> )\n >>> wav = torch.randn(1, 1, 500000)\n >>> out_wavs = continuous_nnet.forward(wav)\n \"\"\"\n\n def __init__(\n self,\n nnet,\n n_src,\n window_size,\n hop_size=None,\n window=\"hanning\",\n reorder_chunks=True,\n enable_grad=False,\n ):\n super().__init__()\n assert window_size % 2 == 0, \"Window size must be even\"\n\n self.nnet = nnet\n self.window_size = window_size\n self.hop_size = hop_size if hop_size is not None else window_size // 2\n self.n_src = n_src\n\n if window:\n window = get_window(window, self.window_size).astype(\"float32\")\n window = torch.from_numpy(window)\n self.use_window = True\n else:\n self.use_window = False\n\n self.register_buffer(\"window\", window)\n self.reorder_chunks = reorder_chunks\n self.enable_grad = enable_grad\n\n def ola_forward(self, x):\n \"\"\"Heart of the class: segment signal, apply func, combine with OLA.\"\"\"\n\n assert x.ndim == 3\n\n batch, channels, n_frames = x.size()\n # Overlap and add:\n # [batch, chans, n_frames] -> [batch, chans, win_size, n_chunks]\n unfolded = torch.nn.functional.unfold(\n x.unsqueeze(-1),\n kernel_size=(self.window_size, 1),\n padding=(self.window_size, 0),\n stride=(self.hop_size, 1),\n )\n\n out = []\n n_chunks = unfolded.shape[-1]\n for frame_idx in range(n_chunks): # for loop to spare memory\n frame = self.nnet(unfolded[..., frame_idx])\n # user must handle multichannel by reshaping to batch\n if frame_idx == 0:\n assert frame.ndim == 3, \"nnet should return (batch, n_src, time)\"\n assert frame.shape[1] == self.n_src, \"nnet should return (batch, n_src, time)\"\n frame = frame.reshape(batch * self.n_src, -1)\n\n if frame_idx != 0 and self.reorder_chunks:\n # we determine best perm based on xcorr with previous sources\n frame = _reorder_sources(\n frame, out[-1], self.n_src, self.window_size, self.hop_size\n )\n\n if self.use_window:\n frame = frame * self.window\n else:\n frame = frame / (self.window_size / self.hop_size)\n out.append(frame)\n\n out = torch.stack(out).reshape(n_chunks, batch * self.n_src, self.window_size)\n out = out.permute(1, 2, 0)\n\n out = torch.nn.functional.fold(\n out,\n (n_frames, 1),\n kernel_size=(self.window_size, 1),\n padding=(self.window_size, 0),\n stride=(self.hop_size, 1),\n )\n return out.squeeze(-1).reshape(batch, self.n_src, -1)\n\n def forward(self, x):\n \"\"\"Forward module: segment signal, apply func, combine with OLA.\n\n Args:\n x (:class:`torch.Tensor`): waveform signal of shape (batch, 1, time).\n\n Returns:\n :class:`torch.Tensor`: The output of the lambda OLA.\n \"\"\"\n # Here we can do the reshaping\n with torch.autograd.set_grad_enabled(self.enable_grad):\n olad = self.ola_forward(x)\n return olad\n\n\ndef 
_reorder_sources(\n current: torch.FloatTensor,\n previous: torch.FloatTensor,\n n_src: int,\n window_size: int,\n hop_size: int,\n):\n \"\"\"\n Reorder sources in current chunk to maximize correlation with previous chunk.\n Used for Continuous Source Separation. Standard dsp correlation is used\n for reordering.\n\n\n Args:\n current (:class:`torch.Tensor`): current chunk, tensor\n of shape (batch, n_src, window_size)\n previous (:class:`torch.Tensor`): previous chunk, tensor\n of shape (batch, n_src, window_size)\n n_src (:class:`int`): number of sources.\n window_size (:class:`int`): window_size, equal to last dimension of\n both current and previous.\n hop_size (:class:`int`): hop_size between current and previous tensors.\n\n Returns:\n current:\n\n \"\"\"\n batch, frames = current.size()\n current = current.reshape(-1, n_src, frames)\n previous = previous.reshape(-1, n_src, frames)\n\n overlap_f = window_size - hop_size\n pw_losses = PITLossWrapper.get_pw_losses(\n lambda x, y: torch.sum((x.unsqueeze(1) * y.unsqueeze(2))),\n current[..., :overlap_f],\n previous[..., -overlap_f:],\n )\n _, perms = PITLossWrapper.find_best_perm(pw_losses, n_src)\n current = PITLossWrapper.reorder_source(current, n_src, perms)\n return current.reshape(batch, frames)\n\n\nclass DualPathProcessing(nn.Module):\n \"\"\"Perform Dual-Path processing via overlap-add as in DPRNN [1].\n\n Args:\n chunk_size (int): Size of segmenting window.\n hop_size (int): segmentation hop size.\n\n References:\n [1] \"Dual-path RNN: efficient long sequence modeling for\n time-domain single-channel speech separation\", Yi Luo, Zhuo Chen\n and Takuya Yoshioka. https://arxiv.org/abs/1910.06379\n \"\"\"\n\n def __init__(self, chunk_size, hop_size):\n super(DualPathProcessing, self).__init__()\n self.chunk_size = chunk_size\n self.hop_size = hop_size\n self.n_orig_frames = None\n\n def unfold(self, x):\n \"\"\"Unfold the feature tensor from\n\n (batch, channels, time) to (batch, channels, chunk_size, n_chunks).\n\n Args:\n x: (:class:`torch.Tensor`): feature tensor of shape (batch, channels, time).\n\n Returns:\n x: (:class:`torch.Tensor`): spliced feature tensor of shape\n (batch, channels, chunk_size, n_chunks).\n\n \"\"\"\n # x is (batch, chan, frames)\n batch, chan, frames = x.size()\n assert x.ndim == 3\n self.n_orig_frames = x.shape[-1]\n unfolded = torch.nn.functional.unfold(\n x.unsqueeze(-1),\n kernel_size=(self.chunk_size, 1),\n padding=(self.chunk_size, 0),\n stride=(self.hop_size, 1),\n )\n\n return unfolded.reshape(\n batch, chan, self.chunk_size, -1\n ) # (batch, chan, chunk_size, n_chunks)\n\n def fold(self, x, output_size=None):\n \"\"\"Folds back the spliced feature tensor.\n\n Input shape (batch, channels, chunk_size, n_chunks) to original shape\n (batch, channels, time) using overlap-add.\n\n Args:\n x: (:class:`torch.Tensor`): spliced feature tensor of shape\n (batch, channels, chunk_size, n_chunks).\n output_size: (int, optional): sequence length of original feature tensor.\n If None, the original length cached by the previous call of `unfold`\n will be used.\n\n Returns:\n x: (:class:`torch.Tensor`): feature tensor of shape (batch, channels, time).\n\n .. 
note:: `fold` caches the original length of the pr\n\n \"\"\"\n output_size = output_size if output_size is not None else self.n_orig_frames\n # x is (batch, chan, chunk_size, n_chunks)\n batch, chan, chunk_size, n_chunks = x.size()\n to_unfold = x.reshape(batch, chan * self.chunk_size, n_chunks)\n x = torch.nn.functional.fold(\n to_unfold,\n (output_size, 1),\n kernel_size=(self.chunk_size, 1),\n padding=(self.chunk_size, 0),\n stride=(self.hop_size, 1),\n )\n\n x /= self.chunk_size / self.hop_size\n\n return x.reshape(batch, chan, self.n_orig_frames)\n\n @staticmethod\n def intra_process(x, module):\n \"\"\"Performs intra-chunk processing.\n\n Args:\n x (:class:`torch.Tensor`): spliced feature tensor of shape\n (batch, channels, chunk_size, n_chunks).\n module (:class:`torch.nn.Module`): module one wish to apply to each chunk\n of the spliced feature tensor.\n\n\n Returns:\n x (:class:`torch.Tensor`): processed spliced feature tensor of shape\n (batch, channels, chunk_size, n_chunks).\n\n .. note:: the module should have the channel first convention and accept\n a 3D tensor of shape (batch, channels, time).\n \"\"\"\n\n # x is (batch, channels, chunk_size, n_chunks)\n batch, channels, chunk_size, n_chunks = x.size()\n # we reshape to batch*chunk_size, channels, n_chunks\n x = x.transpose(1, -1).reshape(batch * n_chunks, chunk_size, channels).transpose(1, -1)\n x = module(x)\n x = x.reshape(batch, n_chunks, channels, chunk_size).transpose(1, -1).transpose(1, 2)\n return x\n\n @staticmethod\n def inter_process(x, module):\n \"\"\"Performs inter-chunk processing.\n\n Args:\n x (:class:`torch.Tensor`): spliced feature tensor of shape\n (batch, channels, chunk_size, n_chunks).\n module (:class:`torch.nn.Module`): module one wish to apply between\n each chunk of the spliced feature tensor.\n\n\n Returns:\n x (:class:`torch.Tensor`): processed spliced feature tensor of shape\n (batch, channels, chunk_size, n_chunks).\n\n .. note:: the module should have the channel first convention and accept\n a 3D tensor of shape (batch, channels, time).\n \"\"\"\n\n batch, channels, chunk_size, n_chunks = x.size()\n x = x.transpose(1, 2).reshape(batch * chunk_size, channels, n_chunks)\n x = module(x)\n x = x.reshape(batch, chunk_size, channels, n_chunks).transpose(1, 2)\n return x\n" ]
[ [ "torch.nn.functional.fold", "torch.stack", "torch.autograd.set_grad_enabled", "torch.from_numpy", "scipy.signal.get_window" ] ]
leo-ware/dowhy
[ "3a2a79e2159a7f29456dd419a3c90395a384364e" ]
[ "dowhy/utils/dgps/linear_dgp.py" ]
[ "from dowhy.utils.dgp import DataGeneratingProcess\nimport numpy as np\nimport pandas as pd\n\nclass LinearDataGeneratingProcess(DataGeneratingProcess):\n '''\n Implements a data generating process that returns data having linear relationship between the treatment, outcome and confounders \n '''\n\n NAME = \"Linear DGP\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.auto_gen = False\n\n if self.weights == {} and self.bias == {}:\n self.auto_gen = True\n\n def generate_data(self, sample_size):\n if self.auto_gen:\n self.generation_process()\n \n control_value = np.zeros( (sample_size, len(self.treatment) ) )\n treatment_value = np.ones( (sample_size, len(self.treatment) ) )\n confounder = np.random.randn(sample_size, len(self.confounder))\n effect_modifier = np.zeros((sample_size, len(self.effect_modifier) )) # random.randn\n\n treatment = np.matmul(confounder, self.weights['confounder=>treatment']) + np.random.randn(sample_size, len(self.treatment)) + self.bias['confounder=>treatment']\n if self.treatment_is_binary:\n treatment = self.convert_to_binary(treatment)\n\n outcome = np.matmul(confounder, self.weights['confounder=>outcome']) + np.matmul(effect_modifier, self.weights['effect_modifier=>outcome']) + np.matmul(treatment, self.weights['treatment=>outcome']) + self.bias['confounder=>outcome']\n y_control = np.matmul(confounder, self.weights['confounder=>outcome']) + np.matmul(effect_modifier, self.weights['effect_modifier=>outcome']) + np.matmul(control_value, self.weights['treatment=>outcome']) + self.bias['confounder=>outcome']\n y_treatment = np.matmul(confounder, self.weights['confounder=>outcome']) + np.matmul(effect_modifier, self.weights['effect_modifier=>outcome']) + np.matmul(treatment_value, self.weights['treatment=>outcome']) + self.bias['confounder=>outcome']\n self.true_value = np.mean(y_treatment - y_control, axis=0)\n\n return pd.DataFrame(np.hstack( (effect_modifier, confounder, treatment, outcome) ), columns=self.effect_modifier + self.confounder + self.treatment + self.outcome)\n\n def generation_process(self):\n self.weights['confounder=>treatment'] = self.generate_weights( (len(self.confounder), len(self.treatment)) )\n self.weights['confounder=>treatment'][0,] = self.weights['confounder=>treatment'][0,] + 100 # increasing weight of the first confounder\n self.weights['confounder=>outcome'] = self.generate_weights( (len(self.confounder), len(self.outcome)) )\n self.weights['confounder=>outcome'][0,]= self.weights['confounder=>outcome'][0,] + 100\n self.weights['effect_modifier=>outcome'] = self.generate_weights( (len(self.effect_modifier), len(self.outcome)) )\n self.weights['treatment=>outcome'] = self.generate_weights( (len(self.treatment), len(self.outcome)) )\n\n self.bias['confounder=>treatment'] = self.generate_bias( len(self.treatment) )\n self.bias['confounder=>outcome'] = self.generate_bias( len(self.outcome) )\n\n def generate_weights(self, dimensions):\n return np.random.randn(*dimensions)\n\n def generate_bias(self, dimensions):\n return np.random.randn(dimensions)\n\n def __str__(self):\n rep = super().__str__()\n\n header = \"\"\"\n Linear Data Generating Process\n -------------------------------\n \"\"\"\n\n rep = header + rep\n\n return rep\n" ]
[ [ "numpy.hstack", "numpy.random.randn", "numpy.matmul", "numpy.mean" ] ]
calumrussell/qstrader
[ "826d3eeb63b95b9d8587f5e2152c030f2c57bbba" ]
[ "tests/unit/broker/transaction/test_transaction.py" ]
[ "import pandas as pd\n\nfrom qstrader.asset.equity import Equity\nfrom qstrader.broker.transaction.transaction import Transaction\n\n\ndef test_transaction_representation():\n \"\"\"\n Tests that the Transaction representation\n correctly recreates the object.\n \"\"\"\n dt = pd.Timestamp('2015-05-06')\n asset = Equity('Apple, Inc.', 'AAPL')\n transaction = Transaction(\n asset, quantity=168, dt=dt, price=56.18, order_id=153\n )\n exp_repr = (\n \"Transaction(asset=Equity(name='Apple, Inc.', symbol='AAPL', tax_exempt=True), \"\n \"quantity=168, dt=2015-05-06 00:00:00, price=56.18, order_id=153)\"\n )\n assert repr(transaction) == exp_repr\n" ]
[ [ "pandas.Timestamp" ] ]
gyan42/ml-serving-playground
[ "262837afc78d8c59954b17efc8e3fc027393bf76" ]
[ "streamlit/gans/backend/model/dcgan.py" ]
[ "\nimport torch\nimport torchvision.transforms as Transforms\n\nuse_gpu = True if torch.cuda.is_available() else False\n\nmodel = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', 'DCGAN', pretrained=True, useGPU=use_gpu)\n\nnum_images = 1\n\ndef dcgan():\n noise, _ = model.buildNoiseData(num_images)\n with torch.no_grad():\n generated_images = model.test(noise)\n\n transform = Transforms.Compose([Transforms.Normalize((-1., -1., -1.), (2, 2, 2)),\n Transforms.ToPILImage()])\n generated_images = generated_images[0]\n generated_images = transform(generated_images.clamp(min=-1, max=1))\n return generated_images" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.hub.load" ] ]
charmsoya/pytorch-3dunet
[ "07a8dabf988ac3df110a3c10db6ed5fb769498d9" ]
[ "pytorch3dunet/unet3d/metrics.py" ]
[ "import importlib\nimport os\nimport time\n\nimport hdbscan\nimport numpy as np\nimport torch\nfrom skimage import measure\nfrom skimage.metrics import adapted_rand_error, peak_signal_noise_ratio\nfrom sklearn.cluster import MeanShift\n\nfrom pytorch3dunet.unet3d.losses import compute_per_channel_dice\nfrom pytorch3dunet.unet3d.seg_metrics import AveragePrecision, Accuracy\nfrom pytorch3dunet.unet3d.utils import get_logger, expand_as_one_hot, plot_segm, convert_to_numpy\nimport ipdb\nlogger = get_logger('EvalMetric')\n\n\nclass DiceCoefficient:\n \"\"\"Computes Dice Coefficient.\n Generalized to multiple channels by computing per-channel Dice Score\n (as described in https://arxiv.org/pdf/1707.03237.pdf) and theTn simply taking the average.\n Input is expected to be probabilities instead of logits.\n This metric is mostly useful when channels contain the same semantic class (e.g. affinities computed with different offsets).\n DO NOT USE this metric when training with DiceLoss, otherwise the results will be biased towards the loss.\n \"\"\"\n\n def __init__(self, epsilon=1e-6, **kwargs):\n self.epsilon = epsilon\n\n def __call__(self, input, target):\n # Average across channels in order to get the final score\n return torch.mean(compute_per_channel_dice(input, target, epsilon=self.epsilon))\n\n\nclass MeanIoU:\n \"\"\"\n Computes IoU for each class separately and then averages over all classes.\n \"\"\"\n\n def __init__(self, skip_channels=(), ignore_index=None, **kwargs):\n \"\"\"\n :param skip_channels: list/tuple of channels to be ignored from the IoU computation\n :param ignore_index: id of the label to be ignored from IoU computation\n \"\"\"\n self.ignore_index = ignore_index\n self.skip_channels = skip_channels\n\n def __call__(self, input, target):\n \"\"\"\n :param input: 5D probability maps torch float tensor (NxCxDxHxW)\n :param target: 4D or 5D ground truth torch tensor. 4D (NxDxHxW) tensor will be expanded to 5D as one-hot\n :return: intersection over union averaged over all channels\n \"\"\"\n assert input.dim() == 5\n\n n_classes = input.size()[1]\n\n if target.dim() == 4:\n target = expand_as_one_hot(target, C=n_classes, ignore_index=self.ignore_index)\n\n assert input.size() == target.size()\n\n per_batch_iou = []\n for _input, _target in zip(input, target):\n binary_prediction = self._binarize_predictions(_input, n_classes)\n\n if self.ignore_index is not None:\n # zero out ignore_index\n mask = _target == self.ignore_index\n binary_prediction[mask] = 0\n _target[mask] = 0\n\n # convert to uint8 just in case\n binary_prediction = binary_prediction.byte()\n _target = _target.byte()\n\n per_channel_iou = []\n for c in range(n_classes):\n if c in self.skip_channels:\n continue\n\n per_channel_iou.append(self._jaccard_index(binary_prediction[c], _target[c]))\n\n assert per_channel_iou, \"All channels were ignored from the computation\"\n mean_iou = torch.mean(torch.tensor(per_channel_iou))\n per_batch_iou.append(per_channel_iou)\n return torch.mean(torch.tensor(per_batch_iou), axis = 0)\n\n def _binarize_predictions(self, input, n_classes):\n \"\"\"\n Puts 1 for the class/channel with the highest probability and 0 in other channels. 
Returns byte tensor of the\n same size as the input tensor.\n \"\"\"\n if n_classes == 1:\n # for single channel input just threshold the probability map\n result = input > 0.5\n return result.long()\n\n _, max_index = torch.max(input, dim=0, keepdim=True)\n return torch.zeros_like(input, dtype=torch.uint8).scatter_(0, max_index, 1)\n\n def _jaccard_index(self, prediction, target):\n \"\"\"\n Computes IoU for a given target and prediction tensors\n \"\"\"\n return torch.sum(prediction & target).float() / torch.clamp(torch.sum(prediction | target).float(), min=1e-8)\n\n\nclass AdaptedRandError:\n \"\"\"\n A functor which computes an Adapted Rand error as defined by the SNEMI3D contest\n (http://brainiac2.mit.edu/SNEMI3D/evaluation).\n\n This is a generic implementation which takes the input, converts it to the segmentation image (see `input_to_segm()`)\n and then computes the ARand between the segmentation and the ground truth target. Depending on one's use case\n it's enough to extend this class and implement the `input_to_segm` method.\n\n Args:\n use_last_target (bool): use only the last channel from the target to compute the ARand\n save_plots (bool): save predicted segmentation (result from `input_to_segm`) together with GT segmentation as a PNG\n plots_dir (string): directory where the plots are to be saved\n \"\"\"\n\n def __init__(self, use_last_target=False, save_plots=False, plots_dir='.', **kwargs):\n self.use_last_target = use_last_target\n self.save_plots = save_plots\n self.plots_dir = plots_dir\n if not os.path.exists(plots_dir) and save_plots:\n os.makedirs(plots_dir)\n\n def __call__(self, input, target):\n \"\"\"\n Compute ARand Error for each input, target pair in the batch and return the mean value.\n\n Args:\n input (torch.tensor): 5D (NCDHW) output from the network\n target (torch.tensor): 4D (NDHW) ground truth segmentation\n\n Returns:\n average ARand Error across the batch\n \"\"\"\n def _arand_err(gt, seg):\n n_seg = len(np.unique(seg))\n if n_seg == 1:\n return 0.\n return adapted_rand_error(gt, seg)[0]\n\n # converts input and target to numpy arrays\n input, target = convert_to_numpy(input, target)\n if self.use_last_target:\n target = target[:, -1, ...] # 4D\n else:\n # use 1st target channel\n target = target[:, 0, ...] 
# 4D\n\n # ensure target is of integer type\n target = target.astype(np.int)\n\n per_batch_arand = []\n for _input, _target in zip(input, target):\n n_clusters = len(np.unique(_target))\n # skip ARand eval if there is only one label in the patch due to the zero-division error in Arand impl\n # xxx/skimage/metrics/_adapted_rand_error.py:70: RuntimeWarning: invalid value encountered in double_scalars\n # precision = sum_p_ij2 / sum_a2\n logger.info(f'Number of ground truth clusters: {n_clusters}')\n if n_clusters == 1:\n logger.info('Skipping ARandError computation: only 1 label present in the ground truth')\n per_batch_arand.append(0.)\n continue\n\n # convert _input to segmentation CDHW\n segm = self.input_to_segm(_input)\n assert segm.ndim == 4\n\n if self.save_plots:\n # save predicted and ground truth segmentation\n plot_segm(segm, _target, self.plots_dir)\n\n # compute per channel arand and return the minimum value\n per_channel_arand = [_arand_err(_target, channel_segm) for channel_segm in segm]\n logger.info(f'Min ARand for channel: {np.argmin(per_channel_arand)}')\n per_batch_arand.append(np.min(per_channel_arand))\n\n # return mean arand error\n mean_arand = torch.mean(torch.tensor(per_batch_arand))\n logger.info(f'ARand: {mean_arand.item()}')\n return mean_arand\n\n def input_to_segm(self, input):\n \"\"\"\n Converts input tensor (output from the network) to the segmentation image. E.g. if the input is the boundary\n pmaps then one option would be to threshold it and run connected components in order to return the segmentation.\n\n :param input: 4D tensor (CDHW)\n :return: segmentation volume either 4D (segmentation per channel)\n \"\"\"\n # by deafult assume that input is a segmentation volume itself\n return input\n\n\nclass BoundaryAdaptedRandError(AdaptedRandError):\n \"\"\"\n Compute ARand between the input boundary map and target segmentation.\n Boundary map is thresholded, and connected components is run to get the predicted segmentation\n \"\"\"\n\n def __init__(self, thresholds=None, use_last_target=True, input_channel=None, invert_pmaps=True,\n save_plots=False, plots_dir='.', **kwargs):\n super().__init__(use_last_target=use_last_target, save_plots=save_plots, plots_dir=plots_dir, **kwargs)\n if thresholds is None:\n thresholds = [0.3, 0.4, 0.5, 0.6]\n assert isinstance(thresholds, list)\n self.thresholds = thresholds\n self.input_channel = input_channel\n self.invert_pmaps = invert_pmaps\n\n def input_to_segm(self, input):\n if self.input_channel is not None:\n input = np.expand_dims(input[self.input_channel], axis=0)\n\n segs = []\n for predictions in input:\n for th in self.thresholds:\n # threshold probability maps\n predictions = predictions > th\n\n if self.invert_pmaps:\n # for connected component analysis we need to treat boundary signal as background\n # assign 0-label to boundary mask\n predictions = np.logical_not(predictions)\n\n predictions = predictions.astype(np.uint8)\n # run connected components on the predicted mask; consider only 1-connectivity\n seg = measure.label(predictions, background=0, connectivity=1)\n segs.append(seg)\n\n return np.stack(segs)\n\n\nclass GenericAdaptedRandError(AdaptedRandError):\n def __init__(self, input_channels, thresholds=None, use_last_target=True, invert_channels=None,\n save_plots=False, plots_dir='.', **kwargs):\n\n super().__init__(use_last_target=use_last_target, save_plots=save_plots, plots_dir=plots_dir, **kwargs)\n assert isinstance(input_channels, list) or isinstance(input_channels, tuple)\n self.input_channels 
= input_channels\n if thresholds is None:\n thresholds = [0.3, 0.4, 0.5, 0.6]\n assert isinstance(thresholds, list)\n self.thresholds = thresholds\n if invert_channels is None:\n invert_channels = []\n self.invert_channels = invert_channels\n\n def input_to_segm(self, input):\n # pick only the channels specified in the input_channels\n results = []\n for i in self.input_channels:\n c = input[i]\n # invert channel if necessary\n if i in self.invert_channels:\n c = 1 - c\n results.append(c)\n\n input = np.stack(results)\n\n segs = []\n for predictions in input:\n for th in self.thresholds:\n # run connected components on the predicted mask; consider only 1-connectivity\n seg = measure.label((predictions > th).astype(np.uint8), background=0, connectivity=1)\n segs.append(seg)\n\n return np.stack(segs)\n\n\nclass EmbeddingsAdaptedRandError(AdaptedRandError):\n def __init__(self, min_cluster_size=100, min_samples=None, metric='euclidean', cluster_selection_method='eom',\n save_plots=False, plots_dir='.', **kwargs):\n super().__init__(save_plots=save_plots, plots_dir=plots_dir, **kwargs)\n\n logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')\n self.clustering = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,\n cluster_selection_method=cluster_selection_method)\n\n def input_to_segm(self, embeddings):\n logger.info(\"Computing clusters with HDBSCAN...\")\n\n # shape of the output segmentation\n output_shape = embeddings.shape[1:]\n # reshape (C, D, H, W) -> (C, D * H * W) and transpose\n flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()\n\n # perform clustering and reshape in order to get the segmentation volume\n start = time.time()\n segm = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)\n logger.info(f'Number of clusters found by HDBSCAN: {np.max(segm)}. Duration: {time.time() - start} sec.')\n\n # assign noise to new cluster (by default hdbscan gives -1 label to outliers)\n noise_label = np.max(segm) + 1\n segm[segm == -1] = noise_label\n\n return np.expand_dims(segm, axis=0)\n\n\n# Just for completeness, however sklean MeanShift implementation is just too slow for clustering embeddings\nclass EmbeddingsMeanShiftAdaptedRandError(AdaptedRandError):\n def __init__(self, bandwidth, save_plots=False, plots_dir='.', **kwargs):\n super().__init__(save_plots=save_plots, plots_dir=plots_dir, **kwargs)\n logger.info(f'MeanShift params: bandwidth: {bandwidth}')\n # use bin_seeding to speedup the mean-shift significantly\n self.clustering = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n\n def input_to_segm(self, embeddings):\n logger.info(\"Computing clusters with MeanShift...\")\n\n # shape of the output segmentation\n output_shape = embeddings.shape[1:]\n # reshape (C, D, H, W) -> (C, D * H * W) and transpose\n flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()\n\n # perform clustering and reshape in order to get the segmentation volume\n start = time.time()\n segm = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)\n logger.info(f'Number of clusters found by MeanShift: {np.max(segm)}. 
Duration: {time.time() - start} sec.')\n return np.expand_dims(segm, axis=0)\n\n\nclass GenericAveragePrecision:\n def __init__(self, min_instance_size=None, use_last_target=False, metric='ap', **kwargs):\n self.min_instance_size = min_instance_size\n self.use_last_target = use_last_target\n assert metric in ['ap', 'acc']\n if metric == 'ap':\n # use AveragePrecision\n self.metric = AveragePrecision()\n else:\n # use Accuracy at 0.5 IoU\n self.metric = Accuracy(iou_threshold=0.5)\n\n def __call__(self, input, target):\n assert isinstance(input, torch.Tensor) and isinstance(target, torch.Tensor)\n assert input.dim() == 5\n assert target.dim() == 5\n\n input, target = convert_to_numpy(input, target)\n if self.use_last_target:\n target = target[:, -1, ...] # 4D\n else:\n # use 1st target channel\n target = target[:, 0, ...] # 4D\n\n batch_aps = []\n # iterate over the batch\n for inp, tar in zip(input, target):\n segs = self.input_to_seg(inp) # 4D\n # convert target to seg\n tar = self.target_to_seg(tar)\n # filter small instances if necessary\n tar = self._filter_instances(tar)\n\n # compute average precision per channel\n segs_aps = [self.metric(self._filter_instances(seg), tar) for seg in segs]\n\n logger.info(f'Max Average Precision for channel: {np.argmax(segs_aps)}')\n # save max AP\n batch_aps.append(np.max(segs_aps))\n\n return torch.tensor(batch_aps).mean()\n\n def _filter_instances(self, input):\n \"\"\"\n Filters instances smaller than 'min_instance_size' by overriding them with 0-index\n :param input: input instance segmentation\n \"\"\"\n if self.min_instance_size is not None:\n labels, counts = np.unique(input, return_counts=True)\n for label, count in zip(labels, counts):\n if count < self.min_instance_size:\n input[input == label] = 0\n return input\n\n def input_to_seg(self, input):\n raise NotImplementedError\n\n def target_to_seg(self, target):\n return target\n\n\nclass BlobsAveragePrecision(GenericAveragePrecision):\n \"\"\"\n Computes Average Precision given foreground prediction and ground truth instance segmentation.\n \"\"\"\n\n def __init__(self, thresholds=None, metric='ap', min_instance_size=None, input_channel=0, **kwargs):\n super().__init__(min_instance_size=min_instance_size, use_last_target=True, metric=metric)\n if thresholds is None:\n thresholds = [0.4, 0.5, 0.6, 0.7, 0.8]\n assert isinstance(thresholds, list)\n self.thresholds = thresholds\n self.input_channel = input_channel\n\n def input_to_seg(self, input):\n input = input[self.input_channel]\n segs = []\n for th in self.thresholds:\n # threshold and run connected components\n mask = (input > th).astype(np.uint8)\n seg = measure.label(mask, background=0, connectivity=1)\n segs.append(seg)\n return np.stack(segs)\n\n\nclass BlobsBoundaryAveragePrecision(GenericAveragePrecision):\n \"\"\"\n Computes Average Precision given foreground prediction, boundary prediction and ground truth instance segmentation.\n Segmentation mask is computed as (P_mask - P_boundary) > th followed by a connected component\n \"\"\"\n def __init__(self, thresholds=None, metric='ap', min_instance_size=None, **kwargs):\n super().__init__(min_instance_size=min_instance_size, use_last_target=True, metric=metric)\n if thresholds is None:\n thresholds = [0.3, 0.4, 0.5, 0.6, 0.7]\n assert isinstance(thresholds, list)\n self.thresholds = thresholds\n\n def input_to_seg(self, input):\n # input = P_mask - P_boundary\n input = input[0] - input[1]\n segs = []\n for th in self.thresholds:\n # threshold and run connected components\n mask = 
(input > th).astype(np.uint8)\n seg = measure.label(mask, background=0, connectivity=1)\n segs.append(seg)\n return np.stack(segs)\n\n\nclass BoundaryAveragePrecision(GenericAveragePrecision):\n \"\"\"\n Computes Average Precision given boundary prediction and ground truth instance segmentation.\n \"\"\"\n\n def __init__(self, thresholds=None, min_instance_size=None, input_channel=0, **kwargs):\n super().__init__(min_instance_size=min_instance_size, use_last_target=True)\n if thresholds is None:\n thresholds = [0.3, 0.4, 0.5, 0.6]\n assert isinstance(thresholds, list)\n self.thresholds = thresholds\n self.input_channel = input_channel\n\n def input_to_seg(self, input):\n input = input[self.input_channel]\n segs = []\n for th in self.thresholds:\n seg = measure.label(np.logical_not(input > th).astype(np.uint8), background=0, connectivity=1)\n segs.append(seg)\n return np.stack(segs)\n\n\nclass PSNR:\n \"\"\"\n Computes Peak Signal to Noise Ratio. Use e.g. as an eval metric for denoising task\n \"\"\"\n\n def __init__(self, **kwargs):\n pass\n\n def __call__(self, input, target):\n input, target = convert_to_numpy(input, target)\n return peak_signal_noise_ratio(target, input)\n\n\nclass WithinAngleThreshold:\n \"\"\"\n Returns the percentage of predicted directions which are more than 'angle_threshold' apart from the ground\n truth directions. 'angle_threshold' is expected to be given in degrees not radians.\n \"\"\"\n\n def __init__(self, angle_threshold, **kwargs):\n self.threshold_radians = angle_threshold / 360 * np.pi\n\n def __call__(self, inputs, targets):\n assert isinstance(inputs, list)\n if len(inputs) == 1:\n targets = [targets]\n assert len(inputs) == len(targets)\n\n within_count = 0\n total_count = 0\n for input, target in zip(inputs, targets):\n # normalize and multiply by the stability_coeff in order to prevent NaN results from torch.acos\n stability_coeff = 0.999999\n input = input / torch.norm(input, p=2, dim=1).detach().clamp(min=1e-8) * stability_coeff\n target = target / torch.norm(target, p=2, dim=1).detach().clamp(min=1e-8) * stability_coeff\n # compute cosine map\n cosines = (input * target).sum(dim=1)\n error_radians = torch.acos(cosines)\n # increase by the number of directions within the threshold\n within_count += error_radians[error_radians < self.threshold_radians].numel()\n # increase by the number of all directions\n total_count += error_radians.numel()\n\n return torch.tensor(within_count / total_count)\n\n\nclass InverseAngularError:\n def __init__(self, **kwargs):\n pass\n\n def __call__(self, inputs, targets, **kwargs):\n assert isinstance(inputs, list)\n if len(inputs) == 1:\n targets = [targets]\n assert len(inputs) == len(targets)\n\n total_error = 0\n for input, target in zip(inputs, targets):\n # normalize and multiply by the stability_coeff in order to prevent NaN results from torch.acos\n stability_coeff = 0.999999\n input = input / torch.norm(input, p=2, dim=1).detach().clamp(min=1e-8) * stability_coeff\n target = target / torch.norm(target, p=2, dim=1).detach().clamp(min=1e-8) * stability_coeff\n # compute cosine map\n cosines = (input * target).sum(dim=1)\n error_radians = torch.acos(cosines)\n total_error += error_radians.sum()\n\n return torch.tensor(1. 
/ total_error)\n\n\ndef get_evaluation_metric(config):\n \"\"\"\n Returns the evaluation metric function based on provided configuration\n :param config: (dict) a top level configuration object containing the 'eval_metric' key\n :return: an instance of the evaluation metric\n \"\"\"\n\n def _metric_class(class_name):\n m = importlib.import_module('pytorch3dunet.unet3d.metrics')\n clazz = getattr(m, class_name)\n return clazz\n\n assert 'eval_metric' in config, 'Could not find evaluation metric configuration'\n metric_config = config['eval_metric']\n metric_class = _metric_class(metric_config['name'])\n return metric_class(**metric_config)\n" ]
[ [ "numpy.max", "numpy.logical_not", "torch.acos", "numpy.argmin", "torch.max", "torch.norm", "sklearn.cluster.MeanShift", "numpy.min", "numpy.stack", "torch.tensor", "numpy.argmax", "numpy.unique", "torch.zeros_like", "numpy.expand_dims", "torch.sum" ] ]
LizhengMathAi/svgd
[ "9606388cf4565e4fafe82869feef7a7ba8986ef2" ]
[ "experiments/resnet_adam.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\n\ndef bn_layer(input_tensor):\n size = input_tensor.get_shape().as_list()[-1]\n\n mean, variance = tf.nn.moments(input_tensor, axes=[0, 1, 2])\n beta = tf.Variable(initial_value=tf.zeros(size, dtype=tf.float32), name=\"beta\")\n gamma = tf.Variable(initial_value=tf.ones(size, dtype=tf.float32), name=\"gamma\")\n\n return tf.nn.batch_normalization(input_tensor, mean, variance, beta, gamma, 0.001)\n\n\ndef conv_layer(input_tensor, filter_shape, stride, transpose=False):\n \"\"\"\n A helper function to conv, batch normalize and relu the input tensor sequentially\n :param input_tensor: 4D tensor\n :param filter_shape: list. [filter_height, filter_width, in_channels, out_channels]\n :param stride: stride size for conv\n :param transpose: bool\n :return: 4D tensor. Y = conv2d(ReLU(batch_normalize(X))) or Y = ReLU(batch_normalize(conv2d(X)))\n \"\"\"\n if transpose:\n assert input_tensor.get_shape().as_list()[-1] == filter_shape[2]\n\n bn_tensor = bn_layer(input_tensor)\n\n relu_tensor = tf.nn.relu(bn_tensor)\n\n kernel_init = tf.truncated_normal(filter_shape, stddev=np.sqrt(2 / (filter_shape[2] + filter_shape[3])))\n kernel = tf.Variable(initial_value=kernel_init, dtype=tf.float32, name=\"kernel\")\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.abs(kernel)))\n\n output_tensor = tf.nn.conv2d(relu_tensor, kernel, strides=[1, stride, stride, 1], padding=\"SAME\")\n else:\n assert input_tensor.get_shape().as_list()[-1] == filter_shape[2]\n\n kernel_init = tf.truncated_normal(filter_shape, stddev=np.sqrt(2 / (filter_shape[2] + filter_shape[3])))\n kernel = tf.Variable(initial_value=kernel_init, dtype=tf.float32, name=\"kernel\")\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.abs(kernel)))\n\n conv2d_tensor = tf.nn.conv2d(input_tensor, kernel, strides=[1, stride, stride, 1], padding='SAME')\n\n bn_tensor = bn_layer(conv2d_tensor)\n\n output_tensor = tf.nn.relu(bn_tensor)\n return output_tensor\n\n\ndef residual_block(input_tensor, output_channel, first_block=False):\n \"\"\"\n Defines a residual block in ResNet\n :param input_tensor: 4D tensor\n :param output_channel: int. 
return_tensor.get_shape().as_list()[-1] = output_channel\n :param first_block: if this is the first residual block of the whole network\n :return: 4D tensor.\n \"\"\"\n input_channel = input_tensor.get_shape().as_list()[-1]\n\n if input_channel * 2 == output_channel:\n increase_dim = True\n stride = 2\n elif input_channel == output_channel:\n increase_dim = False\n stride = 1\n else:\n raise ValueError('Output and input channel does not match in residual blocks!!!')\n\n with tf.name_scope('conv_1'):\n if first_block:\n kernel_init = tf.truncated_normal(\n [3, 3, input_channel, output_channel], stddev=np.sqrt(2 / (input_channel + output_channel)))\n kernel = tf.Variable(initial_value=kernel_init, dtype=tf.float32, name=\"kernel\")\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.abs(kernel)))\n conv1 = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding=\"SAME\")\n else:\n conv1 = conv_layer(input_tensor, [3, 3, input_channel, output_channel], stride, transpose=True)\n\n with tf.name_scope('conv_2'):\n conv2 = conv_layer(conv1, [3, 3, output_channel, output_channel], 1, transpose=True)\n\n if increase_dim is True:\n pooled_tensor = tf.nn.avg_pool(\n input_tensor, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n padded_tensor = tf.pad(\n pooled_tensor, [[0, 0], [0, 0], [0, 0], [input_channel // 2, input_channel // 2]])\n else:\n padded_tensor = input_tensor\n\n output_tensor = conv2 + padded_tensor\n return output_tensor\n\n\ndef flat(input_tensor):\n with tf.name_scope(\"flat\"):\n bn_tensor = bn_layer(input_tensor)\n\n relu_tensor = tf.nn.relu(bn_tensor)\n\n output_tensor = tf.reduce_mean(relu_tensor, [1, 2])\n assert output_tensor.get_shape().as_list()[-1:] == [64]\n return output_tensor\n\n\ndef fc_layer(input_tensor, out_channels):\n \"\"\"\n Helper function to do batch normalization of full connection layer\n :param input_tensor: 2D tensor\n :param out_channels: int\n :return: the 2D tensor after being normalized\n \"\"\"\n weights_shape = [input_tensor.get_shape().as_list()[-1], out_channels]\n\n weights_init = tf.truncated_normal(weights_shape, stddev=np.sqrt(2 / (weights_shape[0] + weights_shape[1])))\n weights = tf.Variable(initial_value=weights_init, dtype=tf.float32, name=\"weights\")\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.abs(weights)))\n\n mul_tensor = tf.matmul(input_tensor, weights)\n\n bias = tf.Variable(initial_value=tf.zeros((weights_shape[1]), dtype=tf.float32), name=\"bias\")\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.abs(bias)))\n add_tensor = mul_tensor + bias\n\n return add_tensor\n\n\ndef inference(input_tensor_batch, n):\n \"\"\"\n The main function that defines the ResNet. total layers = 1 + 2n + 2n + 2n +1 = 6n + 2\n :param input_tensor_batch: 4D tensor\n :param n: num_residual_blocks\n :return: last layer in the network. 
Not softmax-ed\n \"\"\"\n with tf.name_scope('conv0'):\n tensor = conv_layer(input_tensor_batch, [3, 3, 3, 16], 1, transpose=False)\n\n for i in range(n):\n with tf.name_scope('conv1_%d' % i):\n if i == 0:\n tensor = residual_block(tensor, 16, first_block=True)\n else:\n tensor = residual_block(tensor, 16)\n\n for i in range(n):\n with tf.name_scope('conv2_%d' % i):\n tensor = residual_block(tensor, 32)\n\n for i in range(n):\n with tf.name_scope('conv3_%d' % i):\n tensor = residual_block(tensor, 64)\n assert tensor.get_shape().as_list()[1:] == [8, 8, 64]\n\n tensor = flat(tensor)\n\n with tf.name_scope('fc'):\n logits = fc_layer(tensor, 10)\n\n return logits\n\n\ndef gen_loss(labels, logits, reg_rate):\n with tf.name_scope(\"loss\"):\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n loss_reg = loss + reg_rate * tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n return loss, loss_reg\n\n\ndef gen_accuracy(labels, logits):\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(logits, 1), labels)\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n return accuracy\n\n\ndef gen_train_op(loss, lr):\n return tf.train.AdamOptimizer(lr).minimize(loss)\n\n\ntf.app.flags.DEFINE_float(\"p1\", 0.001, \"p1.\")\ntf.app.flags.DEFINE_float(\"p2\", 0.0001, \"p2.\")\ntf.app.flags.DEFINE_float(\"p3\", 0.00005, \"p3.\")\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main():\n from datetime import datetime\n import pandas as pd\n import cifar10\n\n images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name=\"images\")\n labels = tf.placeholder(tf.int64, shape=[None], name=\"labels\")\n\n reg_rate = tf.placeholder(tf.float32, shape=(), name=\"reg_rate\")\n lr = tf.placeholder(tf.float32, shape=(), name=\"lr\")\n\n logits = inference(images, 3)\n\n count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])\n print(\"count: \", count)\n\n loss, loss_reg = gen_loss(labels, logits, reg_rate)\n accuracy = gen_accuracy(labels, logits)\n train_op = gen_train_op(loss_reg, lr)\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n\n data = cifar10.Cifar10()\n test_images, test_labels = data.test_batch()\n lr_val = FLAGS.p1\n\n df = pd.DataFrame(columns=[\"datetime\", \"loss\", \"accuracy\"])\n for i in range(50000):\n if i % 1000 == 0:\n loss_list, accuracy_list = [], []\n for j in range(10):\n batch_test_images = test_images[j * 1000:(j + 1) * 1000]\n batch_test_labels = test_labels[j * 1000:(j + 1) * 1000]\n l, a = sess.run([loss, accuracy], feed_dict={\n images: batch_test_images, labels: batch_test_labels})\n loss_list.append(l)\n accuracy_list.append(a)\n test_loss = np.mean(loss_list)\n test_accuracy = 100 * np.mean(accuracy_list)\n print(\"{}\\tstep:{}\\tloss:{:.4f}\\taccuracy:{:.2f}%\".format(datetime.now(), i, test_loss, test_accuracy))\n df.loc[i] = [str(datetime.now()), str(test_loss), str(test_accuracy)]\n\n if i == 32000:\n lr_val = FLAGS.p2\n elif i == 42000:\n lr_val = FLAGS.p3\n\n batch_images, batch_labels = data.next_batch(128)\n sess.run(train_op, feed_dict={\n images: batch_images, labels: batch_labels, reg_rate: 1e-4, lr: lr_val})\n\n df.to_csv(\"./logs/csv/resnet_adam_({})_({})_({}).csv\".format(FLAGS.p1, FLAGS.p2, FLAGS.p3))\n\n\nmain()\n" ]
[ [ "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.ones", "tensorflow.nn.moments", "numpy.mean", "tensorflow.global_variables_initializer", "tensorflow.nn.avg_pool", "tensorflow.cast", "tensorflow.trainable_variables", "pandas.DataFrame", "tensorflow.argmax", "tensorflow.Variable", "numpy.sqrt", "tensorflow.pad", "tensorflow.nn.batch_normalization", "tensorflow.get_collection", "tensorflow.abs", "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "tensorflow.nn.relu", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.name_scope", "tensorflow.app.flags.DEFINE_float", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.reduce_mean" ] ]
JLefortBesnard/nilearn
[ "5385ad69337a12463baa2c60d408d1f3bb95fcb3" ]
[ "nilearn/datasets/func.py" ]
[ "\"\"\"\nDownloading NeuroImaging datasets: functional datasets (task + resting-state)\n\"\"\"\nimport fnmatch\nimport glob\nimport warnings\nimport os\nimport re\nimport json\n\nimport nibabel as nib\nimport numpy as np\nimport numbers\n\nfrom io import BytesIO\n\nimport nibabel\nimport pandas as pd\nfrom scipy.io import loadmat\nfrom scipy.io.matlab.miobase import MatReadError\nfrom sklearn.utils import Bunch, deprecated\n\nfrom .utils import (_get_dataset_dir, _fetch_files, _get_dataset_descr,\n _read_md5_sum_file, _tree, _filter_columns, _fetch_file, _uncompress_file)\nfrom .._utils import check_niimg\nfrom .._utils.numpy_conversions import csv_to_array\nfrom nilearn.image import get_data\n\n\ndef fetch_haxby(data_dir=None, subjects=(2,),\n fetch_stimuli=False, url=None, resume=True, verbose=1):\n \"\"\"Download and loads complete haxby dataset.\n\n See :footcite:`Haxby2425`.\n\n Parameters\n ----------\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n subjects : list or int, optional\n Either a list of subjects or the number of subjects to load, from 1 to\n 6. By default, 2nd subject will be loaded. Empty list returns no subject\n data. Default=(2,).\n\n fetch_stimuli : boolean, optional\n Indicate if stimuli images must be downloaded. They will be presented\n as a dictionary of categories. Default=False.\n\n resume : bool, optional\n Whether to resume download of a partly-downloaded file.\n Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'anat': string list. Paths to anatomic images.\n - 'func': string list. Paths to nifti file with bold data.\n - 'session_target': string list. Paths to text file containing session and target data.\n - 'mask': string. Path to fullbrain mask file.\n - 'mask_vt': string list. Paths to nifti ventral temporal mask file.\n - 'mask_face': string list. Paths to nifti ventral temporal mask file.\n - 'mask_house': string list. Paths to nifti ventral temporal mask file.\n - 'mask_face_little': string list. Paths to nifti ventral temporal mask file.\n - 'mask_house_little': string list. Paths to nifti ventral temporal mask file.\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n PyMVPA provides a tutorial making use of this dataset:\n http://www.pymvpa.org/tutorial.html\n\n More information about its structure:\n http://dev.pymvpa.org/datadb/haxby2001.html\n\n See `additional information\n <http://www.sciencemag.org/content/293/5539/2425>`\n\n Run 8 in subject 5 does not contain any task labels.\n The anatomical image for subject 6 is unavailable.\n\n \"\"\"\n if isinstance(subjects, numbers.Number) and subjects > 6:\n subjects = 6\n\n if subjects is not None and (isinstance(subjects, list) or\n isinstance(subjects, tuple)):\n for sub_id in subjects:\n if sub_id not in [1, 2, 3, 4, 5, 6]:\n raise ValueError(\"You provided invalid subject id {0} in a \"\n \"list. 
Subjects must be selected in \"\n \"[1, 2, 3, 4, 5, 6]\".format(sub_id))\n\n dataset_name = 'haxby2001'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n # Get the mask\n url_mask = 'https://www.nitrc.org/frs/download.php/7868/mask.nii.gz'\n mask = _fetch_files(data_dir, [('mask.nii.gz', url_mask, {})],\n verbose=verbose)[0]\n\n # Dataset files\n if url is None:\n url = 'http://data.pymvpa.org/datasets/haxby2001/'\n md5sums = _fetch_files(data_dir, [('MD5SUMS', url + 'MD5SUMS', {})],\n verbose=verbose)[0]\n md5sums = _read_md5_sum_file(md5sums)\n\n # definition of dataset files\n sub_files = ['bold.nii.gz', 'labels.txt',\n 'mask4_vt.nii.gz', 'mask8b_face_vt.nii.gz',\n 'mask8b_house_vt.nii.gz', 'mask8_face_vt.nii.gz',\n 'mask8_house_vt.nii.gz', 'anat.nii.gz']\n n_files = len(sub_files)\n\n if subjects is None:\n subjects = []\n\n if isinstance(subjects, numbers.Number):\n subject_mask = np.arange(1, subjects + 1)\n else:\n subject_mask = np.array(subjects)\n\n files = [\n (os.path.join('subj%d' % i, sub_file),\n url + 'subj%d-2010.01.14.tar.gz' % i,\n {'uncompress': True,\n 'md5sum': md5sums.get('subj%d-2010.01.14.tar.gz' % i, None)})\n for i in subject_mask\n for sub_file in sub_files\n if not (sub_file == 'anat.nii.gz' and i == 6) # no anat for sub. 6\n ]\n\n files = _fetch_files(data_dir, files, resume=resume, verbose=verbose)\n\n if ((isinstance(subjects, numbers.Number) and subjects == 6) or\n np.any(subject_mask == 6)):\n files.append(None) # None value because subject 6 has no anat\n\n kwargs = {}\n if fetch_stimuli:\n stimuli_files = [(os.path.join('stimuli', 'README'),\n url + 'stimuli-2010.01.14.tar.gz',\n {'uncompress': True})]\n readme = _fetch_files(data_dir, stimuli_files, resume=resume,\n verbose=verbose)[0]\n kwargs['stimuli'] = _tree(os.path.dirname(readme), pattern='*.jpg',\n dictionary=True)\n\n fdescr = _get_dataset_descr(dataset_name)\n\n # return the data\n return Bunch(\n anat=files[7::n_files],\n func=files[0::n_files],\n session_target=files[1::n_files],\n mask_vt=files[2::n_files],\n mask_face=files[3::n_files],\n mask_house=files[4::n_files],\n mask_face_little=files[5::n_files],\n mask_house_little=files[6::n_files],\n mask=mask,\n description=fdescr,\n **kwargs)\n\n\ndef fetch_nyu_rest(n_subjects=None, sessions=[1], data_dir=None, resume=True,\n verbose=1):\n \"\"\"Download and loads the NYU resting-state test-retest dataset.\n\n For documentation see :footcite:`NYUdataset`,\n and see :footcite:`NYUdatasetlist` for download.\n See :footcite:`Shehzad2009restingstate` for citing the paper,\n and see :footcite:`ZUO20101432`,\n and :footcite:`ZUO20102163` for additional references.\n\n Warnings\n --------\n This function is deprecated and will be removed in the 0.8.x\n release, due to lack of preprocessing.\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load. If None is given, all the\n subjects are used.\n\n sessions : iterable of int, optional\n The sessions to load. Load only the first session by default.\n Default=[1].\n\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n resume : bool, optional\n Whether to resume download of a partly-downloaded file.\n Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func': string list. 
Paths to functional images.\n - 'anat_anon': string list. Paths to anatomic images.\n - 'anat_skull': string. Paths to skull-stripped images.\n - 'session': numpy array. List of ids corresponding to images sessions.\n\n Notes\n ------\n This dataset is composed of 3 sessions of 26 participants (11 males).\n For each session, three sets of data are available:\n\n - anatomical:\n\n * anonymized data (defaced thanks to BIRN defacer)\n * skullstripped data (using 3DSkullStrip from AFNI)\n\n - functional\n\n For each participant, 3 resting-state scans of 197 continuous EPI\n functional volumes were collected :\n\n - 39 slices\n - matrix = 64 x 64\n - acquisition voxel size = 3 x 3 x 3 mm\n\n Sessions 2 and 3 were conducted in a single scan session, 45 min\n apart, and were 5-16 months after Scan 1.\n\n All details about this dataset can be found here :\n http://cercor.oxfordjournals.org/content/19/10/2209.full\n\n References\n ----------\n .. footbibliography::\n\n \"\"\"\n warnings.warn(\"fetch_nyu_rest has been deprecated and will \"\n \"be removed in the 0.8.x release.\",\n np.VisibleDeprecationWarning, stacklevel=2)\n\n fa1 = 'http://www.nitrc.org/frs/download.php/1071/NYU_TRT_session1a.tar.gz'\n fb1 = 'http://www.nitrc.org/frs/download.php/1072/NYU_TRT_session1b.tar.gz'\n fa2 = 'http://www.nitrc.org/frs/download.php/1073/NYU_TRT_session2a.tar.gz'\n fb2 = 'http://www.nitrc.org/frs/download.php/1074/NYU_TRT_session2b.tar.gz'\n fa3 = 'http://www.nitrc.org/frs/download.php/1075/NYU_TRT_session3a.tar.gz'\n fb3 = 'http://www.nitrc.org/frs/download.php/1076/NYU_TRT_session3b.tar.gz'\n fa1_opts = {'uncompress': True,\n 'move': os.path.join('session1', 'NYU_TRT_session1a.tar.gz')}\n fb1_opts = {'uncompress': True,\n 'move': os.path.join('session1', 'NYU_TRT_session1b.tar.gz')}\n fa2_opts = {'uncompress': True,\n 'move': os.path.join('session2', 'NYU_TRT_session2a.tar.gz')}\n fb2_opts = {'uncompress': True,\n 'move': os.path.join('session2', 'NYU_TRT_session2b.tar.gz')}\n fa3_opts = {'uncompress': True,\n 'move': os.path.join('session3', 'NYU_TRT_session3a.tar.gz')}\n fb3_opts = {'uncompress': True,\n 'move': os.path.join('session3', 'NYU_TRT_session3b.tar.gz')}\n\n p_anon = os.path.join('anat', 'mprage_anonymized.nii.gz')\n p_skull = os.path.join('anat', 'mprage_skullstripped.nii.gz')\n p_func = os.path.join('func', 'lfo.nii.gz')\n\n subs_a = ['sub05676', 'sub08224', 'sub08889', 'sub09607', 'sub14864',\n 'sub18604', 'sub22894', 'sub27641', 'sub33259', 'sub34482',\n 'sub36678', 'sub38579', 'sub39529']\n subs_b = ['sub45463', 'sub47000', 'sub49401', 'sub52738', 'sub55441',\n 'sub58949', 'sub60624', 'sub76987', 'sub84403', 'sub86146',\n 'sub90179', 'sub94293']\n\n # Generate the list of files by session\n anat_anon_files = [\n [(os.path.join('session1', sub, p_anon), fa1, fa1_opts)\n for sub in subs_a]\n + [(os.path.join('session1', sub, p_anon), fb1, fb1_opts)\n for sub in subs_b],\n [(os.path.join('session2', sub, p_anon), fa2, fa2_opts)\n for sub in subs_a]\n + [(os.path.join('session2', sub, p_anon), fb2, fb2_opts)\n for sub in subs_b],\n [(os.path.join('session3', sub, p_anon), fa3, fa3_opts)\n for sub in subs_a]\n + [(os.path.join('session3', sub, p_anon), fb3, fb3_opts)\n for sub in subs_b]]\n\n anat_skull_files = [\n [(os.path.join('session1', sub, p_skull), fa1, fa1_opts)\n for sub in subs_a]\n + [(os.path.join('session1', sub, p_skull), fb1, fb1_opts)\n for sub in subs_b],\n [(os.path.join('session2', sub, p_skull), fa2, fa2_opts)\n for sub in subs_a]\n + [(os.path.join('session2', sub, 
p_skull), fb2, fb2_opts)\n for sub in subs_b],\n [(os.path.join('session3', sub, p_skull), fa3, fa3_opts)\n for sub in subs_a]\n + [(os.path.join('session3', sub, p_skull), fb3, fb3_opts)\n for sub in subs_b]]\n\n func_files = [\n [(os.path.join('session1', sub, p_func), fa1, fa1_opts)\n for sub in subs_a]\n + [(os.path.join('session1', sub, p_func), fb1, fb1_opts)\n for sub in subs_b],\n [(os.path.join('session2', sub, p_func), fa2, fa2_opts)\n for sub in subs_a]\n + [(os.path.join('session2', sub, p_func), fb2, fb2_opts)\n for sub in subs_b],\n [(os.path.join('session3', sub, p_func), fa3, fa3_opts)\n for sub in subs_a]\n + [(os.path.join('session3', sub, p_func), fb3, fb3_opts)\n for sub in subs_b]]\n\n max_subjects = len(subs_a) + len(subs_b)\n # Check arguments\n if n_subjects is None:\n n_subjects = len(subs_a) + len(subs_b)\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = 25\n\n anat_anon = []\n anat_skull = []\n func = []\n session = []\n for i in sessions:\n if not (i in [1, 2, 3]):\n raise ValueError('NYU dataset session id must be in [1, 2, 3]')\n anat_anon += anat_anon_files[i - 1][:n_subjects]\n anat_skull += anat_skull_files[i - 1][:n_subjects]\n func += func_files[i - 1][:n_subjects]\n session += [i] * n_subjects\n\n dataset_name = 'nyu_rest'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n anat_anon = _fetch_files(data_dir, anat_anon, resume=resume,\n verbose=verbose)\n anat_skull = _fetch_files(data_dir, anat_skull, resume=resume,\n verbose=verbose)\n func = _fetch_files(data_dir, func, resume=resume,\n verbose=verbose)\n\n fdescr = _get_dataset_descr(dataset_name)\n\n return Bunch(anat_anon=anat_anon, anat_skull=anat_skull, func=func,\n session=session, description=fdescr)\n\n\ndef fetch_adhd(n_subjects=30, data_dir=None, url=None, resume=True,\n verbose=1):\n \"\"\"Download and load the ADHD resting-state dataset.\n\n See :footcite:`ADHDdataset`.\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load from maximum of 40 subjects.\n By default, 30 subjects will be loaded. If None is given,\n all 40 subjects will be loaded. Default=30.\n\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n url : string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). Default: None\n\n resume : bool, optional\n Whether to resume download of a partly-downloaded file. Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func': Paths to functional resting-state images\n - 'phenotypic': Explanations of preprocessing steps\n - 'confounds': CSV files containing the nuisance variables\n\n References\n ----------\n .. 
footbibliography::\n\n \"\"\"\n if url is None:\n url = 'https://www.nitrc.org/frs/download.php/'\n\n # Preliminary checks and declarations\n dataset_name = 'adhd'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n ids = ['0010042', '0010064', '0010128', '0021019', '0023008', '0023012',\n '0027011', '0027018', '0027034', '0027037', '1019436', '1206380',\n '1418396', '1517058', '1552181', '1562298', '1679142', '2014113',\n '2497695', '2950754', '3007585', '3154996', '3205761', '3520880',\n '3624598', '3699991', '3884955', '3902469', '3994098', '4016887',\n '4046678', '4134561', '4164316', '4275075', '6115230', '7774305',\n '8409791', '8697774', '9744150', '9750701']\n nitrc_ids = range(7782, 7822)\n max_subjects = len(ids)\n if n_subjects is None:\n n_subjects = max_subjects\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = max_subjects\n ids = ids[:n_subjects]\n nitrc_ids = nitrc_ids[:n_subjects]\n\n opts = dict(uncompress=True)\n\n # Dataset description\n fdescr = _get_dataset_descr(dataset_name)\n\n # First, get the metadata\n phenotypic = ('ADHD200_40subs_motion_parameters_and_phenotypics.csv',\n url + '7781/adhd40_metadata.tgz', opts)\n\n phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume,\n verbose=verbose)[0]\n\n # Load the csv file\n phenotypic = np.genfromtxt(phenotypic, names=True, delimiter=',',\n dtype=None)\n\n # Keep phenotypic information for selected subjects\n int_ids = np.asarray(ids, dtype=int)\n phenotypic = phenotypic[[np.where(phenotypic['Subject'] == i)[0][0]\n for i in int_ids]]\n\n # Download dataset files\n\n archives = [url + '%i/adhd40_%s.tgz' % (ni, ii)\n for ni, ii in zip(nitrc_ids, ids)]\n functionals = ['data/%s/%s_rest_tshift_RPI_voreg_mni.nii.gz' % (i, i)\n for i in ids]\n confounds = ['data/%s/%s_regressors.csv' % (i, i) for i in ids]\n\n functionals = _fetch_files(\n data_dir, zip(functionals, archives, (opts,) * n_subjects),\n resume=resume, verbose=verbose)\n\n confounds = _fetch_files(\n data_dir, zip(confounds, archives, (opts,) * n_subjects),\n resume=resume, verbose=verbose)\n\n return Bunch(func=functionals, confounds=confounds,\n phenotypic=phenotypic, description=fdescr)\n\n\ndef fetch_miyawaki2008(data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and loads Miyawaki et al. 2008 dataset (153MB).\n\n See :footcite:`MIYAWAKI2008915`.\n\n Parameters\n ----------\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n url : string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). Default: None\n\n resume : bool, optional\n Whether to resume download of a partly-downloaded file. Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func': string list\n Paths to nifti file with bold data\n - 'label': string list\n Paths to text file containing session and target data\n - 'mask': string\n Path to nifti mask file to define target volume in visual\n cortex\n - 'background': string\n Path to nifti file containing a background image usable as a\n background image for miyawaki images.\n\n References\n ----------\n .. 
footbibliography::\n\n Notes\n -----\n This dataset is available on the `brainliner website\n <http://brainliner.jp/data/brainliner-admin/Reconstruct>`_\n\n See `additional information\n <http://www.cns.atr.jp/dni/en/downloads/\n fmri-data-set-for-visual-image-reconstruction/>`_\n\n \"\"\"\n url = 'https://www.nitrc.org/frs/download.php' \\\n '/8486/miyawaki2008.tgz?i_agree=1&download_now=1'\n opts = {'uncompress': True}\n\n # Dataset files\n\n # Functional MRI:\n # * 20 random scans (usually used for training)\n # * 12 figure scans (usually used for testing)\n\n func_figure = [(os.path.join('func', 'data_figure_run%02d.nii.gz' % i),\n url, opts) for i in range(1, 13)]\n\n func_random = [(os.path.join('func', 'data_random_run%02d.nii.gz' % i),\n url, opts) for i in range(1, 21)]\n\n # Labels, 10x10 patches, stimuli shown to the subject:\n # * 20 random labels\n # * 12 figure labels (letters and shapes)\n\n label_filename = 'data_%s_run%02d_label.csv'\n label_figure = [(os.path.join('label', label_filename % ('figure', i)),\n url, opts) for i in range(1, 13)]\n\n label_random = [(os.path.join('label', label_filename % ('random', i)),\n url, opts) for i in range(1, 21)]\n\n # Masks\n\n file_mask = [\n 'mask.nii.gz',\n 'LHlag0to1.nii.gz',\n 'LHlag10to11.nii.gz',\n 'LHlag1to2.nii.gz',\n 'LHlag2to3.nii.gz',\n 'LHlag3to4.nii.gz',\n 'LHlag4to5.nii.gz',\n 'LHlag5to6.nii.gz',\n 'LHlag6to7.nii.gz',\n 'LHlag7to8.nii.gz',\n 'LHlag8to9.nii.gz',\n 'LHlag9to10.nii.gz',\n 'LHV1d.nii.gz',\n 'LHV1v.nii.gz',\n 'LHV2d.nii.gz',\n 'LHV2v.nii.gz',\n 'LHV3A.nii.gz',\n 'LHV3.nii.gz',\n 'LHV4v.nii.gz',\n 'LHVP.nii.gz',\n 'RHlag0to1.nii.gz',\n 'RHlag10to11.nii.gz',\n 'RHlag1to2.nii.gz',\n 'RHlag2to3.nii.gz',\n 'RHlag3to4.nii.gz',\n 'RHlag4to5.nii.gz',\n 'RHlag5to6.nii.gz',\n 'RHlag6to7.nii.gz',\n 'RHlag7to8.nii.gz',\n 'RHlag8to9.nii.gz',\n 'RHlag9to10.nii.gz',\n 'RHV1d.nii.gz',\n 'RHV1v.nii.gz',\n 'RHV2d.nii.gz',\n 'RHV2v.nii.gz',\n 'RHV3A.nii.gz',\n 'RHV3.nii.gz',\n 'RHV4v.nii.gz',\n 'RHVP.nii.gz'\n ]\n\n file_mask = [(os.path.join('mask', m), url, opts) for m in file_mask]\n\n file_names = func_figure + func_random + \\\n label_figure + label_random + \\\n file_mask\n\n dataset_name = 'miyawaki2008'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n files = _fetch_files(data_dir, file_names, resume=resume, verbose=verbose)\n\n # Fetch the background image\n bg_img = _fetch_files(data_dir, [('bg.nii.gz', url, opts)], resume=resume,\n verbose=verbose)[0]\n\n fdescr = _get_dataset_descr(dataset_name)\n\n # Return the data\n return Bunch(\n func=files[:32],\n label=files[32:64],\n mask=files[64],\n mask_roi=files[65:],\n background=bg_img,\n description=fdescr)\n\n\ndef fetch_localizer_contrasts(contrasts, n_subjects=None, get_tmaps=False,\n get_masks=False, get_anats=False,\n data_dir=None, url=None, resume=True, verbose=1):\n \"\"\"Download and load Brainomics/Localizer dataset (94 subjects).\n\n \"The Functional Localizer is a simple and fast acquisition\n procedure based on a 5-minute functional magnetic resonance\n imaging (fMRI) sequence that can be run as easily and as\n systematically as an anatomical scan. This protocol captures the\n cerebral bases of auditory and visual perception, motor actions,\n reading, language comprehension and mental calculation at an\n individual level. Individual functional maps are reliable and\n quite precise. 
The procedure is decribed in more detail on the\n Functional Localizer page.\"\n (see http://brainomics.cea.fr/localizer/)\n\n You may cite :footcite:`PAPADOPOULOSORFANOS2017309`\n when using this dataset.\n\n Scientific results obtained using this dataset are described\n in :footcite:`Pinel2007fast`.\n\n Parameters\n ----------\n contrasts : list of str\n The contrasts to be fetched (for all 94 subjects available).\n Allowed values are::\n\n - \"checkerboard\"\n - \"horizontal checkerboard\"\n - \"vertical checkerboard\"\n - \"horizontal vs vertical checkerboard\"\n - \"vertical vs horizontal checkerboard\"\n - \"sentence listening\"\n - \"sentence reading\"\n - \"sentence listening and reading\"\n - \"sentence reading vs checkerboard\"\n - \"calculation (auditory cue)\"\n - \"calculation (visual cue)\"\n - \"calculation (auditory and visual cue)\"\n - \"calculation (auditory cue) vs sentence listening\"\n - \"calculation (visual cue) vs sentence reading\"\n - \"calculation vs sentences\"\n - \"calculation (auditory cue) and sentence listening\"\n - \"calculation (visual cue) and sentence reading\"\n - \"calculation and sentence listening/reading\"\n - \"calculation (auditory cue) and sentence listening vs \"\n - \"calculation (visual cue) and sentence reading\"\n - \"calculation (visual cue) and sentence reading vs checkerboard\"\n - \"calculation and sentence listening/reading vs button press\"\n - \"left button press (auditory cue)\"\n - \"left button press (visual cue)\"\n - \"left button press\"\n - \"left vs right button press\"\n - \"right button press (auditory cue)\"\n - \"right button press (visual cue)\"\n - \"right button press\"\n - \"right vs left button press\"\n - \"button press (auditory cue) vs sentence listening\"\n - \"button press (visual cue) vs sentence reading\"\n - \"button press vs calculation and sentence listening/reading\"\n\n or equivalently on can use the original names::\n\n - \"checkerboard\"\n - \"horizontal checkerboard\"\n - \"vertical checkerboard\"\n - \"horizontal vs vertical checkerboard\"\n - \"vertical vs horizontal checkerboard\"\n - \"auditory sentences\"\n - \"visual sentences\"\n - \"auditory&visual sentences\"\n - \"visual sentences vs checkerboard\"\n - \"auditory calculation\"\n - \"visual calculation\"\n - \"auditory&visual calculation\"\n - \"auditory calculation vs auditory sentences\"\n - \"visual calculation vs sentences\"\n - \"auditory&visual calculation vs sentences\"\n - \"auditory processing\"\n - \"visual processing\"\n - \"visual processing vs auditory processing\"\n - \"auditory processing vs visual processing\"\n - \"visual processing vs checkerboard\"\n - \"cognitive processing vs motor\"\n - \"left auditory click\"\n - \"left visual click\"\n - \"left auditory&visual click\"\n - \"left auditory & visual click vs right auditory&visual click\"\n - \"right auditory click\"\n - \"right visual click\"\n - \"right auditory&visual click\"\n - \"right auditory & visual click vs left auditory&visual click\"\n - \"auditory click vs auditory sentences\"\n - \"visual click vs visual sentences\"\n - \"auditory&visual motor vs cognitive processing\"\n\n n_subjects : int or list, optional\n The number or list of subjects to load. If None is given,\n all 94 subjects are used.\n\n get_tmaps : boolean, optional\n Whether t maps should be fetched or not. 
Default=False.\n\n get_masks : boolean, optional\n Whether individual masks should be fetched or not.\n Default=False.\n\n get_anats : boolean, optional\n Whether individual structural images should be fetched or not.\n Default=False.\n\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n\n url : string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data).\n\n resume : bool, optional\n Whether to resume download of a partly-downloaded file. Default=True.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'cmaps': string list\n Paths to nifti contrast maps\n - 'tmaps' string list (if 'get_tmaps' set to True)\n Paths to nifti t maps\n - 'masks': string list\n Paths to nifti files corresponding to the subjects individual masks\n - 'anats': string\n Path to nifti files corresponding to the subjects structural images\n\n References\n ----------\n .. footbibliography::\n\n See Also\n ---------\n nilearn.datasets.fetch_localizer_calculation_task\n nilearn.datasets.fetch_localizer_button_task\n\n \"\"\"\n if isinstance(contrasts, str):\n raise ValueError('Contrasts should be a list of strings, but '\n 'a single string was given: \"%s\"' % contrasts)\n if n_subjects is None:\n n_subjects = 94 # 94 subjects available\n if (isinstance(n_subjects, numbers.Number) and\n ((n_subjects > 94) or (n_subjects < 1))):\n warnings.warn(\"Wrong value for \\'n_subjects\\' (%d). The maximum \"\n \"value will be used instead (\\'n_subjects=94\\')\")\n n_subjects = 94 # 94 subjects available\n\n # we allow the user to use alternatives to Brainomics contrast names\n contrast_name_wrapper = {\n # Checkerboard\n \"checkerboard\": \"checkerboard\",\n \"horizontal checkerboard\": \"horizontal checkerboard\",\n \"vertical checkerboard\": \"vertical checkerboard\",\n \"horizontal vs vertical checkerboard\":\n \"horizontal vs vertical checkerboard\",\n \"vertical vs horizontal checkerboard\":\n \"vertical vs horizontal checkerboard\",\n # Sentences\n \"sentence listening\": \"auditory sentences\",\n \"sentence reading\": \"visual sentences\",\n \"sentence listening and reading\": \"auditory&visual sentences\",\n \"sentence reading vs checkerboard\": \"visual sentences vs checkerboard\",\n # Calculation\n \"calculation (auditory cue)\": \"auditory calculation\",\n \"calculation (visual cue)\": \"visual calculation\",\n \"calculation (auditory and visual cue)\": \"auditory&visual calculation\",\n \"calculation (auditory cue) vs sentence listening\":\n \"auditory calculation vs auditory sentences\",\n \"calculation (visual cue) vs sentence reading\":\n \"visual calculation vs sentences\",\n \"calculation vs sentences\": \"auditory&visual calculation vs sentences\",\n # Calculation + Sentences\n \"calculation (auditory cue) and sentence listening\":\n \"auditory processing\",\n \"calculation (visual cue) and sentence reading\":\n \"visual processing\",\n \"calculation (visual cue) and sentence reading vs \"\n \"calculation (auditory cue) and sentence listening\":\n \"visual processing vs auditory processing\",\n \"calculation (auditory cue) and sentence listening vs \"\n \"calculation (visual cue) and sentence reading\":\n \"auditory processing vs visual processing\",\n \"calculation (visual cue) and sentence reading vs checkerboard\":\n \"visual processing vs checkerboard\",\n \"calculation and 
sentence listening/reading vs button press\":\n \"cognitive processing vs motor\",\n # Button press\n \"left button press (auditory cue)\": \"left auditory click\",\n \"left button press (visual cue)\": \"left visual click\",\n \"left button press\": \"left auditory&visual click\",\n \"left vs right button press\": \"left auditory & visual click vs \"\n + \"right auditory&visual click\",\n \"right button press (auditory cue)\": \"right auditory click\",\n \"right button press (visual cue)\": \"right visual click\",\n \"right button press\": \"right auditory & visual click\",\n \"right vs left button press\": \"right auditory & visual click \"\n + \"vs left auditory&visual click\",\n \"button press (auditory cue) vs sentence listening\":\n \"auditory click vs auditory sentences\",\n \"button press (visual cue) vs sentence reading\":\n \"visual click vs visual sentences\",\n \"button press vs calculation and sentence listening/reading\":\n \"auditory&visual motor vs cognitive processing\"}\n allowed_contrasts = list(contrast_name_wrapper.values())\n\n # convert contrast names\n contrasts_wrapped = []\n # get a unique ID for each contrast. It is used to give a unique name to\n # each download file and avoid name collisions.\n contrasts_indices = []\n for contrast in contrasts:\n if contrast in allowed_contrasts:\n contrasts_wrapped.append(contrast.title().replace(\" \", \"\"))\n contrasts_indices.append(allowed_contrasts.index(contrast))\n elif contrast in contrast_name_wrapper:\n name = contrast_name_wrapper[contrast]\n contrasts_wrapped.append(name.title().replace(\" \", \"\"))\n contrasts_indices.append(allowed_contrasts.index(name))\n else:\n raise ValueError(\"Contrast \\'%s\\' is not available\" % contrast)\n\n # Get the dataset OSF index\n dataset_name = \"brainomics_localizer\"\n index_url = \"https://osf.io/hwbm2/download\"\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n index_file = _fetch_file(index_url, data_dir, verbose=verbose)\n with open(index_file, \"rt\") as of:\n index = json.load(of)\n\n # Build data URLs that will be fetched\n files = {}\n # Download from the relevant OSF project, using hashes generated\n # from the OSF API. Note the trailing slash. 
For more info, see:\n # https://gist.github.com/emdupre/3cb4d564511d495ea6bf89c6a577da74\n root_url = \"https://osf.io/download/{0}/\"\n if isinstance(n_subjects, numbers.Number):\n subject_mask = np.arange(1, n_subjects + 1)\n subject_id_max = \"S%02d\" % n_subjects\n else:\n subject_mask = np.array(n_subjects)\n subject_id_max = \"S%02d\" % np.max(n_subjects)\n n_subjects = len(n_subjects)\n subject_ids = [\"S%02d\" % s for s in subject_mask]\n data_types = [\"cmaps\"]\n if get_tmaps:\n data_types.append(\"tmaps\")\n filenames = []\n\n def _is_valid_path(path, index, verbose):\n if path not in index:\n if verbose > 0:\n print(\"Skiping path '{0}'...\".format(path))\n return False\n return True\n\n for subject_id in subject_ids:\n for data_type in data_types:\n for contrast_id, contrast in enumerate(contrasts_wrapped):\n name_aux = str.replace(\n str.join('_', [data_type, contrast]), ' ', '_')\n file_path = os.path.join(\n \"brainomics_data\", subject_id, \"%s.nii.gz\" % name_aux)\n path = \"/\".join([\n \"/localizer\", \"derivatives\", \"spm_1st_level\",\n \"sub-%s\" % subject_id,\n \"sub-%s_task-localizer_acq-%s_%s.nii.gz\" % (\n subject_id, contrast, data_type)])\n if _is_valid_path(path, index, verbose=verbose):\n file_url = root_url.format(index[path][1:])\n opts = {\"move\": file_path}\n filenames.append((file_path, file_url, opts))\n files.setdefault(data_type, []).append(file_path)\n\n # Fetch masks if asked by user\n if get_masks:\n for subject_id in subject_ids:\n file_path = os.path.join(\n \"brainomics_data\", subject_id, \"boolean_mask_mask.nii.gz\")\n path = \"/\".join([\n \"/localizer\", \"derivatives\", \"spm_1st_level\",\n \"sub-%s\" % subject_id, \"sub-%s_mask.nii.gz\" % subject_id])\n if _is_valid_path(path, index, verbose=verbose):\n file_url = root_url.format(index[path][1:])\n opts = {\"move\": file_path}\n filenames.append((file_path, file_url, opts))\n files.setdefault(\"masks\", []).append(file_path)\n\n # Fetch anats if asked by user\n if get_anats:\n for subject_id in subject_ids:\n file_path = os.path.join(\n \"brainomics_data\", subject_id,\n \"normalized_T1_anat_defaced.nii.gz\")\n path = \"/\".join([\n \"/localizer\", \"derivatives\", \"spm_preprocessing\",\n \"sub-%s\" % subject_id, \"sub-%s_T1w.nii.gz\" % subject_id])\n if _is_valid_path(path, index, verbose=verbose):\n file_url = root_url.format(index[path][1:])\n opts = {\"move\": file_path}\n filenames.append((file_path, file_url, opts))\n files.setdefault(\"anats\", []).append(file_path)\n\n # Fetch subject characteristics\n participants_file = os.path.join(\"brainomics_data\", \"participants.tsv\")\n path = \"/localizer/participants.tsv\"\n if _is_valid_path(path, index, verbose=verbose):\n file_url = root_url.format(index[path][1:])\n opts = {\"move\": participants_file}\n filenames.append((participants_file, file_url, opts))\n\n # Fetch behavioural\n behavioural_file = os.path.join(\n \"brainomics_data\", \"phenotype\", \"behavioural.tsv\")\n path = \"/localizer/phenotype/behavioural.tsv\"\n if _is_valid_path(path, index, verbose=verbose):\n file_url = root_url.format(index[path][1:])\n opts = {\"move\": behavioural_file}\n filenames.append((behavioural_file, file_url, opts))\n\n # Actual data fetching\n fdescr = _get_dataset_descr(dataset_name)\n _fetch_files(data_dir, filenames, verbose=verbose)\n for key, value in files.items():\n files[key] = [os.path.join(data_dir, val) for val in value]\n\n # Load covariates file\n from numpy.lib.recfunctions import join_by\n participants_file = 
os.path.join(data_dir, participants_file)\n csv_data = np.recfromcsv(participants_file, delimiter='\\t')\n behavioural_file = os.path.join(data_dir, behavioural_file)\n csv_data2 = np.recfromcsv(behavioural_file, delimiter='\\t')\n csv_data = join_by(\n \"participant_id\", csv_data, csv_data2, usemask=False, asrecarray=True)\n subject_names = csv_data[\"participant_id\"].tolist()\n subjects_indices = []\n for name in subject_ids:\n name = name.encode(\"utf8\")\n if name not in subject_names:\n continue\n subjects_indices.append(subject_names.index(name))\n csv_data = csv_data[subjects_indices]\n\n return Bunch(ext_vars=csv_data, description=fdescr, **files)\n\n\ndef fetch_localizer_calculation_task(n_subjects=1, data_dir=None, url=None,\n verbose=1):\n \"\"\"Fetch calculation task contrast maps from the localizer.\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load. If None is given,\n all 94 subjects are used. Default=1.\n\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n\n url : string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data).\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interest attributes are :\n 'cmaps': string list, giving paths to nifti contrast maps\n\n Notes\n ------\n This function is only a caller for the fetch_localizer_contrasts in order\n to simplify examples reading and understanding.\n The 'calculation (auditory and visual cue)' contrast is used.\n\n See Also\n ---------\n nilearn.datasets.fetch_localizer_button_task\n nilearn.datasets.fetch_localizer_contrasts\n\n \"\"\"\n data = fetch_localizer_contrasts([\"calculation (auditory and visual cue)\"],\n n_subjects=n_subjects,\n get_tmaps=False, get_masks=False,\n get_anats=False, data_dir=data_dir,\n url=url, resume=True, verbose=verbose)\n return data\n\n\ndef fetch_localizer_button_task(data_dir=None, url=None,\n verbose=1):\n \"\"\"Fetch left vs right button press contrast maps from the localizer.\n\n Parameters\n ----------\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n\n url : string, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data).\n\n verbose : int, optional\n Verbosity level (0 means no message). 
Default=1.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'cmaps': string list, giving paths to nifti contrast maps\n - 'tmap': string, giving paths to nifti contrast maps\n - 'anat': string, giving paths to normalized anatomical image\n\n Notes\n ------\n This function is only a caller for the fetch_localizer_contrasts in order\n to simplify examples reading and understanding.\n The 'left vs right button press' contrast is used.\n\n See Also\n ---------\n nilearn.datasets.fetch_localizer_calculation_task\n nilearn.datasets.fetch_localizer_contrasts\n\n \"\"\"\n data = fetch_localizer_contrasts([\"left vs right button press\"],\n n_subjects=[2],\n get_tmaps=True, get_masks=False,\n get_anats=True, data_dir=data_dir,\n url=url, resume=True, verbose=verbose)\n # Additional keys for backward compatibility\n data['tmap'] = data['tmaps'][0]\n data['anat'] = data['anats'][0]\n return data\n\n\ndef fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac',\n band_pass_filtering=False, global_signal_regression=False,\n derivatives=['func_preproc'],\n quality_checked=True, url=None, verbose=1, **kwargs):\n \"\"\"Fetch ABIDE dataset.\n\n Fetch the Autism Brain Imaging Data Exchange (ABIDE) dataset wrt criteria\n that can be passed as parameter. Note that this is the preprocessed\n version of ABIDE provided by the preprocess connectome projects (PCP).\n See :footcite:`Nielsen2013Multisite`.\n\n Parameters\n ----------\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n n_subjects : int, optional\n The number of subjects to load. If None is given,\n all available subjects are used (this number depends on the\n preprocessing pipeline used).\n\n pipeline : string {'cpac', 'css', 'dparsf', 'niak'}, optional\n Possible pipelines are \"ccs\", \"cpac\", \"dparsf\" and \"niak\".\n Default='cpac'.\n\n band_pass_filtering : boolean, optional\n Due to controversies in the literature, band pass filtering is\n optional. If true, signal is band filtered between 0.01Hz and 0.1Hz.\n Default=False.\n\n global_signal_regression : boolean optional\n Indicates if global signal regression should be applied on the\n signals. Default=False.\n\n derivatives : string list, optional\n Types of downloaded files. Possible values are: alff, degree_binarize,\n degree_weighted, dual_regression, eigenvector_binarize,\n eigenvector_weighted, falff, func_mask, func_mean, func_preproc, lfcd,\n reho, rois_aal, rois_cc200, rois_cc400, rois_dosenbach160, rois_ez,\n rois_ho, rois_tt, and vmhc. Please refer to the PCP site for more\n details. Default=['func_preproc'].\n\n quality_checked : boolean, optional\n If true (default), restrict the list of the subjects to the one that\n passed quality assessment for all raters. Default=True.\n\n kwargs : parameter list, optional\n Any extra keyword argument will be used to filter downloaded subjects\n according to the CSV phenotypic file. 
Some examples of filters are\n indicated below.\n\n SUB_ID : list of integers in [50001, 50607], optional\n Ids of the subjects to be loaded.\n\n DX_GROUP : integer in {1, 2}, optional\n 1 is autism, 2 is control.\n\n DSM_IV_TR : integer in [0, 4], optional\n O is control, 1 is autism, 2 is Asperger, 3 is PPD-NOS,\n 4 is Asperger or PPD-NOS.\n\n AGE_AT_SCAN : float in [6.47, 64], optional\n Age of the subject.\n\n SEX : integer in {1, 2}, optional\n 1 is male, 2 is female.\n\n HANDEDNESS_CATEGORY : string in {'R', 'L', 'Mixed', 'Ambi'}, optional\n R = Right, L = Left, Ambi = Ambidextrous.\n\n HANDEDNESS_SCORE : integer in [-100, 100], optional\n Positive = Right, Negative = Left, 0 = Ambidextrous.\n\n Notes\n -----\n Code and description of preprocessing pipelines are provided on the\n `PCP website <http://preprocessed-connectomes-project.github.io/>`.\n\n References\n ----------\n .. footbibliography::\n\n \"\"\"\n # People keep getting it wrong and submiting a string instead of a\n # list of strings. We'll make their life easy\n if isinstance(derivatives, str):\n derivatives = [derivatives, ]\n\n # Parameter check\n for derivative in derivatives:\n if derivative not in [\n 'alff', 'degree_binarize', 'degree_weighted',\n 'dual_regression', 'eigenvector_binarize',\n 'eigenvector_weighted', 'falff', 'func_mask', 'func_mean',\n 'func_preproc', 'lfcd', 'reho', 'rois_aal', 'rois_cc200',\n 'rois_cc400', 'rois_dosenbach160', 'rois_ez', 'rois_ho',\n 'rois_tt', 'vmhc']:\n raise KeyError('%s is not a valid derivative' % derivative)\n\n strategy = ''\n if not band_pass_filtering:\n strategy += 'no'\n strategy += 'filt_'\n if not global_signal_regression:\n strategy += 'no'\n strategy += 'global'\n\n # General file: phenotypic information\n dataset_name = 'ABIDE_pcp'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n if url is None:\n url = ('https://s3.amazonaws.com/fcp-indi/data/Projects/'\n 'ABIDE_Initiative')\n\n if quality_checked:\n kwargs['qc_rater_1'] = b'OK'\n kwargs['qc_anat_rater_2'] = [b'OK', b'maybe']\n kwargs['qc_func_rater_2'] = [b'OK', b'maybe']\n kwargs['qc_anat_rater_3'] = b'OK'\n kwargs['qc_func_rater_3'] = b'OK'\n\n # Fetch the phenotypic file and load it\n csv = 'Phenotypic_V1_0b_preprocessed1.csv'\n path_csv = _fetch_files(data_dir, [(csv, url + '/' + csv, {})],\n verbose=verbose)[0]\n\n # Note: the phenotypic file contains string that contains comma which mess\n # up numpy array csv loading. This is why I do a pass to remove the last\n # field. 
This can be\n # done simply with pandas but we don't want such dependency ATM\n # pheno = pandas.read_csv(path_csv).to_records()\n with open(path_csv, 'r') as pheno_f:\n pheno = ['i' + pheno_f.readline()]\n\n # This regexp replaces commas between double quotes\n for line in pheno_f:\n pheno.append(re.sub(r',(?=[^\"]*\"(?:[^\"]*\"[^\"]*\")*[^\"]*$)', \";\", line))\n\n # bytes (encode()) needed for python 2/3 compat with numpy\n pheno = '\\n'.join(pheno).encode()\n pheno = BytesIO(pheno)\n pheno = np.recfromcsv(pheno, comments='$', case_sensitive=True)\n\n # First, filter subjects with no filename\n pheno = pheno[pheno['FILE_ID'] != b'no_filename']\n # Apply user defined filters\n user_filter = _filter_columns(pheno, kwargs)\n pheno = pheno[user_filter]\n\n # Go into specific data folder and url\n data_dir = os.path.join(data_dir, pipeline, strategy)\n url = '/'.join([url, 'Outputs', pipeline, strategy])\n\n # Get the files\n results = {}\n file_ids = [file_id.decode() for file_id in pheno['FILE_ID']]\n if n_subjects is not None:\n file_ids = file_ids[:n_subjects]\n pheno = pheno[:n_subjects]\n\n results['description'] = _get_dataset_descr(dataset_name)\n results['phenotypic'] = pheno\n for derivative in derivatives:\n ext = '.1D' if derivative.startswith('rois') else '.nii.gz'\n files = []\n for file_id in file_ids:\n file_ = [(\n file_id + '_' + derivative + ext,\n '/'.join([url, derivative, file_id + '_' + derivative + ext]),\n {}\n )]\n files.append(_fetch_files(data_dir, file_, verbose=verbose)[0])\n # Load derivatives if needed\n if ext == '.1D':\n files = [np.loadtxt(f) for f in files]\n results[derivative] = files\n return Bunch(**results)\n\n\ndef _load_mixed_gambles(zmap_imgs):\n \"\"\"Ravel zmaps (one per subject) along time axis, resulting,\n in a n_subjects * n_trials 3D niimgs and, and then make\n gain vector y of same length.\n \"\"\"\n X = []\n y = []\n mask = []\n for zmap_img in zmap_imgs:\n # load subject data\n this_X = get_data(zmap_img)\n affine = zmap_img.affine\n finite_mask = np.all(np.isfinite(this_X), axis=-1)\n this_mask = np.logical_and(np.all(this_X != 0, axis=-1),\n finite_mask)\n this_y = np.array([np.arange(1, 9)] * 6).ravel()\n\n # gain levels\n if len(this_y) != this_X.shape[-1]:\n raise RuntimeError(\"%s: Expecting %i volumes, got %i!\" % (\n zmap_img, len(this_y), this_X.shape[-1]))\n\n # standardize subject data\n this_X -= this_X.mean(axis=-1)[..., np.newaxis]\n std = this_X.std(axis=-1)\n std[std == 0] = 1\n this_X /= std[..., np.newaxis]\n\n # commit subject data\n X.append(this_X)\n y.extend(this_y)\n mask.append(this_mask)\n y = np.array(y)\n X = np.concatenate(X, axis=-1)\n mask = np.sum(mask, axis=0) > .5 * len(mask)\n mask = np.logical_and(mask, np.all(np.isfinite(X), axis=-1))\n X = X[mask, :].T\n tmp = np.zeros(list(mask.shape) + [len(X)])\n tmp[mask, :] = X.T\n mask_img = nibabel.Nifti1Image(mask.astype(int), affine)\n X = nibabel.four_to_three(nibabel.Nifti1Image(tmp, affine))\n return X, y, mask_img\n\n\ndef fetch_mixed_gambles(n_subjects=1, data_dir=None, url=None, resume=True,\n return_raw_data=False, verbose=1):\n \"\"\"Fetch Jimura \"mixed gambles\" dataset.\n\n See :footcite:`JIMURA2012544`.\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load. If None is given, all the\n subjects are used. Default=1.\n\n data_dir : string, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None.\n\n url : string, optional\n Override download URL. 
Used for test only (or if you setup a mirror of\n the data).\n\n resume : bool, optional\n If true, try resuming download if possible. Default=True.\n\n verbose : int, optional\n Defines the level of verbosity of the output. Default=1.\n\n return_raw_data : bool, optional\n If false, then the data will transformed into and (X, y) pair, suitable\n for machine learning routines. X is a list of n_subjects * 48\n Nifti1Image objects (where 48 is the number of trials),\n and y is an array of shape (n_subjects * 48,).\n Default=False.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interest attributes are :\n 'zmaps': string list\n Paths to realigned gain betamaps (one nifti per subject).\n 'gain': ..\n If make_Xy is true, this is a list of n_subjects * 48\n Nifti1Image objects, else it is None.\n 'y': array of shape (n_subjects * 48,) or None\n If make_Xy is true, then this is an array of shape\n (n_subjects * 48,), else it is None.\n\n References\n ----------\n .. footbibliography::\n\n \"\"\"\n if n_subjects > 16:\n warnings.warn('Warning: there are only 16 subjects!')\n n_subjects = 16\n if url is None:\n url = (\"https://www.nitrc.org/frs/download.php/7229/\"\n \"jimura_poldrack_2012_zmaps.zip\")\n opts = dict(uncompress=True)\n files = [(\"zmaps%ssub%03i_zmaps.nii.gz\" % (os.sep, (j + 1)), url, opts)\n for j in range(n_subjects)]\n data_dir = _get_dataset_dir('jimura_poldrack_2012_zmaps',\n data_dir=data_dir)\n zmap_fnames = _fetch_files(data_dir, files, resume=resume, verbose=verbose)\n subject_id = np.repeat(np.arange(n_subjects), 6 * 8)\n data = Bunch(zmaps=zmap_fnames,\n subject_id=subject_id)\n if not return_raw_data:\n X, y, mask_img = _load_mixed_gambles(check_niimg(data.zmaps,\n return_iterator=True))\n data.zmaps, data.gain, data.mask_img = X, y, mask_img\n return data\n\n\ndef fetch_megatrawls_netmats(dimensionality=100, timeseries='eigen_regression',\n matrices='partial_correlation', data_dir=None,\n resume=True, verbose=1):\n \"\"\"Downloads and returns Network Matrices data from MegaTrawls release in HCP.\n\n This data can be used to predict relationships between imaging data and\n non-imaging behavioural measures such as age, sex, education, etc.\n The network matrices are estimated from functional connectivity\n datasets of 461 subjects. Full technical details in references.\n\n More information available in :footcite:`smithhcp2015`,\n :footcite:`smith2015positive`, :footcite:`Filippini7209`,\n :footcite:`smith2014methods`, and :footcite:`reilly2009cerebellum`.\n\n Parameters\n ----------\n dimensionality : int, optional\n Valid inputs are 25, 50, 100, 200, 300. By default, network matrices\n estimated using Group ICA brain parcellations of 100 components/dimensions\n will be returned. Default=100.\n\n timeseries : str, optional\n Valid inputs are 'multiple_spatial_regression' or 'eigen_regression'. By\n default 'eigen_regression', matrices estimated using first principal\n eigen component timeseries signals extracted from each subject data\n parcellations will be returned. Otherwise, 'multiple_spatial_regression'\n matrices estimated using spatial regressor based timeseries signals\n extracted from each subject data parcellations will be returned.\n Default='eigen_regression'.\n\n matrices : str, optional\n Valid inputs are 'full_correlation' or 'partial_correlation'. 
By default,\n partial correlation matrices will be returned otherwise if selected\n full correlation matrices will be returned.\n Default='partial_correlation'.\n\n data_dir : str, optional\n Path of the data directory. Used to force data storage in a specified\n location.\n\n resume : bool, optional\n This parameter is required if a partially downloaded file is needed\n to be resumed to download again. Default=True.\n\n verbose : int, optional\n This parameter is used to set the verbosity level to print the message\n to give information about the processing.\n 0 indicates no information will be given.\n Default=1.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the attributes are :\n\n - 'dimensions': int, consists of given input in dimensions.\n\n - 'timeseries': str, consists of given input in timeseries method.\n\n - 'matrices': str, consists of given type of specific matrices.\n\n - 'correlation_matrices': ndarray, consists of correlation matrices\n based on given type of matrices. Array size will depend on given\n dimensions (n, n).\n\n - 'description': data description\n\n References\n ----------\n .. footbibliography::\n\n Notes\n -----\n See description for terms & conditions on data usage.\n\n \"\"\"\n url = \"http://www.nitrc.org/frs/download.php/8037/Megatrawls.tgz\"\n opts = {'uncompress': True}\n\n error_message = \"Invalid {0} input is provided: {1}, choose one of them {2}\"\n # standard dataset terms\n dimensionalities = [25, 50, 100, 200, 300]\n if dimensionality not in dimensionalities:\n raise ValueError(error_message.format('dimensionality', dimensionality,\n dimensionalities))\n timeseries_methods = ['multiple_spatial_regression', 'eigen_regression']\n if timeseries not in timeseries_methods:\n raise ValueError(error_message.format('timeseries', timeseries,\n timeseries_methods))\n output_matrices_names = ['full_correlation', 'partial_correlation']\n if matrices not in output_matrices_names:\n raise ValueError(error_message.format('matrices', matrices,\n output_matrices_names))\n\n dataset_name = 'Megatrawls'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)\n description = _get_dataset_descr(dataset_name)\n\n timeseries_map = dict(multiple_spatial_regression='ts2', eigen_regression='ts3')\n matrices_map = dict(full_correlation='Znet1.txt', partial_correlation='Znet2.txt')\n filepath = [(os.path.join(\n '3T_Q1-Q6related468_MSMsulc_d%d_%s' % (dimensionality, timeseries_map[timeseries]),\n matrices_map[matrices]), url, opts)]\n\n # Fetch all the files\n files = _fetch_files(data_dir, filepath, resume=resume, verbose=verbose)\n\n # Load the files into arrays\n correlation_matrices = csv_to_array(files[0])\n\n return Bunch(\n dimensions=dimensionality,\n timeseries=timeseries,\n matrices=matrices,\n correlation_matrices=correlation_matrices,\n description=description)\n\n\n@deprecated(\"'fetch_cobre' has been deprecated and will be removed \"\n \"in release 0.9 . 
\"\n \"Please consider using a different datasets or downloading it \"\n \"with a different tool than nilearn.\")\ndef fetch_cobre(n_subjects=10, data_dir=None, url=None, verbose=1):\n \"\"\"Fetch COBRE datasets preprocessed using NIAK 0.17 under CentOS\n version 6.3 with Octave version 4.0.2 and the Minc toolkit version 0.3.18.\n\n Downloads and returns COBRE preprocessed resting state fMRI datasets,\n covariates and phenotypic information such as demographic, clinical\n variables, measure of frame displacement FD (an average FD for all the time\n frames left after censoring).\n\n Each subject `fmri_XXXXXXX.nii.gz` is a 3D+t nifti volume (150 volumes).\n WARNING: no confounds were actually regressed from the data, so it can be\n done interactively by the user who will be able to explore different\n analytical paths easily.\n\n For each subject, there is `fmri_XXXXXXX.tsv` files which contains the\n covariates such as motion parameters, mean CSF signal that should to be\n regressed out of the functional data.\n\n `keys_confounds.json`: a json file, that describes each variable mentioned\n in the files `fmri_XXXXXXX.tsv.gz`. It also contains a list of time frames\n that have been removed from the time series by censoring for high motion.\n\n `phenotypic_data.tsv` contains the data of clinical variables that\n explained in `keys_phenotypic_data.json`\n\n .. versionadded:: 0.3\n\n Warnings\n --------\n 'fetch_cobre' has been deprecated and will be removed in release 0.9.\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load from maximum of 146 subjects.\n By default, 10 subjects will be loaded. If n_subjects=None,\n all subjects will be loaded. Default=10.\n\n data_dir : str, optional\n Path to the data directory. Used to force data storage in a\n specified location. Default: None\n\n url : str, optional\n Override download url. Used for test only (or if you setup a\n mirror of the data). Default: None\n\n verbose : int, optional\n Verbosity level (0 means no message). 
Default=1.\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the attributes are:\n\n - 'func': string list\n Paths to Nifti images.\n - 'confounds': string list\n Paths to .tsv files of each subject, confounds.\n - 'phenotypic': numpy.recarray\n Contains data of clinical variables, sex, age, FD.\n - 'description': data description of the release and references.\n - 'desc_con': str\n description of the confounds variables\n - 'desc_phenotypic': str\n description of the phenotypic variables.\n\n Notes\n -----\n See `more information about datasets structure\n <https://figshare.com/articles/COBRE_preprocessed_with_NIAK_0_17_-_lightweight_release/4197885>`_\n\n \"\"\"\n if url is None:\n # Here we use the file that provides URL for all others\n url = 'https://api.figshare.com/v2/articles/4197885'\n dataset_name = 'cobre'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n fdescr = _get_dataset_descr(dataset_name)\n\n # First, fetch the file that references all individual URLs\n files = _fetch_files(data_dir, [(\"4197885\", url, {})],\n verbose=verbose)[0]\n\n files = json.load(open(files, 'r'))\n files = files['files']\n # Index files by name\n files_ = {}\n for f in files:\n files_[f['name']] = f\n files = files_\n\n # Fetch the phenotypic file and load it\n csv_name_gz = 'phenotypic_data.tsv.gz'\n csv_name = os.path.splitext(csv_name_gz)[0]\n csv_file_phen = _fetch_files(\n data_dir, [(csv_name, files[csv_name_gz]['download_url'],\n {'md5': files[csv_name_gz].get('md5', None),\n 'move': csv_name_gz,\n 'uncompress': True})],\n verbose=verbose)[0]\n\n # Load file in filename to numpy arrays\n names = ['ID', 'Current Age', 'Gender', 'Handedness', 'Subject Type',\n 'Diagnosis', 'Frames OK', 'FD', 'FD Scrubbed']\n\n csv_array_phen = np.recfromcsv(csv_file_phen, names=names,\n skip_header=True, delimiter='\\t')\n\n # Check number of subjects\n max_subjects = len(csv_array_phen)\n if n_subjects is None:\n n_subjects = max_subjects\n\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = max_subjects\n\n sz_count = list(csv_array_phen['subject_type']).count(b'Patient')\n ct_count = list(csv_array_phen['subject_type']).count(b'Control')\n\n n_sz = np.round(float(n_subjects) / max_subjects * sz_count).astype(int)\n n_ct = np.round(float(n_subjects) / max_subjects * ct_count).astype(int)\n\n # First, restrict the csv files to the adequate number of subjects\n sz_ids = csv_array_phen[csv_array_phen['subject_type'] ==\n b'Patient']['id'][:n_sz]\n ct_ids = csv_array_phen[csv_array_phen['subject_type'] ==\n b'Control']['id'][:n_ct]\n ids = np.hstack([sz_ids, ct_ids])\n csv_array_phen = csv_array_phen[np.in1d(csv_array_phen['id'], ids)]\n\n # Call fetch_files once per subject.\n\n func = []\n con = []\n for i in ids:\n f = 'fmri_00' + str(i) + '.nii.gz'\n c_gz = 'fmri_00' + str(i) + '.tsv.gz'\n c = os.path.splitext(c_gz)[0]\n\n f, c = _fetch_files(\n data_dir,\n [(f, files[f]['download_url'], {'md5': files[f].get('md5', None),\n 'move': f}),\n (c, files[c_gz]['download_url'],\n {'md5': files[c_gz].get('md5', None),\n 'move': c_gz, 'uncompress': True})\n ],\n verbose=verbose)\n func.append(f)\n con.append(c)\n\n # Fetch the the complementary files\n keys_con = \"keys_confounds.json\"\n keys_phen = \"keys_phenotypic_data.json\"\n\n csv_keys_con, csv_keys_phen = _fetch_files(\n data_dir,\n [(keys_con, files[keys_con]['download_url'],\n {'md5': files[keys_con].get('md5', None), 'move': keys_con}),\n 
(keys_phen, files[keys_phen]['download_url'],\n {'md5': files[keys_phen].get('md5', None), 'move': keys_phen})\n ],\n verbose=verbose)\n\n files_keys_con = open(csv_keys_con, 'r').read()\n files_keys_phen = open(csv_keys_phen, 'r').read()\n\n return Bunch(func=func, confounds=con, phenotypic=csv_array_phen,\n description=fdescr, desc_con=files_keys_con,\n desc_phenotypic=files_keys_phen)\n\n\ndef fetch_surf_nki_enhanced(n_subjects=10, data_dir=None,\n url=None, resume=True, verbose=1):\n \"\"\"Download and load the NKI enhanced resting-state dataset,\n preprocessed and projected to the fsaverage5 space surface.\n\n See :footcite:`Nooner2012NKI`.\n\n Direct download link :footcite:`NKIdataset`.\n\n .. versionadded:: 0.3\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load from maximum of 102 subjects.\n By default, 10 subjects will be loaded. If None is given,\n all 102 subjects will be loaded. Default=10.\n\n data_dir : str, optional\n Path of the data directory. Used to force data storage in a specified\n location. Default: None\n\n url : str, optional\n Override download URL. Used for test only (or if you setup a mirror of\n the data). Default: None\n\n resume : bool, optional\n If True, try resuming download if possible. Default=True.\n\n verbose : int, optional\n Defines the level of verbosity of the output. Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func_left': Paths to Gifti files containing resting state\n time series left hemisphere\n - 'func_right': Paths to Gifti files containing resting state\n time series right hemisphere\n - 'phenotypic': array containing tuple with subject ID, age,\n dominant hand and sex for each subject.\n - 'description': data description of the release and references.\n\n References\n ----------\n .. 
footbibliography::\n\n \"\"\"\n if url is None:\n url = 'https://www.nitrc.org/frs/download.php/'\n\n # Preliminary checks and declarations\n dataset_name = 'nki_enhanced_surface'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n ids = ['A00028185', 'A00033747', 'A00035072', 'A00035827', 'A00035840',\n 'A00037112', 'A00037511', 'A00038998', 'A00039391', 'A00039431',\n 'A00039488', 'A00040524', 'A00040623', 'A00040944', 'A00043299',\n 'A00043520', 'A00043677', 'A00043722', 'A00045589', 'A00050998',\n 'A00051063', 'A00051064', 'A00051456', 'A00051457', 'A00051477',\n 'A00051513', 'A00051514', 'A00051517', 'A00051528', 'A00051529',\n 'A00051539', 'A00051604', 'A00051638', 'A00051658', 'A00051676',\n 'A00051678', 'A00051679', 'A00051726', 'A00051774', 'A00051796',\n 'A00051835', 'A00051882', 'A00051925', 'A00051927', 'A00052070',\n 'A00052117', 'A00052118', 'A00052126', 'A00052180', 'A00052197',\n 'A00052214', 'A00052234', 'A00052307', 'A00052319', 'A00052499',\n 'A00052502', 'A00052577', 'A00052612', 'A00052639', 'A00053202',\n 'A00053369', 'A00053456', 'A00053474', 'A00053546', 'A00053576',\n 'A00053577', 'A00053578', 'A00053625', 'A00053626', 'A00053627',\n 'A00053874', 'A00053901', 'A00053927', 'A00053949', 'A00054038',\n 'A00054153', 'A00054173', 'A00054358', 'A00054482', 'A00054532',\n 'A00054533', 'A00054534', 'A00054621', 'A00054895', 'A00054897',\n 'A00054913', 'A00054929', 'A00055061', 'A00055215', 'A00055352',\n 'A00055353', 'A00055542', 'A00055738', 'A00055763', 'A00055806',\n 'A00056097', 'A00056098', 'A00056164', 'A00056372', 'A00056452',\n 'A00056489', 'A00056949']\n\n nitrc_ids = range(8260, 8464)\n max_subjects = len(ids)\n if n_subjects is None:\n n_subjects = max_subjects\n if n_subjects > max_subjects:\n warnings.warn('Warning: there are only %d subjects' % max_subjects)\n n_subjects = max_subjects\n ids = ids[:n_subjects]\n\n # Dataset description\n fdescr = _get_dataset_descr(dataset_name)\n\n # First, get the metadata\n phenotypic_file = 'NKI_enhanced_surface_phenotypics.csv'\n phenotypic = (phenotypic_file, url + '8470/pheno_nki_nilearn.csv',\n {'move': phenotypic_file})\n\n phenotypic = _fetch_files(data_dir, [phenotypic], resume=resume,\n verbose=verbose)[0]\n\n # Load the csv file\n phenotypic = np.genfromtxt(phenotypic, skip_header=True,\n names=['Subject', 'Age',\n 'Dominant Hand', 'Sex'],\n delimiter=',', dtype=['U9', '<f8',\n 'U1', 'U1'])\n\n # Keep phenotypic information for selected subjects\n int_ids = np.asarray(ids)\n phenotypic = phenotypic[[np.where(phenotypic['Subject'] == i)[0][0]\n for i in int_ids]]\n\n # Download subjects' datasets\n func_right = []\n func_left = []\n for i in range(len(ids)):\n\n archive = url + '%i/%s_%s_preprocessed_fsaverage5_fwhm6.gii'\n func = os.path.join('%s', '%s_%s_preprocessed_fwhm6.gii')\n rh = _fetch_files(data_dir,\n [(func % (ids[i], ids[i], 'right'),\n archive % (nitrc_ids[2*i+1], ids[i], 'rh'),\n {'move': func % (ids[i], ids[i], 'right')}\n )],\n resume=resume, verbose=verbose)\n lh = _fetch_files(data_dir,\n [(func % (ids[i], ids[i], 'left'),\n archive % (nitrc_ids[2*i], ids[i], 'lh'),\n {'move': func % (ids[i], ids[i], 'left')}\n )],\n resume=resume, verbose=verbose)\n\n func_right.append(rh[0])\n func_left.append(lh[0])\n\n return Bunch(func_left=func_left, func_right=func_right,\n phenotypic=phenotypic,\n description=fdescr)\n\n\ndef _fetch_development_fmri_participants(data_dir, url, verbose):\n \"\"\"Helper function to fetch_development_fmri.\n\n This function helps in 
downloading and loading participants data from .tsv\n uploaded on Open Science Framework (OSF).\n\n The original .tsv file contains many columns but this function picks only\n those columns that are relevant.\n\n Parameters\n ----------\n data_dir : str\n Path of the data directory. Used to force data storage in a specified\n location. If None is given, data are stored in home directory.\n\n url : str\n Override download URL. Used for test only (or if you setup a mirror of\n the data).\n\n verbose : int\n Defines the level of verbosity of the output.\n\n Returns\n -------\n participants : numpy.ndarray\n Contains data of each subject age, age group, child or adult,\n gender, handedness.\n\n \"\"\"\n dataset_name = 'development_fmri'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n if url is None:\n url = 'https://osf.io/yr3av/download'\n\n files = [('participants.tsv', url, {'move': 'participants.tsv'})]\n path_to_participants = _fetch_files(data_dir, files, verbose=verbose)[0]\n\n # Load path to participants\n dtype = [('participant_id', 'U12'), ('Age', '<f8'), ('AgeGroup', 'U6'),\n ('Child_Adult', 'U5'), ('Gender', 'U4'), ('Handedness', 'U4')]\n names = ['participant_id', 'Age', 'AgeGroup', 'Child_Adult', 'Gender',\n 'Handedness']\n participants = csv_to_array(path_to_participants, skip_header=True,\n dtype=dtype, names=names)\n return participants\n\n\ndef _fetch_development_fmri_functional(participants, data_dir, url, resume,\n verbose):\n \"\"\"Helper function to fetch_development_fmri.\n\n This function helps in downloading functional MRI data in Nifti\n and its confound corresponding to each subject.\n\n The files are downloaded from Open Science Framework (OSF).\n\n Parameters\n ----------\n participants : numpy.ndarray\n Should contain column participant_id which represents subjects id. The\n number of files are fetched based on ids in this column.\n\n data_dir : str\n Path of the data directory. Used to force data storage in a specified\n location. If None is given, data are stored in home directory.\n\n url : str\n Override download URL. Used for test only (or if you setup a mirror of\n the data).\n\n resume : bool\n Whether to resume download of a partly-downloaded file.\n\n verbose : int\n Defines the level of verbosity of the output.\n\n Returns\n -------\n func : list of str (Nifti files)\n Paths to functional MRI data (4D) for each subject.\n\n regressors : list of str (tsv files)\n Paths to regressors related to each subject.\n\n \"\"\"\n dataset_name = 'development_fmri'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n\n if url is None:\n # Download from the relevant OSF project, using hashes generated\n # from the OSF API. Note the trailing slash. For more info, see:\n # https://gist.github.com/emdupre/3cb4d564511d495ea6bf89c6a577da74\n url = 'https://osf.io/download/{}/'\n\n confounds = '{}_task-pixar_desc-confounds_regressors.tsv'\n func = '{0}_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'\n\n # The gzip contains unique download keys per Nifti file and confound\n # pre-extracted from OSF. 
Required for downloading files.\n package_directory = os.path.dirname(os.path.abspath(__file__))\n dtype = [('participant_id', 'U12'), ('key_regressor', 'U24'),\n ('key_bold', 'U24')]\n names = ['participant_id', 'key_r', 'key_b']\n # csv file contains download information related to OpenScience(osf)\n osf_data = csv_to_array(os.path.join(package_directory, \"data\",\n \"development_fmri.csv\"),\n skip_header=True, dtype=dtype, names=names)\n\n funcs = []\n regressors = []\n\n for participant_id in participants['participant_id']:\n this_osf_id = osf_data[osf_data['participant_id'] == participant_id]\n # Download regressors\n confound_url = url.format(this_osf_id['key_r'][0])\n regressor_file = [(confounds.format(participant_id),\n confound_url,\n {'move': confounds.format(participant_id)})]\n path_to_regressor = _fetch_files(data_dir, regressor_file,\n verbose=verbose)[0]\n regressors.append(path_to_regressor)\n # Download bold images\n func_url = url.format(this_osf_id['key_b'][0])\n func_file = [(func.format(participant_id, participant_id), func_url,\n {'move': func.format(participant_id)})]\n path_to_func = _fetch_files(data_dir, func_file, resume=resume,\n verbose=verbose)[0]\n funcs.append(path_to_func)\n return funcs, regressors\n\n\ndef fetch_development_fmri(n_subjects=None, reduce_confounds=True,\n data_dir=None, resume=True, verbose=1,\n age_group='both'):\n \"\"\"Fetch movie watching based brain development dataset (fMRI)\n\n The data is downsampled to 4mm resolution for convenience with a repetition time (TR)\n of 2 secs. The origin of the data is coming from OpenNeuro. See Notes below.\n\n Please cite :footcite:`richardson2018development`\n if you are using this dataset.\n\n .. versionadded:: 0.5.2\n\n Parameters\n ----------\n n_subjects : int, optional\n The number of subjects to load. If None, all the subjects are\n loaded. Total 155 subjects.\n\n reduce_confounds : bool, optional\n If True, the returned confounds only include 6 motion parameters,\n mean framewise displacement, signal from white matter, csf, and\n 6 anatomical compcor parameters. This selection only serves the\n purpose of having realistic examples. Depending on your research\n question, other confounds might be more appropriate.\n If False, returns all fmriprep confounds.\n Default=True.\n\n data_dir : str, optional\n Path of the data directory. Used to force data storage in a specified\n location. If None, data are stored in home directory.\n\n resume : bool, optional\n Whether to resume download of a partly-downloaded file.\n Default=True.\n\n verbose : int, optional\n Defines the level of verbosity of the output. Default=1.\n\n age_group : str, optional\n Default='both'. Which age group to fetch\n\n - 'adults' = fetch adults only (n=33, ages 18-39)\n - 'child' = fetch children only (n=122, ages 3-12)\n - 'both' = fetch full sample (n=155)\n\n Returns\n -------\n data : Bunch\n Dictionary-like object, the interest attributes are :\n\n - 'func': list of str (Nifti files)\n Paths to downsampled functional MRI data (4D) for each subject.\n\n - 'confounds': list of str (tsv files)\n Paths to confounds related to each subject.\n\n - 'phenotypic': numpy.ndarray\n Contains each subject age, age group, child or adult, gender,\n handedness.\n\n Notes\n -----\n The original data is downloaded from OpenNeuro\n https://openneuro.org/datasets/ds000228/versions/1.0.0\n\n This fetcher downloads downsampled data that are available on Open\n Science Framework (OSF). 
Located here: https://osf.io/5hju4/files/\n\n Preprocessing details: https://osf.io/wjtyq/\n\n Note that if n_subjects > 2, and age_group is 'both',\n fetcher will return a ratio of children and adults representative\n of the total sample.\n\n References\n ----------\n .. footbibliography::\n\n \"\"\"\n dataset_name = 'development_fmri'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=1)\n keep_confounds = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y',\n 'rot_z', 'framewise_displacement', 'a_comp_cor_00',\n 'a_comp_cor_01', 'a_comp_cor_02', 'a_comp_cor_03',\n 'a_comp_cor_04', 'a_comp_cor_05', 'csf',\n 'white_matter']\n\n # Dataset description\n fdescr = _get_dataset_descr(dataset_name)\n\n # Participants data: ids, demographics, etc\n participants = _fetch_development_fmri_participants(data_dir=data_dir,\n url=None,\n verbose=verbose)\n\n adult_count, child_count = _filter_func_regressors_by_participants(\n participants, age_group) # noqa: E126\n max_subjects = adult_count + child_count\n\n n_subjects = _set_invalid_n_subjects_to_max(n_subjects,\n max_subjects,\n age_group)\n\n # To keep the proportion of children versus adults\n percent_total = float(n_subjects) / max_subjects\n n_child = np.round(percent_total * child_count).astype(int)\n n_adult = np.round(percent_total * adult_count).astype(int)\n\n # We want to return adults by default (i.e., `age_group=both`) or\n # if explicitly requested.\n if (age_group != 'child') and (n_subjects == 1):\n n_adult, n_child = 1, 0\n\n if (age_group == 'both') and (n_subjects == 2):\n n_adult, n_child = 1, 1\n\n participants = _filter_csv_by_n_subjects(participants, n_adult, n_child)\n\n funcs, regressors = _fetch_development_fmri_functional(participants,\n data_dir=data_dir,\n url=None,\n resume=resume,\n verbose=verbose)\n\n if reduce_confounds:\n regressors = _reduce_confounds(regressors, keep_confounds)\n return Bunch(func=funcs, confounds=regressors, phenotypic=participants,\n description=fdescr)\n\n\ndef _filter_func_regressors_by_participants(participants, age_group):\n \"\"\" Filter functional and regressors based on participants\n \"\"\"\n valid_age_groups = ('both', 'child', 'adult')\n if age_group not in valid_age_groups:\n raise ValueError(\"Wrong value for age_group={0}. \"\n \"Valid arguments are: {1}\".format(age_group,\n valid_age_groups)\n )\n\n child_adult = participants['Child_Adult'].tolist()\n\n if age_group != 'adult':\n child_count = child_adult.count('child')\n else:\n child_count = 0\n\n if age_group != 'child':\n adult_count = child_adult.count('adult')\n else:\n adult_count = 0\n return adult_count, child_count\n\n\ndef _filter_csv_by_n_subjects(participants, n_adult, n_child):\n \"\"\"Restrict the csv files to the adequate number of subjects\n \"\"\"\n child_ids = participants[participants['Child_Adult'] ==\n 'child']['participant_id'][:n_child]\n adult_ids = participants[participants['Child_Adult'] ==\n 'adult']['participant_id'][:n_adult]\n ids = np.hstack([adult_ids, child_ids])\n participants = participants[np.in1d(participants['participant_id'], ids)]\n participants = participants[np.argsort(participants, order='Child_Adult')]\n return participants\n\n\ndef _set_invalid_n_subjects_to_max(n_subjects, max_subjects, age_group):\n \"\"\" If n_subjects is invalid, sets it to max.\n \"\"\"\n if n_subjects is None:\n n_subjects = max_subjects\n\n if (isinstance(n_subjects, numbers.Number) and\n ((n_subjects > max_subjects) or (n_subjects < 1))):\n warnings.warn(\"Wrong value for n_subjects={0}. 
The maximum \"\n \"value (for age_group={1}) will be used instead: \"\n \"n_subjects={2}\"\n .format(n_subjects, age_group, max_subjects))\n n_subjects = max_subjects\n return n_subjects\n\n\ndef _reduce_confounds(regressors, keep_confounds):\n reduced_regressors = []\n for in_file in regressors:\n out_file = in_file.replace('desc-confounds',\n 'desc-reducedConfounds')\n if not os.path.isfile(out_file):\n confounds = np.recfromcsv(in_file, delimiter='\\t')\n selected_confounds = confounds[keep_confounds]\n header = '\\t'.join(selected_confounds.dtype.names)\n np.savetxt(out_file, np.array(selected_confounds.tolist()),\n header=header, delimiter='\\t', comments='')\n reduced_regressors.append(out_file)\n return reduced_regressors\n\n\n# datasets originally belonging to nistats follow\n\n\ndef fetch_language_localizer_demo_dataset(data_dir=None, verbose=1):\n \"\"\"Download language localizer demo dataset.\n\n Parameters\n ----------\n data_dir : string, optional\n Path to store the downloaded dataset. if None employ nilearn\n datasets default download directory.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data_dir : string\n Path to downloaded dataset.\n\n downloaded_files : list of string\n Absolute paths of downloaded files on disk\n\n \"\"\"\n url = 'https://osf.io/3dj2a/download'\n # When it starts working again change back to:\n # url = 'https://osf.io/nh987/download'\n main_folder = 'fMRI-language-localizer-demo-dataset'\n\n data_dir = _get_dataset_dir(main_folder, data_dir=data_dir,\n verbose=verbose)\n # The files_spec needed for _fetch_files\n files_spec = [(main_folder + '.zip', url, {'move': main_folder + '.zip'})]\n # Only download if directory is empty\n # Directory will have been created by the call to _get_dataset_dir above\n if not os.listdir(data_dir):\n downloaded_files = _fetch_files(data_dir, files_spec, resume=True,\n verbose=verbose)\n _uncompress_file(downloaded_files[0])\n\n file_list = [os.path.join(path, f) for\n path, dirs, files in os.walk(data_dir) for f in files]\n return data_dir, sorted(file_list)\n\n\ndef fetch_bids_langloc_dataset(data_dir=None, verbose=1):\n \"\"\"Download language localizer example :term:`bids<BIDS>` dataset.\n\n Parameters\n ----------\n data_dir : string, optional\n Path to store the downloaded dataset. if None employ nilearn\n datasets default download directory.\n\n verbose : int, optional\n Verbosity level (0 means no message). 
Default=1.\n\n Returns\n -------\n data_dir : string\n Path to downloaded dataset.\n\n downloaded_files : list of string\n Absolute paths of downloaded files on disk.\n\n \"\"\"\n url = 'https://files.osf.io/v1/resources/9q7dv/providers/osfstorage/5888d9a76c613b01fc6acc4e' # noqa: E501\n dataset_name = 'bids_langloc_example'\n main_folder = 'bids_langloc_dataset'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n # The files_spec needed for _fetch_files\n files_spec = [(main_folder + '.zip', url, {'move': main_folder + '.zip'})]\n if not os.path.exists(os.path.join(data_dir, main_folder)):\n downloaded_files = _fetch_files(data_dir, files_spec, resume=True,\n verbose=verbose)\n _uncompress_file(downloaded_files[0])\n main_path = os.path.join(data_dir, main_folder)\n file_list = [os.path.join(path, f) for\n path, dirs, files in os.walk(main_path) for f in files]\n return os.path.join(data_dir, main_folder), sorted(file_list)\n\n\ndef fetch_openneuro_dataset_index(data_dir=None,\n dataset_version='ds000030_R1.0.4',\n verbose=1):\n \"\"\"Download a file with OpenNeuro :term:`BIDS` dataset index.\n\n Downloading the index allows to explore the dataset directories\n to select specific files to download. The index is a sorted list of urls.\n\n Parameters\n ----------\n data_dir : string, optional\n Path to store the downloaded dataset. if None employ nilearn\n datasets default download directory.\n\n dataset_version : string, optional\n Dataset version name. Assumes it is of the form [name]_[version].\n Default='ds000030_R1.0.4'.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n urls_path : string\n Path to downloaded dataset index.\n\n urls : list of string\n Sorted list of dataset directories.\n\n \"\"\"\n data_prefix = '{}/{}/uncompressed'.format(dataset_version.split('_')[0],\n dataset_version,\n )\n data_dir = _get_dataset_dir(data_prefix, data_dir=data_dir,\n verbose=verbose)\n\n file_url = 'https://osf.io/86xj7/download'\n final_download_path = os.path.join(data_dir, 'urls.json')\n downloaded_file_path = _fetch_files(data_dir=data_dir,\n files=[(final_download_path,\n file_url,\n {'move': final_download_path}\n )],\n resume=True\n )\n urls_path = downloaded_file_path[0]\n with open(urls_path, 'r') as json_file:\n urls = json.load(json_file)\n return urls_path, urls\n\n\ndef select_from_index(urls, inclusion_filters=None, exclusion_filters=None,\n n_subjects=None):\n \"\"\"Select subset of urls with given filters.\n\n Parameters\n ----------\n urls : list of str\n List of dataset urls obtained from index download.\n\n inclusion_filters : list of str, optional\n List of unix shell-style wildcard strings\n that will be used to filter the url list.\n If a filter matches the url it is retained for download.\n Multiple filters work on top of each other.\n Like an \"and\" logical operator, creating a more restrictive query.\n Inclusion and exclusion filters apply together.\n For example the filter '*task-rest*'' would keep only urls\n that contain the 'task-rest' string.\n\n exclusion_filters : list of str, optional\n List of unix shell-style wildcard strings\n that will be used to filter the url list.\n If a filter matches the url it is discarded for download.\n Multiple filters work on top of each other.\n Like an \"and\" logical operator, creating a more restrictive query.\n Inclusion and exclusion filters apply together.\n For example the filter '*task-rest*' would discard all urls\n that contain the 
'task-rest' string.\n\n n_subjects : int, optional\n Number of subjects to download from the dataset. All by default.\n\n Returns\n -------\n urls : list of string\n Sorted list of filtered dataset directories.\n\n \"\"\"\n inclusion_filters = inclusion_filters if inclusion_filters else []\n exclusion_filters = exclusion_filters if exclusion_filters else []\n # We apply filters to the urls\n for exclusion in exclusion_filters:\n urls = [url for url in urls if not fnmatch.fnmatch(url, exclusion)]\n for inclusion in inclusion_filters:\n urls = [url for url in urls if fnmatch.fnmatch(url, inclusion)]\n\n # subject selection filter\n # from the url list we infer all available subjects like 'sub-xxx/'\n subject_regex = 'sub-[a-z|A-Z|0-9]*[_./]'\n\n def infer_subjects(urls):\n subjects = set()\n for url in urls:\n if 'sub-' in url:\n subjects.add(re.search(subject_regex, url).group(0)[:-1])\n return sorted(subjects)\n\n # We get a list of subjects (for the moment the first n subjects)\n selected_subjects = set(infer_subjects(urls)[:n_subjects])\n # We exclude urls of subjects not selected\n urls = [\n url for url in urls\n if 'sub-' not in url or re.search(subject_regex, url).group(0)[:-1]\n in selected_subjects\n ]\n return urls\n\n\ndef patch_openneuro_dataset(file_list):\n \"\"\"Add symlinks for files not named according to latest :term:`BIDS` conventions.\n \"\"\"\n rep = {'_T1w_brainmask': '_desc-brain_mask',\n '_T1w_preproc': '_desc-preproc_T1w',\n '_T1w_space-MNI152NLin2009cAsym_brainmask':\n '_space-MNI152NLin2009cAsym_desc-brain_mask',\n '_T1w_space-MNI152NLin2009cAsym_class-':\n '_space-MNI152NLin2009cAsym_label-',\n '_T1w_space-MNI152NLin2009cAsym_preproc':\n '_space-MNI152NLin2009cAsym_desc-preproc_T1w',\n '_bold_confounds': '_desc-confounds_regressors',\n '_bold_space-MNI152NLin2009cAsym_brainmask':\n '_space-MNI152NLin2009cAsym_desc-brain_mask',\n '_bold_space-MNI152NLin2009cAsym_preproc':\n '_space-MNI152NLin2009cAsym_desc-preproc_bold'\n }\n # Create a symlink if a file with the modified filename does not exist\n for old in rep:\n for name in file_list:\n if old in name:\n if not os.path.exists(name.replace(old, rep[old])):\n os.symlink(name, name.replace(old, rep[old]))\n name = name.replace(old, rep[old])\n\n\ndef fetch_openneuro_dataset(\n urls=None, data_dir=None, dataset_version='ds000030_R1.0.4',\n verbose=1):\n \"\"\"Download OpenNeuro :term:`BIDS` dataset.\n\n Parameters\n ----------\n urls : list of string, optional\n Openneuro url list of dataset files to download. If not specified\n all files of the specified dataset will be downloaded.\n\n data_dir : string, optional\n Path to store the downloaded dataset. if None employ nilearn\n datasets default download directory.\n\n dataset_version : string, optional\n Dataset version name. Assumes it is of the form [name]_[version].\n Default is `ds000030_R1.0.4`.\n\n verbose : int, optional\n Verbosity level (0 means no message). 
Default=1.\n\n Returns\n -------\n data_dir : string\n Path to downloaded dataset.\n\n downloaded_files : list of string\n Absolute paths of downloaded files on disk.\n\n \"\"\"\n data_prefix = '{}/{}/uncompressed'.format(\n dataset_version.split('_')[0], dataset_version)\n data_dir = _get_dataset_dir(data_prefix, data_dir=data_dir,\n verbose=verbose)\n\n # if urls are not specified we download the complete dataset index\n if urls is None:\n _, urls = fetch_openneuro_dataset_index(\n data_dir=data_dir, dataset_version=dataset_version,\n verbose=verbose)\n\n # The files_spec needed for _fetch_files\n files_spec = []\n files_dir = []\n for url in urls:\n url_path = url.split(data_prefix + '/')[1]\n file_dir = os.path.join(data_dir, url_path)\n files_spec.append((os.path.basename(file_dir), url, {}))\n files_dir.append(os.path.dirname(file_dir))\n\n # download the files\n downloaded = []\n for file_spec, file_dir in zip(files_spec, files_dir):\n # Timeout errors are common in the s3 connection so we try to avoid\n # failure of the dataset download for a transient instability\n success = False\n download_attempts = 4\n while download_attempts > 0 and not success:\n try:\n downloaded_files = _fetch_files(\n file_dir, [file_spec], resume=True, verbose=verbose)\n downloaded += downloaded_files\n success = True\n except Exception:\n download_attempts -= 1\n if not success:\n raise Exception('multiple failures downloading %s' % file_spec[1])\n patch_openneuro_dataset(downloaded)\n\n return data_dir, sorted(downloaded)\n\n\ndef fetch_localizer_first_level(data_dir=None, verbose=1):\n \"\"\"Download a first-level localizer fMRI dataset\n\n Parameters\n ----------\n data_dir : string\n Directory where data should be downloaded and unpacked.\n\n verbose : int, optional\n Verbosity level (0 means no message). 
Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, with the keys:\n epi_img: the input 4D image\n events: a csv file describing the paardigm\n\n \"\"\"\n url = 'https://osf.io/2bqxn/download'\n epi_img = 'sub-12069_task-localizer_space-MNI305.nii.gz'\n events = 'sub-12069_task-localizer_events.tsv'\n opts = {'uncompress': True}\n options = ('epi_img', 'events')\n dir_ = 'localizer_first_level'\n filenames = [(os.path.join(dir_, name), url, opts)\n for name in [epi_img, events]]\n\n dataset_name = 'localizer_first_level'\n data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,\n verbose=verbose)\n files = _fetch_files(data_dir, filenames, verbose=verbose)\n\n params = dict(list(zip(options, files)))\n return Bunch(**params)\n\n\ndef _download_spm_auditory_data(data_dir, subject_dir, subject_id):\n print('Data absent, downloading...')\n url = ('http://www.fil.ion.ucl.ac.uk/spm/download/data/MoAEpilot/'\n 'MoAEpilot.zip')\n archive_path = os.path.join(subject_dir, os.path.basename(url))\n _fetch_file(url, subject_dir)\n try:\n _uncompress_file(archive_path)\n except: # noqa:E722\n print('Archive corrupted, trying to download it again.')\n return fetch_spm_auditory(data_dir=data_dir, data_name='',\n subject_id=subject_id)\n\n\ndef _prepare_downloaded_spm_auditory_data(subject_dir):\n \"\"\" Uncompresses downloaded spm_auditory dataset and organizes\n the data into apprpriate directories.\n\n Parameters\n ----------\n subject_dir : string\n Path to subject's data directory.\n\n Returns\n -------\n _subject_data : skl.Bunch object\n Scikit-Learn Bunch object containing data of a single subject\n from the SPM Auditory dataset.\n\n \"\"\"\n subject_data = {}\n spm_auditory_data_files = [\"fM00223/fM00223_%03i.img\" % index\n for index in range(4, 100)]\n spm_auditory_data_files.append(\"sM00223/sM00223_002.img\")\n\n for file_name in spm_auditory_data_files:\n file_path = os.path.join(subject_dir, file_name)\n if os.path.exists(file_path):\n subject_data[file_name] = file_path\n else:\n print('%s missing from filelist!' % file_name)\n return None\n\n _subject_data = {}\n _subject_data['func'] = sorted(\n [subject_data[x] for x in subject_data.keys()\n if re.match(r'^fM00223_0\\d\\d\\.img$',\n os.path.basename(x))])\n\n # volumes for this dataset of shape (64, 64, 64, 1); let's fix this\n for x in _subject_data['func']:\n vol = nib.load(x)\n if len(vol.shape) == 4:\n vol = nib.Nifti1Image(get_data(vol)[:, :, :, 0],\n vol.affine)\n nib.save(vol, x)\n\n _subject_data['anat'] = [subject_data[x] for x in subject_data.keys()\n if re.match(r'^sM00223_002\\.img$',\n os.path.basename(x))][0]\n\n # ... 
same thing for anat\n vol = nib.load(_subject_data['anat'])\n if len(vol.shape) == 4:\n vol = nib.Nifti1Image(get_data(vol)[:, :, :, 0],\n vol.affine)\n nib.save(vol, _subject_data['anat'])\n\n return Bunch(**_subject_data)\n\n\ndef _make_path_events_file_spm_auditory_data(spm_auditory_data):\n \"\"\"Accepts data for spm_auditory dataset as Bunch\n and constructs the filepath for its events descriptor file.\n\n Parameters\n ----------\n spm_auditory_data : Bunch\n\n Returns\n -------\n events_filepath : string\n Full path to the events.tsv file for spm_auditory dataset.\n\n \"\"\"\n events_file_location = os.path.dirname(spm_auditory_data['func'][0])\n events_filename = os.path.basename(events_file_location) + '_events.tsv'\n events_filepath = os.path.join(events_file_location, events_filename)\n return events_filepath\n\n\ndef _make_events_file_spm_auditory_data(events_filepath):\n \"\"\"Accepts destination filepath including filename and\n creates the events.tsv file for the spm_auditory dataset.\n\n Parameters\n ----------\n events_filepath : string\n The path where the events file will be created.\n\n Returns\n -------\n None\n\n \"\"\"\n tr = 7.\n epoch_duration = 6 * tr # duration in seconds\n conditions = ['rest', 'active'] * 8\n n_blocks = len(conditions)\n duration = epoch_duration * np.ones(n_blocks)\n onset = np.linspace(0, (n_blocks - 1) * epoch_duration, n_blocks)\n events = pd.DataFrame(\n {'onset': onset, 'duration': duration, 'trial_type': conditions})\n events.to_csv(events_filepath, sep='\\t', index=False,\n columns=['onset', 'duration', 'trial_type'])\n\n\ndef fetch_spm_auditory(data_dir=None, data_name='spm_auditory',\n subject_id='sub001', verbose=1):\n \"\"\"Function to fetch SPM auditory single-subject data.\n\n See :footcite:`spm_auditory`.\n\n Parameters\n ----------\n data_dir : string, optional.\n Path of the data directory. Used to force data storage in a specified\n location. If the data is already present there, then will simply\n glob it.\n\n data_name : string, optional\n Name of the dataset. Default='spm_auditory'.\n\n subject_id : string, optional\n Indicates which subject to retrieve.\n Default='sub001'.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are:\n - 'func': string list. Paths to functional images\n - 'anat': string list. Path to anat image\n\n References\n ----------\n .. footbibliography::\n\n \"\"\"\n data_dir = _get_dataset_dir(data_name, data_dir=data_dir,\n verbose=verbose)\n subject_dir = os.path.join(data_dir, subject_id)\n if not os.path.exists(subject_dir):\n _download_spm_auditory_data(data_dir, subject_dir, subject_id)\n spm_auditory_data = _prepare_downloaded_spm_auditory_data(subject_dir)\n try:\n spm_auditory_data['events']\n except KeyError:\n events_filepath = _make_path_events_file_spm_auditory_data(\n spm_auditory_data)\n if not os.path.isfile(events_filepath):\n _make_events_file_spm_auditory_data(events_filepath)\n spm_auditory_data['events'] = events_filepath\n return spm_auditory_data\n\n\ndef _get_func_data_spm_multimodal(subject_dir, session, _subject_data):\n session_func = sorted(glob.glob(\n os.path.join(\n subject_dir,\n ('fMRI/Session%i/fMETHODS-000%i-*-01.img' % (\n session, session + 4)\n )\n )\n ))\n if len(session_func) < 390:\n print('Missing %i functional scans for session %i.' 
% (\n 390 - len(session_func), session))\n return None\n\n _subject_data['func%i' % (session)] = session_func\n return _subject_data\n\n\ndef _get_session_trials_spm_multimodal(subject_dir, session, _subject_data):\n sess_trials = os.path.join(\n subject_dir,\n 'fMRI/trials_ses%i.mat' % (session))\n if not os.path.isfile(sess_trials):\n print('Missing session file: %s' % sess_trials)\n return None\n\n _subject_data['trials_ses%i' % (session)] = sess_trials\n return _subject_data\n\n\ndef _get_anatomical_data_spm_multimodal(subject_dir, _subject_data):\n anat = os.path.join(subject_dir, 'sMRI/smri.img')\n if not os.path.isfile(anat):\n print('Missing structural image.')\n return None\n\n _subject_data['anat'] = anat\n return _subject_data\n\n\ndef _glob_spm_multimodal_fmri_data(subject_dir):\n \"\"\"glob data from subject_dir.\"\"\"\n _subject_data = {'slice_order': 'descending'}\n\n for session in range(1, 3):\n # glob func data for session\n _subject_data = _get_func_data_spm_multimodal(subject_dir,\n session,\n _subject_data)\n if not _subject_data:\n return None\n # glob trials .mat file\n _subject_data = _get_session_trials_spm_multimodal(subject_dir,\n session,\n _subject_data)\n if not _subject_data:\n return None\n try:\n events = _make_events_file_spm_multimodal_fmri(_subject_data,\n session)\n except MatReadError as mat_err:\n warnings.warn(\n '{}. An events.tsv file '\n 'cannot be generated'.format(str(mat_err)))\n else:\n events_filepath = _make_events_filepath_spm_multimodal_fmri(\n _subject_data, session)\n events.to_csv(events_filepath, sep='\\t', index=False)\n _subject_data['events{}'.format(session)] = events_filepath\n\n # glob for anat data\n _subject_data = _get_anatomical_data_spm_multimodal(subject_dir,\n _subject_data)\n if not _subject_data:\n return None\n\n return Bunch(**_subject_data)\n\n\ndef _download_data_spm_multimodal(data_dir, subject_dir, subject_id):\n print('Data absent, downloading...')\n urls = [\n # fmri\n ('http://www.fil.ion.ucl.ac.uk/spm/download/data/mmfaces/'\n 'multimodal_fmri.zip'),\n # structural\n ('http://www.fil.ion.ucl.ac.uk/spm/download/data/mmfaces/'\n 'multimodal_smri.zip')\n ]\n\n for url in urls:\n archive_path = os.path.join(subject_dir, os.path.basename(url))\n _fetch_file(url, subject_dir)\n try:\n _uncompress_file(archive_path)\n except: # noqa:E722\n print('Archive corrupted, trying to download it again.')\n return fetch_spm_multimodal_fmri(data_dir=data_dir,\n data_name='',\n subject_id=subject_id)\n\n return _glob_spm_multimodal_fmri_data(subject_dir)\n\n\ndef _make_events_filepath_spm_multimodal_fmri(_subject_data, session):\n key = 'trials_ses{}'.format(session)\n events_file_location = os.path.dirname(_subject_data[key])\n events_filename = 'session{}_events.tsv'.format(session)\n events_filepath = os.path.join(events_file_location, events_filename)\n return events_filepath\n\n\ndef _make_events_file_spm_multimodal_fmri(_subject_data, session):\n tr = 2.\n timing = loadmat(_subject_data['trials_ses%i' % (session)],\n squeeze_me=True, struct_as_record=False)\n faces_onsets = timing['onsets'][0].ravel()\n scrambled_onsets = timing['onsets'][1].ravel()\n onsets = np.hstack((faces_onsets, scrambled_onsets))\n onsets *= tr # because onsets were reporting in 'scans' units\n conditions = (\n ['faces'] * len(faces_onsets) + ['scrambled'] * len(scrambled_onsets)\n )\n duration = np.ones_like(onsets)\n events = pd.DataFrame({'trial_type': conditions, 'onset': onsets,\n 'duration': duration})\n return events\n\n\ndef 
fetch_spm_multimodal_fmri(data_dir=None, data_name='spm_multimodal_fmri',\n subject_id='sub001', verbose=1):\n \"\"\"Fetcher for Multi-modal Face Dataset.\n\n See :footcite:`spm_multiface`.\n\n Parameters\n ----------\n data_dir : string, optional.\n Path of the data directory. Used to force data storage in a specified\n location. If the data is already present there, then will simply glob it.\n\n data_name : string, optional\n Name of the dataset. Default='spm_multimodal_fmri'.\n\n subject_id : string, optional\n Indicates which subject to retrieve. Default='sub001'.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n Returns\n -------\n data : sklearn.datasets.base.Bunch\n Dictionary-like object, the interest attributes are:\n - 'func1': string list. Paths to functional images for session 1\n - 'func2': string list. Paths to functional images for session 2\n - 'trials_ses1': string list. Path to onsets file for session 1\n - 'trials_ses2': string list. Path to onsets file for session 2\n - 'anat': string. Path to anat file\n\n References\n ----------\n .. footbibliography::\n\n \"\"\"\n data_dir = _get_dataset_dir(data_name, data_dir=data_dir, verbose=verbose)\n subject_dir = os.path.join(data_dir, subject_id)\n\n # maybe data_dir already contains the data ?\n data = _glob_spm_multimodal_fmri_data(subject_dir)\n if data is not None:\n return data\n\n # No. Download the data\n return _download_data_spm_multimodal(data_dir, subject_dir, subject_id)\n\n\ndef fetch_fiac_first_level(data_dir=None, verbose=1):\n \"\"\"Download a first-level fiac fMRI dataset (2 sessions)\n\n Parameters\n ----------\n data_dir : string, optional\n Directory where data should be downloaded and unpacked.\n\n verbose : int, optional\n Verbosity level (0 means no message). Default=1.\n\n \"\"\"\n data_dir = _get_dataset_dir('fiac_nilearn.glm', data_dir=data_dir,\n verbose=verbose)\n\n def _glob_fiac_data():\n \"\"\"glob data from subject_dir.\"\"\"\n _subject_data = {}\n subject_dir = os.path.join(data_dir, 'nipy-data-0.2/data/fiac/fiac0')\n for session in [1, 2]:\n # glob func data for session\n session_func = os.path.join(subject_dir, 'run%i.nii.gz' % session)\n if not os.path.isfile(session_func):\n print('Missing functional scan for session %i.' % session)\n return None\n\n _subject_data['func%i' % session] = session_func\n\n # glob design matrix .npz file\n sess_dmtx = os.path.join(subject_dir,\n 'run%i_design.npz' % session)\n if not os.path.isfile(sess_dmtx):\n print('Missing session file: %s' % sess_dmtx)\n return None\n\n _subject_data['design_matrix%i' % session] = sess_dmtx\n\n # glob for mask data\n mask = os.path.join(subject_dir, 'mask.nii.gz')\n if not os.path.isfile(mask):\n print('Missing mask image.')\n return None\n\n _subject_data['mask'] = mask\n return Bunch(**_subject_data)\n\n # maybe data_dir already contains the data ?\n data = _glob_fiac_data()\n if data is not None:\n return data\n\n # No. Download the data\n print('Data absent, downloading...')\n url = 'http://nipy.sourceforge.net/data-packages/nipy-data-0.2.tar.gz'\n\n archive_path = os.path.join(data_dir, os.path.basename(url))\n _fetch_file(url, data_dir)\n try:\n _uncompress_file(archive_path)\n except: # noqa:E722\n print('Archive corrupted, trying to download it again.')\n return fetch_fiac_first_level(data_dir=data_dir)\n\n return _glob_fiac_data()\n" ]
[ [ "numpy.ones_like", "numpy.genfromtxt", "sklearn.utils.deprecated", "numpy.where", "numpy.concatenate", "numpy.max", "sklearn.utils.Bunch", "pandas.DataFrame", "numpy.lib.recfunctions.join_by", "numpy.arange", "numpy.isfinite", "numpy.in1d", "numpy.recfromcsv", "numpy.array", "numpy.round", "numpy.loadtxt", "numpy.argsort", "numpy.hstack", "numpy.asarray", "numpy.sum", "scipy.io.loadmat", "numpy.ones", "numpy.any", "numpy.all", "numpy.linspace" ] ]
trimcao/lef-parser
[ "9872122203b064451fd30ad7eb39bead3415eb5e" ]
[ "plot_cell.py" ]
[ "\"\"\"\nProgram to plot cell using DEF and LEF data.\n\nAuthor: Tri Minh Cao\nEmail: [email protected]\nDate: September 2016\n\"\"\"\nfrom def_parser import *\nfrom lef_parser import *\nimport util\nimport matplotlib.pyplot as plt\nimport time\n\ndef inside_area(location, corners):\n \"\"\"\n Check if the location is inside an area.\n :param location: location\n :param corners: corner points of the rectangle area.\n :return:\n \"\"\"\n x1 = corners[0][0]\n x2 = corners[1][0]\n y1 = corners[0][1]\n y2 = corners[1][1]\n return (location[0] > x1 and location[0] < x2\n and location[1] > y1 and location[1] < y2)\n\n\ndef macro_and_via1(def_info, via_type):\n \"\"\"\n Method to get macros/cells info and via1 information.\n :param def_info: information from a DEF file\n :param via_type: the name of the via type, such as \"via1\" or \"M2_M1_via\"\n :return: a macro dictionary that contains via info\n \"\"\"\n result_dict = {}\n # add components to the dictionary\n for each_comp in def_info.components.comps:\n result_dict[each_comp.name] = {}\n result_dict[each_comp.name][\"MACRO\"] = each_comp.macro\n # process the nets\n for net in def_info.nets.nets:\n for route in net.routed:\n if route.end_via != None:\n # check for the via type of the end_via\n if route.end_via[:len(via_type)] == via_type:\n via_loc = route.end_via_loc\n via_name = route.end_via\n via_info = (via_loc, via_name)\n # add the via to the component dict\n for each_comp in net.comp_pin:\n comp_name = each_comp[0]\n pin_name = each_comp[1]\n if comp_name in result_dict:\n if pin_name in result_dict[comp_name]:\n result_dict[comp_name][pin_name].append(via_info)\n else:\n result_dict[comp_name][pin_name] = [via_info]\n #print (result_dict)\n return result_dict\n\ndef draw_via(location, via_info, color='blue'):\n \"\"\"\n Method to draw a via using the location and VIA info from the LEF file.\n :param location: via location\n :param via_info: VIA data from LEF file.\n :return: void\n \"\"\"\n for each_layer in via_info.layers:\n # print (each_layer.name)\n if each_layer.name == 'metal2':\n color = 'red'\n elif each_layer.name == 'metal1':\n color = 'blue'\n for shape in each_layer.shapes:\n scaled_pts = scalePts(shape.points, SCALE)\n for i in range(len(scaled_pts)):\n scaled_pts[i][0] += location[0]\n scaled_pts[i][1] += location[1]\n # print (scaled_pts)\n if shape.type == \"RECT\":\n scaled_pts = rect_to_polygon(scaled_pts)\n # print (scaled_pts)\n draw_shape = plt.Polygon(scaled_pts, closed=True, fill=True,\n color=color)\n plt.gca().add_patch(draw_shape)\n\ndef plot_component(comp_name, lef_data, def_data, macro_via1_dict):\n \"\"\"\n Use pyplot to plot a component from the DEF data\n :param comp_name: name of the component\n :param lef_data: data parsed from LEF file.\n :param def_data: data parsed from DEF file.\n :param macro_via_dict: dictionary contains macro and via1 data\n :return: void\n \"\"\"\n # get info of the component and macro from DEF and LEF\n comp_info = def_data.components.comp_dict[comp_name]\n macro_name = comp_info.macro\n macro_info = lef_data.macro_dict[macro_name]\n macro_size = macro_info.info[\"SIZE\"]\n scale = float(def_data.scale)\n # get the placement of the component\n bottom_left_pt = comp_info.placed\n top_right_pt = [int(macro_size[0] * scale),\n int(macro_size[1] * scale)]\n corners = [[0, 0], top_right_pt]\n # find the vias inside the component's area\n vias_in_comp = macro_via1_dict[comp_name]\n vias_draw = []\n for pin in vias_in_comp:\n if pin != \"MACRO\":\n for each_via in 
vias_in_comp[pin]:\n each_via_loc = each_via[0]\n via_type = each_via[1]\n new_via_loc = [0, 0]\n new_via_loc[0] = each_via_loc[0] - bottom_left_pt[0]\n new_via_loc[1] = each_via_loc[1] - bottom_left_pt[1]\n if inside_area(new_via_loc, corners):\n vias_draw.append((new_via_loc, via_type))\n\n # NOTE: figsize(6, 9) can be changed to adapt to other cell size\n plt.figure(figsize=(3, 5), dpi=80, frameon=False)\n # draw the cell boundary\n # scaled_pts = rect_to_polygon(corners)\n # draw_shape = plt.Polygon(scaled_pts, closed=True, fill=None,\n # color=\"blue\")\n # plt.gca().add_patch(draw_shape)\n # plot vias\n for via in vias_draw:\n via_name = via[1]\n via_info = lef_data.via_dict[via_name]\n via_loc = via[0]\n draw_via(via_loc, via_info)\n # scale the axis of the subplot\n test_axis = [corners[0][0], corners[1][0], corners[0][1], corners[1][1]]\n # print (test_axis)\n plt.axis(test_axis)\n plt.axis('off')\n plt.gca().set_aspect('equal', adjustable='box')\n # plt.savefig('foo.png', bbox_inches='tight')\n # compose the output file name\n out_folder = './images/'\n current_time = time.strftime('%H%M%d%m%Y')\n out_file = comp_name + '_' + macro_name + '_' + current_time\n # plt.savefig(out_folder + out_file, transparent=True)\n plt.savefig(out_folder + out_file, transparent=False)\n # plt.show()\n plt.close('all')\n\ndef plot_component2(comp_name, lef_data, def_data, macro_via1_dict):\n \"\"\"\n Use pyplot to plot a component from the DEF data\n :param comp_name: name of the component\n :param lef_data: data parsed from LEF file.\n :param def_data: data parsed from DEF file.\n :param macro_via_dict: dictionary contains macro and via1 data\n :return: void\n \"\"\"\n # get info of the component and macro from DEF and LEF\n comp_info = def_data.components.comp_dict[comp_name]\n macro_name = comp_info.macro\n macro_info = lef_data.macro_dict[macro_name]\n macro_size = macro_info.info[\"SIZE\"]\n scale = float(def_data.scale)\n # get the placement of the component\n bottom_left_pt = comp_info.placed\n top_right_pt = [bottom_left_pt[0] + int(macro_size[0] * scale),\n bottom_left_pt[1] + int(macro_size[1] * scale)]\n corners = [bottom_left_pt, top_right_pt]\n # find the vias inside the component's area\n vias_in_comp = macro_via1_dict[comp_name]\n vias_draw = []\n for pin in vias_in_comp:\n if pin != \"MACRO\":\n for each_via in vias_in_comp[pin]:\n each_via_loc = each_via[0]\n via_type = each_via[1]\n # new_via_loc = [0, 0]\n # new_via_loc[0] = each_via_loc[0]\n # new_via_loc[1] = each_via_loc[1]\n if inside_area(each_via_loc, corners):\n vias_draw.append((each_via_loc, via_type))\n\n # sort the vias by x-coordinate\n vias_draw.sort(key=lambda x: x[0][0])\n # print (vias_draw)\n # NOTE: figsize(6, 9) can be changed to adapt to other cell size\n plt.figure(figsize=(1, 1.6), dpi=80, frameon=False)\n margin = 350\n left_pt = [vias_draw[0][0][0] - margin, bottom_left_pt[1]]\n width = vias_draw[-1][0][0] - left_pt[0] + margin\n height = macro_size[1] * scale\n # print (height)\n corners = [left_pt]\n corners.append((left_pt[0] + width, left_pt[1] + height))\n # draw the cell boundary\n # scaled_pts = rect_to_polygon(corners)\n # draw_shape = plt.Polygon(scaled_pts, closed=True, fill=None,\n # color=\"blue\")\n # plt.gca().add_patch(draw_shape)\n # plot vias\n for via in vias_draw:\n via_name = via[1]\n via_info = lef_data.via_dict[via_name]\n via_loc = via[0]\n draw_via(via_loc, via_info)\n\n # scale the axis of the subplot\n axis = [corners[0][0], corners[1][0], corners[0][1], corners[1][1]]\n # print 
(test_axis)\n plt.axis(axis)\n plt.axis('off')\n plt.gca().set_aspect('equal', adjustable='box')\n # plt.savefig('foo.png', bbox_inches='tight')\n # compose the output file name\n out_folder = './images/'\n current_time = time.strftime('%H%M%S%d%m%Y')\n out_file = comp_name + '_' + macro_name + '_' + current_time\n # plt.savefig(out_folder + out_file, transparent=True)\n plt.savefig(out_folder + out_file, transparent=False)\n # plt.show()\n plt.close('all')\n\n# Main Class\nif __name__ == '__main__':\n # read_path = './libraries/DEF/c1908_tri_no_metal1.def'\n read_path = './libraries/layout_freepdk45/c3540.def'\n def_parser = DefParser(read_path)\n def_parser.parse()\n\n lef_file = \"./libraries/FreePDK45/gscl45nm.lef\"\n lef_parser = LefParser(lef_file)\n lef_parser.parse()\n\n print (\"Process file:\", read_path)\n # test macro and via (note: only via1)\n macro_via1_dict = macro_and_via1(def_parser, via_type=\"M2_M1_via\")\n # for comp in macro_via1_dict:\n # print (comp)\n # for pin in macro_via1_dict[comp]:\n # print (\" \" + pin + \": \" + str(macro_via1_dict[comp][pin]))\n # print ()\n # plot_component(\"U521\", lef_parser, def_parser, macro_via1_dict)\n num_comps = 0\n for each_comp in macro_via1_dict:\n comp_info = def_parser.components.comp_dict[each_comp]\n # if (comp_info.macro == \"INVX8\"):\n print (each_comp)\n plot_component2(each_comp, lef_parser, def_parser, macro_via1_dict)\n num_comps += 1\n # if num_comps > 20:\n # break\n print (\"Finished!\")\n # plot_component(\"U4068\", lef_parser, def_parser, macro_via1_dict)\n\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.Polygon", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "matplotlib.pyplot.axis" ] ]
raphaelmerx/fairseq_extension
[ "89e29008d0c6a56fe4a5daad727e3c663e6b3962" ]
[ "fairseq_cli/generate.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTranslate pre-processed data with a trained model.\n\"\"\"\n\nimport logging\nimport math\nimport os\nimport sys\n\nimport numpy as np\n\nimport torch\n\nfrom fairseq import checkpoint_utils, options, scoring, tasks, utils\nfrom fairseq.logging import progress_bar\nfrom fairseq.logging.meters import StopwatchMeter, TimeMeter\nfrom fairseq.data import encoders\n\n\ndef main(args):\n assert args.path is not None, '--path required for generation!'\n assert not args.sampling or args.nbest == args.beam, \\\n '--sampling requires --nbest to be equal to --beam'\n assert args.replace_unk is None or args.dataset_impl == 'raw', \\\n '--replace-unk requires a raw text dataset (--dataset-impl=raw)'\n\n if args.results_path is not None:\n os.makedirs(args.results_path, exist_ok=True)\n output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset))\n with open(output_path, 'w', buffering=1, encoding='utf-8') as h:\n return _main(args, h)\n else:\n return _main(args, sys.stdout)\n\n\ndef get_symbols_to_strip_from_output(generator):\n if hasattr(generator, 'symbols_to_strip_from_output'):\n return generator.symbols_to_strip_from_output\n else:\n return {generator.eos}\n\n\ndef _main(args, output_file):\n logging.basicConfig(\n format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n stream=output_file,\n )\n logger = logging.getLogger('fairseq_cli.generate')\n\n utils.import_user_module(args)\n\n if args.max_tokens is None and args.max_sentences is None:\n args.max_tokens = 12000\n logger.info(args)\n\n # Fix seed for stochastic decoding\n if args.seed is not None and not args.no_seed_provided:\n np.random.seed(args.seed)\n utils.set_torch_seed(args.seed)\n\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n # Load dataset splits\n task = tasks.setup_task(args)\n task.load_dataset(args.gen_subset)\n\n # Set dictionaries\n try:\n src_dict = getattr(task, 'source_dictionary', None)\n except NotImplementedError:\n src_dict = None\n tgt_dict = task.target_dictionary\n\n # Load ensemble\n logger.info('loading model(s) from {}'.format(args.path))\n models, _model_args = checkpoint_utils.load_model_ensemble(\n utils.split_paths(args.path),\n arg_overrides=eval(args.model_overrides),\n task=task,\n suffix=getattr(args, \"checkpoint_suffix\", \"\"),\n )\n\n # Optimize ensemble for generation\n for model in models:\n model.prepare_for_inference_(args)\n if args.fp16:\n model.half()\n if use_cuda:\n model.cuda()\n\n # Load alignment dictionary for unknown word replacement\n # (None if no unknown word replacement, empty if no path to align dictionary)\n align_dict = utils.load_align_dict(args.replace_unk)\n\n # Load dataset (possibly sharded)\n itr = task.get_batch_iterator(\n dataset=task.dataset(args.gen_subset),\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n *[model.max_positions() for model in models]\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n num_shards=args.num_shards,\n shard_id=args.shard_id,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.progress_bar(\n itr,\n 
log_format=args.log_format,\n log_interval=args.log_interval,\n default_log_format=('tqdm' if not args.no_progress_bar else 'none'),\n )\n\n # Initialize generator\n gen_timer = StopwatchMeter()\n generator = task.build_generator(models, args)\n\n # Handle tokenization and BPE\n tokenizer = encoders.build_tokenizer(args)\n bpe = encoders.build_bpe(args)\n\n def decode_fn(x):\n if bpe is not None:\n x = bpe.decode(x)\n if tokenizer is not None:\n x = tokenizer.decode(x)\n return x\n\n scorer = scoring.build_scorer(args, tgt_dict)\n\n num_sentences = 0\n has_target = True\n wps_meter = TimeMeter()\n for sample in progress:\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n if 'net_input' not in sample:\n continue\n\n prefix_tokens = None\n if args.prefix_size > 0:\n prefix_tokens = sample['target'][:, :args.prefix_size]\n\n gen_timer.start()\n hypos = task.inference_step(generator, models, sample, prefix_tokens)\n num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)\n gen_timer.stop(num_generated_tokens)\n\n for i, sample_id in enumerate(sample['id'].tolist()):\n has_target = sample['target'] is not None\n\n # Remove padding\n if 'src_tokens' in sample['net_input']:\n src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())\n else:\n src_tokens = None\n\n target_tokens = None\n if has_target:\n target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()\n\n # Either retrieve the original sentences or regenerate them from tokens.\n if align_dict is not None:\n src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)\n target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)\n else:\n if src_dict is not None:\n src_str = src_dict.string(src_tokens, args.remove_bpe)\n else:\n src_str = \"\"\n if has_target:\n target_str = tgt_dict.string(\n target_tokens,\n args.remove_bpe,\n escape_unk=True,\n extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),\n )\n\n src_str = decode_fn(src_str)\n if has_target:\n target_str = decode_fn(target_str)\n\n if not args.quiet:\n if src_dict is not None:\n print('S-{}\\t{}'.format(sample_id, src_str), file=output_file)\n if has_target:\n print('T-{}\\t{}'.format(sample_id, target_str), file=output_file)\n\n # Process top predictions\n for j, hypo in enumerate(hypos[i][:args.nbest]):\n hypo_tokens, hypo_str, alignment = utils.post_process_prediction(\n hypo_tokens=hypo['tokens'].int().cpu(),\n src_str=src_str,\n alignment=hypo['alignment'],\n align_dict=align_dict,\n tgt_dict=tgt_dict,\n remove_bpe=args.remove_bpe,\n extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),\n )\n detok_hypo_str = decode_fn(hypo_str)\n if not args.quiet:\n score = hypo['score'] / math.log(2) # convert to base 2\n # original hypothesis (after tokenization and BPE)\n print('H-{}\\t{}\\t{}'.format(sample_id, score, hypo_str), file=output_file)\n # detokenized hypothesis\n print('D-{}\\t{}\\t{}'.format(sample_id, score, detok_hypo_str), file=output_file)\n print('P-{}\\t{}'.format(\n sample_id,\n ' '.join(map(\n lambda x: '{:.4f}'.format(x),\n # convert from base e to base 2\n hypo['positional_scores'].div_(math.log(2)).tolist(),\n ))\n ), file=output_file)\n\n if args.print_alignment:\n print('A-{}\\t{}'.format(\n sample_id,\n ' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment])\n ), file=output_file)\n\n if args.print_step:\n print('I-{}\\t{}'.format(sample_id, hypo['steps']), file=output_file)\n\n if getattr(args, 
'retain_iter_history', False):\n for step, h in enumerate(hypo['history']):\n _, h_str, _ = utils.post_process_prediction(\n hypo_tokens=h['tokens'].int().cpu(),\n src_str=src_str,\n alignment=None,\n align_dict=None,\n tgt_dict=tgt_dict,\n remove_bpe=None,\n )\n print('E-{}_{}\\t{}'.format(sample_id, step, h_str), file=output_file)\n\n # Score only the top hypothesis\n if has_target and j == 0:\n if align_dict is not None or args.remove_bpe is not None:\n # Convert back to tokens for evaluation with unk replacement and/or without BPE\n target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)\n hypo_tokens = tgt_dict.encode_line(detok_hypo_str, add_if_not_exist=True)\n if hasattr(scorer, 'add_string'):\n scorer.add_string(target_str, detok_hypo_str)\n else:\n scorer.add(target_tokens, hypo_tokens)\n\n wps_meter.update(num_generated_tokens)\n progress.log({'wps': round(wps_meter.avg)})\n num_sentences += sample[\"nsentences\"] if \"nsentences\" in sample else sample['id'].numel()\n\n logger.info('NOTE: hypothesis and token scores are output in base 2')\n logger.info('Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(\n num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))\n if has_target:\n if args.bpe and not args.sacrebleu:\n if args.remove_bpe:\n logger.warning(\"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization\")\n else:\n logger.warning(\"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization\")\n # use print to be consistent with other main outputs: S-, H-, T-, D- and so on\n print(\n 'Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()),\n file=output_file)\n\n return scorer\n\n\ndef cli_main():\n parser = options.get_generation_parser()\n args = options.parse_args_and_arch(parser)\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n" ]
[ [ "numpy.random.seed", "torch.cuda.is_available" ] ]
kliiu/Texygen
[ "b8896f5477e7a02d99ba23c29871731bc31aca19" ]
[ "models/mle/MleGenerator.py" ]
[ "import numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom tensorflow.python.ops import tensor_array_ops, control_flow_ops\n\n\nclass Generator(object):\n def __init__(self, num_vocabulary, batch_size, emb_dim, hidden_dim,\n sequence_length, start_token,\n learning_rate=0.01, reward_gamma=0.95):\n self.num_vocabulary = num_vocabulary\n self.batch_size = batch_size\n self.emb_dim = emb_dim\n self.hidden_dim = hidden_dim\n self.sequence_length = sequence_length\n self.start_token = tf.constant([start_token] * self.batch_size, dtype=tf.int32)\n self.learning_rate = tf.Variable(float(learning_rate), trainable=False)\n self.reward_gamma = reward_gamma\n self.g_params = []\n self.d_params = []\n self.temperature = 1.0\n self.grad_clip = 5.0\n\n self.expected_reward = tf.Variable(tf.zeros([self.sequence_length]))\n\n with tf.variable_scope('generator'):\n self.g_embeddings = tf.Variable(self.init_matrix([self.num_vocabulary, self.emb_dim]))\n self.g_params.append(self.g_embeddings)\n self.g_recurrent_unit = self.create_recurrent_unit(self.g_params) # maps h_tm1 to h_t for generator\n self.g_output_unit = self.create_output_unit(self.g_params) # maps h_t to o_t (output token logits)\n\n # placeholder definition\n self.x = tf.placeholder(tf.int32, shape=[self.batch_size,\n self.sequence_length]) # sequence of tokens generated by generator\n self.rewards = tf.placeholder(tf.float32, shape=[self.batch_size,\n self.sequence_length]) # get from rollout policy and discriminator\n\n # processed for batch\n with tf.device(\"/cpu:0\"):\n self.processed_x = tf.transpose(tf.nn.embedding_lookup(self.g_embeddings, self.x),\n perm=[1, 0, 2]) # seq_length x batch_size x emb_dim\n\n # Initial states\n self.h0 = tf.zeros([self.batch_size, self.hidden_dim])\n self.h0 = tf.stack([self.h0, self.h0])\n\n gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length,\n dynamic_size=False, infer_shape=True)\n gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length,\n dynamic_size=False, infer_shape=True)\n\n def _g_recurrence(i, x_t, h_tm1, gen_o, gen_x):\n h_t = self.g_recurrent_unit(x_t, h_tm1) # hidden_memory_tuple\n o_t = self.g_output_unit(h_t) # batch x vocab , logits not prob\n log_prob = tf.log(tf.nn.softmax(o_t))\n next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)\n x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token) # batch x emb_dim\n gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(tf.one_hot(next_token, self.num_vocabulary, 1.0, 0.0),\n tf.nn.softmax(o_t)), 1)) # [batch_size] , prob\n gen_x = gen_x.write(i, next_token) # indices, batch_size\n return i + 1, x_tp1, h_t, gen_o, gen_x\n\n _, _, _, self.gen_o, self.gen_x = control_flow_ops.while_loop(\n cond=lambda i, _1, _2, _3, _4: i < self.sequence_length,\n body=_g_recurrence,\n loop_vars=(tf.constant(0, dtype=tf.int32),\n tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.h0, gen_o, gen_x))\n\n self.gen_x = self.gen_x.stack() # seq_length x batch_size\n self.gen_x = tf.transpose(self.gen_x, perm=[1, 0]) # batch_size x seq_length\n\n # supervised pretraining for generator\n g_predictions = tensor_array_ops.TensorArray(\n dtype=tf.float32, size=self.sequence_length,\n dynamic_size=False, infer_shape=True)\n\n ta_emb_x = tensor_array_ops.TensorArray(\n dtype=tf.float32, size=self.sequence_length)\n ta_emb_x = ta_emb_x.unstack(self.processed_x)\n\n def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):\n h_t = 
self.g_recurrent_unit(x_t, h_tm1)\n o_t = self.g_output_unit(h_t)\n g_predictions = g_predictions.write(i, tf.nn.softmax(o_t)) # batch x vocab_size\n x_tp1 = ta_emb_x.read(i)\n return i + 1, x_tp1, h_t, g_predictions\n\n _, _, _, self.g_predictions = control_flow_ops.while_loop(\n cond=lambda i, _1, _2, _3: i < self.sequence_length,\n body=_pretrain_recurrence,\n loop_vars=(tf.constant(0, dtype=tf.int32),\n tf.nn.embedding_lookup(self.g_embeddings, self.start_token),\n self.h0, g_predictions))\n\n self.g_predictions = tf.transpose(self.g_predictions.stack(),\n perm=[1, 0, 2]) # batch_size x seq_length x vocab_size\n\n # pretraining loss\n self.pretrain_loss = -tf.reduce_sum(\n tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_vocabulary, 1.0, 0.0) * tf.log(\n tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_vocabulary]), 1e-20, 1.0)\n )\n ) / (self.sequence_length * self.batch_size)\n\n # training updates\n pretrain_opt = self.g_optimizer(self.learning_rate)\n\n self.pretrain_grad, _ = tf.clip_by_global_norm(tf.gradients(self.pretrain_loss, self.g_params), self.grad_clip)\n self.pretrain_updates = pretrain_opt.apply_gradients(zip(self.pretrain_grad, self.g_params))\n\n #######################################################################################################\n # Unsupervised Training\n #######################################################################################################\n self.g_loss = -tf.reduce_sum(\n tf.reduce_sum(\n tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_vocabulary, 1.0, 0.0) * tf.log(\n tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_vocabulary]), 1e-20, 1.0)\n ), 1) * tf.reshape(self.rewards, [-1])\n )\n\n g_opt = self.g_optimizer(self.learning_rate)\n\n self.g_grad, _ = tf.clip_by_global_norm(tf.gradients(self.g_loss, self.g_params), self.grad_clip)\n self.g_updates = g_opt.apply_gradients(zip(self.g_grad, self.g_params))\n\n def generate(self, sess):\n outputs = sess.run(self.gen_x)\n return outputs\n\n def pretrain_step(self, sess, x):\n outputs = sess.run([self.pretrain_updates, self.pretrain_loss], feed_dict={self.x: x})\n return outputs\n\n def init_matrix(self, shape):\n return tf.random_normal(shape, stddev=0.1)\n\n def init_vector(self, shape):\n return tf.zeros(shape)\n\n def create_recurrent_unit(self, params):\n # Weights and Bias for input and hidden tensor\n self.Wi = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))\n self.Ui = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))\n self.bi = tf.Variable(self.init_matrix([self.hidden_dim]))\n\n self.Wf = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))\n self.Uf = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))\n self.bf = tf.Variable(self.init_matrix([self.hidden_dim]))\n\n self.Wog = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))\n self.Uog = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))\n self.bog = tf.Variable(self.init_matrix([self.hidden_dim]))\n\n self.Wc = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))\n self.Uc = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))\n self.bc = tf.Variable(self.init_matrix([self.hidden_dim]))\n params.extend([\n self.Wi, self.Ui, self.bi,\n self.Wf, self.Uf, self.bf,\n self.Wog, self.Uog, self.bog,\n self.Wc, self.Uc, self.bc])\n\n def unit(x, hidden_memory_tm1):\n previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)\n\n # Input Gate\n i = tf.sigmoid(\n 
tf.matmul(x, self.Wi) +\n tf.matmul(previous_hidden_state, self.Ui) + self.bi\n )\n\n # Forget Gate\n f = tf.sigmoid(\n tf.matmul(x, self.Wf) +\n tf.matmul(previous_hidden_state, self.Uf) + self.bf\n )\n\n # Output Gate\n o = tf.sigmoid(\n tf.matmul(x, self.Wog) +\n tf.matmul(previous_hidden_state, self.Uog) + self.bog\n )\n\n # New Memory Cell\n c_ = tf.nn.tanh(\n tf.matmul(x, self.Wc) +\n tf.matmul(previous_hidden_state, self.Uc) + self.bc\n )\n\n # Final Memory cell\n c = f * c_prev + i * c_\n\n # Current Hidden state\n current_hidden_state = o * tf.nn.tanh(c)\n\n return tf.stack([current_hidden_state, c])\n\n return unit\n\n def create_output_unit(self, params):\n self.Wo = tf.Variable(self.init_matrix([self.hidden_dim, self.num_vocabulary]))\n self.bo = tf.Variable(self.init_matrix([self.num_vocabulary]))\n params.extend([self.Wo, self.bo])\n\n def unit(hidden_memory_tuple):\n hidden_state, c_prev = tf.unstack(hidden_memory_tuple)\n logits = tf.matmul(hidden_state, self.Wo) + self.bo\n return logits\n\n return unit\n\n def g_optimizer(self, *args, **kwargs):\n return tf.train.AdamOptimizer(*args, **kwargs)\n\n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n\n def set_similarity(self, valid_examples=None, pca=True):\n if valid_examples == None:\n if pca:\n valid_examples = np.array(range(20))\n else:\n valid_examples = np.array(range(self.num_vocabulary))\n self.valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.g_embeddings), 1, keep_dims=True))\n self.normalized_embeddings = self.g_embeddings / self.norm\n # PCA\n if self.num_vocabulary >= 20 and pca == True:\n emb = tf.matmul(self.normalized_embeddings, tf.transpose(self.normalized_embeddings))\n s, u, v = tf.svd(emb)\n u_r = tf.strided_slice(u, begin=[0, 0], end=[20, self.num_vocabulary], strides=[1, 1])\n self.normalized_embeddings = tf.matmul(u_r, self.normalized_embeddings)\n self.valid_embeddings = tf.nn.embedding_lookup(\n self.normalized_embeddings, self.valid_dataset)\n self.similarity = tf.matmul(self.valid_embeddings, tf.transpose(self.normalized_embeddings))\n" ]
[ [ "tensorflow.compat.v1.strided_slice", "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.device", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.gradients", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.svd", "tensorflow.compat.v1.nn.tanh", "tensorflow.compat.v1.one_hot", "tensorflow.compat.v1.square", "tensorflow.compat.v1.nn.embedding_lookup", "tensorflow.compat.v1.nn.softmax", "tensorflow.compat.v1.multinomial", "tensorflow.compat.v1.random_normal", "tensorflow.python.ops.tensor_array_ops.TensorArray" ] ]
acdmammoths/parallelcubesampling
[ "c6fa613e9877f6a57c73c24d93223f738bdf3aae" ]
[ "src/dataprep.py" ]
[ "import numpy as np\nimport math\nfrom typing import Optional, Tuple, Union\nfrom cubeutils import get_active_indices, get_active_strata\n\n\ndef prepare_inputs(\n data: np.ndarray,\n init_probs: np.ndarray,\n is_pop_size_fixed: bool = False,\n is_sample_size_fixed: bool = False,\n strata: Optional[np.ndarray] = None,\n is_sample_size_in_strata_fixed: bool = False,\n) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Prepares inputs by scaling the data, selecting active units,\n and prepending the required constraints.\n\n :param data: a matrix with dimensions population size x number of auxiliary variables\n :param init_probs: a vector of inclusion probabilities\n :param is_pop_size_fixed: whether the algorithm should fix the population size\n :param is_sample_size_fixed: whether the algorithm should fix the sample size\n :param strata: the strata that the balanced sample should respect\n :param is_sample_size_in_strata_fixed: whether the algorithm should fix sample size\n within each strata\n :returns: 3-tuple of (matrix of prepared data, vector of prepared inclusion probabilities,\n vector of prepared strata) if strata is not none. If strata is none, returns tuple of\n a matrix of prepared data and a vector of prepared inclusion probabilities\n \"\"\"\n\n prepared_data = data.copy()\n\n # fixed pop size constraint needs to be added first\n # so that it is included as an aux var in stratified sample cube methods\n if is_pop_size_fixed:\n prepared_data = prepend_pop_size_constraint(prepared_data)\n\n if is_sample_size_fixed:\n prepared_data = prepend_sample_size_constraint(prepared_data, init_probs)\n\n prepared_data, prepared_init_probs = drop_inactive_units_and_scale(prepared_data, init_probs)\n\n if strata is not None:\n # Use natural numbers as strata labels.\n unique_strata = np.unique(strata)\n labels = np.arange(len(unique_strata))\n unique_strata_labels_dict = dict(zip(unique_strata, labels))\n strata = np.array([unique_strata_labels_dict[s] for s in strata])\n active_strata = get_active_strata(init_probs, strata)\n if is_sample_size_in_strata_fixed:\n prepared_data = prepend_strata_constraints(prepared_data, active_strata)\n return prepared_data.T, prepared_init_probs, active_strata\n else:\n return prepared_data.T, prepared_init_probs\n\n\ndef prepare_inputs_for_strat_fp_2(\n prepared_data: np.ndarray,\n flight_probs: np.ndarray,\n strata: np.ndarray,\n num_aux_vars: int,\n is_sample_size_in_strata_fixed: bool = False,\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Prepare data for second flight phase in sample cubes with strata.\n\n :param prepared_data: a matrix of scaled, active, and transposed data\n with desired constraints prepended\n :param flight_probs: a vector of inclusion probabilities after the first flight phase\n :param strata: the strata that the balanced sample should respect\n :param num_aux_vars: the number of auxiliary variables including\n fixed population size constraints\n :param is_sample_size_in_strata_fixed: whether the algorithm should fix the sample size\n within each strata\n :returns: a tuple with a matrix of the active prepared data and a vector of\n the active inclusion probabilities from the first flight phase\n \"\"\"\n\n # Remove column with inclusion probabilities and\n # keep original auxiliary variables and pop size contraint\n num_constraints = prepared_data.shape[0] - num_aux_vars\n prepared_data_new = prepared_data[num_constraints:, :]\n\n active_indices = get_active_indices(flight_probs)\n 
prepared_data_new = prepared_data_new[:, active_indices]\n active_flight_probs = flight_probs[active_indices]\n strata = strata[active_indices]\n\n if is_sample_size_in_strata_fixed:\n prepared_data_new = np.append(get_strata_constraints(strata).T, prepared_data_new, axis=0)\n\n return prepared_data_new, active_flight_probs\n\n\ndef drop_inactive_units_and_scale(\n data: np.ndarray, init_probs: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Selects active units and scales the data.\n\n :param data: a matrix with dimensions population size x number of auxiliary variables\n :param init_probs: a vector of inclusion probabilities\n :returns: a tuple with a matrix of active and scaled data and a vector of\n active inclusion probabilities\n \"\"\"\n\n init_probs = init_probs.copy()\n active_indices = get_active_indices(init_probs)\n prepared_init_probs = init_probs[active_indices]\n prepared_data = data[active_indices, :] / prepared_init_probs.reshape(-1, 1)\n\n return (prepared_data, prepared_init_probs)\n\n\ndef prepend_pop_size_constraint(prepared_data: np.ndarray) -> np.ndarray:\n \"\"\"\n Prepend constraint to prepared_data to fix the population size.\n\n :param prepared_data: a matrix of scaled, active, and transposed data with\n desired constraints prepended\n :returns: a matrix of the prepared data with columns to fix the population size prepended\n \"\"\"\n\n prepared_data = np.append(\n np.repeat(1, prepared_data.shape[0]).reshape(-1, 1), prepared_data, axis=1\n )\n return prepared_data\n\n\ndef prepend_sample_size_constraint(\n prepared_data: np.ndarray, prepared_init_probs: np.ndarray\n) -> np.ndarray:\n \"\"\"\n Prepend constraint to prepared_data to fix the sample size\n\n :param prepared_data: a matrix of scaled, active, and transposed data with\n desired constraints prepended\n :param prepared_init_probs: a vector of active inclusion probabilities\n :returns: a matrix of the prepared data with a column to fix the sample size prepended\n \"\"\"\n prepared_data = np.append(prepared_init_probs.reshape(-1, 1), prepared_data, axis=1)\n return prepared_data\n\n\ndef prepend_strata_constraints(prepared_data: np.ndarray, active_strata: np.ndarray) -> np.ndarray:\n \"\"\"\n Prepend constraints to prepared_data to fix the sample size within each strata\n\n :param prepared_data: a matrix of scaled, active, and transposed data with\n desired constraints prepended\n :param active_strata: a vector of strata indices that have active inclusion probabilities\n :returns: a matrix of the prepared data with columns to fix the sample size within each strata\n prepended\n \"\"\"\n strata_constraints = get_strata_constraints(active_strata)\n prepared_data = np.append(strata_constraints, prepared_data, axis=1)\n return prepared_data\n\n\ndef get_strata_constraints(strata: np.ndarray) -> np.ndarray:\n \"\"\"\n Create constraints to generate a sample that respects each strata.\n\n :param strata: the strata that the balanced sample should respect\n :returns: a matrix of strata constraints where each row is a member of the population and\n the ith column is a 1 if the belongs to ith strata and 0 in every other column.\n \"\"\"\n\n num_strata = len(np.unique(strata))\n pop_size = len(strata)\n strata_constraints = np.zeros((pop_size, num_strata))\n for i in range(pop_size):\n strata_constraints[i, strata[i]] = 1\n return strata_constraints\n\n\ndef create_inc_probs_for_strata(pop_size: int, strata: np.ndarray) -> np.ndarray:\n \"\"\"\n Given strata indices for every member of the population,\n 
create init_probs such that the sum of the\n inclusion probabilities in each strata is an integer.\n\n :param pop_size: the population size\n :param strata: the strata that the balanced sample should respect\n :returns: a vector of inclusion probabilities\n \"\"\"\n\n rng = np.random.default_rng()\n init_probs = rng.random(pop_size)\n num_strata = max(strata) + 1\n\n for i in range(num_strata):\n stratum = init_probs[strata == i]\n diff = math.ceil(sum(stratum)) - sum(stratum)\n\n if stratum[-1] + diff < 1:\n stratum[-1] = stratum[-1] + diff\n else:\n stratum[-1] = stratum[-1] - (1 - diff)\n\n init_probs[strata == i] = stratum\n return init_probs\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.default_rng", "numpy.append", "numpy.repeat", "numpy.unique" ] ]
andrewhuman/ava_action_location
[ "0a7c4bd3e71b8f366dd5ae4174fdf8d7f5286f25" ]
[ "lib/datasets/ucf24data/util.py" ]
[ "import numpy as np\nfrom PIL import Image\nimport random\n\n\ndef read_image(path, dtype=np.float32, color=True):\n \"\"\"Read an image from a file.\n\n This function reads an image from given file. The image is CHW format and\n the range of its value is :math:`[0, 255]`. If :obj:`color = True`, the\n order of the channels is RGB.\n\n Args:\n path (str): A path of image file.\n dtype: The type of array. The default value is :obj:`~numpy.float32`.\n color (bool): This option determines the number of channels.\n If :obj:`True`, the number of channels is three. In this case,\n the order of the channels is RGB. This is the default behaviour.\n If :obj:`False`, this function returns a grayscale image.\n\n Returns:\n ~numpy.ndarray: An image.\n \"\"\"\n\n f = Image.open(path)\n try:\n if color:\n img = f.convert('RGB')\n else:\n img = f.convert('P')\n img = np.asarray(img, dtype=dtype)\n finally:\n if hasattr(f, 'close'):\n f.close()\n\n if img.ndim == 2:\n # reshape (H, W) -> (1, H, W)\n return img[np.newaxis]\n else:\n # transpose (H, W, C) -> (C, H, W)\n return img.transpose((2, 0, 1))\n\n\ndef resize_bbox(bbox, in_size, out_size):\n \"\"\"Resize bounding boxes according to image resize.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): An array whose shape is :math:`(R, 4)`.\n :math:`R` is the number of bounding boxes.\n in_size (tuple): A tuple of length 2. The height and the width\n of the image before resized.\n out_size (tuple): A tuple of length 2. The height and the width\n of the image after resized.\n\n Returns:\n ~numpy.ndarray:\n Bounding boxes rescaled according to the given image shapes.\n\n \"\"\"\n bbox = bbox.copy()\n y_scale = float(out_size[0]) / in_size[0]\n x_scale = float(out_size[1]) / in_size[1]\n bbox[:, 0] = y_scale * bbox[:, 0]\n bbox[:, 2] = y_scale * bbox[:, 2]\n bbox[:, 1] = x_scale * bbox[:, 1]\n bbox[:, 3] = x_scale * bbox[:, 3]\n return bbox\n\n\ndef flip_bbox(bbox, size, y_flip=False, x_flip=False):\n \"\"\"Flip bounding boxes accordingly.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): An array whose shape is :math:`(R, 4)`.\n :math:`R` is the number of bounding boxes.\n size (tuple): A tuple of length 2. 
The height and the width\n of the image before resized.\n y_flip (bool): Flip bounding box according to a vertical flip of\n an image.\n x_flip (bool): Flip bounding box according to a horizontal flip of\n an image.\n\n Returns:\n ~numpy.ndarray:\n Bounding boxes flipped according to the given flips.\n\n \"\"\"\n H, W = size\n bbox = bbox.copy()\n if y_flip:\n y_max = H - bbox[:, 0]\n y_min = H - bbox[:, 2]\n bbox[:, 0] = y_min\n bbox[:, 2] = y_max\n if x_flip:\n x_max = W - bbox[:, 0]\n x_min = W - bbox[:, 2]\n bbox[:, 0] = x_min\n bbox[:, 2] = x_max\n return bbox\n\n\ndef crop_bbox(\n bbox, y_slice=None, x_slice=None,\n allow_outside_center=True, return_param=False):\n \"\"\"Translate bounding boxes to fit within the cropped area of an image.\n\n This method is mainly used together with image cropping.\n This method translates the coordinates of bounding boxes like\n :func:`data.util.translate_bbox`. In addition,\n this function truncates the bounding boxes to fit within the cropped area.\n If a bounding box does not overlap with the cropped area,\n this bounding box will be removed.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is\n :math:`(R, 4)`. :math:`R` is the number of bounding boxes.\n y_slice (slice): The slice of y axis.\n x_slice (slice): The slice of x axis.\n allow_outside_center (bool): If this argument is :obj:`False`,\n bounding boxes whose centers are outside of the cropped area\n are removed. 
The default value is :obj:`True`.\n return_param (bool): If :obj:`True`, this function returns\n indices of kept bounding boxes.\n\n Returns:\n ~numpy.ndarray or (~numpy.ndarray, dict):\n\n If :obj:`return_param = False`, returns an array :obj:`bbox`.\n\n If :obj:`return_param = True`,\n returns a tuple whose elements are :obj:`bbox, param`.\n :obj:`param` is a dictionary of intermediate parameters whose\n contents are listed below with key, value-type and the description\n of the value.\n\n * **index** (*numpy.ndarray*): An array holding indices of used \\\n bounding boxes.\n\n \"\"\"\n\n t, b = _slice_to_bounds(y_slice)\n l, r = _slice_to_bounds(x_slice)\n crop_bb = np.array((t, l, b, r))\n\n if allow_outside_center:\n mask = np.ones(bbox.shape[0], dtype=bool)\n else:\n center = (bbox[:, :2] + bbox[:, 2:]) / 2.0\n mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]) \\\n .all(axis=1)\n\n bbox = bbox.copy()\n bbox[:, :2] = np.maximum(bbox[:, :2], crop_bb[:2])\n bbox[:, 2:] = np.minimum(bbox[:, 2:], crop_bb[2:])\n bbox[:, :2] -= crop_bb[:2]\n bbox[:, 2:] -= crop_bb[:2]\n\n mask = np.logical_and(mask, (bbox[:, :2] < bbox[:, 2:]).all(axis=1))\n bbox = bbox[mask]\n\n if return_param:\n return bbox, {'index': np.flatnonzero(mask)}\n else:\n return bbox\n\n\ndef _slice_to_bounds(slice_):\n if slice_ is None:\n return 0, np.inf\n\n if slice_.start is None:\n l = 0\n else:\n l = slice_.start\n\n if slice_.stop is None:\n u = np.inf\n else:\n u = slice_.stop\n\n return l, u\n\n\ndef translate_bbox(bbox, y_offset=0, x_offset=0):\n \"\"\"Translate bounding boxes.\n\n This method is mainly used together with image transforms, such as padding\n and cropping, which translates the left top point of the image from\n coordinate :math:`(0, 0)` to coordinate\n :math:`(y, x) = (y_{offset}, x_{offset})`.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is\n :math:`(R, 4)`. :math:`R` is the number of bounding boxes.\n y_offset (int or float): The offset along y axis.\n x_offset (int or float): The offset along x axis.\n\n Returns:\n ~numpy.ndarray:\n Bounding boxes translated according to the given offsets.\n\n \"\"\"\n\n out_bbox = bbox.copy()\n out_bbox[:, :2] += (y_offset, x_offset)\n out_bbox[:, 2:] += (y_offset, x_offset)\n\n return out_bbox\n\ndef flip(img,copy=False):\n img = img[:, :, ::-1]\n if copy:\n img = img.copy()\n\n return img\n\n\ndef random_flip(img, y_random=False, x_random=False,\n return_param=False, copy=False):\n \"\"\"Randomly flip an image in vertical or horizontal direction.\n\n Args:\n img (~numpy.ndarray): An array that gets flipped. 
This is in\n CHW format.\n y_random (bool): Randomly flip in vertical direction.\n x_random (bool): Randomly flip in horizontal direction.\n return_param (bool): Returns information of flip.\n copy (bool): If False, a view of :obj:`img` will be returned.\n\n Returns:\n ~numpy.ndarray or (~numpy.ndarray, dict):\n\n If :obj:`return_param = False`,\n returns an array :obj:`out_img` that is the result of flipping.\n\n If :obj:`return_param = True`,\n returns a tuple whose elements are :obj:`out_img, param`.\n :obj:`param` is a dictionary of intermediate parameters whose\n contents are listed below with key, value-type and the description\n of the value.\n\n * **y_flip** (*bool*): Whether the image was flipped in the\\\n vertical direction or not.\n * **x_flip** (*bool*): Whether the image was flipped in the\\\n horizontal direction or not.\n\n \"\"\"\n y_flip, x_flip = False, False\n if y_random:\n y_flip = random.choice([True, False])\n if x_random:\n x_flip = random.choice([True, False])\n\n if y_flip:\n img = img[:, ::-1, :]\n if x_flip:\n img = img[:, :, ::-1]\n\n if copy:\n img = img.copy()\n\n if return_param:\n return img, {'y_flip': y_flip, 'x_flip': x_flip}\n else:\n return img\n\n\ndef flip_img(img,copy =False):\n img = img[:, :, ::-1]\n if copy:\n img = img.copy()\n\n return img\n\n\ndef flip_box(bbox,size):\n H, W = size\n bbox_c = bbox.copy()\n\n x_max = W - bbox[:, 0]-1\n x_min = W - bbox[:, 2]-1\n bbox_c[:, 0] = x_min\n bbox_c[:, 2] = x_max\n assert (bbox_c[:, 2]>=bbox_c[:, 0]).all()\n\n return bbox_c" ]
[ [ "numpy.array", "numpy.asarray", "numpy.minimum", "numpy.flatnonzero", "numpy.ones", "numpy.logical_and", "numpy.maximum" ] ]
TommasoPino/pyquaternion
[ "5f55e38bc5adcd34db73b7dd9fa96d2391a2427a" ]
[ "pyquaternion/quaternion.py" ]
[ "\"\"\"\nThis file is part of the pyquaternion python module\n\nAuthor: Kieran Wynn\nWebsite: https://github.com/KieranWynn/pyquaternion\nDocumentation: http://kieranwynn.github.io/pyquaternion/\n\nVersion: 1.0.0\nLicense: The MIT License (MIT)\n\nCopyright (c) 2015 Kieran Wynn\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nquaternion.py - This file defines the core Quaternion class\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function # Add compatibility for Python 2.7+\n\nfrom math import sqrt, pi, sin, cos, asin, acos, atan2, exp, log\nfrom copy import deepcopy\nimport numpy as np # Numpy is required for many vector operations\n\n\nclass Quaternion:\n \"\"\"Class to represent a 4-dimensional complex number or quaternion.\n\n Quaternion objects can be used generically as 4D numbers,\n or as unit quaternions to represent rotations in 3D space.\n\n Attributes:\n q: Quaternion 4-vector represented as a Numpy array\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialise a new Quaternion object.\n\n See Object Initialisation docs for complete behaviour:\n\n https://kieranwynn.github.io/pyquaternion/#object-initialisation\n\n \"\"\"\n s = len(args)\n if s == 0:\n # No positional arguments supplied\n if kwargs:\n # Keyword arguments provided\n if (\"scalar\" in kwargs) or (\"vector\" in kwargs):\n scalar = kwargs.get(\"scalar\", 0.0)\n if scalar is None:\n scalar = 0.0\n else:\n scalar = float(scalar)\n\n vector = kwargs.get(\"vector\", [])\n vector = self._validate_number_sequence(vector, 3)\n\n self.q = np.hstack((scalar, vector))\n elif (\"real\" in kwargs) or (\"imaginary\" in kwargs):\n real = kwargs.get(\"real\", 0.0)\n if real is None:\n real = 0.0\n else:\n real = float(real)\n\n imaginary = kwargs.get(\"imaginary\", [])\n imaginary = self._validate_number_sequence(imaginary, 3)\n\n self.q = np.hstack((real, imaginary))\n elif (\"axis\" in kwargs) or (\"radians\" in kwargs) or (\"degrees\" in kwargs) or (\"angle\" in kwargs):\n try:\n axis = self._validate_number_sequence(kwargs[\"axis\"], 3)\n except KeyError:\n raise ValueError(\n \"A valid rotation 'axis' parameter must be provided to describe a meaningful rotation.\"\n )\n angle = kwargs.get('radians') or self.to_radians(kwargs.get('degrees')) or kwargs.get('angle') or 0.0\n self.q = Quaternion._from_axis_angle(axis, angle).q\n elif \"array\" in kwargs:\n self.q = self._validate_number_sequence(kwargs[\"array\"], 4)\n elif \"matrix\" in kwargs:\n optional_args = {key: kwargs[key] for key in kwargs if key in 
['rtol', 'atol']}\n self.q = Quaternion._from_matrix(kwargs[\"matrix\"], **optional_args).q\n else:\n keys = sorted(kwargs.keys())\n elements = [kwargs[kw] for kw in keys]\n if len(elements) == 1:\n r = float(elements[0])\n self.q = np.array([r, 0.0, 0.0, 0.0])\n else:\n self.q = self._validate_number_sequence(elements, 4)\n\n else:\n # Default initialisation\n self.q = np.array([1.0, 0.0, 0.0, 0.0])\n elif s == 1:\n # Single positional argument supplied\n if isinstance(args[0], Quaternion):\n self.q = args[0].q\n return\n if args[0] is None:\n raise TypeError(\"Object cannot be initialised from {}\".format(type(args[0])))\n try:\n r = float(args[0])\n self.q = np.array([r, 0.0, 0.0, 0.0])\n return\n except TypeError:\n pass # If the single argument is not scalar, it should be a sequence\n\n self.q = self._validate_number_sequence(args[0], 4)\n return\n\n else:\n # More than one positional argument supplied\n self.q = self._validate_number_sequence(args, 4)\n\n def __hash__(self):\n return hash(tuple(self.q))\n\n def _validate_number_sequence(self, seq, n):\n \"\"\"Validate a sequence to be of a certain length and ensure it's a numpy array of floats.\n\n Raises:\n ValueError: Invalid length or non-numeric value\n \"\"\"\n if seq is None:\n return np.zeros(n)\n if len(seq) == n:\n try:\n l = [float(e) for e in seq]\n except ValueError:\n raise ValueError(\"One or more elements in sequence <{!r}> cannot be interpreted as a real number\".format(seq))\n else:\n return np.asarray(l)\n elif len(seq) == 0:\n return np.zeros(n)\n else:\n raise ValueError(\"Unexpected number of elements in sequence. Got: {}, Expected: {}.\".format(len(seq), n))\n\n # Initialise from matrix\n @classmethod\n def _from_matrix(cls, matrix, rtol=1e-05, atol=1e-08):\n \"\"\"Initialise from matrix representation\n\n Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix\n (as a numpy array) from which the quaternion's rotation should be created.\n\n \"\"\"\n try:\n shape = matrix.shape\n except AttributeError:\n raise TypeError(\"Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix\")\n\n if shape == (3, 3):\n R = matrix\n elif shape == (4, 4):\n R = matrix[:-1][:,:-1] # Upper left 3x3 sub-matrix\n else:\n raise ValueError(\"Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix\")\n\n # Check matrix properties\n if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3), rtol=rtol, atol=atol):\n raise ValueError(\"Matrix must be orthogonal, i.e. its transpose should be its inverse\")\n if not np.isclose(np.linalg.det(R), 1.0, rtol=rtol, atol=atol):\n raise ValueError(\"Matrix must be special orthogonal i.e. 
its determinant must be +1.0\")\n\n def decomposition_method(matrix):\n \"\"\" Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!\n Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654\n \"\"\"\n x, y, z = 0, 1, 2 # indices\n K = np.array([\n [R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],\n [R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],\n [R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],\n [R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]\n ])\n K = K / 3.0\n\n e_vals, e_vecs = np.linalg.eig(K)\n print('Eigenvalues:', e_vals)\n print('Eigenvectors:', e_vecs)\n max_index = np.argmax(e_vals)\n principal_component = e_vecs[max_index]\n return principal_component\n\n def trace_method(matrix):\n \"\"\"\n This code uses a modification of the algorithm described in:\n https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf\n which is itself based on the method described here:\n http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/\n\n Altered to work with the column vector convention instead of row vectors\n \"\"\"\n # m = matrix.conj().transpose() # This method assumes row-vector and postmultiplication of that vector\n m = matrix # the transpose is not required in python\n # based on https://math.stackexchange.com/questions/893984/conversion-of-rotation-matrix-to-quaternion\n if m[2, 2] < 0:\n if m[0, 0] > m[1, 1]:\n t = 1 + m[0, 0] - m[1, 1] - m[2, 2]\n q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]\n else:\n t = 1 - m[0, 0] + m[1, 1] - m[2, 2]\n q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]\n else:\n if m[0, 0] < -m[1, 1]:\n t = 1 - m[0, 0] - m[1, 1] + m[2, 2]\n q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]\n else:\n t = 1 + m[0, 0] + m[1, 1] + m[2, 2]\n q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]\n\n q = np.array(q).astype('float64')\n q *= 0.5 / sqrt(t)\n return q\n\n return cls(array=trace_method(R))\n\n # Initialise from axis-angle\n @classmethod\n def _from_axis_angle(cls, axis, angle):\n \"\"\"Initialise from axis and angle representation\n\n Create a Quaternion by specifying the 3-vector rotation axis and rotation\n angle (in radians) from which the quaternion's rotation should be created.\n\n Params:\n axis: a valid numpy 3-vector\n angle: a real valued angle in radians\n \"\"\"\n mag_sq = np.dot(axis, axis)\n if mag_sq == 0.0:\n raise ZeroDivisionError(\"Provided rotation axis has no length\")\n # Ensure axis is in unit vector form\n if (abs(1.0 - mag_sq) > 1e-12):\n axis = axis / sqrt(mag_sq)\n theta = angle / 2.0\n r = cos(theta)\n i = axis * sin(theta)\n\n return cls(r, i[0], i[1], i[2])\n\n @classmethod\n def random(cls):\n \"\"\"Generate a random unit quaternion.\n\n Uniformly distributed across the rotation space\n As per: http://planning.cs.uiuc.edu/node198.html\n \"\"\"\n r1, r2, r3 = np.random.random(3)\n\n q1 = sqrt(1.0 - r1) * (sin(2 * pi * r2))\n q2 = sqrt(1.0 - r1) * (cos(2 * pi * r2))\n q3 = sqrt(r1) * (sin(2 * pi * r3))\n q4 = sqrt(r1) * (cos(2 * pi * r3))\n\n return cls(q1, q2, q3, q4)\n\n # Representation\n def __str__(self):\n \"\"\"An informal, nicely printable string representation of the Quaternion object.\n \"\"\"\n return \"{:.3f} {:+.3f}i {:+.3f}j {:+.3f}k\".format(self.q[0], self.q[1], self.q[2], self.q[3])\n\n def __repr__(self):\n \"\"\"The 'official' string representation of the Quaternion 
object.\n\n This is a string representation of a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate environment)\n \"\"\"\n return \"Quaternion({!r}, {!r}, {!r}, {!r})\".format(self.q[0], self.q[1], self.q[2], self.q[3])\n\n def __format__(self, formatstr):\n \"\"\"Inserts a customisable, nicely printable string representation of the Quaternion object\n\n The syntax for `format_spec` mirrors that of the built in format specifiers for floating point types.\n Check out the official Python [format specification mini-language](https://docs.python.org/3.4/library/string.html#formatspec) for details.\n \"\"\"\n if formatstr.strip() == '': # Defualt behaviour mirrors self.__str__()\n formatstr = '+.3f'\n\n string = \\\n \"{:\" + formatstr +\"} \" + \\\n \"{:\" + formatstr +\"}i \" + \\\n \"{:\" + formatstr +\"}j \" + \\\n \"{:\" + formatstr +\"}k\"\n return string.format(self.q[0], self.q[1], self.q[2], self.q[3])\n\n # Type Conversion\n def __int__(self):\n \"\"\"Implements type conversion to int.\n\n Truncates the Quaternion object by only considering the real\n component and rounding to the next integer value towards zero.\n Note: to round to the closest integer, use int(round(float(q)))\n \"\"\"\n return int(self.q[0])\n\n def __float__(self):\n \"\"\"Implements type conversion to float.\n\n Truncates the Quaternion object by only considering the real\n component.\n \"\"\"\n return float(self.q[0])\n\n def __complex__(self):\n \"\"\"Implements type conversion to complex.\n\n Truncates the Quaternion object by only considering the real\n component and the first imaginary component.\n This is equivalent to a projection from the 4-dimensional hypersphere\n to the 2-dimensional complex plane.\n \"\"\"\n return complex(self.q[0], self.q[1])\n\n def __bool__(self):\n return not (self == Quaternion(0.0))\n\n def __nonzero__(self):\n return not (self == Quaternion(0.0))\n\n def __invert__(self):\n return (self == Quaternion(0.0))\n\n # Comparison\n def __eq__(self, other):\n \"\"\"Returns true if the following is true for each element:\n `absolute(a - b) <= (atol + rtol * absolute(b))`\n \"\"\"\n if isinstance(other, Quaternion):\n r_tol = 1.0e-13\n a_tol = 1.0e-14\n try:\n isEqual = np.allclose(self.q, other.q, rtol=r_tol, atol=a_tol)\n except AttributeError:\n raise AttributeError(\"Error in internal quaternion representation means it cannot be compared like a numpy array.\")\n return isEqual\n return self.__eq__(self.__class__(other))\n\n # Negation\n def __neg__(self):\n return self.__class__(array= -self.q)\n\n # Absolute value\n def __abs__(self):\n return self.norm\n\n # Addition\n def __add__(self, other):\n if isinstance(other, Quaternion):\n return self.__class__(array=self.q + other.q)\n return self + self.__class__(other)\n\n def __iadd__(self, other):\n return self + other\n\n def __radd__(self, other):\n return self + other\n\n # Subtraction\n def __sub__(self, other):\n return self + (-other)\n\n def __isub__(self, other):\n return self + (-other)\n\n def __rsub__(self, other):\n return -(self - other)\n\n # Multiplication\n def __mul__(self, other):\n if isinstance(other, Quaternion):\n return self.__class__(array=np.dot(self._q_matrix(), other.q))\n return self * self.__class__(other)\n\n def __imul__(self, other):\n return self * other\n\n def __rmul__(self, other):\n return self.__class__(other) * self\n\n def __matmul__(self, other):\n if isinstance(other, Quaternion):\n return self.q.__matmul__(other.q)\n return 
self.__matmul__(self.__class__(other))\n\n def __imatmul__(self, other):\n return self.__matmul__(other)\n\n def __rmatmul__(self, other):\n return self.__class__(other).__matmul__(self)\n\n # Division\n def __div__(self, other):\n if isinstance(other, Quaternion):\n if other == self.__class__(0.0):\n raise ZeroDivisionError(\"Quaternion divisor must be non-zero\")\n return self * other.inverse\n return self.__div__(self.__class__(other))\n\n def __idiv__(self, other):\n return self.__div__(other)\n\n def __rdiv__(self, other):\n return self.__class__(other) * self.inverse\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __itruediv__(self, other):\n return self.__idiv__(other)\n\n def __rtruediv__(self, other):\n return self.__rdiv__(other)\n\n # Exponentiation\n def __pow__(self, exponent):\n # source: https://en.wikipedia.org/wiki/Quaternion#Exponential.2C_logarithm.2C_and_power\n exponent = float(exponent) # Explicitly reject non-real exponents\n norm = self.norm\n if norm > 0.0:\n try:\n n, theta = self.polar_decomposition\n except ZeroDivisionError:\n # quaternion is a real number (no vector or imaginary part)\n return Quaternion(scalar=self.scalar ** exponent)\n return (self.norm ** exponent) * Quaternion(scalar=cos(exponent * theta), vector=(n * sin(exponent * theta)))\n return Quaternion(self)\n\n def __ipow__(self, other):\n return self ** other\n\n def __rpow__(self, other):\n return other ** float(self)\n\n # Quaternion Features\n def _vector_conjugate(self):\n return np.hstack((self.q[0], -self.q[1:4]))\n\n def _sum_of_squares(self):\n return np.dot(self.q, self.q)\n\n @property\n def conjugate(self):\n \"\"\"Quaternion conjugate, encapsulated in a new instance.\n\n For a unit quaternion, this is the same as the inverse.\n\n Returns:\n A new Quaternion object clone with its vector part negated\n \"\"\"\n return self.__class__(scalar=self.scalar, vector=-self.vector)\n\n @property\n def inverse(self):\n \"\"\"Inverse of the quaternion object, encapsulated in a new instance.\n\n For a unit quaternion, this is the inverse rotation, i.e. when combined with the original rotation, will result in the null rotation.\n\n Returns:\n A new Quaternion object representing the inverse of this object\n \"\"\"\n ss = self._sum_of_squares()\n if ss > 0:\n return self.__class__(array=(self._vector_conjugate() / ss))\n else:\n raise ZeroDivisionError(\"a zero quaternion (0 + 0i + 0j + 0k) cannot be inverted\")\n\n @property\n def norm(self):\n \"\"\"L2 norm of the quaternion 4-vector.\n\n This should be 1.0 for a unit quaternion (versor)\n Slow but accurate. 
If speed is a concern, consider using _fast_normalise() instead\n\n Returns:\n A scalar real number representing the square root of the sum of the squares of the elements of the quaternion.\n \"\"\"\n mag_squared = self._sum_of_squares()\n return sqrt(mag_squared)\n\n @property\n def magnitude(self):\n return self.norm\n\n def _normalise(self):\n \"\"\"Object is guaranteed to be a unit quaternion after calling this\n operation UNLESS the object is equivalent to Quaternion(0)\n \"\"\"\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n\n\n def _fast_normalise(self):\n \"\"\"Normalise the object to a unit quaternion using a fast approximation method if appropriate.\n\n Object is guaranteed to be a quaternion of approximately unit length\n after calling this operation UNLESS the object is equivalent to Quaternion(0)\n \"\"\"\n if not self.is_unit():\n mag_squared = np.dot(self.q, self.q)\n if (mag_squared == 0):\n return\n if (abs(1.0 - mag_squared) < 2.107342e-08):\n mag = ((1.0 + mag_squared) / 2.0) # More efficient. Pade approximation valid if error is small\n else:\n mag = sqrt(mag_squared) # Error is too big, take the performance hit to calculate the square root properly\n\n self.q = self.q / mag\n\n @property\n def normalised(self):\n \"\"\"Get a unit quaternion (versor) copy of this Quaternion object.\n\n A unit quaternion has a `norm` of 1.0\n\n Returns:\n A new Quaternion object clone that is guaranteed to be a unit quaternion\n \"\"\"\n q = Quaternion(self)\n q._normalise()\n return q\n\n @property\n def polar_unit_vector(self):\n vector_length = np.linalg.norm(self.vector)\n if vector_length <= 0.0:\n raise ZeroDivisionError('Quaternion is pure real and does not have a unique unit vector')\n return self.vector / vector_length\n\n @property\n def polar_angle(self):\n return acos(self.scalar / self.norm)\n\n @property\n def polar_decomposition(self):\n \"\"\"\n Returns the unit vector and angle of a non-scalar quaternion according to the following decomposition\n\n q = q.norm() * (e ** (q.polar_unit_vector * q.polar_angle))\n\n source: https://en.wikipedia.org/wiki/Polar_decomposition#Quaternion_polar_decomposition\n \"\"\"\n return self.polar_unit_vector, self.polar_angle\n\n @property\n def unit(self):\n return self.normalised\n\n def is_unit(self, tolerance=1e-14):\n \"\"\"Determine whether the quaternion is of unit length to within a specified tolerance value.\n\n Params:\n tolerance: [optional] maximum absolute value by which the norm can differ from 1.0 for the object to be considered a unit quaternion. Defaults to `1e-14`.\n\n Returns:\n `True` if the Quaternion object is of unit length to within the specified tolerance value. `False` otherwise.\n \"\"\"\n return abs(1.0 - self._sum_of_squares()) < tolerance # if _sum_of_squares is 1, norm is 1. 
This saves a call to sqrt()\n\n def _q_matrix(self):\n \"\"\"Matrix representation of quaternion for multiplication purposes.\n \"\"\"\n return np.array([\n [self.q[0], -self.q[1], -self.q[2], -self.q[3]],\n [self.q[1], self.q[0], -self.q[3], self.q[2]],\n [self.q[2], self.q[3], self.q[0], -self.q[1]],\n [self.q[3], -self.q[2], self.q[1], self.q[0]]])\n\n def _q_bar_matrix(self):\n \"\"\"Matrix representation of quaternion for multiplication purposes.\n \"\"\"\n return np.array([\n [self.q[0], -self.q[1], -self.q[2], -self.q[3]],\n [self.q[1], self.q[0], self.q[3], -self.q[2]],\n [self.q[2], -self.q[3], self.q[0], self.q[1]],\n [self.q[3], self.q[2], -self.q[1], self.q[0]]])\n\n def _rotate_quaternion(self, q):\n \"\"\"Rotate a quaternion vector using the stored rotation.\n\n Params:\n q: The vector to be rotated, in quaternion form (0 + xi + yj + kz)\n\n Returns:\n A Quaternion object representing the rotated vector in quaternion from (0 + xi + yj + kz)\n \"\"\"\n self._normalise()\n return self * q * self.conjugate\n \n def _rotate_quaternion_fast(self, v):\n \"\"\"Rotate a quaternion vector using the stored rotation.\n\n Params:\n v: The vector to be rotated, in vect form [x, y, z]\n\n Returns:\n A Quaternion object representing the rotated vector in quaternion from (0 + xi + yj + kz)\n \"\"\"\n \n self._normalise()\n u = self.elements[1:]\n s = self.w\n return 2 *np.dot(u,v)*u +(s*s - np.dot(u,u)) * v + 2 * s * np.cross(u,v)\n\n def rotate(self, vector):\n \"\"\"Rotate a 3D vector by the rotation stored in the Quaternion object.\n\n Params:\n vector: A 3-vector specified as any ordered sequence of 3 real numbers corresponding to x, y, and z values.\n Some types that are recognised are: numpy arrays, lists and tuples.\n A 3-vector can also be represented by a Quaternion object who's scalar part is 0 and vector part is the required 3-vector.\n Thus it is possible to call `Quaternion.rotate(q)` with another quaternion object as an input.\n\n Returns:\n The rotated vector returned as the same type it was specified at input.\n\n Raises:\n TypeError: if any of the vector elements cannot be converted to a real number.\n ValueError: if `vector` cannot be interpreted as a 3-vector or a Quaternion object.\n\n \"\"\"\n if isinstance(vector, Quaternion):\n return self._rotate_quaternion(vector)\n# q = Quaternion(vector=vector)\n a = self._rotate_quaternion_fast(vector)\n if isinstance(vector, list):\n l = [x for x in a]\n return l\n elif isinstance(vector, tuple):\n l = [x for x in a]\n return tuple(l)\n else:\n return a\n\n @classmethod\n def exp(cls, q):\n \"\"\"Quaternion Exponential.\n\n Find the exponential of a quaternion amount.\n\n Params:\n q: the input quaternion/argument as a Quaternion object.\n\n Returns:\n A quaternion amount representing the exp(q). 
See [Source](https://math.stackexchange.com/questions/1030737/exponential-function-of-quaternion-derivation for more information and mathematical background).\n\n Note:\n The method can compute the exponential of any quaternion.\n \"\"\"\n tolerance = 1e-17\n v_norm = np.linalg.norm(q.vector)\n vec = q.vector\n if v_norm > tolerance:\n vec = vec / v_norm\n magnitude = exp(q.scalar)\n return Quaternion(scalar = magnitude * cos(v_norm), vector = magnitude * sin(v_norm) * vec)\n\n @classmethod\n def log(cls, q):\n \"\"\"Quaternion Logarithm.\n\n Find the logarithm of a quaternion amount.\n\n Params:\n q: the input quaternion/argument as a Quaternion object.\n\n Returns:\n A quaternion amount representing log(q) := (log(|q|), v/|v|acos(w/|q|)).\n\n Note:\n The method computes the logarithm of general quaternions. See [Source](https://math.stackexchange.com/questions/2552/the-logarithm-of-quaternion/2554#2554) for more details.\n \"\"\"\n v_norm = np.linalg.norm(q.vector)\n q_norm = q.norm\n tolerance = 1e-17\n if q_norm < tolerance:\n # 0 quaternion - undefined\n return Quaternion(scalar=-float('inf'), vector=float('nan')*q.vector)\n if v_norm < tolerance:\n # real quaternions - no imaginary part\n return Quaternion(scalar=log(q_norm), vector=[0, 0, 0])\n vec = q.vector / v_norm\n return Quaternion(scalar=log(q_norm), vector=acos(q.scalar/q_norm)*vec)\n\n @classmethod\n def exp_map(cls, q, eta):\n \"\"\"Quaternion exponential map.\n\n Find the exponential map on the Riemannian manifold described\n by the quaternion space.\n\n Params:\n q: the base point of the exponential map, i.e. a Quaternion object\n eta: the argument of the exponential map, a tangent vector, i.e. a Quaternion object\n\n Returns:\n A quaternion p such that p is the endpoint of the geodesic starting at q\n in the direction of eta, having the length equal to the magnitude of eta.\n\n Note:\n The exponential map plays an important role in integrating orientation\n variations (e.g. angular velocities). 
This is done by projecting\n quaternion tangent vectors onto the quaternion manifold.\n \"\"\"\n return q * Quaternion.exp(eta)\n\n @classmethod\n def sym_exp_map(cls, q, eta):\n \"\"\"Quaternion symmetrized exponential map.\n\n Find the symmetrized exponential map on the quaternion Riemannian\n manifold.\n\n Params:\n q: the base point as a Quaternion object\n eta: the tangent vector argument of the exponential map\n as a Quaternion object\n\n Returns:\n A quaternion p.\n\n Note:\n The symmetrized exponential formulation is akin to the exponential\n formulation for symmetric positive definite tensors [Source](http://www.academia.edu/7656761/On_the_Averaging_of_Symmetric_Positive-Definite_Tensors)\n \"\"\"\n sqrt_q = q ** 0.5\n return sqrt_q * Quaternion.exp(eta) * sqrt_q\n\n @classmethod\n def log_map(cls, q, p):\n \"\"\"Quaternion logarithm map.\n\n Find the logarithm map on the quaternion Riemannian manifold.\n\n Params:\n q: the base point at which the logarithm is computed, i.e.\n a Quaternion object\n p: the argument of the quaternion map, a Quaternion object\n\n Returns:\n A tangent vector having the length and direction given by the\n geodesic joining q and p.\n \"\"\"\n return Quaternion.log(q.inverse * p)\n\n @classmethod\n def sym_log_map(cls, q, p):\n \"\"\"Quaternion symmetrized logarithm map.\n\n Find the symmetrized logarithm map on the quaternion Riemannian manifold.\n\n Params:\n q: the base point at which the logarithm is computed, i.e.\n a Quaternion object\n p: the argument of the quaternion map, a Quaternion object\n\n Returns:\n A tangent vector corresponding to the symmetrized geodesic curve formulation.\n\n Note:\n Information on the symmetrized formulations given in [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).\n \"\"\"\n inv_sqrt_q = (q ** (-0.5))\n return Quaternion.log(inv_sqrt_q * p * inv_sqrt_q)\n\n @classmethod\n def absolute_distance(cls, q0, q1):\n \"\"\"Quaternion absolute distance.\n\n Find the distance between two quaternions accounting for the sign ambiguity.\n\n Params:\n q0: the first quaternion\n q1: the second quaternion\n\n Returns:\n A positive scalar corresponding to the chord of the shortest path/arc that\n connects q0 to q1.\n\n Note:\n This function does not measure the distance on the hypersphere, but\n it takes into account the fact that q and -q encode the same rotation.\n It is thus a good indicator for rotation similarities.\n \"\"\"\n q0_minus_q1 = q0 - q1\n q0_plus_q1 = q0 + q1\n d_minus = q0_minus_q1.norm\n d_plus = q0_plus_q1.norm\n if d_minus < d_plus:\n return d_minus\n else:\n return d_plus\n\n @classmethod\n def distance(cls, q0, q1):\n \"\"\"Quaternion intrinsic distance.\n\n Find the intrinsic geodesic distance between q0 and q1.\n\n Params:\n q0: the first quaternion\n q1: the second quaternion\n\n Returns:\n A positive amount corresponding to the length of the geodesic arc\n connecting q0 to q1.\n\n Note:\n Although the q0^(-1)*q1 != q1^(-1)*q0, the length of the path joining\n them is given by the logarithm of those product quaternions, the norm\n of which is the same.\n \"\"\"\n q = Quaternion.log_map(q0, q1)\n return q.norm\n\n @classmethod\n def sym_distance(cls, q0, q1):\n \"\"\"Quaternion symmetrized distance.\n\n Find the intrinsic symmetrized geodesic distance between q0 and q1.\n\n Params:\n q0: the first quaternion\n q1: the second quaternion\n\n Returns:\n A positive amount corresponding to the length of the symmetrized\n geodesic curve 
connecting q0 to q1.\n\n Note:\n This formulation is more numerically stable when performing\n iterative gradient descent on the Riemannian quaternion manifold.\n However, the distance between q and -q is equal to pi, rendering this\n formulation not useful for measuring rotation similarities when the\n samples are spread over a \"solid\" angle of more than pi/2 radians\n (the spread refers to quaternions as point samples on the unit hypersphere).\n \"\"\"\n q = Quaternion.sym_log_map(q0, q1)\n return q.norm\n\n @classmethod\n def slerp(cls, q0, q1, amount=0.5):\n \"\"\"Spherical Linear Interpolation between quaternions.\n Implemented as described in https://en.wikipedia.org/wiki/Slerp\n\n Find a valid quaternion rotation at a specified distance along the\n minor arc of a great circle passing through any two existing quaternion\n endpoints lying on the unit radius hypersphere.\n\n This is a class method and is called as a method of the class itself rather than on a particular instance.\n\n Params:\n q0: first endpoint rotation as a Quaternion object\n q1: second endpoint rotation as a Quaternion object\n amount: interpolation parameter between 0 and 1. This describes the linear placement position of\n the result along the arc between endpoints; 0 being at `q0` and 1 being at `q1`.\n Defaults to the midpoint (0.5).\n\n Returns:\n A new Quaternion object representing the interpolated rotation. This is guaranteed to be a unit quaternion.\n\n Note:\n This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).\n Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.\n \"\"\"\n # Ensure quaternion inputs are unit quaternions and 0 <= amount <=1\n q0._fast_normalise()\n q1._fast_normalise()\n amount = np.clip(amount, 0, 1)\n\n dot = np.dot(q0.q, q1.q)\n\n # If the dot product is negative, slerp won't take the shorter path.\n # Note that v1 and -v1 are equivalent when the negation is applied to all four components.\n # Fix by reversing one quaternion\n if dot < 0.0:\n q0.q = -q0.q\n dot = -dot\n\n # sin_theta_0 can not be zero\n if dot > 0.9995:\n qr = Quaternion(q0.q + amount * (q1.q - q0.q))\n qr._fast_normalise()\n return qr\n\n theta_0 = np.arccos(dot) # Since dot is in range [0, 0.9995], np.arccos() is safe\n sin_theta_0 = np.sin(theta_0)\n\n theta = theta_0 * amount\n sin_theta = np.sin(theta)\n\n s0 = np.cos(theta) - dot * sin_theta / sin_theta_0\n s1 = sin_theta / sin_theta_0\n qr = Quaternion((s0 * q0.q) + (s1 * q1.q))\n qr._fast_normalise()\n return qr\n\n @classmethod\n def intermediates(cls, q0, q1, n, include_endpoints=False):\n \"\"\"Generator method to get an iterable sequence of `n` evenly spaced quaternion\n rotations between any two existing quaternion endpoints lying on the unit\n radius hypersphere.\n\n This is a convenience function that is based on `Quaternion.slerp()` as defined above.\n\n This is a class method and is called as a method of the class itself rather than on a particular instance.\n\n Params:\n q_start: initial endpoint rotation as a Quaternion object\n q_end: final endpoint rotation as a Quaternion object\n n: number of intermediate quaternion objects to include within the interval\n include_endpoints: [optional] if set to `True`, the sequence of intermediates\n will be 'bookended' by `q_start` and `q_end`, resulting in a sequence length of `n + 2`.\n If set to `False`, endpoints are not included. 
Defaults to `False`.\n\n Yields:\n A generator object iterating over a sequence of intermediate quaternion objects.\n\n Note:\n This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).\n Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.\n \"\"\"\n step_size = 1.0 / (n + 1)\n if include_endpoints:\n steps = [i * step_size for i in range(0, n + 2)]\n else:\n steps = [i * step_size for i in range(1, n + 1)]\n for step in steps:\n yield cls.slerp(q0, q1, step)\n\n def derivative(self, rate):\n \"\"\"Get the instantaneous quaternion derivative representing a quaternion rotating at a 3D rate vector `rate`\n\n Params:\n rate: numpy 3-array (or array-like) describing rotation rates about the global x, y and z axes respectively.\n\n Returns:\n A unit quaternion describing the rotation rate\n \"\"\"\n rate = self._validate_number_sequence(rate, 3)\n return 0.5 * self * Quaternion(vector=rate)\n\n def integrate(self, rate, timestep):\n \"\"\"Advance a time varying quaternion to its value at a time `timestep` in the future.\n\n The Quaternion object will be modified to its future value.\n It is guaranteed to remain a unit quaternion.\n\n Params:\n\n rate: numpy 3-array (or array-like) describing rotation rates about the\n global x, y and z axes respectively.\n timestep: interval over which to integrate into the future.\n Assuming *now* is `T=0`, the integration occurs over the interval\n `T=0` to `T=timestep`. Smaller intervals are more accurate when\n `rate` changes over time.\n\n Note:\n The solution is closed form given the assumption that `rate` is constant\n over the interval of length `timestep`.\n \"\"\"\n self._fast_normalise()\n rate = self._validate_number_sequence(rate, 3)\n\n rotation_vector = rate * timestep\n rotation_norm = np.linalg.norm(rotation_vector)\n if rotation_norm > 0:\n axis = rotation_vector / rotation_norm\n angle = rotation_norm\n q2 = Quaternion(axis=axis, angle=angle)\n self.q = (self * q2).q\n self._fast_normalise()\n\n\n @property\n def rotation_matrix(self):\n \"\"\"Get the 3x3 rotation matrix equivalent of the quaternion rotation.\n\n Returns:\n A 3x3 orthogonal rotation matrix as a 3x3 Numpy array\n\n Note:\n This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.\n\n \"\"\"\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:, 1:].T # added the required transposition in order to correct the Trace method for Quaternion calculation\n\n @property\n def transformation_matrix(self):\n \"\"\"Get the 4x4 homogeneous transformation matrix equivalent of the quaternion rotation.\n\n Returns:\n A 4x4 homogeneous transformation matrix as a 4x4 Numpy array\n\n Note:\n This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.\n \"\"\"\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])\n\n @property\n def yaw_pitch_roll(self):\n \"\"\"Get the equivalent yaw-pitch-roll angles aka. 
intrinsic Tait-Bryan angles following the z-y'-x'' convention\n\n Returns:\n yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`\n pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, -pi/2]`\n roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]`\n\n The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)\n\n Note:\n This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.\n \"\"\"\n\n self._normalise()\n yaw = np.arctan2(2 * (self.q[0] * self.q[3] - self.q[1] * self.q[2]),\n 1 - 2 * (self.q[2] ** 2 + self.q[3] ** 2))\n pitch = np.arcsin(2 * (self.q[0] * self.q[2] + self.q[3] * self.q[1]))\n roll = np.arctan2(2 * (self.q[0] * self.q[1] - self.q[2] * self.q[3]),\n 1 - 2 * (self.q[1] ** 2 + self.q[2] ** 2))\n\n return yaw, pitch, roll\n\n def _wrap_angle(self, theta):\n \"\"\"Helper method: Wrap any angle to lie between -pi and pi\n\n Odd multiples of pi are wrapped to +pi (as opposed to -pi)\n \"\"\"\n result = ((theta + pi) % (2 * pi)) - pi\n if result == -pi:\n result = pi\n return result\n\n def get_axis(self, undefined=np.zeros(3)):\n \"\"\"Get the axis or vector about which the quaternion rotation occurs\n\n For a null rotation (a purely real quaternion), the rotation angle will\n always be `0`, but the rotation axis is undefined.\n It is by default assumed to be `[0, 0, 0]`.\n\n Params:\n undefined: [optional] specify the axis vector that should define a null rotation.\n This is geometrically meaningless, and could be any of an infinite set of vectors,\n but can be specified if the default (`[0, 0, 0]`) causes undesired behaviour.\n\n Returns:\n A Numpy unit 3-vector describing the Quaternion object's axis of rotation.\n\n Note:\n This feature only makes sense when referring to a unit quaternion.\n Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.\n \"\"\"\n tolerance = 1e-17\n self._normalise()\n norm = np.linalg.norm(self.vector)\n if norm < tolerance:\n # Here there are an infinite set of possible axes, use what has been specified as an undefined axis.\n return undefined\n else:\n return self.vector / norm\n\n @property\n def axis(self):\n return self.get_axis()\n\n @property\n def angle(self):\n \"\"\"Get the angle (in radians) describing the magnitude of the quaternion rotation about its rotation axis.\n\n This is guaranteed to be within the range (-pi:pi) with the direction of\n rotation indicated by the sign.\n\n When a particular rotation describes a 180 degree rotation about an arbitrary\n axis vector `v`, the conversion to axis / angle representation may jump\n discontinuously between all permutations of `(-pi, pi)` and `(-v, v)`,\n each being geometrically equivalent (see Note in documentation).\n\n Returns:\n A real number in the range (-pi:pi) describing the angle of rotation\n in radians about a Quaternion object's axis of rotation.\n\n Note:\n This feature only makes sense when referring to a unit quaternion.\n Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.\n \"\"\"\n self._normalise()\n norm = np.linalg.norm(self.vector)\n return self._wrap_angle(2.0 * atan2(norm, self.scalar))\n\n @property\n def degrees(self):\n return self.to_degrees(self.angle)\n\n @property\n def radians(self):\n return self.angle\n\n @property\n def 
scalar(self):\n \"\"\" Return the real or scalar component of the quaternion object.\n\n Returns:\n A real number i.e. float\n \"\"\"\n return self.q[0]\n\n @property\n def vector(self):\n \"\"\" Return the imaginary or vector component of the quaternion object.\n\n Returns:\n A numpy 3-array of floats. NOT guaranteed to be a unit vector\n \"\"\"\n return self.q[1:4]\n\n @property\n def real(self):\n return self.scalar\n\n @property\n def imaginary(self):\n return self.vector\n\n @property\n def w(self):\n return self.q[0]\n\n @property\n def x(self):\n return self.q[1]\n\n @property\n def y(self):\n return self.q[2]\n\n @property\n def z(self):\n return self.q[3]\n\n @property\n def elements(self):\n \"\"\" Return all the elements of the quaternion object.\n\n Returns:\n A numpy 4-array of floats. NOT guaranteed to be a unit vector\n \"\"\"\n return self.q\n\n def __getitem__(self, index):\n index = int(index)\n return self.q[index]\n\n def __setitem__(self, index, value):\n index = int(index)\n self.q[index] = float(value)\n\n def __copy__(self):\n result = self.__class__(self.q)\n return result\n\n def __deepcopy__(self, memo):\n result = self.__class__(deepcopy(self.q, memo))\n memo[id(self)] = result\n return result\n\n @staticmethod\n def to_degrees(angle_rad):\n if angle_rad is not None:\n return float(angle_rad) / pi * 180.0\n\n @staticmethod\n def to_radians(angle_deg):\n if angle_deg is not None:\n return float(angle_deg) / 180.0 * pi\n" ]
[ [ "numpy.dot", "numpy.arccos", "numpy.cos", "numpy.random.random", "numpy.sin", "numpy.linalg.norm", "numpy.arcsin", "numpy.eye", "numpy.argmax", "numpy.cross", "numpy.array", "numpy.zeros", "numpy.linalg.det", "numpy.allclose", "numpy.arctan2", "numpy.clip", "numpy.hstack", "numpy.asarray", "numpy.linalg.eig" ] ]
rsasaki0109/ensemble_kalman_filter
[ "d5e9d71c8726357379dc121515a8b370dbb8bc7f" ]
[ "ensemble_kalman_filter.py" ]
[ "\n\"\"\"\n\nEnsemble Kalman Filter(EnKF) localization sample\n\nauthor: Ryohei Sasaki(rsasaki0109)\n\nRef:\n- [Ensemble Kalman filtering](https://rmets.onlinelibrary.wiley.com/doi/10.1256/qj.05.135)\n\n\"\"\"\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n# Simulation parameter\nQsim = np.diag([0.2, np.deg2rad(1.0)])**2\nRsim = np.diag([1.0, np.deg2rad(30.0)])**2\n\nDT = 0.1 # time tick [s]\nSIM_TIME = 50.0 # simulation time [s]\nMAX_RANGE = 20.0 # maximum observation range\n\n# Ensemble Kalman filter parameter\nNP = 20 # Number of Particle\n\nshow_animation = True\n\n\ndef calc_input():\n v = 1.0 # [m/s]\n yawrate = 0.1 # [rad/s]\n u = np.array([[v, yawrate]]).T\n return u\n\n\ndef observation(xTrue, xd, u, RFID):\n\n xTrue = motion_model(xTrue, u)\n\n z = np.zeros((0, 3))\n\n for i in range(len(RFID[:, 0])):\n\n dx = RFID[i, 0] - xTrue[0, 0]\n dy = RFID[i, 1] - xTrue[1, 0]\n d = math.sqrt(dx**2 + dy**2)\n if d <= MAX_RANGE:\n dn = d + np.random.randn() * Qsim[0, 0] # add noise\n zi = np.array([dn, RFID[i, 0], RFID[i, 1]])\n z = np.vstack((z, zi))\n\n # add noise to input\n ud = np.array([[\n u[0, 0] + np.random.randn() * Rsim[0, 0],\n u[1, 0] + np.random.randn() * Rsim[1, 1]]]).T\n\n xd = motion_model(xd, ud)\n return xTrue, z, xd, ud\n\n\ndef motion_model(x, u):\n F = np.array([[1.0, 0, 0, 0],\n [0, 1.0, 0, 0],\n [0, 0, 1.0, 0],\n [0, 0, 0, 0]])\n\n B = np.array([[DT * math.cos(x[2, 0]), 0],\n [DT * math.sin(x[2, 0]), 0],\n [0.0, DT],\n [1.0, 0.0]])\n x = F.dot(x) + B.dot(u)\n\n return x\n\n\ndef calc_covariance(xEst, px):\n cov = np.zeros((3, 3))\n\n for i in range(px.shape[1]):\n dx = (px[:, i] - xEst)[0:3]\n cov += dx.dot(dx.T)\n\n return cov\n\n\ndef enkf_localization(px, xEst, PEst, z, u):\n \"\"\"\n Localization with Ensemble Kalman filter\n \"\"\"\n pz = np.zeros((z.shape[0], NP)) # Particle store of z\n for ip in range(NP):\n x = np.array([px[:, ip]]).T\n\n # Predict with random input sampling\n ud1 = u[0, 0] + np.random.randn() * Rsim[0, 0]\n ud2 = u[1, 0] + np.random.randn() * Rsim[1, 1]\n ud = np.array([[ud1, ud2]]).T\n x = motion_model(x, ud)\n px[:, ip] = x[:, 0]\n\n for i in range(len(z[:, 0])):\n dx = x[0, 0] - z[i, 1]\n dy = x[1, 0] - z[i, 2]\n prez = math.sqrt(dx**2 + dy**2) + np.random.randn() * Qsim[0, 0] # add noise\n pz[i, ip] = prez\n\n x_ave = np.mean(px, axis=1)\n x_dif = px - np.tile(x_ave, (NP, 1)).T\n\n z_ave = np.mean(pz, axis=1)\n z_dif = pz - np.tile(z_ave, (NP, 1)).T\n\n U = 1/(NP-1) * x_dif @ z_dif.T\n V = 1/(NP-1) * z_dif @ z_dif.T\n\n K = U @ np.linalg.inv(V) # Kalman Gain\n\n px_hat = px + K @ (np.tile(z[:,0], (NP, 1)).T - pz)\n\n xEst = np.average(px_hat, axis=1).reshape(4, 1)\n PEst = calc_covariance(xEst, px_hat)\n\n return xEst, PEst, px_hat\n\n\ndef plot_covariance_ellipse(xEst, PEst): # pragma: no cover\n Pxy = PEst[0:2, 0:2]\n eigval, eigvec = np.linalg.eig(Pxy)\n\n if eigval[0] >= eigval[1]:\n bigind = 0\n smallind = 1\n else:\n bigind = 1\n smallind = 0\n\n t = np.arange(0, 2 * math.pi + 0.1, 0.1)\n\n # eigval[bigind] or eiqval[smallind] were occassionally negative numbers extremely\n # close to 0 (~10^-20), catch these cases and set the respective variable to 0\n try:\n a = math.sqrt(eigval[bigind])\n except ValueError:\n a = 0\n\n try:\n b = math.sqrt(eigval[smallind])\n except ValueError:\n b = 0\n\n x = [a * math.cos(it) for it in t]\n y = [b * math.sin(it) for it in t]\n angle = math.atan2(eigvec[bigind, 1], eigvec[bigind, 0])\n R = np.array([[math.cos(angle), math.sin(angle)],\n [-math.sin(angle), 
math.cos(angle)]])\n fx = R.dot(np.array([[x, y]]))\n px = np.array(fx[0, :] + xEst[0, 0]).flatten()\n py = np.array(fx[1, :] + xEst[1, 0]).flatten()\n plt.plot(px, py, \"--r\")\n\n\ndef main():\n print(__file__ + \" start!!\")\n\n time = 0.0\n\n # RFID positions [x, y]\n RFID = np.array([[10.0, 0.0],\n [10.0, 10.0],\n [0.0, 15.0],\n [-5.0, 20.0]])\n\n # State Vector [x y yaw v]'\n xEst = np.zeros((4, 1))\n xTrue = np.zeros((4, 1))\n PEst = np.eye(4)\n\n px = np.zeros((4, NP)) # Particle store of x\n\n xDR = np.zeros((4, 1)) # Dead reckoning\n\n # history\n hxEst = xEst\n hxTrue = xTrue\n hxDR = xTrue\n\n while SIM_TIME >= time:\n time += DT\n u = calc_input()\n\n xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFID)\n\n xEst, PEst, px = enkf_localization(px, xEst, PEst, z, ud)\n\n # store data history\n hxEst = np.hstack((hxEst, xEst))\n hxDR = np.hstack((hxDR, xDR))\n hxTrue = np.hstack((hxTrue, xTrue))\n\n if show_animation:\n plt.cla()\n\n for i in range(len(z[:, 0])):\n plt.plot([xTrue[0, 0], z[i, 1]], [xTrue[1, 0], z[i, 2]], \"-k\")\n plt.plot(RFID[:, 0], RFID[:, 1], \"*k\")\n plt.plot(px[0, :], px[1, :], \".r\")\n plt.plot(np.array(hxTrue[0, :]).flatten(),\n np.array(hxTrue[1, :]).flatten(), \"-b\")\n plt.plot(np.array(hxDR[0, :]).flatten(),\n np.array(hxDR[1, :]).flatten(), \"-k\")\n plt.plot(np.array(hxEst[0, :]).flatten(),\n np.array(hxEst[1, :]).flatten(), \"-r\")\n #plot_covariance_ellipse(xEst, PEst)\n plt.axis(\"equal\")\n plt.grid(True)\n plt.pause(0.001)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.array", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "numpy.tile", "numpy.average", "numpy.mean", "numpy.eye", "matplotlib.pyplot.cla", "numpy.random.randn", "numpy.linalg.eig", "numpy.arange", "matplotlib.pyplot.pause", "numpy.deg2rad", "numpy.hstack", "numpy.linalg.inv", "numpy.vstack" ] ]
verolero86/rl_graph_generation
[ "2f278c46a179cc43583298d24983e42fa0d536a6" ]
[ "gym-molecule/gym_molecule/envs/molecule.py" ]
[ "import gym\nimport itertools\nimport numpy as np\nfrom rdkit import Chem # TODO(Bowen): remove and just use AllChem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.Descriptors import qed, MolLogP\nfrom rdkit.Chem import rdMolDescriptors\nfrom rdkit.Chem.FilterCatalog import FilterCatalogParams, FilterCatalog\n# import gym_molecule\nimport copy\nimport networkx as nx\nfrom gym_molecule.envs.sascorer import calculateScore\nfrom gym_molecule.dataset.dataset_utils import gdb_dataset,mol_to_nx,nx_to_mol\nimport random\nimport time\nimport matplotlib.pyplot as plt\nimport csv\n\nfrom contextlib import contextmanager\nimport sys, os\n\n# block std out\n@contextmanager\ndef nostdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n# TODO(Bowen): check, esp if input is not radical\ndef convert_radical_electrons_to_hydrogens(mol):\n \"\"\"\n Converts radical electrons in a molecule into bonds to hydrogens. Only\n use this if molecule is valid. Results a new mol object\n :param mol: rdkit mol object\n :return: rdkit mol object\n \"\"\"\n m = copy.deepcopy(mol)\n if Chem.Descriptors.NumRadicalElectrons(m) == 0: # not a radical\n return m\n else: # a radical\n for a in m.GetAtoms():\n num_radical_e = a.GetNumRadicalElectrons()\n if num_radical_e > 0:\n a.SetNumRadicalElectrons(0)\n a.SetNumExplicitHs(num_radical_e)\n return m\n\ndef load_scaffold():\n cwd = os.path.dirname(__file__)\n path = os.path.join(os.path.dirname(cwd), 'dataset',\n 'vocab.txt') # gdb 13\n with open(path, 'r') as fp:\n reader = csv.reader(fp, delimiter=',', quotechar='\"')\n data = [Chem.MolFromSmiles(row[0]) for row in reader]\n data = [mol for mol in data if mol.GetRingInfo().NumRings() == 1 and (mol.GetRingInfo().IsAtomInRingOfSize(0, 5) or mol.GetRingInfo().IsAtomInRingOfSize(0, 6))]\n for mol in data:\n Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)\n print('num of scaffolds:', len(data))\n return data\n\n\n\n\ndef load_conditional(type='low'):\n if type=='low':\n cwd = os.path.dirname(__file__)\n path = os.path.join(os.path.dirname(cwd), 'dataset',\n 'opt.test.logP-SA')\n import csv\n with open(path, 'r') as fp:\n reader = csv.reader(fp, delimiter=' ', quotechar='\"')\n data = [row+[id] for id,row in enumerate(reader)]\n # print(len(data))\n # print(data[799])\n elif type=='high':\n cwd = os.path.dirname(__file__)\n path = os.path.join(os.path.dirname(cwd), 'dataset',\n 'zinc_plogp_sorted.csv')\n import csv\n with open(path, 'r') as fp:\n reader = csv.reader(fp, delimiter=',', quotechar='\"')\n data = [[row[1], row[0],id] for id, row in enumerate(reader)]\n # data = [row for id, row in enumerate(reader)]\n data = data[0:800]\n return data\n# data = load_conditional('low')\n# data = load_conditional('high')\n# print(data[799])\n\n\nclass MoleculeEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n def __init__(self):\n pass\n\n def init(self,data_type='zinc',logp_ratio=1, qed_ratio=1,sa_ratio=1,reward_step_total=1,is_normalize=0,reward_type='gan',reward_target=0.5,has_scaffold=False,has_feature=False,is_conditional=False,conditional='low',max_action=128,min_action=20,force_final=False):\n '''\n own init function, since gym does not support passing argument\n '''\n self.is_normalize = bool(is_normalize)\n self.is_conditional = is_conditional\n self.has_feature = has_feature\n self.reward_type = reward_type\n self.reward_target = reward_target\n self.force_final = force_final\n\n 
self.conditional_list = load_conditional(conditional)\n if self.is_conditional:\n self.conditional = random.sample(self.conditional_list,1)[0]\n self.mol = Chem.RWMol(Chem.MolFromSmiles(self.conditional[0]))\n Chem.SanitizeMol(self.mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)\n else:\n self.mol = Chem.RWMol()\n self.smile_list = []\n if data_type=='gdb':\n possible_atoms = ['C', 'N', 'O', 'S', 'Cl'] # gdb 13\n elif data_type=='zinc':\n possible_atoms = ['C', 'N', 'O', 'S', 'P', 'F', 'I', 'Cl',\n 'Br'] # ZINC\n if self.has_feature:\n self.possible_formal_charge = np.array([-1, 0, 1])\n self.possible_implicit_valence = np.array([-1,0, 1, 2, 3, 4])\n self.possible_ring_atom = np.array([True, False])\n self.possible_degree = np.array([0, 1, 2, 3, 4, 5, 6, 7])\n self.possible_hybridization = np.array([\n Chem.rdchem.HybridizationType.SP,\n Chem.rdchem.HybridizationType.SP2,\n Chem.rdchem.HybridizationType.SP3,\n Chem.rdchem.HybridizationType.SP3D,\n Chem.rdchem.HybridizationType.SP3D2],\n dtype=object)\n possible_bonds = [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,\n Chem.rdchem.BondType.TRIPLE] #, Chem.rdchem.BondType.AROMATIC\n self.atom_type_num = len(possible_atoms)\n self.possible_atom_types = np.array(possible_atoms)\n self.possible_bond_types = np.array(possible_bonds, dtype=object)\n\n if self.has_feature:\n # self.d_n = len(self.possible_atom_types) + len(\n # self.possible_formal_charge) + len(\n # self.possible_implicit_valence) + len(self.possible_ring_atom) + \\\n # len(self.possible_degree) + len(self.possible_hybridization)\n self.d_n = len(self.possible_atom_types)+6 # 6 is the ring feature\n else:\n self.d_n = len(self.possible_atom_types)\n\n self.max_action = max_action\n self.min_action = min_action\n if data_type=='gdb':\n self.max_atom = 13 + len(possible_atoms) # gdb 13\n elif data_type=='zinc':\n if self.is_conditional:\n self.max_atom = 38 + len(possible_atoms) + self.min_action # ZINC\n else:\n self.max_atom = 38 + len(possible_atoms) # ZINC + self.min_action\n\n self.logp_ratio = logp_ratio\n self.qed_ratio = qed_ratio\n self.sa_ratio = sa_ratio\n self.reward_step_total = reward_step_total\n self.action_space = gym.spaces.MultiDiscrete([self.max_atom, self.max_atom, 3, 2])\n self.observation_space = {}\n self.observation_space['adj'] = gym.Space(shape=[len(possible_bonds), self.max_atom, self.max_atom])\n self.observation_space['node'] = gym.Space(shape=[1, self.max_atom, self.d_n])\n\n self.counter = 0\n\n ## load expert data\n cwd = os.path.dirname(__file__)\n if data_type=='gdb':\n path = os.path.join(os.path.dirname(cwd), 'dataset',\n 'gdb13.rand1M.smi.gz') # gdb 13\n elif data_type=='zinc':\n path = os.path.join(os.path.dirname(cwd), 'dataset',\n '250k_rndm_zinc_drugs_clean_sorted.smi') # ZINC\n self.dataset = gdb_dataset(path)\n\n ## load scaffold data if necessary\n self.has_scaffold = has_scaffold\n if has_scaffold:\n self.scaffold = load_scaffold()\n self.max_scaffold = 6\n\n\n self.level = 0 # for curriculum learning, level starts with 0, and increase afterwards\n\n def level_up(self):\n self.level += 1\n\n def seed(self,seed):\n np.random.seed(seed=seed)\n random.seed(seed)\n\n def normalize_adj(self,adj):\n degrees = np.sum(adj,axis=2)\n # print('degrees',degrees)\n D = np.zeros((adj.shape[0],adj.shape[1],adj.shape[2]))\n for i in range(D.shape[0]):\n D[i,:,:] = np.diag(np.power(degrees[i,:],-0.5))\n adj_normal = D@adj@D\n adj_normal[np.isnan(adj_normal)]=0\n return adj_normal\n\n #TODO(Bowen): The top try, except clause allows 
error messages from step\n # to be printed when running run_molecules.py. For debugging only\n def step(self, action):\n \"\"\"\n Perform a given action\n :param action:\n :param action_type:\n :return: reward of 1 if resulting molecule graph does not exceed valency,\n -1 if otherwise\n \"\"\"\n ### init\n info = {} # info we care about\n self.mol_old = copy.deepcopy(self.mol) # keep old mol\n total_atoms = self.mol.GetNumAtoms()\n\n ### take action\n if action[0,3]==0 or self.counter < self.min_action: # not stop\n stop = False\n if action[0, 1] >= total_atoms:\n self._add_atom(action[0, 1] - total_atoms) # add new node\n action[0, 1] = total_atoms # new node id\n self._add_bond(action) # add new edge\n else:\n self._add_bond(action) # add new edge\n else: # stop\n stop = True\n\n ### calculate intermediate rewards\n if self.check_valency():\n if self.mol.GetNumAtoms()+self.mol.GetNumBonds()-self.mol_old.GetNumAtoms()-self.mol_old.GetNumBonds()>0:\n reward_step = self.reward_step_total/self.max_atom # successfully add node/edge\n self.smile_list.append(self.get_final_smiles())\n else:\n reward_step = -self.reward_step_total/self.max_atom # edge exist\n else:\n reward_step = -self.reward_step_total/self.max_atom # invalid action\n self.mol = self.mol_old\n\n ### calculate terminal rewards\n # todo: add terminal action\n\n if self.is_conditional:\n terminate_condition = (self.mol.GetNumAtoms() >= self.max_atom-self.possible_atom_types.shape[0]-self.min_action or self.counter >= self.max_action or stop) and self.counter >= self.min_action\n else:\n terminate_condition = (self.mol.GetNumAtoms() >= self.max_atom-self.possible_atom_types.shape[0] or self.counter >= self.max_action or stop) and self.counter >= self.min_action\n if terminate_condition or self.force_final:\n # default reward\n reward_valid = 2\n reward_qed = 0\n reward_sa = 0\n reward_logp = 0\n reward_final = 0\n flag_steric_strain_filter = True\n flag_zinc_molecule_filter = True\n\n if not self.check_chemical_validity():\n reward_valid -= 5\n else:\n # final mol object where any radical electrons are changed to bonds to hydrogen\n final_mol = self.get_final_mol()\n s = Chem.MolToSmiles(final_mol, isomericSmiles=True)\n final_mol = Chem.MolFromSmiles(s)\n\n # mol filters with negative rewards\n if not steric_strain_filter(final_mol): # passes 3D conversion, no excessive strain\n reward_valid -= 1\n flag_steric_strain_filter = False\n if not zinc_molecule_filter(final_mol): # does not contain any problematic functional groups\n reward_valid -= 1\n flag_zinc_molecule_filter = False\n\n\n # property rewards\n try:\n # 1. QED reward. Can have values [0, 1]. Higher the better\n reward_qed += qed(final_mol)*self.qed_ratio\n # 2. Synthetic accessibility reward. Values naively normalized to [0, 1]. Higher the better\n sa = -1 * calculateScore(final_mol)\n reward_sa += (sa + 10) / (10 - 1) * self.sa_ratio\n # 3. Logp reward. 
Higher the better\n # reward_logp += MolLogP(self.mol)/10 * self.logp_ratio\n reward_logp += reward_penalized_log_p(final_mol) * self.logp_ratio\n if self.reward_type == 'logppen':\n reward_final += reward_penalized_log_p(final_mol)/3\n elif self.reward_type == 'logp_target':\n # reward_final += reward_target(final_mol,target=self.reward_target,ratio=0.5,val_max=2,val_min=-2,func=MolLogP)\n # reward_final += reward_target_logp(final_mol,target=self.reward_target)\n reward_final += reward_target_new(final_mol,MolLogP ,x_start=self.reward_target, x_mid=self.reward_target+0.25)\n elif self.reward_type == 'qed':\n reward_final += reward_qed*2\n elif self.reward_type == 'qedsa':\n reward_final += (reward_qed*1.5 + reward_sa*0.5)\n elif self.reward_type == 'qed_target':\n # reward_final += reward_target(final_mol,target=self.reward_target,ratio=0.1,val_max=2,val_min=-2,func=qed)\n reward_final += reward_target_qed(final_mol,target=self.reward_target)\n elif self.reward_type == 'mw_target':\n # reward_final += reward_target(final_mol,target=self.reward_target,ratio=40,val_max=2,val_min=-2,func=rdMolDescriptors.CalcExactMolWt)\n # reward_final += reward_target_mw(final_mol,target=self.reward_target)\n reward_final += reward_target_new(final_mol, rdMolDescriptors.CalcExactMolWt,x_start=self.reward_target, x_mid=self.reward_target+25)\n\n\n elif self.reward_type == 'gan':\n reward_final = 0\n else:\n print('reward error!')\n reward_final = 0\n\n\n\n except: # if any property reward error, reset all\n print('reward error')\n\n new = True # end of episode\n if self.force_final:\n reward = reward_final\n else:\n reward = reward_step + reward_valid + reward_final\n info['smile'] = self.get_final_smiles()\n if self.is_conditional:\n info['reward_valid'] = self.conditional[-1] ### temp change\n else:\n info['reward_valid'] = reward_valid\n info['reward_qed'] = reward_qed\n info['reward_sa'] = reward_sa\n info['final_stat'] = reward_final\n info['reward'] = reward\n info['flag_steric_strain_filter'] = flag_steric_strain_filter\n info['flag_zinc_molecule_filter'] = flag_zinc_molecule_filter\n info['stop'] = stop\n\n ### use stepwise reward\n else:\n new = False\n # print('counter', self.counter, 'new', new, 'reward_step', reward_step)\n reward = reward_step\n\n # get observation\n ob = self.get_observation()\n\n self.counter += 1\n if new:\n self.counter = 0\n\n return ob,reward,new,info\n\n\n def reset(self,smile=None):\n '''\n to avoid error, assume an atom already exists\n :return: ob\n '''\n if self.is_conditional:\n self.conditional = random.sample(self.conditional_list, 1)[0]\n self.mol = Chem.RWMol(Chem.MolFromSmiles(self.conditional[0]))\n Chem.SanitizeMol(self.mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)\n elif smile is not None:\n self.mol = Chem.RWMol(Chem.MolFromSmiles(smile))\n Chem.SanitizeMol(self.mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)\n else:\n self.mol = Chem.RWMol()\n # self._add_atom(np.random.randint(len(self.possible_atom_types))) # random add one atom\n self._add_atom(0) # always add carbon first\n self.smile_list= [self.get_final_smiles()]\n self.counter = 0\n ob = self.get_observation()\n return ob\n\n def render(self, mode='human', close=False):\n return\n\n def _add_atom(self, atom_type_id):\n \"\"\"\n Adds an atom\n :param atom_type_id: atom_type id\n :return:\n \"\"\"\n # assert action.shape == (len(self.possible_atom_types),)\n # atom_type_idx = np.argmax(action)\n atom_symbol = self.possible_atom_types[atom_type_id]\n 
self.mol.AddAtom(Chem.Atom(atom_symbol))\n\n def _add_bond(self, action):\n '''\n\n :param action: [first_node, second_node, bong_type_id]\n :return:\n '''\n # GetBondBetweenAtoms fails for np.int64\n bond_type = self.possible_bond_types[action[0,2]]\n\n # if bond exists between current atom and other atom, modify the bond\n # type to new bond type. Otherwise create bond between current atom and\n # other atom with the new bond type\n bond = self.mol.GetBondBetweenAtoms(int(action[0,0]), int(action[0,1]))\n if bond:\n # print('bond exist!')\n return False\n else:\n self.mol.AddBond(int(action[0,0]), int(action[0,1]), order=bond_type)\n # bond = self.mol.GetBondBetweenAtoms(int(action[0, 0]), int(action[0, 1]))\n # bond.SetIntProp('ordering',self.mol.GetNumBonds())\n return True\n\n def _modify_bond(self, action):\n \"\"\"\n Adds or modifies a bond (currently no deletion is allowed)\n :param action: np array of dim N-1 x d_e, where N is the current total\n number of atoms, d_e is the number of bond types\n :return:\n \"\"\"\n assert action.shape == (self.current_atom_idx, len(self.possible_bond_types))\n other_atom_idx = int(np.argmax(action.sum(axis=1))) # b/c\n # GetBondBetweenAtoms fails for np.int64\n bond_type_idx = np.argmax(action.sum(axis=0))\n bond_type = self.possible_bond_types[bond_type_idx]\n\n # if bond exists between current atom and other atom, modify the bond\n # type to new bond type. Otherwise create bond between current atom and\n # other atom with the new bond type\n bond = self.mol.GetBondBetweenAtoms(self.current_atom_idx, other_atom_idx)\n if bond:\n bond.SetBondType(bond_type)\n else:\n self.mol.AddBond(self.current_atom_idx, other_atom_idx, order=bond_type)\n self.total_bonds += 1\n\n def get_num_atoms(self):\n return self.total_atoms\n\n def get_num_bonds(self):\n return self.total_bonds\n\n def check_chemical_validity(self):\n \"\"\"\n Checks the chemical validity of the mol object. Existing mol object is\n not modified. Radicals pass this test.\n :return: True if chemically valid, False otherwise\n \"\"\"\n s = Chem.MolToSmiles(self.mol, isomericSmiles=True)\n m = Chem.MolFromSmiles(s) # implicitly performs sanitization\n if m:\n return True\n else:\n return False\n\n def check_valency(self):\n \"\"\"\n Checks that no atoms in the mol have exceeded their possible\n valency\n :return: True if no valency issues, False otherwise\n \"\"\"\n try:\n Chem.SanitizeMol(self.mol,\n sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)\n return True\n except ValueError:\n return False\n\n # TODO(Bowen): check if need to sanitize again\n def get_final_smiles(self):\n \"\"\"\n Returns a SMILES of the final molecule. Converts any radical\n electrons into hydrogens. Works only if molecule is valid\n :return: SMILES\n \"\"\"\n m = convert_radical_electrons_to_hydrogens(self.mol)\n return Chem.MolToSmiles(m, isomericSmiles=True)\n\n # TODO(Bowen): check if need to sanitize again\n def get_final_mol(self):\n \"\"\"\n Returns a rdkit mol object of the final molecule. Converts any radical\n electrons into hydrogens. 
Works only if molecule is valid\n :return: SMILES\n \"\"\"\n m = convert_radical_electrons_to_hydrogens(self.mol)\n return m\n\n\n def get_observation(self):\n \"\"\"\n ob['adj']:d_e*n*n --- 'E'\n ob['node']:1*n*d_n --- 'F'\n n = atom_num + atom_type_num\n \"\"\"\n mol = copy.deepcopy(self.mol)\n try:\n Chem.SanitizeMol(mol)\n except:\n pass\n n = mol.GetNumAtoms()\n n_shift = len(self.possible_atom_types) # assume isolated nodes new nodes exist\n\n\n F = np.zeros((1, self.max_atom, self.d_n))\n for a in mol.GetAtoms():\n atom_idx = a.GetIdx()\n atom_symbol = a.GetSymbol()\n if self.has_feature:\n formal_charge = a.GetFormalCharge()\n implicit_valence = a.GetImplicitValence()\n ring_atom = a.IsInRing()\n degree = a.GetDegree()\n hybridization = a.GetHybridization()\n # print(atom_symbol,formal_charge,implicit_valence,ring_atom,degree,hybridization)\n if self.has_feature:\n # float_array = np.concatenate([(atom_symbol ==\n # self.possible_atom_types),\n # (formal_charge ==\n # self.possible_formal_charge),\n # (implicit_valence ==\n # self.possible_implicit_valence),\n # (ring_atom ==\n # self.possible_ring_atom),\n # (degree == self.possible_degree),\n # (hybridization ==\n # self.possible_hybridization)]).astype(float)\n float_array = np.concatenate([(atom_symbol ==\n self.possible_atom_types),\n ([not a.IsInRing()]),\n ([a.IsInRingSize(3)]),\n ([a.IsInRingSize(4)]),\n ([a.IsInRingSize(5)]),\n ([a.IsInRingSize(6)]),\n ([a.IsInRing() and (not a.IsInRingSize(3))\n and (not a.IsInRingSize(4))\n and (not a.IsInRingSize(5))\n and (not a.IsInRingSize(6))]\n )]).astype(float)\n else:\n float_array = (atom_symbol == self.possible_atom_types).astype(float)\n # assert float_array.sum() == 6 # because there are 6 types of one\n # print(float_array,float_array.sum())\n # hot atom features\n F[0, atom_idx, :] = float_array\n # add the atom features for the auxiliary atoms. We only include the\n # atom symbol features\n auxiliary_atom_features = np.zeros((n_shift, self.d_n)) # for padding\n temp = np.eye(n_shift)\n auxiliary_atom_features[:temp.shape[0], :temp.shape[1]] = temp\n F[0,n:n+n_shift,:] = auxiliary_atom_features\n # print('n',n,'n+n_shift',n+n_shift,auxiliary_atom_features.shape)\n\n d_e = len(self.possible_bond_types)\n E = np.zeros((d_e, self.max_atom, self.max_atom))\n for i in range(d_e):\n E[i,:n+n_shift,:n+n_shift] = np.eye(n+n_shift)\n for b in self.mol.GetBonds(): # self.mol, very important!! 
no aromatic\n begin_idx = b.GetBeginAtomIdx()\n end_idx = b.GetEndAtomIdx()\n bond_type = b.GetBondType()\n float_array = (bond_type == self.possible_bond_types).astype(float)\n try:\n assert float_array.sum() != 0\n except:\n print('error',bond_type)\n E[:, begin_idx, end_idx] = float_array\n E[:, end_idx, begin_idx] = float_array\n ob = {}\n if self.is_normalize:\n E = self.normalize_adj(E)\n ob['adj'] = E\n ob['node'] = F\n return ob\n\n\n def get_observation_mol(self,mol):\n \"\"\"\n ob['adj']:b*n*n --- 'E'\n ob['node']:1*n*m --- 'F'\n n = atom_num + atom_type_num\n \"\"\"\n\n n = self.max_scaffold\n d_n = len(self.possible_atom_types)\n F = np.zeros((1, n, d_n))\n for a in mol.GetAtoms():\n atom_idx = a.GetIdx()\n atom_symbol = a.GetSymbol()\n float_array = (atom_symbol == self.possible_atom_types).astype(float)\n assert float_array.sum() != 0\n F[0, atom_idx, :] = float_array\n\n d_e = len(self.possible_bond_types)\n E = np.zeros((d_e, n, n))\n for i in range(d_e):\n E[i,:,:] = np.eye(n)\n for b in mol.GetBonds():\n begin_idx = b.GetBeginAtomIdx()\n end_idx = b.GetEndAtomIdx()\n bond_type = b.GetBondType()\n float_array = (bond_type == self.possible_bond_types).astype(float)\n assert float_array.sum() != 0\n E[:, begin_idx, end_idx] = float_array\n E[:, end_idx, begin_idx] = float_array\n ob = {}\n if self.is_normalize:\n E = self.normalize_adj(E)\n ob['adj'] = E\n ob['node'] = F\n return ob\n\n\n def get_observation_scaffold(self):\n ob = {}\n atom_type_num = len(self.possible_atom_types)\n bond_type_num = len(self.possible_bond_types)\n batch_size = len(self.scaffold)\n ob['node'] = np.zeros((batch_size, 1, self.max_scaffold, atom_type_num))\n ob['adj'] = np.zeros((batch_size, bond_type_num, self.max_scaffold, self.max_scaffold))\n for idx,mol in enumerate(self.scaffold):\n ob_temp = self.get_observation_mol(mol)\n ob['node'][idx]=ob_temp['node']\n ob['adj'][idx]=ob_temp['adj']\n return ob\n\n\n def get_expert(self, batch_size,is_final=False,curriculum=0,level_total=6,level=0):\n ob = {}\n atom_type_num = len(self.possible_atom_types)\n bond_type_num = len(self.possible_bond_types)\n ob['node'] = np.zeros((batch_size, 1, self.max_atom, self.d_n))\n ob['adj'] = np.zeros((batch_size, bond_type_num, self.max_atom, self.max_atom))\n\n ac = np.zeros((batch_size, 4))\n ### select molecule\n dataset_len = len(self.dataset)\n for i in range(batch_size):\n is_final_temp = is_final\n # print('--------------------------------------------------')\n ### get a subgraph\n if curriculum==1:\n ratio_start = level/float(level_total)\n ratio_end = (level+1)/float(level_total)\n idx = np.random.randint(int(ratio_start*dataset_len), int(ratio_end*dataset_len))\n else:\n idx = np.random.randint(0, dataset_len)\n mol = self.dataset[idx]\n # print('ob_before',Chem.MolToSmiles(mol, isomericSmiles=True))\n # from rdkit.Chem import Draw\n # Draw.MolToFile(mol, 'ob_before'+str(i)+'.png')\n # mol = self.dataset[i] # sanitity check\n Chem.SanitizeMol(mol,sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)\n graph = mol_to_nx(mol)\n edges = graph.edges()\n # # always involve is_final probability\n # if is_final==False and np.random.rand()<1.0/batch_size:\n # is_final = True\n\n # select the edge num for the subgraph\n if is_final_temp:\n edges_sub_len = len(edges)\n else:\n # edges_sub_len = random.randint(1,len(edges))\n edges_sub_len = random.randint(1,len(edges)+1)\n if edges_sub_len==len(edges)+1:\n edges_sub_len = len(edges)\n is_final_temp=True\n edges_sub = random.sample(edges,k=edges_sub_len)\n graph_sub = 
nx.Graph(edges_sub)\n graph_sub = max(nx.connected_component_subgraphs(graph_sub), key=len)\n if is_final_temp: # when the subgraph the whole molecule, the expert show stop sign\n node1 = random.randint(0,mol.GetNumAtoms()-1)\n while True:\n node2 = random.randint(0,mol.GetNumAtoms()+atom_type_num-1)\n if node2!=node1:\n break\n edge_type = random.randint(0,bond_type_num-1)\n ac[i,:] = [node1,node2,edge_type,1] # stop\n else:\n ### random pick an edge from the subgraph, then remove it\n edge_sample = random.sample(graph_sub.edges(),k=1)\n graph_sub.remove_edges_from(edge_sample)\n graph_sub = max(nx.connected_component_subgraphs(graph_sub), key=len)\n edge_sample = edge_sample[0] # get value\n ### get action\n if edge_sample[0] in graph_sub.nodes() and edge_sample[1] in graph_sub.nodes():\n node1 = graph_sub.nodes().index(edge_sample[0])\n node2 = graph_sub.nodes().index(edge_sample[1])\n elif edge_sample[0] in graph_sub.nodes():\n node1 = graph_sub.nodes().index(edge_sample[0])\n node2 = np.argmax(\n graph.node[edge_sample[1]]['symbol'] == self.possible_atom_types) + graph_sub.number_of_nodes()\n elif edge_sample[1] in graph_sub.nodes():\n node1 = graph_sub.nodes().index(edge_sample[1])\n node2 = np.argmax(\n graph.node[edge_sample[0]]['symbol'] == self.possible_atom_types) + graph_sub.number_of_nodes()\n else:\n print('Expert policy error!')\n edge_type = np.argmax(graph[edge_sample[0]][edge_sample[1]]['bond_type'] == self.possible_bond_types)\n ac[i,:] = [node1,node2,edge_type,0] # don't stop\n # print('action',[node1,node2,edge_type,0])\n # print('action',ac)\n # plt.axis(\"off\")\n # nx.draw_networkx(graph_sub)\n # plt.show()\n ### get observation\n # rw_mol = Chem.RWMol()\n n = graph_sub.number_of_nodes()\n for node_id, node in enumerate(graph_sub.nodes()):\n if self.has_feature:\n # float_array = np.concatenate([(graph.node[node]['symbol'] ==\n # self.possible_atom_types),\n # (graph.node[node]['formal_charge'] ==\n # self.possible_formal_charge),\n # (graph.node[node]['implicit_valence'] ==\n # self.possible_implicit_valence),\n # (graph.node[node]['ring_atom'] ==\n # self.possible_ring_atom),\n # (graph.node[node]['degree'] == self.possible_degree),\n # (graph.node[node]['hybridization'] ==\n # self.possible_hybridization)]).astype(float)\n cycle_info = nx.cycle_basis(graph_sub, node)\n cycle_len_info = [len(cycle) for cycle in cycle_info]\n # print(cycle_len_info)\n float_array = np.concatenate([(graph.node[node]['symbol'] ==\n self.possible_atom_types),\n ([len(cycle_info)==0]),\n ([3 in cycle_len_info]),\n ([4 in cycle_len_info]),\n ([5 in cycle_len_info]),\n ([6 in cycle_len_info]),\n ([len(cycle_info)!=0 and (not 3 in cycle_len_info)\n and (not 4 in cycle_len_info)\n and (not 5 in cycle_len_info)\n and (not 6 in cycle_len_info)]\n )]).astype(float)\n else:\n float_array = (graph.node[node]['symbol'] == self.possible_atom_types).astype(float)\n\n # assert float_array.sum() == 6\n ob['node'][i, 0, node_id, :] = float_array\n # print('node',node_id,graph.node[node]['symbol'])\n # atom = Chem.Atom(graph.node[node]['symbol'])\n # rw_mol.AddAtom(atom)\n auxiliary_atom_features = np.zeros((atom_type_num, self.d_n)) # for padding\n temp = np.eye(atom_type_num)\n auxiliary_atom_features[:temp.shape[0], :temp.shape[1]] = temp\n ob['node'][i ,0, n:n + atom_type_num, :] = auxiliary_atom_features\n\n for j in range(bond_type_num):\n ob['adj'][i, j, :n + atom_type_num, :n + atom_type_num] = np.eye(n + atom_type_num)\n for edge in graph_sub.edges():\n begin_idx = 
graph_sub.nodes().index(edge[0])\n end_idx = graph_sub.nodes().index(edge[1])\n bond_type = graph[edge[0]][edge[1]]['bond_type']\n float_array = (bond_type == self.possible_bond_types).astype(float)\n assert float_array.sum() != 0\n ob['adj'][i, :, begin_idx, end_idx] = float_array\n ob['adj'][i, :, end_idx, begin_idx] = float_array\n # print('edge',begin_idx,end_idx,bond_type)\n # rw_mol.AddBond(begin_idx, end_idx, order=bond_type)\n if self.is_normalize:\n ob['adj'][i] = self.normalize_adj(ob['adj'][i])\n # print('ob',Chem.MolToSmiles(rw_mol, isomericSmiles=True))\n # from rdkit.Chem import Draw\n # Draw.MolToFile(rw_mol, 'ob' + str(i) + '.png')\n\n return ob,ac\n\n\n\n\n\n\n\n\n\n## below are for general graph generation env\n\ndef caveman_special(c=2,k=20,p_path=0.1,p_edge=0.3):\n p = p_path\n path_count = max(int(np.ceil(p * k)),1)\n G = nx.caveman_graph(c, k)\n # remove 50% edges\n p = 1-p_edge\n for (u, v) in list(G.edges()):\n if np.random.rand() < p and ((u < k and v < k) or (u >= k and v >= k)):\n G.remove_edge(u, v)\n # add path_count links\n for i in range(path_count):\n u = np.random.randint(0, k)\n v = np.random.randint(k, k * 2)\n G.add_edge(u, v)\n G = max(nx.connected_component_subgraphs(G), key=len)\n return G\n\n\nclass GraphEnv(gym.Env):\n \"\"\"\n Environment for a general graph\n \"\"\"\n def __init__(self):\n pass\n def init(self, reward_step_total=1, is_normalize=0,dataset='ba'):\n '''\n own init function, since gym does not support passing argument\n '''\n self.is_normalize = bool(is_normalize)\n self.graph = nx.Graph()\n self.reward_step_total = reward_step_total\n\n\n self.counter = 0\n\n ## load expert data\n if dataset == 'caveman':\n self.dataset = []\n for i in range(2, 3):\n for j in range(6, 11):\n for k in range(20):\n self.dataset.append(caveman_special(i, j, p_edge=0.8)) # default 0.8\n self.max_node = 25\n self.max_action = 150\n elif dataset == 'grid':\n self.dataset = []\n for i in range(2, 5):\n for j in range(2, 6):\n self.dataset.append(nx.grid_2d_graph(i, j))\n self.max_node = 25\n self.max_action = 100\n else:\n print('default dataset: barabasi')\n self.dataset = []\n for i in range(4, 21):\n for j in range(3, 4):\n for k in range(10):\n self.dataset.append(nx.barabasi_albert_graph(i, j))\n self.max_node = 25\n self.max_action = 150\n\n self.action_space = gym.spaces.MultiDiscrete([self.max_node, self.max_node, 3, 2])\n self.observation_space = {}\n self.observation_space['adj'] = gym.Space(shape=[1, self.max_node, self.max_node])\n self.observation_space['node'] = gym.Space(shape=[1, self.max_node, 1])\n\n self.level = 0 # for curriculum learning, level starts with 0, and increase afterwards\n\n # compatible with molecule env\n self.max_atom = self.max_node\n self.atom_type_num = 1\n\n def level_up(self):\n self.level += 1\n\n def normalize_adj(self, adj):\n degrees = np.sum(adj, axis=2)\n # print('degrees',degrees)\n D = np.zeros((adj.shape[0], adj.shape[1], adj.shape[2]))\n for i in range(D.shape[0]):\n D[i, :, :] = np.diag(np.power(degrees[i, :], -0.5))\n adj_normal = D @ adj @ D\n adj_normal[np.isnan(adj_normal)] = 0\n return adj_normal\n\n # TODO(Bowen): check\n def step(self, action):\n \"\"\"\n\n :param action:\n :return:\n \"\"\"\n ### init\n info = {} # info we care about\n self.graph_old = copy.deepcopy(self.graph)\n total_nodes = self.graph.number_of_nodes()\n\n ### take action\n if action[0, 3] == 0: # not stop\n stop = False\n if action[0, 1] >= total_nodes:\n self.graph.add_node(int(action[0, 1]))\n self._add_edge(action)\n else:\n 
self._add_edge(action) # add new edge\n else: # stop\n stop = True\n\n ### calculate intermediate rewards\n # todo: add neccessary rules for the task\n if self.graph.number_of_nodes() + self.graph.number_of_edges()-self.graph_old.number_of_nodes() - \\\n self.graph_old.number_of_edges() > 0:\n reward_step = self.reward_step_total / self.max_node\n # successfully added node/edge\n else:\n reward_step = -self.reward_step_total / self.max_node # edge\n self.graph = self.graph_old\n # already exists\n\n ### calculate and use terminal reward\n if self.graph.number_of_nodes() >= self.max_node - 1 or self.counter >= self.max_action or stop:\n\n # property rewards\n ## todo: add property reward\n reward_terminal = 1 # arbitrary choice\n\n new = True # end of episode\n reward = reward_step + reward_terminal\n\n # print terminal graph information\n info['final_stat'] = reward_terminal\n info['reward'] = reward\n info['stop'] = stop\n ### use stepwise reward\n else:\n new = False\n reward = reward_step\n\n # get observation\n ob = self.get_observation()\n\n self.counter += 1\n if new:\n self.counter = 0\n\n return ob, reward, new, info\n\n def reset(self):\n \"\"\"\n to avoid error, assume a node already exists\n :return: ob\n \"\"\"\n self.graph.clear()\n self.graph.add_node(0)\n self.counter = 0\n ob = self.get_observation()\n return ob\n\n # TODO(Bowen): is this necessary\n def render(self, mode='human', close=False):\n return\n\n # TODO(Bowen): check\n def _add_node(self):\n \"\"\"\n\n :param node_type_id:\n :return:\n \"\"\"\n new_node_idx = self.graph.number_of_nodes()\n self.graph.add_node(new_node_idx)\n\n # TODO(Bowen): check\n def _add_edge(self, action):\n \"\"\"\n\n :param action: [first_node, second_node, edge_type_id]\n :return:\n \"\"\"\n\n if self.graph.has_edge(int(action[0,0]), int(action[0,1])) or int(action[0,0])==int(action[0,1]):\n return False\n else:\n self.graph.add_edge(int(action[0,0]), int(action[0,1]))\n return True\n\n def get_final_graph(self):\n return self.graph\n\n # TODO(Bowen): check [for featured graph]\n # def get_observation(self):\n # \"\"\"\n #\n # :return: ob, where ob['adj'] is E with dim b x n x n and ob['node']\n # is F with dim 1 x n x m. 
NB: n = node_num + node_type_num\n # \"\"\"\n # n = self.graph.number_of_nodes()\n # n_shift = len(self.possible_node_types) # assume isolated nodes new nodes exist\n #\n # d_n = len(self.possible_node_types)\n # F = np.zeros((1, self.max_node, d_n))\n #\n # for node in self.graph.nodes_iter(data=True):\n # node_idx = node[0]\n # node_type = node[1]['type']\n # float_array = (node_type == self.possible_node_types).astype(float)\n # assert float_array.sum() != 0\n # F[0, node_idx, :] = float_array\n # temp = F[0, n:n + n_shift, :]\n # F[0, n:n + n_shift, :] = np.eye(n_shift)\n #\n # d_e = len(self.possible_edge_types)\n # E = np.zeros((d_e, self.max_node, self.max_node))\n # for i in range(d_e):\n # E[i, :n + n_shift, :n + n_shift] = np.eye(n + n_shift)\n # for e in self.graph.edges_iter(data=True):\n # begin_idx = e[0]\n # end_idx = e[1]\n # edge_type = e[2]['type']\n # float_array = (edge_type == self.possible_edge_types).astype(float)\n # assert float_array.sum() != 0\n # E[:, begin_idx, end_idx] = float_array\n # E[:, end_idx, begin_idx] = float_array\n # ob = {}\n # if self.is_normalize:\n # E = self.normalize_adj(E)\n # ob['adj'] = E\n # ob['node'] = F\n # return ob\n\n\n # for graphs without features\n def get_observation(self,feature='deg'):\n \"\"\"\n\n :return: ob, where ob['adj'] is E with dim b x n x n and ob['node']\n is F with dim 1 x n x m. NB: n = node_num + node_type_num\n \"\"\"\n n = self.graph.number_of_nodes()\n F = np.zeros((1, self.max_node, 1))\n F[0,:n+1,0] = 1\n\n E = np.zeros((1, self.max_node, self.max_node))\n E[0,:n,:n] = np.asarray(nx.to_numpy_matrix(self.graph))[np.newaxis,:,:]\n E[0,:n+1,:n+1] += np.eye(n+1)\n\n ob = {}\n if self.is_normalize:\n E = self.normalize_adj(E)\n ob['adj'] = E\n ob['node'] = F\n return ob\n\n def get_expert(self, batch_size, is_final=False, curriculum=0,\n level_total=6, level=0):\n ob = {}\n ob['node'] = np.zeros((batch_size, 1, self.max_node, 1))\n ob['adj'] = np.zeros((batch_size, 1, self.max_node, self.max_node))\n\n ac = np.zeros((batch_size, 4))\n ### select graph\n dataset_len = len(self.dataset)\n for i in range(batch_size):\n ### get a subgraph\n if curriculum == 1:\n ratio_start = level / float(level_total)\n ratio_end = (level + 1) / float(level_total)\n idx = np.random.randint(int(ratio_start * dataset_len),\n int(ratio_end * dataset_len))\n else:\n idx = np.random.randint(0, dataset_len)\n graph = self.dataset[idx]\n edges = graph.edges()\n # select the edge num for the subgraph\n if is_final:\n edges_sub_len = len(edges)\n else:\n edges_sub_len = random.randint(1, len(edges))\n edges_sub = random.sample(edges, k=edges_sub_len)\n graph_sub = nx.Graph(edges_sub)\n graph_sub = max(nx.connected_component_subgraphs(graph_sub),\n key=len)\n if is_final: # when the subgraph the whole graph, the expert show\n # stop sign\n node1 = random.randint(0, graph.number_of_nodes() - 1)\n while True:\n node2 = random.randint(0,graph.number_of_nodes())\n if node2 != node1:\n break\n edge_type = 0\n ac[i, :] = [node1, node2, edge_type, 1] # stop\n else:\n ### random pick an edge from the subgraph, then remove it\n edge_sample = random.sample(graph_sub.edges(), k=1)\n graph_sub.remove_edges_from(edge_sample)\n graph_sub = max(nx.connected_component_subgraphs(graph_sub),\n key=len)\n edge_sample = edge_sample[0] # get value\n ### get action\n if edge_sample[0] in graph_sub.nodes() and edge_sample[\n 1] in graph_sub.nodes():\n node1 = graph_sub.nodes().index(edge_sample[0])\n node2 = graph_sub.nodes().index(edge_sample[1])\n elif 
edge_sample[0] in graph_sub.nodes():\n node1 = graph_sub.nodes().index(edge_sample[0])\n node2 = graph_sub.number_of_nodes()\n elif edge_sample[1] in graph_sub.nodes():\n node1 = graph_sub.nodes().index(edge_sample[1])\n node2 = graph_sub.number_of_nodes()\n else:\n print('Expert policy error!')\n edge_type = 0\n ac[i, :] = [node1, node2, edge_type, 0] # don't stop\n # print('action',[node1,node2,edge_type,0])\n # print('action',ac)\n # plt.axis(\"off\")\n # nx.draw_networkx(graph_sub)\n # plt.show()\n ### get observation\n n = graph_sub.number_of_nodes()\n F = np.zeros((1, self.max_node, 1))\n F[0, :n + 1, 0] = 1\n if self.is_normalize:\n ob['adj'][i] = self.normalize_adj(F)\n else:\n ob['node'][i]=F\n # print(F)\n E = np.zeros((1, self.max_node, self.max_node))\n E[0, :n, :n] = np.asarray(nx.to_numpy_matrix(graph_sub))[np.newaxis, :, :]\n E[0, :n + 1, :n + 1] += np.eye(n + 1)\n ob['adj'][i]=E\n # print(E)\n\n return ob, ac\n\n\n### YES/NO filters ###\ndef zinc_molecule_filter(mol):\n \"\"\"\n Flags molecules based on problematic functional groups as\n provided set of ZINC rules from\n http://blaster.docking.org/filtering/rules_default.txt.\n :param mol: rdkit mol object\n :return: Returns True if molecule is okay (ie does not match any of\n therules), False if otherwise\n \"\"\"\n params = FilterCatalogParams()\n params.AddCatalog(FilterCatalogParams.FilterCatalogs.ZINC)\n catalog = FilterCatalog(params)\n return not catalog.HasMatch(mol)\n\n\n# TODO(Bowen): check\ndef steric_strain_filter(mol, cutoff=0.82,\n max_attempts_embed=20,\n max_num_iters=200):\n \"\"\"\n Flags molecules based on a steric energy cutoff after max_num_iters\n iterations of MMFF94 forcefield minimization. Cutoff is based on average\n angle bend strain energy of molecule\n :param mol: rdkit mol object\n :param cutoff: kcal/mol per angle . 
If minimized energy is above this\n threshold, then molecule fails the steric strain filter\n :param max_attempts_embed: number of attempts to generate initial 3d\n coordinates\n :param max_num_iters: number of iterations of forcefield minimization\n :return: True if molecule could be successfully minimized, and resulting\n energy is below cutoff, otherwise False\n \"\"\"\n # check for the trivial cases of a single atom or only 2 atoms, in which\n # case there is no angle bend strain energy (as there are no angles!)\n if mol.GetNumAtoms() <= 2:\n return True\n\n # make copy of input mol and add hydrogens\n m = copy.deepcopy(mol)\n m_h = Chem.AddHs(m)\n\n # generate an initial 3d conformer\n try:\n flag = AllChem.EmbedMolecule(m_h, maxAttempts=max_attempts_embed)\n if flag == -1:\n # print(\"Unable to generate 3d conformer\")\n return False\n except: # to catch error caused by molecules such as C=[SH]1=C2OC21ON(N)OC(=O)NO\n # print(\"Unable to generate 3d conformer\")\n return False\n\n # set up the forcefield\n AllChem.MMFFSanitizeMolecule(m_h)\n if AllChem.MMFFHasAllMoleculeParams(m_h):\n mmff_props = AllChem.MMFFGetMoleculeProperties(m_h)\n try: # to deal with molecules such as CNN1NS23(=C4C5=C2C(=C53)N4Cl)S1\n ff = AllChem.MMFFGetMoleculeForceField(m_h, mmff_props)\n except:\n # print(\"Unable to get forcefield or sanitization error\")\n return False\n else:\n # print(\"Unrecognized atom type\")\n return False\n\n # minimize steric energy\n try:\n ff.Minimize(maxIts=max_num_iters)\n except:\n # print(\"Minimization error\")\n return False\n\n # ### debug ###\n # min_e = ff.CalcEnergy()\n # print(\"Minimized energy: {}\".format(min_e))\n # ### debug ###\n\n # get the angle bend term contribution to the total molecule strain energy\n mmff_props.SetMMFFBondTerm(False)\n mmff_props.SetMMFFAngleTerm(True)\n mmff_props.SetMMFFStretchBendTerm(False)\n mmff_props.SetMMFFOopTerm(False)\n mmff_props.SetMMFFTorsionTerm(False)\n mmff_props.SetMMFFVdWTerm(False)\n mmff_props.SetMMFFEleTerm(False)\n\n ff = AllChem.MMFFGetMoleculeForceField(m_h, mmff_props)\n\n min_angle_e = ff.CalcEnergy()\n # print(\"Minimized angle bend energy: {}\".format(min_angle_e))\n\n # find number of angles in molecule\n # TODO(Bowen): there must be a better way to get a list of all angles\n # from molecule... This is too hacky\n num_atoms = m_h.GetNumAtoms()\n atom_indices = range(num_atoms)\n angle_atom_triplets = itertools.permutations(atom_indices, 3) # get all\n # possible 3 atom indices groups. Currently, each angle is represented by\n # 2 duplicate groups. 
Should remove duplicates here to be more efficient\n double_num_angles = 0\n for triplet in list(angle_atom_triplets):\n if mmff_props.GetMMFFAngleBendParams(m_h, *triplet):\n double_num_angles += 1\n num_angles = double_num_angles / 2 # account for duplicate angles\n\n # print(\"Number of angles: {}\".format(num_angles))\n\n avr_angle_e = min_angle_e / num_angles\n\n # print(\"Average minimized angle bend energy: {}\".format(avr_angle_e))\n\n # ### debug ###\n # for i in range(7):\n # termList = [['BondStretch', False], ['AngleBend', False],\n # ['StretchBend', False], ['OopBend', False],\n # ['Torsion', False],\n # ['VdW', False], ['Electrostatic', False]]\n # termList[i][1] = True\n # mmff_props.SetMMFFBondTerm(termList[0][1])\n # mmff_props.SetMMFFAngleTerm(termList[1][1])\n # mmff_props.SetMMFFStretchBendTerm(termList[2][1])\n # mmff_props.SetMMFFOopTerm(termList[3][1])\n # mmff_props.SetMMFFTorsionTerm(termList[4][1])\n # mmff_props.SetMMFFVdWTerm(termList[5][1])\n # mmff_props.SetMMFFEleTerm(termList[6][1])\n # ff = AllChem.MMFFGetMoleculeForceField(m_h, mmff_props)\n # print('{0:>16s} energy: {1:12.4f} kcal/mol'.format(termList[i][0],\n # ff.CalcEnergy()))\n # ## end debug ###\n\n if avr_angle_e < cutoff:\n return True\n else:\n return False\n\n\n\n### TARGET VALUE REWARDS ###\n\ndef reward_target(mol, target, ratio, val_max, val_min, func):\n x = func(mol)\n reward = max(-1*np.abs((x-target)/ratio) + val_max,val_min)\n return reward\n\ndef reward_target_new(mol, func,r_max1=4,r_max2=2.25,r_mid=2,r_min=-2,x_start=500, x_mid=525):\n x = func(mol)\n return max((r_max1-r_mid)/(x_start-x_mid)*np.abs(x-x_mid)+r_max1, (r_max2-r_mid)/(x_start-x_mid)*np.abs(x-x_mid)+r_max2,r_min)\n\ndef reward_target_logp(mol, target,ratio=0.5,max=4):\n \"\"\"\n Reward for a target log p\n :param mol: rdkit mol object\n :param target: float\n :return: float (-inf, max]\n \"\"\"\n x = MolLogP(mol)\n reward = -1 * np.abs((x - target)/ratio) + max\n return reward\n\ndef reward_target_penalizelogp(mol, target,ratio=3,max=4):\n \"\"\"\n Reward for a target log p\n :param mol: rdkit mol object\n :param target: float\n :return: float (-inf, max]\n \"\"\"\n x = reward_penalized_log_p(mol)\n reward = -1 * np.abs((x - target)/ratio) + max\n return reward\n\ndef reward_target_qed(mol, target,ratio=0.1,max=4):\n \"\"\"\n Reward for a target log p\n :param mol: rdkit mol object\n :param target: float\n :return: float (-inf, max]\n \"\"\"\n x = qed(mol)\n reward = -1 * np.abs((x - target)/ratio) + max\n return reward\n\ndef reward_target_mw(mol, target,ratio=40,max=4):\n \"\"\"\n Reward for a target molecular weight\n :param mol: rdkit mol object\n :param target: float\n :return: float (-inf, max]\n \"\"\"\n x = rdMolDescriptors.CalcExactMolWt(mol)\n reward = -1 * np.abs((x - target)/ratio) + max\n return reward\n\n# TODO(Bowen): num rings is a discrete variable, so what is the best way to\n# calculate the reward?\ndef reward_target_num_rings(mol, target):\n \"\"\"\n Reward for a target number of rings\n :param mol: rdkit mol object\n :param target: int\n :return: float (-inf, 1]\n \"\"\"\n x = rdMolDescriptors.CalcNumRings(mol)\n reward = -1 * (x - target)**2 + 1\n return reward\n\n# TODO(Bowen): more efficient if we precalculate the target fingerprint\nfrom rdkit import DataStructs\ndef reward_target_molecule_similarity(mol, target, radius=2, nBits=2048,\n useChirality=True):\n \"\"\"\n Reward for a target molecule similarity, based on tanimoto similarity\n between the ECFP fingerprints of the x molecule and target 
molecule\n :param mol: rdkit mol object\n :param target: rdkit mol object\n :return: float, [0.0, 1.0]\n \"\"\"\n x = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, radius=radius,\n nBits=nBits,\n useChirality=useChirality)\n target = rdMolDescriptors.GetMorganFingerprintAsBitVect(target,\n radius=radius,\n nBits=nBits,\n useChirality=useChirality)\n return DataStructs.TanimotoSimilarity(x, target)\n\n\n### TERMINAL VALUE REWARDS ###\n\ndef reward_penalized_log_p(mol):\n \"\"\"\n Reward that consists of log p penalized by SA and # long cycles,\n as described in (Kusner et al. 2017). Scores are normalized based on the\n statistics of 250k_rndm_zinc_drugs_clean.smi dataset\n :param mol: rdkit mol object\n :return: float\n \"\"\"\n # normalization constants, statistics from 250k_rndm_zinc_drugs_clean.smi\n logP_mean = 2.4570953396190123\n logP_std = 1.434324401111988\n SA_mean = -3.0525811293166134\n SA_std = 0.8335207024513095\n cycle_mean = -0.0485696876403053\n cycle_std = 0.2860212110245455\n\n log_p = MolLogP(mol)\n SA = -calculateScore(mol)\n\n # cycle score\n cycle_list = nx.cycle_basis(nx.Graph(\n Chem.rdmolops.GetAdjacencyMatrix(mol)))\n if len(cycle_list) == 0:\n cycle_length = 0\n else:\n cycle_length = max([len(j) for j in cycle_list])\n if cycle_length <= 6:\n cycle_length = 0\n else:\n cycle_length = cycle_length - 6\n cycle_score = -cycle_length\n\n normalized_log_p = (log_p - logP_mean) / logP_std\n normalized_SA = (SA - SA_mean) / SA_std\n normalized_cycle = (cycle_score - cycle_mean) / cycle_std\n\n return normalized_log_p + normalized_SA + normalized_cycle\n\n\n# # TEST compare with junction tree paper examples from Figure 7\n# assert round(reward_penalized_log_p(Chem.MolFromSmiles('ClC1=CC=C2C(C=C(C('\n# 'C)=O)C(C(NC3=CC(NC('\n# 'NC4=CC(C5=C('\n# 'C)C=CC=C5)=CC=C4)=O)=CC=C3)=O)=C2)=C1')), 2) == 5.30\n# assert round(reward_penalized_log_p(Chem.MolFromSmiles('CC(NC1=CC(C2=CC=CC('\n# 'NC(NC3=CC=CC(C4=CC('\n# 'F)=CC=C4)=C3)=O)=C2)=CC=C1)=O')), 2) == 4.49\n# assert round(reward_penalized_log_p(Chem.MolFromSmiles('ClC(C('\n# 'Cl)=C1)=CC=C1NC2=CC=CC=C2C(NC(NC3=C(C(NC4=C(Cl)C=CC=C4)=S)C=CC=C3)=O)=O')), 2) == 4.93\n\ndef get_normalized_values():\n fname = '/home/bowen/pycharm_deployment_directory/rl_graph_generation/gym-molecule/gym_molecule/dataset/250k_rndm_zinc_drugs_clean.smi'\n with open(fname) as f:\n smiles = f.readlines()\n\n for i in range(len(smiles)):\n smiles[i] = smiles[i].strip()\n smiles_rdkit = []\n\n for i in range(len(smiles)):\n smiles_rdkit.append(Chem.MolToSmiles(Chem.MolFromSmiles(smiles[i])))\n print(i)\n\n logP_values = []\n for i in range(len(smiles)):\n logP_values.append(MolLogP(Chem.MolFromSmiles(smiles_rdkit[i])))\n print(i)\n\n SA_scores = []\n for i in range(len(smiles)):\n SA_scores.append(\n -calculateScore(Chem.MolFromSmiles(smiles_rdkit[i])))\n print(i)\n\n cycle_scores = []\n for i in range(len(smiles)):\n cycle_list = nx.cycle_basis(nx.Graph(\n Chem.rdmolops.GetAdjacencyMatrix(Chem.MolFromSmiles(smiles_rdkit[\n i]))))\n if len(cycle_list) == 0:\n cycle_length = 0\n else:\n cycle_length = max([len(j) for j in cycle_list])\n if cycle_length <= 6:\n cycle_length = 0\n else:\n cycle_length = cycle_length - 6\n cycle_scores.append(-cycle_length)\n print(i)\n\n SA_scores_normalized = (np.array(SA_scores) - np.mean(SA_scores)) / np.std(\n SA_scores)\n logP_values_normalized = (np.array(logP_values) - np.mean(\n logP_values)) / np.std(logP_values)\n cycle_scores_normalized = (np.array(cycle_scores) - np.mean(\n cycle_scores)) / 
np.std(cycle_scores)\n\n return np.mean(SA_scores), np.std(SA_scores), np.mean(\n logP_values), np.std(logP_values), np.mean(\n cycle_scores), np.std(cycle_scores)\n\n\n\n\n# smile = 'C'*38\nsmile = 'CCCCCCCCCC(CCC)(CCCCCCC)CCCCCCCCC(CCCCC)CC(C)C'\nprint(smile, reward_penalized_log_p(Chem.MolFromSmiles(smile)))\n\nif __name__ == '__main__':\n env = gym.make('molecule-v0') # in gym format\n # env = GraphEnv()\n # env.init(has_scaffold=True)\n\n ## debug\n m_env = MoleculeEnv()\n m_env.init(data_type='zinc',has_feature=True,is_conditional=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n######### GraphEnv potentially with feature\n# class GraphEnv(gym.Env):\n# \"\"\"\n# Environment for a general graph\n# \"\"\"\n# def __init__(self):\n# pass\n# # TODO(Bowen): check\n# def init(self, data_type='simple', reward_step_total=1, is_normalize=0):\n# '''\n# own init function, since gym does not support passing argument\n# '''\n# self.is_normalize = bool(is_normalize)\n# self.graph = nx.Graph()\n# if data_type == 'simple':\n# possible_nodes = ['n']\n# possible_edges = ['e']\n# self.max_node = 100 + len(possible_nodes)\n# else:\n# raise ValueError('Invalid data type')\n# self.node_type_num = len(possible_nodes)\n# self.possible_node_types = np.array(possible_nodes)\n# self.possible_edge_types = np.array(possible_edges)\n#\n# self.max_action = 128\n# self.reward_step_total = reward_step_total\n# self.action_space = gym.spaces.MultiDiscrete([self.max_node, self.max_node, 3, 2])\n# self.observation_space = {}\n# self.observation_space['adj'] = gym.Space(shape=[len(possible_edges),\n# self.max_node,\n# self.max_node])\n# self.observation_space['node'] = gym.Space(shape=[1, self.max_node,\n# len(possible_nodes)])\n#\n# self.counter = 0\n#\n# ## load expert data\n# cwd = os.path.dirname(__file__)\n# # if data_type == 'simple':\n# # path = os.path.join(os.path.dirname(cwd), 'dataset',\n# # 'simple') #TODO: Add a suitable dataset\n# # self.dataset = dataset(path) #TODO: Add a suitable dataset loader\n# if data_type == 'simple':\n# self.dataset = [nx.barabasi_albert_graph(100,2) for i in range(200)]\n#\n#\n# self.level = 0 # for curriculum learning, level starts with 0, and increase afterwards\n#\n# def level_up(self):\n# self.level += 1\n#\n# def normalize_adj(self, adj):\n# degrees = np.sum(adj, axis=2)\n# # print('degrees',degrees)\n# D = np.zeros((adj.shape[0], adj.shape[1], adj.shape[2]))\n# for i in range(D.shape[0]):\n# D[i, :, :] = np.diag(np.power(degrees[i, :], -0.5))\n# adj_normal = D @ adj @ D\n# adj_normal[np.isnan(adj_normal)] = 0\n# return adj_normal\n#\n# # TODO(Bowen): check\n# def step(self, action):\n# \"\"\"\n#\n# :param action:\n# :return:\n# \"\"\"\n# ### init\n# info = {} # info we care about\n# self.graph_old = copy.deepcopy(self.graph)\n# total_nodes = self.graph.number_of_nodes()\n#\n# ### take action\n# if action[0, 3] == 0: # not stop\n# stop = False\n# if action[0, 1] >= total_nodes:\n# self._add_node(action[0, 1] - total_nodes) # add new node\n# action[0, 1] = total_nodes # new node id\n# self._add_edge(action) # add new edge\n# else:\n# self._add_edge(action) # add new edge\n# else: # stop\n# stop = True\n#\n# ### calculate intermediate rewards\n# if self.graph.number_of_nodes() + self.graph.number_of_edges() - \\\n# self.graph_old.number_of_nodes() - \\\n# self.graph_old.number_of_edges() > 0:\n# reward_step = self.reward_step_total / self.max_node\n# # successfully added node/edge\n# else:\n# reward_step = 
-self.reward_step_total / self.max_node # edge\n# # already exists\n#\n# ### calculate and use terminal reward\n# if self.graph.number_of_nodes() >= self.max_node - \\\n# self.possible_node_types.shape[0] or self.counter >= \\\n# self.max_action or stop:\n#\n# # property rewards\n# reward_terminal = 1 # arbitrary choice\n#\n# new = True # end of episode\n# reward = reward_step + reward_terminal\n#\n# # print terminal graph information\n# info['reward_terminal'] = reward_terminal\n# info['reward'] = reward\n# info['stop'] = stop\n# ### use stepwise reward\n# else:\n# new = False\n# reward = reward_step\n#\n# # get observation\n# ob = self.get_observation()\n#\n# self.counter += 1\n# if new:\n# self.counter = 0\n#\n# return ob, reward, new, info\n#\n# def reset(self):\n# \"\"\"\n# to avoid error, assume a node already exists\n# :return: ob\n# \"\"\"\n# self.graph.clear()\n# self._add_node(0)\n# self.counter = 0\n# ob = self.get_observation()\n# return ob\n#\n# # TODO(Bowen): is this necessary\n# def render(self, mode='human', close=False):\n# return\n#\n# # TODO(Bowen): check\n# def _add_node(self, node_type_id):\n# \"\"\"\n#\n# :param node_type_id:\n# :return:\n# \"\"\"\n# new_node_idx = self.graph.number_of_nodes()\n# self.graph.add_node(new_node_idx, type=self.possible_node_types[node_type_id])\n#\n# # TODO(Bowen): check\n# def _add_edge(self, action):\n# \"\"\"\n#\n# :param action: [first_node, second_node, edge_type_id]\n# :return:\n# \"\"\"\n# edge_type = self.possible_edge_types[action[0, 2]]\n#\n# if self.graph.has_edge(int(action[0,0]), int(action[0,1])):\n# return False\n# else:\n# self.graph.add_edge(int(action[0,0]), int(action[0,1]), type=edge_type)\n# return True\n#\n# def get_final_graph(self):\n# return self.graph\n#\n# # TODO(Bowen): check [for featured graph]\n# # def get_observation(self):\n# # \"\"\"\n# #\n# # :return: ob, where ob['adj'] is E with dim b x n x n and ob['node']\n# # is F with dim 1 x n x m. NB: n = node_num + node_type_num\n# # \"\"\"\n# # n = self.graph.number_of_nodes()\n# # n_shift = len(self.possible_node_types) # assume isolated nodes new nodes exist\n# #\n# # d_n = len(self.possible_node_types)\n# # F = np.zeros((1, self.max_node, d_n))\n# #\n# # for node in self.graph.nodes_iter(data=True):\n# # node_idx = node[0]\n# # node_type = node[1]['type']\n# # float_array = (node_type == self.possible_node_types).astype(float)\n# # assert float_array.sum() != 0\n# # F[0, node_idx, :] = float_array\n# # temp = F[0, n:n + n_shift, :]\n# # F[0, n:n + n_shift, :] = np.eye(n_shift)\n# #\n# # d_e = len(self.possible_edge_types)\n# # E = np.zeros((d_e, self.max_node, self.max_node))\n# # for i in range(d_e):\n# # E[i, :n + n_shift, :n + n_shift] = np.eye(n + n_shift)\n# # for e in self.graph.edges_iter(data=True):\n# # begin_idx = e[0]\n# # end_idx = e[1]\n# # edge_type = e[2]['type']\n# # float_array = (edge_type == self.possible_edge_types).astype(float)\n# # assert float_array.sum() != 0\n# # E[:, begin_idx, end_idx] = float_array\n# # E[:, end_idx, begin_idx] = float_array\n# # ob = {}\n# # if self.is_normalize:\n# # E = self.normalize_adj(E)\n# # ob['adj'] = E\n# # ob['node'] = F\n# # return ob\n#\n#\n# # for graphs without features\n# def get_observation(self,feature='deg'):\n# \"\"\"\n#\n# :return: ob, where ob['adj'] is E with dim b x n x n and ob['node']\n# is F with dim 1 x n x m. 
NB: n = node_num + node_type_num\n# \"\"\"\n# n = self.graph.number_of_nodes()\n# F = np.zeros((1, self.max_node, 1))\n# F[0,:n+1,0] = 1\n#\n# E = np.zeros((1, self.max_node, self.max_node))\n# E[0,:n,:n] = np.asarray(nx.to_numpy_matrix(self.graph))[np.newaxis,:,:]\n# E[0,:n+1,:n+1] += np.eye(n+1)\n#\n# ob = {}\n# if self.is_normalize:\n# E = self.normalize_adj(E)\n# ob['adj'] = E\n# ob['node'] = F\n# return ob\n#\n# def get_expert(self, batch_size, is_final=False, curriculum=0,\n# level_total=6, level=0):\n# ob = {}\n# node_type_num = len(self.possible_node_types)\n# edge_type_num = len(self.possible_edge_types)\n# ob['node'] = np.zeros((batch_size, 1, self.max_node, node_type_num))\n# ob['adj'] = np.zeros(\n# (batch_size, edge_type_num, self.max_node, self.max_node))\n#\n# ac = np.zeros((batch_size, 4))\n# ### select graph\n# dataset_len = len(self.dataset)\n# for i in range(batch_size):\n# # print('--------------------------------------------------')\n# ### get a subgraph\n# if curriculum == 1:\n# ratio_start = level / float(level_total)\n# ratio_end = (level + 1) / float(level_total)\n# idx = np.random.randint(int(ratio_start * dataset_len),\n# int(ratio_end * dataset_len))\n# else:\n# idx = np.random.randint(0, dataset_len)\n# graph = self.dataset[idx]\n# edges = graph.edges()\n# # select the edge num for the subgraph\n# if is_final:\n# edges_sub_len = len(edges)\n# else:\n# edges_sub_len = random.randint(1, len(edges))\n# edges_sub = random.sample(edges, k=edges_sub_len)\n# graph_sub = nx.Graph(edges_sub)\n# graph_sub = max(nx.connected_component_subgraphs(graph_sub),\n# key=len)\n# if is_final: # when the subgraph the whole graph, the expert show\n# # stop sign\n# node1 = random.randint(0, graph.number_of_nodes() - 1)\n# while True:\n# node2 = random.randint(0,\n# graph.number_of_nodes() + node_type_num - 1)\n# if node2 != node1:\n# break\n# edge_type = random.randint(0, edge_type_num - 1)\n# ac[i, :] = [node1, node2, edge_type, 1] # stop\n# else:\n# ### random pick an edge from the subgraph, then remove it\n# edge_sample = random.sample(graph_sub.edges(), k=1)\n# graph_sub.remove_edges_from(edge_sample)\n# graph_sub = max(nx.connected_component_subgraphs(graph_sub),\n# key=len)\n# edge_sample = edge_sample[0] # get value\n# ### get action\n# if edge_sample[0] in graph_sub.nodes() and edge_sample[\n# 1] in graph_sub.nodes():\n# node1 = graph_sub.nodes().index(edge_sample[0])\n# node2 = graph_sub.nodes().index(edge_sample[1])\n# elif edge_sample[0] in graph_sub.nodes():\n# node1 = graph_sub.nodes().index(edge_sample[0])\n# node2 = np.argmax(\n# graph.node[edge_sample[1]][\n# 'type'] == self.possible_node_types) + \\\n# graph_sub.number_of_nodes()\n# elif edge_sample[1] in graph_sub.nodes():\n# node1 = graph_sub.nodes().index(edge_sample[1])\n# node2 = np.argmax(\n# graph.node[edge_sample[0]][\n# 'type'] == self.possible_node_types) + graph_sub.number_of_nodes()\n# else:\n# print('Expert policy error!')\n# edge_type = np.argmax(graph[edge_sample[0]][edge_sample[1]][\n# 'type'] == self.possible_edge_types)\n# ac[i, :] = [node1, node2, edge_type, 0] # don't stop\n# # print('action',[node1,node2,edge_type,0])\n# # print('action',ac)\n# # plt.axis(\"off\")\n# # nx.draw_networkx(graph_sub)\n# # plt.show()\n# ### get observation\n# n = graph_sub.number_of_nodes()\n# for node_id, node in enumerate(graph_sub.nodes()):\n# float_array = (\n# graph.node[node]['type'] == self.possible_node_types).astype(\n# float)\n# assert float_array.sum() != 0\n# ob['node'][i, 0, node_id, :] = float_array\n# 
ob['node'][i, 0, n:n + node_type_num, :] = np.eye(node_type_num)\n#\n# for j in range(edge_type_num):\n# ob['adj'][i, j, :n + node_type_num,\n# :n + node_type_num] = np.eye(n + node_type_num)\n# for edge in graph_sub.edges():\n# begin_idx = graph_sub.nodes().index(edge[0])\n# end_idx = graph_sub.nodes().index(edge[1])\n# edge_type = graph[edge[0]][edge[1]]['type']\n# float_array = (edge_type == self.possible_edge_types).astype(\n# float)\n# assert float_array.sum() != 0\n# ob['adj'][i, :, begin_idx, end_idx] = float_array\n# ob['adj'][i, :, end_idx, begin_idx] = float_array\n# if self.is_normalize:\n# ob['adj'][i] = self.normalize_adj(ob['adj'][i])\n#\n# return ob, ac\n" ]
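The reward_penalized_log_p function in the code above combines a z-scored logP, a z-scored synthetic-accessibility score and a long-cycle penalty, using normalization constants precomputed on the 250k ZINC set. A minimal sketch of just that arithmetic follows; the helper name and the example numbers are made up, and the RDKit parts (MolLogP, calculateScore, the ring search) are assumed to have been run already so only precomputed values are passed in.

def penalized_logp_from_parts(log_p, neg_sa_score, largest_ring_size):
    # normalization constants from 250k_rndm_zinc_drugs_clean.smi, copied from the code above
    logP_mean, logP_std = 2.4570953396190123, 1.434324401111988
    SA_mean, SA_std = -3.0525811293166134, 0.8335207024513095
    cycle_mean, cycle_std = -0.0485696876403053, 0.2860212110245455

    # rings with more than 6 atoms are penalized by their excess length
    cycle_score = -max(0, largest_ring_size - 6)

    return ((log_p - logP_mean) / logP_std
            + (neg_sa_score - SA_mean) / SA_std
            + (cycle_score - cycle_mean) / cycle_std)

# e.g. logP = 3.0, already-negated SA score = -2.5, largest ring of 6 atoms
print(round(penalized_logp_from_parts(3.0, -2.5, 6), 3))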
[ [ "numpy.array", "numpy.ceil", "numpy.isnan", "numpy.random.rand", "numpy.zeros", "numpy.random.seed", "numpy.sum", "numpy.mean", "numpy.eye", "numpy.std", "numpy.random.randint", "numpy.power", "numpy.abs", "numpy.argmax" ] ]
chuajiesheng/twitter-sentiment-analysis
[ "7617243c953a20c517a737c79fe0f54e55aef140" ]
[ "analysis/end_to_end.py" ]
[ "import numpy as np\nimport nltk\nimport sklearn\nimport tokenizers\nimport multiprocessing\nimport itertools\nimport functools\n\n\ndef get_dataset():\n files = ['./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt']\n\n x = []\n for file in files:\n s = []\n with open(file, 'r') as f:\n for line in f:\n s.append(line.strip())\n\n assert len(s) == 1367\n x.extend(s)\n\n y = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)\n return x, y\n\n\ndef test_tokenizer(X, y, tokenizer, train_size, k_best):\n ss = sklearn.model_selection.ShuffleSplit(n_splits=10, train_size=(train_size / 100), test_size=None,\n random_state=42)\n total_train_error = 0.0\n total_test_error = 0.0\n total_f1 = 0.0\n runs = 0\n for train, test in ss.split(X, y):\n X_train = np.array(X)[train]\n y_train = y[train]\n\n X_test = np.array(X)[test]\n y_test = y[test]\n\n vect = sklearn.feature_extraction.text.CountVectorizer(tokenizer=tokenizer)\n X_train_counts = vect.fit_transform(X_train)\n tf_transformer = sklearn.feature_extraction.text.TfidfTransformer(use_idf=False).fit(X_train_counts)\n X_train_tfidf = tf_transformer.transform(X_train_counts)\n\n max_k = X_train_tfidf.shape[1]\n if k_best != 'all' and k_best > max_k:\n k_best = 'all'\n\n ch2 = sklearn.feature_selection.SelectKBest(sklearn.feature_selection.mutual_info_classif, k=k_best)\n X_train_ch2 = ch2.fit_transform(X_train_tfidf, y_train)\n\n clf = sklearn.linear_model.LogisticRegression().fit(X_train_ch2, y_train)\n\n predicted = clf.predict(X_train_ch2)\n train_error = 1 - sklearn.metrics.accuracy_score(y_train, predicted)\n total_train_error += train_error\n\n X_test_counts = vect.transform(X_test)\n X_test_tfidf = tf_transformer.transform(X_test_counts)\n X_test_ch2 = ch2.transform(X_test_tfidf)\n predicted = clf.predict(X_test_ch2)\n\n test_error = 1 - sklearn.metrics.accuracy_score(y_test, predicted)\n total_test_error += test_error\n\n total_f1 += sklearn.metrics.f1_score(y_test, predicted, average='macro')\n runs += 1\n\n return total_train_error / runs, total_test_error / runs, total_f1 / runs\n\n\nclass WhitespaceTokenizer(object):\n def __init__(self):\n pass\n\n def __call__(self, doc):\n return doc.split(' ')\n\n\nclass TreebankTokenizer(object):\n def __init__(self):\n self.treebank_word_tokenize = nltk.tokenize.treebank.TreebankWordTokenizer().tokenize\n\n def __call__(self, doc):\n return self.treebank_word_tokenize(doc)\n\n\nclass SentimentTokenizer(object):\n def __init__(self):\n self.sentiment_aware_tokenize = tokenizers.happy_tokenizer.Tokenizer().tokenize\n\n def __call__(self, doc):\n return self.sentiment_aware_tokenize(doc)\n\n\nvarious_tokenizers = {\n # 'Whitespace': WhitespaceTokenizer(),\n 'Treebank-style': TreebankTokenizer(),\n 'Sentiment-aware': SentimentTokenizer()\n}\ntrain_sizes = list(range(60, 100, 10))\nk_sizes = list(range(100, 10000, 200))\nX, y = get_dataset()\n\nTOKENIZER_F1_FILE = 'analysis/output/tokenizer_f1.csv'\nTOKENIZER_ACC_FILE = 'analysis/output/tokenizer_accuracy.csv'\n\nwith open(TOKENIZER_F1_FILE, 'w') as f:\n f.writelines('tokenizer, train_size, k, f1\\n')\n\nwith open(TOKENIZER_ACC_FILE, 'w') as f:\n f.writelines('tokenizer, train_size, k, train_error, test_error\\n')\n\n\ndef train_and_output(X, y, tokenizer, train_size, k_best):\n tokenizer_name = tokenizer.__class__.__name__\n print('tokenizer={}, train_size={}, k_best={}'.format(tokenizer_name, train_size, k_best))\n average_train_error, average_test_error, average_f1 = test_tokenizer(X, 
y, tokenizer, train_size, k_best)\n with open('analysis/output/tokenizer_accuracy.csv', 'a') as acc_file:\n acc_file.write('{}, {}, {}, {:.3f}, {:.3f}\\n'.format(tokenizer_name, train_size, k_best, average_train_error, average_test_error))\n acc_file.flush()\n with open('analysis/output/tokenizer_f1.csv', 'a') as f1_file:\n f1_file.write('{}, {}, {}, {:.3f}\\n'.format(tokenizer_name, train_size, k_best, average_f1))\n f1_file.flush()\n\ncombi = itertools.product(various_tokenizers.values(), train_sizes, k_sizes)\nwith multiprocessing.Pool() as pool:\n p = pool.starmap(functools.partial(train_and_output, X, y), combi)\n\nexit(0)\n\n\nclass SkipgramSentimentTokenizer(object):\n def __init__(self, n, k, negate=False):\n self.sentiment_aware_tokenize = tokenizers.happy_tokenizer.Tokenizer().tokenize\n self.n = n\n self.k = k\n self.negate = negate\n\n def __call__(self, doc):\n tokens = list(self.sentiment_aware_tokenize(doc))\n\n if self.negate:\n tokens = nltk.sentiment.util.mark_negation(tokens)\n\n if self.n == 1:\n return tokens\n\n skipgrams = list(nltk.skipgrams(tokens, self.n, self.k))\n return list([' '.join(s) for s in skipgrams])\n\nfeatures_extraction = {\n 'Unigram': SkipgramSentimentTokenizer(1, 0),\n 'Bigram': SkipgramSentimentTokenizer(2, 0),\n 'Trigram': SkipgramSentimentTokenizer(3, 0),\n 'Bigram with 1 skip': SkipgramSentimentTokenizer(2, 1),\n 'Bigram with 2 skip': SkipgramSentimentTokenizer(2, 2),\n 'Bigram with 3 skip': SkipgramSentimentTokenizer(2, 3),\n 'Bigram with 4 skip': SkipgramSentimentTokenizer(2, 4),\n 'Trigram with 1 skip': SkipgramSentimentTokenizer(3, 1),\n 'Trigram with 2 skip': SkipgramSentimentTokenizer(3, 2),\n 'Trigram with 3 skip': SkipgramSentimentTokenizer(3, 3),\n 'Trigram with 4 skip': SkipgramSentimentTokenizer(3, 4),\n 'Unigram (with negation)': SkipgramSentimentTokenizer(1, 0, negate=True),\n 'Bigram (with negation)': SkipgramSentimentTokenizer(2, 0, negate=True),\n 'Trigram (with negation)': SkipgramSentimentTokenizer(3, 0, negate=True),\n 'Bigram with 1 skip (with negation)': SkipgramSentimentTokenizer(2, 1, negate=True),\n 'Bigram with 2 skip (with negation)': SkipgramSentimentTokenizer(2, 2, negate=True),\n 'Bigram with 3 skip (with negation)': SkipgramSentimentTokenizer(2, 3, negate=True),\n 'Bigram with 4 skip (with negation)': SkipgramSentimentTokenizer(2, 4, negate=True),\n 'Trigram with 1 skip (with negation)': SkipgramSentimentTokenizer(3, 1, negate=True),\n 'Trigram with 2 skip (with negation)': SkipgramSentimentTokenizer(3, 2, negate=True),\n 'Trigram with 3 skip (with negation)': SkipgramSentimentTokenizer(3, 3, negate=True),\n 'Trigram with 4 skip (with negation)': SkipgramSentimentTokenizer(3, 4, negate=True),\n}\n\n\nfeatures_acc_csv = open('analysis/output/features_accuracy.csv', 'w')\nfeatures_f1_csv = open('analysis/output/features_f1.csv', 'w')\n\nfeatures_acc_csv.writelines('features, train_size, train_error, test_error\\n')\nfeatures_f1_csv.writelines('features, train_size, f1\\n')\n\nfor keys in features_extraction.keys():\n print(keys)\n tok = features_extraction[keys]\n\n for size in train_sizes:\n average_train_error, average_test_error, average_f1 = test_tokenizer(X, y, tok, size)\n features_acc_csv.write('{}, {}%, {:.3f}, {:.3f}\\n'.format(keys, size, average_train_error, average_test_error))\n features_f1_csv.write('{}, {}%, {:.3f}\\n'.format(keys, size, average_f1))\n\nfeatures_acc_csv.close()\nfeatures_f1_csv.close()\n" ]
[ [ "numpy.array", "sklearn.feature_extraction.text.TfidfTransformer", "sklearn.metrics.accuracy_score", "sklearn.linear_model.LogisticRegression", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.feature_selection.SelectKBest", "sklearn.metrics.f1_score", "sklearn.model_selection.ShuffleSplit" ] ]
kangvcar/MoviesAnalyse
[ "31d37ae745af6dd9b5bd5d007aebd63cf1b4247c" ]
[ "analyse/movie_analyse.py" ]
[ "#!/usr/bin/python\n# coding=utf-8\n\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport jieba\nimport jieba.analyse\nimport os\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Map\nfrom pyecharts.charts import Pie\nfrom pyecharts.charts import Bar\nfrom pyecharts.charts import TreeMap\nfrom pyecharts.charts import Line\nfrom pyecharts.faker import Faker\nfrom pyecharts.render import make_snapshot\n# 使用 snapshot-selenium 渲染图片\nfrom snapshot_selenium import snapshot\nfrom snownlp import SnowNLP\n\n\ndef get_current_time():\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n\nclass MovieInfoAnalyse(object):\n \"\"\"\n TOP500电影信息分析类\n \"\"\"\n def __init__(self):\n if not os.path.exists('analyse_data'):\n os.mkdir('analyse_data')\n print(\"所有分析结果保存在 analyse_data 文件夹下...\")\n\n def make_geo_map(self):\n \"\"\"\n 生成世界地图,根据各国电影发行量\n :return:\n \"\"\"\n # print(get_current_time() + '|-------> 正在生成 世界各国电影发行量 图表...')\n # 导入TOP500电影数据\n csv_path = os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir, \"moviespider\", \"movie_info_top500.csv\"))\n rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)\n # 分析并统计数据\n col_country = rows['国别'].to_frame()\n res = col_country.groupby('国别')['国别'].count().sort_values(ascending=False)\n raw_data = [i for i in res.items()]\n\n # 导入映射数据,英文名 -> 中文名\n country_name = pd.read_json('countries_zh_to_en.json', orient='index')\n stand_data = [i for i in country_name[0].items()]\n\n # 数据转换\n res_code = []\n for raw_country in raw_data:\n for stand_country in stand_data:\n if stand_country[1] in raw_country[0]:\n res_code.append(stand_country[0])\n code = pd.DataFrame(res_code).groupby(0)[0].count().sort_values(ascending=False)\n data = []\n for k, v in code.items():\n data.append([k, v])\n\n # 制作图表\n c = Map()\n c.add(\"电影发行量\", data, \"world\")\n c.set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n c.set_global_opts(title_opts=opts.TitleOpts(title=\"电影TOP500榜单中 - 世界各国电影发行量\"),\n visualmap_opts=opts.VisualMapOpts(max_=55))\n htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"世界各国电影发行量.html\"))\n pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"世界各国电影发行量.png\"))\n # 生成html\n c.render(htmlPath)\n # 生成png\n # make_snapshot(snapshot, c.render(), pngPath)\n # print(get_current_time() + '|-------> 已生成 世界各国电影发行量 图表...')\n return c\n\n def make_pid_charts(self):\n \"\"\"\n 根据电影类型生成饼图\n :return:\n \"\"\"\n # print(get_current_time() + '|-------> 正在生成 各类型占比 图表...')\n # 导入数据并初始化\n csv_path = os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir, \"moviespider\", \"movie_info_top500.csv\"))\n rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)\n to_drop = ['名称', '导演', '演员', '国别', '年份', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',\n '简介']\n res = rows.drop(to_drop, axis=1)\n # 数据分割\n type_list = []\n for i in res.itertuples():\n for j in i[1].split(','):\n type_list.append(j)\n # 数据统计\n df = pd.DataFrame(type_list, columns=['类型'])\n res = df.groupby('类型')['类型'].count().sort_values(ascending=False)\n res_list = []\n for i in res.items():\n res_list.append(i)\n # 生成饼图\n c = Pie()\n c.add(\"\", res_list, center=[\"40%\", \"55%\"], )\n c.set_global_opts(\n title_opts=opts.TitleOpts(title=\"电影TOP500榜单中 - 各类型占比\"),\n legend_opts=opts.LegendOpts(type_=\"scroll\", 
pos_left=\"80%\", orient=\"vertical\"),\n )\n c.set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {c}\"))\n\n htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"各类型占比.html\"))\n pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"各类型占比.png\"))\n # 生成html\n c.render(htmlPath)\n # 生成png\n # make_snapshot(snapshot, c.render(), pngPath)\n # print(get_current_time() + '|-------> 已生成 各类型占比 图表...')\n return c\n\n def make_relase_year_bar(self):\n \"\"\"\n 生成各年份电影发行量柱状图\n :return:\n \"\"\"\n # print(get_current_time() + '|-------> 正在生成 各年份电影发行量 图表...')\n # 导入数据并初始化\n csv_path = os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir, \"moviespider\", \"movie_info_top500.csv\"))\n rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)\n to_drop = ['名称', '导演', '演员', '国别', '类型', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',\n '简介']\n res = rows.drop(to_drop, axis=1)\n # 数据分析\n res_by = res.groupby('年份')['年份'].count().sort_values(ascending=False)\n res_by2 = res_by.sort_index(ascending=False)\n type(res_by2)\n years = []\n datas = []\n for k, v in res_by2.items():\n years.append(k)\n datas.append(v)\n # 生成图标\n c = Bar()\n c.add_xaxis(years)\n c.add_yaxis(\"发行电影数量\", datas, color=Faker.rand_color())\n c.set_global_opts(\n title_opts=opts.TitleOpts(title=\"电影TOP500榜单中 - 各年份电影发行量\"),\n datazoom_opts=[opts.DataZoomOpts(), opts.DataZoomOpts(type_=\"inside\")],\n )\n htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"各年份电影发行量.html\"))\n pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"各年份电影发行量.png\"))\n # 生成html\n c.render(htmlPath)\n # 生成png\n # make_snapshot(snapshot, c.render(), pngPath)\n # print(get_current_time() + '|-------> 已生成 各年份电影发行量 图表...')\n return c\n\n def make_star_treemap(self):\n \"\"\"\n 根据演员参演电影数生成矩形树图\n :return:\n \"\"\"\n # print(get_current_time() + '|-------> 正在生成 演员参演电影数 图表...')\n # 导入数据并初始化\n csv_path = os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir, \"moviespider\", \"movie_info_top500.csv\"))\n rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)\n # rows = pd.read_csv('../comments/movie_info_top500.csv', encoding='utf-8', dtype=str)\n to_drop = ['名称', '导演', '年份', '国别', '类型', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',\n '简介']\n res = rows.drop(to_drop, axis=1)\n # 数据分割\n all_star_list = []\n for i in res.itertuples():\n # print(i[1] + '\\n')\n for j in i[1].split(','):\n all_star_list.append(j)\n # 数据统计\n df = pd.DataFrame(all_star_list, columns=['演员'])\n res = df.groupby('演员')['演员'].count().sort_values(ascending=False)\n all_star_list = []\n for i in res.items():\n if i[1] > 4:\n all_star_list.append({\"value\": i[1], \"name\": i[0]})\n # 生成图标\n c = TreeMap()\n c.add(\"参演电影数\", all_star_list)\n c.set_global_opts(title_opts=opts.TitleOpts(title=\"电影TOP500榜单中 - 演员参演电影数\", subtitle=\"至少参演5部影评以上\"))\n\n htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"演员参演电影数.html\"))\n pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"演员参演电影数.png\"))\n # 生成html\n c.render(htmlPath)\n # 生成png\n # make_snapshot(snapshot, c.render(), pngPath)\n # print(get_current_time() + 
'|-------> 已生成 演员参演电影数 图表...')\n return c\n\n def make_sentiments_line(self):\n csv_path = os.path.abspath(os.path.join(os.path.dirname(\"__file__\"), os.path.pardir, \"moviespider\", \"comment_data\", \"极速车王.csv\"))\n # csv_path = os.path.abspath(os.path.join(\"D:\\\\MoviesAnalyse\", \"moviespider\", \"comment_data\", \"极速车王.csv\"))\n df = pd.read_csv(csv_path)\n to_drop = ['用户', '是否看过', '评分', '评论时间', '有用数']\n df.drop(to_drop, axis=1, inplace=True)\n str = df.to_string(index=False, columns=['评论'], header=False)\n str = [i.strip() for i in str.split('\\n')]\n sentimentslist = []\n for i in str:\n s = SnowNLP(i)\n sentimentslist.append(s.sentiments - 0.5)\n c = (\n Line()\n .add_xaxis([x for x in range(len(sentimentslist))])\n .add_yaxis(\"情感积极度\", sentimentslist, is_smooth=True)\n .set_series_opts(\n areastyle_opts=opts.AreaStyleOpts(opacity=0.3),\n label_opts=opts.LabelOpts(is_show=False),\n )\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"《极速车王》影评情感分析\", subtitle=\"接近0.5为积极,接近-0.5为消极\"),\n xaxis_opts=opts.AxisOpts(\n axistick_opts=opts.AxisTickOpts(is_align_with_label=True),\n is_scale=False,\n boundary_gap=False,\n ),\n )\n )\n htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"《极速车王》影评情感分析.html\"))\n pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), \"analyse_data\", \"《极速车王》影评情感分析.png\"))\n # 生成html\n c.render(htmlPath)\n # 生成png\n # make_snapshot(snapshot, c.render(), pngPath)\n return c\n\nif __name__ == '__main__':\n m = MovieInfoAnalyse()\n m.make_geo_map()\n m.make_pid_charts()\n m.make_relase_year_bar()\n m.make_star_treemap()\n m.make_sentiments_line()\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.read_json" ] ]
bio-phys/pyDHAMed
[ "d42f2a67a2650a0f8b09a798c2afe3b6d85aab08" ]
[ "pydhamed/prepare_dhamed.py" ]
[ "from __future__ import print_function\nfrom six.moves import range\n\nimport numpy as np\nfrom collections import defaultdict\n\ndef state_lifetimes_counts(transition_count_matrix_l,\n n, nwin):\n \"\"\"\n \n Calculate lifetimes in each of the states (for each run/window)\n \n Parameters:\n -----------\n transition_count_matrix_l: list of arrays\n List of arrays with transition count matrices. One array for\n each run/windows. \n n: int\n Number of (structural) states\n nwin: int\n Number of simulations runs/windows. I.e., how many umbrella\n winodws were run. \n\n Returns:\n --------\n t_ar: array_like\n Array, n x nwin, where n is number of states,\n nwin is number of windows with aggregate lifetimes in the states.\n\n \"\"\"\n #n = len(transition_count_matrix_l[0][:,0])\n #nwin = len(transition_count_matrix_l)\n t_ar = np.zeros((n,nwin), dtype=np.float64)\n\n for iwin, win in enumerate(transition_count_matrix_l):\n # sum over the column gives all counts in a state\n t_ar[:, iwin] = np.sum(win, axis=0)\n return t_ar\n\n\ndef total_transition_counts(transition_count_matrix_l, n):\n \"\"\"\n Parameters:\n -----------\n transition_count_matrix_l: list of arrays\n\n Returns:\n --------\n nn_ar: array_like, total transitions j->i\n\n \"\"\"\n #n = len(transition_count_matrix_l[0][:,0])\n nn_ar = np.zeros((n,n))\n\n # do j=1,n\n # do i=1,n\n # nn(i,j)=0.d0\n # do iwin=1,nwin\n # nn(i,j)=nn(i,j)+nij(i,j,iwin)\n # enddo\n # enddo\n\n for j in range(n):\n for i in range(n):\n for iwin, win in enumerate(transition_count_matrix_l):\n nn_ar[i,j] += win[i,j]\n return nn_ar\n\n\ndef counts_in_out(transition_count_matrix_l, n, nwin):\n \"\"\"\n Parameters:\n -----------\n transition_count_matrix_l: list of arrays\n List of arrays with transition count matrices. One array for\n each run/windows. \n n: int\n Number of (structural) states\n nwin: int\n Number of simulations runs/windows. I.e., how many umbrella\n winodws were run. \n\n Returns:\n --------\n n_in: array_like\n Array length n. Total number of transitions into state i.\n n_out: array_like\n Array length n. Total number of transitions out of state j.\n\n \"\"\"\n n_in = np.zeros(n)\n n_out = np.zeros(n)\n\n #for k in range(n):\n for iwin, count_matrix in enumerate(transition_count_matrix_l):\n for i, row in enumerate(count_matrix):\n for j, col_e in enumerate(row):\n if i != j:\n n_in[i] += count_matrix[i,j]\n n_out[i] += count_matrix[j,i]\n return n_in, n_out\n\n\ndef check_transition_pairs(transition_count_matrix_l, n_in, n_out, n_states, t_ar):\n \"\"\"\n Check if bin/state i is paired at least once, with at least one transition into\n the state and one transition out of the state. Unpaiews states are subsequently\n excluded from the analysis since no proper equilibrium can be established for them.\n \n Parameters:\n -----------\n transition_count_matrix_l: list of arrays\n List of arrays with transition count matrices. One array for\n each run/windows. \n n_in: array\n Number of transitions into given states.\n n_out: array\n Number of transitions from given state.\n n_states: integer\n t_ar: array_like\n Array with dimension, n x nwin, where n is number of states,\n nwin is number of windows. 
Contains aggregate \n \n Returns:\n --------\n paired_ar: array\n Number of transition pairs for each state.\n \n \"\"\"\n paired_ar = np.zeros(n_states)\n\n for iwin, count_matrix in enumerate(transition_count_matrix_l):\n for i in range(n_states-1):\n # At least a transition into and out of the state\n if (n_in[i] > 0.0) and (n_out[i] > 0.0):\n for j in range(i+1, n_states):\n # Transition to/from connected states \n if (n_in[j] > 0.0) and (n_out[j] > 0.0):\n # After checking that equilibrium can be established\n #check if states paired in this run/window\n if count_matrix[i,j] + count_matrix[j,i] > 0.0:\n if t_ar[i,iwin] + t_ar[j,iwin] > 0.0:\n paired_ar[i] += 1\n paired_ar[j] += 1\n return paired_ar\n\n\ndef actual_transition_pairs(n_in, n_out, n_states, paired_ar, verbose=False):\n \"\"\"\n Generate indeces of transition pairs. The indices of transition pairs are\n required to setup the input for the actual optimization of the DHAMed\n effective likelihood.\n \n Parameters:\n -----------\n n_in: array\n Number of transitions into given states\n n_out: array\n Number of transitions from given states\n paired_ar: array\n Number of transition pairs for each state.\n verbose: Boolean\n \n Returns:\n --------\n pair_idx_d: defaultdict\n Indeces of the paired states. Shifts the indices to account for\n excluded, unpaired states.\n n_actual: int\n Actual number of states included in the DHAMed calculation.\n \n \"\"\"\n n_actual = 0\n pair_idx_d = defaultdict(list)\n # index of included states/bins\n for i in range(n_states):\n if (n_in[i] > 0.0) and (n_out[i] > 0.0) and (paired_ar[i] > 0):\n\n pair_idx_d[i].append(n_actual)\n n_actual += 1\n else:\n print (\"bin {} excluded\".format(i))\n if verbose:\n print(n_actual)\n return pair_idx_d, n_actual\n\n\ndef prepare_dhamed_input_pairs(n_states, transition_count_matrix_l,\n n_in, n_out,\n paired_ar, t_ar, pair_idx_d,\n v_ar):\n \"\"\" \n Prepare formatted inputs for DHAMed optimization.\n \n Parameters:\n -----------\n n_states: int\n Number of (conformational/structural) states/bins\n transition_count_matrix_l: list of arrays\n List of transition count matrices\n n_in: array\n Number of transitions into a given state.\n n_out: array\n Number of transitions from given state.\n paired_ar: array\n Number of transition pairs for each state. \n t_ar: array_like\n n x nwin, where n is number of states, nwin is number of windows.\n pair_idx_d: dictionary\n Indeces of the paired states. Shifts the indices to account for\n excluded, unpaired states.\n v_ar: array\n Bias potentials in units of kT.\n\n Returns:\n --------\n ip: array_like\n npair entries, list of indices of bin i in transition pair.\n jp: array_like\n npair entries, list of indices of bin j in transition pair.\n vi: array_like\n npair entries, list of potentials in kT units at bin i of a pair.\n vj: array_like\n npair entries, list of potentials in kT units at bin j of a pair. 
\n ti: array_like\n npair entries, list of residence times in bin i of a pair.\n tj: array like\n npair entries, list of residence times in bin j of a pair.\n nijp: array_like\n npair entries, number of j->i and i->j transitions combined for a pair.\n\n \"\"\"\n ip_l = []\n jp_l = []\n vi = []\n vj = []\n ti = []\n tj = []\n nijp = []\n n_pair = 0\n\n for iwin, count_matrix in enumerate(transition_count_matrix_l):\n for i in range(n_states - 1):\n # test whether transition in/out of paired states i and j were observed\n if (n_in[i] > 0.0) and (n_out[i] > 0.0) and (paired_ar[i] > 0 ):\n for j in range(i+1, n_states):\n if (n_in[j] > 0.0) and (n_out[j] > 0.0) and (paired_ar[i] > 0 ):\n # transition in current window?\n if count_matrix[i,j] + count_matrix[j,i] > 0:\n if (t_ar[i, iwin] + t_ar[j, iwin] > 0.0):\n # lifetime > 0\n n_pair += 1\n ip_l.append(pair_idx_d[i][0] + 1) # where is the +1 coming from? N.B.: it is removed later on. \n jp_l.append(pair_idx_d[j][0] + 1)\n vi.append(v_ar[i,iwin])\n vj.append(v_ar[j,iwin])\n ti.append(t_ar[i,iwin])\n tj.append(t_ar[j,iwin])\n nijp.append( count_matrix[i,j] + count_matrix[j,i])\n \n print(\"Number of transition pairs {}\".format(n_pair))\n return np.array(ip_l, dtype=int), np.array(jp_l, dtype=int), np.array(vi), np.array(vj), np.array(ti), np.array(tj), np.array(nijp)\n\n\ndef check_total_transition_counts(n_out, n_in, paired_ar, n_actual):\n \"\"\"\n Remove excluded states/bin from the array with the total number of transitions\n out of state/bin i. \n \n Parameters:\n -----------\n n_out: array_like\n Total number of transitions out of state/bin i, with length N, the numer of states.\n n_in: array_like\n Total number of transitions into state/bin i, with length N, the number of states.\n paired_ar: array_like\n Number of transition pairs for each state, with length N, the number of states.\n n_actual: int\n Actual number of connected states which can be analyzed.\n \n Returns:\n --------\n n_k: array_like\n Total number of transitions out of state/bin i. Excluding states for which\n no proper equilibrium can be established. The array has the length N_actual. \n \"\"\"\n n_k = np.zeros(n_actual)\n c = 0\n for i in range(len(n_out)):\n if (n_in[i] > 0.0) and (n_out[i] > 0.0) and (paired_ar[i] > 0):\n n_k[c] = n_out[i]\n c += 1\n return n_k\n\n\ndef generate_dhamed_input(c_l, v_ar, n_states, n_win, return_included_state_indices=False):\n \"\"\"\n Converts a list of count matrices and an array of bias potentials\n to the input for DHAMed. For efficient calculation DHAMed input data\n is organized into transition pairs.\n\n Parameters:\n -----------\n c_l: list,\n List of arrays. Each array contains a transition count matrix.\n v_ar: array\n Array of bias potentials\n n_states: int\n Number of states/bins.\n n_win: int\n Number of simulation runs or windows.\n return_included_state_indices: boolean, optional\n Return indices of the states to be included in the calculation.\n\n Returns:\n --------\n n_k: array like\n n_actual entries, list of total number of transitions out of bin i. N_actual\n number of bins/states for which equilibrium can be established.\n ip: array_like\n npair entries, list of indices of bin i in transition pair.\n jp: array_like\n npair entries, list of indices of bin j in transition pair.\n vi: array_like\n npair entries, list of potentials in kT units at bin i of a pair.\n vj: array_like\n npair entries, list of potentials in kT units at bin j of a pair. 
\n ti: array_like\n npair entries, list of residence times in bin i of a pair.\n tj: array like\n npair entries, list of residence times in bin j of a pair.\n nijp: array_like\n npair entries, number of j->i and i->j transitions combined for a pair.\n n_actual: int\n Actual number of connected states which can be analyzed.\n included_state_indices: array_like\n Indices of states included in the DHAMed calculation (optional)\n\n \"\"\"\n t = state_lifetimes_counts(c_l, n_states, n_win)\n n_in, n_out = counts_in_out(c_l, n_states, n_win)\n paired_ar = check_transition_pairs(c_l, n_in, n_out, n_states, t)\n pair_idx_d, n_actual = actual_transition_pairs(n_in, n_out, n_states, paired_ar)\n ip, jp, vi, vj, ti, tj, nijp = prepare_dhamed_input_pairs(n_states, c_l, n_in, n_out,\n paired_ar, t, pair_idx_d, v_ar)\n # Remove excluded counts from the total number of transitions out of state.\n nk = check_total_transition_counts(n_out, n_in, paired_ar, n_actual)\n \n if return_included_state_indices:\n return nk, ip, jp, vi, vj, ti, tj, nijp, n_actual, pair_idx_d\n else: \n return nk, ip, jp, vi, vj, ti, tj, nijp, n_actual\n" ]
[ [ "numpy.sum", "numpy.array", "numpy.zeros" ] ]
CristianoPizzamiglio/scikit-spatial
[ "95ca2d4f2948cf6a69ec4bc7236b70fd66db1de5" ]
[ "src/skspatial/objects/vector.py" ]
[ "\"\"\"Module for the Vector class.\"\"\"\nfrom __future__ import annotations\n\nimport math\nfrom typing import cast\n\nimport numpy as np\nfrom matplotlib.axes import Axes\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom skspatial._functions import np_float\nfrom skspatial.objects._base_array import _BaseArray1D\nfrom skspatial.plotting import _connect_points_3d\nfrom skspatial.typing import array_like\n\n\nclass Vector(_BaseArray1D):\n \"\"\"\n A vector implemented as a 1D array.\n\n The array is a subclass of :class:`numpy.ndarray`.\n\n Parameters\n ----------\n array : array_like\n Input array.\n\n Attributes\n ----------\n dimension : int\n Dimension of the vector.\n\n Raises\n ------\n ValueError\n If the array is empty, the values are not finite,\n or the dimension is not one.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> vector = Vector([1, 2, 3])\n\n >>> vector.dimension\n 3\n\n The object inherits methods from :class:`numpy.ndarray`.\n\n >>> vector.mean()\n array(2.)\n\n >>> Vector([])\n Traceback (most recent call last):\n ...\n ValueError: The array must not be empty.\n\n >>> import numpy as np\n\n >>> Vector([1, 2, np.nan])\n Traceback (most recent call last):\n ...\n ValueError: The values must all be finite.\n\n >>> Vector([[1, 2], [3, 4]])\n Traceback (most recent call last):\n ...\n ValueError: The array must be 1D.\n\n \"\"\"\n\n @classmethod\n def from_points(cls, point_a: array_like, point_b: array_like) -> Vector:\n \"\"\"\n Instantiate a vector from point A to point B.\n\n Parameters\n ----------\n point_a, point_b : array_like\n Points defining the vector.\n\n Returns\n -------\n Vector\n Vector from point A to point B.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector.from_points([0, 0], [1, 0])\n Vector([1, 0])\n\n >>> Vector.from_points([5, 2], [-2, 8])\n Vector([-7, 6])\n\n >>> Vector.from_points([3, 1, 1], [7, 7, 0])\n Vector([ 4, 6, -1])\n\n \"\"\"\n array_vector_ab = cast(np.ndarray, np.subtract(point_b, point_a))\n\n return cls(array_vector_ab)\n\n def norm(self, **kwargs) -> np.float64:\n \"\"\"\n Return the norm of the vector.\n\n Parameters\n ----------\n kwargs : dict, optional\n Additional keywords passed to :func:`numpy.linalg.norm`.\n\n Returns\n -------\n np.float64\n Norm of the vector.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> vector = Vector([1, 2, 3])\n\n >>> vector.norm().round(3)\n 3.742\n\n >>> vector.norm(ord=1)\n 6.0\n\n >>> vector.norm(ord=0)\n 3.0\n\n \"\"\"\n return np.linalg.norm(self, **kwargs)\n\n def unit(self) -> Vector:\n \"\"\"\n Return the unit vector in the same direction as the vector.\n\n A unit vector is a vector with a magnitude of one.\n\n Returns\n -------\n Vector\n Unit vector.\n\n Raises\n ------\n ValueError\n If the magnitude of the vector is zero.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([1, 0]).unit()\n Vector([1., 0.])\n\n >>> Vector([-20, 0]).unit()\n Vector([-1., 0.])\n\n >>> Vector([1, 1]).unit().round(3)\n Vector([0.707, 0.707])\n\n >>> Vector([1, 1, 1]).unit().round(3)\n Vector([0.577, 0.577, 0.577])\n\n >>> Vector([0, 0]).unit()\n Traceback (most recent call last):\n ...\n ValueError: The magnitude must not be zero.\n\n \"\"\"\n magnitude = self.norm()\n\n if magnitude == 0:\n raise ValueError(\"The magnitude must not be zero.\")\n\n return self / magnitude\n\n def is_zero(self, **kwargs: float) -> bool:\n \"\"\"\n Check if the vector is the zero vector.\n\n The zero vector 
in n dimensions is the vector containing n zeros.\n\n Parameters\n ----------\n kwargs : dict, optional\n Additional keywords passed to :func:`math.isclose`.\n\n Returns\n -------\n bool\n True if vector is the zero vector; false otherwise.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([0, 0]).is_zero()\n True\n >>> Vector([1, 0]).is_zero()\n False\n\n >>> Vector([0, 0, 1e-4]).is_zero()\n False\n >>> Vector([0, 0, 1e-4]).is_zero(abs_tol=1e-3)\n True\n\n \"\"\"\n return math.isclose(self.dot(self), 0, **kwargs)\n\n def cross(self, other: array_like) -> Vector:\n \"\"\"\n Compute the cross product with another vector.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n\n Returns\n -------\n Vector\n 3D vector perpendicular to both inputs.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([1, 0]).cross([0, 1])\n Vector([0, 0, 1])\n\n >>> Vector([2, 5]).cross([1, 1])\n Vector([ 0, 0, -3])\n\n >>> Vector([1, 0]).cross([0, 1])\n Vector([0, 0, 1])\n\n >>> Vector([1, 1, 1]).cross([0, 1, 0])\n Vector([-1, 0, 1])\n\n \"\"\"\n # Convert to 3D vectors so that cross product is also 3D.\n vector_a = self.set_dimension(3)\n vector_b = Vector(other).set_dimension(3)\n\n return Vector(np.cross(vector_a, vector_b))\n\n def cosine_similarity(self, other: array_like) -> np.float64:\n \"\"\"\n Return the cosine similarity of the vector with another.\n\n This is the cosine of the angle between the vectors.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n\n Returns\n -------\n np.float64\n Cosine similarity.\n\n Raises\n ------\n ValueError\n If either vector has a magnitude of zero.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([1, 0]).cosine_similarity([0, 1])\n 0.0\n\n >>> Vector([30, 0]).cosine_similarity([0, 20])\n 0.0\n\n >>> Vector([1, 0]).cosine_similarity([-1, 0])\n -1.0\n\n >>> Vector([1, 0]).cosine_similarity([1, 1]).round(3)\n 0.707\n\n >>> Vector([0, 0]).cosine_similarity([1, 1])\n Traceback (most recent call last):\n ...\n ValueError: The vectors must have non-zero magnitudes.\n\n \"\"\"\n denom = self.norm() * Vector(other).norm()\n\n if denom == 0:\n raise ValueError(\"The vectors must have non-zero magnitudes.\")\n\n cos_theta = self.dot(other) / denom\n\n # Ensure that the output is in the range [-1, 1],\n # so that the angle theta is defined.\n clipped = np.clip(cos_theta, -1, 1)\n\n return np.float64(clipped)\n\n @np_float\n def angle_between(self, other: array_like) -> float:\n \"\"\"\n Return the angle in radians between the vector and another.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n\n Returns\n -------\n np.float64\n Angle between vectors in radians.\n\n Examples\n --------\n >>> import numpy as np\n >>> from skspatial.objects import Vector\n\n >>> Vector([1, 0]).angle_between([1, 0])\n 0.0\n\n >>> Vector([1, 1, 1]).angle_between([1, 1, 1])\n 0.0\n\n >>> angle = Vector([1, 0]).angle_between([1, 1])\n >>> np.degrees(angle).round()\n 45.0\n\n >>> angle = Vector([1, 0]).angle_between([-2, 0])\n >>> np.degrees(angle).round()\n 180.0\n\n \"\"\"\n cos_theta = self.cosine_similarity(other)\n\n return math.acos(cos_theta)\n\n @np_float\n def angle_signed(self, other: array_like) -> float:\n \"\"\"\n Return the signed angle in radians between the vector and another.\n\n The vectors must be 2D.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n\n Returns\n -------\n np.float64\n Signed angle between vectors in radians.\n\n 
Raises\n ------\n ValueError\n If the vectors are not 2D.\n\n Examples\n --------\n >>> import numpy as np\n >>> from skspatial.objects import Vector\n\n >>> Vector([1, 0]).angle_signed([1, 0])\n 0.0\n\n >>> np.degrees(Vector([1, 0]).angle_signed([0, 1]))\n 90.0\n\n >>> np.degrees(Vector([1, 0]).angle_signed([0, -1]))\n -90.0\n\n >>> Vector([1, 0, 0]).angle_signed([0, -1, 0])\n Traceback (most recent call last):\n ...\n ValueError: The vectors must be 2D.\n\n \"\"\"\n if not (self.dimension == 2 and Vector(other).dimension == 2):\n raise ValueError(\"The vectors must be 2D.\")\n\n dot = self.dot(other)\n det = np.linalg.det([self, other])\n\n return math.atan2(det, dot)\n\n def is_perpendicular(self, other: array_like, **kwargs: float) -> bool:\n r\"\"\"\n Check if the vector is perpendicular to another.\n\n Two vectors :math:`u` and :math:`v` are perpendicular if\n\n .. math::\n u \\cdot v = 0\n\n Parameters\n ----------\n other : array_like\n Other vector.\n kwargs : dict, optional\n Additional keywords passed to :func:`math.isclose`.\n\n Returns\n -------\n bool\n True if the vector is perpendicular; false otherwise.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([0, 1]).is_perpendicular([1, 0])\n True\n\n >>> Vector([-1, 5]).is_perpendicular([3, 4])\n False\n\n >>> Vector([2, 0, 0]).is_perpendicular([0, 0, 2])\n True\n\n The zero vector is perpendicular to all vectors.\n\n >>> Vector([0, 0, 0]).is_perpendicular([1, 2, 3])\n True\n\n \"\"\"\n return math.isclose(self.dot(other), 0, **kwargs)\n\n def is_parallel(self, other: array_like, **kwargs: float) -> bool:\n r\"\"\"\n Check if the vector is parallel to another.\n\n Two nonzero vectors :math:`u` and :math:`v` are parallel if\n\n .. math::\n \\texttt{abs}(\\texttt{cosine_similarity}(u, v)) = 1\n\n The zero vector is parallel to all vectors.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n kwargs : dict, optional\n Additional keywords passed to :func:`math.isclose`.\n\n Returns\n -------\n bool\n True if the vector is parallel; false otherwise.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([0, 1]).is_parallel([1, 0])\n False\n\n >>> Vector([1, 1]).is_parallel([1, 1])\n True\n\n >>> Vector([-1, 5]).is_parallel([2, -10])\n True\n\n >>> Vector([1, 2, 3]).is_parallel([3, 6, 9])\n True\n\n >>> Vector([1, 2, 3, 4]).is_parallel([-2, -4, -6, -8])\n True\n\n The zero vector is parallel to all vectors.\n\n >>> Vector([1, 2, 3]).is_parallel([0, 0, 0])\n True\n\n \"\"\"\n if self.is_zero(**kwargs) or Vector(other).is_zero(**kwargs):\n # The zero vector is perpendicular to all vectors.\n return True\n\n similarity = self.cosine_similarity(other)\n\n return math.isclose(abs(similarity), 1, **kwargs)\n\n def side_vector(self, other: array_like) -> int:\n \"\"\"\n Find the side of the vector where another vector is directed.\n\n Both vectors must be 2D.\n\n Parameters\n ----------\n other : array_like\n Other 2D vector.\n\n Returns\n -------\n int\n 1 if the other vector is to the right.\n 0 if the other is parallel.\n -1 if the other is to the left.\n\n Raises\n ------\n ValueError\n If the vectors are not 2D.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> vector_target = Vector([0, 1])\n\n The vector is parallel to the target vector.\n\n >>> vector_target.side_vector([0, 2])\n 0\n >>> vector_target.side_vector([0, -5])\n 0\n\n The vector is to the right of the target vector.\n\n >>> vector_target.side_vector([1, 1])\n 1\n >>> 
vector_target.side_vector([1, -10])\n 1\n\n The vector is to the left of the target vector.\n\n >>> vector_target.side_vector([-3, 4])\n -1\n\n The vectors are not 2D.\n\n >>> Vector([1]).side_vector([2])\n Traceback (most recent call last):\n ...\n ValueError: The vectors must be 2D.\n\n >>> Vector([1, 0, 0]).side_vector([1, 2, 3])\n Traceback (most recent call last):\n ...\n ValueError: The vectors must be 2D.\n\n \"\"\"\n if self.dimension != 2 or Vector(other).dimension != 2:\n raise ValueError(\"The vectors must be 2D.\")\n\n value_cross = np.cross(other, self)\n\n return int(np.sign(value_cross))\n\n def scalar_projection(self, other: array_like) -> np.float64:\n \"\"\"\n Return the scalar projection of an other vector onto the vector.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n\n Returns\n -------\n np.float64\n Scalar projection.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([0, 1]).scalar_projection([2, 1])\n 1.0\n\n >>> Vector([-1, -1]).scalar_projection([1, 0]).round(3)\n -0.707\n\n >>> Vector([0, 100]).scalar_projection([9, 5])\n 5.0\n\n >>> Vector([5, 0]).scalar_projection([-10, 3])\n -10.0\n\n \"\"\"\n result = self.unit().dot(other)\n\n return np.float64(result)\n\n def project_vector(self, other: array_like) -> Vector:\n \"\"\"\n Project an other vector onto the vector.\n\n Parameters\n ----------\n other : array_like\n Other vector.\n\n Returns\n -------\n Vector\n Vector projection.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([0, 1]).project_vector([2, 1])\n Vector([0., 1.])\n\n >>> Vector([0, 100]).project_vector([2, 1])\n Vector([0., 1.])\n\n >>> Vector([0, 1]).project_vector([9, 5])\n Vector([0., 5.])\n\n >>> Vector([0, 100]).project_vector([9, 5])\n Vector([0., 5.])\n\n \"\"\"\n return self.dot(other) / self.dot(self) * self\n\n def different_direction(self, **kwargs: float) -> Vector:\n \"\"\"\n Return a simple vector that is in a different direction.\n\n This is useful for finding a vector perpendicular to the original,\n by taking the cross product of the original with the one in a different direction.\n\n Parameters\n ----------\n kwargs : dict, optional\n Additional keywords passed to :meth:`Vector.is_zero` and :meth:`Vector.is_parallel`.\n :meth:`Vector.is_zero` is used to ensure the input vector is not the zero vector,\n and :meth:`Vector.is_parallel` is used to ensure the new vector is not parallel to the input.\n\n Returns\n -------\n Vector\n A unit vector in a different direction from the original.\n\n Raises\n ------\n ValueError\n If the vector is the zero vector.\n\n Examples\n --------\n >>> from skspatial.objects import Vector\n\n >>> Vector([1]).different_direction()\n Vector([-1])\n >>> Vector([100]).different_direction()\n Vector([-1])\n >>> Vector([-100]).different_direction()\n Vector([1])\n >>> Vector([1, 0]).different_direction()\n Vector([0., 1.])\n >>> Vector([1, 1]).different_direction()\n Vector([1., 0.])\n >>> Vector([1, 1, 1, 1]).different_direction()\n Vector([1., 0., 0., 0.])\n\n \"\"\"\n if self.is_zero(**kwargs):\n raise ValueError(\"The vector must not be the zero vector.\")\n\n if self.dimension == 1:\n return Vector([-np.sign(self[0])])\n\n vector_different_direction = Vector(np.zeros(self.dimension))\n vector_different_direction[0] = 1\n\n if self.is_parallel(vector_different_direction, **kwargs):\n vector_different_direction[0] = 0\n vector_different_direction[1] = 1\n\n return vector_different_direction\n\n def plot_2d(self, ax_2d: 
Axes, point: array_like = (0, 0), scalar: float = 1, **kwargs) -> None:\n \"\"\"\n Plot a 2D vector.\n\n The vector is plotted as an arrow.\n\n Parameters\n ----------\n ax_2d : Axes\n Instance of :class:`~matplotlib.axes.Axes`.\n point : array_like, optional\n Position of the vector tail (default is origin).\n scalar : {int, float}, optional\n Value used to scale the vector (default 1).\n kwargs : dict, optional\n Additional keywords passed to :meth:`~matplotlib.axes.Axes.arrow`.\n\n Examples\n --------\n .. plot::\n :include-source:\n\n >>> import matplotlib.pyplot as plt\n >>> from skspatial.objects import Vector\n\n >>> _, ax = plt.subplots()\n\n >>> Vector([1, 1]).plot_2d(ax, point=(-3, 5), scalar=2, head_width=0.5)\n\n >>> limits = ax.axis([-5, 5, 0, 10])\n\n \"\"\"\n x, y = point\n dx, dy = scalar * self\n\n ax_2d.arrow(x, y, dx, dy, **kwargs)\n\n def plot_3d(self, ax_3d: Axes3D, point: array_like = (0, 0, 0), scalar: float = 1, **kwargs) -> None:\n \"\"\"\n Plot a 3D vector.\n\n The vector is plotted by connecting two 3D points\n (the head and tail of the vector).\n\n Parameters\n ----------\n ax_3d : Axes3D\n Instance of :class:`~mpl_toolkits.mplot3d.axes3d.Axes3D`.\n point : array_like, optional\n Position of the vector tail (default is origin).\n scalar : {int, float}, optional\n Value used to scale the vector (default 1).\n kwargs : dict, optional\n Additional keywords passed to :meth:`~mpl_toolkits.mplot3d.axes3d.Axes3D.plot`.\n\n Examples\n --------\n .. plot::\n :include-source:\n\n >>> import matplotlib.pyplot as plt\n >>> from mpl_toolkits.mplot3d import Axes3D\n\n >>> from skspatial.objects import Vector\n\n >>> fig = plt.figure()\n >>> ax = fig.add_subplot(111, projection='3d')\n\n >>> Vector([-1, 1, 1]).plot_3d(ax, point=(1, 2, 3), c='r')\n\n \"\"\"\n point_2 = np.array(point) + scalar * self\n\n _connect_points_3d(ax_3d, point, point_2, **kwargs)\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.linalg.det", "numpy.float64", "numpy.sign", "numpy.subtract", "numpy.clip", "numpy.cross" ] ]
tomtix/osux
[ "cf87171ffca9513c3a05e2156618b20cea4aef98" ]
[ "src/taikorank/test/linear_fun.py" ]
[ "#!/usr/bin/python3\n\n# Copyright (©) 2015-2016 Lucas Maugère, Thomas Mijieux\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport yaml\nimport matplotlib.pylab as mp\n\ndef plot(x, y, name):\n mp.plot(x, y, linewidth = 1.0)\n mp.xlabel('')\n mp.ylabel('')\n mp.title(name)\n mp.show()\n\ndef lf_plot(data, name):\n l = data[name + \"_length\"]\n lx = []\n ly = []\n for i in range(1, l+1):\n x = float(data[name + \"_x\" + str(i)])\n y = float(data[name + \"_y\" + str(i)])\n if i == l and ly[l-2] == y:\n # avoid some ugly plot with nothing visible\n continue\n lx.append(x)\n ly.append(y)\n plot(lx, ly, name)\n\ndef usage():\n print(\"\"\"Usage:\n ./tr_linear_fun.py path/to/file.yaml vect_name1 vect_name2 ...\n \"\"\")\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n usage()\n sys.exit(0)\n filepath = sys.argv[1]\n with open(filepath) as f:\n data = yaml.load(f)\n for name in sys.argv[2:]:\n lf_plot(data, name)\n" ]
[ [ "matplotlib.pylab.ylabel", "matplotlib.pylab.show", "matplotlib.pylab.xlabel", "matplotlib.pylab.title", "matplotlib.pylab.plot" ] ]
ArielYssou/Aperiodic_CP
[ "f8dda241c20850f49a5046dd8306cb122dd7a652" ]
[ "Aperiodic_CP/abax_bissection.py" ]
[ "import subprocess\nfrom scipy.optimize import curve_fit\nfrom numpy import linspace, logspace, log, polyfit, isnan\nfrom random import randint\nfrom os import listdir\nfrom os.path import isfile, isdir, join\nfrom time import sleep\n\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\ndef progress_bar(completed = 0, total = 1, text = '', color = 0, size = 10):\n '''\n INPUT: (Number of) completed tasks, total amount of tasks, text to display inside the bar, color and total size of the bar.\n OUTPUT: String of the progress bar\n '''\n offset = 2\n text = \" \" * offset + text\n perc = completed / total\n hilight = int( size * perc )\n bar = ''\n for index in range(hilight):\n if index < len(text):\n bar += \"\\033[48;5;{}m\\033[38;5;0m{}\\033[0m\".format(color, text[index])\n else:\n bar += \"\\033[48;5;{}m\\033[38;5;0m \\033[0m\".format(color)\n for index in range(hilight, size):\n if index < len(text):\n bar += \"\\033[48;5;0m\\033[38;5;{}m{}\\033[0m\".format(color, text[index])\n else:\n bar += \"\\033[48;5;0m\\033[38;5;0m \\033[0m\"\n bar += \" {}%\".format(int(perc*100))\n return bar\n\ndef func(x, a, b, c):\n return a + (b * x) + c * (x ** 2) \ndef Curvature(fname, begin = 0, end= -1):\n try:\n dfile = open(fname, 'r')\n times = []\n rhos = []\n for line in dfile.read().splitlines():\n t, r = line.split(',')\n if float(t) == 0 or float(r) == 0:\n pass\n else:\n if isnan(float(t)) == False:\n times.append(float(t))\n rhos.append(float(r))\n fit_parans, fit_cov = curve_fit(func, log(times[begin:end]), log(rhos[begin:end]))\n return fit_parans[2]\n except:\n print(\"file {} not found\".format(fname))\n raise FileNotFoundError\n\ndef lin_func(x, a, b):\n return (a * x) + b\ndef Slope(fname, start = 0, end = -1):\n try:\n dfile = open(fname, 'r')\n times = []\n rhos = []\n for line in dfile.read().splitlines():\n t, r = line.split(',')\n #t, r, dt, dr = line.split(',')\n if float(t) == 0 or float(r) == 0:\n pass\n else:\n if isnan(float(t)) == False:\n times.append(float(t))\n rhos.append(float(r))\n fit_parans, fit_cov = curve_fit(\n lin_func,\n log(times[start:end]),\n log(rhos[start:end])\n )\n return fit_parans[0]\n except:\n print(\"file {} not found\".format(fname))\n raise FileNotFoundError\n\ndef IsActive(fname, s1 = 100, e1 = 150, s2 = -50, e2 = -1, method = 'lin'):\n if not isfile(fname):\n return 'active'\n elif file_len(fname) <= 200:\n return 'inactive'\n else:\n pass\n \n crit1 = 'active'\n crit2 = 'active'\n \n # Method 1: Compare the slop in the middle to the final slope\n reference_slope = abs(Slope(fname, s1, e1))\n final_slope = abs(Slope(fname, s2, e2))\n tolerance = 1.0001\n if final_slope <= tolerance * reference_slope:\n crit1 = 'active'\n else:\n crit1 = 'inactive'\n\n # Method 2: \n if Curvature(fname, s1, e2) > 0:\n crit2 = 'active'\n else:\n crit2 = 'inactive'\n\n if 'inactive' in (crit1, crit2):\n if 'active' in (crit1, crit2):\n tolerance = 1.0001\n reference_slope = abs(Slope(fname, s1, e1))\n final_slope = abs(Slope(fname, -20, -1))\n tolerance = 1.01\n if final_slope <= tolerance * reference_slope:\n return 'active'\n else:\n return 'inactive'\n else:\n return 'inactive'\n \n else:\n return 'active'\n\ndef RunSims(code, k, rho_0, delta_t, lamb_a, lamb_b, tsup, size, sim_i, sim_f, jobs):\n process = subprocess.Popen(\n [\n \"bash\",\n \"aurora.sh\",\n \"-e\",\n code,\n str(k),\n str(rho_0),\n str(delta_t),\n str(lamb_a), \n str(lamb_b), \n str(tsup),\n str(size),\n str(sim_i),\n str(sim_f),\n str(jobs)\n ],\n 
stdout = subprocess.PIPE)\n out, err = process.communicate()\n\ndef RunAnalysis(analysis, k, delta_t, la, lb, tsup, size, sim_f):\n process = subprocess.Popen(\n [\n analysis,\n str(k),\n str(delta_t),\n str(la), \n str(lb), \n str(tsup),\n str(size),\n str(0),\n str(sim_f)\n ],\n stdout = subprocess.PIPE)\n out, err = process.communicate()\n fname = out.decode(\"utf-8\") \n return fname\n\ndef BissecStep(code, analysis, k, rho_0, delta_t, lamb_a, lamb_b, tsup, size, sim_i, sim_f, jobs):\n RunSims(\n code,\n k,\n rho_0,\n delta_t,\n lamb_a,\n lamb_b,\n tsup,\n size,\n sim_i,\n sim_f,\n jobs\n )\n\n while True:\n if Finished(code):\n break\n else:\n MoveQueue()\n\n fname = RunAnalysis(\n analysis,\n k,\n delta_t,\n lamb_a,\n lamb_b,\n tsup,\n size,\n sim_f\n )\n\n return fname\n\ndef Finished(code):\n process = subprocess.Popen(\n [\n \"bash\",\n \"aurora.sh\",\n \"-s\",\n code.split('/')[-1]\n ],\n stdout = subprocess.PIPE)\n out, err = process.communicate()\n if float(out) == 1:\n return False\n else:\n return True\n\ndef MoveQueue():\n process = subprocess.Popen(\n [\n \"bash\",\n \"aurora.sh\",\n \"-m\"\n ],\n stdout = subprocess.PIPE)\n out, err = process.communicate()\n\n\ndef CheckGateway(remote_host, remote_basedir):\n dir = remote_basedir\n process = subprocess.Popen(\n [\n \"ssh\",\n remote_host,\n \"bash\",\n \"{}/gateway.sh\".format(dir)\n ],\n stdout = subprocess.PIPE)\n out, err = process.communicate()\n if out.decode('utf-8') == 'open':\n print(\"Gateway is open\")\n return True\n else:\n print(\"Gateway is closed. Aborting\")\n return False\n\ndef UploadFile(file, remote_host, remote_basedir):\n # Creates additional dirs\n additional_dir = ''\n ignore_dirs = [\n 'Simulacoes',\n 'data_aperiodic'\n ]\n dirs = file.replace('/home/ariel/','').rsplit('/')\n for dir in dirs[:-1]: # The last element is the file name\n if dir not in ignore_dirs:\n additional_dir += '/' + dir\n\n remote_fulldir = remote_basedir + additional_dir\n if additional_dir != '':\n process = subprocess.Popen(\n [\n \"ssh\",\n remote_host,\n \"mkdir\",\n \"-p\",\n remote_fulldir,\n ],\n stdout = subprocess.PIPE)\n out, err = process.communicate()\n\n # Upload file\n process = subprocess.Popen(\n [\n \"scp\",\n file,\n \"{}:{}\".format(remote_host, remote_fulldir),\n ],\n stdout = subprocess.PIPE)\n out, err = process.communicate()\n return 0\n\ndef ExistingSims(k, lamb_a, lamb_b, tmax, size):\n target = '/home/ariel/Simulacoes/data_aperiodic'\n target = target + '/k={}'.format(int(k))\n target = target + '/Lambda_a={:.3f}'.format(lamb_a)\n target = target + '/Lambda_b={:.8f}'.format(lamb_b)\n target = target + '/Tmax={:e}'.format(tmax)\n target = target + '/Size={}'.format(int(size))\n\n if not isdir(target):\n return 0\n else:\n pass\n\n files = [f for f in listdir(target) if isfile(join(target, f))]\n\n for f in ['rho_av.dat', 'surv_prob.dat']:\n try:\n files.remove(f)\n except ValueError:\n pass\n for f in files:\n if 'rho' not in f:\n files.remove(f)\n\n return len(files)\n\ndef Bissection(k = 2, lsup = 200, linf = 100.0, steps = 15, resume = False):\n size = 50000\n tmax = 1e6\n delta_t = 1\n sims = 50000\n sim_increment = 0\n lamb_a = 1.3\n rho_0 = -1\n tinf = 0\n jobs = 150\n\n# if not resume:\n# if k == 1:\n# linf, lsup = (3, 8)\n# elif k == 2:\n# linf, lsup = (3, 8)\n# elif k == 3:\n# linf, lsup = (3, 8)\n# else:\n# linf, lsup = (3, 8)\n lmid = (lsup + linf) / 2\n\n times = linspace(0, tmax, steps)\n #flicks = linspace(0.95, 0.96, steps+1)\n #flicks = linspace(0.95, 0.99, steps+1)\n\n Sups = []\n Infs 
= []\n\n root = '/home/ariel/Simulacoes'\n remote_host = \"ariel@ariel-Inspiron-5421\"\n remote_basedir = \"/home/ariel/Desktop/Mestrado/Aperiodic_CP/Aperiodic_Bissection/AbaxData\"\n\n code = '{}/acp_abax'.format(root)\n analysis = '{}/acp_analysis'.format(root)\n\n root += '/data_aperiodic/k={}'.format(k)\n\n out_fname = '{}/acp_bissec_results.dat'.format(root)\n\n if isfile(out_fname):\n if resume:\n out_file = open(out_fname, 'a')\n else:\n print(\"Warning, there is a set of existing data, overwrite? (yes/no)\")\n ans = input(\"> \")\n if ans.lower() in ('y','yes'):\n out_file = open(out_fname, 'w+')\n else:\n return\n else:\n out_file = open(out_fname, 'w+')\n\n transfer_files = [\n out_fname\n ]\n\n out_file.write(\"step, delta_t, lamb_a, lamb_b, tmax, size, sims, fname, regime\\n\")\n\n regimes = {}\n regimes[linf] = 'inactive'\n regimes[lsup] = 'active'\n\n for step in range(steps - 1):\n print(\"-\" * 40)\n print(progress_bar(\n step,\n steps-1,\n \"Depth {}\".format(step),\n color = 2,\n size = 50)\n )\n\n for lamb_b in [lmid]:\n tsup = times[step + 1]\n\n print(\"Testing value \\033[38;5;7m{:4f}\\033[0m... \".format(lamb_b), end = '')\n\n sim_i = 0\n sim_f = sims + sim_i\n\n if k == 0:\n lamb_a = lamb_b\n else:\n pass\n\n fname = BissecStep(\n code,\n analysis,\n k,\n rho_0,\n delta_t,\n lamb_a,\n lamb_b,\n tsup,\n size,\n sim_i,\n sim_f,\n jobs\n )\n\n regimes[lamb_b] = IsActive(fname)\n if regimes[lamb_b] == 'active':\n print(\"\\033[38;5;2;1m{}.\\033[0m\".format(regimes[lamb_b].title()))\n else:\n print(\"\\033[38;5;1;1m{}.\\033[0m\".format(regimes[lamb_b].title()))\n transfer_files.append(fname)\n fname_alt = fname \n transfer_files.append(fname_alt.replace('surv_prob', 'rho_av'))\n transfer_files.append(fname_alt)\n fname = fname.replace('/home/ariel/','')\n fname = fname.replace('Simulacoes/','')\n fname = fname.replace('data_aperiodic/','')\n fname = remote_basedir + '/' + fname\n\n out_file.write(\"{},{},{},{},{},{},{},{},{}\\n\".format(step,delta_t, lamb_a, lamb_b, tsup, size, sim_f, fname, regimes[lamb_b]))\n\n if regimes[lmid] == 'active':\n if regimes[linf] == 'active':\n lsup = linf\n for lamb, status in sorted(regimes.items(), key= lambda l: l[0])[::-1]:\n if status == 'inactive':\n linf = lamb\n else:\n lsup = lmid\n else:\n if regimes[lsup] == 'active':\n linf = lmid\n else:\n lsup = linf\n for lamb, status in sorted(regimes.items(), key= lambda l: l[0]):\n if status == 'inactive':\n linf = lamb\n lmid = (lsup + linf) / 2\n\n print(\"-\" * 40)\n print(progress_bar(\n 100,\n 100,\n \"Done =D\",\n color = 3,\n size = 50)\n )\n\n regimes[lmid] = 'crit'\n \n lamb_b = lmid\n\n fname = BissecStep(\n code,\n analysis,\n k,\n rho_0,\n delta_t,\n lamb_a,\n lamb_b,\n tsup,\n size,\n sim_i,\n sim_f,\n jobs\n )\n\n transfer_files.append(fname)\n fname_alt = fname \n transfer_files.append(fname_alt.replace('surv_prob', 'rho_av'))\n transfer_files.append(fname_alt)\n fname = fname.replace('/home/ariel/','')\n fname = fname.replace('Simulacoes/','')\n fname = fname.replace('data_aperiodic/','')\n fname = remote_basedir + '/' + fname\n\n out_file.write(\"{},{},{},{},{},{},{},{},{}\\n\".format(step, delta_t, lamb_a, lamb_b, tsup, size, sim_f, fname, regimes[lamb_b]))\n\n out_file.close()\n\n CheckGateway(remote_host, remote_basedir)\n for file in transfer_files:\n UploadFile(file, remote_host, remote_basedir)\n\nif __name__ == '__main__':\n from sys import argv\n\n if '-b' in argv:\n Bissection()\n elif '-i' in argv:\n Increase_Resolution()\n elif '-c' in argv:\n 
Continue_Bissec()\n else:\n Bissection()\n" ]
[ [ "numpy.linspace", "numpy.log" ] ]
symant233/zfsoft-captcha2
[ "feb689bbdbb0a306f8d342152d67cf23c5849ea5" ]
[ "app.py" ]
[ "from flask import Flask, url_for, request, redirect\nfrom predictor import split_pic, analyse\nfrom tensorflow import keras\napp = Flask(__name__) # __main__\nmodel = keras.models.load_model('./model/Model_tf.net')\n\n\[email protected](\"/\")\ndef hello(name=None):\n return \"\"\"home page\n upload: /upload\n api: /api\n \"\"\"\n\n\[email protected]('/upload', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return 'No file part'\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n return 'No selected file'\n else:\n result = analyse(file.read(), model)\n return \"\".join(result)\n elif request.method == 'GET':\n return '''\n <!doctype html>\n <title>Upload new File</title>\n <h1>Upload new File</h1>\n <form method=post enctype=multipart/form-data>\n <input type=file name=file>\n <input type=submit value=Upload>\n </form>\n '''\n\[email protected]('/api', methods=['POST'])\ndef api():\n if request.method == 'POST':\n stream = request.data\n result = analyse(stream, model)\n return \"\".join(result)\n\nif __name__ == \"__main__\":\n from gevent.pywsgi import WSGIServer\n # app.run()\n http_server = WSGIServer(('localhost',5000),app)\n http_server.serve_forever()\n" ]
[ [ "tensorflow.keras.models.load_model" ] ]
rizkifatihah/k-means
[ "1b50ac6d4c72f67e9116d9e9dcc9732e590aa006" ]
[ "k-means.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import MinMaxScaler\n\nobejct = pd.read_csv(\"name_file.csv\")\nobejct.head()\n\nplt.scatter(obejct.x, obejct.y, s =10, c = \"c\", marker = \"o\", alpha = 1)\nplt.show()\nx_array = np.array(obejct)\nprint(x_array)\nscaler = MinMaxScaler()\nx_scaled = scaler.fit_transform(x_array)\nx_scaled\nkmeans = KMeans(n_clusters = 3, random_state=123)\nkmeans.fit(x_scaled)\nprint(kmeans.cluster_centers_)\nprint(kmeans.labels_)\nobejct[\"cluster\"] = kmeans.labels_\noutput = plt.scatter(x_scaled[:,0], x_scaled[:,1], s = 100, c = obejct.cluster, marker = \"o\", alpha = 1, )\ncenters = kmeans.cluster_centers_\nplt.scatter(centers[:,0], centers[:,1], c='red', s=200, alpha=1 , marker=\"s\")\nplt.title(\"Cluster Object\")\nplt.colorbar (output)\nplt.show()\ndf = pd.DataFrame(obejct, columns = ['x', 'y','kluster'])\ndf.to_excel('name_file.xlsx')" ]
[ [ "numpy.array", "matplotlib.pyplot.colorbar", "pandas.DataFrame", "sklearn.cluster.KMeans", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.scatter", "pandas.read_csv" ] ]
bemu/diagnosis_covid19
[ "625954beb136caa3348edfc75de16cc4db21ee43", "84abe2fd1cc46e4f16d3f59be18ff3c8b5fa08c0" ]
[ "multi_period_scores/analysis_mp.py", "analysis_tools/analysis_lesion_size.py" ]
[ "import numpy as np\nimport seaborn as sb\nimport os\nimport matplotlib.pyplot as plt\ndef inter_vecter(v):\n length=v.shape[0]\n x=np.linspace(0, 1, 40)\n xp = np.linspace(0, 1, length)\n new_v=np.interp(x, xp, v)\n return new_v\n\ndatas=open('val_slices_count.txt','r').readlines()\nfull_names=[da.split('\\t')[0].split('/')[-1] for da in datas]\nperson_names=[da.split('_')[0] for da in full_names]\npres=[np.array(da.split('\\t')[1].split(','),np.float) for da in datas]\nabnormal_count=[da.split('\\t')[2] for da in datas]\nslice_id=[np.array(da.split('\\t')[3].split(','),np.int) for da in datas]\n\n\npresent_names=list(set(person_names))\nperson_names=np.array(person_names)\nfull_names=np.array(full_names)\npres=np.array(pres)\na='figs_re/'\nb='npys_re/'\nos.makedirs(a,exist_ok=True)\nos.makedirs(b,exist_ok=True)\nfor a_name in present_names:\n this_names=full_names[person_names==a_name]\n this_pred = pres[person_names == a_name]\n dates=[da.split('_')[-1][:-4] for da in this_names]\n idx=np.argsort(dates)## sorted idx\n this_names=this_names[idx]\n this_pred=this_pred[idx]\n this_pred=np.stack([inter_vecter(da) for da in this_pred])\n plt.figure(figsize =(4,8))\n sb.heatmap(this_pred.transpose(),vmin=0,vmax=1,annot=True, fmt=\".3f\",cmap='jet',xticklabels=this_names)\n plt.xticks(rotation=90)\n plt.subplots_adjust(bottom=0.29, top=0.94)\n plt.savefig(a+this_names[0].split('_')[0]+'.jpg')\n np.save(b+this_names[0].split('_')[0]+'.npy',np.concatenate([this_names[:,np.newaxis],this_pred],1))\n\n", "import os\nimport SimpleITK as sitk\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nage_list='raw_ages.txt'\nreload=False\ninpuath='/mnt/data7/NCP_mp_CTs/crop/lesions'\nif reload:\n Lsize=[]\n df=pd.read_csv(age_list,sep='\\t')\n for setname in os.listdir(inpuath):\n for person in os.listdir(os.path.join(inpuath,setname)):\n for stage in os.listdir(os.path.join(inpuath,setname,person)):\n data=sitk.ReadImage(os.path.join(inpuath,setname,person,stage))\n data=sitk.GetArrayFromImage(data)\n lesion_size=np.sum(data)/data.shape[0]/data.shape[1]/data.shape[2]\n this_name=setname+'/'+person.split('_')[0]\n idx=df[(df['name'] ==this_name)].index\n try:\n this_age=int(df['age'][idx])//20\n this_sex=int(df['sex'][idx]=='M')\n except:\n this_age=45\n this_sex=1\n Lsize.append([lesion_size,this_age,this_sex])\n Lsize=np.array(Lsize)\n df = pd.DataFrame(Lsize, columns=('size','age','sex'))\n df.to_csv('size_age.csv',index=False)\nelse:\n df=pd.read_csv('size_age.csv')\n\nplt.figure(figsize=(7,15))\nplt.subplot(5,1,1)\nidx=df[(df['sex']==1)].index\nsns.distplot(df['size'][idx]*100,kde=True,label='male',bins=np.arange(0,9,0.25),norm_hist=True)\nidx=df[(df['sex']==0)].index\nsns.distplot(df['size'][idx]*100,kde=True,label='female',bins=np.arange(0,9,0.25),norm_hist=True)\nplt.legend()\nplt.ylim([0,3])\nplt.xlim([0,8])\nplt.xlabel('')\nplt.title('all')\nfor age in range(1,5):\n plt.subplot(5,1,age+1)\n idx=df[(df['age'] ==age * (df['sex']==1))].index\n sns.distplot(df['size'][idx]*100,kde=True,label='male',bins=np.arange(0,9,0.25),norm_hist=True)\n idx=df[(df['age'] ==age *(df['sex']==0))].index\n sns.distplot(df['size'][idx]*100,kde=True,label='female',bins=np.arange(0,9,0.25),norm_hist=True)\n plt.xlim([0,8])\n plt.ylim([0, 3])\n plt.xlabel('')\n plt.legend()\n plt.title(str(age*20))\n\nplt.xlabel('Size (%)')\n#plt.legend()\n\nplt.tight_layout()\nplt.suptitle('Distribution of Relative 
Size')\nplt.subplots_adjust(top=0.90)\nplt.savefig('dis_size_agesex.jpg')\nplt.show()" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.interp", "matplotlib.pyplot.figure", "numpy.argsort", "numpy.linspace", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks" ], [ "numpy.array", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "pandas.DataFrame", "numpy.sum", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.subplot" ] ]
ZettaAI/DeepEM
[ "98de568d96fc793dd7461e088ef7fc079d828c8a" ]
[ "deepem/data/dataset/pinky_basil/mip0_padded_x512_y512_z32.py" ]
[ "import numpy as np\nimport os\n\nimport dataprovider3.emio as emio\n\n\n# Basil dataset\nbasil_dir = 'basil/ground_truth/mip0/padded_x512_y512_z32'\nbasil_info = {\n 'vol001':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.d128.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n 'vol001a':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n 'vol002':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.d128.h5',\n 'loc': True,\n },\n 'vol002a':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol003':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'mye': 'mye.h5',\n 'loc': True,\n },\n 'vol004':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol005':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'mye': 'mye.h5',\n 'loc': True,\n },\n 'vol006':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol008':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol011':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n}\n\n\n# Pinky dataset\npinky_dir = 'pinky/ground_truth/mip0/padded_x512_y512_z32'\npinky_info = {\n 'stitched_vol19-vol34':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'stitched_vol40-vol41':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol101':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n 'vol102':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol103':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol104':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol401':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'mye': 'mye.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n 'vol501':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.d128.h5',\n 'loc': True,\n },\n 'vol501a':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'loc': True,\n },\n 'vol502':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'mye': 'mye.h5',\n 'loc': True,\n },\n 'vol503':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'psd': 'psd.h5',\n 'msk': 'msk.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n 'vol201':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'msk': 'msk.d128.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n 'vol201a':{\n 'img': 'img.h5',\n 'seg': 'seg.h5',\n 'msk': 'msk.h5',\n 'blv': 'blv.h5',\n 'loc': True,\n },\n}\n\n\ndef load_data(data_dir, data_ids=None, **kwargs):\n if data_ids is None:\n data_ids = basil_info.keys() + pinky_info.keys()\n\n data = dict()\n base = os.path.expanduser(data_dir)\n\n for data_id in data_ids:\n # Basil\n if data_id in basil_info:\n dpath = os.path.join(base, basil_dir)\n info = basil_info[data_id]\n data[data_id] = load_dataset(dpath, data_id, info, **kwargs)\n # Pinky\n if data_id in pinky_info:\n dpath = os.path.join(base, pinky_dir)\n info = pinky_info[data_id]\n data[data_id] = load_dataset(dpath, data_id, info, 
**kwargs)\n\n return data\n\n\ndef load_dataset(dpath, tag, info, class_keys=[], **kwargs):\n assert len(class_keys) > 0\n dset = dict()\n\n # Image\n dname = tag[:-1] if tag[-1] == 'a' else tag\n fpath = os.path.join(dpath, dname, info['img'])\n print(fpath)\n dset['img'] = emio.imread(fpath).astype('float32')\n dset['img'] /= 255.0\n\n # Mask\n if dname == 'stitched_vol19-vol34':\n fpath = os.path.join(dpath, dname, 'msk_train.h5')\n print(fpath)\n dset['msk_train'] = emio.imread(fpath).astype('uint8')\n fpath = os.path.join(dpath, dname, 'msk_val.h5')\n print(fpath)\n dset['msk_val'] = emio.imread(fpath).astype('uint8')\n else:\n fpath = os.path.join(dpath, dname, info['msk'])\n print(fpath)\n dset['msk'] = emio.imread(fpath).astype('uint8')\n\n # Segmentation\n if 'aff' in class_keys or 'long' in class_keys:\n fpath = os.path.join(dpath, dname, info['seg'])\n print(fpath)\n dset['seg'] = emio.imread(fpath).astype('uint32')\n\n # Synapse\n if 'psd' in class_keys:\n if 'psd' in info:\n fpath = os.path.join(dpath, dname, info['psd'])\n print(fpath)\n psd = (emio.imread(fpath) > 0).astype('uint8')\n else:\n psd = np.zeros(dset['img'].shape, dtype='uint8')\n dset['psd'] = psd\n\n # Special volumes\n special = ['stitched_vol40-vol41','vol101','vol102','vol103','vol104']\n if dname in special:\n fpath = os.path.join(dpath, dname, 'psd_msk.h5')\n print(fpath)\n psd_msk = emio.imread(fpath).astype('uint8')\n else:\n psd_msk = dset['msk']\n dset['psd_msk'] = psd_msk\n\n # Myelin\n if 'mye' in class_keys:\n if 'mye' in info:\n fpath = os.path.join(dpath, dname, info['mye'])\n print(fpath)\n mye = emio.imread(fpath).astype('uint8')\n else:\n mye = np.zeros(dset['img'].shape, dtype='uint8')\n dset['mye'] = mye\n\n # Blood vessel\n if 'blv' in class_keys:\n if 'blv' in info:\n fpath = os.path.join(dpath, dname, info['blv'])\n print(fpath)\n blv = emio.imread(fpath).astype('uint8')\n else:\n blv = np.zeros(dset['img'].shape, dtype='uint8')\n dset['blv'] = blv\n\n # Additoinal info\n dset['loc'] = info['loc']\n\n return dset\n" ]
[ [ "numpy.zeros" ] ]
tsesarrizqi/tflite2
[ "f48c1868e5f64f5fcdd1939a54cfad28a84be2b0" ]
[ "tensorflow/contrib/estimator/python/estimator/replicate_model_fn_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for utilities that replicate `Estimator.model_fn` over GPUs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport shutil\nimport tempfile\nimport numpy as np\nimport six\n\nfrom tensorflow.contrib.estimator.python.estimator import replicate_model_fn\nfrom tensorflow.python.estimator import estimator as estimator_lib\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nfrom tensorflow.python.estimator.canned import dnn\nfrom tensorflow.python.estimator.canned import optimizers\nfrom tensorflow.python.estimator.canned import prediction_keys\nfrom tensorflow.python.estimator.export import export\nfrom tensorflow.python.estimator.export import export_output\nfrom tensorflow.python.estimator.inputs import numpy_io\nfrom tensorflow.python.feature_column import feature_column\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import losses\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics as metrics_lib\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.summary.writer import writer_cache\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import device_setter\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import training\n\n\n# TODO(isaprykin): Parametrize all the tests on\n# replicate_model_fn._VariableDistributionMode when it's supported.\nclass DNNClassifierIntegrationTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def test_complete_flow_with_public_version(self):\n return self._complete_flow_with_mode(mode=None)\n\n def test_complete_flow_with_mode_local_ps_server(self):\n return self._complete_flow_with_mode(\n replicate_model_fn._VariableDistributionMode.\n SHARED_LOCAL_PARAMETER_SERVER)\n\n def test_complete_flow_with_mode_round_robin(self):\n return self._complete_flow_with_mode(\n replicate_model_fn._VariableDistributionMode.SHARED_ROUND_ROBIN)\n\n def _complete_flow_with_mode(self, mode):\n n_classes = 3\n input_dimension = 2\n batch_size = 12\n\n data = np.linspace(\n 0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)\n x_data = data.reshape(batch_size, 
input_dimension)\n categorical_data = np.random.random_integers(\n 0, len(x_data), size=len(x_data))\n y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))\n train_input_fn = numpy_io.numpy_input_fn(\n x={'x': x_data,\n 'categories': categorical_data},\n y=y_data,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = numpy_io.numpy_input_fn(\n x={'x': x_data,\n 'categories': categorical_data},\n y=y_data,\n batch_size=batch_size,\n shuffle=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': x_data,\n 'categories': categorical_data},\n batch_size=batch_size,\n shuffle=False)\n\n feature_columns = [\n feature_column.numeric_column('x', shape=(input_dimension,)),\n feature_column.embedding_column(\n feature_column.categorical_column_with_vocabulary_list(\n 'categories',\n vocabulary_list=np.linspace(\n 0., len(x_data), len(x_data), dtype=np.int64)), 1)\n ]\n\n def optimizer_fn():\n return optimizers.get_optimizer_instance('Adagrad', learning_rate=0.05)\n\n estimator = dnn.DNNClassifier(\n hidden_units=(2, 2),\n # Adagrad is configured with `get_optimizer_instance`, so the function\n # form of `TowerOptimizer.__init__` is used.\n optimizer=replicate_model_fn.TowerOptimizer(optimizer_fn),\n feature_columns=feature_columns,\n n_classes=n_classes,\n model_dir=self._model_dir)\n\n if not mode: # Use the public `replicate_model_fn`.\n model_fn = replicate_model_fn.replicate_model_fn(\n estimator.model_fn, devices=['/gpu:0', '/gpu:1', '/gpu:2'])\n else:\n model_fn = replicate_model_fn._replicate_model_fn_with_mode(\n estimator.model_fn,\n devices=['/gpu:0', '/gpu:1', '/gpu:2'],\n loss_reduction=losses.Reduction.SUM,\n mode=mode)\n\n estimator = estimator_lib.Estimator(\n model_fn=model_fn,\n model_dir=estimator.model_dir,\n config=estimator.config,\n params=estimator.params)\n\n num_steps = 10\n estimator.train(train_input_fn, steps=num_steps)\n\n scores = estimator.evaluate(eval_input_fn)\n self.assertEqual(num_steps, scores[ops_lib.GraphKeys.GLOBAL_STEP])\n self.assertIn('loss', six.iterkeys(scores))\n\n predicted_proba = np.array([\n x[prediction_keys.PredictionKeys.PROBABILITIES]\n for x in estimator.predict(predict_input_fn)\n ])\n self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)\n\n feature_spec = feature_column.make_parse_example_spec(feature_columns)\n serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(\n feature_spec)\n export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),\n serving_input_receiver_fn)\n self.assertTrue(gfile.Exists(export_dir))\n\n # Nothing should be left in the graph so that it doesn't get serialized.\n self.assertFalse(ops_lib.get_default_graph().get_collection_ref(\n replicate_model_fn.TowerOptimizer.COLLECTION_FOR_GRAPH_STATES))\n\n def _as_label(self, data_in_float):\n return np.rint(data_in_float).astype(np.int64)\n\n def tearDown(self):\n if self._model_dir:\n writer_cache.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n\nclass ReplicateModelTest(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': 
metrics_lib.auc(labels, predictions)\n }\n\n optimizer = replicate_model_fn.TowerOptimizer(\n gradient_descent.GradientDescentOptimizer(params['learning_rate']))\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n predictions={'probabilities': predictions},\n train_op=optimizer.minimize(loss))\n\n @property\n def params(self):\n params = {}\n params['learning_rate'] = 1.0\n return params\n\n def test_train(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)\n session.run(variables.global_variables_initializer())\n\n # loss = feature * c - label\n total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)\n self.assertEqual(total_loss, session.run(estimator_spec.loss))\n\n # derivative of loss = (1*c - 1) + (2*c - 2) is 3.\n # new value of c = 10 - learning rate * 3 = 7.0.\n session.run(estimator_spec.train_op)\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(7.0, session.run(c))\n\n def test_train_with_mean_reduction(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, losses.Reduction.MEAN, devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)\n session.run(variables.global_variables_initializer())\n\n # loss = feature * c - label\n total_loss = ((1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)) / 2.0\n self.assertEqual(total_loss, session.run(estimator_spec.loss))\n\n # derivative of loss = (1*c - 1)/2 + (2*c - 2)/2 is 1.5.\n # It's the same computation as without mean reduction, but the\n # loss from every tower is scaled by 1/<number of towers>.\n # new value of c = 10 - learning rate * 1.5 = 8.5\n session.run(estimator_spec.train_op)\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(8.5, session.run(c))\n\n def test_train_two_steps_collected_gradients_are_reset_between_steps(self):\n with ops_lib.Graph().as_default():\n features = array_ops.placeholder(dtypes.float64)\n labels = array_ops.placeholder(dtypes.float64)\n\n feature_inputs = np.array([[1.0], [2.0]]), np.array([[1.5], [2.5]])\n label_inputs = np.array([[1.0], [2.0]]), np.array([[1.5], [2.5]])\n\n # loss = feature * c - label\n expected_losses = ((1.0 * 10 - 1.0) + (2.0 * 10 - 2.0),\n (1.5 * 7.0 - 1.5) + (2.5 * 7.0 - 2.5))\n # Derivative of the loss is 1.0 + 2.0 for the first step and 1.5 + 2.5\n # for the second.\n expected_c = 10.0 - 3.0, 7.0 - 4.0\n\n with self.test_session() as session, variable_scope.variable_scope(\n '', reuse=variable_scope.AUTO_REUSE):\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)\n session.run(variables.global_variables_initializer())\n\n for feature_input, label_input, loss, weight in zip(\n feature_inputs, label_inputs, expected_losses, expected_c):\n feeds = {features: 
feature_input, labels: label_input}\n\n self.assertEqual(loss, session.run(estimator_spec.loss, feeds))\n\n session.run(estimator_spec.train_op, feeds)\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(weight, session.run(c, feeds))\n\n def test_eval(self):\n features = np.array([[0.01], [0.002]])\n labels = np.array([[0.01], [0.02]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.EVAL, self.params)\n session.run(variables.local_variables_initializer())\n session.run(variables.global_variables_initializer())\n\n accuracy, a = estimator_spec.eval_metric_ops['accuracy']\n auc, b = estimator_spec.eval_metric_ops['auc']\n\n session.run([a, b])\n accuracy = session.run(accuracy)\n auc = session.run(auc)\n\n # loss[i] = features[i] * 10 - labels[i].\n # Accuracy is 0.0 (no match) in the first tower.\n # Accuracy is 1.0 (match) in the second tower, since the feature\n # times weight \"c\" happened to be equal to the label.\n total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02))\n\n self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)\n self.assertEqual(0, auc)\n self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)\n\n def test_eval_with_mean_reduction(self):\n features = np.array([[0.01], [0.002]])\n labels = np.array([[0.01], [0.02]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, losses.Reduction.MEAN, devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.EVAL, self.params)\n session.run(variables.local_variables_initializer())\n session.run(variables.global_variables_initializer())\n\n accuracy, a = estimator_spec.eval_metric_ops['accuracy']\n auc, b = estimator_spec.eval_metric_ops['auc']\n\n session.run([a, b])\n accuracy = session.run(accuracy)\n auc = session.run(auc)\n\n # loss[i] = features[i] * 10 - labels[i].\n # Accuracy is 0.0 (no match) in the first tower.\n # Accuracy is 1.0 (match) in the second tower, since the feature\n # times weight \"c\" happened to be equal to the label.\n total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02)) / 2.0\n\n self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)\n self.assertEqual(0, auc)\n self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)\n\n def test_predict(self):\n features = np.array([[0.01], [0.002]])\n labels = np.array([[0.01], [0.02]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.PREDICT, self.params)\n session.run(variables.global_variables_initializer())\n\n self.assertAllClose({\n 'probabilities': np.array([[0.1], [0.02]])\n }, session.run(estimator_spec.predictions))\n\n def test_train_single_tower(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)\n session.run(variables.global_variables_initializer())\n\n # loss = feature * c - label\n total_loss = (1.0 * 10 - 1.0) + 
(2.0 * 10 - 2.0)\n self.assertEqual(total_loss, session.run(estimator_spec.loss))\n\n # loss' of c is 3.\n # new value of c = 10 - learning rate * 3 = 7.0.\n session.run(estimator_spec.train_op)\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(7.0, session.run(c))\n\n def test_eval_single_tower(self):\n features = np.array([[0.01], [0.002]])\n labels = np.array([[0.01], [0.02]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.EVAL, self.params)\n session.run(variables.local_variables_initializer())\n session.run(variables.global_variables_initializer())\n\n accuracy, a = estimator_spec.eval_metric_ops['accuracy']\n auc, b = estimator_spec.eval_metric_ops['auc']\n\n session.run([a, b])\n accuracy = session.run(accuracy)\n auc = session.run(auc)\n\n # Accuracy is 0.0 (no match) in the first tower.\n # Accuracy is 1.0 (match) in the second tower, since the feature\n # times weight \"c\" happened to be equal to the label.\n total_loss = ((0.01 * 10 - 0.01) + (0.002 * 10 - 0.02))\n\n self.assertNear((0.0 + 1.0) / 2.0, accuracy, 0.01)\n self.assertEqual(0, auc)\n self.assertNear(total_loss, session.run(estimator_spec.loss), 0.01)\n\n def test_predict_single_tower(self):\n features = np.array([[0.01], [0.002]])\n labels = np.array([[0.01], [0.02]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.PREDICT, self.params)\n session.run(variables.global_variables_initializer())\n\n self.assertAllClose({\n 'probabilities': np.array([[0.1], [0.02]])\n }, session.run(estimator_spec.predictions))\n\n def test_unsupported_loss_reduction(self):\n with self.assertRaisesRegexp(ValueError,\n '.+none.+reduction.+is.+specified.+'):\n _ = replicate_model_fn.replicate_model_fn(self.model_fn,\n losses.Reduction.NONE)\n\n\nclass ReplicateAcrossASingleDeviceWithoutTowerOptimizer(\n test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n\n optimizer = gradient_descent.GradientDescentOptimizer(\n params['learning_rate'])\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n predictions={'probabilities': predictions},\n train_op=optimizer.minimize(loss))\n\n @property\n def params(self):\n params = {}\n params['learning_rate'] = 1.0\n return params\n\n def test_train_single_tower(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0'])\n estimator_spec = replicated_model_fn(\n features, labels, model_fn_lib.ModeKeys.TRAIN, self.params)\n session.run(variables.global_variables_initializer())\n\n # loss = feature * c - 
label\n total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)\n self.assertEqual(total_loss, session.run(estimator_spec.loss))\n\n # loss' of c is 3.\n # new value of c = 10 - learning rate * 3 = 7.0.\n session.run(estimator_spec.train_op)\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(7.0, session.run(c))\n\n\nclass UseTowerEstimatorWithoutReplication(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n features = features['features']\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n\n optimizer = replicate_model_fn.TowerOptimizer(\n gradient_descent.GradientDescentOptimizer(params['learning_rate']))\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n predictions={'probabilities': predictions},\n train_op=optimizer.minimize(loss))\n\n @property\n def params(self):\n params = {}\n params['learning_rate'] = 1.0\n return params\n\n def test_train_single_tower(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'features': features}, y=labels, batch_size=2, shuffle=False)\n\n with self.test_session():\n estimator = estimator_lib.Estimator(\n model_fn=self.model_fn,\n model_dir=tempfile.mkdtemp(),\n params=self.params)\n estimator.train(train_input_fn, steps=1)\n\n self.assertEqual(7.0, estimator.get_variable_value('c'))\n\n\nclass MakeSureSyncReplicasOptimizerWorks(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n features = features['features']\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n\n optimizer = gradient_descent.GradientDescentOptimizer(\n params['learning_rate'])\n optimizer = training.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=1)\n sync_hook = optimizer.make_session_run_hook(True)\n optimizer = replicate_model_fn.TowerOptimizer(optimizer)\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n training_hooks=[sync_hook],\n predictions={'probabilities': predictions},\n train_op=optimizer.minimize(\n loss, global_step=training.get_global_step()))\n\n @property\n def params(self):\n params = {}\n params['learning_rate'] = 1.0\n return params\n\n def test_train_multiple_towers(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'features': features}, y=labels, batch_size=2, shuffle=False)\n\n model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'])\n\n estimator = estimator_lib.Estimator(\n model_fn=model_fn, 
model_dir=tempfile.mkdtemp(), params=self.params)\n estimator.train(train_input_fn, steps=1)\n\n self.assertEqual(7.0, estimator.get_variable_value('c'))\n\n\nclass ReplicateWithTwoOptimizersTest(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n side_effects = variable_scope.get_variable(\n 'side_effects',\n initializer=constant_op.constant(0, dtype=dtypes.float64),\n dtype=dtypes.float64,\n trainable=False)\n\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n\n first_optimizer = replicate_model_fn.TowerOptimizer(\n gradient_descent.GradientDescentOptimizer(1.0))\n second_optimizer = replicate_model_fn.TowerOptimizer(\n adam.AdamOptimizer(1.0))\n\n with ops_lib.control_dependencies([side_effects.assign_add(1.0)]):\n first_grads_and_vars = first_optimizer.compute_gradients(loss)\n\n train_op = control_flow_ops.group(\n [first_optimizer.apply_gradients(first_grads_and_vars),\n second_optimizer.minimize(loss)])\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n predictions={'probabilities': predictions},\n train_op=train_op)\n\n def test_train(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(features, labels,\n model_fn_lib.ModeKeys.TRAIN, {})\n session.run(variables.global_variables_initializer())\n\n # loss = feature * c - label\n total_loss = (1.0 * 10 - 1.0) + (2.0 * 10 - 2.0)\n self.assertEqual(total_loss, session.run(estimator_spec.loss))\n\n # loss' of c is 3.\n # new value of c = 10 - learning rate * 3 = 7.0.\n # Adam subtracts another ~1.\n session.run(estimator_spec.train_op)\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertNear(6.0, session.run(c), 0.000001)\n\n side_effects = variable_scope.get_variable(\n 'side_effects', dtype=dtypes.float64)\n self.assertNear(2.0, session.run(side_effects), 0.000001)\n\n\nclass ReplicateWithTwoLossesAndOneOptimizer(test_util.TensorFlowTestCase):\n\n def setUp(self):\n self._should_skip_optimizer = False\n self._towers_left_before_skipping_optimizer = -1\n\n def incorrectly_skip_optimizer_for_tower(self, tower_number):\n self._should_skip_optimizer = True\n self._towers_left_before_skipping_optimizer = tower_number\n\n def should_skip_optimizer(self):\n if not self._should_skip_optimizer:\n return False\n if self._towers_left_before_skipping_optimizer == 0:\n return True\n else:\n self._towers_left_before_skipping_optimizer -= 1\n return False\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n d = variable_scope.get_variable(\n 'd',\n initializer=constant_op.constant(2, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n 
labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n another_predictions = math_ops.multiply(features, d)\n another_loss = losses.absolute_difference(\n labels=labels,\n predictions=another_predictions,\n reduction=losses.Reduction.SUM)\n another_loss = math_ops.reduce_sum(another_loss)\n\n total_loss = math_ops.add(loss, another_loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n\n train_ops = []\n\n optimizer = replicate_model_fn.TowerOptimizer(\n gradient_descent.GradientDescentOptimizer(1.0))\n train_ops.append(optimizer.minimize(loss, var_list=[c]))\n if not self.should_skip_optimizer():\n another_optimizer = replicate_model_fn.TowerOptimizer(\n gradient_descent.GradientDescentOptimizer(1.0))\n train_ops.append(another_optimizer.minimize(another_loss, var_list=[d]))\n\n train_op = control_flow_ops.group(train_ops)\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=metrics,\n predictions={'probabilities': predictions},\n train_op=train_op)\n\n def test_train(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session() as session:\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(features, labels,\n model_fn_lib.ModeKeys.TRAIN, {})\n session.run(variables.global_variables_initializer())\n\n # For each tower, loss = (feature * c - label) + (feature * d - label).\n total_loss = (1.0 * 10 - 1.0 + 1.0 * 2.0 - 1.0) + (\n 2.0 * 10 - 2.0 + 2.0 * 2.0 - 2.0)\n self.assertEqual(total_loss, session.run(estimator_spec.loss))\n\n session.run(estimator_spec.train_op)\n\n # loss' of c or loss' of d is 3.\n # new value of c = 10 - learning rate * 3 = 7.0.\n # new value of d = 2 - learning rate * 3 = -1.0.\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertNear(7.0, session.run(c), 0.000001)\n d = variable_scope.get_variable('d', dtype=dtypes.float64)\n self.assertNear(-1.0, session.run(d), 0.000001)\n\n def test_different_optimizer_calls_within_towers(self):\n self.incorrectly_skip_optimizer_for_tower(1)\n\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session(), ops_lib.Graph().as_default():\n with self.assertRaisesRegexp(\n ValueError, '.+was.+supposed.+to.+make.+same.+optimizer.+calls.+'):\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0', '/gpu:1'])\n _ = replicated_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN,\n {})\n\n\nclass FailToWrapOptimizerInTheModelFn(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.multiply(features, c)\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n loss = math_ops.reduce_sum(loss)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n\n optimizer = gradient_descent.GradientDescentOptimizer(1.0)\n train_op = optimizer.minimize(loss)\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=loss,\n 
eval_metric_ops=metrics,\n predictions={'probabilities': predictions},\n train_op=train_op)\n\n def test_train(self):\n features = np.array([[1.0], [2.0]])\n labels = np.array([[1.0], [2.0]])\n\n with self.test_session():\n with self.assertRaisesRegexp(ValueError,\n 'Please.+wrap.+with.+TowerOptimizer'):\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0', '/gpu:1'])\n _ = replicated_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN,\n {})\n\n\nclass GetLossTowersTest(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(0.25, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.add(np.array([0.1, 0.2, 0.3, features[0]]), c)\n labels = np.array([0.1, 0.2, 0.3, labels[0]])\n\n loss = losses.absolute_difference(\n labels=labels, predictions=predictions, reduction=losses.Reduction.SUM)\n\n return model_fn_lib.EstimatorSpec(mode=mode, loss=math_ops.reduce_sum(loss))\n\n def test_gradients_are_computed(self):\n with self.test_session() as session:\n tower_specs = replicate_model_fn._get_loss_towers(\n self.model_fn,\n mode=None,\n features=[[0.6], [1.6]],\n labels=[[0.6], [0.6]],\n params=None,\n config=None,\n loss_reduction=losses.Reduction.SUM,\n devices=['/gpu:0', '/gpu:1'],\n local_ps_devices=['/gpu:0'],\n name_scope_pattern='test_tower_{}')\n session.run(variables.global_variables_initializer())\n\n self.assertEqual(len(tower_specs), 2)\n\n self.assertEqual('/device:GPU:0', tower_specs[0].loss.device)\n self.assertEqual('Sum:0', tower_specs[0].loss.name)\n self.assertEqual(1.0, session.run(tower_specs[0].loss))\n\n self.assertEqual('/device:GPU:1', tower_specs[1].loss.device)\n self.assertEqual('test_tower_1/Sum:0', tower_specs[1].loss.name)\n # The input batch for the second tower had a loss that is 1.0\n # bigger: 0.6 vs 1.6.\n self.assertEqual(2.0, session.run(tower_specs[1].loss))\n\n self.assertEqual(1, len(variables.global_variables()))\n self.assertEqual(1, len(variables.trainable_variables()))\n\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(0.25, session.run(c))\n\n def test_gradients_are_computed_with_mean_reduction(self):\n with self.test_session() as session:\n tower_specs = replicate_model_fn._get_loss_towers(\n self.model_fn,\n mode=model_fn_lib.ModeKeys.EVAL,\n features=[[0.6], [1.6]],\n labels=[[0.6], [0.6]],\n params=None,\n loss_reduction=losses.Reduction.MEAN,\n config=None,\n devices=['/gpu:0', '/gpu:1'],\n local_ps_devices=['/gpu:0'],\n name_scope_pattern='test_tower_{}')\n session.run(variables.global_variables_initializer())\n\n self.assertEqual(len(tower_specs), 2)\n\n self.assertEqual('/device:GPU:0', tower_specs[0].loss.device)\n self.assertEqual('averaged_loss:0', tower_specs[0].loss.name)\n self.assertEqual(0.5, session.run(tower_specs[0].loss))\n\n self.assertEqual('/device:GPU:1', tower_specs[1].loss.device)\n self.assertEqual('test_tower_1/averaged_loss:0', tower_specs[1].loss.name)\n # The input batch for the second tower had a loss that is 1.0\n # bigger: 0.6 vs 1.6.\n self.assertEqual(1.0, session.run(tower_specs[1].loss))\n\n self.assertEqual(1, len(variables.global_variables()))\n self.assertEqual(1, len(variables.trainable_variables()))\n\n with variable_scope.variable_scope('', reuse=True):\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual(0.25, 
session.run(c))\n\n def test_variables_are_round_robined_correctly(self):\n \"\"\"Test that creates multiple variables and tests round-robin placement.\"\"\"\n\n def model_fn(mode, features, labels, params):\n del params\n for variable_name in ['a', 'b', 'c', 'd']:\n c = variable_scope.get_variable(\n variable_name,\n initializer=constant_op.constant(0.25, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.add(np.array([0.1, 0.2, 0.3, features[0]]), c)\n labels = np.array([0.1, 0.2, 0.3, labels[0]])\n loss = losses.absolute_difference(\n labels=labels,\n predictions=predictions,\n reduction=losses.Reduction.SUM)\n return model_fn_lib.EstimatorSpec(\n mode=mode, loss=math_ops.reduce_sum(loss))\n\n with self.test_session() as session:\n tower_specs = replicate_model_fn._get_loss_towers(\n model_fn,\n mode=None,\n features=[[0.6], [1.6], [2.6]],\n labels=[[0.6], [0.6], [2.6]],\n params=None,\n loss_reduction=losses.Reduction.SUM,\n config=None,\n devices=['/gpu:0', '/gpu:1', '/gpu:3'],\n local_ps_devices=['/gpu:0', '/gpu:1', '/gpu:3'],\n name_scope_pattern='test_tower_{}')\n session.run(variables.global_variables_initializer())\n\n self.assertEqual(len(tower_specs), 3)\n self.assertEqual('/device:GPU:0', tower_specs[0].loss.device)\n self.assertEqual('/device:GPU:1', tower_specs[1].loss.device)\n self.assertEqual('/device:GPU:3', tower_specs[2].loss.device)\n\n with variable_scope.variable_scope('', reuse=True):\n a = variable_scope.get_variable('a', dtype=dtypes.float64)\n self.assertEqual('/device:GPU:0', a.device)\n b = variable_scope.get_variable('b', dtype=dtypes.float64)\n self.assertEqual('/device:GPU:1', b.device)\n c = variable_scope.get_variable('c', dtype=dtypes.float64)\n self.assertEqual('/device:GPU:3', c.device)\n d = variable_scope.get_variable('d', dtype=dtypes.float64)\n self.assertEqual('/device:GPU:0', d.device)\n\n\nclass SplitBatchTest(test_util.TensorFlowTestCase):\n\n def evaluate_shards(self, first_list, second_list):\n evaluate_items = lambda x: x.eval()\n return list(map(evaluate_items, first_list)), list(\n map(evaluate_items, second_list))\n\n def test_simple_half_split(self):\n with self.test_session() as session: # pylint: disable=unused-variable\n features = [0.0, 1.0, 2.0, 3.0]\n labels = [10.0, 11.0, 12.0, 13.0]\n feature_shards, label_shards = replicate_model_fn._split_batch(\n features, labels, 2, device='/gpu:0')\n\n feature_shards, label_shards = self.evaluate_shards(\n feature_shards, label_shards)\n\n self.assertAllEqual([[0.0, 1.0], [2.0, 3.0]], feature_shards)\n self.assertAllEqual([[10.0, 11.0], [12.0, 13.0]], label_shards)\n\n def test_to_each_their_own(self):\n with self.test_session() as session: # pylint: disable=unused-variable\n features = [0.0, 1.0, 2.0, 3.0]\n labels = [10.0, 11.0, 12.0, 13.0]\n feature_shards, label_shards = replicate_model_fn._split_batch(\n features, labels, 4, device='/gpu:0')\n\n feature_shards, label_shards = self.evaluate_shards(\n feature_shards, label_shards)\n\n self.assertAllEqual([[0.0], [1.0], [2.0], [3.0]], feature_shards)\n self.assertAllEqual([[10.0], [11.0], [12.0], [13.0]], label_shards)\n\n def test_one_batch(self):\n with self.test_session() as session: # pylint: disable=unused-variable\n features = [0.0, 1.0, 2.0, 3.0]\n labels = [10.0, 11.0, 12.0, 13.0]\n feature_shards, label_shards = replicate_model_fn._split_batch(\n features, labels, 1, device='/gpu:0')\n\n feature_shards, label_shards = self.evaluate_shards(\n feature_shards, label_shards)\n\n self.assertAllEqual([[0.0, 1.0, 
2.0, 3.0]], feature_shards)\n self.assertAllEqual([[10.0, 11.0, 12.0, 13.0]], label_shards)\n\n def test_half_split_in_dictionary(self):\n with self.test_session() as session: # pylint: disable=unused-variable\n features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}\n labels = [10.0, 11.0, 12.0, 13.0]\n\n feature_shards, label_shards = replicate_model_fn._split_batch(\n features, labels, 2, device='/gpu:0')\n\n self.assertAllEqual([0.0, 1.0], feature_shards[0]['first'].eval())\n self.assertAllEqual([4.0, 5.0], feature_shards[0]['second'].eval())\n self.assertAllEqual([2.0, 3.0], feature_shards[1]['first'].eval())\n self.assertAllEqual([6.0, 7.0], feature_shards[1]['second'].eval())\n self.assertAllEqual([10.0, 11.0], label_shards[0].eval())\n self.assertAllEqual([12.0, 13.0], label_shards[1].eval())\n\n def test_one_batch_in_dictionary(self):\n with self.test_session() as session: # pylint: disable=unused-variable\n features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}\n labels = [10.0, 11.0, 12.0, 13.0]\n\n feature_shards, label_shards = replicate_model_fn._split_batch(\n features, labels, 1, device='/gpu:0')\n\n self.assertAllEqual([0.0, 1.0, 2.0, 3.0],\n feature_shards[0]['first'].eval())\n self.assertAllEqual([4.0, 5.0, 6.0, 7.0],\n feature_shards[0]['second'].eval())\n self.assertAllEqual([10.0, 11.0, 12.0, 13.0], label_shards[0].eval())\n\n def test_feature_and_label_dictionaries(self):\n with self.test_session() as session: # pylint: disable=unused-variable\n features = {'first': [0.0, 1.0, 2.0, 3.0], 'second': [4.0, 5.0, 6.0, 7.0]}\n labels = {'first': [10.0, 11.0], 'second': [12.0, 13.0]}\n\n feature_shards, label_shards = replicate_model_fn._split_batch(\n features, labels, 2, device='/gpu:0')\n\n self.assertAllEqual([0.0, 1.0], feature_shards[0]['first'].eval())\n self.assertAllEqual([4.0, 5.0], feature_shards[0]['second'].eval())\n self.assertAllEqual([2.0, 3.0], feature_shards[1]['first'].eval())\n self.assertAllEqual([6.0, 7.0], feature_shards[1]['second'].eval())\n self.assertAllEqual([10.0], label_shards[0]['first'].eval())\n self.assertAllEqual([12.0], label_shards[0]['second'].eval())\n self.assertAllEqual([11], label_shards[1]['first'].eval())\n self.assertAllEqual([13.0], label_shards[1]['second'].eval())\n\n\nclass TrainSpecTest(test_util.TensorFlowTestCase):\n\n expected_predictions = {}\n\n def create_estimator_spec(self, loss):\n return model_fn_lib.EstimatorSpec(\n mode=model_fn_lib.ModeKeys.TRAIN,\n loss=loss,\n train_op=loss, # Not used; currently required.\n predictions=self.expected_predictions)\n\n def create_constant_loss(self, loss_value):\n return constant_op.constant(loss_value, dtype=dtypes.float64)\n\n def test_example(self):\n with self.test_session() as session:\n tower_losses = list(map(self.create_constant_loss, [2, 4, 6]))\n tower_specs = list(map(self.create_estimator_spec, tower_losses))\n\n expected_train_op = tower_losses[1]\n\n estimator_spec = replicate_model_fn._train_spec(\n tower_specs, expected_train_op, aggregation_device='/gpu:0')\n\n self.assertEqual(expected_train_op, estimator_spec.train_op)\n self.assertEqual(2 + 4 + 6, session.run(estimator_spec.loss))\n self.assertEqual(self.expected_predictions, estimator_spec.predictions)\n\n\nclass EvalSpecTest(test_util.TensorFlowTestCase):\n\n def create_estimator_spec(self, loss, metrics):\n return model_fn_lib.EstimatorSpec(\n mode=model_fn_lib.ModeKeys.EVAL, loss=loss, eval_metric_ops=metrics)\n\n def create_constant_loss(self, loss_value):\n return 
constant_op.constant(loss_value, dtype=dtypes.float64)\n\n def create_eval_metrics(self, noise):\n predictions = np.array([0.1, 0.2, 0.3, 0.6 + noise])\n labels = np.array([0.1, 0.2, 0.3, 0.6])\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions),\n 'auc': metrics_lib.auc(labels, predictions)\n }\n return metrics\n\n def test_example(self):\n with self.test_session() as session:\n tower_losses = map(self.create_constant_loss, [2, 4, 6])\n tower_metrics = map(self.create_eval_metrics, [0, 0.2, 0.3])\n tower_specs = [\n self.create_estimator_spec(l, m)\n for l, m in zip(tower_losses, tower_metrics)\n ]\n session.run(variables.local_variables_initializer())\n\n estimator_spec = replicate_model_fn._eval_spec(\n tower_specs, aggregation_device='/device:GPU:0')\n\n accuracy, a = estimator_spec.eval_metric_ops['accuracy']\n auc, b = estimator_spec.eval_metric_ops['auc']\n\n self.assertEqual('/device:CPU:0', accuracy.device)\n self.assertEqual('/device:CPU:0', auc.device)\n\n session.run([a, b])\n accuracy, auc = session.run([accuracy, auc])\n\n self.assertNear((12 - 2) / 12, accuracy, 0.01)\n self.assertEqual(0, auc)\n self.assertEqual(2 + 4 + 6, session.run(estimator_spec.loss))\n\n def test_handles_single_tower(self):\n with self.test_session() as session:\n tower_losses = map(self.create_constant_loss, [5])\n tower_metrics = map(self.create_eval_metrics, [0.2])\n tower_specs = [\n self.create_estimator_spec(l, m)\n for l, m in zip(tower_losses, tower_metrics)\n ]\n session.run(variables.local_variables_initializer())\n\n estimator_spec = replicate_model_fn._eval_spec(\n tower_specs, aggregation_device='/device:GPU:0')\n\n accuracy, a = estimator_spec.eval_metric_ops['accuracy']\n auc, b = estimator_spec.eval_metric_ops['auc']\n\n self.assertEqual('/device:CPU:0', accuracy.device)\n self.assertEqual('/device:CPU:0', auc.device)\n\n session.run([a, b])\n accuracy = session.run(accuracy)\n auc = session.run(auc)\n\n self.assertNear((4 - 1) / 4, accuracy, 0.01)\n self.assertEqual(0, auc)\n self.assertEqual(5, session.run(estimator_spec.loss))\n\n\nclass PredictSpecTest(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(0.25, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = math_ops.add(np.array([features[0], features[0]]), c)\n\n return model_fn_lib.EstimatorSpec(\n mode=model_fn_lib.ModeKeys.PREDICT,\n predictions={\n 'probabilities': predictions\n })\n\n def test_example(self):\n with self.test_session() as session:\n tower_specs = replicate_model_fn._get_loss_towers(\n self.model_fn,\n mode=None,\n features=[[0.1], [0.2]],\n loss_reduction=losses.Reduction.SUM,\n labels=[[], []],\n params=None,\n config=None,\n devices=['/gpu:0', '/gpu:1'],\n local_ps_devices=['/gpu:0'],\n )\n session.run(variables.global_variables_initializer())\n\n estimator_spec = replicate_model_fn._predict_spec(\n tower_specs, aggregation_device='/gpu:0')\n\n self.assertEqual('/device:GPU:0',\n estimator_spec.predictions['probabilities'].device)\n self.assertAllClose({\n 'probabilities': np.array([0.35, 0.35, 0.45, 0.45])\n }, session.run(estimator_spec.predictions))\n\n\nclass ReduceMetricVariablesTest(test_util.TensorFlowTestCase):\n\n def create_metric_variable(self, initial_value, name):\n return variable_scope.variable(\n initial_value,\n trainable=False,\n collections=[ops_lib.GraphKeys.METRIC_VARIABLES],\n validate_shape=True,\n name=name)\n\n def 
create_tower_metrics(self, tower_id):\n with variable_scope.variable_scope('', reuse=(tower_id != 0)):\n self.create_metric_variable(1.3 * (tower_id + 1), 'total')\n self.create_metric_variable(2.3 * (tower_id + 1), 'count')\n self.create_metric_variable(\n np.array([3.3, 3.5, 3.7]) * (tower_id + 1), 'total')\n\n def test_example(self):\n with self.test_session() as session:\n for tower_id in range(3):\n self.create_tower_metrics(tower_id)\n\n session.run(\n variables.variables_initializer(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))\n\n session.run(\n replicate_model_fn._reduce_metric_variables(number_of_towers=3))\n\n # 1st tower = 1.3, 2.3, [3.3, 3.5, 3.7]\n # 2nd tower = 2.6, 4.6, [6.6, 7.0, 7.4]\n # 3rd tower = 3.9, 6.9, [9.9, 10.5, 11.1]\n # Reduced = 7.8, 13.8, [19.8, 21.0, 22.2]\n # Towers are accumulated in the first tower.\n local_metrics = session.run(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))\n\n self.assertNear(7.8, local_metrics[0], 0.01)\n self.assertNear(13.8, local_metrics[1], 0.01)\n self.assertAllClose([19.8, 21., 22.1], local_metrics[2], 0.01)\n self.assertNear(0.0, local_metrics[3], 0.01)\n self.assertNear(0.0, local_metrics[4], 0.01)\n self.assertAllClose([0.0, 0.0, 0.0], local_metrics[5], 0.01)\n self.assertNear(0.0, local_metrics[6], 0.01)\n self.assertNear(0.0, local_metrics[7], 0.01)\n self.assertAllClose([0.0, 0.0, 0.0], local_metrics[8], 0.01)\n\n def test_reduce_is_idempotent(self):\n with self.test_session() as session:\n for tower_id in range(3):\n self.create_tower_metrics(tower_id)\n\n session.run(\n variables.variables_initializer(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))\n\n for _ in range(20):\n session.run(\n replicate_model_fn._reduce_metric_variables(number_of_towers=3))\n\n local_metrics = session.run(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))\n\n self.assertNear(7.8, local_metrics[0], 0.01)\n self.assertNear(13.8, local_metrics[1], 0.01)\n self.assertAllClose([19.8, 21., 22.1], local_metrics[2], 0.01)\n self.assertNear(0.0, local_metrics[3], 0.01)\n self.assertNear(0.0, local_metrics[4], 0.01)\n self.assertAllClose([0.0, 0.0, 0.0], local_metrics[5], 0.01)\n self.assertNear(0.0, local_metrics[6], 0.01)\n self.assertNear(0.0, local_metrics[7], 0.01)\n self.assertAllClose([0.0, 0.0, 0.0], local_metrics[8], 0.01)\n\n def test_handles_single_tower(self):\n with self.test_session() as session:\n self.create_tower_metrics(0)\n session.run(\n variables.variables_initializer(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))\n\n session.run(\n replicate_model_fn._reduce_metric_variables(number_of_towers=1))\n\n local_metrics = session.run(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES))\n\n self.assertNear(1.3, local_metrics[0], 0.01)\n self.assertNear(2.3, local_metrics[1], 0.01)\n self.assertAllClose([3.3, 3.5, 3.7], local_metrics[2], 0.01)\n\n def test_doesnt_accept_uneven_number_of_variables(self):\n with self.test_session() as session:\n for tower_id in range(3):\n self.create_tower_metrics(tower_id)\n self.create_metric_variable(-1.0, 'oddball')\n\n session.run(\n variables.variables_initializer(\n ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)))\n\n with self.assertRaisesRegexp(\n ValueError, '.+Expected.+local.+variables.+but.+got.+instead.+'):\n session.run(\n replicate_model_fn._reduce_metric_variables(number_of_towers=3))\n\n\nclass MergeExportOutputsTest(test_util.TensorFlowTestCase):\n\n def model_fn(self, mode, features, 
labels, params):\n c = variable_scope.get_variable(\n 'c',\n initializer=constant_op.constant(10, dtype=dtypes.float64),\n dtype=dtypes.float64)\n\n predictions = {'probabilities': math_ops.multiply(features, c)}\n loss = losses.absolute_difference(\n labels=labels,\n predictions=predictions['probabilities'],\n reduction=losses.Reduction.SUM)\n\n metrics = {\n 'accuracy': metrics_lib.accuracy(labels, predictions['probabilities']),\n 'auc': metrics_lib.auc(labels, predictions['probabilities'])\n }\n tensor_string_repr = str(features)\n classes = constant_op.constant(\n re.search('(split_inputs/split:[0-9])', tensor_string_repr).group(1),\n dtype=dtypes.string)\n\n export_outputs = {\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n export_output.PredictOutput(predictions),\n 'classification_output':\n export_output.ClassificationOutput(predictions['probabilities'],\n classes),\n 'classification_scores':\n export_output.ClassificationOutput(\n scores=predictions['probabilities']),\n 'classification_classes':\n export_output.ClassificationOutput(classes=classes),\n 'regression_output':\n export_output.RegressionOutput(predictions['probabilities']),\n }\n\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n loss=math_ops.reduce_sum(loss),\n eval_metric_ops=metrics,\n predictions=predictions,\n export_outputs=export_outputs)\n\n def replicate_estimator_spec(self, session):\n features = np.array([0.01, 0.002])\n labels = np.array([0.01, 0.02])\n\n replicated_model_fn = replicate_model_fn.replicate_model_fn(\n self.model_fn, devices=['/gpu:0', '/gpu:1'])\n estimator_spec = replicated_model_fn(features, labels,\n model_fn_lib.ModeKeys.PREDICT, {})\n session.run(variables.global_variables_initializer())\n return estimator_spec\n\n def test_merge_predict_output(self):\n with self.test_session() as session:\n estimator_spec = self.replicate_estimator_spec(session)\n self.assertAllClose(\n {\n 'probabilities': np.array([0.1, 0.02])\n },\n session.run(estimator_spec.export_outputs[\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].outputs))\n\n def test_merge_classification_output_scores_classes(self):\n with self.test_session() as session:\n estimator_spec = self.replicate_estimator_spec(session)\n self.assertAllClose(\n [0.1, 0.02],\n session.run(\n estimator_spec.export_outputs['classification_output'].scores))\n self.assertAllEqual(\n [b'split_inputs/split:0', b'split_inputs/split:1'],\n session.run(\n estimator_spec.export_outputs['classification_output'].classes))\n\n def test_merge_classification_output_scores(self):\n with self.test_session() as session:\n estimator_spec = self.replicate_estimator_spec(session)\n self.assertAllClose(\n [0.1, 0.02],\n session.run(\n estimator_spec.export_outputs['classification_scores'].scores))\n self.assertEqual(\n None, estimator_spec.export_outputs['classification_scores'].classes)\n\n def test_merge_classification_output_classes(self):\n with self.test_session() as session:\n estimator_spec = self.replicate_estimator_spec(session)\n self.assertAllEqual(\n [b'split_inputs/split:0', b'split_inputs/split:1'],\n session.run(\n estimator_spec.export_outputs['classification_classes'].classes))\n self.assertEqual(\n None, estimator_spec.export_outputs['classification_classes'].scores)\n\n def test_merge_regression_output(self):\n with self.test_session() as session:\n estimator_spec = self.replicate_estimator_spec(session)\n self.assertAllClose(\n [0.1, 0.02],\n session.run(estimator_spec.export_outputs['regression_output'].value))\n\n\nclass 
GetLocalDevicesTest(test_util.TensorFlowTestCase):\n\n def test_there_is_at_least_a_cpu(self):\n self.assertTrue(replicate_model_fn._get_local_devices('CPU'))\n\n def test_there_is_no_xpu(self):\n self.assertFalse(\n replicate_model_fn._get_local_devices('XPU')) # XPU doesn't exist.\n\n def test_whether_there_is_a_gpu(self):\n if test.is_gpu_available():\n self.assertTrue(len(replicate_model_fn._get_local_devices('GPU')))\n\n\nclass LocalDeviceSetterTest(test_util.TensorFlowTestCase):\n\n def test_vars_are_on_ps_but_ops_are_on_workers(self):\n ps_devices = ['/device:GPU:3']\n round_robin = device_setter._RoundRobinStrategy(num_tasks=len(ps_devices))\n\n local_device_setter = replicate_model_fn._local_device_setter(\n ps_devices=ps_devices,\n ps_strategy=round_robin,\n worker_device='/device:GPU:2')\n\n with ops_lib.device(local_device_setter):\n a = variables.Variable(0.01)\n self.assertEqual('/device:GPU:3', a.device)\n\n b = variables.Variable(0.02)\n self.assertEqual('/device:GPU:3', b.device)\n\n c = variables.Variable(0.03)\n self.assertEqual('/device:GPU:3', c.device)\n\n a_op = array_ops.concat(a, axis=0)\n self.assertEqual('/device:GPU:2', a_op.device)\n\n b_op = array_ops.concat(b, axis=0)\n self.assertEqual('/device:GPU:2', b_op.device)\n\n def test_round_robin_placement(self):\n ps_devices = [\n '/device:GPU:0', '/device:GPU:1', '/device:GPU:3', '/device:GPU:4'\n ]\n round_robin = device_setter._RoundRobinStrategy(num_tasks=len(ps_devices))\n\n local_device_setter = replicate_model_fn._local_device_setter(\n ps_devices=ps_devices,\n ps_strategy=round_robin,\n worker_device='/device:GPU:2')\n\n with ops_lib.device(local_device_setter):\n a = variables.Variable(0.01)\n self.assertEqual('/device:GPU:0', a.device)\n\n b = variables.Variable(0.02)\n self.assertEqual('/device:GPU:1', b.device)\n\n c = variables.Variable(0.03)\n self.assertEqual('/device:GPU:3', c.device)\n\n a_op = array_ops.concat(a, axis=0)\n self.assertEqual('/device:GPU:2', a_op.device)\n\n b_op = array_ops.concat(b, axis=0)\n self.assertEqual('/device:GPU:2', b_op.device)\n\n c = variables.Variable(0.03)\n self.assertEqual('/device:GPU:4', c.device)\n\n d = variables.Variable(0.03)\n self.assertEqual('/device:GPU:0', d.device)\n\n c_op = array_ops.concat(c, axis=0)\n self.assertEqual('/device:GPU:2', c_op.device)\n\n\nclass ComputeSumWithDevicePlacementTest(test_util.TensorFlowTestCase):\n\n def test_vectors(self):\n with self.test_session() as session:\n total = replicate_model_fn._compute_sum_on_device(\n [1.0, 2.0, 3.0, 4.0], device='/device:GPU:0', name='test_sum')\n\n self.assertEqual('/device:GPU:0', total.device)\n self.assertEqual('test_sum', total.op.name)\n self.assertEqual(10.0, session.run(total))\n\n def test_tensors(self):\n with self.test_session() as session:\n total = replicate_model_fn._compute_sum_on_device(\n [[1.0, 2.0], [3.0, 4.0]], device='/device:GPU:0', name='test_sum')\n\n self.assertEqual('/device:GPU:0', total.device)\n self.assertEqual('test_sum', total.op.name)\n self.assertAllEqual([4.0, 6.0], session.run(total))\n\n def test_indexedslices(self):\n with self.test_session() as session:\n a = ops_lib.IndexedSlices(\n constant_op.constant([1.0, 2.0]), [0, 1],\n dense_shape=constant_op.constant([2]))\n b = ops_lib.IndexedSlices(constant_op.constant([3.0, 4.0]), [0, 1])\n\n total = replicate_model_fn._compute_sum_on_device(\n [a, b], device='/device:GPU:0')\n\n self.assertEqual('/device:GPU:0', total.device)\n self.assertAllEqual([4.0, 6.0],\n 
session.run(ops_lib.convert_to_tensor(total)))\n\n def test_indexedslices_higher_dimensions(self):\n with self.test_session() as session:\n a = ops_lib.IndexedSlices(\n constant_op.constant([[1.0, 5.0], [2.0, 6.0]]), [0, 1],\n dense_shape=constant_op.constant([2, 4]))\n b = ops_lib.IndexedSlices(\n constant_op.constant([[3.0, 7.0], [4.0, 8.0]]), [0, 1])\n\n total = replicate_model_fn._compute_sum_on_device(\n [a, b], device='/device:GPU:0')\n\n self.assertEqual('/device:GPU:0', total.device)\n self.assertAllEqual([[4.0, 12.0], [6.0, 14.0]],\n session.run(ops_lib.convert_to_tensor(total)))\n\n def test_indexedslices_some_dont_overlap(self):\n with self.test_session() as session:\n a = ops_lib.IndexedSlices(\n constant_op.constant([1.0, 2.0]), [0, 3],\n dense_shape=constant_op.constant([4]))\n b = ops_lib.IndexedSlices(constant_op.constant([3.0, 4.0]), [0, 1])\n\n total = replicate_model_fn._compute_sum_on_device(\n [a, b], device='/device:GPU:0')\n\n self.assertEqual('/device:GPU:0', total.device)\n self.assertAllEqual([4.0, 4.0, 0.0, 2.0],\n session.run(ops_lib.convert_to_tensor(total)))\n\n def test_no_name_for_indexslices(self):\n a = ops_lib.IndexedSlices(\n constant_op.constant([1.0, 2.0]), [0, 1],\n dense_shape=constant_op.constant([2]))\n b = ops_lib.IndexedSlices(constant_op.constant([3.0, 4.0]), [0, 1])\n\n with self.assertRaisesRegexp(ValueError, '.+name.+not.+expected.+'):\n _ = replicate_model_fn._compute_sum_on_device(\n [a, b], device='/device:GPU:0', name='cant_name_indexslices')\n\n\nclass ConcatTensorDictsTest(test_util.TensorFlowTestCase):\n\n def test_example(self):\n tensor_dicts = [\n {\n 'a': np.array([1.0, 2.0]),\n 'b': np.array([11.0]),\n 'c': np.array([21.0]),\n },\n {\n 'a': np.array([3.0]),\n 'b': np.array([12.0, 13.0]),\n },\n {\n 'b': np.array([14.0]),\n },\n ]\n\n with self.test_session() as session:\n self.assertAllClose({\n 'a': np.array([1.0, 2.0, 3.0]),\n 'b': np.array([11.0, 12.0, 13.0, 14.0]),\n 'c': np.array([21.0]),\n }, session.run(replicate_model_fn._concat_tensor_dicts(*tensor_dicts)))\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.ops.losses.losses.absolute_difference", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._replicate_model_fn_with_mode", "tensorflow.python.ops.variables.Variable", "tensorflow.python.training.training.get_global_step", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.summary.writer.writer_cache.FileWriterCache.clear", "tensorflow.python.estimator.export.export_output.RegressionOutput", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.variables.local_variables_initializer", "numpy.rint", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.estimator.export.export_output.PredictOutput", "tensorflow.python.platform.gfile.Exists", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._train_spec", "tensorflow.python.framework.ops.device", "tensorflow.python.platform.test.main", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.variable_scope.variable", "tensorflow.python.feature_column.feature_column.numeric_column", "tensorflow.python.feature_column.feature_column.make_parse_example_spec", "tensorflow.python.ops.array_ops.concat", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._split_batch", "tensorflow.python.ops.metrics.auc", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._predict_spec", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.variables.global_variables_initializer", "numpy.array", "tensorflow.python.framework.ops.Graph", "tensorflow.python.estimator.export.export_output.ClassificationOutput", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn.replicate_model_fn", "tensorflow.python.estimator.canned.optimizers.get_optimizer_instance", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._local_device_setter", "tensorflow.python.ops.metrics.accuracy", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._concat_tensor_dicts", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._get_loss_towers", "tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._reduce_metric_variables", "tensorflow.python.training.gradient_descent.GradientDescentOptimizer", "tensorflow.python.estimator.estimator.Estimator", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn.TowerOptimizer", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._get_local_devices", "tensorflow.python.training.training.SyncReplicasOptimizer", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.training.adam.AdamOptimizer", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.variables.global_variables", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._eval_spec", "tensorflow.python.estimator.export.export.build_parsing_serving_input_receiver_fn", "tensorflow.python.ops.math_ops.multiply", "tensorflow.python.estimator.model_fn.EstimatorSpec", "numpy.linspace", "tensorflow.contrib.estimator.python.estimator.replicate_model_fn._compute_sum_on_device" ] ]
hellotem/fixmatch
[ "5b27a3bc057a8f2a144e5c8287bc44f715621508" ]
[ "third_party/auto_augment/wrn.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google UDA Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Builds the WideResNet Model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nimport third_party.auto_augment.custom_ops as ops\n\n\ndef residual_block(\n x, in_filter, out_filter, stride, update_bn=True,\n activate_before_residual=False):\n \"\"\"Adds residual connection to `x` in addition to applying BN->ReLU->3x3 Conv.\n\n Args:\n x: Tensor that is the output of the previous layer in the model.\n in_filter: Number of filters `x` has.\n out_filter: Number of filters that the output of this layer will have.\n stride: Integer that specified what stride should be applied to `x`.\n activate_before_residual: Boolean on whether a BN->ReLU should be applied\n to x before the convolution is applied.\n\n Returns:\n A Tensor that is the result of applying two sequences of BN->ReLU->3x3 Conv\n and then adding that Tensor to `x`.\n \"\"\"\n\n if activate_before_residual: # Pass up RELU and BN activation for resnet\n with tf.variable_scope('shared_activation'):\n x = ops.batch_norm(x, update_stats=update_bn, scope='init_bn')\n x = tf.nn.relu(x)\n orig_x = x\n else:\n orig_x = x\n\n block_x = x\n if not activate_before_residual:\n with tf.variable_scope('residual_only_activation'):\n block_x = ops.batch_norm(block_x, update_stats=update_bn,\n scope='init_bn')\n block_x = tf.nn.relu(block_x)\n\n with tf.variable_scope('sub1'):\n block_x = ops.conv2d(\n block_x, out_filter, 3, stride=stride, scope='conv1')\n\n with tf.variable_scope('sub2'):\n block_x = ops.batch_norm(block_x, update_stats=update_bn, scope='bn2')\n block_x = tf.nn.relu(block_x)\n block_x = ops.conv2d(\n block_x, out_filter, 3, stride=1, scope='conv2')\n\n with tf.variable_scope(\n 'sub_add'): # If number of filters do not agree then zero pad them\n if in_filter != out_filter:\n orig_x = ops.avg_pool(orig_x, stride, stride)\n orig_x = ops.zero_pad(orig_x, in_filter, out_filter)\n x = orig_x + block_x\n return x\n\n\ndef _res_add(in_filter, out_filter, stride, x, orig_x):\n \"\"\"Adds `x` with `orig_x`, both of which are layers in the model.\n\n Args:\n in_filter: Number of filters in `orig_x`.\n out_filter: Number of filters in `x`.\n stride: Integer specifying the stide that should be applied `orig_x`.\n x: Tensor that is the output of the previous layer.\n orig_x: Tensor that is the output of an earlier layer in the network.\n\n Returns:\n A Tensor that is the result of `x` and `orig_x` being added after\n zero padding and striding are applied to `orig_x` to get the shapes\n to match.\n \"\"\"\n if in_filter != out_filter:\n orig_x = ops.avg_pool(orig_x, stride, stride)\n orig_x = ops.zero_pad(orig_x, in_filter, out_filter)\n x = x + orig_x\n orig_x = x\n return x, orig_x\n\n\ndef build_wrn_model(images, num_classes, wrn_size, update_bn=True):\n \"\"\"Builds the WRN model.\n\n Build the Wide ResNet 
model from https://arxiv.org/abs/1605.07146.\n\n Args:\n images: Tensor of images that will be fed into the Wide ResNet Model.\n num_classes: Number of classed that the model needs to predict.\n wrn_size: Parameter that scales the number of filters in the Wide ResNet\n model.\n\n Returns:\n The logits of the Wide ResNet model.\n \"\"\"\n # wrn_size = 16 * widening factor k\n kernel_size = wrn_size\n filter_size = 3\n # depth = num_blocks_per_resnet * 6 + 4 = 28\n num_blocks_per_resnet = 4\n filters = [\n min(kernel_size, 16), kernel_size, kernel_size * 2, kernel_size * 4\n ]\n strides = [1, 2, 2] # stride for each resblock\n\n # Run the first conv\n with tf.variable_scope('init'):\n x = images\n output_filters = filters[0]\n x = ops.conv2d(x, output_filters, filter_size, scope='init_conv')\n\n first_x = x # Res from the beginning\n orig_x = x # Res from previous block\n\n for block_num in range(1, 4):\n with tf.variable_scope('unit_{}_0'.format(block_num)):\n activate_before_residual = True if block_num == 1 else False\n x = residual_block(\n x,\n filters[block_num - 1],\n filters[block_num],\n strides[block_num - 1],\n update_bn=update_bn,\n activate_before_residual=activate_before_residual)\n for i in range(1, num_blocks_per_resnet):\n with tf.variable_scope('unit_{}_{}'.format(block_num, i)):\n x = residual_block(\n x,\n filters[block_num],\n filters[block_num],\n 1,\n update_bn=update_bn,\n activate_before_residual=False)\n x, orig_x = _res_add(filters[block_num - 1], filters[block_num],\n strides[block_num - 1], x, orig_x)\n final_stride_val = np.prod(strides)\n x, _ = _res_add(filters[0], filters[3], final_stride_val, x, first_x)\n with tf.variable_scope('unit_last'):\n x = ops.batch_norm(x, scope='final_bn')\n x = tf.nn.relu(x)\n x = ops.global_avg_pool(x)\n logits = ops.fc(x, num_classes)\n return logits\n" ]
[ [ "tensorflow.variable_scope", "numpy.prod", "tensorflow.nn.relu" ] ]
albertonavaa/Cirq
[ "76352585b9667873e60d51ee8cf7e6549c9d9a5e" ]
[ "cirq/sim/clifford/clifford_simulator.py" ]
[ "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"An efficient simulator for Clifford circuits.\n\nAllowed operations include:\n\t- X,Y,Z,H,S,CNOT,CZ\n\t- measurements in the computational basis\n\nThe quantum state is specified in two forms:\n 1. In terms of stabilizer generators. These are a set of n Pauli operators\n {S_1,S_2,...,S_n} such that S_i |psi> = |psi>.\n\n This implementation is based on Aaronson and Gottesman,\n 2004 (arXiv:quant-ph/0406196).\n\n 2. In the CH-form defined by Bravyi et al, 2018 (arXiv:1808.00128).\n This representation keeps track of overall phase and enables access\n to state vector amplitudes.\n\"\"\"\n\nfrom typing import Any, Dict, List, Iterator, Sequence\n\nimport numpy as np\n\nimport cirq\nfrom cirq import circuits, study, ops, protocols, value\nfrom cirq.ops.dense_pauli_string import DensePauliString\nfrom cirq.protocols import act_on\nfrom cirq.sim import clifford, simulator\nfrom cirq._compat import deprecated\nfrom cirq.sim.simulator import check_all_resolved\n\n\nclass CliffordSimulator(\n simulator.SimulatesSamples,\n simulator.SimulatesIntermediateState[\n 'CliffordSimulatorStepResult', 'CliffordTrialResult', 'CliffordState'\n ],\n):\n \"\"\"An efficient simulator for Clifford circuits.\"\"\"\n\n def __init__(self, seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None):\n \"\"\"Creates instance of `CliffordSimulator`.\n\n Args:\n seed: The random seed to use for this simulator.\n \"\"\"\n self.init = True\n self._prng = value.parse_random_state(seed)\n\n @staticmethod\n def is_supported_operation(op: 'cirq.Operation') -> bool:\n \"\"\"Checks whether given operation can be simulated by this simulator.\"\"\"\n # TODO: support more general Pauli measurements\n return protocols.has_stabilizer_effect(op)\n\n def _base_iterator(\n self, circuit: circuits.Circuit, qubit_order: ops.QubitOrderOrList, initial_state: int\n ) -> Iterator['cirq.CliffordSimulatorStepResult']:\n \"\"\"Iterator over CliffordSimulatorStepResult from Moments of a Circuit\n\n Args:\n circuit: The circuit to simulate.\n qubit_order: Determines the canonical ordering of the qubits. This\n is often used in specifying the initial state, i.e. the\n ordering of the computational basis states.\n initial_state: The initial state for the simulation in the\n computational basis. 
Represented as a big endian int.\n\n\n Yields:\n CliffordStepResult from simulating a Moment of the Circuit.\n \"\"\"\n qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(circuit.all_qubits())\n\n qubit_map = {q: i for i, q in enumerate(qubits)}\n\n if len(circuit) == 0:\n yield CliffordSimulatorStepResult(\n measurements={}, state=CliffordState(qubit_map, initial_state=initial_state)\n )\n return\n\n state = CliffordState(qubit_map, initial_state=initial_state)\n ch_form_args = clifford.ActOnStabilizerCHFormArgs(\n state.ch_form,\n [],\n self._prng,\n {},\n )\n\n for moment in circuit:\n ch_form_args.log_of_measurement_results = {}\n\n for op in moment:\n try:\n ch_form_args.axes = tuple(state.qubit_map[i] for i in op.qubits)\n act_on(op, ch_form_args)\n except TypeError:\n raise NotImplementedError(\n f\"CliffordSimulator doesn't support {op!r}\"\n ) # type: ignore\n\n yield CliffordSimulatorStepResult(\n measurements=ch_form_args.log_of_measurement_results, state=state\n )\n\n def _create_simulator_trial_result(\n self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state,\n ):\n\n return CliffordTrialResult(\n params=params, measurements=measurements, final_simulator_state=final_simulator_state\n )\n\n def _run(\n self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int\n ) -> Dict[str, List[np.ndarray]]:\n\n param_resolver = param_resolver or study.ParamResolver({})\n resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)\n check_all_resolved(resolved_circuit)\n\n measurements = {} # type: Dict[str, List[np.ndarray]]\n\n for _ in range(repetitions):\n all_step_results = self._base_iterator(\n resolved_circuit, qubit_order=ops.QubitOrder.DEFAULT, initial_state=0\n )\n\n for step_result in all_step_results:\n for k, v in step_result.measurements.items():\n if not k in measurements:\n measurements[k] = []\n measurements[k].append(np.array(v, dtype=bool))\n\n return {k: np.array(v) for k, v in measurements.items()}\n\n\nclass CliffordTrialResult(simulator.SimulationTrialResult):\n def __init__(\n self,\n params: study.ParamResolver,\n measurements: Dict[str, np.ndarray],\n final_simulator_state: 'CliffordState',\n ) -> None:\n super().__init__(\n params=params, measurements=measurements, final_simulator_state=final_simulator_state\n )\n\n self.final_state = final_simulator_state\n\n def __str__(self) -> str:\n samples = super().__str__()\n final = self._final_simulator_state\n return f'measurements: {samples}\\noutput state: {final}'\n\n\nclass CliffordSimulatorStepResult(simulator.StepResult['CliffordState']):\n \"\"\"A `StepResult` that includes `StateVectorMixin` methods.\"\"\"\n\n def __init__(self, state, measurements):\n \"\"\"Results of a step of the simulator.\n Attributes:\n state: A CliffordState\n measurements: A dictionary from measurement gate key to measurement\n results, ordered by the qubits that the measurement operates on.\n qubit_map: A map from the Qubits in the Circuit to the the index\n of this qubit for a canonical ordering. 
This canonical ordering\n is used to define the state vector (see the state_vector()\n method).\n \"\"\"\n self.measurements = measurements\n self.state = state.copy()\n\n def __str__(self) -> str:\n def bitstring(vals):\n return ''.join('1' if v else '0' for v in vals)\n\n results = sorted([(key, bitstring(val)) for key, val in self.measurements.items()])\n\n if len(results) == 0:\n measurements = ''\n else:\n measurements = ' '.join([f'{key}={val}' for key, val in results]) + '\\n'\n\n final = self.state\n\n return f'{measurements}{final}'\n\n def _simulator_state(self):\n return self.state\n\n def sample(\n self,\n qubits: List[ops.Qid],\n repetitions: int = 1,\n seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,\n ) -> np.ndarray:\n\n measurements = {} # type: Dict[str, List[np.ndarray]]\n\n for i in range(repetitions):\n self.state.apply_measurement(\n cirq.measure(*qubits, key=str(i)),\n measurements,\n value.parse_random_state(seed),\n collapse_state_vector=False,\n )\n\n return np.array(list(measurements.values()), dtype=bool)\n\n\[email protected]_equality\nclass CliffordState:\n \"\"\"A state of the Clifford simulation.\n\n The state is stored using Bravyi's CH-form which allows access to the full\n state vector (including phase).\n\n Gates and measurements are applied to each representation in O(n^2) time.\n \"\"\"\n\n def __init__(self, qubit_map, initial_state=0):\n self.qubit_map = qubit_map\n self.n = len(qubit_map)\n\n self.ch_form = clifford.StabilizerStateChForm(self.n, initial_state)\n\n def _json_dict_(self):\n return {\n 'cirq_type': self.__class__.__name__,\n 'qubit_map': [(k, v) for k, v in self.qubit_map.items()],\n 'ch_form': self.ch_form,\n }\n\n @classmethod\n def _from_json_dict_(cls, qubit_map, ch_form, **kwargs):\n state = cls(dict(qubit_map))\n state.ch_form = ch_form\n\n return state\n\n def _value_equality_values_(self) -> Any:\n return self.qubit_map, self.ch_form\n\n def copy(self) -> 'CliffordState':\n state = CliffordState(self.qubit_map)\n state.ch_form = self.ch_form.copy()\n\n return state\n\n def __repr__(self) -> str:\n return repr(self.ch_form)\n\n def __str__(self) -> str:\n \"\"\"Return the state vector string representation of the state.\"\"\"\n return str(self.ch_form)\n\n def to_numpy(self) -> np.ndarray:\n return self.ch_form.to_state_vector()\n\n @deprecated(deadline='v0.11', fix='use CliffordTableau instead')\n def stabilizers(self) -> List[DensePauliString]:\n \"\"\"Returns the stabilizer generators of the state. These\n are n operators {S_1,S_2,...,S_n} such that S_i |psi> = |psi>\"\"\"\n return []\n\n @deprecated(deadline='v0.11', fix='use CliffordTableau instead')\n def destabilizers(self) -> List[DensePauliString]:\n \"\"\"Returns the destabilizer generators of the state. 
These\n are n operators {S_1,S_2,...,S_n} such that along with the stabilizer\n generators above generate the full Pauli group on n qubits.\"\"\"\n return []\n\n def state_vector(self):\n return self.ch_form.state_vector()\n\n def apply_unitary(self, op: 'cirq.Operation'):\n ch_form_args = clifford.ActOnStabilizerCHFormArgs(\n self.ch_form, [self.qubit_map[i] for i in op.qubits], np.random.RandomState(), {}\n )\n try:\n act_on(op, ch_form_args)\n except TypeError:\n raise ValueError(\n f'{str(op.gate)} cannot be run with Clifford simulator.'\n ) # type: ignore\n return\n\n def apply_measurement(\n self,\n op: 'cirq.Operation',\n measurements: Dict[str, List[np.ndarray]],\n prng: np.random.RandomState,\n collapse_state_vector=True,\n ):\n if not isinstance(op.gate, cirq.MeasurementGate):\n raise TypeError(\n 'apply_measurement only supports cirq.MeasurementGate operations. Found %s instead.'\n % str(op.gate)\n )\n\n if collapse_state_vector:\n state = self\n else:\n state = self.copy()\n\n qids = [self.qubit_map[i] for i in op.qubits]\n\n ch_form_args = clifford.ActOnStabilizerCHFormArgs(state.ch_form, qids, prng, measurements)\n act_on(op, ch_form_args)\n\n @deprecated(deadline='v0.11', fix='Use the apply_measurement instead')\n def perform_measurement(\n self, qubits: Sequence[ops.Qid], prng: np.random.RandomState, collapse_state_vector=True\n ):\n results = []\n\n if collapse_state_vector:\n state = self\n else:\n state = self.copy()\n\n for qubit in qubits:\n result = state.ch_form._measure(self.qubit_map[qubit], prng)\n results.append(result)\n\n return results\n" ]
[ [ "numpy.array", "numpy.random.RandomState" ] ]
lee14257/delphi-epidata
[ "b007d778321e68be5526ca9ce1113b13d24d6fe8" ]
[ "src/acquisition/covid_hosp/common/database.py" ]
[ "\"\"\"Common database code used by multiple `covid_hosp` scrapers.\"\"\"\n\n# standard library\nfrom contextlib import contextmanager\nimport math\n\n# third party\nimport mysql.connector\nimport pandas as pd\n\n# first party\nimport delphi.operations.secrets as secrets\n\n\nclass Database:\n\n def __init__(self,\n connection,\n table_name=None,\n columns_and_types=None,\n additional_fields=None):\n \"\"\"Create a new Database object.\n\n Parameters\n ----------\n connection\n An open connection to a database.\n table_name : str\n The name of the table which holds the dataset.\n columns_and_types : tuple[str, str, Callable]\n List of 3-tuples of (CSV header name, SQL column name, data type) for\n all the columns in the CSV file.\n additional_fields : tuple[str]\n List of 2-tuples of (value, SQL column name) fordditional fields to include\n at the end of the row which are not present in the CSV data.\n \"\"\"\n\n self.connection = connection\n self.table_name = table_name\n self.publication_col_name = \"issue\" if table_name == 'covid_hosp_state_timeseries' else \\\n 'publication_date'\n self.columns_and_types = columns_and_types\n self.additional_fields = additional_fields if additional_fields is not None else []\n\n @classmethod\n @contextmanager\n def connect(database_class, mysql_connector_impl=mysql.connector):\n \"\"\"Connect to a database and provide the connection as a context manager.\n\n As long as the context manager exits normally, the connection's transaction\n will be committed. Otherwise, if the context is exited by an Exception, the\n transaction will be rolled back.\n\n In any case, the connection will be gracefully closed upon exiting the\n context manager.\n \"\"\"\n\n # connect to the database\n user, password = secrets.db.epi\n connection = mysql_connector_impl.connect(\n host=secrets.db.host,\n user=user,\n password=password,\n database='epidata')\n\n try:\n # provide the connection to the context manager\n yield database_class(connection)\n\n # rollback by default; the following commit will only take place if no\n # exception was raised in calling code\n connection.commit()\n finally:\n # close the connection in any case\n connection.close()\n\n @contextmanager\n def new_cursor(self):\n \"\"\"Create and provide a database cursor as a context manager.\n\n The cursor will be gracefully closed upon exiting the context manager.\n \"\"\"\n\n cursor = self.connection.cursor()\n try:\n yield cursor\n finally:\n cursor.close()\n\n def contains_revision(self, revision):\n \"\"\"Return whether the given revision already exists in the database.\n\n Parameters\n ----------\n revision : str\n Unique revision string.\n\n Returns\n -------\n bool\n True iff the revision already exists.\n \"\"\"\n\n with self.new_cursor() as cursor:\n cursor.execute('''\n SELECT\n count(1) > 0\n FROM\n `covid_hosp_meta`\n WHERE\n `dataset_name` = %s AND `revision_timestamp` = %s\n ''', (self.table_name, revision))\n for (result,) in cursor:\n return bool(result)\n\n def insert_metadata(self, publication_date, revision, meta_json):\n \"\"\"Add revision metadata to the database.\n\n Parameters\n ----------\n publication_date : int\n Date when the dataset was published in YYYYMMDD format.\n revision : str\n Unique revision string.\n meta_json : str\n Metadata serialized as a JSON string.\n \"\"\"\n\n with self.new_cursor() as cursor:\n cursor.execute('''\n INSERT INTO\n `covid_hosp_meta` (\n `dataset_name`,\n `publication_date`,\n `revision_timestamp`,\n `metadata_json`,\n `acquisition_datetime`\n 
)\n VALUES\n (%s, %s, %s, %s, NOW())\n ''', (self.table_name, publication_date, revision, meta_json))\n\n def insert_dataset(self, publication_date, dataframe):\n \"\"\"Add a dataset to the database.\n\n Parameters\n ----------\n publication_date : int\n Date when the dataset was published in YYYYMMDD format.\n dataframe : pandas.DataFrame\n The dataset.\n \"\"\"\n dataframe_columns_and_types = [\n x for x in self.columns_and_types if x[0] in dataframe.columns\n ]\n num_columns = 2 + len(dataframe_columns_and_types) + len(self.additional_fields)\n value_placeholders = ', '.join(['%s'] * num_columns)\n columns = ', '.join(f'`{i[1]}`' for i in dataframe_columns_and_types + self.additional_fields)\n sql = f'INSERT INTO `{self.table_name}` (`id`, `{self.publication_col_name}`, {columns}) ' \\\n f'VALUES ({value_placeholders})'\n id_and_publication_date = (0, publication_date)\n with self.new_cursor() as cursor:\n for _, row in dataframe.iterrows():\n values = []\n for name, _, dtype in dataframe_columns_and_types:\n if isinstance(row[name], float) and math.isnan(row[name]):\n values.append(None)\n else:\n values.append(dtype(row[name]))\n cursor.execute(sql,\n id_and_publication_date +\n tuple(values) +\n tuple(i[0] for i in self.additional_fields))\n\n def get_max_issue(self):\n \"\"\"Fetch the most recent issue.\n\n This is used to bookend what updates we pull in from the HHS metadata.\n \"\"\"\n with self.new_cursor() as cursor:\n cursor.execute(f'''\n SELECT\n max(publication_date)\n from\n `covid_hosp_meta`\n WHERE\n dataset_name = \"{self.table_name}\"\n ''')\n for (result,) in cursor:\n if result is not None:\n return pd.Timestamp(str(result))\n return pd.Timestamp(\"1900/1/1\")\n" ]
[ [ "pandas.Timestamp" ] ]
ammunk/Bayesian-Non-Parametric-NMF
[ "f233c2278cb5c44cc14d7e2c14625cda36428a09" ]
[ "npbNMF/truncated_normal_functions/trandn.py" ]
[ "import sys\nimport numpy as np\nfrom scipy.special import erfc, erfcinv, expm1\n\ndef trandn(l,u):\n ## truncated normal generator\n # * efficient generator of a vector of length(l)=length(u)\n # from the standard multivariate normal distribution,\n # truncated over the region [l,u];\n # infinite values for 'u' and 'l' are accepted;\n # * Remark:\n # If you wish to simulate a random variable\n # 'Z' from the non-standard Gaussian N(m,s^2)\n # conditional on l<Z<u, then first simulate\n # X=trandn((l-m)/s,(u-m)/s) and set Z=m+s*X;\n\n # Reference:\n # Botev, Z. I. (2016). \"The normal law under linear restrictions:\n # simulation and estimation via minimax tilting\". Journal of the\n # Royal Statistical Society: Series B (Statistical Methodology).\n # doi:10.1111/rssb.12162\n\n l = np.asarray(l)\n u = np.asarray(u)\n l = l.ravel()\n u = u.ravel() # make 'l' and 'u' column vectors\n\n if len(l) != len(u):\n print('Truncation limits have to be vectors of the same length')\n sys.exit()\n\n x = np.empty(len(l))\n a = .66 # treshold for switching between methods\n\n # three cases to consider:\n # case 1: a<l<u\n\n I = l>a\n if np.any(I):\n tl=l[I]\n tu=u[I]\n x[I]=ntail(tl,tu)\n\n # case 2: l<u<-a\n\n J = u<-a\n if np.any(J):\n tl=-u[J]\n tu=-l[J]\n x[J] = -ntail(tl,tu)\n\n # case 3: otherwise use inverse transform or accept-reject\n\n I=~(I|J);\n if np.any(I):\n tl=l[I]\n tu=u[I]\n x[I]=tn(tl,tu)\n\n return x\n\n#################################################################\n\ndef ntail(l,u):\n\n # samples a column vector of length=length(l)=length(u)\n # from the standard multivariate normal distribution,\n # truncated over the region [l,u], where l>0 and\n # l and u are column vectors;\n # uses acceptance-rejection from Rayleigh distr.\n # similar to Marsaglia (1964);\n\n c = l**2/2\n n = len(l)\n f = expm1(c-u**2/2)\n\n x = c - np.log(1+np.random.uniform(size=n)*f); # sample using Rayleigh\n\n # keep list of rejected\n\n I = np.random.uniform(size=n)**2*x > c\n\n while np.any(I): # while there are rejections\n cy = c[I] # find the thresholds of rejected\n y = cy - np.log(1+np.random.uniform(size=len(cy))*f[I])\n idx = (np.random.uniform(size=len(cy))**2)*y < cy # accepted\n tmp = I.copy()\n I[tmp] = idx # make the list of elements in x to update\n x[I] = y[idx] # store the accepted\n I[tmp] = np.logical_not(idx) # remove accepted from list\n\n# while d>0: # while there are rejections\n# cy = c[I] # find the thresholds of rejected\n# y = cy - np.log(1+np.random.uniform(size=d)*f[I])\n# idx = (np.random.uniform(size=d)**2)*y < cy # accepted\n# x[I[idx]] = y[idx] # store the accepted\n# I = I[~idx] # remove accepted from list\n# d = len(I) # number of rejected\n\n x = np.sqrt(2*x); # this Rayleigh transform can be delayed till the end\n\n return x\n\n##################################################################\n\ndef tn(l,u):\n\n # samples a column vector of length=length(l)=length(u)\n # from the standard multivariate normal distribution,\n # truncated over the region [l,u], where -a<l<u<a for some\n # 'a' and l and u are column vectors;\n # uses acceptance rejection and inverse-transform method;\n tol = 2 # controls switch between methods\n\n # threshold can be tuned for maximum speed for each platform\n # case: abs(u-l)>tol, uses accept-reject from randn\n\n I = np.abs(u-l)>tol\n x = l\n\n if np.any(I):\n tl=l[I]\n tu=u[I]\n x[I]=trnd(tl,tu)\n\n # case: abs(u-l)<tol, uses inverse-transform\n\n I=~I\n if np.any(I):\n\n tl=l[I]\n tu=u[I]\n pl = erfc(tl/np.sqrt(2))/2\n pu = 
erfc(tu/np.sqrt(2))/2\n\n x[I] = np.sqrt(2)*erfcinv(2*(pl-(pl-pu)\n *np.random.uniform(size=len(tl))))\n\n return x\n\n#############################################################\n\ndef trnd(l,u):\n\n # uses acceptance rejection to simulate from truncated normal\n x=np.random.randn(len(l)) # sample normal\n\n # keep list of rejected\n I = np.logical_or(x<l ,x>u)\n while np.any(I): # while there are rejections\n ly = l[I] # find the thresholds of rejected\n uy = u[I]\n y = np.random.randn(len(ly))\n idx = np.logical_and(y>ly,y<uy) # accepted\n tmp = I.copy()\n I[tmp] = idx # make the list of elements in x to update\n x[I] = y[idx] # store the accepted\n I[tmp] = np.logical_not(idx) # remove accepted from list\n\n\n# d = len(I)\n# while d>0: # while there are rejections\n# ly = l[I] # find the thresholds of rejected\n# uy = u[I]\n# y = np.random.randn(len(ly))\n# idx = np.logical_and(y>ly,y<uy) # accepted\n# x[I[idx]] = y[idx] # store the accepted\n# I = I[~idx] # remove accepted from list\n# d = len(I) # number of rejected\n\n return x\n" ]
[ [ "numpy.logical_not", "numpy.logical_or", "numpy.asarray", "numpy.logical_and", "numpy.any", "numpy.random.uniform", "numpy.sqrt", "numpy.abs", "scipy.special.expm1" ] ]
RollyAngell/ComputerVision-ND-Udacity
[ "5d4254af6fcf9e36c2dc0f4369b35365e6f54ed6" ]
[ "Projects/P1_Facial_Keypoints/models.py" ]
[ "## TODO: define the convolutional neural network architecture\n## Making changes\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n# can use the below import should you choose to initialize the weights of your Net\nimport torch.nn.init as I\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n \n ## TODO: Define all the layers of this CNN, the only requirements are:\n ## 1. This network takes in a square (same width and height), grayscale image as input\n ## 2. It ends with a linear layer that represents the keypoints\n ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs\n \n # As an example, you've been given a convolutional layer, which you may (but don't have to) change:\n # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel\n self.conv1 = nn.Conv2d(1, 32, 5)\n self.conv2 = nn.Conv2d(32, 64, 3)\n self.conv3 = nn.Conv2d(64, 128, 3)\n self.conv4 = nn.Conv2d(128, 256, 2)\n \n ## Note that among the layers to add, consider including:\n # maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting\n self.pool = nn.MaxPool2d(kernel_size = 2, stride = 2)\n \n # Fully Connected Layers\n self.fc1 = nn.Linear(in_features = 36864, out_features = 1000) # The number of input gained by \"print(\"Flatten size: \", x.shape)\" in below\n self.fc2 = nn.Linear(in_features = 1000, out_features = 1000)\n self.fc3 = nn.Linear(in_features = 1000, out_features = 136) # the output 136 in order to having 2 for each of the 68 keypoint (x, y) pairs\n\n # Dropouts\n self.drop1 = nn.Dropout(p = 0.1)\n self.drop2 = nn.Dropout(p = 0.2)\n self.drop3 = nn.Dropout(p = 0.3)\n self.drop4 = nn.Dropout(p = 0.4)\n self.drop5 = nn.Dropout(p = 0.5)\n self.drop6 = nn.Dropout(p = 0.6)\n \n\n \n def forward(self, x):\n ## TODO: Define the feedforward behavior of this model\n ## x is the input image and, as an example, here you may choose to include a pool/conv step:\n ## x = self.pool(F.relu(self.conv1(x)))\n \n # First - Convolution + Activation + Pooling + Dropout\n x = self.conv1(x)\n x = F.relu(x)\n x = self.pool(x)\n x = self.drop1(x)\n #print(\"First size: \", x.shape)\n\n # Second - Convolution + Activation + Pooling + Dropout\n x = self.drop2(self.pool(F.relu(self.conv2(x))))\n #print(\"Second size: \", x.shape)\n\n # Third - Convolution + Activation + Pooling + Dropout\n x = self.drop3(self.pool(F.relu(self.conv3(x))))\n #print(\"Third size: \", x.shape)\n\n # Forth - Convolution + Activation + Pooling + Dropout\n x = self.drop4(self.pool(F.relu(self.conv4(x))))\n #print(\"Forth size: \", x.shape)\n\n # Flattening the layer\n x = x.view(x.size(0), -1)\n #print(\"Flatten size: \", x.shape)\n\n # First - Dense + Activation + Dropout\n x = self.drop5(F.relu(self.fc1(x)))\n #print(\"First dense size: \", x.shape)\n\n # Second - Dense + Activation + Dropout\n x = self.drop6(F.relu(self.fc2(x)))\n #print(\"Second dense size: \", x.shape)\n\n # Final Dense Layer\n x = self.fc3(x)\n #print(\"Final dense size: \", x.shape)\n \n # a modified x, having gone through all the layers of your model, should be returned\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.nn.Conv2d", "torch.nn.functional.relu" ] ]
hfathian/porespy
[ "8747e675ba8e6410d8448492c70f6911e0eb816a" ]
[ "porespy/networks/__snow_dual__.py" ]
[ "import numpy as np\nfrom porespy.networks import regions_to_network\nfrom porespy.networks import add_boundary_regions\nfrom porespy.networks import label_boundary_cells\nfrom porespy.networks import _net_dict\nfrom porespy.tools import pad_faces\nfrom porespy.filters import snow_partitioning\nfrom porespy.metrics import region_surface_areas, region_interface_areas\n\n\ndef snow_dual(im,\n voxel_size=1,\n boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],\n marching_cubes_area=False):\n r\"\"\"\n Analyzes an image that has been partitioned into void and solid regions\n and extracts the void and solid phase geometry as well as network\n connectivity.\n\n Parameters\n ----------\n im : ND-array\n Binary image in the Boolean form with True’s as void phase and False’s\n as solid phase. It can process the inverted configuration of the\n boolean image as well, but output labelling of phases will be inverted\n and solid phase properties will be assigned to void phase properties\n labels which will cause confusion while performing the simulation.\n voxel_size : scalar\n The resolution of the image, expressed as the length of one side of a\n voxel, so the volume of a voxel would be **voxel_size**-cubed. The\n default is 1, which is useful when overlaying the PNM on the original\n image since the scale of the image is alway 1 unit lenth per voxel.\n boundary_faces : list of strings\n Boundary faces labels are provided to assign hypothetical boundary\n nodes having zero resistance to transport process. For cubical\n geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,\n ‘front’ and ‘back’ face labels to assign boundary nodes. If no label is\n assigned then all six faces will be selected as boundary nodes\n automatically which can be trimmed later on based on user requirements.\n marching_cubes_area : bool\n If ``True`` then the surface area and interfacial area between regions\n will be using the marching cube algorithm. This is a more accurate\n representation of area in extracted network, but is quite slow, so\n it is ``False`` by default. The default method simply counts voxels\n so does not correctly account for the voxelated nature of the images.\n\n Returns\n -------\n A dictionary containing all the void and solid phase size data, as well as\n the network topological information. The dictionary names use the OpenPNM\n convention (i.e. 'pore.coords', 'throat.conns') so it may be converted\n directly to an OpenPNM network object using the ``update`` command.\n * ``net``: A dictionary containing all the void and solid phase size data,\n as well as the network topological information. The dictionary names\n use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it\n may be converted directly to an OpenPNM network object using the\n ``update`` command.\n * ``im``: The binary image of the void space\n * ``dt``: The combined distance transform of the image\n * ``regions``: The void and solid space partitioned into pores and solids\n phases using a marker based watershed with the peaks found by the\n SNOW Algorithm.\n\n References\n ----------\n [1] Gostick, J. \"A versatile and efficient network extraction algorithm\n using marker-based watershed segmenation\". Phys. Rev. E 96, 023307 (2017)\n\n [2] Khan, ZA et al. \"Dual network extraction algorithm to investigate\n multiple transport processes in porous materials: Image-based modeling\n of pore and grain-scale processes. 
Computers and Chemical Engineering.\n 123(6), 64-77 (2019)\n\n \"\"\"\n # -------------------------------------------------------------------------\n # SNOW void phase\n pore_regions = snow_partitioning(im, return_all=True)\n # SNOW solid phase\n solid_regions = snow_partitioning(~im, return_all=True)\n # -------------------------------------------------------------------------\n # Combined Distance transform of two phases.\n pore_dt = pore_regions.dt\n solid_dt = solid_regions.dt\n dt = pore_dt + solid_dt\n # Calculates combined void and solid regions for dual network extraction\n pore_regions = pore_regions.regions\n solid_regions = solid_regions.regions\n pore_region = pore_regions*im\n solid_region = solid_regions*~im\n solid_num = np.amax(pore_regions)\n solid_region = solid_region + solid_num\n solid_region = solid_region * ~im\n regions = pore_region + solid_region\n b_num = np.amax(regions)\n # -------------------------------------------------------------------------\n # Boundary Conditions\n regions = add_boundary_regions(regions=regions, faces=boundary_faces)\n # -------------------------------------------------------------------------\n # Padding distance transform to extract geometrical properties\n dt = pad_faces(im=dt, faces=boundary_faces)\n # -------------------------------------------------------------------------\n # Extract void,solid and throat information from image\n net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)\n # -------------------------------------------------------------------------\n # Extract marching cube surface area and interfacial area of regions\n if marching_cubes_area:\n areas = region_surface_areas(regions=regions)\n interface_area = region_interface_areas(regions=regions, areas=areas,\n voxel_size=voxel_size)\n net['pore.surface_area'] = areas * voxel_size**2\n net['throat.area'] = interface_area.area\n # -------------------------------------------------------------------------\n # Find void to void, void to solid and solid to solid throat conns\n loc1 = net['throat.conns'][:, 0] < solid_num\n loc2 = net['throat.conns'][:, 1] >= solid_num\n loc3 = net['throat.conns'][:, 1] < b_num\n pore_solid_labels = loc1 * loc2 * loc3\n\n loc4 = net['throat.conns'][:, 0] >= solid_num\n loc5 = net['throat.conns'][:, 0] < b_num\n solid_solid_labels = loc4 * loc2 * loc5 * loc3\n\n loc6 = net['throat.conns'][:, 1] < solid_num\n pore_pore_labels = loc1 * loc6\n\n loc7 = net['throat.conns'][:, 1] >= b_num\n boundary_throat_labels = loc5 * loc7\n\n solid_labels = ((net['pore.label'] > solid_num) * ~\n (net['pore.label'] > b_num))\n boundary_labels = net['pore.label'] > b_num\n b_sa = np.zeros(len(boundary_labels[boundary_labels == 1.0]))\n # -------------------------------------------------------------------------\n # Calculates void interfacial area that connects with solid and vice versa\n p_conns = net['throat.conns'][:, 0][pore_solid_labels]\n ps = net['throat.area'][pore_solid_labels]\n p_sa = np.bincount(p_conns, ps)\n s_conns = net['throat.conns'][:, 1][pore_solid_labels]\n s_pa = np.bincount(s_conns, ps)\n s_pa = np.trim_zeros(s_pa) # remove pore surface area labels\n p_solid_surf = np.concatenate((p_sa, s_pa, b_sa))\n # -------------------------------------------------------------------------\n # Calculates interfacial area using marching cube method\n if marching_cubes_area:\n ps_c = net['throat.area'][pore_solid_labels]\n p_sa_c = np.bincount(p_conns, ps_c)\n s_pa_c = np.bincount(s_conns, ps_c)\n s_pa_c = np.trim_zeros(s_pa_c) # remove pore 
surface area labels\n p_solid_surf = np.concatenate((p_sa_c, s_pa_c, b_sa))\n # -------------------------------------------------------------------------\n # Adding additional information of dual network\n net['pore.solid_void_area'] = (p_solid_surf * voxel_size**2)\n net['throat.void'] = pore_pore_labels\n net['throat.interconnect'] = pore_solid_labels\n net['throat.solid'] = solid_solid_labels\n net['throat.boundary'] = boundary_throat_labels\n net['pore.void'] = net['pore.label'] <= solid_num\n net['pore.solid'] = solid_labels\n net['pore.boundary'] = boundary_labels\n # -------------------------------------------------------------------------\n # label boundary cells\n net = label_boundary_cells(network=net, boundary_faces=boundary_faces)\n # -------------------------------------------------------------------------\n # assign out values to dummy dict\n\n temp = _net_dict(net)\n temp.im = im.copy()\n temp.dt = dt\n temp.regions = regions\n return temp\n" ]
[ [ "numpy.bincount", "numpy.trim_zeros", "numpy.amax", "numpy.concatenate" ] ]
phenix2017/dl-ift6135-h19
[ "8a720744c094e903901d36a35edca99f8faf6a85" ]
[ "Assignment_1/Practice/kaggle/model_functions.py" ]
[ "import csv\nimport datetime\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport tqdm\nimport time\n\nimport utils\n\n\ndef train(args, model, train_loader, optimizer, epoch, start_time, log_file,\n train_epochs, train_losses, train_accuracy, valid_epochs, valid_losses, valid_accuracy, lr_change):\n\n model.train()\n\n for batch_idx, (data, target) in enumerate(train_loader):\n\n # Get data\n data, target = data.to(args.device), target.to(args.device)\n\n # Get model output\n optimizer.zero_grad()\n output = model(data)\n\n # Calc loss\n loss = nn.NLLLoss()(output, target)\n\n # Backprop\n loss.backward()\n optimizer.step()\n\n # Log, Plot\n if (epoch*len(train_loader) + batch_idx) % args.log_interval == 0:\n\n # Check loss, accuracy\n train_epochs.append(epoch + batch_idx/len(train_loader))\n train_losses.append(loss.item())\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n train_accuracy.append(pred.eq(target.view_as(pred)).sum().item()/len(pred))\n\n # Get time elapsed\n curr_time = time.time()\n curr_time_str, elapsed_str = utils.get_time_str(start_time, curr_time)\n\n # Log\n log = '[{}] : Elapsed [{}]: Epoch: {} [{}/{} ({:.0f}%)]\\tTRAIN Loss: {:.6f}\\tAccuracy: {:.4f}\\n'.format(\n curr_time_str, elapsed_str, epoch, batch_idx, len(train_loader), 100.*batch_idx/len(train_loader),\n train_losses[-1], train_accuracy[-1])\n print(log)\n log_file.write(log)\n log_file.flush()\n utils.mem_check()\n utils.make_plots(args.out_path, train_epochs, train_losses, train_accuracy, valid_epochs, valid_losses, valid_accuracy, lr_change)\n\n # Save models\n if (epoch*len(train_loader) + batch_idx) % args.model_save_interval == 0:\n model_name = os.path.join(args.out_path, 'model_epoch_{:04d}_batch_{:05d}_of_{:05d}.pth'.format(epoch, batch_idx, len(train_loader)))\n print(\"Saving model\", model_name)\n torch.save(model.state_dict(), model_name)\n\n\ndef test(args, model, test_loader, epoch, start_time, log_file,\n train_epochs, train_losses, train_accuracy, valid_epochs, valid_losses, valid_accuracy, lr_change):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n counter = 0\n for data, target in test_loader:\n data, target = data.to(args.device), target.to(args.device)\n output = model(data)\n test_loss += nn.NLLLoss(reduction='sum')(output, target).item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n counter += len(pred)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= counter\n test_accuracy = correct/counter\n\n valid_epochs.append(epoch)\n valid_losses.append(test_loss)\n valid_accuracy.append(test_accuracy)\n\n # Get time elapsed\n curr_time = time.time()\n curr_time_str, elapsed_str = utils.get_time_str(start_time, curr_time)\n\n log = '\\n[{}] : Elapsed [{}] : Epoch {}:\\tVALIDATION Loss: {:.4f}, Accuracy: {:.4f} ({}/{})\\n'.format(\n curr_time_str, elapsed_str, epoch,\n test_loss, test_accuracy, correct, counter)\n print(log)\n log_file.write(log)\n log_file.flush()\n\n utils.make_plots(args.out_path, train_epochs, train_losses, train_accuracy, valid_epochs, valid_losses, valid_accuracy, lr_change)\n\n\ndef eval(args, model, eval_loader):\n model.eval()\n preds = []\n\n # Predict\n with torch.no_grad():\n for data, target in tqdm.tqdm(eval_loader):\n data, target = data.to(args.device), target.to(args.device)\n output = model(data)\n preds += output.argmax(dim=1).tolist() # get the index of the max log-probability\n\n # 
Read image names\n ids = [int(i.split('.')[0]) for i in sorted(os.listdir(os.path.join(args.data_path, 'test')))]\n\n # Sort ids\n sort_order = np.argsort(ids)\n ids = [ids[i] for i in sort_order]\n\n # Sort preds and make labels\n labels = ['Cat', 'Dog']\n pred_labels = [labels[preds[i]] for i in sort_order]\n\n # Write csv\n csv_file_name = os.path.join(args.out_path, 'submission_' + os.path.basename(os.path.dirname(args.pth)) + '_' + os.path.splitext(os.path.basename(args.pth))[0] + '.csv')\n print(\"Writing\", csv_file_name)\n with open(csv_file_name, mode='w') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(['id', 'label'])\n for i, l in zip(ids, pred_labels):\n csv_writer.writerow([str(i), l])\n" ]
[ [ "torch.nn.NLLLoss", "torch.no_grad", "numpy.argsort" ] ]
zeeshanahmad10809/sst-deep-tensorflow
[ "4d21b3a3d90d92eade1e09b7dc50c19b56a0b43c" ]
[ "testing.py" ]
[ "from utils import load_saved_model\nfrom sst.dataset import SSTContainer\nimport numpy as np\nfrom sklearn.metrics import (\n precision_score,\n recall_score,\n f1_score,\n confusion_matrix,\n accuracy_score,\n)\nfrom loguru import logger\n\n\ndef test(root, binary, filename=\"\"):\n model = load_saved_model(filename=filename)\n\n dataset_container = SSTContainer(root=root, binary=binary)\n test_X, test_Y = dataset_container.data(\"test\")\n\n pred_Y = model.predict(test_X)\n pred_Y = np.argmax(pred_Y, axis=1)\n\n accuracy_value = accuracy_score(test_Y, pred_Y)\n precision_value = precision_score(test_Y, pred_Y, average=\"macro\")\n recall_value = recall_score(test_Y, pred_Y, average=\"macro\")\n f1_score_value = f1_score(\n test_Y,\n pred_Y,\n average=\"macro\",\n )\n cm = confusion_matrix(test_Y, pred_Y, labels=np.sort(np.unique(np.array(test_Y))))\n\n logger.info(\n f\"accuracy: {accuracy_value}, precision: {precision_value}, recall: {recall_value}, f1-score: {f1_score_value}\"\n )\n logger.info(f\"confusion matrix: \\n {cm}\")\n" ]
[ [ "numpy.array", "sklearn.metrics.accuracy_score", "numpy.argmax", "sklearn.metrics.precision_score", "sklearn.metrics.f1_score", "sklearn.metrics.recall_score" ] ]
atpk/Computer-Networks-Lab
[ "5df51cc2ef38ebde56922799db507d6d059bcb7e" ]
[ "4/PythonCodesForGraphs/q3_1.py" ]
[ "import matplotlib.pyplot as plt\r\n\r\nx = [103.0, 96.2, 213.0, 238.0, 259.0, 282.0, 73.2, 226.0, 96.6, 271.0, 191.0, 216.0, 237.0, 103.0, 82.3, 101.0, 229.0, 249.0, 271.0, 192.0, 215.0, 238.0, 92.1, 72.0, 102.0, 89.2, 249.0, 170.0, 295.0, 216.0, 130.0, 88.0, 186.0, 206.0, 226.0, 249.0, 273.0, 193.0, 216.0, 98.7, 92.0, 694.0, 206.0, 229.0, 153.0, 174.0, 110.0, 834.0, 241.0, 85.3, 84.9, 80.7, 84.5, 254.0, 102.0, 197.0, 94.2, 242.0, 265.0, 157.0, 198.0, 230.0, 87.2, 174.0, 189.0, 83.0, 94.5, 101.0, 288.0, 99.6, 82.7, 254.0, 96.4, 197.0, 221.0, 157.0, 266.0, 188.0, 90.9, 165.0, 257.0, 177.0, 96.7, 224.0, 247.0, 94.8, 293.0, 102.0, 237.0, 260.0, 285.0, 206.0, 228.0, 103.0, 274.0, 197.0, 219.0, 241.0, 269.0, 185.0, 211.0, 91.3, 254.0, 90.6, 154.0, 138.0, 169.0, 84.4, 123.0, 82.6, 167.0, 90.7, 77.8, 177.0, 146.0, 143.0, 94.3, 99.0, 122.0, 91.6, 155.0, 328.0, 203.0, 193.0, 146.0, 271.0, 191.0, 85.9, 132.0, 261.0, 82.3, 90.7, 79.4, 159.0, 128.0, 82.3, 104.0, 240.0, 162.0, 184.0, 98.3, 233.0, 257.0, 279.0, 202.0, 151.0, 161.0, 895.0, 437.0, 81.7, 153.0, 88.0, 184.0, 619.0, 232.0, 91.5, 74.1, 89.7, 86.9, 250.0, 167.0, 84.1, 215.0, 240.0, 96.9, 183.0, 208.0, 232.0, 256.0, 180.0, 202.0, 226.0, 148.0, 171.0, 195.0, 219.0, 93.1, 266.0, 290.0, 103.0, 135.0, 169.0, 281.0, 201.0, 78.2, 248.0, 158.0, 194.0, 103.0, 95.3, 156.0, 184.0, 208.0, 94.6, 256.0, 104.0, 97.8, 224.0, 153.0, 272.0, 194.0, 219.0, 140.0, 92.9, 92.6, 213.0, 235.0, 258.0, 180.0, 95.5, 328.0, 146.0, 70.6, 86.4, 216.0, 241.0, 262.0, 143.0, 83.2, 92.2, 256.0, 232.0, 200.0, 224.0, 248.0, 172.0, 194.0, 93.4, 80.8, 160.0, 90.4, 208.0, 233.0, 257.0, 177.0, 204.0, 226.0, 250.0, 274.0, 195.0, 219.0, 244.0, 166.0, 188.0, 177.0, 238.0, 260.0, 285.0, 140.0, 84.5, 72.8, 76.9, 299.0, 221.0, 354.0, 95.9, 191.0, 215.0, 240.0, 101.0, 285.0, 309.0, 232.0, 255.0, 99.0, 201.0, 98.4, 92.4, 171.0, 192.0, 91.6, 172.0, 93.1, 102.0, 208.0, 130.0, 257.0, 182.0, 203.0, 226.0, 250.0, 275.0, 197.0, 220.0, 244.0, 268.0, 190.0, 316.0, 99.3, 231.0, 183.0, 92.1, 231.0, 257.0, 122.0, 200.0, 91.3, 91.5, 102.0, 73.6, 90.5, 240.0, 264.0, 74.4, 99.2, 233.0, 156.0, 105.0, 192.0, 175.0, 181.0, 154.0, 168.0, 183.0, 245.0, 88.2, 189.0, 213.0, 239.0, 260.0, 180.0, 205.0, 230.0, 97.8, 71.4, 97.9, 75.5, 85.5, 269.0, 192.0, 214.0, 84.4, 264.0, 183.0, 85.1, 231.0, 255.0, 72.4, 200.0, 82.7, 247.0, 373.0, 194.0, 216.0, 240.0, 265.0, 185.0, 105.0, 233.0, 258.0, 179.0, 203.0, 79.6, 99.9, 82.9, 86.2, 219.0, 243.0, 165.0, 189.0, 214.0, 238.0, 95.2, 70.2, 80.2, 230.0, 76.9, 177.0, 199.0, 223.0, 246.0, 98.7, 194.0, 162.0, 136.0, 103.0, 127.0, 96.4, 230.0, 98.7, 276.0, 90.4, 91.9, 89.7, 264.0, 185.0, 208.0, 91.8, 253.0, 168.0, 85.5, 219.0, 242.0, 778.0, 86.4, 102.0, 96.3, 98.6, 153.0, 301.0, 531.0, 552.0, 166.0, 190.0, 213.0, 86.2, 261.0, 485.0, 88.8, 82.3, 61.7, 82.4, 84.6, 206.0, 389.0, 76.0, 284.0, 101.0, 102.0, 864.0, 92.1, 191.0, 68.5, 239.0, 260.0, 179.0, 202.0, 326.0, 350.0, 170.0, 78.3, 81.0, 96.8, 93.6, 181.0, 128.0, 224.0, 87.9, 87.0, 196.0, 89.3, 103.0, 261.0, 182.0, 206.0, 228.0, 251.0, 580.0, 194.0, 97.0, 242.0, 770.0, 93.7, 92.9, 113.0, 253.0, 173.0, 99.6, 219.0, 242.0, 265.0, 83.5, 208.0, 538.0, 101.0, 80.2, 197.0, 160.0, 243.0, 93.7, 98.5, 92.4, 91.0, 151.0, 276.0, 196.0, 222.0, 109.0, 102.0, 185.0, 80.2, 333.0, 252.0, 378.0, 196.0, 74.3, 242.0, 265.0, 186.0, 210.0, 233.0, 258.0, 150.0, 90.1, 135.0, 86.4, 86.3, 410.0, 86.4, 82.6, 81.4, 181.0, 202.0, 97.9, 248.0, 272.0, 193.0, 101.0, 339.0, 158.0, 180.0, 79.8, 226.0, 86.8, 273.0, 84.3, 144.0, 239.0, 72.9, 87.3, 208.0, 229.0, 
253.0, 87.6, 299.0, 83.4, 243.0, 76.7, 288.0, 171.0, 234.0, 254.0, 88.3, 198.0, 222.0, 555.0, 268.0, 188.0, 210.0, 232.0, 155.0, 93.2, 97.4, 221.0, 245.0, 269.0, 189.0, 212.0, 234.0, 155.0, 89.3, 304.0, 147.0, 85.5, 113.0, 414.0, 292.0, 236.0, 86.5, 93.1, 201.0, 278.0, 247.0, 270.0, 194.0, 214.0, 79.7, 88.5, 92.6, 204.0, 225.0, 90.8, 134.0, 193.0, 218.0, 238.0, 260.0, 282.0, 204.0, 227.0, 250.0, 170.0, 67.2, 216.0, 239.0, 175.0, 153.0, 205.0, 228.0, 95.7, 273.0, 297.0, 217.0, 343.0, 161.0, 286.0, 104.0, 86.4, 254.0, 277.0, 158.0, 92.7, 243.0, 163.0, 186.0, 209.0, 232.0, 255.0, 176.0, 199.0, 73.3, 84.2, 165.0, 85.9, 212.0, 336.0, 168.0, 280.0, 200.0, 223.0, 246.0, 102.0, 103.0, 95.0, 69.3, 259.0, 158.0, 201.0, 224.0, 250.0, 158.0, 91.0, 214.0, 238.0, 260.0, 181.0, 204.0, 228.0, 251.0, 98.5, 193.0, 77.4, 95.3, 158.0, 187.0, 205.0, 227.0, 250.0, 224.0, 195.0, 218.0, 137.0, 365.0, 286.0, 206.0, 229.0, 255.0, 276.0, 76.3, 107.0, 98.3, 265.0, 287.0, 208.0, 230.0, 98.1, 174.0, 82.0, 91.8, 139.0, 265.0, 185.0, 208.0, 232.0, 129.0, 114.0, 152.0, 222.0, 242.0, 188.0, 186.0, 209.0, 233.0, 68.9, 175.0, 301.0, 222.0, 248.0, 98.6, 188.0, 97.1, 161.0, 94.4, 78.3, 98.7, 224.0, 247.0, 88.4, 94.0, 213.0, 237.0, 260.0, 285.0, 203.0, 226.0, 146.0, 86.3, 79.0, 82.9, 97.3, 164.0, 161.0, 87.3, 94.3, 96.8, 101.0, 99.2, 92.9, 148.0, 112.0, 85.6, 172.0, 378.0, 442.0, 90.1, 321.0, 81.3, 87.1, 265.0, 75.5, 85.6, 333.0, 94.2, 273.0, 173.0, 98.2, 94.8, 78.9, 189.0, 213.0, 235.0, 259.0, 149.0, 93.5, 1117.0, 130.0, 85.4, 84.0, 200.0, 389.0, 83.0, 106.0, 98.9, 198.0, 234.0, 76.2, 282.0, 200.0, 190.0, 248.0, 167.0, 190.0, 99.7, 236.0, 258.0, 179.0, 130.0, 224.0, 84.7, 475.0, 171.0, 81.0, 89.5, 262.0, 80.7, 411.0, 97.7, 93.7, 92.7, 194.0, 218.0, 240.0, 160.0, 98.0, 88.5, 151.0, 254.0, 174.0, 198.0, 221.0, 243.0, 164.0, 291.0, 210.0, 234.0, 126.0, 155.0, 111.0, 93.6, 98.4, 268.0, 189.0, 91.4, 237.0, 258.0, 282.0, 202.0, 88.7, 249.0, 168.0, 190.0, 164.0, 236.0, 258.0, 179.0, 93.0, 326.0, 248.0, 270.0, 190.0, 216.0, 238.0, 88.4, 87.4, 146.0, 95.0, 252.0, 275.0, 298.0, 220.0, 140.0, 268.0, 188.0, 210.0, 233.0, 256.0, 180.0, 128.0, 102.0, 247.0, 98.7, 190.0, 208.0, 170.0, 156.0, 178.0, 78.5, 326.0, 150.0, 168.0, 80.6, 214.0, 103.0, 97.3, 101.0, 201.0, 225.0, 94.7, 247.0, 192.0, 219.0, 136.0, 261.0, 289.0, 208.0, 209.0, 253.0, 93.2, 200.0, 221.0, 80.2, 99.6, 290.0, 212.0, 235.0, 259.0, 181.0, 102.0, 88.7, 81.5, 109.0, 194.0, 217.0, 241.0, 164.0, 185.0, 208.0, 231.0, 160.0, 134.0, 301.0, 222.0, 247.0, 92.6, 293.0, 96.7, 237.0, 261.0, 183.0, 208.0, 229.0, 98.8, 277.0, 177.0, 222.0, 245.0, 269.0, 191.0, 216.0, 240.0, 263.0, 72.5, 86.6, 272.0, 242.0, 139.0, 104.0, 123.0, 202.0, 165.0, 188.0, 212.0, 236.0, 260.0, 110.0, 99.4, 229.0, 253.0, 381.0, 118.0, 222.0, 141.0, 164.0, 291.0, 99.8, 235.0, 259.0, 181.0, 205.0, 229.0, 113.0, 93.5, 197.0, 221.0, 245.0, 270.0, 191.0, 215.0, 239.0, 161.0, 185.0, 311.0, 97.2, 258.0, 177.0, 202.0, 226.0, 90.9, 239.0, 194.0, 100.0, 231.0, 265.0, 184.0, 97.9, 103.0, 254.0, 278.0, 199.0, 223.0, 247.0, 170.0, 105.0, 216.0, 240.0, 155.0, 187.0, 210.0, 234.0, 156.0, 179.0, 89.2, 94.3, 250.0, 274.0, 84.1, 221.0, 243.0, 165.0, 189.0, 214.0, 241.0, 262.0, 84.3, 206.0, 229.0, 152.0, 80.3, 197.0, 221.0]\r\n\r\nplt.hist(x, rwidth=0.5, color=\"red\")\r\nplt.xlabel(\"Ping latency (in ms)\", color=\"blue\")\r\nplt.ylabel(\"Frequency\", color=\"blue\")\r\nplt.title(\"Distribution of ping latency for ping -n <ip>\", color=\"magenta\")\r\nplt.show()" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.hist", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
SpencerCompton/EconML
[ "3e66b9507b43f8af291009d26186283fa4bb4ced" ]
[ "econml/_ortho_learner.py" ]
[ "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\n\nOrthogonal Machine Learning is a general approach to estimating causal models\nby formulating them as minimizers of some loss function that depends on\nauxiliary regression models that also need to be estimated from data. The\nclass in this module implements the general logic in a very versatile way\nso that various child classes can simply instantiate the appropriate models\nand save a lot of code repetition.\n\nReferences\n----------\n\nDylan Foster, Vasilis Syrgkanis (2019). Orthogonal Statistical Learning.\n ACM Conference on Learning Theory. https://arxiv.org/abs/1901.09036\n\nXinkun Nie, Stefan Wager (2017). Quasi-Oracle Estimation of Heterogeneous Treatment Effects.\n https://arxiv.org/abs/1712.04912\n\nChernozhukov et al. (2017). Double/debiased machine learning for treatment and structural parameters.\n The Econometrics Journal. https://arxiv.org/abs/1608.00060\n\n\"\"\"\n\nimport copy\nfrom collections import namedtuple\nfrom warnings import warn\nfrom abc import abstractmethod\n\nimport numpy as np\nfrom sklearn.base import clone\nfrom sklearn.model_selection import KFold, StratifiedKFold, check_cv\nfrom sklearn.preprocessing import (FunctionTransformer, LabelEncoder,\n OneHotEncoder)\nfrom sklearn.utils import check_random_state\n\nfrom ._cate_estimator import (BaseCateEstimator, LinearCateEstimator,\n TreatmentExpansionMixin)\nfrom .inference import BootstrapInference\nfrom .utilities import (_deprecate_positional, _EncoderWrapper, check_input_arrays,\n cross_product, filter_none_kwargs,\n inverse_onehot, ndim, reshape, shape, transpose)\n\n\ndef _crossfit(model, folds, *args, **kwargs):\n \"\"\"\n General crossfit based calculation of nuisance parameters.\n\n Parameters\n ----------\n model : object\n An object that supports fit and predict. Fit must accept all the args\n and the keyword arguments kwargs. Similarly predict must all accept\n all the args as arguments and kwards as keyword arguments. The fit\n function estimates a model of the nuisance function, based on the input\n data to fit. Predict evaluates the fitted nuisance function on the input\n data to predict.\n folds : list of tuples or None\n The crossfitting fold structure. Every entry in the list is a tuple whose\n first element are the training indices of the args and kwargs data and\n the second entry are the test indices. If the union of the test indices\n is not the full set of all indices, then the remaining nuisance parameters\n for the missing indices have value NaN. If folds is None, then cross fitting\n is not performed; all indices are used for both model fitting and prediction\n args : a sequence of (numpy matrices or None)\n Each matrix is a data variable whose first index corresponds to a sample\n kwargs : a sequence of key-value args, with values being (numpy matrices or None)\n Each keyword argument is of the form Var=x, with x a numpy array. Each\n of these arrays are data variables. The model fit and predict will be\n called with signature: `model.fit(*args, **kwargs)` and\n `model.predict(*args, **kwargs)`. Key-value arguments that have value\n None, are ommitted from the two calls. So all the args and the non None\n kwargs variables must be part of the models signature.\n\n Returns\n -------\n nuisances : tuple of numpy matrices\n Each entry in the tuple is a nuisance parameter matrix. 
Each row i-th in the\n matrix corresponds to the value of the nuisance parameter for the i-th input\n sample.\n model_list : list of objects of same type as input model\n The cloned and fitted models for each fold. Can be used for inspection of the\n variability of the fitted models across folds.\n fitted_inds : np array1d\n The indices of the arrays for which the nuisance value was calculated. This\n corresponds to the union of the indices of the test part of each fold in\n the input fold list.\n scores : tuple of list of float or None\n The out-of-sample model scores for each nuisance model\n\n Examples\n --------\n\n .. testcode::\n\n import numpy as np\n from sklearn.model_selection import KFold\n from sklearn.linear_model import Lasso\n from econml._ortho_learner import _crossfit\n class Wrapper:\n def __init__(self, model):\n self._model = model\n def fit(self, X, y, W=None):\n self._model.fit(X, y)\n return self\n def predict(self, X, y, W=None):\n return self._model.predict(X)\n np.random.seed(123)\n X = np.random.normal(size=(5000, 3))\n y = X[:, 0] + np.random.normal(size=(5000,))\n folds = list(KFold(2).split(X, y))\n model = Lasso(alpha=0.01)\n nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, X, y, W=y, Z=None)\n\n >>> nuisance\n (array([-1.105728... , -1.537566..., -2.451827... , ..., 1.106287...,\n -1.829662..., -1.782273...]),)\n >>> model_list\n [<Wrapper object at 0x...>, <Wrapper object at 0x...>]\n >>> fitted_inds\n array([ 0, 1, 2, ..., 4997, 4998, 4999])\n\n \"\"\"\n model_list = []\n fitted_inds = []\n calculate_scores = hasattr(model, 'score')\n\n # remove None arguments\n kwargs = filter_none_kwargs(**kwargs)\n\n if folds is None: # skip crossfitting\n model_list.append(clone(model, safe=False))\n model_list[0].fit(*args, **kwargs)\n nuisances = model_list[0].predict(*args, **kwargs)\n scores = model_list[0].score(*args, **kwargs) if calculate_scores else None\n\n if not isinstance(nuisances, tuple):\n nuisances = (nuisances,)\n if not isinstance(scores, tuple):\n scores = (scores,)\n\n # scores entries should be lists of scores, so make each entry a singleton list\n scores = tuple([s] for s in scores)\n\n first_arr = args[0] if args else kwargs.items()[0][1]\n return nuisances, model_list, np.arange(first_arr.shape[0]), scores\n\n for idx, (train_idxs, test_idxs) in enumerate(folds):\n model_list.append(clone(model, safe=False))\n if len(np.intersect1d(train_idxs, test_idxs)) > 0:\n raise AttributeError(\"Invalid crossfitting fold structure.\" +\n \"Train and test indices of each fold must be disjoint.\")\n if len(np.intersect1d(fitted_inds, test_idxs)) > 0:\n raise AttributeError(\"Invalid crossfitting fold structure. 
The same index appears in two test folds.\")\n fitted_inds = np.concatenate((fitted_inds, test_idxs))\n\n args_train = tuple(var[train_idxs] if var is not None else None for var in args)\n args_test = tuple(var[test_idxs] if var is not None else None for var in args)\n\n kwargs_train = {key: var[train_idxs] for key, var in kwargs.items()}\n kwargs_test = {key: var[test_idxs] for key, var in kwargs.items()}\n\n model_list[idx].fit(*args_train, **kwargs_train)\n\n nuisance_temp = model_list[idx].predict(*args_test, **kwargs_test)\n\n if not isinstance(nuisance_temp, tuple):\n nuisance_temp = (nuisance_temp,)\n\n if idx == 0:\n nuisances = tuple([np.full((args[0].shape[0],) + nuis.shape[1:], np.nan) for nuis in nuisance_temp])\n\n for it, nuis in enumerate(nuisance_temp):\n nuisances[it][test_idxs] = nuis\n\n if calculate_scores:\n score_temp = model_list[idx].score(*args_test, **kwargs_test)\n\n if not isinstance(score_temp, tuple):\n score_temp = (score_temp,)\n\n if idx == 0:\n scores = tuple([] for _ in score_temp)\n\n for it, score in enumerate(score_temp):\n scores[it].append(score)\n\n return nuisances, model_list, np.sort(fitted_inds.astype(int)), (scores if calculate_scores else None)\n\n\nCachedValues = namedtuple('_CachedValues', ['nuisances',\n 'Y', 'T', 'X', 'W', 'Z', 'sample_weight', 'sample_var', 'groups'])\n\n\nclass _OrthoLearner(TreatmentExpansionMixin, LinearCateEstimator):\n \"\"\"\n Base class for all orthogonal learners. This class is a parent class to any method that has\n the following architecture:\n\n 1. The CATE :math:`\\\\theta(X)` is the minimizer of some expected loss function\n\n .. math ::\n \\\\mathbb{E}[\\\\ell(V; \\\\theta(X), h(V))]\n\n where :math:`V` are all the random variables and h is a vector of nuisance functions. Alternatively,\n the class would also work if :math:`\\\\theta(X)` is the solution to a set of moment equations that\n also depend on nuisance functions :math:`h`.\n\n 2. To estimate :math:`\\\\theta(X)` we first fit the h functions and calculate :math:`h(V_i)` for each sample\n :math:`i` in a crossfit manner:\n\n - Let (F1_train, F1_test), ..., (Fk_train, Fk_test) be any KFold partition\n of the data, where Ft_train, Ft_test are subsets of indices of the input samples and such that\n F1_train is disjoint from F1_test. The sets F1_test, ..., Fk_test form an incomplete partition\n of all the input indices, i.e. they are be disjoint and their union could potentially be a subset of\n all input indices. For instance, in a time series split F0_train could be a prefix of the data and\n F0_test the suffix. Typically, these folds will be created\n by a KFold split, i.e. if S1, ..., Sk is any partition of the data, then Ft_train is the set of\n all indices except St and Ft_test = St. If the union of the Ft_test is not all the data, then only the\n subset of the data in the union of the Ft_test sets will be used in the final stage.\n\n - Then for each t in [1, ..., k]\n\n - Estimate a model :math:`\\\\hat{h}_t` for :math:`h` using Ft_train\n - Evaluate the learned :math:`\\\\hat{h}_t` model on the data in Ft_test and use that value\n as the nuisance value/vector :math:`\\\\hat{U}_i=\\\\hat{h}(V_i)` for the indices i in Ft_test\n\n 3. Estimate the model for :math:`\\\\theta(X)` by minimizing the empirical (regularized) plugin loss on\n the subset of indices for which we have a nuisance value, i.e. the union of {F1_test, ..., Fk_test}:\n\n .. 
math ::\n \\\\mathbb{E}_n[\\\\ell(V; \\\\theta(X), \\\\hat{h}(V))]\\\n = \\\\frac{1}{n} \\\\sum_{i=1}^n \\\\sum_i \\\\ell(V_i; \\\\theta(X_i), \\\\hat{U}_i)\n\n The method is a bit more general in that the final step does not need to be a loss minimization step.\n The class takes as input a model for fitting an estimate of the nuisance h given a set of samples\n and predicting the value of the learned nuisance model on any other set of samples. It also\n takes as input a model for the final estimation, that takes as input the data and their associated\n estimated nuisance values from the first stage and fits a model for the CATE :math:`\\\\theta(X)`. Then\n at predict time, the final model given any set of samples of the X variable, returns the estimated\n :math:`\\\\theta(X)`.\n\n The method essentially implements all the crossfit and plugin logic, so that any child classes need\n to only implement the appropriate `model_nuisance` and `model_final` and essentially nothing more.\n It also implements the basic preprocessing logic behind the expansion of discrete treatments into\n one-hot encodings.\n\n Parameters\n ----------\n discrete_treatment: bool\n Whether the treatment values should be treated as categorical, rather than continuous, quantities\n\n discrete_instrument: bool\n Whether the instrument values should be treated as categorical, rather than continuous, quantities\n\n categories: 'auto' or list\n The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).\n The first category will be treated as the control treatment.\n\n cv: int, cross-validation generator or an iterable\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if the treatment is discrete\n :class:`~sklearn.model_selection.StratifiedKFold` is used, else,\n :class:`~sklearn.model_selection.KFold` is used\n (with a random shuffle in either case).\n\n Unless an iterable is used, we call `split(concat[Z, W, X], T)` to generate the splits. If all\n Z, W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.\n\n random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None\n If int, random_state is the seed used by the random number generator;\n If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;\n If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used\n by :mod:`np.random<numpy.random>`.\n\n mc_iters: int, optional (default=None)\n The number of times to rerun the first stage models to reduce the variance of the nuisances.\n\n mc_agg: {'mean', 'median'}, optional (default='mean')\n How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of\n cross-fitting.\n\n Examples\n --------\n\n The example code below implements a very simple version of the double machine learning\n method on top of the :class:`._OrthoLearner` class, for expository purposes.\n For a more elaborate implementation of a Double Machine Learning child class of the class\n :class:`._OrthoLearner` check out :class:`.DML`\n and its child classes:\n\n .. 
testcode::\n\n import numpy as np\n from sklearn.linear_model import LinearRegression\n from econml._ortho_learner import _OrthoLearner\n class ModelNuisance:\n def __init__(self, model_t, model_y):\n self._model_t = model_t\n self._model_y = model_y\n def fit(self, Y, T, W=None):\n self._model_t.fit(W, T)\n self._model_y.fit(W, Y)\n return self\n def predict(self, Y, T, W=None):\n return Y - self._model_y.predict(W), T - self._model_t.predict(W)\n class ModelFinal:\n def __init__(self):\n return\n def fit(self, Y, T, W=None, nuisances=None):\n Y_res, T_res = nuisances\n self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res)\n return self\n def predict(self, X=None):\n return self.model.coef_[0]\n def score(self, Y, T, W=None, nuisances=None):\n Y_res, T_res = nuisances\n return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1)))**2)\n class OrthoLearner(_OrthoLearner):\n def _gen_ortho_learner_model_nuisance(self):\n return ModelNuisance(LinearRegression(), LinearRegression())\n def _gen_ortho_learner_model_final(self):\n return ModelFinal()\n np.random.seed(123)\n X = np.random.normal(size=(100, 3))\n y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.1, size=(100,))\n est = OrthoLearner(cv=2, discrete_treatment=False, discrete_instrument=False,\n categories='auto', random_state=None)\n est.fit(y, X[:, 0], W=X[:, 1:])\n\n >>> est.score_\n 0.00756830...\n >>> est.const_marginal_effect()\n 1.02364992...\n >>> est.effect()\n array([1.023649...])\n >>> est.effect(T0=0, T1=10)\n array([10.236499...])\n >>> est.score(y, X[:, 0], W=X[:, 1:])\n 0.00727995...\n >>> est.ortho_learner_model_final_.model\n LinearRegression(fit_intercept=False)\n >>> est.ortho_learner_model_final_.model.coef_\n array([1.023649...])\n\n The following example shows how to do double machine learning with discrete treatments, using\n the _OrthoLearner:\n\n .. 
testcode::\n\n class ModelNuisance:\n def __init__(self, model_t, model_y):\n self._model_t = model_t\n self._model_y = model_y\n def fit(self, Y, T, W=None):\n self._model_t.fit(W, np.matmul(T, np.arange(1, T.shape[1]+1)))\n self._model_y.fit(W, Y)\n return self\n def predict(self, Y, T, W=None):\n return Y - self._model_y.predict(W), T - self._model_t.predict_proba(W)[:, 1:]\n class ModelFinal:\n def __init__(self):\n return\n def fit(self, Y, T, W=None, nuisances=None):\n Y_res, T_res = nuisances\n self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res)\n return self\n def predict(self):\n # theta needs to be of dimension (1, d_t) if T is (n, d_t)\n return np.array([[self.model.coef_[0]]])\n def score(self, Y, T, W=None, nuisances=None):\n Y_res, T_res = nuisances\n return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1)))**2)\n from sklearn.linear_model import LogisticRegression\n class OrthoLearner(_OrthoLearner):\n def _gen_ortho_learner_model_nuisance(self):\n return ModelNuisance(LogisticRegression(solver='lbfgs'), LinearRegression())\n def _gen_ortho_learner_model_final(self):\n return ModelFinal()\n np.random.seed(123)\n W = np.random.normal(size=(100, 3))\n import scipy.special\n T = np.random.binomial(1, scipy.special.expit(W[:, 0]))\n y = T + W[:, 0] + np.random.normal(0, 0.01, size=(100,))\n est = OrthoLearner(cv=2, discrete_treatment=True, discrete_instrument=False,\n categories='auto', random_state=None)\n est.fit(y, T, W=W)\n\n >>> est.score_\n 0.00673015...\n >>> est.const_marginal_effect()\n array([[1.008401...]])\n >>> est.effect()\n array([1.008401...])\n >>> est.score(y, T, W=W)\n 0.00310431...\n >>> est.ortho_learner_model_final_.model.coef_[0]\n 1.00840170...\n\n Attributes\n ----------\n models_nuisance_: list of objects of type(model_nuisance)\n A list of instances of the model_nuisance object. Each element corresponds to a crossfitting\n fold and is the model instance that was fitted for that training fold. If `mc_iters` is > 1,\n then the fitted models from the last monte carlo iteration are being stored and returned.\n TODO. Enable returning all fitted nuisance models from all monte carlo iterations.\n ortho_learner_model_final_: object of type(model_final)\n An instance of the model_final object that was fitted after calling fit.\n score_ : float or array of floats\n If the model_final has a score method, then `score_` contains the outcome of the final model\n score when evaluated on the fitted nuisances from the first stage. Represents goodness of fit,\n of the final CATE model.\n nuisance_scores_ : tuple of lists of floats or None\n The out-of-sample scores from training each nuisance model\n \"\"\"\n\n def __init__(self, *,\n discrete_treatment, discrete_instrument, categories, cv, random_state,\n n_splits='raise', mc_iters=None, mc_agg='mean'):\n self.cv = cv\n self.n_splits = n_splits\n self.discrete_treatment = discrete_treatment\n self.discrete_instrument = discrete_instrument\n self.random_state = random_state\n self.categories = categories\n self.mc_iters = mc_iters\n self.mc_agg = mc_agg\n super().__init__()\n\n @abstractmethod\n def _gen_ortho_learner_model_nuisance(self):\n \"\"\" Must return a fresh instance of a nuisance model\n\n Returns\n -------\n model_nuisance: estimator\n The estimator for fitting the nuisance function. 
Must implement\n `fit` and `predict` methods that both have signatures::\n\n model_nuisance.fit(Y, T, X=X, W=W, Z=Z,\n sample_weight=sample_weight, sample_var=sample_var)\n model_nuisance.predict(Y, T, X=X, W=W, Z=Z,\n sample_weight=sample_weight, sample_var=sample_var)\n\n In fact we allow for the model method signatures to skip any of the keyword arguments\n as long as the class is always called with the omitted keyword argument set to ``None``.\n This can be enforced in child classes by re-implementing the fit and the various effect\n methods. If ``discrete_treatment=True``, then the input ``T`` to both above calls will be the\n one-hot encoding of the original input ``T``, excluding the first column of the one-hot.\n\n If the estimator also provides a score method with the same arguments as fit, it will be used to\n calculate scores during training.\n \"\"\"\n pass\n\n @abstractmethod\n def _gen_ortho_learner_model_final(self):\n \"\"\" Must return a fresh instance of a final model\n\n Returns\n -------\n model_final: estimator for fitting the response residuals to the features and treatment residuals\n Must implement `fit` and `predict` methods that must have signatures::\n\n model_final.fit(Y, T, X=X, W=W, Z=Z, nuisances=nuisances,\n sample_weight=sample_weight, sample_var=sample_var)\n model_final.predict(X=X)\n\n Predict, should just take the features X and return the constant marginal effect. In fact we allow\n for the model method signatures to skip any of the keyword arguments as long as the class is always\n called with the omitted keyword argument set to ``None``. Moreover, the predict function of the final\n model can take no argument if the class is always called with ``X=None``. This can be enforced in child\n classes by re-implementing the fit and the various effect methods. 
If ``discrete_treatment=True``,\n then the input ``T`` to both above calls will be the one-hot encoding of the original input ``T``,\n excluding the first column of the one-hot.\n \"\"\"\n pass\n\n def _check_input_dims(self, Y, T, X=None, W=None, Z=None, *other_arrays):\n assert shape(Y)[0] == shape(T)[0], \"Dimension mis-match!\"\n for arr in [X, W, Z, *other_arrays]:\n assert (arr is None) or (arr.shape[0] == Y.shape[0]), \"Dimension mismatch\"\n self._d_x = X.shape[1:] if X is not None else None\n self._d_w = W.shape[1:] if W is not None else None\n self._d_z = Z.shape[1:] if Z is not None else None\n\n def _check_fitted_dims(self, X):\n if X is None:\n assert self._d_x is None, \"X was not None when fitting, so can't be none for score or effect\"\n else:\n assert self._d_x == X.shape[1:], \"Dimension mis-match of X with fitted X\"\n\n def _check_fitted_dims_w_z(self, W, Z):\n if W is None:\n assert self._d_w is None, \"W was not None when fitting, so can't be none for score\"\n else:\n assert self._d_w == W.shape[1:], \"Dimension mis-match of W with fitted W\"\n\n if Z is None:\n assert self._d_z is None, \"Z was not None when fitting, so can't be none for score\"\n else:\n assert self._d_z == Z.shape[1:], \"Dimension mis-match of Z with fitted Z\"\n\n def _subinds_check_none(self, var, inds):\n return var[inds] if var is not None else None\n\n def _strata(self, Y, T, X=None, W=None, Z=None,\n sample_weight=None, sample_var=None, groups=None,\n cache_values=False, only_final=False, check_input=True):\n if self.discrete_instrument:\n Z = LabelEncoder().fit_transform(np.ravel(Z))\n\n if self.discrete_treatment:\n enc = LabelEncoder()\n T = enc.fit_transform(np.ravel(T))\n if self.discrete_instrument:\n return T + Z * len(enc.classes_)\n else:\n return T\n elif self.discrete_instrument:\n return Z\n else:\n return None\n\n def _prefit(self, Y, T, *args, only_final=False, **kwargs):\n\n # generate an instance of the final model\n self._ortho_learner_model_final = self._gen_ortho_learner_model_final()\n if not only_final:\n # generate an instance of the nuisance model\n self._ortho_learner_model_nuisance = self._gen_ortho_learner_model_nuisance()\n\n super()._prefit(Y, T, *args, **kwargs)\n\n @_deprecate_positional(\"X, W, and Z should be passed by keyword only. In a future release \"\n \"we will disallow passing X, W, and Z by position.\", ['X', 'W', 'Z'])\n @BaseCateEstimator._wrap_fit\n def fit(self, Y, T, X=None, W=None, Z=None, *, sample_weight=None, sample_var=None, groups=None,\n cache_values=False, inference=None, only_final=False, check_input=True):\n \"\"\"\n Estimate the counterfactual model from data, i.e. 
estimates function :math:`\\\\theta(\\\\cdot)`.\n\n Parameters\n ----------\n Y: (n, d_y) matrix or vector of length n\n Outcomes for each sample\n T: (n, d_t) matrix or vector of length n\n Treatments for each sample\n X: optional (n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional (n, d_w) matrix or None (Default=None)\n Controls for each sample\n Z: optional (n, d_z) matrix or None (Default=None)\n Instruments for each sample\n sample_weight: optional (n,) vector or None (Default=None)\n Weights for each samples\n sample_var: optional (n,) vector or None (Default=None)\n Sample variance for each sample\n groups: (n,) vector, optional\n All rows corresponding to the same group will be kept together during splitting.\n If groups is not None, the cv argument passed to this class's initializer\n must support a 'groups' argument to its split method.\n cache_values: bool, default False\n Whether to cache the inputs and computed nuisances, which will allow refitting a different final model\n inference: string, :class:`.Inference` instance, or None\n Method for performing inference. This estimator supports 'bootstrap'\n (or an instance of :class:`.BootstrapInference`).\n only_final: bool, defaul False\n Whether to fit the nuisance models or use the existing cached values\n Note. This parameter is only used internally by the `refit` method and should not be exposed\n publicly by overwrites of the `fit` method in public classes.\n check_input: bool, default True\n Whether to check if the input is valid\n Note. This parameter is only used internally by the `refit` method and should not be exposed\n publicly by overwrites of the `fit` method in public classes.\n\n Returns\n -------\n self : object\n \"\"\"\n self._random_state = check_random_state(self.random_state)\n if check_input:\n Y, T, X, W, Z, sample_weight, sample_var, groups = check_input_arrays(\n Y, T, X, W, Z, sample_weight, sample_var, groups)\n self._check_input_dims(Y, T, X, W, Z, sample_weight, sample_var, groups)\n\n if not only_final:\n\n if self.discrete_treatment:\n categories = self.categories\n if categories != 'auto':\n categories = [categories] # OneHotEncoder expects a 2D array with features per column\n self._one_hot_encoder = OneHotEncoder(categories=categories, sparse=False, drop='first')\n self._one_hot_encoder.fit(reshape(T, (-1, 1)))\n self._d_t = (len(self._one_hot_encoder.categories_[0]) - 1,)\n self.transformer = FunctionTransformer(\n func=_EncoderWrapper(self._one_hot_encoder).encode,\n validate=False)\n else:\n self.transformer = None\n\n if self.discrete_instrument:\n z_enc = LabelEncoder()\n z_ohe = OneHotEncoder(categories='auto', sparse=False, drop='first')\n z_ohe.fit(reshape(z_enc.fit_transform(Z.ravel()), (-1, 1)))\n self.z_transformer = FunctionTransformer(\n func=_EncoderWrapper(z_ohe, z_enc).encode,\n validate=False)\n else:\n self.z_transformer = None\n\n all_nuisances = []\n fitted_inds = None\n\n for _ in range(self.mc_iters or 1):\n nuisances, new_inds = self._fit_nuisances(Y, T, X, W, Z, sample_weight=sample_weight, groups=groups)\n all_nuisances.append(nuisances)\n if fitted_inds is None:\n fitted_inds = new_inds\n elif not np.array_equal(fitted_inds, new_inds):\n raise AttributeError(\"Different indices were fit by different folds, so they cannot be aggregated\")\n\n if self.mc_iters is not None:\n if self.mc_agg == 'mean':\n nuisances = tuple(np.mean(nuisance_mc_variants, axis=0)\n for nuisance_mc_variants in zip(*all_nuisances))\n elif self.mc_agg == 'median':\n 
nuisances = tuple(np.median(nuisance_mc_variants, axis=0)\n for nuisance_mc_variants in zip(*all_nuisances))\n else:\n raise ValueError(\n \"Parameter `mc_agg` must be one of {'mean', 'median'}. Got {}\".format(self.mc_agg))\n\n Y, T, X, W, Z, sample_weight, sample_var = (self._subinds_check_none(arr, fitted_inds)\n for arr in (Y, T, X, W, Z, sample_weight, sample_var))\n nuisances = tuple([self._subinds_check_none(nuis, fitted_inds) for nuis in nuisances])\n self._cached_values = CachedValues(nuisances=nuisances,\n Y=Y, T=T, X=X, W=W, Z=Z,\n sample_weight=sample_weight,\n sample_var=sample_var,\n groups=groups) if cache_values else None\n else:\n nuisances = self._cached_values.nuisances\n # _d_t is altered by fit nuisances to what prefit does. So we need to perform the same\n # alteration even when we only want to fit_final.\n if self.transformer is not None:\n self._d_t = (len(self._one_hot_encoder.categories_[0]) - 1,)\n\n self._fit_final(Y=Y, T=T, X=X, W=W, Z=Z,\n nuisances=nuisances,\n sample_weight=sample_weight,\n sample_var=sample_var)\n\n return self\n\n @property\n def _illegal_refit_inference_methods(self):\n return (BootstrapInference,)\n\n def refit_final(self, inference=None):\n \"\"\"\n Estimate the counterfactual model using a new final model specification but with cached first stage results.\n\n In order for this to succeed, ``fit`` must have been called with ``cache_values=True``. This call\n will only refit the final model. This call we use the current setting of any parameters that change the\n final stage estimation. If any parameters that change how the first stage nuisance estimates\n has also been changed then it will have no effect. You need to call fit again to change the\n first stage estimation results.\n\n Parameters\n ----------\n inference : inference method, optional\n The string or object that represents the inference method\n\n Returns\n -------\n self : object\n This instance\n \"\"\"\n assert self._cached_values, \"Refit can only be called if values were cached during the original fit\"\n if isinstance(self._get_inference(inference), self._illegal_refit_inference_methods):\n raise ValueError(\"The chosen inference method does not allow only for model final re-fitting.\")\n cached = self._cached_values\n kwargs = filter_none_kwargs(\n Y=cached.Y, T=cached.T, X=cached.X, W=cached.W, Z=cached.Z,\n sample_weight=cached.sample_weight, sample_var=cached.sample_var,\n groups=cached.groups,\n )\n _OrthoLearner.fit(self, **kwargs,\n cache_values=True, inference=inference, only_final=True, check_input=False)\n return self\n\n def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):\n\n # use a binary array to get stratified split in case of discrete treatment\n stratify = self.discrete_treatment or self.discrete_instrument\n strata = self._strata(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight, groups=groups)\n if strata is None:\n strata = T # always safe to pass T as second arg to split even if we're not actually stratifying\n\n if self.discrete_treatment:\n T = self.transformer.transform(reshape(T, (-1, 1)))\n\n if self.discrete_instrument:\n Z = self.z_transformer.transform(reshape(Z, (-1, 1)))\n\n if self.cv == 1: # special case, no cross validation\n folds = None\n else:\n splitter = check_cv(self.cv, [0], classifier=stratify)\n # if check_cv produced a new KFold or StratifiedKFold object, we need to set shuffle and random_state\n # TODO: ideally, we'd also infer whether we need a GroupKFold (if groups are passed)\n # 
however, sklearn doesn't support both stratifying and grouping (see\n # https://github.com/scikit-learn/scikit-learn/issues/13621), so for now the user needs to supply\n # their own object that supports grouping if they want to use groups.\n if splitter != self.cv and isinstance(splitter, (KFold, StratifiedKFold)):\n splitter.shuffle = True\n splitter.random_state = self._random_state\n\n all_vars = [var if np.ndim(var) == 2 else var.reshape(-1, 1) for var in [Z, W, X] if var is not None]\n to_split = np.hstack(all_vars) if all_vars else np.ones((T.shape[0], 1))\n\n if groups is not None:\n if isinstance(splitter, (KFold, StratifiedKFold)):\n raise TypeError(\"Groups were passed to fit while using a KFold or StratifiedKFold splitter. \"\n \"Instead you must initialize this object with a splitter that can handle groups.\")\n folds = splitter.split(to_split, strata, groups=groups)\n else:\n folds = splitter.split(to_split, strata)\n\n nuisances, fitted_models, fitted_inds, scores = _crossfit(self._ortho_learner_model_nuisance, folds,\n Y, T, X=X, W=W, Z=Z,\n sample_weight=sample_weight, groups=groups)\n self._models_nuisance = fitted_models\n self.nuisance_scores_ = scores\n return nuisances, fitted_inds\n\n def _fit_final(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None):\n self._ortho_learner_model_final.fit(Y, T, **filter_none_kwargs(X=X, W=W, Z=Z,\n nuisances=nuisances,\n sample_weight=sample_weight,\n sample_var=sample_var))\n self.score_ = None\n if hasattr(self._ortho_learner_model_final, 'score'):\n self.score_ = self._ortho_learner_model_final.score(Y, T, **filter_none_kwargs(X=X, W=W, Z=Z,\n nuisances=nuisances,\n sample_weight=sample_weight,\n sample_var=sample_var))\n\n def const_marginal_effect(self, X=None):\n X, = check_input_arrays(X)\n self._check_fitted_dims(X)\n if X is None:\n return self._ortho_learner_model_final.predict()\n else:\n return self._ortho_learner_model_final.predict(X)\n const_marginal_effect.__doc__ = LinearCateEstimator.const_marginal_effect.__doc__\n\n def const_marginal_effect_interval(self, X=None, *, alpha=0.1):\n X, = check_input_arrays(X)\n self._check_fitted_dims(X)\n return super().const_marginal_effect_interval(X, alpha=alpha)\n const_marginal_effect_interval.__doc__ = LinearCateEstimator.const_marginal_effect_interval.__doc__\n\n def const_marginal_effect_inference(self, X=None):\n X, = check_input_arrays(X)\n self._check_fitted_dims(X)\n return super().const_marginal_effect_inference(X)\n const_marginal_effect_inference.__doc__ = LinearCateEstimator.const_marginal_effect_inference.__doc__\n\n def effect_interval(self, X=None, *, T0=0, T1=1, alpha=0.1):\n X, T0, T1 = check_input_arrays(X, T0, T1)\n self._check_fitted_dims(X)\n return super().effect_interval(X, T0=T0, T1=T1, alpha=alpha)\n effect_interval.__doc__ = LinearCateEstimator.effect_interval.__doc__\n\n def effect_inference(self, X=None, *, T0=0, T1=1):\n X, T0, T1 = check_input_arrays(X, T0, T1)\n self._check_fitted_dims(X)\n return super().effect_inference(X, T0=T0, T1=T1)\n effect_inference.__doc__ = LinearCateEstimator.effect_inference.__doc__\n\n def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None):\n \"\"\"\n Score the fitted CATE model on a new data set. 
Generates nuisance parameters\n for the new data set based on the fitted nuisance models created at fit time.\n It uses the mean prediction of the models fitted by the different crossfit folds.\n Then calls the score function of the model_final and returns the calculated score.\n The model_final model must have a score method.\n\n If model_final does not have a score method, then it raises an :exc:`.AttributeError`\n\n Parameters\n ----------\n Y: (n, d_y) matrix or vector of length n\n Outcomes for each sample\n T: (n, d_t) matrix or vector of length n\n Treatments for each sample\n X: optional (n, d_x) matrix or None (Default=None)\n Features for each sample\n W: optional (n, d_w) matrix or None (Default=None)\n Controls for each sample\n Z: optional (n, d_z) matrix or None (Default=None)\n Instruments for each sample\n sample_weight: optional(n,) vector or None (Default=None)\n Weights for each samples\n\n Returns\n -------\n score : float or (array of float)\n The score of the final CATE model on the new data. Same type as the return\n type of the model_final.score method.\n \"\"\"\n if not hasattr(self._ortho_learner_model_final, 'score'):\n raise AttributeError(\"Final model does not have a score method!\")\n Y, T, X, W, Z = check_input_arrays(Y, T, X, W, Z)\n self._check_fitted_dims(X)\n self._check_fitted_dims_w_z(W, Z)\n X, T = self._expand_treatments(X, T)\n if self.z_transformer is not None:\n Z = self.z_transformer.transform(Z)\n n_splits = len(self._models_nuisance)\n for idx, mdl in enumerate(self._models_nuisance):\n nuisance_temp = mdl.predict(Y, T, **filter_none_kwargs(X=X, W=W, Z=Z))\n if not isinstance(nuisance_temp, tuple):\n nuisance_temp = (nuisance_temp,)\n\n if idx == 0:\n nuisances = [np.zeros((n_splits,) + nuis.shape) for nuis in nuisance_temp]\n\n for it, nuis in enumerate(nuisance_temp):\n nuisances[it][idx] = nuis\n\n for it in range(len(nuisances)):\n nuisances[it] = np.mean(nuisances[it], axis=0)\n\n return self._ortho_learner_model_final.score(Y, T, nuisances=nuisances,\n **filter_none_kwargs(X=X, W=W, Z=Z, sample_weight=sample_weight))\n\n @property\n def ortho_learner_model_final_(self):\n if not hasattr(self, '_ortho_learner_model_final'):\n raise AttributeError(\"Model is not fitted!\")\n return self._ortho_learner_model_final\n\n @property\n def models_nuisance_(self):\n if not hasattr(self, '_models_nuisance'):\n raise AttributeError(\"Model is not fitted!\")\n return self._models_nuisance\n\n #######################################################\n # These should be removed once `n_splits` is deprecated\n #######################################################\n\n @property\n def n_splits(self):\n return self.cv\n\n @n_splits.setter\n def n_splits(self, value):\n if value != 'raise':\n warn(\"Parameter `n_splits` has been deprecated and will be removed in the next version. \"\n \"Use parameter `cv` instead.\")\n self.cv = value\n" ]
[ [ "numpy.concatenate", "numpy.full", "sklearn.preprocessing.LabelEncoder", "numpy.array_equal", "numpy.zeros", "numpy.median", "numpy.ones", "numpy.mean", "sklearn.utils.check_random_state", "numpy.arange", "numpy.ravel", "numpy.intersect1d", "numpy.ndim", "numpy.hstack", "sklearn.base.clone", "sklearn.preprocessing.OneHotEncoder", "sklearn.model_selection.check_cv" ] ]
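The code field above holds econml's `_OrthoLearner` base class: nuisance models are cross-fitted across folds, their residuals are aggregated, and a final model is fit on those residuals. As orientation only, here is a minimal sketch of that residual-on-residual recipe using plain scikit-learn with 2-fold cross-fitting; the synthetic `W`, `T`, `y` and the names below are illustrative assumptions, not part of the stored code:

    import numpy as np
    from sklearn.linear_model import LinearRegression, LogisticRegression
    from sklearn.model_selection import KFold

    rng = np.random.default_rng(123)
    W = rng.normal(size=(200, 3))                        # controls
    T = rng.binomial(1, 1.0 / (1.0 + np.exp(-W[:, 0])))  # binary treatment
    y = T + W[:, 0] + rng.normal(0, 0.01, size=200)      # outcome, true effect = 1

    T_res, y_res = np.zeros_like(y), np.zeros_like(y)
    for train, test in KFold(n_splits=2, shuffle=True, random_state=0).split(W):
        # first stage: nuisance models fit on one fold, evaluated on the other
        t_model = LogisticRegression().fit(W[train], T[train])
        y_model = LinearRegression().fit(W[train], y[train])
        T_res[test] = T[test] - t_model.predict_proba(W[test])[:, 1]
        y_res[test] = y[test] - y_model.predict(W[test])

    # second stage: regress outcome residuals on treatment residuals
    theta_hat = LinearRegression(fit_intercept=False).fit(
        T_res.reshape(-1, 1), y_res).coef_[0]
    print(theta_hat)  # close to 1.0, mirroring est.const_marginal_effect() above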
cericdahl/SBCcode
[ "90a7841a5c1208d64f71a332289d9005a011aa21" ]
[ "UserCode/bressler/coincidentbubblescintillation.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 6 09:37:12 2019\n\n@author: bressler\n\"\"\"\n\nimport SBCcode as sbc\nfrom os import listdir\nfrom os.path import isfile,join\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nfrom gaincalc import get_gain\nimport pulse_integrator as pi\nfrom runlistscatalogue import *\nimport gc\n\nCONVERSION_TO_CHARGE = (125.0/128)*(1/50.0)*(1/1000.0)*(1/(1.602e-19))\n\ndef trig_difference(runs):\n pmtdiffs = []\n pmtnobubdiffs = []\n dubbubdiffs = []\n for run in runs:\n print(run)\n runrawpath = \"/bluearc/storage/SBC-17-data/%s/\"%run\n runreconpath = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/%s/\"%run\n acousticfilename = runreconpath+\"AcousticAnalysis_%s.bin\"%run\n getbubfile = \"/coupp/data/home/coupp/HumanGetBub_output_SBC-17/HumanGetBub_%s.bin\"%run\n a = sbc.DataHandling.ReadBinary.ReadBlock(acousticfilename)\n c = sbc.DataHandling.ReadBinary.ReadBlock(getbubfile)\n eventn = c[\"ev\"]\n bubt0 = a[\"bubble_t0\"]\n #events = [evnt for evnt in listdir(runrawpath) if not isfile(join(runrawpath,evnt))]\n #for x in events:\n for x in range(101):\n gc.collect()\n try:\n e = sbc.DataHandling.GetSBCEvent.GetEvent(runrawpath,x,'fastDAQ','PMTtraces')\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n dcam = np.diff(cgate)\n fdt = e[\"fastDAQ\"][\"time\"]\n camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]\n pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n tracetimes = pmttracetime - pmtalign\n \n at0 = bubt0[int(x),0]\n for t in (tracetimes-at0):\n if t<0 and t>-500e-6:\n lastCamOff = 0\n for k in range(len(camOffTimes)):\n if t+at0 > camOffTimes[k]:\n lastCamOff = camOffTimes[k]\n elif t+at0 < camOffTimes[k]:\n break\n if t+at0-lastCamOff > 25e-6:\n if list(eventn).count(int(x)) == 2:\n pmtdiffs.append(t)\n elif list(eventn).count(int(x)) == 1:\n pmtnobubdiffs.append(t)\n elif list(eventn).count(int(x)) == 3:\n print(3)\n elif list(eventn).count(int(x)) == 4:\n dubbubdiffs.append(t)\n except:\n print(\"Last event: %d\"%(x-1))\n break\n\n return [pmtnobubdiffs,pmtdiffs,dubbubdiffs]\n \n\ndef zdependence(runs, m):\n #m = 1e7\n\n #m=get_gain(\"/bluearc/storage/SBC-17-data/\",runs[0])\n \n Ncoinc = [0,0]\n ntotcoinc = [0,0]\n totevents = [0,0]\n totbub = [0,0]\n diffs = [[],[]]\n goodz=[[],[]]\n pmtdiffs = [[],[]]\n coincspec = [[],[]]\n\n allxyzfname = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/SimpleXYZ_all.bin\"\n xyzf = sbc.DataHandling.ReadBinary.ReadBlock(allxyzfname)\n for run in runs:\n print(\"zdependence processing run \"+run)\n indices = [i for i,x in enumerate(xyzf[\"runid\"]) if str(x[0])+\"_\"+str(x[1]) == run]\n runposreco = {\"ev\":[xyzf[\"ev\"][indices]],\"x\":[xyzf[\"bubX\"][indices]],\n \"y\":[xyzf[\"bubY\"][indices]],\"z\":[xyzf[\"bubZ\"][indices]]}\n runrawpath = \"/bluearc/storage/SBC-17-data/%s/\"%run\n runreconpath = \"/pnfs/coupp/persistent/grid_output/SBC-17/output/%s/\"%run\n acousticfilename = runreconpath+\"AcousticAnalysis_%s.bin\"%run\n a = sbc.DataHandling.ReadBinary.ReadBlock(acousticfilename)\n #c = sbc.DataHandling.ReadBinary.ReadBlock(getbubfile)\n bubt0 = a[\"bubble_t0\"]\n events = [evnt for evnt in listdir(runrawpath) if not isfile(join(runrawpath,evnt))]\n for j in [0,1]:\n with open(\"/nashome/b/bressler/sbcoutput/%s_PMTmatching_ch%s.txt\"%(run,str(j)),\"w+\") as f, 
open(\"/nashome/b/bressler/sbcoutput/%s_muonCoincidences.txt\"%run,'w+') as fmu:\n f.write(\"run event PMT_t0_index PMT_t0_-at0_us PMT_t0 at0 phe z\\n\")\n fmu.write(\"run event phe\\n\")\n for x in events:\n yes = False\n if int(x)<len(runposreco[\"z\"][0])-1:\n yes=True\n totevents[j] += 1\n if yes and not np.isnan(runposreco[\"z\"][0][int(x)]):\n totbub[j] += 1\n e = sbc.DataHandling.GetSBCEvent.GetEvent(runrawpath,x)\n veto = e[\"fastDAQ\"][\"VetoCoinc\"]\n cgate = e[\"fastDAQ\"][\"CAMgate\"]\n dcam = np.diff(cgate)\n fdt = e[\"fastDAQ\"][\"time\"]\n camOffTimes = [fdt[i] for i in range(len(dcam)) if dcam[i] > 0.5]\n muon = False\n pmttracetime = e[\"PMTtraces\"][\"t0_sec\"][:,0]+e[\"PMTtraces\"][\"t0_frac\"][:,0]\n d=sbc.AnalysisModules.PMTfastDAQalignment.PMTandFastDAQalignment(e)\n pmtalign = d[\"PMT_trigt0_sec\"]+d[\"PMT_trigt0_frac\"]\n tracetimes = pmttracetime - pmtalign\n at0 = bubt0[int(x),j]\n i=0 # to match the indexing of the pre-made code I had 1???\n candidate = 0\n candidate_time = 0\n candidate_PMTtime = 0\n candidate_index = 0\n for t in (tracetimes-at0):\n # loop through every PMT trace for the event\n \n if t<-150e-6 and t>-600e-6: \n # if the trace time is within 500 microsec before acoustic t0\n if max(veto)>0.1:\n if fdt[list(veto).index(max(veto))]-at0<0 and fdt[list(veto).index(max(veto))]-at0>-500e-6:\n print(\"Veto Coincidence: event \"+run+\"-\"+str(x))\n muon = True\n \n #lastCamOff = 0\n #for k in range(len(camOffTimes)):\n # if t+at0 > camOffTimes[k]:\n # lastCamOff = camOffTimes[k]\n # elif t+at0 < camOffTimes[k]:\n # break\n #if t+at0-lastCamOff > 25e-6:\n # if the trace time is more than 25 microseconds away from a camera gate rise\n #but not doing it this way anymore because we'll check for the LED being on later during the merge\n ntotcoinc[j]+=1\n pmtdiffs.append(t)\n \n #take abs to get positive area:\n trace = np.fabs(e[\"PMTtraces\"][\"traces\"][i][0]) \n #if ch0 saturated, stitch in low res channel:\n if max(trace) == 128:\n trace = pi.stitchTraces(trace,np.fabs(e[\"PMTtraces\"][\"traces\"][i][1]))\n dt = e[\"PMTtraces\"][\"dt\"][i][0]\n \n #subtract baseline:\n #Actually this gets done in pulse_integrator anyway\n #baseline = np.mean(trace[0:50])\n #trace -= baseline \n \n #integrate and convert to phe:\n [phe,n,totInt,pktimes] = pi.SBC_pulse_integrator_bressler(trace,dt) \n if phe != None:\n phe /= m\n #keep track of largest candidate:\n if phe > candidate:\n candidate = phe\n candidate_time = t\n candidate_PMTtime = t+at0\n candidate_index = i\n #else:\n # candidate = -1.0\n i+=1\n #i.e. 
if there is a candidate PMT trace with area greater than zero\n if candidate > 0:\n Ncoinc[j] += 1\n ind = candidate_index\n pmtt = candidate_PMTtime\n diffs[j].append(candidate_time)\n goodz[j].append(runposreco[\"z\"][0][int(x)])\n coincspec[j].append(candidate)\n f.write(\"%s %s %d %f %f %f %f %f\\n\"%(run,x,ind,\n candidate_time*1e6,\n pmtt,at0,candidate,\n runposreco[\"z\"][0][int(x)]))\n if muon:\n fmu.write(\"%s %s %f\\n\"%(run,x,candidate))\n gc.collect()\n print(\"run \"+run+\" file %s written\"%str(j))\n #pmtdiffs.append(candidate_times[candidates.index(max(candidates))])\n print(\"total number of events: \"+str(totevents))\n print(\"total number of bubbles: \"+str(totbub)) \n print(\"total coincident triggers: \"+str(ntotcoinc))\n print(\"total coincident bubbles with scintillation greater than 0phe: \"+str(Ncoinc))\n print(\"fraction of bubbles with a coincident scintillation signal greater than 0phe: \"+str(sum(Ncoinc)*100/sum(totbub))+\"%\")\n \n return [goodz,diffs,coincspec,Ncoinc,ntotcoinc,totevents,totbub]\n \n\n \ndef main():\n bgruns = bgOct10and11\n \n biberuns = BiBeSept23and24\n \n \"\"\"\n ch1 files made for:\n \"20171003_4\",\"20171003_5\",\n \"20171004_0\",\"20171004_1\",\"20171004_2\",\"20171004_3\",\"20171004_4\",\n \"20171005_0\",\"20171005_1\",\"20171005_2\",\"20171005_3\",\"20171005_4\",\n \"20171006_0\",\"20171006_1\"\n \n ch0 files made for:\n \"20171003_4\",\"20171003_5\",\n \"20171004_0\",\"20171004_1\",\"20171004_2\",\"20171004_3\",\"20171004_4\",\n \"20171005_0\",\"20171005_1\",\"20171005_2\",\"20171005_3\",\"20171005_4\",\n \"20171006_0\",\"20171006_1\"\n \n \"\"\"\n \n BiAlruns = []\n \n \"\"\"\n ch1 files made for:\n \"20171006_2\",\"20171006_3\",\"20171006_4\",\"20171006_5\",\"20171007_0\",\n \"20171007_1\",\"20171007_2\",\"20171007_4\",\"20171007_5\",\n \"20171007_6\",\"20171008_0\",\"20171008_1\",\"20171008_4\",\"20171008_6\",\n \"20171008_7\",\"20171009_0\",\"20171009_1\",\"20171009_2\"\n \n ch0 files made for:\n \"20171006_2\",\"20171006_3\",\"20171006_4\",\"20171006_5\",\"20171007_0\",\n \"20171007_1\",\"20171007_2\",\"20171007_4\",\"20171007_5\",\n \"20171007_6\",\"20171008_0\",\"20171008_1\",\"20171008_4\",\"20171008_6\",\n \"20171008_7\",\"20171009_0\",\"20171009_1\",\"20171009_2\"\n \"\"\"\n\n cfruns = [\"20170711_15\",\"20170711_16\"]\n \"\"\"\n ch0 files made for:\n \"20170707_6\",\"20170707_7\",\"20170707_8\",\"20170707_9\",\"20170707_10\",\"20170708_0\",\n \"20170708_1\",\"20170708_3\",\"20170708_5\",\"20170708_6\",\"20170708_7\",\n \"20170708_8\",\"20170708_9\",\"20170709_0\",\"20170709_1\",\"20170709_2\",\n \"20170709_3\",\"20170709_4\",\"20170709_6\",\"20170709_7\",\"20170709_8\",\n \"20170710_0\",\"20170710_1\",\"20170710_2\",\"20170710_3\",\"20170710_4\",\n \"20170710_5\",\"20170710_6\",\"20170710_7\",\"20170710_8\",\"20170710_9\",\n \"20170711_0\",\"20170711_14\",\"20170711_16\"\n \n ch1 files made for:\n \n \"20170707_6\",\"20170707_7\",\"20170707_8\",\"20170707_9\",\"20170707_10\",\"20170708_0\",\n \"20170708_1\",\"20170708_3\",\"20170708_4\",\"20170708_5\",\"20170708_6\",\n \"20170708_7\",\"20170708_8\",\"20170708_9\",\"20170709_0\",\"20170709_1\",\"20170709_2\",\n \"20170709_3\",\"20170709_4\",\"20170709_6\",20170709_7\",\"20170709_8\",\n \"20170710_0\",\"20170710_1\",\"20170710_2\",\"20170710_3\",\"20170710_4\",\n \"20170710_5\",\"20170710_6\",\"20170710_7\",\"20170710_8\", \"20170710_9\",\"20170711_0\",\n \"20170711_14\",\"20170711_15\",\"20170711_16\"\n \n \n \n bad AcousticAnalysis_ .bin files:\n 
\"20170708_2\",\"20170708_4\",\n \"\"\"\n \n pmtnobubdiffs,pmtdiffs,dubbubdiffs = trig_difference(biberuns)\n goodz,diffs,coincspec,Ncoinc,ntotcoinc,totevents,totbub = zdependence(biberuns)\n \n bgpmtnobubdiffs,bgpmtdiffs,bgdubbubdiffs = trig_difference(bgruns)\n bggoodz,bgdiffs,bgcoincspec,bgNcoinc,bgntotcoinc,bgtotevents,bgtotbub = zdependence(bgruns)\n \n \"\"\"\n plt.figure()\n _,bins,_=plt.hist(pmtdiffs,150,histtype='step',label=\"one bubble\",lw=4)\n plt.hist(diffs,200,histtype='step')\n plt.hist(pmtnobubdiffs,bins,histtype='step', label = \"no bubble\",lw=4)\n plt.hist(dubbubdiffs,bins,histtype='step', label=\"two bubbles\",lw=4)\n plt.xlabel(\"PMT trigger times minus acoustic t_0\",fontsize=25)\n #plt.xlim([-100e-6,300e-6])\n plt.yscale('log')\n plt.legend(fontsize=18)\n plt.show\n \"\"\"\n def ft(x,m,b):\n return m*x +b\n \n params,params_cov = scipy.optimize.curve_fit(ft,goodz,diffs)\n p1 = params[0]\n p0 = params[1]\n print(\"the slope is\"+str(p1))\n print(\"The speed of sound is \"+str((1/np.fabs(p1))/100)+\" m/s\")\n \n plt.figure()\n plt.scatter(goodz,diffs)\n plt.plot(np.arange(-3,0.1,0.1),p0+p1*np.arange(-3,0.1,0.1),lw=3)\n plt.ylim([-500e-6, 0])\n plt.xlabel(\"z position (cm)\")\n plt.ylabel(\"time difference (seconds)\")\n plt.show\n \n \n \n plt.figure()\n vals,bins,_=plt.hist(coincspec,np.ceil(max(coincspec)),histtype='step',color='r')\n bgvals,_,_ = plt.hist(bgcoincspec,bins,histtype='step')\n plt.xlabel(\"spectrum of PMT pulse areas within 500 microseconds before acoustic t_0 (photoelectrons)\")\n plt.show\n\n plt.figure()\n plt.bar(bins[:(len(vals))],[v/totbub for v in vals],1,color='r',linewidth=0,label=\"californium\")\n plt.bar(bins[:len(vals)],[v/bgtotbub for v in bgvals],0.7,color='b',linewidth = 0,label=\"background\")\n plt.xlabel(\"spectrum of PMT pulse areas within 500 microseconds before acoustic t_0 (photoelectrons)\",fontsize=25)\n plt.ylabel(\"probability of a scintillation pulse of this area per bubble\",fontsize=25)\n plt.legend(fontsize=18)\n plt.show\n \nif __name__ == \"__main__\":\n main()\n" ]
[ [ "scipy.optimize.curve_fit", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.diff", "matplotlib.pyplot.hist", "numpy.fabs", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter" ] ]
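The SBC analysis stored above ends by fitting the PMT-to-acoustic time difference against bubble z position with `scipy.optimize.curve_fit` and converting the slope into a speed of sound. The same fit is shown here on synthetic numbers; all values below are made-up placeholders, not detector data:

    import numpy as np
    import scipy.optimize

    def ft(z, m, b):                                      # same linear model as in main()
        return m * z + b

    z_cm = np.linspace(-3.0, 0.0, 30)                     # bubble z positions (cm)
    true_slope = -1.0 / (1500.0 * 100.0)                  # ~1500 m/s expressed in s/cm
    dt_s = ft(z_cm, true_slope, -2.0e-4) + np.random.normal(0, 2.0e-6, z_cm.size)

    params, _ = scipy.optimize.curve_fit(ft, z_cm, dt_s)
    speed_m_per_s = (1.0 / np.fabs(params[0])) / 100.0    # cm/s -> m/s, as in main()
    print(speed_m_per_s)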
elias-ramzi/pytorch-metric-learning
[ "1fb343124d15fd2f63d535df26aa1463daf4ceee" ]
[ "tests/miners/test_pair_margin_miner.py" ]
[ "import unittest\n\nimport torch\n\nfrom pytorch_metric_learning.distances import CosineSimilarity, LpDistance\nfrom pytorch_metric_learning.miners import PairMarginMiner\nfrom pytorch_metric_learning.utils import common_functions as c_f\n\nfrom .. import TEST_DEVICE, TEST_DTYPES\n\n\nclass TestPairMarginMiner(unittest.TestCase):\n def test_pair_margin_miner(self):\n for dtype in TEST_DTYPES:\n for distance in [LpDistance(), CosineSimilarity()]:\n embedding_angles = torch.arange(0, 16)\n embeddings = torch.tensor(\n [c_f.angle_to_coord(a) for a in embedding_angles],\n requires_grad=True,\n dtype=dtype,\n ).to(\n TEST_DEVICE\n ) # 2D embeddings\n labels = torch.randint(low=0, high=2, size=(16,))\n mat = distance(embeddings)\n pos_pairs = []\n neg_pairs = []\n for i in range(len(embeddings)):\n anchor_label = labels[i]\n for j in range(len(embeddings)):\n if j == i:\n continue\n positive_label = labels[j]\n if positive_label == anchor_label:\n ap_dist = mat[i, j]\n pos_pairs.append((i, j, ap_dist))\n\n for i in range(len(embeddings)):\n anchor_label = labels[i]\n for j in range(len(embeddings)):\n if j == i:\n continue\n negative_label = labels[j]\n if negative_label != anchor_label:\n an_dist = mat[i, j]\n neg_pairs.append((i, j, an_dist))\n\n for pos_margin_int in range(-1, 4):\n pos_margin = float(pos_margin_int) * 0.05\n for neg_margin_int in range(2, 7):\n neg_margin = float(neg_margin_int) * 0.05\n miner = PairMarginMiner(\n pos_margin, neg_margin, distance=distance\n )\n correct_pos_pairs = []\n correct_neg_pairs = []\n for i, j, k in pos_pairs:\n condition = (\n (k < pos_margin)\n if distance.is_inverted\n else (k > pos_margin)\n )\n if condition:\n correct_pos_pairs.append((i, j))\n for i, j, k in neg_pairs:\n condition = (\n (k > neg_margin)\n if distance.is_inverted\n else (k < neg_margin)\n )\n if condition:\n correct_neg_pairs.append((i, j))\n\n correct_pos = set(correct_pos_pairs)\n correct_neg = set(correct_neg_pairs)\n a1, p1, a2, n2 = miner(embeddings, labels)\n mined_pos = set([(a.item(), p.item()) for a, p in zip(a1, p1)])\n mined_neg = set([(a.item(), n.item()) for a, n in zip(a2, n2)])\n\n self.assertTrue(mined_pos == correct_pos)\n self.assertTrue(mined_neg == correct_neg)\n\n def test_empty_output(self):\n miner = PairMarginMiner(0, 1)\n batch_size = 32\n for dtype in TEST_DTYPES:\n embeddings = torch.randn(batch_size, 64).type(dtype).to(TEST_DEVICE)\n labels = torch.arange(batch_size)\n a, p, _, _ = miner(embeddings, labels)\n self.assertTrue(len(a) == 0)\n self.assertTrue(len(p) == 0)\n" ]
[ [ "torch.randint", "torch.randn", "torch.arange" ] ]
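The test above checks `PairMarginMiner` against a brute-force pair enumeration. A bare usage sketch of the same miner API outside the test harness follows; the margins, embedding sizes, and labels are arbitrary illustrative values:

    import torch
    from pytorch_metric_learning.miners import PairMarginMiner

    miner = PairMarginMiner(pos_margin=0.2, neg_margin=0.8)   # default LpDistance
    embeddings = torch.randn(32, 64)
    labels = torch.randint(low=0, high=4, size=(32,))

    # positives farther apart than pos_margin, negatives closer than neg_margin
    a1, p1, a2, n2 = miner(embeddings, labels)
    print(len(a1), len(a2))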
777ki/alibabacloud-pai-dsw-cn-demo
[ "361c91dd0a302c5073b84cea5ca64dd7b00b0c35" ]
[ "dawnbench_mlperf_dsw/quantize/quant_hooks_v2.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Some useful session run hooks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport multiprocessing\n\nimport numpy as np\n\nfrom enum import Enum, unique\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.training import training\nfrom quantize.fake_quantize_v2 import quantize_graph\n\nroot_dir = os.path.abspath(os.path.dirname(__file__))\n\n@unique\nclass OfflineQuantizeMethod(Enum):\n KL = 1\n MAX = 2\n PERC = 3\n\n\nMODE_MAP = {1: OfflineQuantizeMethod.KL,\n 2: OfflineQuantizeMethod.MAX,\n 3: OfflineQuantizeMethod.PERC,}\n\ncalibration_num_proc = 16\n\n\ndef computeScaleValue(data_list, method=OfflineQuantizeMethod.KL, name='',\n num_bits=8, bin_num=2048):\n max_range = float(pow(2, num_bits) // 2 - 1)\n quant_num = int(pow(2, num_bits) // 2)\n\n flatten_abs_data = np.abs(np.concatenate(data_list).ravel())\n data_max = np.max(flatten_abs_data)\n is_one_hot = np.all(np.add(flatten_abs_data==0, flatten_abs_data==1))\n # max-range, per-tensor for activation\n if (method == OfflineQuantizeMethod.MAX or is_one_hot) and (num_bits < 8):\n data_max_list = [np.max(np.abs(data_array)) for data_array in data_list]\n data_max = np.mean(np.array(data_max_list))\n max_scale = max_range / (data_max+sys.float_info.epsilon)\n return name, (max_scale, num_bits)\n # histgram of the activation\n bin_size = float(data_max) / bin_num\n hist = np.zeros((bin_num))\n for data in data_list:\n data = np.abs(data)\n data = np.int32(np.minimum(np.floor(data / bin_size), bin_num-1))\n tmp_hist = np.bincount(data[np.where(data > 0)])\n hist[:tmp_hist.shape[0]] += tmp_hist\n\n start_idx = np.where(hist>0)[0][0]\n start_idx = min(max(start_idx+1, quant_num), bin_num)\n # 99.999th percentile range value\n if (method == OfflineQuantizeMethod.PERC):\n perc_i = 0\n hist_total = np.sum(hist)\n for i in range(start_idx, bin_num+1):\n P = hist[:i].copy()\n P_total = np.sum(P)\n P_perc = P_total / float(hist_total)\n if (P_perc > 0.99999):\n perc_i = i-1\n break\n threshold = (perc_i + 0.5) * bin_size\n scale_perc = max_range / threshold\n return name, (scale_perc, num_bits)\n\n # KL-scale, per-tensor for activation\n KL = np.ones((bin_num)) * float('inf')\n for i in range(start_idx, bin_num+1):\n P = hist[:i].copy()\n P[i-1] += np.sum(hist[i:])\n P /= sum(P)\n\n # Need to optimize\n # For an array of length l that should be splitinto n sections,\n # it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n.\n tmp_hist = hist[:i].copy()\n sub_1_num = i % quant_num\n sub_1_len = i // quant_num + 1\n sub_2_num = quant_num - sub_1_num\n sub_2_len = i // quant_num\n Q_array_1 = tmp_hist[:sub_1_num*sub_1_len].reshape(sub_1_num, sub_1_len)\n Q_array_2 = 
tmp_hist[sub_1_num*sub_1_len:].reshape(sub_2_num, sub_2_len)\n Q_quant_1 = np.sum(Q_array_1, 1)\n Q_quant_2 = np.sum(Q_array_2, 1)\n Q_quant = np.concatenate((Q_quant_1, Q_quant_2))\n Q_quant_num_1 = np.sum(Q_array_1 > 0, 1)\n Q_quant_num_2 = np.sum(Q_array_2 > 0, 1)\n Q_quant_num = np.int32(np.concatenate((Q_quant_num_1, Q_quant_num_2)))\n tmp_range_1 = np.array(range(sub_1_num)).reshape(-1, 1)\n tmp_range_2 = (np.array(range(sub_2_num)) + sub_1_num).reshape(-1, 1)\n Q_quant_idx_1 = (np.ones(Q_array_1.shape) * tmp_range_1).reshape(-1)\n Q_quant_idx_2 = (np.ones(Q_array_2.shape) * tmp_range_2).reshape(-1)\n Q_quant_idx = np.int32(np.concatenate((Q_quant_idx_1, Q_quant_idx_2)))\n\n Q = np.zeros((i))\n tmp_idx = np.where(hist[:i] > 0)\n Q[tmp_idx] = Q_quant[Q_quant_idx[tmp_idx]] / Q_quant_num[Q_quant_idx[tmp_idx]]\n Q /= sum(Q)\n\n tmp_idx = np.where(Q == 0)\n Q[tmp_idx] = sys.float_info.epsilon\n tmp_idx = np.where(P > 0)\n KL[i-1] = np.sum(P[tmp_idx] * np.log(P[tmp_idx] / Q[tmp_idx]))\n\n m = np.argmin(KL)\n threshold = (m + 0.5) * bin_size\n scale_kl = max_range / threshold\n return name, (scale_kl, num_bits)\n\n\ndef computeAllOfflineScale(featmap_name, featmap_data,\n method=OfflineQuantizeMethod.KL,\n num_bits=None):\n scale_dict = dict()\n for i in range(len(featmap_name)):\n _, scale_bits = computeScaleValue(featmap_data[featmap_name[i]], method,\n featmap_name[i], num_bits[featmap_name[i]])\n scale_dict[featmap_name[i]] = scale_bits\n return scale_dict\n\n\ndef computeAllOfflineScaleMultiProc(featmap_name, featmap_data,\n method=OfflineQuantizeMethod.KL,\n process_num=calibration_num_proc,\n num_bits=None):\n # offline KL\n scale_dict = dict()\n results = list()\n pool = multiprocessing.Pool(processes=min(len(featmap_name), process_num))\n for i in xrange(len(featmap_name)):\n results.append(pool.apply_async(\n computeScaleValue,\n (featmap_data[featmap_name[i]], method, featmap_name[i], num_bits[featmap_name[i]])))\n pool.close()\n pool.join()\n for result in results:\n scale_name, scale_bits = result.get()\n scale_dict[scale_name] = scale_bits\n return scale_dict\n\n\n# pylint: disable=protected-access\nclass QuantHook(training.SessionRunHook):\n\n def __init__(self,\n calib_file='calib_224_64.npy',\n bits=8,\n input_name = 'IteratorGetNext:0',\n int8_layers = ['resnet_model/conv2d/Conv2D',\n 'resnet_model/dense/MatMul'],\n online=False,\n quant_copy_num=1,\n quant_mode=1,\n finish=False\n ):\n\n self._bits = bits\n self._input_name = input_name\n self._online = online and (not finish)\n self._int8_layers = int8_layers\n self._quant_copy_num = quant_copy_num\n self._quant_mode = MODE_MAP[quant_mode]\n self._quant_mode_t = quant_mode\n\n calib_path = os.path.join(root_dir, '../calib/')\n if not os.path.exists(calib_path):\n os.mkdir(calib_path)\n\n self._pre_calib = calib_path\n self._calib_file = os.path.join(calib_path, calib_file)\n\n if self._online:\n calib_data = np.load(self._calib_file, allow_pickle=True)\n self._calib_data = list()\n for i, calib_dict in enumerate(calib_data):\n self._calib_data.append({self._input_name: calib_dict.values()[0]})\n\n\n def begin(self):\n # graph rewrite\n self._act_dict = quantize_graph(bits=self._bits,\n pre_calib=self._pre_calib,\n int8_layers=self._int8_layers,\n quant_copy_num=self._quant_copy_num,\n online=self._online,\n method=self._quant_mode_t)\n\n if self._online:\n self._act_list = list()\n self._act_bits = dict()\n for act_name in self._act_dict:\n self._act_list.append(act_name)\n self._act_bits[act_name] = 
self._act_dict[act_name][1]\n\n\n def after_create_session(self, session, coord): # pylint: disable=unused-argument\n\n if self._online:\n # obtain the calib data\n print('KL calib: obtain the calib data')\n act_dict = dict()\n for i, calib_dict in enumerate(self._calib_data):\n acts = session.run(self._act_list, feed_dict=calib_dict)\n for act_name, act in zip(self._act_list, acts):\n if not (act_name in act_dict):\n act_dict[act_name] = [act]\n else:\n act_dict[act_name].append(act)\n # calculate the KL scaling factor\n print('KL calib: calculate the KL scaling factor')\n act_dict = computeAllOfflineScaleMultiProc(featmap_name=self._act_list,\n featmap_data=act_dict,\n method=self._quant_mode,\n num_bits=self._act_bits)\n act_dict_file = os.path.join(self._pre_calib, 'act_dict_%s.npy' % self._bits)\n print('Save the online-calib: %s' % act_dict_file)\n np.save(act_dict_file, act_dict)\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.array", "numpy.add", "numpy.zeros", "numpy.argmin", "numpy.log", "numpy.sum", "numpy.ones", "numpy.load", "numpy.save", "numpy.where", "numpy.abs", "numpy.floor" ] ]
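In the quantization hook above, `computeScaleValue()` either takes a per-tensor max-range scale or searches a histogram for the KL-optimal clipping threshold. The max-range branch on its own, stripped to a few lines (the synthetic activations are placeholders):

    import sys
    import numpy as np

    def max_range_scale(data_list, num_bits=8):
        max_range = float(pow(2, num_bits) // 2 - 1)      # 127 for int8
        data_max = np.mean([np.max(np.abs(d)) for d in data_list])
        return max_range / (data_max + sys.float_info.epsilon)

    acts = [np.random.randn(64, 56, 56) for _ in range(4)]    # calibration batches
    scale = max_range_scale(acts)
    print(scale)   # multiply activations by this scale before rounding to int8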
jleuschn/dival
[ "483915b2e64c1ad6355311da0429ef8f2c2eceb5" ]
[ "dival/reconstructors/fbpunet_reconstructor.py" ]
[ "from warnings import warn\nfrom copy import deepcopy\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom odl.tomo import fbp_op\n\nfrom dival.reconstructors.standard_learned_reconstructor import (\n StandardLearnedReconstructor)\nfrom dival.reconstructors.networks.unet import UNet\nfrom dival.datasets.fbp_dataset import FBPDataset\n\n\nclass FBPUNetReconstructor(StandardLearnedReconstructor):\n \"\"\"\n CT reconstructor applying filtered back-projection followed by a\n postprocessing U-Net (e.g. [1]_).\n\n References\n ----------\n .. [1] K. H. Jin, M. T. McCann, E. Froustey, et al., 2017,\n \"Deep Convolutional Neural Network for Inverse Problems in Imaging\".\n IEEE Transactions on Image Processing.\n `doi:10.1109/TIP.2017.2713099\n <https://doi.org/10.1109/TIP.2017.2713099>`_\n \"\"\"\n\n HYPER_PARAMS = deepcopy(StandardLearnedReconstructor.HYPER_PARAMS)\n HYPER_PARAMS.update({\n 'scales': {\n 'default': 5,\n 'retrain': True\n },\n 'skip_channels': {\n 'default': 4,\n 'retrain': True\n },\n 'channels': {\n 'default': (32, 32, 64, 64, 128, 128),\n 'retrain': True\n },\n 'filter_type': {\n 'default': 'Hann',\n 'retrain': True\n },\n 'frequency_scaling': {\n 'default': 1.0,\n 'retrain': True\n },\n 'use_sigmoid': {\n 'default': False,\n 'retrain': True\n },\n 'init_bias_zero': {\n 'default': True,\n 'retrain': True\n },\n 'lr': {\n 'default': 0.001,\n 'retrain': True\n },\n 'scheduler': {\n 'default': 'cosine',\n 'choices': ['base', 'cosine'], # 'base': inherit\n 'retrain': True\n },\n 'lr_min': { # only used if 'cosine' scheduler is selected\n 'default': 1e-4,\n 'retrain': True\n }\n })\n\n def __init__(self, ray_trafo,\n allow_multiple_workers_without_random_access=False,\n **kwargs):\n \"\"\"\n Parameters\n ----------\n ray_trafo : :class:`odl.tomo.RayTransform`\n Ray transform (the forward operator).\n allow_multiple_workers_without_random_access : bool, optional\n Whether for datasets without support for random access\n a specification of ``num_data_loader_workers > 1`` is honored.\n If `False` (the default), the value is overridden by ``1`` for\n generator-only datasets.\n\n Further keyword arguments are passed to ``super().__init__()``.\n \"\"\"\n self.allow_multiple_workers_without_random_access = (\n allow_multiple_workers_without_random_access)\n super().__init__(ray_trafo, **kwargs)\n\n def train(self, dataset):\n try:\n fbp_dataset = dataset.fbp_dataset\n except AttributeError:\n warn('Training FBPUNetReconstructor with no cached FBP dataset. '\n 'Will compute the FBPs on the fly. For faster training, '\n 'consider precomputing the FBPs with '\n '`generate_fbp_cache_files(...)` and passing them to '\n '`train()` by setting the attribute '\n '``dataset.fbp_dataset = get_cached_fbp_dataset(...)``.')\n fbp_dataset = FBPDataset(\n dataset, self.non_normed_op, filter_type=self.filter_type,\n frequency_scaling=self.frequency_scaling)\n\n if not fbp_dataset.supports_random_access():\n if not self.allow_multiple_workers_without_random_access:\n if self.num_data_loader_workers > 1:\n warn('Overriding number of workers with 1 for a dataset '\n 'not supporting random access. 
To force a higher '\n 'number of workers, specify '\n '`allow_multiple_workers_without_random_access=True` '\n 'to `FBPUNetReconstructor.__init__()`.')\n self.num_data_loader_workers = min(\n self.num_data_loader_workers, 1)\n\n super().train(fbp_dataset)\n\n def init_model(self):\n self.fbp_op = fbp_op(self.op, filter_type=self.filter_type,\n frequency_scaling=self.frequency_scaling)\n self.model = UNet(in_ch=1, out_ch=1,\n channels=self.channels[:self.scales],\n skip_channels=[self.skip_channels] * (self.scales),\n use_sigmoid=self.use_sigmoid)\n if self.init_bias_zero:\n def weights_init(m):\n if isinstance(m, torch.nn.Conv2d):\n m.bias.data.fill_(0.0)\n self.model.apply(weights_init)\n\n if self.use_cuda:\n self.model = nn.DataParallel(self.model).to(self.device)\n\n def init_scheduler(self, dataset_train):\n if self.scheduler.lower() == 'cosine':\n # need to set private self._scheduler because self.scheduler\n # property accesses hyper parameter of same name,\n # i.e. self.hyper_params['scheduler']\n self._scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.optimizer,\n T_max=self.epochs,\n eta_min=self.lr_min)\n else:\n super().init_scheduler(dataset_train)\n\n def _reconstruct(self, observation):\n self.model.eval()\n fbp = self.fbp_op(observation)\n fbp_tensor = torch.from_numpy(\n np.asarray(fbp)[None, None]).to(self.device)\n reco_tensor = self.model(fbp_tensor)\n reconstruction = reco_tensor.cpu().detach().numpy()[0, 0]\n return self.reco_space.element(reconstruction)\n" ]
[ [ "torch.optim.lr_scheduler.CosineAnnealingLR", "numpy.asarray", "torch.nn.DataParallel" ] ]
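`FBPUNetReconstructor.init_scheduler()` above wires up `torch.optim.lr_scheduler.CosineAnnealingLR` when the 'cosine' hyper parameter is selected, decaying the learning rate from `lr` to `lr_min`. The scheduler in isolation, with a toy model standing in for the U-Net:

    import torch

    model = torch.nn.Linear(10, 1)                        # stand-in for the U-Net
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=20, eta_min=1e-4)                # T_max=epochs, eta_min=lr_min

    for epoch in range(20):
        # ... one training epoch would run here ...
        optimizer.step()
        scheduler.step()
        print(epoch, scheduler.get_last_lr()[0])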
clausia/qiskit-nature
[ "3e66f54496445f08ce26c58eea3789f28eed4cc8" ]
[ "test/algorithms/ground_state_solvers/test_groundstate_eigensolver.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" Test GroundStateEigensolver \"\"\"\n\nimport contextlib\nimport copy\nimport io\nimport unittest\nimport warnings\n\nfrom test import QiskitNatureTestCase\n\nimport numpy as np\n\nfrom qiskit import BasicAer\nfrom qiskit.algorithms import VQE\nfrom qiskit.algorithms.optimizers import SLSQP, SPSA\nfrom qiskit.opflow import AerPauliExpectation, PauliExpectation\nfrom qiskit.test import slow_test\nfrom qiskit.utils import QuantumInstance, algorithm_globals\n\nfrom qiskit_nature.algorithms import (\n GroundStateEigensolver,\n VQEUCCFactory,\n NumPyMinimumEigensolverFactory,\n)\nfrom qiskit_nature.circuit.library import HartreeFock, UCC, UCCSD\nfrom qiskit_nature.drivers.second_quantization import HDF5Driver\nfrom qiskit_nature.mappers.second_quantization import JordanWignerMapper, ParityMapper\nfrom qiskit_nature.converters.second_quantization import QubitConverter\nfrom qiskit_nature.problems.second_quantization import ElectronicStructureProblem\nfrom qiskit_nature.properties.second_quantization.electronic import ElectronicEnergy\nfrom qiskit_nature.properties.second_quantization.electronic.bases import ElectronicBasis\nfrom qiskit_nature.properties.second_quantization.electronic.integrals import (\n OneBodyElectronicIntegrals,\n TwoBodyElectronicIntegrals,\n)\nfrom qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer\n\n\nclass TestGroundStateEigensolver(QiskitNatureTestCase):\n \"\"\"Test GroundStateEigensolver\"\"\"\n\n def setUp(self):\n super().setUp()\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\".*drivers.*\")\n self.driver = HDF5Driver(\n self.get_resource_path(\"test_driver_hdf5.hdf5\", \"drivers/second_quantization/hdf5d\")\n )\n self.seed = 56\n algorithm_globals.random_seed = self.seed\n\n self.reference_energy = -1.1373060356951838\n\n self.qubit_converter = QubitConverter(JordanWignerMapper())\n self.electronic_structure_problem = ElectronicStructureProblem(self.driver)\n\n self.num_spin_orbitals = 4\n self.num_particles = (1, 1)\n\n def test_npme(self):\n \"\"\"Test NumPyMinimumEigensolver\"\"\"\n solver = NumPyMinimumEigensolverFactory()\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = calc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(res.total_energies[0], self.reference_energy, places=6)\n\n def test_npme_with_default_filter(self):\n \"\"\"Test NumPyMinimumEigensolver with default filter\"\"\"\n solver = NumPyMinimumEigensolverFactory(use_default_filter_criterion=True)\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = calc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(res.total_energies[0], self.reference_energy, places=6)\n\n def test_vqe_uccsd(self):\n \"\"\"Test VQE UCCSD case\"\"\"\n solver = VQEUCCFactory(\n quantum_instance=QuantumInstance(BasicAer.get_backend(\"statevector_simulator\")),\n ansatz=UCC(excitations=\"d\"),\n )\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = 
calc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(res.total_energies[0], self.reference_energy, places=6)\n\n def test_vqe_ucc_custom(self):\n \"\"\"Test custom ansatz in Factory use case\"\"\"\n solver = VQEUCCFactory(QuantumInstance(BasicAer.get_backend(\"statevector_simulator\")))\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = calc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(res.total_energies[0], self.reference_energy, places=6)\n\n def test_aux_ops_reusability(self):\n \"\"\"Test that the auxiliary operators can be reused\"\"\"\n # Regression test against #1475\n solver = NumPyMinimumEigensolverFactory()\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n\n modes = 4\n h_1 = np.eye(modes, dtype=complex)\n h_2 = np.zeros((modes, modes, modes, modes))\n aux_ops = ElectronicEnergy(\n [\n OneBodyElectronicIntegrals(ElectronicBasis.MO, (h_1, None)),\n TwoBodyElectronicIntegrals(ElectronicBasis.MO, (h_2, None, None, None)),\n ],\n ).second_q_ops()\n aux_ops_copy = copy.deepcopy(aux_ops)\n\n _ = calc.solve(self.electronic_structure_problem)\n assert all(\n frozenset(a.to_list()) == frozenset(b.to_list()) for a, b in zip(aux_ops, aux_ops_copy)\n )\n\n def _setup_evaluation_operators(self):\n # first we run a ground state calculation\n solver = VQEUCCFactory(QuantumInstance(BasicAer.get_backend(\"statevector_simulator\")))\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = calc.solve(self.electronic_structure_problem)\n\n # now we decide that we want to evaluate another operator\n # for testing simplicity, we just use some pre-constructed auxiliary operators\n _, *aux_ops = self.qubit_converter.convert_match(\n self.electronic_structure_problem.second_q_ops()\n )\n return calc, res, aux_ops\n\n def test_eval_op_single(self):\n \"\"\"Test evaluating a single additional operator\"\"\"\n calc, res, aux_ops = self._setup_evaluation_operators()\n # we filter the list because in this test we test a single operator evaluation\n add_aux_op = aux_ops[0][0]\n\n # now we have the ground state calculation evaluate it\n add_aux_op_res = calc.evaluate_operators(res.raw_result.eigenstate, add_aux_op)\n self.assertIsInstance(add_aux_op_res[0], complex)\n self.assertAlmostEqual(add_aux_op_res[0].real, 2, places=6)\n\n def test_eval_op_single_none(self):\n \"\"\"Test evaluating a single `None` operator\"\"\"\n calc, res, _ = self._setup_evaluation_operators()\n # we filter the list because in this test we test a single operator evaluation\n add_aux_op = None\n\n # now we have the ground state calculation evaluate it\n add_aux_op_res = calc.evaluate_operators(res.raw_result.eigenstate, add_aux_op)\n self.assertIsNone(add_aux_op_res)\n\n def test_eval_op_list(self):\n \"\"\"Test evaluating a list of additional operators\"\"\"\n calc, res, aux_ops = self._setup_evaluation_operators()\n # we filter the list because of simplicity\n expected_results = {\"number of particles\": 2, \"s^2\": 0, \"magnetization\": 0}\n add_aux_op = aux_ops[0:3]\n\n # now we have the ground state calculation evaluate them\n add_aux_op_res = calc.evaluate_operators(res.raw_result.eigenstate, add_aux_op)\n self.assertIsInstance(add_aux_op_res, list)\n # in this list we require that the order of the results remains unchanged\n for idx, expected in enumerate(expected_results.values()):\n self.assertAlmostEqual(add_aux_op_res[idx][0].real, expected, places=6)\n\n def test_eval_op_list_none(self):\n \"\"\"Test evaluating a list of additional 
operators incl. `None`\"\"\"\n calc, res, aux_ops = self._setup_evaluation_operators()\n # we filter the list because of simplicity\n expected_results = {\"number of particles\": 2, \"s^2\": 0, \"magnetization\": 0}\n add_aux_op = aux_ops[0:3] + [None]\n\n # now we have the ground state calculation evaluate them\n add_aux_op_res = calc.evaluate_operators(res.raw_result.eigenstate, add_aux_op)\n self.assertIsInstance(add_aux_op_res, list)\n # in this list we require that the order of the results remains unchanged\n for idx, expected in enumerate(expected_results.values()):\n self.assertAlmostEqual(add_aux_op_res[idx][0].real, expected, places=6)\n self.assertIsNone(add_aux_op_res[-1])\n\n def test_eval_op_dict(self):\n \"\"\"Test evaluating a dict of additional operators\"\"\"\n calc, res, aux_ops = self._setup_evaluation_operators()\n # we filter the list because of simplicity\n expected_results = {\"number of particles\": 2, \"s^2\": 0, \"magnetization\": 0}\n add_aux_op = aux_ops[0:3]\n # now we convert it into a dictionary\n add_aux_op = dict(zip(expected_results.keys(), add_aux_op))\n\n # now we have the ground state calculation evaluate them\n add_aux_op_res = calc.evaluate_operators(res.raw_result.eigenstate, add_aux_op)\n self.assertIsInstance(add_aux_op_res, dict)\n for name, expected in expected_results.items():\n self.assertAlmostEqual(add_aux_op_res[name][0].real, expected, places=6)\n\n def test_eval_op_dict_none(self):\n \"\"\"Test evaluating a dict of additional operators incl. `None`\"\"\"\n calc, res, aux_ops = self._setup_evaluation_operators()\n # we filter the list because of simplicity\n expected_results = {\"number of particles\": 2, \"s^2\": 0, \"magnetization\": 0}\n add_aux_op = aux_ops[0:3]\n # now we convert it into a dictionary\n add_aux_op = dict(zip(expected_results.keys(), add_aux_op))\n add_aux_op[\"None\"] = None\n\n # now we have the ground state calculation evaluate them\n add_aux_op_res = calc.evaluate_operators(res.raw_result.eigenstate, add_aux_op)\n self.assertIsInstance(add_aux_op_res, dict)\n for name, expected in expected_results.items():\n self.assertAlmostEqual(add_aux_op_res[name][0].real, expected, places=6)\n self.assertIsNone(add_aux_op_res[\"None\"])\n\n @slow_test\n def test_eval_op_qasm(self):\n \"\"\"Regression tests against https://github.com/Qiskit/qiskit-nature/issues/53.\"\"\"\n solver = VQEUCCFactory(\n optimizer=SLSQP(maxiter=100),\n expectation=PauliExpectation(),\n quantum_instance=QuantumInstance(\n backend=BasicAer.get_backend(\"qasm_simulator\"),\n seed_simulator=algorithm_globals.random_seed,\n seed_transpiler=algorithm_globals.random_seed,\n ),\n )\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res_qasm = calc.solve(self.electronic_structure_problem)\n\n hamiltonian = self.electronic_structure_problem.second_q_ops()[0]\n qubit_op = self.qubit_converter.map(hamiltonian)\n\n ansatz = solver.get_solver(self.electronic_structure_problem, self.qubit_converter).ansatz\n circuit = ansatz.assign_parameters(res_qasm.raw_result.optimal_point)\n mean = calc.evaluate_operators(circuit, qubit_op)\n\n self.assertAlmostEqual(res_qasm.eigenenergies[0], mean[0].real)\n\n def test_eval_op_qasm_aer(self):\n \"\"\"Regression tests against https://github.com/Qiskit/qiskit-nature/issues/53.\"\"\"\n try:\n # pylint: disable=import-outside-toplevel\n # pylint: disable=unused-import\n from qiskit import Aer\n\n backend = Aer.get_backend(\"aer_simulator\")\n except ImportError as ex: # pylint: disable=broad-except\n 
self.skipTest(f\"Aer doesn't appear to be installed. Error: '{str(ex)}'\")\n return\n\n solver = VQEUCCFactory(\n optimizer=SLSQP(maxiter=100),\n expectation=AerPauliExpectation(),\n include_custom=True,\n quantum_instance=QuantumInstance(\n backend=backend,\n seed_simulator=algorithm_globals.random_seed,\n seed_transpiler=algorithm_globals.random_seed,\n ),\n )\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res_qasm = calc.solve(self.electronic_structure_problem)\n\n hamiltonian = self.electronic_structure_problem.second_q_ops()[0]\n qubit_op = self.qubit_converter.map(hamiltonian)\n\n ansatz = solver.get_solver(self.electronic_structure_problem, self.qubit_converter).ansatz\n circuit = ansatz.assign_parameters(res_qasm.raw_result.optimal_point)\n mean = calc.evaluate_operators(circuit, qubit_op)\n\n self.assertAlmostEqual(res_qasm.eigenenergies[0], mean[0].real)\n\n def _prepare_uccsd_hf(self, qubit_converter):\n initial_state = HartreeFock(self.num_spin_orbitals, self.num_particles, qubit_converter)\n ansatz = UCCSD(\n qubit_converter,\n self.num_particles,\n self.num_spin_orbitals,\n initial_state=initial_state,\n )\n\n return ansatz\n\n def test_uccsd_hf(self):\n \"\"\"uccsd hf test\"\"\"\n ansatz = self._prepare_uccsd_hf(self.qubit_converter)\n\n optimizer = SLSQP(maxiter=100)\n backend = BasicAer.get_backend(\"statevector_simulator\")\n solver = VQE(\n ansatz=ansatz,\n optimizer=optimizer,\n quantum_instance=QuantumInstance(backend=backend),\n )\n\n gsc = GroundStateEigensolver(self.qubit_converter, solver)\n\n result = gsc.solve(self.electronic_structure_problem)\n\n self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=6)\n\n @slow_test\n def test_uccsd_hf_qasm(self):\n \"\"\"uccsd hf test with qasm simulator.\"\"\"\n qubit_converter = QubitConverter(ParityMapper())\n ansatz = self._prepare_uccsd_hf(qubit_converter)\n\n backend = BasicAer.get_backend(\"qasm_simulator\")\n\n optimizer = SPSA(maxiter=200, last_avg=5)\n solver = VQE(\n ansatz=ansatz,\n optimizer=optimizer,\n expectation=PauliExpectation(),\n quantum_instance=QuantumInstance(\n backend=backend,\n seed_simulator=algorithm_globals.random_seed,\n seed_transpiler=algorithm_globals.random_seed,\n ),\n )\n\n gsc = GroundStateEigensolver(qubit_converter, solver)\n\n result = gsc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(result.total_energies[0], -1.138, places=2)\n\n @slow_test\n def test_uccsd_hf_aer_statevector(self):\n \"\"\"uccsd hf test with Aer statevector\"\"\"\n try:\n # pylint: disable=import-outside-toplevel\n from qiskit import Aer\n\n backend = Aer.get_backend(\"aer_simulator_statevector\")\n except ImportError as ex: # pylint: disable=broad-except\n self.skipTest(f\"Aer doesn't appear to be installed. 
Error: '{str(ex)}'\")\n return\n\n ansatz = self._prepare_uccsd_hf(self.qubit_converter)\n\n optimizer = SLSQP(maxiter=100)\n solver = VQE(\n ansatz=ansatz,\n optimizer=optimizer,\n quantum_instance=QuantumInstance(backend=backend),\n )\n\n gsc = GroundStateEigensolver(self.qubit_converter, solver)\n\n result = gsc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=6)\n\n @slow_test\n def test_uccsd_hf_aer_qasm(self):\n \"\"\"uccsd hf test with Aer qasm simulator.\"\"\"\n try:\n # pylint: disable=import-outside-toplevel\n from qiskit import Aer\n\n backend = Aer.get_backend(\"aer_simulator\")\n except ImportError as ex: # pylint: disable=broad-except\n self.skipTest(f\"Aer doesn't appear to be installed. Error: '{str(ex)}'\")\n return\n\n ansatz = self._prepare_uccsd_hf(self.qubit_converter)\n\n optimizer = SPSA(maxiter=200, last_avg=5)\n solver = VQE(\n ansatz=ansatz,\n optimizer=optimizer,\n expectation=PauliExpectation(group_paulis=False),\n quantum_instance=QuantumInstance(\n backend=backend,\n seed_simulator=algorithm_globals.random_seed,\n seed_transpiler=algorithm_globals.random_seed,\n ),\n )\n\n gsc = GroundStateEigensolver(self.qubit_converter, solver)\n\n result = gsc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(result.total_energies[0], -1.131, places=2)\n\n @slow_test\n def test_uccsd_hf_aer_qasm_snapshot(self):\n \"\"\"uccsd hf test with Aer qasm simulator snapshot.\"\"\"\n try:\n # pylint: disable=import-outside-toplevel\n from qiskit import Aer\n\n backend = Aer.get_backend(\"aer_simulator\")\n except ImportError as ex: # pylint: disable=broad-except\n self.skipTest(f\"Aer doesn't appear to be installed. Error: '{str(ex)}'\")\n return\n\n ansatz = self._prepare_uccsd_hf(self.qubit_converter)\n\n optimizer = SPSA(maxiter=200, last_avg=5)\n solver = VQE(\n ansatz=ansatz,\n optimizer=optimizer,\n expectation=AerPauliExpectation(),\n quantum_instance=QuantumInstance(backend=backend),\n )\n\n gsc = GroundStateEigensolver(self.qubit_converter, solver)\n\n result = gsc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=3)\n\n def test_freeze_core_z2_symmetry_compatibility(self):\n \"\"\"Regression test against #192.\n\n An issue arose when the FreezeCoreTransformer was combined with the automatic Z2Symmetry\n reduction. 
This regression test ensures that this behavior remains fixed.\n \"\"\"\n driver = HDF5Driver(\n hdf5_input=self.get_resource_path(\n \"LiH_sto3g.hdf5\", \"transformers/second_quantization/electronic\"\n )\n )\n problem = ElectronicStructureProblem(driver, [FreezeCoreTransformer()])\n qubit_converter = QubitConverter(\n ParityMapper(),\n two_qubit_reduction=True,\n z2symmetry_reduction=\"auto\",\n )\n\n solver = NumPyMinimumEigensolverFactory()\n gsc = GroundStateEigensolver(qubit_converter, solver)\n\n result = gsc.solve(problem)\n self.assertAlmostEqual(result.total_energies[0], -7.882, places=2)\n\n def test_total_dipole(self):\n \"\"\"Regression test against #198.\n\n An issue with calculating the dipole moment that had division None/float.\n \"\"\"\n solver = NumPyMinimumEigensolverFactory()\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = calc.solve(self.electronic_structure_problem)\n self.assertAlmostEqual(res.total_dipole_moment_in_debye[0], 0.0, places=1)\n\n def test_print_result(self):\n \"\"\"Regression test against #198 and general issues with printing results.\"\"\"\n solver = NumPyMinimumEigensolverFactory()\n calc = GroundStateEigensolver(self.qubit_converter, solver)\n res = calc.solve(self.electronic_structure_problem)\n with contextlib.redirect_stdout(io.StringIO()) as out:\n print(res)\n # do NOT change the below! Lines have been truncated as to not force exact numerical matches\n expected = \"\"\"\\\n === GROUND STATE ENERGY ===\n\n * Electronic ground state energy (Hartree): -1.857\n - computed part: -1.857\n ~ Nuclear repulsion energy (Hartree): 0.719\n > Total ground state energy (Hartree): -1.137\n\n === MEASURED OBSERVABLES ===\n\n 0: # Particles: 2.000 S: 0.000 S^2: 0.000 M: 0.000\n\n === DIPOLE MOMENTS ===\n\n ~ Nuclear dipole moment (a.u.): [0.0 0.0 1.38\n\n 0:\n * Electronic dipole moment (a.u.): [0.0 0.0 -1.38\n - computed part: [0.0 0.0 -1.38\n > Dipole moment (a.u.): [0.0 0.0 0.0] Total: 0.\n (debye): [0.0 0.0 0.0] Total: 0.\n \"\"\"\n for truth, expected in zip(out.getvalue().split(\"\\n\"), expected.split(\"\\n\")):\n assert truth.strip().startswith(expected.strip())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.zeros", "numpy.eye" ] ]
yangninghua/code_library
[ "b769abecb4e0cbdbbb5762949c91847a0f0b3c5a" ]
[ "book-code/numpy-ml/numpy_ml/neural_nets/initializers/initializers.py" ]
[ "import re\nfrom functools import partial\nfrom ast import literal_eval as eval\n\nimport numpy as np\n\nfrom ..optimizers import OptimizerBase, SGD, AdaGrad, RMSProp, Adam\nfrom ..activations import ActivationBase, Affine, ReLU, Tanh, Sigmoid, LeakyReLU\nfrom ..schedulers import (\n SchedulerBase,\n ConstantScheduler,\n ExponentialScheduler,\n NoamScheduler,\n KingScheduler,\n)\n\nfrom ..utils import (\n he_normal,\n he_uniform,\n glorot_normal,\n glorot_uniform,\n truncated_normal,\n)\n\n\nclass ActivationInitializer(object):\n def __init__(self, param=None):\n \"\"\"\n A class for initializing activation functions. Valid inputs are:\n (a) __str__ representations of `ActivationBase` instances\n (b) `ActivationBase` instances\n\n If `param` is `None`, return the identity function: f(X) = X\n \"\"\"\n self.param = param\n\n def __call__(self):\n param = self.param\n if param is None:\n act = Affine(slope=1, intercept=0)\n elif isinstance(param, ActivationBase):\n act = param\n elif isinstance(param, str):\n act = self.init_from_str(param)\n else:\n raise ValueError(\"Unknown activation: {}\".format(param))\n return act\n\n def init_from_str(self, act_str):\n act_str = act_str.lower()\n if act_str == \"relu\":\n act_fn = ReLU()\n elif act_str == \"tanh\":\n act_fn = Tanh()\n elif act_str == \"sigmoid\":\n act_fn = Sigmoid()\n elif \"affine\" in act_str:\n r = r\"affine\\(slope=(.*), intercept=(.*)\\)\"\n slope, intercept = re.match(r, act_str).groups()\n act_fn = Affine(float(slope), float(intercept))\n elif \"leaky relu\" in act_str:\n r = r\"leaky relu\\(alpha=(.*)\\)\"\n alpha = re.match(r, act_str).groups()[0]\n act_fn = LeakyReLU(float(alpha))\n else:\n raise ValueError(\"Unknown activation: {}\".format(act_str))\n return act_fn\n\n\nclass SchedulerInitializer(object):\n def __init__(self, param=None, lr=None):\n \"\"\"\n A class for initializing learning rate schedulers. 
Valid inputs are:\n (a) __str__ representations of `SchedulerBase` instances\n (b) `SchedulerBase` instances\n (c) Parameter dicts (e.g., as produced via the `summary` method in\n `LayerBase` instances)\n\n If `param` is `None`, return the ConstantScheduler with learning rate\n equal to `lr`.\n \"\"\"\n if all([lr is None, param is None]):\n raise ValueError(\"lr and param cannot both be `None`\")\n\n self.lr = lr\n self.param = param\n\n def __call__(self):\n param = self.param\n if param is None:\n scheduler = ConstantScheduler(self.lr)\n elif isinstance(param, SchedulerBase):\n scheduler = param\n elif isinstance(param, str):\n scheduler = self.init_from_str()\n elif isinstance(param, dict):\n scheduler = self.init_from_dict()\n return scheduler\n\n def init_from_str(self):\n r = r\"([a-zA-Z]*)=([^,)]*)\"\n sch_str = self.param.lower()\n kwargs = dict([(i, eval(j)) for (i, j) in re.findall(r, sch_str)])\n\n if \"constant\" in sch_str:\n scheduler = ConstantScheduler(**kwargs)\n elif \"exponential\" in sch_str:\n scheduler = ExponentialScheduler(**kwargs)\n elif \"noam\" in sch_str:\n scheduler = NoamScheduler(**kwargs)\n elif \"king\" in sch_str:\n scheduler = KingScheduler(**kwargs)\n else:\n raise NotImplementedError(\"{}\".format(sch_str))\n return scheduler\n\n def init_from_dict(self):\n S = self.param\n sc = S[\"hyperparameters\"] if \"hyperparameters\" in S else None\n\n if sc is None:\n raise ValueError(\"Must have `hyperparameters` key: {}\".format(S))\n\n if sc and sc[\"id\"] == \"ConstantScheduler\":\n scheduler = ConstantScheduler()\n elif sc and sc[\"id\"] == \"ExponentialScheduler\":\n scheduler = ExponentialScheduler()\n elif sc and sc[\"id\"] == \"NoamScheduler\":\n scheduler = NoamScheduler()\n elif sc:\n raise NotImplementedError(\"{}\".format(sc[\"id\"]))\n scheduler.set_params(sc)\n return scheduler\n\n\nclass OptimizerInitializer(object):\n def __init__(self, param=None):\n \"\"\"\n A class for initializing optimizers. 
Valid inputs are:\n (a) __str__ representations of `OptimizerBase` instances\n (b) `OptimizerBase` instances\n (c) Parameter dicts (e.g., as produced via the `summary` method in\n `LayerBase` instances)\n\n If `param` is `None`, return the SGD optimizer with default parameters.\n \"\"\"\n self.param = param\n\n def __call__(self):\n param = self.param\n if param is None:\n opt = SGD()\n elif isinstance(param, OptimizerBase):\n opt = param\n elif isinstance(param, str):\n opt = self.init_from_str()\n elif isinstance(param, dict):\n opt = self.init_from_dict()\n return opt\n\n def init_from_str(self):\n r = r\"([a-zA-Z]*)=([^,)]*)\"\n opt_str = self.param.lower()\n kwargs = dict([(i, eval(j)) for (i, j) in re.findall(r, opt_str)])\n if \"sgd\" in opt_str:\n optimizer = SGD(**kwargs)\n elif \"adagrad\" in opt_str:\n optimizer = AdaGrad(**kwargs)\n elif \"rmsprop\" in opt_str:\n optimizer = RMSProp(**kwargs)\n elif \"adam\" in opt_str:\n optimizer = Adam(**kwargs)\n else:\n raise NotImplementedError(\"{}\".format(opt_str))\n return optimizer\n\n def init_from_dict(self):\n O = self.param\n cc = O[\"cache\"] if \"cache\" in O else None\n op = O[\"hyperparameters\"] if \"hyperparameters\" in O else None\n\n if op is None:\n raise ValueError(\"Must have `hyperparemeters` key: {}\".format(O))\n\n if op and op[\"id\"] == \"SGD\":\n optimizer = SGD()\n elif op and op[\"id\"] == \"RMSProp\":\n optimizer = RMSProp()\n elif op and op[\"id\"] == \"AdaGrad\":\n optimizer = AdaGrad()\n elif op and op[\"id\"] == \"Adam\":\n optimizer = Adam()\n elif op:\n raise NotImplementedError(\"{}\".format(op[\"id\"]))\n optimizer.set_params(op, cc)\n return optimizer\n\n\nclass WeightInitializer(object):\n def __init__(self, act_fn_str, mode=\"glorot_uniform\"):\n \"\"\"\n A factory for weight initializers.\n\n Parameters\n ----------\n act_fn_str : str\n The string representation for the layer activation function\n mode : str (default: 'glorot_uniform')\n The weight initialization strategy. Valid entries are {\"he_normal\",\n \"he_uniform\", \"glorot_normal\", glorot_uniform\", \"std_normal\",\n \"trunc_normal\"}\n \"\"\"\n if mode not in [\n \"he_normal\",\n \"he_uniform\",\n \"glorot_normal\",\n \"glorot_uniform\",\n \"std_normal\",\n \"trunc_normal\",\n ]:\n raise ValueError(\"Unrecognize initialization mode: {}\".format(mode))\n\n self.mode = mode\n self.act_fn = act_fn_str\n\n if mode == \"glorot_uniform\":\n self._fn = glorot_uniform\n elif mode == \"glorot_normal\":\n self._fn = glorot_normal\n elif mode == \"he_uniform\":\n self._fn = he_uniform\n elif mode == \"he_normal\":\n self._fn = he_normal\n elif mode == \"std_normal\":\n self._fn = np.random.randn\n elif mode == \"trunc_normal\":\n self._fn = partial(truncated_normal, mean=0, std=1)\n\n def __call__(self, weight_shape):\n if \"glorot\" in self.mode:\n gain = self._calc_glorot_gain()\n W = self._fn(weight_shape, gain)\n elif self.mode == \"std_normal\":\n W = self._fn(*weight_shape)\n else:\n W = self._fn(weight_shape)\n return W\n\n def _calc_glorot_gain(self):\n \"\"\"\n Values from:\n https://pytorch.org/docs/stable/nn.html?#torch.nn.init.calculate_gain\n \"\"\"\n gain = 1.0\n act_str = self.act_fn.lower()\n if act_str == \"tanh\":\n gain = 5.0 / 3.0\n elif act_str == \"relu\":\n gain = np.sqrt(2)\n elif \"leaky relu\" in act_str:\n r = r\"leaky relu\\(alpha=(.*)\\)\"\n alpha = re.match(r, act_str).groups()[0]\n gain = np.sqrt(2 / 1 + float(alpha) ** 2)\n return gain\n" ]
[ [ "numpy.sqrt" ] ]
Tudor33/nni
[ "020408a235fd3cc625ba5627971448647e6ff1f2" ]
[ "nni/algorithms/compression/torch/pruning/simulated_annealing_pruner.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nimport os\nimport math\nimport copy\nimport csv\nimport json\nimport numpy as np\nfrom schema import And, Optional\n\nfrom nni.utils import OptimizeMode\n\nfrom nni.compression.torch.compressor import Pruner\nfrom nni.compression.torch.utils.config_validation import CompressorSchema\nfrom .constants_pruner import PRUNER_DICT\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass SimulatedAnnealingPruner(Pruner):\n \"\"\"\n A Pytorch implementation of Simulated Annealing compression algorithm.\n\n Parameters\n ----------\n model : pytorch model\n The model to be pruned.\n config_list : list\n Supported keys:\n - sparsity : The target overall sparsity.\n - op_types : The operation type to prune.\n evaluator : function\n Function to evaluate the pruned model.\n This function should include `model` as the only parameter, and returns a scalar value.\n Example::\n\n def evaluator(model):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n val_loader = ...\n model.eval()\n correct = 0\n with torch.no_grad():\n for data, target in val_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n # get the index of the max log-probability\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n accuracy = correct / len(val_loader.dataset)\n return accuracy\n optimize_mode : str\n Optimize mode, `maximize` or `minimize`, by default `maximize`.\n base_algo : str\n Base pruning algorithm. `level`, `l1` or `l2`, by default `l1`. Given the sparsity distribution among the ops,\n the assigned `base_algo` is used to decide which filters/channels/weights to prune.\n start_temperature : float\n Start temperature of the simulated annealing process.\n stop_temperature : float\n Stop temperature of the simulated annealing process.\n cool_down_rate : float\n Cool down rate of the temperature.\n perturbation_magnitude : float\n Initial perturbation magnitude to the sparsities. 
The magnitude decreases with current temperature.\n experiment_data_dir : string\n PATH to save experiment data,\n including the config_list generated for the base pruning algorithm, the performance of the pruned model and the pruning history.\n\n \"\"\"\n\n def __init__(self, model, config_list, evaluator, optimize_mode='maximize', base_algo='l1',\n start_temperature=100, stop_temperature=20, cool_down_rate=0.9, perturbation_magnitude=0.35, experiment_data_dir='./'):\n # original model\n self._model_to_prune = copy.deepcopy(model)\n self._base_algo = base_algo\n\n super().__init__(model, config_list)\n\n self._evaluator = evaluator\n self._optimize_mode = OptimizeMode(optimize_mode)\n\n # hyper parameters for SA algorithm\n self._start_temperature = start_temperature\n self._current_temperature = start_temperature\n self._stop_temperature = stop_temperature\n self._cool_down_rate = cool_down_rate\n self._perturbation_magnitude = perturbation_magnitude\n\n # overall pruning rate\n self._sparsity = config_list[0]['sparsity']\n # pruning rates of the layers\n self._sparsities = None\n\n # init current performance & best performance\n self._current_performance = -np.inf\n self._best_performance = -np.inf\n self._best_config_list = []\n\n self._search_history = []\n\n self._experiment_data_dir = experiment_data_dir\n if not os.path.exists(self._experiment_data_dir):\n os.makedirs(self._experiment_data_dir)\n\n def validate_config(self, model, config_list):\n \"\"\"\n Parameters\n ----------\n model : torch.nn.Module\n Model to be pruned\n config_list : list\n List on pruning configs\n \"\"\"\n\n if self._base_algo == 'level':\n schema = CompressorSchema([{\n 'sparsity': And(float, lambda n: 0 < n < 1),\n Optional('op_types'): [str],\n Optional('op_names'): [str],\n }], model, _logger)\n elif self._base_algo in ['l1', 'l2']:\n schema = CompressorSchema([{\n 'sparsity': And(float, lambda n: 0 < n < 1),\n 'op_types': ['Conv2d'],\n Optional('op_names'): [str]\n }], model, _logger)\n\n schema.validate(config_list)\n\n def _sparsities_2_config_list(self, sparsities):\n '''\n convert sparsities vector into config_list for LevelPruner or L1FilterPruner\n\n Parameters\n ----------\n sparsities : list\n list of sparsities\n\n Returns\n -------\n list of dict\n config_list for LevelPruner or L1FilterPruner\n '''\n config_list = []\n\n sparsities = sorted(sparsities)\n self.modules_wrapper = sorted(\n self.modules_wrapper, key=lambda wrapper: wrapper.module.weight.data.numel())\n\n # a layer with more weights will have no less pruning rate\n for idx, wrapper in enumerate(self.get_modules_wrapper()):\n # L1Filter Pruner requires to specify op_types\n if self._base_algo in ['l1', 'l2']:\n config_list.append(\n {'sparsity': sparsities[idx], 'op_types': ['Conv2d'], 'op_names': [wrapper.name]})\n elif self._base_algo == 'level':\n config_list.append(\n {'sparsity': sparsities[idx], 'op_names': [wrapper.name]})\n\n config_list = [val for val in config_list if not math.isclose(val['sparsity'], 0, abs_tol=1e-6)]\n\n return config_list\n\n def _rescale_sparsities(self, sparsities, target_sparsity):\n '''\n Rescale the sparsities list to satisfy the target overall sparsity\n\n Parameters\n ----------\n sparsities : list\n\n target_sparsity : float\n the target overall sparsity\n\n Returns\n -------\n list\n the rescaled sparsities\n '''\n num_weights = []\n for wrapper in self.get_modules_wrapper():\n num_weights.append(wrapper.module.weight.data.numel())\n\n num_weights = sorted(num_weights)\n sparsities = 
sorted(sparsities)\n\n total_weights = 0\n total_weights_pruned = 0\n\n # calculate the scale\n for idx, num_weight in enumerate(num_weights):\n total_weights += num_weight\n total_weights_pruned += int(num_weight*sparsities[idx])\n if total_weights_pruned == 0:\n return None\n scale = target_sparsity / (total_weights_pruned/total_weights)\n\n # rescale the sparsities\n sparsities = np.asarray(sparsities)*scale\n\n return sparsities\n\n def _init_sparsities(self):\n '''\n Generate a sorted sparsities vector\n '''\n # repeatedly generate a distribution until satisfies the overall sparsity requirement\n _logger.info('Gererating sparsities...')\n while True:\n sparsities = sorted(np.random.uniform(\n 0, 1, len(self.get_modules_wrapper())))\n\n sparsities = self._rescale_sparsities(\n sparsities, target_sparsity=self._sparsity)\n\n if sparsities is not None and sparsities[0] >= 0 and sparsities[-1] < 1:\n _logger.info('Initial sparsities generated : %s', sparsities)\n self._sparsities = sparsities\n break\n\n def _generate_perturbations(self):\n '''\n Generate perturbation to the current sparsities distribution.\n\n Returns:\n --------\n list\n perturbated sparsities\n '''\n _logger.info(\"Gererating perturbations to the current sparsities...\")\n\n # decrease magnitude with current temperature\n magnitude = self._current_temperature / \\\n self._start_temperature * self._perturbation_magnitude\n _logger.info('current perturation magnitude:%s', magnitude)\n\n while True:\n perturbation = np.random.uniform(-magnitude, magnitude, len(self.get_modules_wrapper()))\n sparsities = np.clip(0, self._sparsities + perturbation, None)\n _logger.debug(\"sparsities before rescalling:%s\", sparsities)\n\n sparsities = self._rescale_sparsities(sparsities, target_sparsity=self._sparsity)\n _logger.debug(\"sparsities after rescalling:%s\", sparsities)\n\n if sparsities is not None and sparsities[0] >= 0 and sparsities[-1] < 1:\n _logger.info(\"Sparsities perturbated:%s\", sparsities)\n return sparsities\n\n def calc_mask(self, wrapper, **kwargs):\n return None\n\n def compress(self, return_config_list=False):\n \"\"\"\n Compress the model with Simulated Annealing.\n\n Returns\n -------\n torch.nn.Module\n model with specified modules compressed.\n \"\"\"\n _logger.info('Starting Simulated Annealing Compression...')\n\n # initiaze a randomized action\n pruning_iteration = 0\n self._init_sparsities()\n\n # stop condition\n self._current_temperature = self._start_temperature\n while self._current_temperature > self._stop_temperature:\n _logger.info('Pruning iteration: %d', pruning_iteration)\n _logger.info('Current temperature: %d, Stop temperature: %d',\n self._current_temperature, self._stop_temperature)\n while True:\n # generate perturbation\n sparsities_perturbated = self._generate_perturbations()\n config_list = self._sparsities_2_config_list(\n sparsities_perturbated)\n _logger.info(\n \"config_list for Pruner generated: %s\", config_list)\n\n # fast evaluation\n pruner = PRUNER_DICT[self._base_algo](copy.deepcopy(self._model_to_prune), config_list)\n model_masked = pruner.compress()\n evaluation_result = self._evaluator(model_masked)\n\n self._search_history.append(\n {'sparsity': self._sparsity, 'performance': evaluation_result, 'config_list': config_list})\n\n if self._optimize_mode is OptimizeMode.Minimize:\n evaluation_result *= -1\n\n # if better evaluation result, then accept the perturbation\n if evaluation_result > self._current_performance:\n self._current_performance = evaluation_result\n 
self._sparsities = sparsities_perturbated\n\n # save best performance and best params\n if evaluation_result > self._best_performance:\n _logger.info('updating best model...')\n self._best_performance = evaluation_result\n self._best_config_list = config_list\n\n # save the overall best masked model\n self.bound_model = model_masked\n # the ops with sparsity 0 are not included in this modules_wrapper\n modules_wrapper_final = pruner.get_modules_wrapper()\n break\n # if not, accept with probability e^(-deltaE/current_temperature)\n else:\n delta_E = np.abs(evaluation_result -\n self._current_performance)\n probability = math.exp(-1 * delta_E /\n self._current_temperature)\n if np.random.uniform(0, 1) < probability:\n self._current_performance = evaluation_result\n self._sparsities = sparsities_perturbated\n break\n\n # cool down\n self._current_temperature *= self._cool_down_rate\n pruning_iteration += 1\n\n _logger.info('----------Compression finished--------------')\n _logger.info('Best performance: %s', self._best_performance)\n _logger.info('config_list found : %s',\n self._best_config_list)\n\n # save search history\n with open(os.path.join(self._experiment_data_dir, 'search_history.csv'), 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=['sparsity', 'performance', 'config_list'])\n writer.writeheader()\n for item in self._search_history:\n writer.writerow({'sparsity': item['sparsity'], 'performance': item['performance'], 'config_list': json.dumps(\n item['config_list'])})\n\n # save best config found and best performance\n if self._optimize_mode is OptimizeMode.Minimize:\n self._best_performance *= -1\n with open(os.path.join(self._experiment_data_dir, 'search_result.json'), 'w+') as jsonfile:\n json.dump({\n 'performance': self._best_performance,\n 'config_list': json.dumps(self._best_config_list)\n }, jsonfile)\n\n _logger.info('search history and result saved to foler : %s',\n self._experiment_data_dir)\n\n if return_config_list:\n return self._best_config_list\n\n # This should be done only at the final stage,\n # because the modules_wrapper with all the ops are used during the annealing process\n self.modules_wrapper = modules_wrapper_final\n\n return self.bound_model\n" ]
[ [ "numpy.random.uniform", "numpy.asarray", "numpy.abs", "numpy.clip" ] ]
rjbordon/dataflow-sample-applications
[ "e7a58ac3fc7af9022d0ae61702a5f6043bc4b2db" ]
[ "timeseries-streaming/timeseries-python-applications/MLPipelineExamples/test_pipelines/stream_inference.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import absolute_import\n\nimport logging\nimport sys\n\nimport tensorflow as tf\n\nimport apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom google.protobuf.json_format import Parse\nfrom tfx_bsl.public.beam import RunInference\nfrom tfx_bsl.public.proto import model_spec_pb2\nfrom timeseries.transforms import process_inference_return\n\n\ndef run(args, pipeline_args):\n \"\"\"\n Run inference pipeline using data generated from streaming pipeline.\n \"\"\"\n pipeline_options = PipelineOptions(\n pipeline_args, save_main_session=True, streaming=True)\n\n with beam.Pipeline(options=pipeline_options) as pipeline:\n _ = (\n pipeline\n | 'ReadTFExample' >> beam.io.gcp.pubsub.ReadStringsFromPubSub(subscription=args.pubsub_subscription)\n | 'ParseExamples' >> beam.Map(lambda x: Parse(x, tf.train.Example()))\n | RunInference(\n model_spec_pb2.InferenceSpecType(\n saved_model_spec=model_spec_pb2.SavedModelSpec(\n signature_name=['serving_default'],\n model_path=args.saved_model_location)))\n | beam.ParDo(process_inference_return.ProcessReturn())\n | beam.ParDo(process_inference_return.CheckAnomalous())\n | beam.ParDo(print))\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n import argparse\n\n # sys.argv.append(\"--saved_model_location=/tmp/serving_model_dir/\")\n # sys.argv.append(\"--pubsub_subscription=projects/<your-project>/subscriptions/outlier-detection\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--pubsub_subscription',\n dest='pubsub_subscription',\n required=True,\n help=\n 'PubSub Subscription of the JSON samples produced by the streaming pipeline')\n parser.add_argument(\n '--saved_model_location',\n dest='saved_model_location',\n required=True,\n help='location of save model to be used with this inference pipeline'\n )\n\n known_args, pipeline_args = parser.parse_known_args()\n\n run(known_args, pipeline_args)\n" ]
[ [ "tensorflow.train.Example" ] ]
hyyh619/ViZDoom-Imitation-Defend-Center
[ "952ab8f923ef8e13b9a7dd9a935effeb1af42267" ]
[ "c51_ddqn.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport skimage as skimage\nfrom skimage import transform, color, exposure, io\nfrom skimage.viewer import ImageViewer\nimport random\nfrom random import choice\nimport numpy as np\nfrom collections import deque\nimport time\nimport math\nimport os\nimport pandas as pd\nimport cv2\nimport csv\nfrom PIL import Image\n\nimport json\nimport keras\nfrom keras.models import model_from_json\nfrom keras.models import Sequential, load_model, Model\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Activation, Embedding\nfrom keras.optimizers import SGD, Adam, rmsprop\nfrom keras import backend as K\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import array_to_img, img_to_array\n\nfrom vizdoom import DoomGame, ScreenResolution\nfrom vizdoom import *\nimport itertools as it\nfrom time import sleep\nimport tensorflow as tf\n\nfrom networks import Networks\n\n#DEATHMATCH_ACTION5_NAME = [\n# \"ATTACK\",\n# \"MOVE_FORWARD\",\n# \"MOVE_BACKWARD\",\n# \"TURN_LEFT\",\n# \"TURN_RIGHT\"\n#]\n\nDEATHMATCH_ACTION5_NAME = [\n \"MOVE_LEFT\",\n \"MOVE_RIGHT\",\n \"ATTACK\",\n \"MOVE_FORWARD\",\n \"MOVE_BACKWARD\",\n \"TURN_LEFT\",\n \"TURN_RIGHT\"\n]\n\ndef preprocessImg(img, size):\n \n img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)\n img = skimage.transform.resize(img, size, mode='constant')\n img = skimage.color.rgb2gray(img)\n\n return img\n\ndef ResizeImg(img, size):\n img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)\n img = skimage.transform.resize(img, size, mode='constant')\n return img\n\nbTrain = True\nbUseImitation = False\nbRecordSamples = False\nnMaxSamples = 1000\nnSamples = 0\ngameCfg = \"./scenarios/deathmatch_7action.cfg\"\n\n# This is for saving model of imitation learning.\nmodel_path = \"../ViZDoom-models/CarCloneModel-deathmatch-50000-epoch10-5action-256x256-modify1/\"\n\nclass CNNAction:\n def __init__(self, gameName):\n model_json = model_path + \"test_model.json\"\n model_h5 = model_path + \"test_model.h5\"\n with open(model_json, 'r') as jfile:\n self.model = model_from_json(json.load(jfile))\n\n self.model.compile(\"adam\", \"categorical_crossentropy\")\n self.model.load_weights(model_h5)\n self.imgList = []\n self.model.summary()\n\n self.w1 = 256\n self.h1 = 256\n self.inputW = 128\n self.inputH = 128\n\n self.frame_per_action = 4\n self.epsilon = 1.0\n self.initial_epsilon = 1.0\n self.final_epsilon = 0.0001\n self.observe = 2000\n\n # Performance Statistics\n self.stats_window_size = 50 # window size for computing rolling statistics\n self.mavg_score = [] # Moving Average of Survival Time\n self.var_score = [] # Variance of Survival Time\n self.mavg_ammo_left = [] # Moving Average of Ammo used\n self.mavg_kill_counts = [] # Moving Average of Kill Counts\n \n # sample picture number\n dataPath = \"ImitationData/\" + gameName\n if not os.path.exists(dataPath):\n os.mkdir(dataPath)\n imgPath = dataPath + \"/img\"\n if not os.path.exists(imgPath):\n os.mkdir(imgPath)\n self.sampleNum = 0\n self.imgPath = imgPath\n self.dataPath = dataPath\n self.cvsPath = dataPath + \"/test.csv\"\n self.sampleCSVFile = open(self.cvsPath, \"w\")\n self.sampleCSVWriter = csv.writer(self.sampleCSVFile)\n self.sampleCSVWriter.writerow([\"name\", \"action\", \"action_name\"])\n \n def GenerateSamples(self, screen, action):\n self.sampleNum = self.sampleNum + 1\n t = time.time()\n now 
= int(round(t*1000))\n timeStr = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(now/1000))\n savedFileName = \"%s/doom-%s-%d.jpg\" % (self.imgPath, timeStr, self.sampleNum)\n self.sampleCSVWriter.writerow([savedFileName, action, DEATHMATCH_ACTION5_NAME[action]])\n self.sampleCSVFile.flush()\n\n # skimage.io.imsave(\"hy.jpg\", screen.transpose(1, 2, 0))\n dst = ResizeImg(screen, (256, 256))\n skimage.io.imsave(savedFileName, dst)\n return\n\n def next_action(self, state, save_graph=False):\n action_id = self.f_eval(state)\n return action_id\n\n def reset(self):\n pass\n # prev_state is only used for evaluation, so has a batch size of 1\n # self.prev_state = self.init_state_e\n\n def prepare_f_eval_args(self, state):\n \"\"\"\n Prepare inputs for evaluation.\n \"\"\"\n screen = np.float32(state)\n return screen\n\n def f_eval(self, state):\n screen = self.prepare_f_eval_args(state)\n img = screen\n # print (img.shape)\n\n img = cv2.resize(img.transpose(1, 2, 0), (self.w1, self.h1), interpolation=cv2.INTER_AREA)\n self.imgList.append(img)\n# if len(self.imgList) < 4:\n# return 0\n\n# img1Int = self.imgList[0].transpose(2, 1, 0).astype(int)\n\n img1 = array_to_img(self.imgList[0].astype(int))\n# img2 = array_to_img(self.imgList[1].astype(int))\n# img3 = array_to_img(self.imgList[2].astype(int))\n# img4 = array_to_img(self.imgList[3].astype(int))\n\n w = self.w1\n h = self.h1\n merge_img = Image.new('RGB', (w, h), 0xffffff)\n merge_img.paste(img1, (0, 0))\n# merge_img.paste(img2, (w, 0))\n# merge_img.paste(img3, (0, h))\n# merge_img.paste(img4, (w, h))\n merge_img.save(\"hy.jpg\")\n\n merge_img = merge_img.resize((self.inputW, self.inputH))\n\n img5 = img_to_array(merge_img).transpose(0, 1, 2)\n img5 = img5.astype(\"float32\")\n img5 = (img5 * (1. / 255)) - 0.5\n imgs = img5[None, :, :, :]\n # print (imgs.shape)\n\n action_id = self.model.predict(imgs, batch_size=1)\n action_list = np.argsort(-action_id, axis=1)\n\n self.imgList.pop(0)\n return int(action_list[0][0])\n\nclass C51Agent:\n\n def __init__(self, state_size, action_size, num_atoms, gameName):\n\n # get size of state and action\n self.state_size = state_size\n self.action_size = action_size\n\n # these is hyper parameters for the DQN\n self.gamma = 0.99\n self.learning_rate = 0.0001\n self.epsilon = 1.0\n self.initial_epsilon = 1.0\n self.final_epsilon = 0.0001\n self.batch_size = 32\n self.observe = 2000\n self.explore = 100000 # orig: 50000\n self.frame_per_action = 4\n self.update_target_freq = 3000\n self.timestep_per_train = 100 # Number of timesteps between training interval\n\n # Initialize Atoms\n self.num_atoms = num_atoms # 51 for C51\n self.v_max = 30 # Max possible score for Defend the center is 26 - 0.1*26 = 23.4\n self.v_min = -10 # -0.1*26 - 1 = -3.6\n self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)\n self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]\n\n # Create replay memory using deque\n self.memory = deque()\n self.max_memory = 100000 # orig: 50000 # number of previous transitions to remember\n\n # Models for value distribution\n self.model = None\n self.target_model = None\n\n # Performance Statistics\n self.stats_window_size = 50 # window size for computing rolling statistics\n self.mavg_score = [] # Moving Average of Survival Time\n self.var_score = [] # Variance of Survival Time\n self.mavg_ammo_left = [] # Moving Average of Ammo used\n self.mavg_kill_counts = [] # Moving Average of Kill Counts\n\n # sample picture number\n dataPath = \"ImitationData/\" + gameName\n 
if not os.path.exists(dataPath):\n os.mkdir(dataPath)\n imgPath = dataPath + \"/img\"\n if not os.path.exists(imgPath):\n os.mkdir(imgPath)\n self.sampleNum = 0\n self.imgPath = imgPath\n self.dataPath = dataPath\n self.cvsPath = dataPath + \"/test.csv\"\n self.sampleCSVFile = open(self.cvsPath, \"w\")\n self.sampleCSVWriter = csv.writer(self.sampleCSVFile)\n self.sampleCSVWriter.writerow([\"name\", \"action\", \"action_name\"])\n\n def update_target_model(self):\n \"\"\"\n After some time interval update the target model to be same with model\n \"\"\"\n self.target_model.set_weights(self.model.get_weights())\n\n def GenerateSamples(self, screen, action):\n self.sampleNum = self.sampleNum + 1\n t = time.time()\n now = int(round(t*1000))\n timeStr = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(now/1000))\n savedFileName = \"%s/doom-%s-%d.jpg\" % (self.imgPath, timeStr, self.sampleNum)\n self.sampleCSVWriter.writerow([savedFileName, action, DEATHMATCH_ACTION5_NAME[action]])\n self.sampleCSVFile.flush()\n\n # skimage.io.imsave(\"hy.jpg\", screen.transpose(1, 2, 0))\n dst = ResizeImg(screen, (256, 256))\n skimage.io.imsave(savedFileName, dst)\n return\n\n def get_action(self, state, bTrain=True):\n \"\"\"\n Get action from model using epsilon-greedy policy\n \"\"\"\n if bTrain:\n if np.random.rand() <= self.epsilon:\n action_idx = random.randrange(self.action_size)\n else:\n action_idx = self.get_optimal_action(state)\n else:\n action_idx = self.get_optimal_action(state)\n\n return action_idx\n\n def get_optimal_action(self, state):\n \"\"\"Get optimal action for a state\n \"\"\"\n z = self.model.predict(state) # Return a list [1x51, 1x51, 1x51]\n\n z_concat = np.vstack(z)\n q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) \n\n # Pick action with the biggest Q value\n action_idx = np.argmax(q)\n\n return action_idx\n\n def shape_reward(self, r_t, misc, prev_misc, t):\n\n # Check any kill count orig reward:\n # if (misc[0] > prev_misc[0]):\n # r_t = r_t + 1\n\n # if (misc[1] < prev_misc[1]): # Use ammo\n # r_t = r_t - 0.1\n\n # if (misc[2] < prev_misc[2]): # Loss HEALTH\n # r_t = r_t - 0.1\n\n # hy modify\n if (misc[0] > prev_misc[0]): # kill\n r_t = r_t + 1\n\n if (misc[1] < prev_misc[1]): # Use ammo\n r_t = r_t - 0.2\n\n if (misc[2] < prev_misc[2]): # Loss HEALTH\n r_t = r_t - 0.1\n\n return r_t\n\n # save sample <s,a,r,s'> to the replay memory\n def replay_memory(self, s_t, action_idx, r_t, s_t1, is_terminated, t):\n self.memory.append((s_t, action_idx, r_t, s_t1, is_terminated))\n if self.epsilon > self.final_epsilon and t > self.observe:\n self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore\n\n if len(self.memory) > self.max_memory:\n self.memory.popleft()\n\n # Update the target model to be same with model\n if t % self.update_target_freq == 0:\n self.update_target_model()\n\n # pick samples randomly from replay memory (with batch_size)\n def train_replay(self):\n\n num_samples = min(self.batch_size * self.timestep_per_train, len(self.memory))\n replay_samples = random.sample(self.memory, num_samples)\n\n state_inputs = np.zeros(((num_samples,) + self.state_size)) \n next_states = np.zeros(((num_samples,) + self.state_size)) \n m_prob = [np.zeros((num_samples, self.num_atoms)) for i in range(action_size)]\n action, reward, done = [], [], []\n\n for i in range(num_samples):\n state_inputs[i,:,:,:] = replay_samples[i][0]\n action.append(replay_samples[i][1])\n reward.append(replay_samples[i][2])\n next_states[i,:,:,:] = replay_samples[i][3]\n 
done.append(replay_samples[i][4])\n\n z = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]\n z_ = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]\n\n # Get Optimal Actions for the next states (from distribution z)\n optimal_action_idxs = []\n z_concat = np.vstack(z)\n q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) # length (num_atoms x num_actions)\n q = q.reshape((num_samples, action_size), order='F')\n optimal_action_idxs = np.argmax(q, axis=1)\n\n # Project Next State Value Distribution (of optimal action) to Current State\n for i in range(num_samples):\n if done[i]: # Terminal State\n # Distribution collapses to a single point\n Tz = min(self.v_max, max(self.v_min, reward[i]))\n bj = (Tz - self.v_min) / self.delta_z \n m_l, m_u = math.floor(bj), math.ceil(bj)\n m_prob[action[i]][i][int(m_l)] += (m_u - bj)\n m_prob[action[i]][i][int(m_u)] += (bj - m_l)\n else:\n for j in range(self.num_atoms):\n Tz = min(self.v_max, max(self.v_min, reward[i] + self.gamma * self.z[j]))\n bj = (Tz - self.v_min) / self.delta_z \n m_l, m_u = math.floor(bj), math.ceil(bj)\n m_prob[action[i]][i][int(m_l)] += z_[optimal_action_idxs[i]][i][j] * (m_u - bj)\n m_prob[action[i]][i][int(m_u)] += z_[optimal_action_idxs[i]][i][j] * (bj - m_l)\n\n loss = self.model.fit(state_inputs, m_prob, batch_size=self.batch_size, epochs=1, verbose=0)\n\n return loss.history['loss']\n\n # load the saved model\n def load_model(self, name):\n self.model.load_weights(name)\n\n # save the model which is under training\n def save_model(self, name):\n self.model.save_weights(name)\n\nif __name__ == \"__main__\":\n gameCfgFile = os.path.basename(gameCfg)\n gameName, extension = os.path.splitext(gameCfgFile)\n\n # Avoid Tensorflow eats up GPU memory\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n K.set_session(sess)\n\n game = DoomGame()\n game.load_config(gameCfg)\n game.set_sound_enabled(False)\n game.set_screen_resolution(ScreenResolution.RES_640X480)\n game.set_window_visible(True)\n game.init()\n\n game.new_episode()\n game_state = game.get_state()\n misc = game_state.game_variables # [KILLCOUNT, AMMO, HEALTH]\n prev_misc = misc\n\n action_size = game.get_available_buttons_size()\n\n img_rows, img_cols = 64, 64\n # Convert image into Black and white\n img_channels = 4 # We stack 4 frames\n\n # C51\n num_atoms = 51\n\n state_size = (img_rows, img_cols, img_channels)\n\n if bUseImitation:\n agent = CNNAction(gameName)\n else:\n agent = C51Agent(state_size, action_size, num_atoms, gameName)\n\n agent.model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)\n agent.target_model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)\n\n if not bTrain:\n file = \"./models/\" + \"c51_ddqn_\" + gameName + \".h5\"\n agent.load_model(file)\n\n # Start training\n epsilon = agent.initial_epsilon\n GAME = 0\n t = 0\n max_life = 0 # Maximum episode life (Proxy for agent performance)\n life = 0\n\n x_t = game_state.screen_buffer # 480 x 640\n x_t = preprocessImg(x_t, size=(img_rows, img_cols))\n s_t = np.stack(([x_t]*4), axis=2) # It becomes 64x64x4\n s_t = np.expand_dims(s_t, axis=0) # 1x64x64x4\n\n is_terminated = game.is_episode_finished()\n\n # Buffer to compute rolling statistics\n life_buffer, ammo_buffer, kills_buffer = [], [], []\n\n while not game.is_episode_finished():\n\n loss = 0\n r_t = 0\n a_t = np.zeros([action_size])\n\n # Epsilon Greedy\n 
if bUseImitation:\n action_idx = agent.next_action(game_state.screen_buffer)\n else:\n action_idx = agent.get_action(s_t, bTrain)\n \n if not bTrain and bRecordSamples:\n agent.GenerateSamples(game_state.screen_buffer, action_idx)\n nSamples += 1\n if nSamples > nMaxSamples:\n break\n\n a_t[action_idx] = 1\n\n a_t = a_t.astype(int)\n game.set_action(a_t.tolist())\n skiprate = agent.frame_per_action\n game.advance_action(skiprate)\n\n game_state = game.get_state() # Observe again after we take the action\n is_terminated = game.is_episode_finished()\n\n r_t = game.get_last_reward() # each frame we get reward of 0.1, so 4 frames will be 0.4\n\n if (is_terminated):\n if (life > max_life):\n max_life = life\n GAME += 1\n life_buffer.append(life)\n ammo_buffer.append(misc[1])\n kills_buffer.append(misc[0])\n # print (\"Episode Finish \", misc)\n print (\"Episode: lifetime(%d) ammo(%d) kills(%d)\" % (life, misc[1], misc[0]))\n game.new_episode()\n game_state = game.get_state()\n misc = game_state.game_variables\n x_t1 = game_state.screen_buffer\n\n x_t1 = game_state.screen_buffer\n misc = game_state.game_variables\n\n x_t1 = preprocessImg(x_t1, size=(img_rows, img_cols))\n x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))\n s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)\n\n if bUseImitation:\n r_t = 0\n else:\n r_t = agent.shape_reward(r_t, misc, prev_misc, t)\n\n if (is_terminated):\n life = 0\n else:\n life += 1\n\n # update the cache\n prev_misc = misc\n\n if not bUseImitation:\n if bTrain:\n # save the sample <s, a, r, s'> to the replay memory and decrease epsilon\n agent.replay_memory(s_t, action_idx, r_t, s_t1, is_terminated, t)\n\n # Do the training\n if t > agent.observe and t % agent.timestep_per_train == 0:\n loss = agent.train_replay()\n else:\n sleep(0.01)\n\n s_t = s_t1\n t += 1\n\n # save progress every 10000 iterations\n if not bUseImitation:\n if t % 10000 == 0 and bTrain:\n file = \"./models/\" + \"c51_ddqn_\" + gameName + \".h5\"\n print(\"Now we save model: %s\" %(file))\n agent.model.save_weights(file, overwrite=True)\n\n # print info\n state = \"\"\n if t <= agent.observe:\n state = \"observe\"\n elif t > agent.observe and t <= agent.observe + agent.explore:\n state = \"explore\"\n else:\n state = \"train\"\n else:\n state = \"observe\"\n\n if (is_terminated):\n if bUseImitation:\n print(\"TIME\", t, \"/ GAME\", GAME, \"/ ACTION\", action_idx,\n \"/ LIFE\", max_life, \"/ LOSS\", loss)\n else:\n print(\"TIME\", t, \"/ GAME\", GAME, \"/ STATE\", state,\n \"/ EPSILON\", agent.epsilon, \"/ ACTION\", action_idx, \n \"/ REWARD\", r_t, \"/ LIFE\", max_life, \"/ LOSS\", loss)\n \n # Training times.\n if GAME > 5000:\n break\n\n # Save Agent's Performance Statistics\n if bUseImitation:\n if GAME % agent.stats_window_size == 0:\n print(\"Update Rolling Statistics\")\n agent.mavg_score.append(np.mean(np.array(life_buffer)))\n agent.var_score.append(np.var(np.array(life_buffer)))\n agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))\n agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))\n\n # Reset rolling stats buffer\n life_buffer, ammo_buffer, kills_buffer = [], [], [] \n\n # Write Rolling Statistics to file\n with open(\"statistics/imitation_stats.txt\", \"w\") as stats_file:\n stats_file.write('Game: ' + str(GAME) + '\\n')\n stats_file.write('Max Score: ' + str(max_life) + '\\n')\n stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\\n')\n stats_file.write('var_score: ' + str(agent.var_score) + '\\n')\n stats_file.write('mavg_ammo_left: ' + 
str(agent.mavg_ammo_left) + '\\n')\n stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\\n')\n else:\n if GAME % agent.stats_window_size == 0 and t > agent.observe:\n print(\"Update Rolling Statistics\")\n agent.mavg_score.append(np.mean(np.array(life_buffer)))\n agent.var_score.append(np.var(np.array(life_buffer)))\n agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))\n agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))\n\n # Reset rolling stats buffer\n life_buffer, ammo_buffer, kills_buffer = [], [], [] \n\n # Write Rolling Statistics to file\n file = \"./statistics/\" + \"c51_ddqn_stats_\" + gameName + \".txt\"\n with open(file, \"w\") as stats_file:\n stats_file.write('Game: ' + str(GAME) + '\\n')\n stats_file.write('Max Score: ' + str(max_life) + '\\n')\n stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\\n')\n stats_file.write('var_score: ' + str(agent.var_score) + '\\n')\n stats_file.write('mavg_ammo_left: ' + str(agent.mavg_ammo_left) + '\\n')\n stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\\n')\n\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.reshape", "numpy.zeros", "numpy.rollaxis", "tensorflow.Session", "tensorflow.ConfigProto", "numpy.float32", "numpy.stack", "numpy.vstack", "numpy.argmax", "numpy.argsort", "numpy.append", "numpy.expand_dims" ] ]
tsmbland/andi_challenge
[ "18e6e1420d269066b3a7646e1525f017026edf4c" ]
[ "Task1_Exponent/Train/2D.py" ]
[ "import os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../..')\n\nfrom andi_funcs import TrackGeneratorRegression, import_tracks, import_labels, package_tracks\nfrom models import regression_model_2d\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.models import load_model\nimport numpy as np\n\n# Load validation data\ntracks_val = package_tracks(import_tracks('../../Datasets/Validation/task1.txt')[1], dimensions=2, max_T=1001)\nexponents_val = import_labels('../../Datasets/Validation/ref1.txt')[1]\ntracks_test = package_tracks(import_tracks('../../Datasets/Test/task1.txt')[1], dimensions=2, max_T=1001)\n\n# Run model\nmodel = regression_model_2d()\nmodel.compile(optimizer=Adam(learning_rate=0.001), loss='mse', metrics=['mae'])\nmodel.summary()\nhistory = model.fit(TrackGeneratorRegression(batches=200, batch_size=32, dimensions=2, min_T=5, max_T=1001),\n epochs=200,\n callbacks=[\n ModelCheckpoint(filepath='../Models/2D.h5', monitor='val_mae', save_best_only=True,\n mode='min')],\n validation_data=(tracks_val, exponents_val), use_multiprocessing=True, workers=16)\n\n# Save performance metrics\nnp.savetxt('2D_mae.txt', history.history['mae'])\nnp.savetxt('2D_val_mae.txt', history.history['val_mae'])\n\n# Evaluate on test data\nmodel = load_model('../Models/2D.h5')\nnp.savetxt('../../Datasets/Test/predictions_task1_2D.txt', model.predict(tracks_test, use_multiprocessing=True))\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.savetxt", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.ModelCheckpoint" ] ]
MingyuLi19910814/machine_learning_from_scratch
[ "e6fa52045e9cdcf38fe4c8c5f5577c3dade71281" ]
[ "supervised/Classifier/RandomForest.py" ]
[ "import numpy as np\nfrom DecisionTree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom collections import Counter\nfrom Evaluate import evaluate_classifier\n\n\nclass RandomForest:\n def __init__(self, n_estimators, max_depth, max_features):\n self.n_estimators = n_estimators\n self.trees = [DecisionTreeClassifier(max_depth, loss='gini', max_features=max_features) for _ in range(self.n_estimators)]\n def fit(self, X, Y):\n for i in range(self.n_estimators):\n idx = np.random.choice(X.shape[0], X.shape[0], replace=True)\n self.trees[i].fit(X[idx], Y[idx])\n\n def predict(self, X):\n ans = np.zeros(X.shape[0])\n for idx, x in enumerate(X):\n results = [tree.predict(x.reshape((1, -1)))[0] for tree in self.trees]\n ans[idx] = Counter(results).most_common(1)[0][0]\n return ans\n\n\nif __name__ == \"__main__\":\n n_estimators = 5\n max_depth = 5\n cls1 = RandomForest(n_estimators, max_depth, max_features=None)\n cls2 = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, max_features=None)\n evaluate_classifier(cls1, cls2)" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.random.choice", "numpy.zeros" ] ]
paul-cvp/e2ecorefpytorch
[ "51cb274f5916867b0a6d76314391e0843a541a27" ]
[ "allen/allen_self_attentive_span_extractor.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom overrides import overrides\n\nfrom allen.allen_params import Params\nfrom allen.allen_time_distributed import TimeDistributed\nimport allen.allen_util as util\n\n\nclass SelfAttentiveSpanExtractor(nn.Module):\n \"\"\"\n Computes span representations by generating an unnormalized attention score for each\n word in the document. Spans representations are computed with respect to these\n scores by normalising the attention scores for words inside the span.\n\n Given these attention distributions over every span, this module weights the\n corresponding vector representations of the words in the span by this distribution,\n returning a weighted representation of each span.\n\n Parameters\n ----------\n input_dim : ``int``, required.\n The final dimension of the ``sequence_tensor``.\n\n Returns\n -------\n attended_text_embeddings : ``torch.FloatTensor``.\n A tensor of shape (batch_size, num_spans, input_dim), which each span representation\n is formed by locally normalising a global attention over the sequence. The only way\n in which the attention distribution differs over different spans is in the set of words\n over which they are normalized.\n \"\"\"\n def __init__(self,\n input_dim: int) -> None:\n super().__init__()\n self._input_dim = input_dim\n self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))\n\n def get_input_dim(self) -> int:\n return self._input_dim\n\n def get_output_dim(self) -> int:\n return self._input_dim\n\n @overrides\n def forward(self,\n sequence_tensor: torch.FloatTensor,\n span_starts: torch.LongTensor,\n span_ends: torch.LongTensor,\n sequence_mask: torch.LongTensor = None,\n span_indices_mask: torch.LongTensor = None) -> torch.FloatTensor:\n sequence_tensor = sequence_tensor.unsqueeze(0)\n # both of shape (batch_size, num_spans, 1)\n span_starts = span_starts.unsqueeze(0).unsqueeze(-1)\n span_ends = span_ends.unsqueeze(0).unsqueeze(-1)\n if sequence_mask is not None:\n sequence_mask = sequence_mask.unsqueeze(0)\n if span_indices_mask is not None:\n span_indices_mask = span_indices_mask.unsqueeze(0)\n\n # shape (batch_size, num_spans, 1)\n # These span widths are off by 1, because the span ends are `inclusive`.\n span_widths = span_ends - span_starts\n\n # We need to know the maximum span width so we can\n # generate indices to extract the spans from the sequence tensor.\n # These indices will then get masked below, such that if the length\n # of a given span is smaller than the max, the rest of the values\n # are masked.\n max_batch_span_width = int(span_widths.max().data) + 1\n\n # shape (batch_size, sequence_length, 1)\n global_attention_logits = self._global_attention(sequence_tensor)\n\n # Shape: (1, 1, max_batch_span_width)\n max_span_range_indices = util.get_range_vector(max_batch_span_width,\n util.get_device_of(sequence_tensor)).view(1, 1, -1)\n # Shape: (batch_size, num_spans, max_batch_span_width)\n # This is a broadcasted comparison - for each span we are considering,\n # we are creating a range vector of size max_span_width, but masking values\n # which are greater than the actual length of the span.\n #\n # We're using <= here (and for the mask below) because the span ends are\n # inclusive, so we want to include indices which are equal to span_widths rather\n # than using it as a non-inclusive upper bound.\n span_mask = (max_span_range_indices <= span_widths).float()\n raw_span_indices = span_ends - max_span_range_indices\n # We also don't want to include 
span indices which are less than zero,\n # which happens because some spans near the beginning of the sequence\n # have an end index < max_batch_span_width, so we add this to the mask here.\n span_mask = span_mask * (raw_span_indices >= 0).float()\n span_indices = torch.nn.functional.relu(raw_span_indices.float()).long()\n\n # Shape: (batch_size * num_spans * max_batch_span_width)\n flat_span_indices = util.flatten_and_batch_shift_indices(span_indices, sequence_tensor.size(1))\n\n # Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)\n span_embeddings = util.batched_index_select(sequence_tensor, span_indices, flat_span_indices)\n\n # Shape: (batch_size, num_spans, max_batch_span_width)\n span_attention_logits = util.batched_index_select(global_attention_logits,\n span_indices,\n flat_span_indices).squeeze(-1)\n # Shape: (batch_size, num_spans, max_batch_span_width)\n span_attention_weights = util.last_dim_softmax(span_attention_logits, span_mask)\n\n # Do a weighted sum of the embedded spans with\n # respect to the normalised attention distributions.\n # Shape: (batch_size, num_spans, embedding_dim)\n attended_text_embeddings = util.weighted_sum(span_embeddings, span_attention_weights)\n\n if span_indices_mask is not None:\n # Above we were masking the widths of spans with respect to the max\n # span width in the batch. Here we are masking the spans which were\n # originally passed in as padding.\n return attended_text_embeddings * span_indices_mask.unsqueeze(-1).float()\n\n return attended_text_embeddings.squeeze(0)\n" ]
[ [ "torch.nn.Linear" ] ]
wangstone666/mmdetection
[ "5ce2f219c1cedd6ccd1e531232341497c893d7fe" ]
[ "mmdet/apis/inference.py" ]
[ "import warnings\n\nimport matplotlib.pyplot as plt\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.ops import RoIPool\nfrom mmcv.parallel import collate, scatter\nfrom mmcv.runner import load_checkpoint\n\nfrom mmdet.core import get_classes\nfrom mmdet.datasets.pipelines import Compose\nfrom mmdet.models import build_detector\n\n\ndef init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):\n \"\"\"Initialize a detector from config file.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n cfg_options (dict): Options to override some settings in the used\n config.\n\n Returns:\n nn.Module: The constructed detector.\n \"\"\"\n if isinstance(config, str):\n config = mmcv.Config.fromfile(config)\n elif not isinstance(config, mmcv.Config):\n raise TypeError('config must be a filename or Config object, '\n f'but got {type(config)}')\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n config.model.pretrained = None\n model = build_detector(config.model, test_cfg=config.test_cfg)\n if checkpoint is not None:\n map_loc = 'cpu' if device == 'cpu' else None\n checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n warnings.simplefilter('once')\n warnings.warn('Class names are not saved in the checkpoint\\'s '\n 'meta data, use COCO classes by default.')\n model.CLASSES = get_classes('coco')\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model\n\n\nclass LoadImage(object):\n \"\"\"A simple pipeline to load image.\"\"\"\n\n def __call__(self, results):\n \"\"\"Call function to load images into results.\n\n Args:\n results (dict): A result dict contains the file name\n of the image to be read.\n\n Returns:\n dict: ``results`` will be returned containing loaded image.\n \"\"\"\n if isinstance(results['img'], str):\n results['filename'] = results['img']\n results['ori_filename'] = results['img']\n else:\n results['filename'] = None\n results['ori_filename'] = None\n img = mmcv.imread(results['img'])\n results['img'] = img\n results['img_fields'] = ['img']\n results['img_shape'] = img.shape\n results['ori_shape'] = img.shape\n return results\n\n\ndef inference_detector(model, img):\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str/ndarray or list[str/ndarray]): Either image files or loaded\n images.\n\n Returns:\n If imgs is a str, a generator will be returned, otherwise return the\n detection results directly.\n \"\"\"\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # prepare data\n if isinstance(img, np.ndarray):\n # directly add img\n data = dict(img=img)\n cfg = cfg.copy()\n # set loading pipeline type\n cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n else:\n # add information into dict\n data = dict(img_info=dict(filename=img), img_prefix=None)\n # build the data pipeline\n test_pipeline = Compose(cfg.data.test.pipeline)\n data = test_pipeline(data)\n data = collate([data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n # scatter to specified GPU\n data = scatter(data, [device])[0]\n else:\n for m in model.modules():\n assert not isinstance(\n m, RoIPool\n ), 'CPU inference with RoIPool is not supported currently.'\n # just get the 
actual data from DataContainer\n data['img_metas'] = data['img_metas'][0].data\n\n # forward the model\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)[0]\n return result\n\n\nasync def async_inference_detector(model, img):\n \"\"\"Async inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str | ndarray): Either image files or loaded images.\n\n Returns:\n Awaitable detection results.\n \"\"\"\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # prepare data\n if isinstance(img, np.ndarray):\n # directly add img\n data = dict(img=img)\n cfg = cfg.copy()\n # set loading pipeline type\n cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n else:\n # add information into dict\n data = dict(img_info=dict(filename=img), img_prefix=None)\n # build the data pipeline\n test_pipeline = Compose(cfg.data.test.pipeline)\n data = test_pipeline(data)\n data = scatter(collate([data], samples_per_gpu=1), [device])[0]\n\n # We don't restore `torch.is_grad_enabled()` value during concurrent\n # inference since execution can overlap\n torch.set_grad_enabled(False)\n result = await model.aforward_test(rescale=True, **data)\n return result\n\n\ndef show_result_pyplot(model,\n img,\n result,\n score_thr=0.3,\n fig_size=(15, 10),\n title='result',\n block=True,show=False):\n \"\"\"Visualize the detection results on the image.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str or np.ndarray): Image filename or loaded image.\n result (tuple[list] or list): The detection result, can be either\n (bbox, segm) or just bbox.\n score_thr (float): The threshold to visualize the bboxes and masks.\n fig_size (tuple): Figure size of the pyplot figure.\n title (str): Title of the pyplot figure.\n block (bool): Whether to block GUI.\n \"\"\"\n if hasattr(model, 'module'):\n model = model.module\n img = model.show_result(img, result, score_thr=score_thr, show=False)\n if show:\n plt.figure(figsize=fig_size)\n plt.imshow(mmcv.bgr2rgb(img))\n plt.title(title)\n plt.tight_layout()\n plt.show(block=block)\n return img\n" ]
[ [ "torch.no_grad", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "torch.set_grad_enabled" ] ]
shervinazadi/topoGenesis
[ "5c73a5adf4afbda781540c6c08d24e2da62810b8" ]
[ "examples/py/gradient_decent.py" ]
[ "from time import sleep # for delaying between iterations\nimport click # for cleaning the command line\nimport topogenesis as tg\nimport numpy as np\nnp.random.seed(0)\n\n# create a step one moore neighbourhood\ns = tg.create_stencil(\"von_neumann\", 1)\n\n\n# assign the arg-minimum function\ns.function = tg.sfunc.argmin\n\n\"\"\"\nprint(s)\n[[[0 0 0]\n [0 1 0]\n [0 0 0]]\n\n [[0 1 0]\n [1 1 1]\n [0 1 0]]\n\n [[0 0 0]\n [0 1 0]\n [0 0 0]]]\n\"\"\"\n\n# initialize a 2d lattice with random values\nr = np.random.rand(1, 5, 5)\nl_vals = tg.to_lattice(r, [0, 0, 0])\n\n\"\"\"\nprint(l_vals)\n[[[0.5488135 0.71518937 0.60276338 0.54488318 0.4236548 ]\n [0.64589411 0.43758721 0.891773 0.96366276 0.38344152]\n [0.79172504 0.52889492 0.56804456 0.92559664 0.07103606]\n [0.0871293 0.0202184 0.83261985 0.77815675 0.87001215]\n [0.97861834 0.79915856 0.46147936 0.78052918 0.11827443]]]\n\"\"\"\n\n# initialize walkers lattice\nz = np.zeros((1, 5, 5))\nl_walk = tg.to_lattice(z, [0, 0, 0])\nl_walk[0, 2, 2] += 1\n\n\"\"\"\nprint(l_walk)\n[[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 1. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]]\n\"\"\"\n\n# retrieve lattice indices\nl_inds = l_vals.indices\n\n\"\"\"\nprint(l_inds)\n[[[ 0 1 2 3 4]\n [ 5 6 7 8 9]\n [10 11 12 13 14]\n [15 16 17 18 19]\n [20 21 22 23 24]]]\n\"\"\"\n\n# main iteration forloop\nfor i in range(20):\n\n # clear the print console\n click.clear()\n\n # print the state of the lattice\n print(l_walk)\n print(l_vals)\n\n # apply the stencil (function) to the lattice\n local_min_neighbour = l_vals.arg_apply_stencil(\n l_inds, s, border_condition=\"pad_outside\", padding_value=1.0)\n\n # convert the current positions id and selected neighbour id to lattice indices\n old_pos = np.array(np.unravel_index(l_inds[l_walk > 0], l_walk.shape))\n new_pos = np.array(np.unravel_index(\n local_min_neighbour[l_walk > 0], l_walk.shape))\n\n # apply the movements\n l_walk[old_pos[0], old_pos[1], old_pos[2]] -= 1\n l_walk[new_pos[0], new_pos[1], new_pos[2]] += 1\n\n # wait for 0.3 seconds\n sleep(.3)\n" ]
[ [ "numpy.random.seed", "numpy.unravel_index", "numpy.random.rand", "numpy.zeros" ] ]
gml-explore/gradual-ml
[ "cc3b0806498798c394f844980d268a7ceac2228d" ]
[ "src/gradual-ml/numbskull_extend/inference.py" ]
[ "\"\"\"TODO.\"\"\"\n\nfrom __future__ import print_function, absolute_import\nimport numba\nfrom numba import jit\nimport numpy as np\nimport math\n\n\n@jit(nopython=True, cache=True, nogil=True)\ndef gibbsthread(shardID, nshards, var_copy, weight_copy, weight, variable,\n factor, fmap, vmap, factor_index, Z, cstart,\n count, var_value, weight_value, sample_evidence, burnin):\n \"\"\"TODO.\"\"\"\n # Indentify start and end variable\n nvar = variable.shape[0]\n start = (shardID * nvar) // nshards\n end = ((shardID + 1) * nvar) // nshards\n # TODO: give option do not store result, or just store tally\n for var_samp in range(start, end):\n if variable[var_samp][\"isEvidence\"] == 4:\n # This variable is not owned by this machine\n continue\n if variable[var_samp][\"isEvidence\"] == 0 or sample_evidence:\n v = draw_sample(var_samp, var_copy, weight_copy, weight, variable,\n factor, fmap, vmap, factor_index, Z[shardID],\n var_value, weight_value)\n var_value[var_copy][var_samp] = v\n if not burnin:\n if variable[var_samp][\"cardinality\"] == 2:\n count[cstart[var_samp]] += v\n else:\n count[cstart[var_samp] + v] += 1\n\n\n@jit(nopython=True, cache=True, nogil=True)\ndef draw_sample(var_samp, var_copy, weight_copy, weight, variable, factor,\n fmap, vmap, factor_index, Z, var_value, weight_value):\n \"\"\"TODO.\"\"\"\n cardinality = variable[var_samp][\"cardinality\"]\n for value in range(cardinality):\n Z[value] = np.exp(potential(var_samp, value, var_copy, weight_copy,\n weight, variable, factor, fmap,\n vmap, factor_index, var_value,\n weight_value))\n\n for j in range(1, cardinality):\n Z[j] += Z[j - 1]\n\n z = np.random.rand() * Z[cardinality - 1]\n\n return np.argmax(Z[:cardinality] >= z)\n\n\n\n@jit(nopython=True, cache=True, nogil=True)\ndef potential(var_samp, value, var_copy, weight_copy, weight, variable, factor,\n fmap, vmap, factor_index, var_value, weight_value):\n \"\"\"TODO.\"\"\"\n p = 0.0\n varval_off = value\n if variable[var_samp][\"dataType\"] == 0:\n varval_off = 0\n vtf = vmap[variable[var_samp][\"vtf_offset\"] + varval_off]\n start = vtf[\"factor_index_offset\"]\n end = start + vtf[\"factor_index_length\"]\n for k in range(start, end):\n factor_id = factor_index[k]\n if weight[factor[factor_id]['weightId']]['parameterize']:\n a = weight[factor[factor_id]['weightId']]['a']\n b = weight[factor[factor_id]['weightId']]['b']\n x = fmap[factor[factor_id][\"ftv_offset\"]]['x']\n theta = fmap[factor[factor_id][\"ftv_offset\"]]['theta']\n w = weight_value[weight_copy][factor[factor_id][\"weightId\"]] = theta * a * (x-b)\n else:\n w = weight_value[weight_copy][factor[factor_id][\"weightId\"]]\n p += w * eval_factor(factor_id, var_samp, value, var_copy, variable,\n factor, fmap, var_value)\n return p\n\n\nFACTORS = {\n # Factor functions for boolean variables\n \"NOOP\": -1,\n \"IMPLY_NATURAL\": 0,\n \"OR\": 1,\n \"EQUAL\": 3,\n \"AND\": 2,\n \"ISTRUE\": 4,\n \"LINEAR\": 7,\n \"RATIO\": 8,\n \"LOGICAL\": 9,\n \"IMPLY_MLN\": 13,\n\n # Factor functions for categorical variables\n \"AND_CAT\": 12,\n \"OR_CAT\": 14,\n \"EQUAL_CAT_CONST\": 15,\n \"IMPLY_NATURAL_CAT\": 16,\n \"IMPLY_MLN_CAT\": 17,\n\n # Factor functions for generative models for data programming.\n #\n # These functions accept two types of categorical variables:\n #\n # y \\in {1, -1} corresponding to latent labels, and\n # l \\in {1, 0, -1} corresponding to labeling function outputs.\n #\n # The values of y are mapped to Numbskull variables y_index\n # via {-1: 0, 1: 1}, and\n # the values of l are mapped to Numbskull 
variables l_index\n # via {-1: 0, 0: 1, 1: 2}.\n\n # h(y) := y\n \"DP_GEN_CLASS_PRIOR\": 18,\n\n # h(l) := l\n \"DP_GEN_LF_PRIOR\": 19,\n\n # h(l) := l * l\n \"DP_GEN_LF_PROPENSITY\": 20,\n\n # h(y, l) := y * l\n \"DP_GEN_LF_ACCURACY\": 21,\n\n # h(l) := y * l * l\n \"DP_GEN_LF_CLASS_PROPENSITY\": 22,\n\n # l_2 fixes errors made by l_1\n #\n # h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,\n # elif l_1 == -1 * y and l_2 == y: 1,\n # else: 0\n \"DP_GEN_DEP_FIXING\": 23,\n\n # l_2 reinforces the output of l_1\n #\n # h(y, l_1, l_2) := if l_1 == 0 and l_2 != 0: -1,\n # elif l_1 == y and l_2 == y: 1,\n # else: 0\n \"DP_GEN_DEP_REINFORCING\": 24,\n\n # h(l_1, l_2) := if l_1 != 0 and l_2 != 0: -1, else: 0\n \"DP_GEN_DEP_EXCLUSIVE\": 25,\n\n #h(l_1, l_2) := if l_1 == l_2: 1, else: 0\n \"DP_GEN_DEP_SIMILAR\": 26,\n\n # Factor functions for distribution\n \"UFO\": 30\n}\n\nfor (key, value) in FACTORS.items():\n exec(\"FUNC_\" + key + \" = \" + str(value))\n\n\n@jit(nopython=True, cache=True, nogil=True)\ndef eval_factor(factor_id, var_samp, value, var_copy, variable, factor, fmap,\n var_value):\n \"\"\"TODO.\"\"\"\n ####################\n # BINARY VARIABLES #\n ####################\n fac = factor[factor_id]\n ftv_start = fac[\"ftv_offset\"]\n ftv_end = ftv_start + fac[\"arity\"]\n\n if fac[\"factorFunction\"] == FUNC_NOOP:\n return 0\n\n elif fac[\"factorFunction\"] == FUNC_IMPLY_NATURAL:\n for l in range(ftv_start, ftv_end):\n v = value if (fmap[l][\"vid\"] == var_samp) else \\\n var_value[var_copy][fmap[l][\"vid\"]]\n if v == 0:\n # Early return if body is not satisfied\n return 0\n\n # If this point is reached, body must be true\n l = ftv_end - 1\n head = value if (fmap[l][\"vid\"] == var_samp) else \\\n var_value[var_copy][fmap[l][\"vid\"]]\n if head:\n return 1\n return -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_OR:\n for l in range(ftv_start, ftv_end):\n v = value if (fmap[l][\"vid\"] == var_samp) else \\\n var_value[var_copy][fmap[l][\"vid\"]]\n if v == 1:\n return 1\n return -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_EQUAL:\n v = value if (fmap[ftv_start][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[ftv_start][\"vid\"]]\n for l in range(ftv_start + 1, ftv_end):\n w = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v != w:\n return -1\n return 1\n elif factor[factor_id][\"factorFunction\"] == FUNC_AND \\\n or factor[factor_id][\"factorFunction\"] == FUNC_ISTRUE:\n for l in range(ftv_start, ftv_end):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v == 0:\n return -1\n return 1\n elif factor[factor_id][\"factorFunction\"] == FUNC_LINEAR:\n res = 0\n head = value if (fmap[ftv_end - 1][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[ftv_end - 1][\"vid\"]]\n for l in range(ftv_start, ftv_end - 1):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v == head:\n res += 1\n # This does not match Dimmwitted, but matches the eq in the paper\n return res\n elif factor[factor_id][\"factorFunction\"] == FUNC_RATIO:\n res = 1\n head = value if (fmap[ftv_end - 1][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[ftv_end - 1][\"vid\"]]\n for l in range(ftv_start, ftv_end - 1):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v == head:\n res += 1\n # This does not match Dimmwitted, but matches the eq in the paper\n return math.log(res) # TODO: use log2?\n elif 
factor[factor_id][\"factorFunction\"] == FUNC_LOGICAL:\n head = value if (fmap[ftv_end - 1][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[ftv_end - 1][\"vid\"]]\n for l in range(ftv_start, ftv_end - 1):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v == head:\n return 1\n return 0\n elif factor[factor_id][\"factorFunction\"] == FUNC_IMPLY_MLN:\n for l in range(ftv_start, ftv_end - 1):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v == 0:\n # Early return if body is not satisfied\n return 1\n\n # If this point is reached, body must be true\n l = ftv_end - 1\n head = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][l]\n if head:\n return 1\n return 0\n\n #########################\n # CATEGORICAL VARIABLES #\n #########################\n elif factor[factor_id][\"factorFunction\"] == FUNC_AND_CAT \\\n or factor[factor_id][\"factorFunction\"] == FUNC_EQUAL_CAT_CONST:\n for l in range(ftv_start, ftv_end):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v != fmap[l][\"dense_equal_to\"]:\n return 0\n return 1\n elif factor[factor_id][\"factorFunction\"] == FUNC_OR_CAT:\n for l in range(ftv_start, ftv_end):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v == fmap[l][\"dense_equal_to\"]:\n return 1\n return -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_IMPLY_NATURAL_CAT:\n for l in range(ftv_start, ftv_end - 1):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v != fmap[l][\"dense_equal_to\"]:\n # Early return if body is not satisfied\n return 0\n\n # If this point is reached, body must be true\n l = ftv_end - 1\n head = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][l]\n if head == fmap[l][\"dense_equal_to\"]:\n return 1\n return -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_IMPLY_MLN_CAT:\n for l in range(ftv_start, ftv_end - 1):\n v = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][fmap[l][\"vid\"]]\n if v != fmap[l][\"dense_equal_to\"]:\n # Early return if body is not satisfied\n return 1\n\n # If this point is reached, body must be true\n l = ftv_end - 1\n head = value if (fmap[l][\"vid\"] == var_samp) \\\n else var_value[var_copy][l]\n if head == fmap[l][\"dense_equal_to\"]:\n return 1\n return 0\n\n #####################\n # DATA PROGRAMMING #\n # GENERATIVE MODELS #\n #####################\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_CLASS_PRIOR:\n # NB: this doesn't make sense for categoricals\n y_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n return 1 if y_index == 1 else -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_LF_PRIOR:\n # NB: this doesn't make sense for categoricals\n l_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n if l_index == 2:\n return -1\n elif l_index == 0:\n return 0\n else:\n return 1\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_LF_PROPENSITY:\n l_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n abstain = variable[fmap[ftv_start][\"vid\"]][\"cardinality\"] - 1\n return 0 if l_index == abstain else 1\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_LF_ACCURACY:\n y_index = 
value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n l_index = value if fmap[ftv_start + 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 1][\"vid\"]]\n abstain = variable[fmap[ftv_start + 1][\"vid\"]][\"cardinality\"] - 1\n if l_index == abstain:\n return 0\n elif y_index == l_index:\n return 1\n else:\n return -1\n elif factor[factor_id][\"factorFunction\"] == \\\n FUNC_DP_GEN_LF_CLASS_PROPENSITY:\n # NB: this doesn't make sense for categoricals\n y_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n l_index = value if fmap[ftv_start + 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 1][\"vid\"]]\n abstain = variable[fmap[ftv_start + 1][\"vid\"]][\"cardinality\"] - 1\n if l_index == abstain:\n return 0\n elif y_index == 1:\n return 1\n else:\n return -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_DEP_FIXING:\n # NB: this doesn't make sense for categoricals\n y_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n l1_index = value if fmap[ftv_start + 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 1][\"vid\"]]\n l2_index = value if fmap[ftv_start + 2][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 2][\"vid\"]]\n abstain = variable[fmap[ftv_start + 1][\"vid\"]][\"cardinality\"] - 1\n if l1_index == abstain:\n return -1 if l2_index != 1 else 0\n elif l1_index == 0 and l2_index == 1 and y_index == 1:\n return 1\n elif l1_index == 1 and l2_index == 0 and y_index == 0:\n return 1\n else:\n return 0\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_DEP_REINFORCING:\n # NB: this doesn't make sense for categoricals\n y_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n l1_index = value if fmap[ftv_start + 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 1][\"vid\"]]\n l2_index = value if fmap[ftv_start + 2][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 2][\"vid\"]]\n abstain = variable[fmap[ftv_start + 1][\"vid\"]][\"cardinality\"] - 1\n if l1_index == abstain:\n return -1 if l2_index != 1 else 0\n elif l1_index == 0 and l2_index == 0 and y_index == 0:\n return 1\n elif l1_index == 1 and l2_index == 1 and y_index == 1:\n return 1\n else:\n return 0\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_DEP_EXCLUSIVE:\n l1_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n l2_index = value if fmap[ftv_start + 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 1][\"vid\"]]\n abstain = variable[fmap[ftv_start][\"vid\"]][\"cardinality\"] - 1\n return 0 if l1_index == abstain or l2_index == abstain else -1\n elif factor[factor_id][\"factorFunction\"] == FUNC_DP_GEN_DEP_SIMILAR:\n l1_index = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start][\"vid\"]]\n l2_index = value if fmap[ftv_start + 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + 1][\"vid\"]]\n return 1 if l1_index == l2_index else 0\n\n ###########################################\n # FACTORS FOR OPTIMIZING DISTRIBUTED CODE #\n ###########################################\n elif factor[factor_id][\"factorFunction\"] == FUNC_UFO:\n v = value if fmap[ftv_start][\"vid\"] == var_samp else \\\n 
var_value[var_copy][fmap[ftv_start][\"vid\"]]\n if v == 0:\n return 0\n\n return value if fmap[ftv_start + v - 1][\"vid\"] == var_samp else \\\n var_value[var_copy][fmap[ftv_start + v - 1][\"vid\"]]\n\n ######################\n # FACTOR NOT DEFINED #\n ######################\n else: # FUNC_UNDEFINED\n print(\"Error: Factor Function\", factor[factor_id][\"factorFunction\"],\n \"( used in factor\", factor_id, \") is not implemented.\")\n raise NotImplementedError(\"Factor function is not implemented.\")\n" ]
[ [ "numpy.argmax", "numpy.random.rand" ] ]
tjuwlz/NER
[ "a84b11650f2ec2d531e5925f5bab0726eb7c7af3" ]
[ "NER_v2/modules/rnn_encoder.py" ]
[ "import torch\nimport torch.nn as nn\n\n\n# 通用的RNN\nclass RNNEncoder(nn.Module):\n def __init__(self, input_size, # 输入的特征维度\n hidden_size, # 隐层特征维度\n num_layers=1, # RNN层数\n batch_first=True, # (batch_size, seq_len, feature_size)\n bidirectional=False, # 是否是双向RNN\n dropout=0.2, # RNN层与层之间是否dropout,\n rnn_type='lstm'\n ):\n super(RNNEncoder, self).__init__()\n\n self._batch_first = batch_first\n self._hidden_size = hidden_size\n self._num_layers = num_layers\n self._bidirectional = bidirectional\n self._drop_out = dropout\n self._num_directions = 2 if bidirectional else 1\n self._rnn_type = rnn_type.upper()\n self._RNNs = ['RNN', 'GRU', 'LSTM']\n assert self._rnn_type in self._RNNs\n\n if self._rnn_type == 'RNN':\n self._rnn_cell = nn.RNNCell\n elif self._rnn_type == 'GRU':\n self._rnn_cell = nn.GRUCell\n elif self._rnn_type == 'LSTM':\n self._rnn_cell = nn.LSTMCell\n\n self.fw_cells = nn.ModuleList()\n self.bw_cells = nn.ModuleList()\n for layer_i in range(num_layers):\n layer_input_size = input_size if layer_i == 0 else hidden_size * self._num_directions\n self.fw_cells.append(self._rnn_cell(layer_input_size, hidden_size))\n if self._bidirectional:\n self.bw_cells.append(self._rnn_cell(layer_input_size, hidden_size))\n\n def _forward(self, cell, inputs, init_hidden, mask):\n '''\n :param inputs: [seq_len, batch_size, input_size]\n :param init_hidden: [batch_size, hidden_size], if it is LSTM, init_hidden is tuple type\n :param mask: [seq_len, batch_size, hidden_size]\n :return: [seq_len, batch_size, hidden_size]\n '''\n seq_len = inputs.shape[0] # inputs.size(0)\n fw_next = init_hidden\n outputs = []\n for xi in range(seq_len):\n if self._rnn_type == 'LSTM':\n # LSTMCell\n # init_hidden: (h0, c0)\n h_next, c_next = cell(inputs[xi], fw_next)\n h_next = h_next * mask[xi] + init_hidden[0] * (1-mask[xi])\n c_next = c_next * mask[xi] + init_hidden[1] * (1-mask[xi])\n fw_next = (h_next, c_next)\n outputs.append(h_next)\n else:\n # RNNCell / GRUCell\n # init_hidden: h0\n fw_next = cell(inputs[xi], fw_next)\n fw_next = fw_next * mask[xi] + init_hidden * (1-mask[xi])\n outputs.append(fw_next)\n\n return torch.stack(tuple(outputs), dim=0), fw_next\n\n def _backward(self, cell, inputs, init_hidden, mask):\n '''\n :param inputs: [seq_len, batch_size, input_size]\n :param init_hidden: [batch_size, hidden_size], if it is LSTM, init_hidden is tuple type\n :param mask: [seq_len, batch_size, hidden_size]\n :return: [seq_len, batch_size, hidden_size]\n '''\n seq_len = inputs.shape[0] # inputs.size(0)\n bw_next = init_hidden\n outputs = []\n for xi in reversed(range(seq_len)):\n if self._rnn_type == 'LSTM':\n # LSTMCell\n # init_hidden: (h0, c0)\n h_next, c_next = cell(inputs[xi], bw_next)\n h_next = h_next * mask[xi] + init_hidden[0] * (1 - mask[xi])\n c_next = c_next * mask[xi] + init_hidden[1] * (1 - mask[xi])\n bw_next = (h_next, c_next)\n outputs.append(h_next)\n else:\n # RNNCell / GRUCell\n # init_hidden: h0\n bw_next = cell(inputs[xi], bw_next)\n bw_next = bw_next * mask[xi] + init_hidden * (1 - mask[xi])\n outputs.append(bw_next)\n outputs.reverse()\n return torch.stack(tuple(outputs), dim=0), bw_next\n\n def _init_hidden(self, batch_size, device=torch.device('cpu')):\n h0 = torch.zeros(batch_size, self._hidden_size, device=device)\n if self._rnn_type == 'LSTM':\n return h0, h0\n else:\n return h0\n\n # 自定义dropout: 所有节点的输出以概率p被置为0\n def _dropout(self, inputs, p=0.5, training=False):\n if training:\n if inputs.dim() == 2: # [batch_size, input_size]\n drop_mask = torch.zeros(inputs.shape, 
device=inputs.device).fill_(1 - p)\n # 所有元素以概率(1-p)被置成1\n drop_mask = torch.bernoulli(drop_mask) # 两点分布,只返回0或1\n # 因为评估的时候不需要dropout,为了保证期望值一样(保证网络的每一层在训练阶段和测试阶段数据分布相同),因此需要除以(1-p)\n inputs *= drop_mask\n inputs /= (1 - p)\n elif inputs.dim() == 3: # [seq_len, batch_size, input_size]\n drop_mask = torch.zeros((inputs.size(1), inputs.size(2)), device=inputs.device).fill_(1 - p)\n # 所有元素以概率(1-p)被置成1\n drop_mask = torch.bernoulli(drop_mask) # 两点分布,只返回0或1\n # [batch_size, input_size, seq_len] -> [seq_len, batch_size, input_size]\n drop_mask = drop_mask.unsqueeze(-1).expand((-1, -1, inputs.size(0))).permute((2, 0, 1))\n inputs *= drop_mask\n inputs /= (1 - p)\n return inputs\n\n def forward(self, inputs, mask, init_hidden=None):\n '''\n :param inputs: [batch_size, seq_len, input_size]\n :param mask: [batch_size, seq_len]\n :param init_hidden: [batch_size, hidden_size]\n :return:\n out: [batch_size, seq_len, hidden_size * num_directions]\n hidden: [num_layer, batch_size, hidden_size * num_directions]\n '''\n if self._batch_first:\n # [seq_len, batch_size, input_size]\n inputs = inputs.transpose(0, 1)\n # [seq_len, batch_size]\n mask = mask.transpose(0, 1)\n\n # [seq_len, batch_size] -> [seq_len, batch_size, 1]\n # -> [seq_len, batch_size, hidden_size]\n mask = mask.float().unsqueeze(dim=2).expand((-1, -1, self._hidden_size))\n\n batch_size = inputs.shape[1]\n if init_hidden is None:\n init_hidden = self._init_hidden(batch_size, inputs.device)\n\n hn, cn = [], []\n for i in range(self._num_layers):\n if i != 0:\n inputs = self._dropout(inputs, p=self._drop_out, training=self.training)\n # fw_out: [seq_len, batch_size, hidden_size]\n # fw_hidden: (hn, cn) [batch_size, hidden_size]\n fw_out, fw_hidden = self._forward(self.fw_cells[i], inputs, init_hidden, mask)\n bw_out, bw_hidden = None, None\n if self._bidirectional:\n bw_out, bw_hidden = self._backward(self.bw_cells[i], inputs, init_hidden, mask)\n\n if self._rnn_type == 'LSTM':\n hn.append(torch.cat((fw_hidden[0], bw_hidden[0]), dim=1) if self._bidirectional else fw_hidden[0])\n cn.append(torch.cat((fw_hidden[1], bw_hidden[1]), dim=1) if self._bidirectional else bw_hidden[1])\n else:\n # RNN / GRU\n hn.append(torch.cat(fw_hidden, bw_hidden) if self._bidirectional else fw_hidden)\n\n # out: [seq_len, batch_size, hidden_size * num_directions]\n inputs = torch.cat((fw_out, bw_out), dim=2) if self._bidirectional else fw_out\n\n # [batch_size, seq_len, hidden_size * num_directions]\n output = inputs.transpose(0, 1) if self._batch_first else inputs\n\n hn = torch.stack(tuple(hn), dim=0)\n # hn, cn: [num_layer, batch_size, hidden_size * num_directions]\n if self._rnn_type == 'LSTM':\n cn = torch.stack(tuple(cn), dim=0)\n hidden = (hn, cn)\n else:\n hidden = hn\n\n return output, hidden\n" ]
[ [ "torch.zeros", "torch.device", "torch.cat", "torch.nn.ModuleList", "torch.bernoulli" ] ]
BBN-Q/pyqgl2
[ "7acc8b244ee7799c21df03ecff8325e15cdb94d3" ]
[ "src/python/qgl2/basic_sequences/SPAM.py" ]
[ "# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.\n\nfrom qgl2.qgl2 import qgl2decl, qreg, qgl2main, pulse, QRegister\n\nfrom qgl2.qgl1 import X, U, Y90, X90, MEAS, Id\n\nfrom qgl2.util import init\n\nfrom itertools import chain\nfrom numpy import pi\n\n@qgl2decl\ndef spam_seqs(angle, qubit: qreg, maxSpamBlocks=10):\n \"\"\" Helper function to create a list of sequences increasing SPAM blocks with a given angle. \"\"\"\n #SPAMBlock = [X(qubit), U(qubit, phase=pi/2+angle), X(qubit), U(qubit, phase=pi/2+angle)]\n #return [[Y90(qubit)] + SPAMBlock*rep + [X90(qubit)] for rep in range(maxSpamBlocks)]\n for rep in range(maxSpamBlocks):\n init(qubit)\n Y90(qubit)\n for _ in range(rep):\n X(qubit)\n U(qubit, phase=pi/2+angle)\n X(qubit)\n U(qubit, phase=pi/2+angle)\n X90(qubit)\n MEAS(qubit)\n\n@qgl2decl\ndef SPAM(qubit: qreg, angleSweep, maxSpamBlocks=10):\n \"\"\"\n X-Y sequence (X-Y-X-Y)**n to determine quadrature angles or mixer correction.\n\n Parameters\n ----------\n qubit : logical channel to implement sequence (LogicalChannel) \n angleSweep : angle shift to sweep over\n maxSpamBlocks : maximum number of XYXY block to do\n \"\"\"\n # Original:\n # def spam_seqs(angle):\n # \"\"\" Helper function to create a list of sequences increasing SPAM blocks with a given angle. \"\"\"\n # SPAMBlock = [X(qubit), U(qubit, phase=pi/2+angle), X(qubit), U(qubit, phase=pi/2+angle)]\n # return [[Y90(qubit)] + SPAMBlock*rep + [X90(qubit)] for rep in range(maxSpamBlocks)]\n\n # # Insert an identity at the start of every set to mark them off\n # seqs = list(chain.from_iterable([[[Id(qubit)]] + spam_seqs(angle) for angle in angleSweep]))\n\n # # Add a final pi for reference\n # seqs.append([X(qubit)])\n\n # # Add the measurment block to every sequence\n # measBlock = MEAS(qubit)\n # for seq in seqs:\n # seq.append(measBlock)\n\n # fileNames = compile_to_hardware(seqs, 'SPAM/SPAM')\n # print(fileNames)\n\n # if showPlot:\n # plot_pulse_files(fileNames)\n\n # Insert an identity at the start of every set to mark them off\n for angle in angleSweep:\n init(qubit)\n Id(qubit)\n MEAS(qubit)\n spam_seqs(angle, qubit, maxSpamBlocks)\n\n # Add a final pi for reference\n init(qubit)\n X(qubit)\n MEAS(qubit)\n\n# compileAndPlot('SPAM/SPAM', showPlot)\n\n# QGL1 function to compile the above QGL2\n# Uses main.py\n# FIXME: Use the same argument parsing as main.py\ndef main():\n from pyqgl2.qreg import QRegister\n import pyqgl2.test_cl\n from pyqgl2.main import compile_function, qgl2_compile_to_hardware\n import numpy as np\n\n toHW = True\n plotPulses = True\n pyqgl2.test_cl.create_default_channelLibrary(toHW, True)\n\n# # To turn on verbose logging in compile_function\n# from pyqgl2.ast_util import NodeError\n# from pyqgl2.debugmsg import DebugMsg\n# NodeError.MUTE_ERR_LEVEL = NodeError.NODE_ERROR_NONE\n# DebugMsg.set_level(0)\n\n # Now compile the QGL2 to produce the function that would generate the expected sequence.\n # Supply the path to the QGL2, the main function in that file, and a list of the args to that function.\n # Can optionally supply saveOutput=True to save the qgl1.py\n # file,\n # and intermediate_output=\"path-to-output-file\" to save\n # intermediate products\n\n # Pass in a QRegister NOT the real Qubit\n q = QRegister(1)\n\n # SPAM(q1, np.linspace(0, pi/2, 11))\n # - test_basic_mins uses np.linspace(0,1,11)\n\n # Here we know the function is in the current file\n # You could use os.path.dirname(os.path.realpath(__file)) to find files relative to this script,\n # Or os.getcwd() to 
get files relative to where you ran from. Or always use absolute paths.\n resFunction = compile_function(__file__,\n \"SPAM\",\n (q, np.linspace(0, pi/2, 11), 10))\n # Run the QGL2. Note that the generated function takes no arguments itself\n sequences = resFunction()\n if toHW:\n print(\"Compiling sequences to hardware\\n\")\n fileNames = qgl2_compile_to_hardware(sequences, filename='SPAM/SPAM')\n print(f\"Compiled sequences; metafile = {fileNames}\")\n if plotPulses:\n from QGL.PulseSequencePlotter import plot_pulse_files\n # FIXME: As called, this returns a graphical object to display\n plot_pulse_files(fileNames)\n else:\n print(\"\\nGenerated sequences:\\n\")\n from QGL.Scheduler import schedule\n\n scheduled_seq = schedule(sequences)\n from IPython.lib.pretty import pretty\n print(pretty(scheduled_seq))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.linspace" ] ]
ikibalin/cryspy_editor
[ "dbc84518c8e0de61185f9c66586ccc07af16350c" ]
[ "build/lib/cryspy_editor/widgets/w_pd2d_proc.py" ]
[ "import numpy\nfrom PyQt5 import QtWidgets\n\nfrom .i_graph_mod_1d import cwidg_central as cwidg_pwd\nfrom .interactive_matrix import cwidg_central as cwidg_matrix\nfrom .FUNCTIONS import get_layout_rciftab_obj, del_layout\nfrom .w_item_constr import w_for_item_constr\n\ndef w_for_pd2d_proc(obj, layout_11, layout_12, layout_13, layout_2, layout_3, w_output, thread):\n w_for_item_constr(obj, layout_11, layout_12, layout_13, layout_2, layout_3, w_output, thread)\n del_layout(layout_3)\n\n stack_widg = QtWidgets.QStackedWidget()\n \n lay_grid = QtWidgets.QGridLayout()\n lay_grid.addWidget(QtWidgets.QLabel(\"sum\"), 0, 1)\n lay_grid.addWidget(QtWidgets.QLabel(\"diff.\"), 0, 2)\n lay_grid.addWidget(QtWidgets.QLabel(\"up\"), 0, 3)\n lay_grid.addWidget(QtWidgets.QLabel(\"down\"), 0, 4)\n lay_grid.addWidget(QtWidgets.QLabel(\"projection\"), 1, 0)\n lay_grid.addWidget(QtWidgets.QLabel(\"maxtix model\"), 2, 0)\n lay_grid.addWidget(QtWidgets.QLabel(\"maxtix exp\"), 3, 0)\n lay_grid.addWidget(QtWidgets.QLabel(\"maxtix exp - model\"), 4, 0)\n lay_grid.addWidget(QtWidgets.QLabel(\"maxtix model (gn)\"), 5, 0)\n lay_grid.addWidget(QtWidgets.QLabel(\"maxtix exp (gn)\"), 6, 0)\n lay_grid.addWidget(QtWidgets.QLabel(\"maxtix exp - model (gn)\"), 7, 0)\n\n\n _rb_1 = QtWidgets.QRadioButton()\n _rb_1.toggled.connect(lambda: stack_widg.setCurrentIndex(0))\n lay_grid.addWidget(_rb_1, 1, 1)\n _rb_1.setChecked(True)\n _rb_2 = QtWidgets.QRadioButton()\n _rb_2.toggled.connect(lambda: stack_widg.setCurrentIndex(1))\n lay_grid.addWidget(_rb_2, 2, 1)\n\n _rb_3 = QtWidgets.QRadioButton()\n _rb_3.toggled.connect(lambda: stack_widg.setCurrentIndex(2))\n lay_grid.addWidget(_rb_3, 1, 2)\n _rb_4 = QtWidgets.QRadioButton()\n _rb_4.toggled.connect(lambda: stack_widg.setCurrentIndex(3))\n lay_grid.addWidget(_rb_4, 2, 2)\n\n _rb_5 = QtWidgets.QRadioButton()\n _rb_5.toggled.connect(lambda: stack_widg.setCurrentIndex(4))\n lay_grid.addWidget(_rb_5, 1, 3)\n _rb_6 = QtWidgets.QRadioButton()\n _rb_6.toggled.connect(lambda: stack_widg.setCurrentIndex(5))\n lay_grid.addWidget(_rb_6, 2, 3)\n\n _rb_7 = QtWidgets.QRadioButton()\n _rb_7.toggled.connect(lambda: stack_widg.setCurrentIndex(6))\n lay_grid.addWidget(_rb_7, 1, 4)\n _rb_8 = QtWidgets.QRadioButton()\n _rb_8.toggled.connect(lambda: stack_widg.setCurrentIndex(7))\n lay_grid.addWidget(_rb_8, 2, 4)\n\n\n _rb_9 = QtWidgets.QRadioButton()\n _rb_9.toggled.connect(lambda: stack_widg.setCurrentIndex(8))\n lay_grid.addWidget(_rb_9, 3, 1)\n _rb_10 = QtWidgets.QRadioButton()\n _rb_10.toggled.connect(lambda: stack_widg.setCurrentIndex(9))\n lay_grid.addWidget(_rb_10, 3, 2)\n _rb_11 = QtWidgets.QRadioButton()\n _rb_11.toggled.connect(lambda: stack_widg.setCurrentIndex(10))\n lay_grid.addWidget(_rb_11, 3, 3)\n _rb_12 = QtWidgets.QRadioButton()\n _rb_12.toggled.connect(lambda: stack_widg.setCurrentIndex(11))\n lay_grid.addWidget(_rb_12, 3, 4)\n\n\n _rb_13 = QtWidgets.QRadioButton()\n _rb_13.toggled.connect(lambda: stack_widg.setCurrentIndex(12))\n lay_grid.addWidget(_rb_13, 4, 1)\n _rb_14 = QtWidgets.QRadioButton()\n _rb_14.toggled.connect(lambda: stack_widg.setCurrentIndex(13))\n lay_grid.addWidget(_rb_14, 4, 2)\n _rb_15 = QtWidgets.QRadioButton()\n _rb_15.toggled.connect(lambda: stack_widg.setCurrentIndex(14))\n lay_grid.addWidget(_rb_15, 4, 3)\n _rb_16 = QtWidgets.QRadioButton()\n _rb_16.toggled.connect(lambda: stack_widg.setCurrentIndex(15))\n lay_grid.addWidget(_rb_16, 4, 4)\n\n\n\n _rb_17 = QtWidgets.QRadioButton()\n _rb_17.toggled.connect(lambda: stack_widg.setCurrentIndex(16))\n 
lay_grid.addWidget(_rb_17, 5, 1)\n\n _rb_18 = QtWidgets.QRadioButton()\n _rb_18.toggled.connect(lambda: stack_widg.setCurrentIndex(17))\n lay_grid.addWidget(_rb_18, 5, 2)\n\n _rb_19 = QtWidgets.QRadioButton()\n _rb_19.toggled.connect(lambda: stack_widg.setCurrentIndex(18))\n lay_grid.addWidget(_rb_19, 5, 3)\n\n _rb_20 = QtWidgets.QRadioButton()\n _rb_20.toggled.connect(lambda: stack_widg.setCurrentIndex(19))\n lay_grid.addWidget(_rb_20, 5, 4)\n\n\n _rb_21 = QtWidgets.QRadioButton()\n _rb_21.toggled.connect(lambda: stack_widg.setCurrentIndex(20))\n lay_grid.addWidget(_rb_21, 6, 1)\n\n _rb_22 = QtWidgets.QRadioButton()\n _rb_22.toggled.connect(lambda: stack_widg.setCurrentIndex(21))\n lay_grid.addWidget(_rb_22, 6, 2)\n\n _rb_23 = QtWidgets.QRadioButton()\n _rb_23.toggled.connect(lambda: stack_widg.setCurrentIndex(22))\n lay_grid.addWidget(_rb_23, 6, 3)\n\n _rb_24 = QtWidgets.QRadioButton()\n _rb_24.toggled.connect(lambda: stack_widg.setCurrentIndex(23))\n lay_grid.addWidget(_rb_24, 6, 4)\n\n\n _rb_25 = QtWidgets.QRadioButton()\n _rb_25.toggled.connect(lambda: stack_widg.setCurrentIndex(24))\n lay_grid.addWidget(_rb_25, 7, 1)\n\n _rb_26 = QtWidgets.QRadioButton()\n _rb_26.toggled.connect(lambda: stack_widg.setCurrentIndex(25))\n lay_grid.addWidget(_rb_26, 7, 2)\n\n _rb_27 = QtWidgets.QRadioButton()\n _rb_27.toggled.connect(lambda: stack_widg.setCurrentIndex(26))\n lay_grid.addWidget(_rb_27, 7, 3)\n\n _rb_28 = QtWidgets.QRadioButton()\n _rb_28.toggled.connect(lambda: stack_widg.setCurrentIndex(27))\n lay_grid.addWidget(_rb_28, 7, 4)\n\n _lay_1 = QtWidgets.QHBoxLayout()\n _lay_1.addLayout(lay_grid)\n _lay_1.addStretch(1)\n layout_3.addLayout(_lay_1)\n\n\n ttheta = obj.ttheta\n phi = obj.phi\n intensity_up = obj.intensity_up\n intensity_up_sigma = obj.intensity_up_sigma\n intensity_down = obj.intensity_down\n intensity_down_sigma = obj.intensity_down_sigma\n intensity_up_total = obj.intensity_up_total\n intensity_down_total = obj.intensity_down_total\n intensity_up_net = obj.intensity_up_net\n intensity_down_net = obj.intensity_down_net\n\n x = ttheta\n y = phi\n\n try:\n z_u = intensity_up_total.transpose()\n z_u_e = intensity_up.transpose()\n z_u_s_sq = (intensity_up_sigma.transpose())**2\n _z_1 = numpy.where(numpy.isnan(z_u_e), 0., z_u).sum(axis=0)\n _z_1_e = numpy.where(numpy.isnan(z_u_e), 0., z_u_e).sum(axis=0)\n _z_1_s = numpy.sqrt(numpy.where(numpy.isnan(z_u_e), 0., z_u_s_sq).sum(axis=0))\n _n_1 = numpy.where(numpy.isnan(z_u_e), 0., 1.).sum(axis=0)\n z_1_u = _z_1/_n_1\n z_1_u_e = _z_1_e/_n_1\n z_1_u_s = _z_1_s/_n_1\n except:\n return\n\n z_d = intensity_down_total.transpose()\n z_d_e = intensity_down.transpose()\n z_d_s_sq = (intensity_down_sigma.transpose())**2\n _z_1 = numpy.where(numpy.isnan(z_d_e), 0., z_d).sum(axis=0)\n _z_1_e = numpy.where(numpy.isnan(z_d_e), 0., z_d_e).sum(axis=0)\n _z_1_s = numpy.sqrt(numpy.where(numpy.isnan(z_d_e), 0., z_d_s_sq).sum(axis=0))\n _n_1 = numpy.where(numpy.isnan(z_d_e), 0., 1.).sum(axis=0)\n z_1_d = _z_1/_n_1\n z_1_d_e = _z_1_e/_n_1\n z_1_d_s = _z_1_s/_n_1\n\n z_sum = intensity_up_total.transpose()+intensity_down_total.transpose()\n z_sum_e = intensity_up.transpose()+intensity_down.transpose()\n z_sum_s_sq = (intensity_up_sigma.transpose())**2+(intensity_down_sigma.transpose())**2\n _z_1_e = numpy.where(numpy.isnan(z_sum_e), 0., z_sum_e).sum(axis=0)\n _z_1 = numpy.where(numpy.isnan(z_sum_e), 0., z_sum).sum(axis=0)\n _z_1_s = numpy.sqrt(numpy.where(numpy.isnan(z_sum_e), 0., z_sum_s_sq).sum(axis=0))\n _n_1 = numpy.where(numpy.isnan(z_sum_e), 0., 
1.).sum(axis=0)\n z_1_sum = _z_1/_n_1\n z_1_sum_e = _z_1_e/_n_1\n z_1_sum_s = _z_1_s/_n_1\n\n z_diff = intensity_up_total.transpose()-intensity_down_total.transpose()\n z_diff_e = intensity_up.transpose()-intensity_down.transpose()\n _z_1_e = numpy.where(numpy.isnan(z_diff_e), 0., z_diff_e).sum(axis=0)\n _z_1 = numpy.where(numpy.isnan(z_diff_e), 0., z_diff).sum(axis=0)\n _n_1 = numpy.where(numpy.isnan(z_diff_e), 0., 1.).sum(axis=0)\n z_1_diff = _z_1/_n_1\n z_1_diff_e = _z_1_e/_n_1\n z_1_diff_s = _z_1_s/_n_1\n\n widg_matrix_u = cwidg_matrix()\n widg_matrix_u.plot_matrix(x, y, z_u)\n widg_matrix_u_e = cwidg_matrix()\n widg_matrix_u_e.plot_matrix(x, y, z_u_e)\n widg_matrix_u_e_m = cwidg_matrix()\n widg_matrix_u_e_m.plot_matrix(x, y, z_u_e-z_u)\n widg_proj_u = cwidg_pwd()\n np_xysm_1 = numpy.vstack((x, z_1_u_e, z_1_u_s, z_1_u)).transpose()\n widg_proj_u.plot_numpy_arrays(np_xysm_1)\n\n widg_matrix_d = cwidg_matrix()\n widg_matrix_d.plot_matrix(x, y, z_d)\n widg_matrix_d_e = cwidg_matrix()\n widg_matrix_d_e.plot_matrix(x, y, z_d_e)\n widg_matrix_d_e_m = cwidg_matrix()\n widg_matrix_d_e_m.plot_matrix(x, y, z_d_e-z_d)\n widg_proj_d = cwidg_pwd()\n np_xysm_1 = numpy.vstack((x, z_1_d_e, z_1_d_s, z_1_d)).transpose()\n widg_proj_d.plot_numpy_arrays(np_xysm_1)\n\n widg_matrix_sum = cwidg_matrix()\n widg_matrix_sum.plot_matrix(x, y, z_sum)\n widg_matrix_sum_e = cwidg_matrix()\n widg_matrix_sum_e.plot_matrix(x, y, z_sum_e)\n widg_matrix_sum_e_m = cwidg_matrix()\n widg_matrix_sum_e_m.plot_matrix(x, y, z_sum_e-z_sum)\n widg_proj_sum = cwidg_pwd()\n np_xysm_1 = numpy.vstack((x, z_1_sum_e, z_1_sum_s, z_1_sum)).transpose()\n widg_proj_sum.plot_numpy_arrays(np_xysm_1)\n\n widg_matrix_diff = cwidg_matrix()\n widg_matrix_diff.plot_matrix(x, y, z_diff)\n widg_matrix_diff_e = cwidg_matrix()\n widg_matrix_diff_e.plot_matrix(x, y, z_diff_e)\n widg_matrix_diff_e_m = cwidg_matrix()\n widg_matrix_diff_e_m.plot_matrix(x, y, z_diff_e-z_diff)\n widg_proj_diff = cwidg_pwd()\n np_xysm_1 = numpy.vstack((x, z_1_diff_e, z_1_diff_s, z_1_diff)).transpose()\n widg_proj_diff.plot_numpy_arrays(np_xysm_1)\n\n stack_widg.addWidget(widg_proj_sum)\n stack_widg.addWidget(widg_matrix_sum)\n stack_widg.addWidget(widg_proj_diff)\n stack_widg.addWidget(widg_matrix_diff)\n stack_widg.addWidget(widg_proj_u)\n stack_widg.addWidget(widg_matrix_u)\n stack_widg.addWidget(widg_proj_d)\n stack_widg.addWidget(widg_matrix_d)\n\n stack_widg.addWidget(widg_matrix_sum_e)\n stack_widg.addWidget(widg_matrix_diff_e)\n stack_widg.addWidget(widg_matrix_u_e)\n stack_widg.addWidget(widg_matrix_d_e)\n\n stack_widg.addWidget(widg_matrix_sum_e_m)\n stack_widg.addWidget(widg_matrix_diff_e_m)\n stack_widg.addWidget(widg_matrix_u_e_m)\n stack_widg.addWidget(widg_matrix_d_e_m)\n\n\n np_gamma, np_nu, l_int =obj.recalc_to_gamma_nu_grid()\n x_gn, y_gn = np_gamma, np_nu\n z_u_e_gn, z_d_e_gn, z_sum_e_gn, z_diff_e_gn, z_u_m_gn, z_d_m_gn, z_sum_m_gn, z_diff_m_gn = l_int\n\n widg_matrix_u_e_gn = cwidg_matrix()\n widg_matrix_u_e_gn.plot_matrix(x_gn, y_gn, z_u_e_gn)\n widg_matrix_d_e_gn = cwidg_matrix()\n widg_matrix_d_e_gn.plot_matrix(x_gn, y_gn, z_d_e_gn)\n widg_matrix_sum_e_gn = cwidg_matrix()\n widg_matrix_sum_e_gn.plot_matrix(x_gn, y_gn, z_sum_e_gn)\n widg_matrix_diff_e_gn = cwidg_matrix()\n widg_matrix_diff_e_gn.plot_matrix(x_gn, y_gn, z_diff_e_gn)\n\n widg_matrix_u_m_gn = cwidg_matrix()\n widg_matrix_u_m_gn.plot_matrix(x_gn, y_gn, z_u_m_gn)\n widg_matrix_d_m_gn = cwidg_matrix()\n widg_matrix_d_m_gn.plot_matrix(x_gn, y_gn, z_d_m_gn)\n widg_matrix_sum_m_gn = 
cwidg_matrix()\n widg_matrix_sum_m_gn.plot_matrix(x_gn, y_gn, z_sum_m_gn)\n widg_matrix_diff_m_gn = cwidg_matrix()\n widg_matrix_diff_m_gn.plot_matrix(x_gn, y_gn, z_diff_m_gn)\n\n widg_matrix_u_em_gn = cwidg_matrix()\n widg_matrix_u_em_gn.plot_matrix(x_gn, y_gn, z_u_e_gn - z_u_m_gn)\n widg_matrix_d_em_gn = cwidg_matrix()\n widg_matrix_d_em_gn.plot_matrix(x_gn, y_gn, z_d_e_gn - z_d_m_gn)\n widg_matrix_sum_em_gn = cwidg_matrix()\n widg_matrix_sum_em_gn.plot_matrix(x_gn, y_gn, z_sum_e_gn - z_sum_m_gn)\n widg_matrix_diff_em_gn = cwidg_matrix()\n widg_matrix_diff_em_gn.plot_matrix(x_gn, y_gn, z_diff_e_gn - z_diff_m_gn)\n\n stack_widg.addWidget(widg_matrix_sum_m_gn)\n stack_widg.addWidget(widg_matrix_diff_m_gn)\n stack_widg.addWidget(widg_matrix_u_m_gn)\n stack_widg.addWidget(widg_matrix_d_m_gn)\n\n stack_widg.addWidget(widg_matrix_sum_e_gn)\n stack_widg.addWidget(widg_matrix_diff_e_gn)\n stack_widg.addWidget(widg_matrix_u_e_gn)\n stack_widg.addWidget(widg_matrix_d_e_gn)\n\n stack_widg.addWidget(widg_matrix_sum_em_gn)\n stack_widg.addWidget(widg_matrix_diff_em_gn)\n stack_widg.addWidget(widg_matrix_u_em_gn)\n stack_widg.addWidget(widg_matrix_d_em_gn)\n\n stack_widg.setCurrentIndex(0)\n\n layout_3.addWidget(stack_widg)\n \n\n return\n\n" ]
[ [ "numpy.isnan", "numpy.vstack" ] ]
XLearning-SCU/2021-NeurIPS-NCR
[ "31dd08ec114a9c5abca88fb14b4487ffebaed292" ]
[ "NCR/co_train.py" ]
[ "\"\"\"Training script\"\"\"\n\nimport os\nimport time\nimport copy\nimport shutil\nimport random\n\nimport numpy as np\nimport torch\nfrom sklearn.mixture import GaussianMixture\n\nfrom data import get_loader, get_dataset\nfrom model import SGRAF\nfrom vocab import Vocabulary, deserialize_vocab\nfrom evaluation import i2t, t2i, encode_data, shard_attn_scores\nfrom utils import (\n AverageMeter,\n ProgressMeter,\n save_checkpoint,\n adjust_learning_rate,\n)\n\n\ndef main(opt):\n\n # load Vocabulary Wrapper\n print(\"load and process dataset ...\")\n vocab = deserialize_vocab(\n os.path.join(opt.vocab_path, \"%s_vocab.json\" % opt.data_name)\n )\n opt.vocab_size = len(vocab)\n\n # load dataset\n captions_train, images_train = get_dataset(\n opt.data_path, opt.data_name, \"train\", vocab\n )\n captions_dev, images_dev = get_dataset(opt.data_path, opt.data_name, \"dev\", vocab)\n\n # data loader\n noisy_trainloader, data_size, clean_labels = get_loader(\n captions_train,\n images_train,\n \"warmup\",\n opt.batch_size,\n opt.workers,\n opt.noise_ratio,\n opt.noise_file,\n )\n val_loader = get_loader(\n captions_dev, images_dev, \"dev\", opt.batch_size, opt.workers\n )\n\n # create models\n model_A = SGRAF(opt)\n model_B = SGRAF(opt)\n\n best_rsum = 0\n start_epoch = 0\n\n # save the history of losses from two networks\n all_loss = [[], []]\n\n # Warmup\n print(\"\\n* Warmup\")\n if opt.warmup_model_path:\n if os.path.isfile(opt.warmup_model_path):\n checkpoint = torch.load(opt.warmup_model_path)\n model_A.load_state_dict(checkpoint[\"model_A\"])\n model_B.load_state_dict(checkpoint[\"model_B\"])\n print(\n \"=> load warmup checkpoint '{}' (epoch {})\".format(\n opt.warmup_model_path, checkpoint[\"epoch\"]\n )\n )\n print(\"\\nValidattion ...\")\n validate(opt, val_loader, [model_A, model_B])\n else:\n raise Exception(\n \"=> no checkpoint found at '{}'\".format(opt.warmup_model_path)\n )\n else:\n epoch = 0\n for epoch in range(0, opt.warmup_epoch):\n print(\"[{}/{}] Warmup model_A\".format(epoch + 1, opt.warmup_epoch))\n warmup(opt, noisy_trainloader, model_A, epoch)\n print(\"[{}/{}] Warmup model_B\".format(epoch + 1, opt.warmup_epoch))\n warmup(opt, noisy_trainloader, model_B, epoch)\n\n save_checkpoint(\n {\n \"epoch\": epoch,\n \"model_A\": model_A.state_dict(),\n \"model_B\": model_B.state_dict(),\n \"opt\": opt,\n },\n is_best=False,\n filename=\"warmup_model_{}.pth.tar\".format(epoch),\n prefix=opt.output_dir + \"/\",\n )\n\n # evaluate on validation set\n print(\"\\nValidattion ...\")\n validate(opt, val_loader, [model_A, model_B])\n\n # save the history of losses from two networks\n all_loss = [[], []]\n print(\"\\n* Co-training\")\n\n # Train the Model\n for epoch in range(start_epoch, opt.num_epochs):\n print(\"\\nEpoch [{}/{}]\".format(epoch, opt.num_epochs))\n adjust_learning_rate(opt, model_A.optimizer, epoch)\n adjust_learning_rate(opt, model_B.optimizer, epoch)\n\n # # Dataset split (labeled, unlabeled)\n print(\"Split dataset ...\")\n prob_A, prob_B, all_loss = eval_train(\n opt,\n model_A,\n model_B,\n noisy_trainloader,\n data_size,\n all_loss,\n clean_labels,\n epoch,\n )\n\n pred_A = split_prob(prob_A, opt.p_threshold)\n pred_B = split_prob(prob_B, opt.p_threshold)\n\n print(\"\\nModel A training ...\")\n # train model_A\n labeled_trainloader, unlabeled_trainloader = get_loader(\n captions_train,\n images_train,\n \"train\",\n opt.batch_size,\n opt.workers,\n opt.noise_ratio,\n opt.noise_file,\n pred=pred_B,\n prob=prob_B,\n )\n train(opt, model_A, model_B, 
labeled_trainloader, unlabeled_trainloader, epoch)\n\n print(\"\\nModel B training ...\")\n # train model_B\n labeled_trainloader, unlabeled_trainloader = get_loader(\n captions_train,\n images_train,\n \"train\",\n opt.batch_size,\n opt.workers,\n opt.noise_ratio,\n opt.noise_file,\n pred=pred_A,\n prob=prob_A,\n )\n train(opt, model_B, model_A, labeled_trainloader, unlabeled_trainloader, epoch)\n\n print(\"\\nValidattion ...\")\n # evaluate on validation set\n rsum = validate(opt, val_loader, [model_A, model_B])\n\n # remember best R@ sum and save checkpoint\n is_best = rsum > best_rsum\n best_rsum = max(rsum, best_rsum)\n if is_best:\n save_checkpoint(\n {\n \"epoch\": epoch,\n \"model_A\": model_A.state_dict(),\n \"model_B\": model_B.state_dict(),\n \"best_rsum\": best_rsum,\n \"opt\": opt,\n },\n is_best,\n filename=\"checkpoint_{}.pth.tar\".format(epoch),\n prefix=opt.output_dir + \"/\",\n )\n\n\ndef train(opt, net, net2, labeled_trainloader, unlabeled_trainloader=None, epoch=None):\n \"\"\"\n One epoch training.\n \"\"\"\n losses = AverageMeter(\"loss\", \":.4e\")\n batch_time = AverageMeter(\"batch\", \":6.3f\")\n data_time = AverageMeter(\"data\", \":6.3f\")\n progress = ProgressMeter(\n len(labeled_trainloader),\n [batch_time, data_time, losses],\n prefix=\"Training Step\",\n )\n\n # fix one network and train the other\n net.train_start()\n net2.val_start()\n\n unlabeled_train_iter = iter(unlabeled_trainloader)\n labels_l = []\n pred_labels_l = []\n labels_u = []\n pred_labels_u = []\n end = time.time()\n for i, batch_train_data in enumerate(labeled_trainloader):\n (\n batch_images_l,\n batch_text_l,\n batch_lengths_l,\n _,\n batch_labels_l,\n batch_prob_l,\n batch_clean_labels_l,\n ) = batch_train_data\n batch_size = batch_images_l.size(0)\n labels_l.append(batch_clean_labels_l)\n\n # unlabeled data\n try:\n (\n batch_images_u,\n batch_text_u,\n batch_lengths_u,\n _,\n batch_clean_labels_u,\n ) = unlabeled_train_iter.next()\n except:\n unlabeled_train_iter = iter(unlabeled_trainloader)\n (\n batch_images_u,\n batch_text_u,\n batch_lengths_u,\n _,\n batch_clean_labels_u,\n ) = unlabeled_train_iter.next()\n labels_u.append(batch_clean_labels_u)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n if torch.cuda.is_available():\n batch_prob_l = batch_prob_l.cuda()\n batch_labels_l = batch_labels_l.cuda()\n\n # label refinement\n with torch.no_grad():\n net.val_start()\n # labeled data\n pl = net.predict(batch_images_l, batch_text_l, batch_lengths_l)\n ptl = batch_prob_l * batch_labels_l + (1 - batch_prob_l) * pl[0]\n targets_l = ptl.detach()\n pred_labels_l.append(ptl.cpu().numpy())\n\n # unlabeled data\n pu1 = net.predict(batch_images_u, batch_text_u, batch_lengths_u)\n pu2 = net2.predict(batch_images_u, batch_text_u, batch_lengths_u)\n ptu = (pu1 + pu2) / 2\n targets_u = ptu.detach()\n targets_u = targets_u.view(-1, 1)\n pred_labels_u.append(ptu.cpu().numpy())\n\n # drop last batch if only one sample (batch normalization require)\n if batch_images_l.size(0) == 1 or batch_images_u.size(0) == 1:\n break\n\n net.train_start()\n # train with labeled + unlabeled data exponential or linear\n loss_l = net.train(\n batch_images_l,\n batch_text_l,\n batch_lengths_l,\n labels=targets_l,\n hard_negative=True,\n soft_margin=opt.soft_margin,\n mode=\"train\",\n )\n if epoch < (opt.num_epochs // 2):\n loss_u = 0\n else:\n loss_u = net.train(\n batch_images_u,\n batch_text_u,\n batch_lengths_u,\n labels=targets_u,\n hard_negative=True,\n soft_margin=opt.soft_margin,\n 
mode=\"train\",\n )\n\n loss = loss_l + loss_u\n losses.update(loss, batch_images_l.size(0) + batch_images_u.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # Print log info\n if i % opt.log_step == 0:\n progress.display(i)\n\n\ndef warmup(opt, train_loader, model, epoch):\n # average meters to record the training statistics\n losses = AverageMeter(\"loss\", \":.4e\")\n batch_time = AverageMeter(\"batch\", \":6.3f\")\n data_time = AverageMeter(\"data\", \":6.3f\")\n progress = ProgressMeter(\n len(train_loader), [batch_time, data_time, losses], prefix=\"Warmup Step\"\n )\n\n end = time.time()\n for i, (images, captions, lengths, _) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n # drop last batch if only one sample (batch normalization require)\n if images.size(0) == 1:\n break\n\n model.train_start()\n\n # Update the model\n loss = model.train(images, captions, lengths, mode=\"warmup\")\n losses.update(loss, images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % opt.log_step == 0:\n progress.display(i)\n\n\ndef validate(opt, val_loader, models=[]):\n # compute the encoding for all the validation images and captions\n if opt.data_name == \"cc152k_precomp\":\n per_captions = 1\n elif opt.data_name in [\"coco_precomp\", \"f30k_precomp\"]:\n per_captions = 5\n\n Eiters = models[0].Eiters\n sims_mean = 0\n count = 0\n for ind in range(len(models)):\n count += 1\n print(\"Encoding with model {}\".format(ind))\n img_embs, cap_embs, cap_lens = encode_data(\n models[ind], val_loader, opt.log_step\n )\n\n # clear duplicate 5*images and keep 1*images FIXME\n img_embs = np.array(\n [img_embs[i] for i in range(0, len(img_embs), per_captions)]\n )\n\n # record computation time of validation\n start = time.time()\n print(\"Computing similarity from model {}\".format(ind))\n sims_mean += shard_attn_scores(\n models[ind], img_embs, cap_embs, cap_lens, opt, shard_size=100\n )\n end = time.time()\n print(\n \"Calculate similarity time with model {}: {:.2f} s\".format(ind, end - start)\n )\n\n # average the sims\n sims_mean = sims_mean / count\n\n # caption retrieval\n (r1, r5, r10, medr, meanr) = i2t(img_embs.shape[0], sims_mean, per_captions)\n print(\n \"Image to text: {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}\".format(\n r1, r5, r10, medr, meanr\n )\n )\n\n # image retrieval\n (r1i, r5i, r10i, medri, meanr) = t2i(img_embs.shape[0], sims_mean, per_captions)\n print(\n \"Text to image: {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}\".format(\n r1i, r5i, r10i, medri, meanr\n )\n )\n\n # sum of recalls to be used for early stopping\n r_sum = r1 + r5 + r10 + r1i + r5i + r10i\n\n return r_sum\n\n\ndef eval_train(\n opt, model_A, model_B, data_loader, data_size, all_loss, clean_labels, epoch\n):\n \"\"\"\n Compute per-sample loss and prob\n \"\"\"\n batch_time = AverageMeter(\"batch\", \":6.3f\")\n data_time = AverageMeter(\"data\", \":6.3f\")\n progress = ProgressMeter(\n len(data_loader), [batch_time, data_time], prefix=\"Computinng losses\"\n )\n\n model_A.val_start()\n model_B.val_start()\n losses_A = torch.zeros(data_size)\n losses_B = torch.zeros(data_size)\n\n end = time.time()\n for i, (images, captions, lengths, ids) in enumerate(data_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n with torch.no_grad():\n # compute the loss\n loss_A = model_A.train(images, captions, lengths, mode=\"eval_loss\")\n loss_B = model_B.train(images, captions, lengths, 
mode=\"eval_loss\")\n for b in range(images.size(0)):\n losses_A[ids[b]] = loss_A[b]\n losses_B[ids[b]] = loss_B[b]\n\n batch_time.update(time.time() - end)\n end = time.time()\n if i % opt.log_step == 0:\n progress.display(i)\n\n losses_A = (losses_A - losses_A.min()) / (losses_A.max() - losses_A.min())\n all_loss[0].append(losses_A)\n losses_B = (losses_B - losses_B.min()) / (losses_B.max() - losses_B.min())\n all_loss[1].append(losses_B)\n\n input_loss_A = losses_A.reshape(-1, 1)\n input_loss_B = losses_B.reshape(-1, 1)\n\n print(\"\\nFitting GMM ...\")\n # fit a two-component GMM to the loss\n gmm_A = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)\n gmm_A.fit(input_loss_A.cpu().numpy())\n prob_A = gmm_A.predict_proba(input_loss_A.cpu().numpy())\n prob_A = prob_A[:, gmm_A.means_.argmin()]\n\n gmm_B = GaussianMixture(n_components=2, max_iter=10, tol=1e-2, reg_covar=5e-4)\n gmm_B.fit(input_loss_B.cpu().numpy())\n prob_B = gmm_B.predict_proba(input_loss_B.cpu().numpy())\n prob_B = prob_B[:, gmm_B.means_.argmin()]\n\n return prob_A, prob_B, all_loss\n\n\ndef split_prob(prob, threshld):\n if prob.min() > threshld:\n # If prob are all larger than threshld, i.e. no noisy data, we enforce 1/100 unlabeled data\n print(\n \"No estimated noisy data. Enforce the 1/100 data with small probability to be unlabeled.\"\n )\n threshld = np.sort(prob)[len(prob) // 100]\n pred = prob > threshld\n return pred\n" ]
[ [ "torch.zeros", "torch.no_grad", "sklearn.mixture.GaussianMixture", "torch.cuda.is_available", "numpy.sort", "torch.load" ] ]
neptune0818/ground-station-app
[ "045e17b543633df6b7c463f252e8fe01fa372c71" ]
[ "gui/DataLoggerTest.py" ]
[ "'''\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n# necessary libraries\nimport numpy as np\nimport os\nimport time\n\n# initialize file name - ok if it rewrites the file every time for testing\nFileName = 'TestData.csv'\ncwd = os.getcwd()\nFilePath = os.path.join(cwd, 'data', FileName)\n\n# open file to begin writing process\nf = open(FilePath, 'w')\nf.write('Time,Altitude,Velocity,Events\\r')\n\n# Can change these\nduration = 30 # test time\nsampling_rate = 10 # Hz\n\nstart = time.time()\ncurrent = 0\nwhile duration > current:\n f = open(FilePath, 'a')\n alt = np.random.randint(0,5000)\n vel = np.random.randint(0, 500)\n f.write(f\"{current}, {alt}, {vel}\\r\")\n f.close()\n time.sleep(1/sampling_rate)\n current = time.time() - start\n \nf.close()" ]
[ [ "numpy.random.randint" ] ]
MOAISUS/jina
[ "9157e1a9429ccd04c7e5123273d40b136f69d977" ]
[ "jina/executors/encoders/torchvision.py" ]
[ "__copyright__ = \"Copyright (c) 2020 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nimport numpy as np\n\nfrom . import BaseNumericEncoder\nfrom ..decorators import batching, as_ndarray\n\n\nclass TorchEncoder(BaseNumericEncoder):\n def __init__(self,\n model_name: str,\n channel_axis: int = 1,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.model_name = model_name\n self.channel_axis = channel_axis\n self._default_channel_axis = 1\n\n def post_init(self):\n import torch\n self._build_model()\n device = 'cuda:0' if self.on_gpu else 'cpu'\n self.model.to(torch.device(device))\n\n @batching\n @as_ndarray\n def encode(self, data: 'np.ndarray', *args, **kwargs) -> 'np.ndarray':\n if self.channel_axis != self._default_channel_axis:\n data = np.moveaxis(data, self.channel_axis, self._default_channel_axis)\n import torch\n feature_map = self._get_features(torch.from_numpy(data.astype('float32'))).detach().numpy()\n return self._get_pooling(feature_map)\n\n def _build_model(self):\n raise NotImplementedError\n\n def _get_features(self, data):\n raise NotImplementedError\n\n def _get_pooling(self, feature_map):\n return feature_map\n" ]
[ [ "torch.device", "numpy.moveaxis" ] ]
saurabhraj-115/Recommendation-Systems
[ "60fcdb06a142a09a7f079ec937110ad2891851ae" ]
[ "fin.py" ]
[ "\nfrom __future__ import print_function #importing relevant tools\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom os import system\nimport math\nfrom itertools import product as pr\n# from sklearn.impute import SimpleImputer\n\n\n\n\n\ndef spa(X):\n non_zero = np.count_nonzero(X)\n total_val = np.product(X.shape)\n sparsity = (total_val - non_zero)\n return sparsity\n\n\n\n\n\n\n\n\nsystem('clear')\nA=np.genfromtxt(\"movie_rarting_test.csv\",delimiter=',',skip_header=1) #importing dataset\n# A=pd.DataFrame(A)\nD=np.genfromtxt(\"rsdata.csv\",delimiter=',',skip_header=1) #importing dataset\n# D=pd.\nA= A[1:,1:] #deleting the coulumn with user ID\nD= D[1:,1:] #deleting the coulumn with user ID\n#\n\nA=A[:D.shape[0],:]\n\nB= np.zeros((D.shape[0],D.shape[0])) #making a matrix for storing cosine_similarity\nfor i,j in pr(range(D.shape[0]) , range(D.shape[0])): #implementing cosine_similarity\n M=D[i,:].reshape(1,-1)\n N=D[j,:].reshape(1,-1)\n B[i,j]= cosine_similarity(M,N)\n\n\nk=int(math.floor(A.shape[0]/3.0))\ntop=int(math.floor(k/3.0))\n\nflag=0\nc=0\nq=3\nC=np.zeros((A.shape[0],A.shape[1])) #this parameter could be changed accordin to convinience\nwhile flag==0 and q>-1:\n q-=1\n K= np.argpartition(B, np.argmin(B, axis=0))[:, -k-1:] #partitioning the datasets row-wise and finding the closest 'top'+1 users to each users\n K= K[:,int((q)*top):int((q+1)*top-1)] #removing itself from top+1 users closest to itself\n for i,j in pr(range(A.shape[0]),range(A.shape[1])):\n # if A[i,j] == 0:\n su= 0.0\n ctr=0.0\n for l in range(int(top-1)):\n if A[K[i,l],j] > 0.0:\n flag=1\n c=1\n su+= A[K[i,l],j]\n ctr+=1.0\n if flag==1:\n C[i,j]=float(su/ctr)\n flag=0\n\n if c==1 and q==2 and A[i,j]==0:\n C[i,j]=0\n\n# C= np.around(C) #rounding off the numpy array from float to nearest integer rating\n# print (C)\n# A=pd.DataFrame(A)\n# C=pd.DataFrame(C)\nC= np.ceil(C)\n# print (A)\n# print (C)\n# print (spa(C))\n# np.savetxt('final_ratings.csv', C, delimiter=',',fmt='%1.18f') #outputting to any file\nnz=0\nmae=0\nrmse=0\nfor i,j in pr(range(A.shape[0]),range(A.shape[1])):\n if A[i,j]>0:\n nz+=1\n mae+=abs(A[i,j]-C[i,j])\n\n\nmae=mae/nz\nfor i,j in pr(range(A.shape[0]),range(A.shape[1])):\n if A[i,j]>0:\n rmse=(C[i,j]-A[i,j]-mae)**2\nrmse=np.sqrt(rmse/nz)\nA=pd.DataFrame(A)\nC=pd.DataFrame(C)\nprint (A)\nprint (C)\nprint (spa(C))\nprint ('Mae and RMSE are')\nprint (mae)\nprint (rmse)\n" ]
[ [ "numpy.product", "numpy.ceil", "numpy.count_nonzero", "numpy.zeros", "numpy.argmin", "pandas.DataFrame", "numpy.genfromtxt", "numpy.sqrt", "sklearn.metrics.pairwise.cosine_similarity" ] ]
xeisberg/pecos
[ "c9cf209676205dd000479861667351e724f0ba1c" ]
[ "test/pecos/ann/test_hnsw.py" ]
[ "# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance\n# with the License. A copy of the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions\n# and limitations under the License.\nimport pytest # noqa: F401; pylint: disable=unused-variable\nfrom pytest import approx\n\n\ndef test_importable():\n import pecos.ann # noqa: F401\n from pecos.ann.hnsw import HNSW # noqa: F401\n\n\ndef test_save_and_load(tmpdir):\n import random\n import numpy as np\n from pecos.ann.hnsw import HNSW\n from pecos.utils import smat_util\n\n random.seed(1234)\n np.random.seed(1234)\n X_trn = smat_util.load_matrix(\"test/tst-data/ann/X.trn.l2-normalized.npy\").astype(np.float32)\n X_tst = smat_util.load_matrix(\"test/tst-data/ann/X.tst.l2-normalized.npy\").astype(np.float32)\n model_folder = tmpdir.join(\"hnsw_model_dir\")\n\n train_params = HNSW.TrainParams(M=36, efC=90, metric_type=\"ip\", threads=1)\n pred_params = HNSW.PredParams(efS=80, topk=10, threads=1)\n model = HNSW.train(\n X_trn,\n train_params=train_params,\n pred_params=pred_params,\n )\n Yp_from_mem, _ = model.predict(X_tst, ret_csr=False)\n model.save(model_folder)\n del model\n\n model = HNSW.load(model_folder)\n Yp_from_file, _ = model.predict(X_tst, pred_params=pred_params, ret_csr=False)\n assert Yp_from_mem == approx(\n Yp_from_file, abs=0.0\n ), f\"save and load failed: Yp_from_mem != Yp_from_file\"\n\n\ndef test_predict_and_recall():\n import random\n import numpy as np\n import scipy.sparse as smat\n from pecos.utils import smat_util\n from pecos.ann.hnsw import HNSW\n\n random.seed(1234)\n np.random.seed(1234)\n top_k = 10\n efS_list = [50, 75, 100]\n num_searcher_online = 2\n\n def calc_recall(Y_true, Y_pred):\n n_data, top_k = Y_true.shape\n recall = 0.0\n for qid in range(n_data):\n yt = set(Y_true[qid, :].flatten().data)\n yp = set(Y_pred[qid, :].flatten().data)\n recall += len(yt.intersection(yp)) / top_k\n recall = recall / n_data\n return recall\n\n # load data matrices\n X_trn = smat_util.load_matrix(\"test/tst-data/ann/X.trn.l2-normalized.npy\").astype(np.float32)\n X_tst = smat_util.load_matrix(\"test/tst-data/ann/X.tst.l2-normalized.npy\").astype(np.float32)\n dense_model_folder = \"test/tst-data/ann/hnsw-model-dense\"\n sparse_model_folder = \"test/tst-data/ann/hnsw-model-sparse\"\n\n # compute exact NN ground truth\n # for both ip and cosine similarity, since data is l2-normalized\n Y_true = 1.0 - X_tst.dot(X_trn.T)\n Y_true = np.argsort(Y_true)[:, :top_k]\n\n # test dense features\n model = HNSW.load(dense_model_folder)\n searchers = model.searchers_create(num_searcher_online)\n pred_params = model.get_pred_params()\n for efS in efS_list:\n pred_params.efS = efS\n Y_pred, _ = model.predict(\n X_tst, pred_params=pred_params, searchers=searchers, ret_csr=False\n )\n recall = calc_recall(Y_true, Y_pred)\n assert recall == approx(\n 1.0, abs=1e-2\n ), f\"hnsw inference failed: data_type=drm, efS={efS}, recall={recall}\"\n del searchers, model\n\n # test csr features, we just reuse the Y_true since data are the same\n X_trn = smat.csr_matrix(X_trn).astype(np.float32)\n X_tst = smat.csr_matrix(X_tst).astype(np.float32)\n model = 
HNSW.load(sparse_model_folder)\n searchers = model.searchers_create(num_searcher_online)\n pred_params = model.get_pred_params()\n for efS in efS_list:\n pred_params.efS = efS\n Y_pred, _ = model.predict(\n X_tst, pred_params=pred_params, searchers=searchers, ret_csr=False\n )\n recall = calc_recall(Y_true, Y_pred)\n assert recall == approx(\n 1.0, abs=1e-2\n ), f\"hnsw inference failed: data_type=csr, efS={efS}, recall={recall}\"\n del searchers, model\n\n\ndef test_cli(tmpdir):\n import subprocess\n import shlex\n\n x_trn_path = \"test/tst-data/ann/X.trn.l2-normalized.npy\"\n x_tst_path = \"test/tst-data/ann/X.tst.l2-normalized.npy\"\n model_folder = str(tmpdir.join(\"hnsw_save_model\"))\n y_pred_path = str(tmpdir.join(\"Yt_pred.npz\"))\n\n # train\n cmd = []\n cmd += [\"python3 -m pecos.ann.hnsw.train\"]\n cmd += [\"-x {}\".format(x_trn_path)]\n cmd += [\"-m {}\".format(model_folder)]\n process = subprocess.run(\n shlex.split(\" \".join(cmd)), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n assert process.returncode == 0, \" \".join(cmd)\n\n # predict\n cmd = []\n cmd += [\"python3 -m pecos.ann.hnsw.predict\"]\n cmd += [\"-x {}\".format(x_tst_path)]\n cmd += [\"-m {}\".format(model_folder)]\n cmd += [\"-o {}\".format(y_pred_path)]\n process = subprocess.run(\n shlex.split(\" \".join(cmd)), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n assert process.returncode == 0, \" \".join(cmd)\n" ]
[ [ "numpy.random.seed", "scipy.sparse.csr_matrix", "numpy.argsort" ] ]
zhuxinqimac/stylegan2
[ "5c3bda161ead21ea290de4190d3704e59cf6de64" ]
[ "metrics/frechet_inception_distance.py" ]
[ "# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"Frechet Inception Distance (FID).\"\"\"\n\nimport os\nimport numpy as np\nimport scipy\nimport tensorflow as tf\nimport dnnlib.tflib as tflib\n\nfrom metrics import metric_base\nfrom training import misc\nfrom training.utils import get_return_v\n\n#----------------------------------------------------------------------------\n\nclass FID(metric_base.MetricBase):\n def __init__(self, num_images, minibatch_per_gpu, **kwargs):\n super().__init__(**kwargs)\n self.num_images = num_images\n self.minibatch_per_gpu = minibatch_per_gpu\n\n def _evaluate(self, Gs, Gs_kwargs, num_gpus, **kwargs):\n minibatch_size = num_gpus * self.minibatch_per_gpu\n inception = misc.load_pkl('http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/inception_v3_features.pkl')\n activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)\n\n # Calculate statistics for reals.\n cache_file = self._get_cache_file_for_reals(num_images=self.num_images)\n os.makedirs(os.path.dirname(cache_file), exist_ok=True)\n if os.path.isfile(cache_file):\n mu_real, sigma_real = misc.load_pkl(cache_file)\n else:\n for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)):\n begin = idx * minibatch_size\n end = min(begin + minibatch_size, self.num_images)\n if images.shape[1] == 1:\n images = np.tile(images, [1, 3, 1, 1])\n activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True)\n if end == self.num_images:\n break\n mu_real = np.mean(activations, axis=0)\n sigma_real = np.cov(activations, rowvar=False)\n misc.save_pkl((mu_real, sigma_real), cache_file)\n\n # Construct TensorFlow graph.\n result_expr = []\n for gpu_idx in range(num_gpus):\n with tf.device('/gpu:%d' % gpu_idx):\n # Gs_clone = Gs.clone()\n Gs_clone = Gs\n inception_clone = inception.clone()\n latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])\n labels = self._get_random_labels_tf(self.minibatch_per_gpu)\n images = get_return_v(Gs_clone.get_output_for(latents, labels, **Gs_kwargs), 1)\n # images, _ = Gs_clone.get_output_for(latents, labels, **Gs_kwargs)\n if images.get_shape().as_list()[1] == 1:\n images = tf.tile(images, [1, 3, 1, 1])\n images = tflib.convert_images_to_uint8(images)\n result_expr.append(inception_clone.get_output_for(images))\n\n # Calculate statistics for fakes.\n for begin in range(0, self.num_images, minibatch_size):\n self._report_progress(begin, self.num_images)\n end = min(begin + minibatch_size, self.num_images)\n activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]\n mu_fake = np.mean(activations, axis=0)\n sigma_fake = np.cov(activations, rowvar=False)\n\n # Calculate FID.\n m = np.square(mu_fake - mu_real).sum()\n s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member\n dist = m + np.trace(sigma_fake + sigma_real - 2*s)\n self._report_result(np.real(dist))\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.square", "numpy.dot", "numpy.cov", "numpy.trace", "numpy.empty", "tensorflow.tile", "numpy.tile", "numpy.real", "numpy.mean", "tensorflow.device", "tensorflow.random_normal" ] ]
TerenceChen95/Retina-Unet-Pytorch
[ "fad5a9a0bcab5d81a0f1bb2537b9a2ead87828ca" ]
[ "generate_train_dataset.py" ]
[ "from dataset import Dataset\nimport os\nimport h5py\nimport numpy as np\nfrom PIL import Image\nfrom config import config\nfrom torchvision import transforms\n\ndef write_hdf5(arr, outfile):\n with h5py.File(outfile, 'w') as f :\n f.create_dataset(\"image\", data=arr, dtype=arr.dtype)\n\n\ndef get_dataset(root, mode, transform=None):\n dataSet = Dataset(root, transform=transform, mode=mode)\n imgs = np.empty((config['N_imgs'], 1, 565, 565))\n msks = np.empty((config['N_imgs'], 1, 565, 565))\n #msks = np.empty((config['N_imgs'], 565*565, 2))\n for i in range(len(dataSet)):\n if mode == 'train':\n img, label = dataSet[i]\n #print(img.shape, label.shape)\n assert(img.shape == (1, 565, 565))\n #assert(label.shape == (565*565, 2))\n imgs[i] = img\n msks[i] = label\n elif mode == 'test':\n img = dataSet[i]\n assert(img.shape == (1, 565, 565))\n imgs[i] = img\n if mode == 'train':\n write_hdf5(imgs, './hdf5/DRIVE_dataset_imgs_%s.hdf5' % (mode))\n write_hdf5(msks, './hdf5/DRIVE_dataset_msks_%s.hdf5' % (mode))\n elif mode == 'test':\n write_hdf5(imgs, './hdf5/DRIVE_dataset_imgs_%s.hdf5' % (mode))\n\n\n#transform = transforms.Compose([transforms.ToTensor()])\ntrain_root = os.getcwd()+'/data/training/'\ntest_root = os.getcwd()+'/data/testing/'\n\nget_dataset(train_root, mode='train')\nget_dataset(test_root, mode='test')\n\n\n" ]
[ [ "numpy.empty" ] ]
BenTenmann/scirpy
[ "2c5b99e7c5205adc506a10d1aca97250051fab81" ]
[ "scirpy/tests/test_workflow.py" ]
[ "\"\"\"Test entire workflows using small example datasets.\n\nThe workflow is additionally tested using a larger dataset\nby running the tutorial on the CI.\n\nCurrently, these tests are mainly designed to be ran on the\nBioconda-CI when building the container as a quick consistency check.\nThe tests need to be quick in order not to overload the bioconda CI,\nbut AIRR-compliance mandates to have these tests.\n\"\"\"\nfrom . import TESTDATA\nimport scirpy as ir\nimport pytest\nimport pandas.testing as pdt\nimport pandas as pd\nfrom scirpy.util import _is_na\nimport numpy as np\nfrom .util import _normalize_df_types\nimport tempfile\nimport scanpy as sc\n\n\[email protected]\[email protected](\"save_intermediates\", [False, True])\[email protected](\n \"adata_path,upgrade_schema,obs_expected\",\n [\n (\n TESTDATA / \"10x/vdj_nextgem_hs_pbmc3_t_filtered_contig_annotations.csv.gz\",\n False,\n TESTDATA / \"test_workflow/adata_10x_pbmc3_t.obs.expected.pkl.gz\",\n ),\n (\n TESTDATA / \"wu2020_200_old_schema.h5ad\",\n True,\n TESTDATA / \"test_workflow/adata_wu_200_old_schema.obs.expected.pkl.gz\",\n ),\n ],\n)\ndef test_workflow(\n adata_path, save_intermediates, upgrade_schema, obs_expected, tmp_path\n):\n def _save_and_load(adata):\n \"\"\"If save_intermediates is True, save the anndata to a temporary location\n and re-load it from disk.\"\"\"\n if save_intermediates:\n adata.write_h5ad(tmp_path / \"tmp_adata.h5ad\")\n return sc.read_h5ad(tmp_path / \"tmp_adata.h5ad\")\n else:\n return adata\n\n if upgrade_schema:\n adata = sc.read_h5ad(adata_path)\n ir.io.upgrade_schema(adata)\n else:\n adata = ir.io.read_10x_vdj(adata_path, include_fields=None)\n\n adata_obs_expected = pd.read_pickle(obs_expected)\n\n ir.tl.chain_qc(adata)\n adata = _save_and_load(adata)\n ir.pp.ir_dist(adata)\n adata = _save_and_load(adata)\n ir.tl.define_clonotypes(adata)\n adata = _save_and_load(adata)\n ir.tl.clonotype_network(adata)\n adata = _save_and_load(adata)\n ir.tl.clonal_expansion(adata)\n adata = _save_and_load(adata)\n ir.pl.clonotype_network(adata)\n adata = _save_and_load(adata)\n\n # turn nans into consistent value (nan)\n _normalize_df_types(adata.obs)\n\n # # Use this code to re-generate the \"expected file\", if necessary.\n # adata.obs.to_pickle(obs_expected, protocol=4)\n\n pdt.assert_frame_equal(\n adata.obs, adata_obs_expected, check_dtype=False, check_categorical=False\n )\n" ]
[ [ "pandas.read_pickle", "pandas.testing.assert_frame_equal" ] ]