Columns: repo_name (string, lengths 6–130) · hexsha (list) · file_path (list) · code (list) · apis (list) · possible_versions (list)
Yu-Nie/Yolo-v2-pytorch
[ "da18482b0d53ef4a99c88bde599927408c819127" ]
[ "train_coco.py" ]
[ "\"\"\"\n@author: Viet Nguyen <[email protected]>\n\"\"\"\nimport os\nimport argparse\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom src.coco_dataset import COCODataset\nfrom src.utils import *\nfrom src.loss import YoloLoss\nfrom src.yolo_net import Yolo\nfrom tensorboardX import SummaryWriter\nimport shutil\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\"You Only Look Once: Unified, Real-Time Object Detection\")\n parser.add_argument(\"--image_size\", type=int, default=448, help=\"The common width and height for all images\")\n parser.add_argument(\"--batch_size\", type=int, default=10, help=\"The number of images per batch\")\n parser.add_argument(\"--momentum\", type=float, default=0.9)\n parser.add_argument(\"--decay\", type=float, default=0.0005)\n parser.add_argument(\"--dropout\", type=float, default=0.5)\n parser.add_argument(\"--num_epoches\", type=int, default=160)\n parser.add_argument(\"--test_interval\", type=int, default=5, help=\"Number of epoches between testing phases\")\n parser.add_argument(\"--object_scale\", type=float, default=1.0)\n parser.add_argument(\"--noobject_scale\", type=float, default=0.5)\n parser.add_argument(\"--class_scale\", type=float, default=1.0)\n parser.add_argument(\"--coord_scale\", type=float, default=5.0)\n parser.add_argument(\"--reduction\", type=int, default=32)\n parser.add_argument(\"--es_min_delta\", type=float, default=0.0,\n help=\"Early stopping's parameter: minimum change loss to qualify as an improvement\")\n parser.add_argument(\"--es_patience\", type=int, default=0,\n help=\"Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.\")\n parser.add_argument(\"--train_set\", type=str, default=\"train\")\n parser.add_argument(\"--test_set\", type=str, default=\"val\")\n parser.add_argument(\"--year\", type=str, default=\"2014\", help=\"The year of dataset (2014 or 2017)\")\n parser.add_argument(\"--data_path\", type=str, default=\"data/COCO\", help=\"the root folder of dataset\")\n parser.add_argument(\"--pre_trained_model_type\", type=str, choices=[\"model\", \"params\"], default=\"model\")\n parser.add_argument(\"--pre_trained_model_path\", type=str, default=\"trained_models/whole_model_trained_yolo_coco\")\n parser.add_argument(\"--log_path\", type=str, default=\"tensorboard/yolo_coco\")\n parser.add_argument(\"--saved_path\", type=str, default=\"trained_models\")\n\n args = parser.parse_args()\n return args\n\n\ndef train(opt):\n if torch.cuda.is_available():\n torch.cuda.manual_seed(123)\n else:\n torch.manual_seed(123)\n learning_rate_schedule = {\"0\": 1e-5, \"5\": 1e-4,\n \"80\": 1e-5, \"110\": 1e-6}\n training_params = {\"batch_size\": opt.batch_size,\n \"shuffle\": True,\n \"drop_last\": True,\n \"collate_fn\": custom_collate_fn}\n\n test_params = {\"batch_size\": opt.batch_size,\n \"shuffle\": False,\n \"drop_last\": False,\n \"collate_fn\": custom_collate_fn}\n\n training_set = COCODataset(opt.data_path, opt.year, opt.train_set, opt.image_size)\n training_generator = DataLoader(training_set, **training_params)\n\n test_set = COCODataset(opt.data_path, opt.year, opt.test_set, opt.image_size, is_training=False)\n test_generator = DataLoader(test_set, **test_params)\n\n '''\n if torch.cuda.is_available():\n if opt.pre_trained_model_type == \"model\":\n model = torch.load(opt.pre_trained_model_path)\n else:\n model = Yolo(training_set.num_classes)\n model.load_state_dict(torch.load(opt.pre_trained_model_path))\n 
else:\n if opt.pre_trained_model_type == \"model\":\n model = torch.load(opt.pre_trained_model_path, map_location=lambda storage, loc: storage)\n else:\n model = Yolo(training_set.num_classes)\n model.load_state_dict(torch.load(opt.pre_trained_model_path, map_location=lambda storage, loc: storage))\n '''\n model = Yolo(training_set.num_classes)\n # The following line will re-initialize weight for the last layer, which is useful\n # when you want to retrain the model based on my trained weights. if you uncomment it,\n # you will see the loss is already very small at the beginning.\n nn.init.normal_(list(model.modules())[-1].weight, 0, 0.01)\n log_path = os.path.join(opt.log_path, \"{}\".format(opt.year))\n if os.path.isdir(log_path):\n shutil.rmtree(log_path)\n os.makedirs(log_path)\n writer = SummaryWriter(log_path)\n if torch.cuda.is_available():\n writer.add_graph(model.cpu(), torch.rand(opt.batch_size, 3, opt.image_size, opt.image_size))\n model.cuda()\n else:\n writer.add_graph(model, torch.rand(opt.batch_size, 3, opt.image_size, opt.image_size))\n criterion = YoloLoss(training_set.num_classes, model.anchors, opt.reduction)\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-5, momentum=opt.momentum, weight_decay=opt.decay)\n best_loss = 1e10\n best_epoch = 0\n model.train()\n num_iter_per_epoch = len(training_generator)\n for epoch in range(opt.num_epoches):\n if str(epoch) in learning_rate_schedule.keys():\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate_schedule[str(epoch)]\n for iter, batch in enumerate(training_generator):\n image, label = batch\n if torch.cuda.is_available():\n image = Variable(image.cuda(), requires_grad=True)\n else:\n image = Variable(image, requires_grad=True)\n optimizer.zero_grad()\n logits = model(image)\n loss, loss_coord, loss_conf, loss_cls = criterion(logits, label)\n loss.backward()\n optimizer.step()\n print(\"Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})\".format(\n epoch + 1,\n opt.num_epoches,\n iter + 1,\n num_iter_per_epoch,\n optimizer.param_groups[0]['lr'],\n loss,\n loss_coord,\n loss_conf,\n loss_cls))\n writer.add_scalar('Train/Total_loss', loss, epoch * num_iter_per_epoch + iter)\n writer.add_scalar('Train/Coordination_loss', loss_coord, epoch * num_iter_per_epoch + iter)\n writer.add_scalar('Train/Confidence_loss', loss_conf, epoch * num_iter_per_epoch + iter)\n writer.add_scalar('Train/Class_loss', loss_cls, epoch * num_iter_per_epoch + iter)\n if epoch % opt.test_interval == 0:\n model.eval()\n loss_ls = []\n loss_coord_ls = []\n loss_conf_ls = []\n loss_cls_ls = []\n for te_iter, te_batch in enumerate(test_generator):\n te_image, te_label = te_batch\n num_sample = len(te_label)\n if torch.cuda.is_available():\n te_image = te_image.cuda()\n with torch.no_grad():\n te_logits = model(te_image)\n batch_loss, batch_loss_coord, batch_loss_conf, batch_loss_cls = criterion(te_logits, te_label)\n loss_ls.append(batch_loss * num_sample)\n loss_coord_ls.append(batch_loss_coord * num_sample)\n loss_conf_ls.append(batch_loss_conf * num_sample)\n loss_cls_ls.append(batch_loss_cls * num_sample)\n te_loss = sum(loss_ls) / test_set.__len__()\n te_coord_loss = sum(loss_coord_ls) / test_set.__len__()\n te_conf_loss = sum(loss_conf_ls) / test_set.__len__()\n te_cls_loss = sum(loss_cls_ls) / test_set.__len__()\n print(\"Epoch: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})\".format(\n epoch + 1,\n opt.num_epoches,\n optimizer.param_groups[0]['lr'],\n 
te_loss,\n te_coord_loss,\n te_conf_loss,\n te_cls_loss))\n writer.add_scalar('Test/Total_loss', te_loss, epoch)\n writer.add_scalar('Test/Coordination_loss', te_coord_loss, epoch)\n writer.add_scalar('Test/Confidence_loss', te_conf_loss, epoch)\n writer.add_scalar('Test/Class_loss', te_cls_loss, epoch)\n model.train()\n if te_loss + opt.es_min_delta < best_loss:\n best_loss = te_loss\n best_epoch = epoch\n # torch.save(model, opt.saved_path + os.sep + \"trained_yolo_coco\")\n torch.save(model.state_dict(), opt.saved_path + os.sep + \"only_params_trained_yolo_coco\")\n torch.save(model, opt.saved_path + os.sep + \"whole_model_trained_yolo_coco\")\n\n # Early stopping\n if epoch - best_epoch > opt.es_patience > 0:\n print(\"Stop training at epoch {}. The lowest loss achieved is {}\".format(epoch, te_loss))\n break\n writer.export_scalars_to_json(log_path + os.sep + \"all_logs.json\")\n writer.close()\n\n\nif __name__ == \"__main__\":\n opt = get_args()\n train(opt)\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yuanqidu/IDGL
[ "64d2d73289ca0f6dcab966062d4cb15844236b37", "64d2d73289ca0f6dcab966062d4cb15844236b37" ]
[ "src/core/model.py", "src/core/layers/scalable_graphlearn.py" ]
[ "import os\nimport random\nimport numpy as np\nfrom collections import Counter\nfrom sklearn.metrics import r2_score\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport torch.nn.functional as F\n\nfrom .models.graph_clf import GraphClf\nfrom .models.text_graph import TextGraphRegression, TextGraphClf\nfrom .utils.text_data.vocab_utils import VocabModel\nfrom .utils import constants as Constants\nfrom .utils.generic_utils import to_cuda, create_mask\nfrom .utils.constants import INF\nfrom .utils.radam import RAdam\n\n\nclass Model(object):\n \"\"\"High level model that handles intializing the underlying network\n architecture, saving, updating examples, and predicting examples.\n \"\"\"\n def __init__(self, config, train_set=None):\n self.config = config\n if self.config['model_name'] == 'GraphClf':\n self.net_module = GraphClf\n elif self.config['model_name'] == 'TextGraphRegression':\n self.net_module = TextGraphRegression\n elif self.config['model_name'] == 'TextGraphClf':\n self.net_module = TextGraphClf\n else:\n raise RuntimeError('Unknown model_name: {}'.format(self.config['model_name']))\n print('[ Running {} model ]'.format(self.config['model_name']))\n\n if config['data_type'] == 'text':\n saved_vocab_file = os.path.join(config['data_dir'], '{}_seed{}.vocab'.format(config['dataset_name'], config.get('data_seed', 1234)))\n self.vocab_model = VocabModel.build(saved_vocab_file, train_set, self.config)\n\n if config['task_type'] == 'regression':\n assert config['out_predictions']\n self.criterion = F.mse_loss\n self.score_func = r2_score\n self.metric_name = 'r2'\n elif config['task_type'] == 'classification':\n self.criterion = F.nll_loss\n self.score_func = accuracy\n self.metric_name = 'acc'\n else:\n self.criterion = F.nll_loss\n self.score_func = None\n self.metric_name = None\n\n\n\n if self.config['pretrained']:\n self.init_saved_network(self.config['pretrained'])\n else:\n # Building network.\n self._init_new_network()\n\n num_params = 0\n for name, p in self.network.named_parameters():\n print('{}: {}'.format(name, str(p.size())))\n num_params += p.numel()\n\n print('#Parameters = {}\\n'.format(num_params))\n self._init_optimizer()\n\n\n def init_saved_network(self, saved_dir):\n _ARGUMENTS = ['word_embed_dim', 'hidden_size', 'f_qem', 'f_pos', 'f_ner',\n 'word_dropout', 'rnn_dropout',\n 'ctx_graph_hops', 'ctx_graph_topk',\n 'score_unk_threshold', 'score_yes_threshold',\n 'score_no_threshold']\n\n # Load all saved fields.\n fname = os.path.join(saved_dir, Constants._SAVED_WEIGHTS_FILE)\n print('[ Loading saved model %s ]' % fname)\n saved_params = torch.load(fname, map_location=lambda storage, loc: storage)\n self.state_dict = saved_params['state_dict']\n # for k in _ARGUMENTS:\n # if saved_params['config'][k] != self.config[k]:\n # print('Overwrite {}: {} -> {}'.format(k, self.config[k], saved_params['config'][k]))\n # self.config[k] = saved_params['config'][k]\n\n if self.config['data_type'] == 'text':\n w_embedding = self._init_embedding(len(self.vocab_model.word_vocab), self.config['word_embed_dim'])\n self.network = self.net_module(self.config, w_embedding, self.vocab_model.word_vocab)\n else:\n self.network = self.net_module(self.config)\n\n # Merge the arguments\n if self.state_dict:\n merged_state_dict = self.network.state_dict()\n for k, v in self.state_dict['network'].items():\n if k in merged_state_dict:\n merged_state_dict[k] = v\n self.network.load_state_dict(merged_state_dict)\n\n 
def _init_new_network(self):\n if self.config['data_type'] == 'text':\n w_embedding = self._init_embedding(len(self.vocab_model.word_vocab), self.config['word_embed_dim'],\n pretrained_vecs=self.vocab_model.word_vocab.embeddings)\n self.network = self.net_module(self.config, w_embedding, self.vocab_model.word_vocab)\n else:\n self.network = self.net_module(self.config)\n\n def _init_optimizer(self):\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n if self.config['optimizer'] == 'sgd':\n self.optimizer = optim.SGD(parameters, self.config['learning_rate'],\n momentum=self.config['momentum'],\n weight_decay=self.config['weight_decay'])\n elif self.config['optimizer'] == 'adam':\n self.optimizer = optim.Adam(parameters, lr=self.config['learning_rate'], weight_decay=self.config['weight_decay'])\n elif self.config['optimizer'] == 'adamax':\n self.optimizer = optim.Adamax(parameters, lr=self.config['learning_rate'])\n elif self.config['optimizer'] == 'radam':\n self.optimizer = RAdam(parameters, lr=self.config['learning_rate'], weight_decay=self.config['weight_decay'])\n else:\n raise RuntimeError('Unsupported optimizer: %s' % self.config['optimizer'])\n self.scheduler = ReduceLROnPlateau(self.optimizer, mode='max', factor=self.config['lr_reduce_factor'], \\\n patience=self.config['lr_patience'], verbose=True)\n\n def _init_embedding(self, vocab_size, embed_size, pretrained_vecs=None):\n \"\"\"Initializes the embeddings\n \"\"\"\n return nn.Embedding(vocab_size, embed_size, padding_idx=0,\n _weight=torch.from_numpy(pretrained_vecs).float()\n if pretrained_vecs is not None else None)\n\n def save(self, dirname):\n params = {\n 'state_dict': {\n 'network': self.network.state_dict(),\n },\n 'config': self.config,\n 'dir': dirname,\n }\n try:\n torch.save(params, os.path.join(dirname, Constants._SAVED_WEIGHTS_FILE))\n except BaseException:\n print('[ WARN: Saving failed... continuing anyway. 
]')\n\n\n def clip_grad(self):\n # Clip gradients\n if self.config['grad_clipping']:\n parameters = [p for p in self.network.parameters() if p.requires_grad]\n torch.nn.utils.clip_grad_norm_(parameters, self.config['grad_clipping'])\n\ndef train_batch(batch, network, vocab, criterion, forcing_ratio, rl_ratio, config, wmd=None):\n network.train(True)\n\n with torch.set_grad_enabled(True):\n ext_vocab_size = batch['oov_dict'].ext_vocab_size if batch['oov_dict'] else None\n\n network_out = network(batch, batch['targets'], criterion,\n forcing_ratio=forcing_ratio, partial_forcing=config['partial_forcing'], \\\n sample=config['sample'], ext_vocab_size=ext_vocab_size, \\\n include_cover_loss=config['show_cover_loss'])\n\n if rl_ratio > 0:\n batch_size = batch['context'].shape[0]\n sample_out = network(batch, saved_out=network_out, criterion=criterion, \\\n criterion_reduction=False, criterion_nll_only=True, \\\n sample=True, ext_vocab_size=ext_vocab_size)\n baseline_out = network(batch, saved_out=network_out, visualize=False, \\\n ext_vocab_size=ext_vocab_size)\n\n sample_out_decoded = sample_out.decoded_tokens.transpose(0, 1)\n baseline_out_decoded = baseline_out.decoded_tokens.transpose(0, 1)\n\n neg_reward = []\n for i in range(batch_size):\n scores = eval_batch_output([batch['target_src'][i]], vocab, batch['oov_dict'],\n [sample_out_decoded[i]], [baseline_out_decoded[i]])\n\n greedy_score = scores[1][config['rl_reward_metric']]\n reward_ = scores[0][config['rl_reward_metric']] - greedy_score\n\n if config['rl_wmd_ratio'] > 0:\n # Add word mover's distance\n sample_seq = batch_decoded_index2word([sample_out_decoded[i]], vocab, batch['oov_dict'])[0]\n greedy_seq = batch_decoded_index2word([baseline_out_decoded[i]], vocab, batch['oov_dict'])[0]\n\n sample_wmd = -wmd.distance(sample_seq, batch['target_src'][i]) / max(len(sample_seq.split()), 1)\n greedy_wmd = -wmd.distance(greedy_seq, batch['target_src'][i]) / max(len(greedy_seq.split()), 1)\n wmd_reward_ = sample_wmd - greedy_wmd\n wmd_reward_ = max(min(wmd_reward_, config['max_wmd_reward']), -config['max_wmd_reward'])\n reward_ += config['rl_wmd_ratio'] * wmd_reward_\n\n neg_reward.append(reward_)\n neg_reward = to_cuda(torch.Tensor(neg_reward), network.device)\n\n\n # if sample > baseline, the reward is positive (i.e. 
good exploration), rl_loss is negative\n rl_loss = torch.sum(neg_reward * sample_out.loss) / batch_size\n rl_loss_value = torch.sum(neg_reward * sample_out.loss_value).item() / batch_size\n loss = (1 - rl_ratio) * network_out.loss + rl_ratio * rl_loss\n loss_value = (1 - rl_ratio) * network_out.loss_value + rl_ratio * rl_loss_value\n\n metrics = eval_batch_output(batch['target_src'], vocab, \\\n batch['oov_dict'], baseline_out.decoded_tokens)[0]\n\n else:\n loss = network_out.loss\n loss_value = network_out.loss_value\n metrics = eval_batch_output(batch['target_src'], vocab, \\\n batch['oov_dict'], network_out.decoded_tokens)[0]\n\n return loss, loss_value, metrics\n\ndef accuracy(labels, output):\n preds = output.max(1)[1].type_as(labels)\n correct = preds.eq(labels).double()\n correct = correct.sum().item()\n return correct / len(labels)\n", "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..utils.generic_utils import to_cuda, normalize_adj\nfrom ..utils.constants import VERY_SMALL_NUMBER, INF\n\n\ndef compute_normalized_laplacian(adj):\n rowsum = torch.sum(adj, -1)\n d_inv_sqrt = torch.pow(rowsum, -0.5)\n d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)\n L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)\n return L_norm\n\n\nclass AnchorGraphLearner(nn.Module):\n def __init__(self, input_size, hidden_size, topk=None, epsilon=None, num_pers=16, metric_type='attention', device=None):\n super(AnchorGraphLearner, self).__init__()\n self.device = device\n self.topk = topk\n self.epsilon = epsilon\n self.metric_type = metric_type\n if metric_type == 'attention':\n self.linear_sims = nn.ModuleList([nn.Linear(input_size, hidden_size, bias=False) for _ in range(num_pers)])\n print('[ Multi-perspective {} AnchorGraphLearner: {} ]'.format(metric_type, num_pers))\n\n elif metric_type == 'weighted_cosine':\n self.weight_tensor = torch.Tensor(num_pers, input_size)\n self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))\n print('[ Multi-perspective {} AnchorGraphLearner: {} ]'.format(metric_type, num_pers))\n\n\n elif metric_type == 'gat_attention':\n self.linear_sims1 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])\n self.linear_sims2 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])\n\n self.leakyrelu = nn.LeakyReLU(0.2)\n\n print('[ GAT_Attention AnchorGraphLearner]')\n\n elif metric_type == 'kernel':\n self.precision_inv_dis = nn.Parameter(torch.Tensor(1, 1))\n self.precision_inv_dis.data.uniform_(0, 1.0)\n self.weight = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(input_size, hidden_size)))\n elif metric_type == 'transformer':\n self.linear_sim1 = nn.Linear(input_size, hidden_size, bias=False)\n self.linear_sim2 = nn.Linear(input_size, hidden_size, bias=False)\n\n\n elif metric_type == 'cosine':\n pass\n\n else:\n raise ValueError('Unknown metric_type: {}'.format(metric_type))\n\n print('[ Graph Learner metric type: {} ]'.format(metric_type))\n\n def forward(self, context, anchors, ctx_mask=None, anchor_mask=None):\n \"\"\"\n Parameters\n :context, (batch_size, ctx_size, dim)\n :ctx_mask, (batch_size, ctx_size)\n\n Returns\n :attention, (batch_size, ctx_size, ctx_size)\n \"\"\"\n if self.metric_type == 'attention':\n attention = 0\n for _ in range(len(self.linear_sims)):\n context_fc = torch.relu(self.linear_sims[_](context))\n attention += torch.matmul(context_fc, context_fc.transpose(-1, -2))\n\n attention /= 
len(self.linear_sims)\n markoff_value = -INF\n\n elif self.metric_type == 'weighted_cosine':\n expand_weight_tensor = self.weight_tensor.unsqueeze(1)\n if len(context.shape) == 3:\n expand_weight_tensor = expand_weight_tensor.unsqueeze(1)\n\n context_fc = context.unsqueeze(0) * expand_weight_tensor\n context_norm = F.normalize(context_fc, p=2, dim=-1)\n\n anchors_fc = anchors.unsqueeze(0) * expand_weight_tensor\n anchors_norm = F.normalize(anchors_fc, p=2, dim=-1)\n\n attention = torch.matmul(context_norm, anchors_norm.transpose(-1, -2)).mean(0)\n markoff_value = 0\n\n\n elif self.metric_type == 'transformer':\n Q = self.linear_sim1(context)\n attention = torch.matmul(Q, Q.transpose(-1, -2)) / math.sqrt(Q.shape[-1])\n markoff_value = -INF\n\n\n elif self.metric_type == 'gat_attention':\n attention = []\n for _ in range(len(self.linear_sims1)):\n a_input1 = self.linear_sims1[_](context)\n a_input2 = self.linear_sims2[_](context)\n attention.append(self.leakyrelu(a_input1 + a_input2.transpose(-1, -2)))\n\n attention = torch.mean(torch.stack(attention, 0), 0)\n markoff_value = -INF\n\n\n elif self.metric_type == 'kernel':\n dist_weight = torch.mm(self.weight, self.weight.transpose(-1, -2))\n attention = self.compute_distance_mat(context, dist_weight)\n attention = torch.exp(-0.5 * attention * (self.precision_inv_dis**2))\n\n markoff_value = 0\n\n elif self.metric_type == 'cosine':\n context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))\n attention = torch.mm(context_norm, context_norm.transpose(-1, -2)).detach()\n markoff_value = 0\n\n\n if ctx_mask is not None:\n attention = attention.masked_fill_(1 - ctx_mask.byte().unsqueeze(-1), markoff_value)\n\n if anchor_mask is not None:\n attention = attention.masked_fill_(1 - anchor_mask.byte().unsqueeze(-2), markoff_value)\n\n if self.epsilon is not None:\n attention = self.build_epsilon_neighbourhood(attention, self.epsilon, markoff_value)\n\n if self.topk is not None:\n attention = self.build_knn_neighbourhood(attention, self.topk, markoff_value)\n\n return attention\n\n def build_knn_neighbourhood(self, attention, topk, markoff_value):\n topk = min(topk, attention.size(-1))\n knn_val, knn_ind = torch.topk(attention, topk, dim=-1)\n weighted_adjacency_matrix = to_cuda((markoff_value * torch.ones_like(attention)).scatter_(-1, knn_ind, knn_val), self.device)\n return weighted_adjacency_matrix\n\n def build_epsilon_neighbourhood(self, attention, epsilon, markoff_value):\n mask = (attention > epsilon).detach().float()\n weighted_adjacency_matrix = attention * mask + markoff_value * (1 - mask)\n return weighted_adjacency_matrix\n\n def compute_distance_mat(self, X, weight=None):\n if weight is not None:\n trans_X = torch.mm(X, weight)\n else:\n trans_X = X\n norm = torch.sum(trans_X * X, dim=-1)\n dists = -2 * torch.matmul(trans_X, X.transpose(-1, -2)) + norm.unsqueeze(0) + norm.unsqueeze(1)\n return dists\n\n\ndef get_binarized_kneighbors_graph(features, topk, mask=None, device=None):\n assert features.requires_grad is False\n # Compute cosine similarity matrix\n features_norm = features.div(torch.norm(features, p=2, dim=-1, keepdim=True))\n attention = torch.matmul(features_norm, features_norm.transpose(-1, -2))\n\n if mask is not None:\n attention = attention.masked_fill_(1 - mask.byte().unsqueeze(1), 0)\n attention = attention.masked_fill_(1 - mask.byte().unsqueeze(-1), 0)\n\n # Extract and Binarize kNN-graph\n topk = min(topk, attention.size(-1))\n _, knn_ind = torch.topk(attention, topk, dim=-1)\n adj = 
to_cuda(torch.zeros_like(attention).scatter_(-1, knn_ind, 1), device)\n return adj\n" ]
[ [ "torch.optim.Adam", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.Tensor", "torch.load", "torch.sum", "torch.from_numpy", "torch.nn.utils.clip_grad_norm_", "torch.set_grad_enabled", "torch.optim.SGD", "torch.optim.Adamax" ], [ "torch.nn.functional.normalize", "torch.mm", "torch.norm", "torch.isinf", "torch.Tensor", "torch.ones_like", "torch.topk", "torch.sum", "torch.zeros_like", "torch.exp", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.nn.init.xavier_uniform_", "torch.diagflat", "torch.pow", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mylovage/czsc
[ "21c68c8451e951fd8f3c242c4c4f52dac5cb6283" ]
[ "test/test_ta.py" ]
[ "# coding: utf-8\nimport os\nimport pandas as pd\nimport numpy as np\nimport czsc\n\ncur_path = os.path.split(os.path.realpath(__file__))[0]\nfile_kline = os.path.join(cur_path, \"data/000001.SH_D.csv\")\nkline = pd.read_csv(file_kline, encoding=\"utf-8\")\nkline.loc[:, \"dt\"] = pd.to_datetime(kline.dt)\nbars = kline.to_dict(\"records\")\nclose = np.array([x['close'] for x in bars], dtype=np.double)\n\n\ndef test_sma():\n ma5 = czsc.SMA(close, 5)\n assert len(ma5) == len(close)\n assert round(ma5[-1], 2) == 3362.53\n assert round(ma5[-2], 2) == 3410.62\n\n\ndef test_macd():\n diff, dea, macd = czsc.MACD(close)\n\n assert len(diff) == len(dea) == len(macd) == len(close)\n assert round(macd[-1], 2) == 13.35\n assert round(macd[-5], 2) == 88.0\n\n assert round(diff[-1], 2) == 117.3\n assert round(diff[-5], 2) == 127.51\n\n assert round(dea[-1], 2) == 110.62\n assert round(dea[-5], 2) == 83.51\n\n\ndef test_jdk():\n high = np.array([x['high'] for x in bars], dtype=np.double)\n low = np.array([x['low'] for x in bars], dtype=np.double)\n k, d, j = czsc.KDJ(close, high, low)\n\n assert round(k[-1], 2) == 59.94\n assert round(d[-1], 2) == 80.47\n assert round(j[-1], 2) == 18.87\n" ]
[ [ "numpy.array", "pandas.read_csv", "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
netanelbarel/improvedCvat
[ "ff2894d3b3757a5e080d3130d6875cfd14201bf5" ]
[ "cvat/apps/engine/task.py" ]
[ "# Copyright (C) 2018 Intel Corporation\r\n#\r\n# SPDX-License-Identifier: MIT\r\n\r\nimport os\r\nimport sys\r\nimport rq\r\nimport shlex\r\nimport shutil\r\nimport tempfile\r\nimport requests\r\nimport re\r\nimport xml.etree.ElementTree as ET\r\nfrom threading import Thread\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nfrom traceback import print_exception\r\nfrom ast import literal_eval\r\nfrom .handle_file_s3 import copyFileToOSByThread, deleteFolder, getFileUrl, copyFileToOS, uploadFile, downloadFile, getBucketConnection\r\nfrom .segmentation import process_watershed\r\n\r\nimport numpy as np\r\nimport urllib\r\nimport ssl\r\nfrom imutils.video import FPS\r\nimport argparse\r\nimport imutils\r\nimport cv2\r\nimport threading\r\nimport time\r\nimport json\r\nimport errno\r\n\r\nimport skvideo.io\r\n\r\nimport mimetypes\r\n_SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))\r\n_MEDIA_MIMETYPES_FILE = os.path.join(_SCRIPT_DIR, \"media.mimetypes\")\r\nmimetypes.init(files=[_MEDIA_MIMETYPES_FILE])\r\n\r\nfrom cvat.apps.engine.models import StatusChoice\r\nfrom cvat.apps.engine import formatter\r\n\r\nimport django_rq\r\nfrom django.forms.models import model_to_dict\r\nfrom django.conf import settings\r\nfrom django.core import serializers\r\nfrom django.db import transaction\r\nfrom django.db.models import Max\r\nfrom ffmpy import FFmpeg\r\nfrom pyunpack import Archive\r\nfrom distutils.dir_util import copy_tree\r\nfrom collections import OrderedDict\r\nfrom django.contrib.auth.models import User\r\n\r\nfrom . import models\r\nfrom .log import slogger\r\n\r\n############################# Global Variables\r\nTRACKER_THREADS = {}\r\n\r\n############################# Low Level server API\r\n\r\[email protected]\r\ndef create_empty(params):\r\n \"\"\"Create empty directory structure for a new task, add it to DB.\"\"\"\r\n\r\n db_task = models.Task()\r\n\r\n db_task.name = params['task_name']\r\n db_task.bug_tracker = params['bug_tracker_link']\r\n db_task.path = \"\"\r\n db_task.size = 0\r\n db_task.owner = params['owner']\r\n db_task.project = models.Projects.objects.get(pk=params['project'])\r\n db_task.assignee = User.objects.get(pk=params['assignee'])\r\n db_task.save()\r\n task_path = os.path.join(settings.DATA_ROOT, str(db_task.id))\r\n db_task.set_task_dirname(task_path)\r\n\r\n task_path = db_task.get_task_dirname()\r\n if os.path.isdir(task_path):\r\n shutil.rmtree(task_path)\r\n os.mkdir(task_path)\r\n\r\n upload_dir = db_task.get_upload_dirname()\r\n os.makedirs(upload_dir)\r\n output_dir = db_task.get_data_dirname()\r\n os.makedirs(output_dir)\r\n\r\n return db_task\r\n\r\ndef create(tid, params):\r\n \"\"\"Schedule the task\"\"\"\r\n q = django_rq.get_queue('default')\r\n q.enqueue_call(func=_create_thread, args=(tid, params),\r\n job_id=\"task.create/{}\".format(tid))\r\n\r\ndef check(tid):\r\n \"\"\"Check status of the scheduled task\"\"\"\r\n response = {}\r\n queue = django_rq.get_queue('default')\r\n job = queue.fetch_job(\"task.create/{}\".format(tid))\r\n if job is None:\r\n response = {\"state\": \"unknown\"}\r\n elif job.is_failed:\r\n response = {\"state\": \"error\", \"stderr\": \"Could not create the task. 
\" + job.exc_info }\r\n elif job.is_finished:\r\n destFile = r'/home/django/data/' + str(tid) + r'/data/xml/annotations.txt'\r\n if os.path.exists(destFile):\r\n with open(destFile, 'r') as f:\r\n fileData = f.read()\r\n response = {\"state\": \"created\", \"annotationFile\" : fileData, \"tid\" : tid}\r\n else:\r\n response = {\"state\": \"created\"}\r\n else:\r\n response = {\"state\": \"started\"}\r\n\r\n if 'status' in job.meta:\r\n response['status'] = job.meta['status']\r\n\r\n return response\r\n\r\[email protected]\r\ndef delete(tid):\r\n \"\"\"Delete the task\"\"\"\r\n db_task = models.Task.objects.select_for_update().get(pk=tid)\r\n if db_task:\r\n db_task.delete()\r\n shutil.rmtree(db_task.get_task_dirname(), ignore_errors=True)\r\n threads = deleteFolder(db_task.get_task_dirname())\r\n\r\n for t in threads:\r\n t.join()\r\n else:\r\n raise Exception(\"The task doesn't exist\")\r\n\r\[email protected]\r\ndef update(tid, labels, score, assignee):\r\n \"\"\"Update labels for the task\"\"\"\r\n\r\n db_task = models.Task.objects.select_for_update().get(pk=tid)\r\n db_labels = list(db_task.label_set.prefetch_related('attributespec_set').all())\r\n\r\n if (labels):\r\n new_labels = _parse_labels(labels)\r\n old_labels = _parse_db_labels(db_labels)\r\n\r\n for label_name in new_labels:\r\n if label_name in old_labels:\r\n db_label = [l for l in db_labels if l.name == label_name][0]\r\n for attr_name in new_labels[label_name]:\r\n if attr_name in old_labels[label_name]:\r\n db_attr = [attr for attr in db_label.attributespec_set.all()\r\n if attr.get_name() == attr_name][0]\r\n new_attr = new_labels[label_name][attr_name]\r\n old_attr = old_labels[label_name][attr_name]\r\n if new_attr['prefix'] != old_attr['prefix']:\r\n raise Exception(\"new_attr['prefix'] != old_attr['prefix']\")\r\n if new_attr['type'] != old_attr['type']:\r\n raise Exception(\"new_attr['type'] != old_attr['type']\")\r\n if set(old_attr['values']) - set(new_attr['values']):\r\n raise Exception(\"set(old_attr['values']) - set(new_attr['values'])\")\r\n\r\n db_attr.text = \"{}{}={}:{}\".format(new_attr['prefix'],\r\n new_attr['type'], attr_name, \",\".join(new_attr['values']))\r\n db_attr.save()\r\n else:\r\n db_attr = models.AttributeSpec()\r\n attr = new_labels[label_name][attr_name]\r\n db_attr.text = \"{}{}={}:{}\".format(attr['prefix'],\r\n attr['type'], attr_name, \",\".join(attr['values']))\r\n db_attr.label = db_label\r\n db_attr.save()\r\n else:\r\n db_label = models.Label()\r\n db_label.name = label_name\r\n db_label.task = db_task\r\n db_label.save()\r\n for attr_name in new_labels[label_name]:\r\n db_attr = models.AttributeSpec()\r\n attr = new_labels[label_name][attr_name]\r\n db_attr.text = \"{}{}={}:{}\".format(attr['prefix'],\r\n attr['type'], attr_name, \",\".join(attr['values']))\r\n db_attr.label = db_label\r\n db_attr.save()\r\n\r\n db_task.assignee = User.objects.get(pk=assignee)\r\n # If score sent from the client is -1 it means there is no score because the project has_score attribute is set to true.\r\n if (score != -1): \r\n db_task.score = score\r\n db_task.save()\r\n\r\[email protected]\r\ndef updateProperties(tid, properties):\r\n db_task = models.Task.objects.select_for_update().get(pk=tid)\r\n\r\n newFrameProperties = _parse_frameproperties(properties)\r\n\r\n for frameprop in newFrameProperties:\r\n db_taskframespec = models.TaskFrameSpec()\r\n db_taskframespec.task = db_task\r\n\r\n db_framepropvals = models.FrameProperties.objects.get(prop=frameprop[0], value=frameprop[1], 
project__pk=db_task.project.pk)\r\n db_taskframespec.propVal = db_framepropvals\r\n if (models.TaskFrameSpec.objects.filter(task__id=db_task.id, propVal__id=db_framepropvals.id).count() == 0):\r\n db_taskframespec.save()\r\n\r\ndef get_frame_path(tid, frame):\r\n \"\"\"Read corresponding frame for the task\"\"\"\r\n db_task = models.Task.objects.get(pk=tid)\r\n path = _get_frame_path(frame, db_task.get_data_dirname())\r\n\r\n return path\r\n\r\ndef get_frame_watershed_path(tid, frame):\r\n \"\"\"Read corresponding frame for the task\"\"\"\r\n db_task = models.Task.objects.get(pk=tid)\r\n path = _get_frame_watershed_path(frame, db_task.get_data_dirname())\r\n\r\n return path\r\n\r\ndef get(tid):\r\n \"\"\"Get the task as dictionary of attributes\"\"\"\r\n db_task = models.Task.objects.get(pk=tid)\r\n if db_task:\r\n db_labels = db_task.label_set.prefetch_related('attributespec_set').order_by('-pk').all()\r\n im_meta_data = get_image_meta_cache(db_task)\r\n attributes = {}\r\n for db_label in db_labels:\r\n attributes[db_label.id] = {}\r\n for db_attrspec in db_label.attributespec_set.all():\r\n attributes[db_label.id][db_attrspec.id] = db_attrspec.text\r\n db_segments = list(db_task.segment_set.prefetch_related('job_set').all())\r\n segment_length = max(db_segments[0].stop_frame - db_segments[0].start_frame + 1, 1)\r\n job_indexes = []\r\n for segment in db_segments:\r\n db_job = segment.job_set.first()\r\n job_indexes.append({\r\n \"job_id\": db_job.id,\r\n \"max_shape_id\": db_job.max_shape_id,\r\n })\r\n\r\n labels_colors = models.LabelDetails.objects.filter(labelType__label__in=[db_label.name for db_label in db_labels])\r\n\r\n response = {\r\n \"status\": db_task.status,\r\n \"spec\": {\r\n \"labels\": OrderedDict((db_label.id, db_label.name) for db_label in db_labels),\r\n \"attributes\": attributes,\r\n \"segmentation\": {label_color.labelType.label: {\"color\": label_color.color, \"label_type_id\": label_color.labelType.id} for label_color in labels_colors}\r\n },\r\n \"size\": db_task.size,\r\n \"taskid\": db_task.id,\r\n \"name\": db_task.name,\r\n \"mode\": db_task.mode,\r\n \"segment_length\": segment_length,\r\n \"jobs\": job_indexes,\r\n \"overlap\": db_task.overlap,\r\n \"z_orded\": db_task.z_order,\r\n \"flipped\": db_task.flipped,\r\n \"score\": db_task.score,\r\n \"image_meta_data\": im_meta_data,\r\n }\r\n else:\r\n raise Exception(\"Cannot find the task: {}\".format(tid))\r\n\r\n return response\r\n\r\n\r\[email protected]\r\ndef save_job_status(jid, status, user):\r\n db_job = models.Job.objects.select_related(\"segment__task\").select_for_update().get(pk = jid)\r\n db_task = db_job.segment.task\r\n status = StatusChoice(status)\r\n\r\n slogger.job[jid].info('changing job status from {} to {} by an user {}'.format(db_job.status, str(status), user))\r\n\r\n db_job.status = status.value\r\n db_job.save()\r\n db_segments = list(db_task.segment_set.prefetch_related('job_set').all())\r\n db_jobs = [db_segment.job_set.first() for db_segment in db_segments]\r\n\r\n if len(list(filter(lambda x: StatusChoice(x.status) == StatusChoice.ANNOTATION, db_jobs))) > 0:\r\n db_task.status = StatusChoice.ANNOTATION\r\n elif len(list(filter(lambda x: StatusChoice(x.status) == StatusChoice.VALIDATION, db_jobs))) > 0:\r\n db_task.status = StatusChoice.VALIDATION\r\n else:\r\n db_task.status = StatusChoice.COMPLETED\r\n\r\n db_task.save()\r\n\r\nclass CSRTTrackerThread(threading.Thread):\r\n def __init__(self, data, base_dir, results):\r\n threading.Thread.__init__(self)\r\n self.data = 
data\r\n self.base_dir = base_dir\r\n self.results = results\r\n self._stop_event = False\r\n def stop(self):\r\n self._stop_event = True\r\n def stopped(self):\r\n return self._stop_event\r\n def run(self):\r\n def _frame_path(frame, base_dir):\r\n d1 = str(frame // 10000)\r\n d2 = str(frame // 100)\r\n path = os.path.join(d1, d2, str(frame) + '.jpg')\r\n if base_dir:\r\n path = os.path.join(base_dir, path)\r\n\r\n return path\r\n\r\n def _get_frame(currentFrame, base_dir):\r\n # Download the requested frame\r\n frame_path = _frame_path(currentFrame, base_dir)\r\n downloadFile(settings.AWS_STORAGE_BUCKET_NAME, frame_path, frame_path)\r\n\r\n return frame_path\r\n\r\n tracker = cv2.TrackerCSRT_create()\r\n currentFrame = self.data['frame']\r\n frame_path = _get_frame(currentFrame, self.base_dir)\r\n frame = cv2.imread(frame_path)\r\n self.results[self.data['id']] = {'results': {}}\r\n\r\n counter = 0\r\n\r\n x = self.data['positions']['x']\r\n y = self.data['positions']['y']\r\n w = self.data['positions']['w']\r\n h = self.data['positions']['h']\r\n\r\n bbox = (x, y, w, h)\r\n\r\n tracker.init(frame, bbox)\r\n\r\n if os.environ.get('WITH_OS') == 'True':\r\n os.remove(frame_path)\r\n\r\n while ((not self.stopped()) and (counter < 10)):\r\n currentFrame += 1\r\n\r\n frame_path = _get_frame(currentFrame, self.base_dir)\r\n frame = cv2.imread(frame_path)\r\n \r\n if frame is None:\r\n break\r\n\r\n ok, bbox = tracker.update(frame)\r\n if os.environ.get('WITH_OS') == 'True':\r\n os.remove(frame_path)\r\n\r\n (x, y, w, h) = [int(v) for v in bbox]\r\n \r\n if (h == 0 and w == 0):\r\n self.results[self.data['id']]['results'][currentFrame] = {'x': x, 'y': y, 'h': h, 'w': w}\r\n break\r\n\r\n self.results[self.data['id']]['results'][currentFrame] = {'x': x, 'y': y, 'h': h, 'w': w}\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n\r\n counter += 1\r\n\r\ndef track_shapes(data, tid):\r\n base_dir='/home/django/data/%d/data' % (tid)\r\n\r\n results = {}\r\n shape = data['shapes'][0]\r\n\r\n results[shape['id']] = {'results': {}}\r\n thread = CSRTTrackerThread(shape, base_dir, results)\r\n thread.start()\r\n\r\n if tid not in TRACKER_THREADS:\r\n TRACKER_THREADS[tid] = []\r\n\r\n TRACKER_THREADS[tid].append(thread)\r\n\r\n thread.join()\r\n \r\n return results\r\n\r\ndef stop_tracking(tid):\r\n for thread in TRACKER_THREADS[tid]:\r\n thread.stop()\r\n\r\ndef download_vid(tid, currentTask):\r\n base_dir = '/home/django/data/%d/data' % (tid)\r\n vid_dir_path = os.path.join(base_dir, 'video')\r\n if not os.path.isdir(vid_dir_path):\r\n try:\r\n os.mkdir(vid_dir_path)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n os.remove(vid_dir_path)\r\n os.mkdir(vid_dir_path)\r\n\r\n if os.environ.get('WITH_OS') == 'True':\r\n bucket = getBucketConnection()\r\n\r\n for object_summary in bucket.objects.filter(Prefix=base_dir + \"/video\"):\r\n currentTask[\"video path\"] = object_summary.key\r\n downloadFile(settings.AWS_STORAGE_BUCKET_NAME, currentTask[\"video path\"], currentTask[\"video path\"])\r\n break\r\n else:\r\n currentTask[\"video path\"] = os.path.join(vid_dir_path, os.listdir(vid_dir_path)[0])\r\n \r\n currentTask[\"downloaded\"] = True\r\n currentTask[\"startedDownloading\"] = False\r\n\r\ndef track_all_video(shape, currentTask):\r\n vs = cv2.VideoCapture(currentTask[\"video path\"].split(\"?\")[0])\r\n\r\n currentFrame = shape['frame']\r\n vs.set(1, currentFrame)\r\n \r\n box = (shape['positions']['x'], shape['positions']['y'], shape['positions']['h'], shape['positions']['w'])\r\n\r\n ok, frame = 
vs.read()\r\n \r\n # Add a tracker to each box in the frame\r\n tracker = cv2.TrackerCSRT_create()\r\n tracker.init(frame, box)\r\n\r\n while not currentTask[\"shapes\"][shape[\"id\"]][\"stopped\"]:\r\n currentFrame += 1\r\n ok, frame = vs.read()\r\n \r\n if frame is None:\r\n break\r\n \r\n ok, box = tracker.update(frame)\r\n\r\n (x, y, h, w) = [int(v) for v in box]\r\n \r\n if (h == 0 and w == 0):\r\n break\r\n\r\n # If the predicted position is lower than 0 the box is out of bounds.\r\n xtl = x if x > 0 else 0\r\n ytl = y if y > 0 else 0\r\n\r\n # If the predicted position is greater than either the frame width or height the box is out of bounds.\r\n xbr = shape[\"frameWidth\"] if (x + w) > shape[\"frameWidth\"] else (x + w)\r\n ybr = shape[\"frameHeight\"] if (y + h) > shape[\"frameHeight\"] else (y + h)\r\n\r\n currentTask[\"shapes\"][shape[\"id\"]][\"positions\"][currentFrame] = {\"xtl\": xtl, \"ytl\": ytl, \"xbr\": xbr, \"ybr\": ybr, \"occluded\": shape[\"occluded\"], \"z_order\": shape[\"z_order\"], \"outside\": shape[\"outside\"]}\r\n currentTask[\"shapes\"][shape[\"id\"]][\"finished\"] = True\r\n\r\ndef check_video_path(tid):\r\n base_dir = '/home/django/data/%d/data' % (tid)\r\n\r\n if os.environ.get('WITH_OS') == 'False':\r\n return os.path.exists(base_dir + \"/video\")\r\n else: \r\n bucket = getBucketConnection()\r\n\r\n objs = list(bucket.objects.filter(Prefix=base_dir + \"/video\"))\r\n \r\n return len(objs) > 0\r\n\r\ndef watershed(tid, frame, draws, username):\r\n\r\n frame_path = \"/home/django/data/watershed/\" + str(tid) + \"/\" + username + \"/\" + str(frame) + \".jpg\"\r\n watershed_path = get_frame_path(tid, frame).replace('.jpg', '_w.png')\r\n\r\n cvImg = cv2.imread(frame_path)\r\n\r\n print(\"start process\")\r\n overlay = process_watershed(cvImg, draws, tid, frame)\r\n print(\"end process\")\r\n\r\n print(\"start save\")\r\n save_watershed_image(overlay, watershed_path)\r\n print(\"end save\")\r\n \r\n # q = django_rq.get_queue('default')\r\n # q.enqueue_call(func=save_watershed_matrix, args=(tid, frame, matrix),\r\n # job_id=\"task/{}.frame/{}.save_matrix\".format(tid, frame))\r\n\r\n #result['polygons'] = polygons\r\n\r\n #return result\r\n\r\ndef save_watershed_image(image, path):\r\n im = Image.fromarray(image)\r\n im.save(path)\r\n\r\ndef compress_matrix(matrix):\r\n compressedMatrix = []\r\n sequenceCount = 0\r\n prevLabel = matrix[0][0]\r\n\r\n # Each sequence (n elements) of label in matrix is reduced\r\n # to array with 2 elements: [the label, n (sequence count)]\r\n for currLabel in np.nditer(matrix):\r\n if currLabel == prevLabel:\r\n sequenceCount += 1\r\n else:\r\n compressedMatrix.append([prevLabel, sequenceCount])\r\n sequenceCount = 1\r\n\r\n prevLabel = currLabel\r\n\r\n return compressedMatrix\r\n\r\ndef save_watershed_matrix(tid, frame, matrix):\r\n db_task = models.Task.objects.get(pk=tid)\r\n models.Watershed.objects.update_or_create(task=db_task, frame=frame, defaults={'task':db_task, 'frame':frame, 'watershed':compress_matrix(matrix)})\r\n\r\ndef save_paintings(tid, frame, paintings):\r\n db_task = models.Task.objects.get(pk=tid)\r\n models.Watershed.objects.update_or_create(task=db_task, frame=frame, defaults={'task':db_task, 'frame':frame, 'paintings':paintings})\r\n\r\ndef get_paintings(tid, frame):\r\n db_task = models.Task.objects.get(pk=tid)\r\n db_frame_paintings = models.Watershed.objects.filter(task=db_task, frame=frame).first()\r\n\r\n if db_frame_paintings is None:\r\n paintings = []\r\n else:\r\n paintings = 
db_frame_paintings.paintings\r\n \r\n return paintings\r\n\r\ndef get_task_byjob(jid):\r\n \"\"\"Get the task by the jobid\"\"\"\r\n db_job = models.Job.objects.select_related(\"segment__task\").get(id=jid)\r\n if db_job:\r\n db_segment = db_job.segment\r\n db_task = db_segment.task\r\n\r\n return get(db_task.id)\r\n else:\r\n raise Exception(\"Cannot find the job: {}\".format(jid))\r\n return {}\r\n\r\ndef get_job(jid):\r\n \"\"\"Get the job as dictionary of attributes\"\"\"\r\n db_job = models.Job.objects.select_related(\"segment__task\").get(id=jid)\r\n if db_job:\r\n db_segment = db_job.segment\r\n db_task = db_segment.task\r\n im_meta_data = get_image_meta_cache(db_task)\r\n\r\n # Truncate extra image sizes\r\n if db_task.mode == 'annotation':\r\n im_meta_data['original_size'] = im_meta_data['original_size'][db_segment.start_frame:db_segment.stop_frame + 1]\r\n\r\n db_labels = db_task.label_set.prefetch_related('attributespec_set').order_by('-pk').all()\r\n attributes = {}\r\n for db_label in db_labels:\r\n attributes[db_label.id] = {}\r\n for db_attrspec in db_label.attributespec_set.all():\r\n attributes[db_label.id][db_attrspec.id] = db_attrspec.text\r\n\r\n framePropertiesDict = {\"allProperties\": {}, \"keyframeSpec\": {}}\r\n\r\n # Get all of the task frame spec rows related to the requested task.\r\n taskFrameSpecQuerySet = db_task.taskframespec_set.all()\r\n\r\n # Save the prop name, value name, and relation id for each row in the database for the task in a dictionary\r\n for taskFrameSpec in taskFrameSpecQuerySet:\r\n propName = taskFrameSpec.propVal.prop\r\n valName = taskFrameSpec.propVal.value\r\n propValId = taskFrameSpec.propVal.pk\r\n\r\n # If the propName is not in the dictionary yet, add an empty dictionary to it\r\n if (propName not in framePropertiesDict[\"allProperties\"]):\r\n framePropertiesDict[\"allProperties\"][propName] = {}\r\n framePropertiesDict[\"allProperties\"][propName][valName] = propValId\r\n \r\n keyframes = taskFrameSpec.keyframespec_set.all()\r\n for keyframe in keyframes:\r\n frame = keyframe.frame\r\n if (frame not in framePropertiesDict[\"keyframeSpec\"]):\r\n framePropertiesDict[\"keyframeSpec\"][frame] = {}\r\n framePropertiesDict[\"keyframeSpec\"][frame][propName] = propValId\r\n \r\n labels_colors = models.LabelDetails.objects.filter(labelType__label__in=[db_label.name for db_label in db_labels])\r\n commentsList = list(models.Comments.objects.filter(task=db_task).values_list('frame', 'comment'))\r\n comments = {}\r\n for comment in commentsList:\r\n comments[comment[0]] = comment[1]\r\n \r\n project = serializers.serialize('json', [db_task.project])\r\n\r\n response = {\r\n \"status\": db_job.status,\r\n \"labels\": OrderedDict((db_label.id, db_label.name) for db_label in db_labels),\r\n \"frameProperties\": framePropertiesDict,\r\n \"comments\": comments,\r\n \"segmentation\": {label_color.labelType.label: {\"color\": label_color.color, \"label_type_id\": label_color.labelType.id} for label_color in labels_colors},\r\n \"stop\": db_segment.stop_frame,\r\n \"taskid\": db_task.id,\r\n \"slug\": db_task.name,\r\n \"jobid\": jid,\r\n \"start\": db_segment.start_frame,\r\n \"mode\": db_task.mode,\r\n \"overlap\": db_task.overlap,\r\n \"attributes\": attributes,\r\n \"z_order\": db_task.z_order,\r\n \"flipped\": db_task.flipped,\r\n \"score\": db_task.score,\r\n \"project\": project,\r\n \"image_meta_data\": im_meta_data,\r\n \"max_shape_id\": db_job.max_shape_id,\r\n \"current\": models.Task.objects.get(pk=db_task.id).last_viewed_frame, # 
db_task.last_viewed_frame returns the previous value from the database\r\n }\r\n else:\r\n raise Exception(\"Cannot find the job: {}\".format(jid))\r\n\r\n return response\r\n\r\[email protected]\r\ndef rq_handler(job, exc_type, exc_value, traceback):\r\n tid = job.id.split('/')[1]\r\n db_task = models.Task.objects.select_for_update().get(pk=tid)\r\n with open(db_task.get_log_path(), \"wt\") as log_file:\r\n print_exception(exc_type, exc_value, traceback, file=log_file)\r\n db_task.delete()\r\n\r\n return False\r\n\r\ndef nextJobIdByPriority(username, status, tid):\r\n project = models.Task.objects.get(pk=tid).project\r\n if not username == \"staff_user\":\r\n currentUser = User.objects.get(username=username)\r\n opened_tasks = models.Task.objects.filter(project=project, assignee=currentUser, status=status)\r\n else:\r\n currentUser = username \r\n opened_tasks = models.Task.objects.filter(project=project, status=status)\r\n\r\n if opened_tasks.exists():\r\n max_score = opened_tasks.aggregate(maxscore=Max('score'))['maxscore']\r\n\r\n if currentUser == \"staff_user\":\r\n highest_priority_task = models.Task.objects.filter(project=project, status=status, score=max_score)\r\n else:\r\n highest_priority_task = models.Task.objects.filter(project=project, assignee=currentUser, status=status, score=max_score)\r\n\r\n return models.Job.objects.get(segment__task=highest_priority_task[0]).id\r\n else:\r\n return \"No task found\"\r\n\r\n############################# Internal implementation for server API\r\n\r\ndef _make_image_meta_cache(db_task, sorted_filenames=None):\r\n with open(db_task.get_image_meta_cache_path(), 'w') as meta_file:\r\n cache = {\r\n 'original_size': []\r\n }\r\n\r\n if db_task.mode == 'interpolation':\r\n frame_0_url = getFileUrl(get_frame_path(db_task.id, 0))\r\n image = Image.open(frame_0_url)\r\n cache['original_size'].append({\r\n 'width': image.size[0],\r\n 'height': image.size[1]\r\n })\r\n image.close()\r\n else:\r\n filenames = []\r\n\r\n if sorted_filenames is None:\r\n for root, _, files in os.walk(db_task.get_upload_dirname()):\r\n fullnames = map(lambda f: os.path.join(root, f), files)\r\n images = filter(lambda x: _get_mime(x) == 'image', fullnames)\r\n filenames.extend(images)\r\n filenames.sort()\r\n else:\r\n filenames = sorted_filenames\r\n\r\n for image_path in filenames:\r\n image = Image.open(image_path)\r\n cache['original_size'].append({\r\n 'width': image.size[0],\r\n 'height': image.size[1]\r\n })\r\n image.close()\r\n\r\n meta_file.write(str(cache))\r\n\r\ndef get_image_meta_cache(db_task):\r\n try:\r\n with open(db_task.get_image_meta_cache_path()) as meta_cache_file:\r\n return literal_eval(meta_cache_file.read())\r\n except Exception:\r\n _make_image_meta_cache(db_task)\r\n with open(db_task.get_image_meta_cache_path()) as meta_cache_file:\r\n return literal_eval(meta_cache_file.read())\r\n\r\n\r\ndef _get_mime(name):\r\n mime = mimetypes.guess_type(name)\r\n mime_type = mime[0]\r\n encoding = mime[1]\r\n # zip, rar, tar, tar.gz, tar.bz2, 7z, cpio\r\n supportedArchives = ['application/zip', 'application/x-rar-compressed',\r\n 'application/x-tar', 'application/x-7z-compressed', 'application/x-cpio',\r\n 'gzip', 'bzip2']\r\n if mime_type is not None:\r\n if mime_type.startswith('video'):\r\n return 'video'\r\n elif mime_type in supportedArchives or encoding in supportedArchives:\r\n return 'archive'\r\n elif mime_type.startswith('image'):\r\n return 'image'\r\n else:\r\n return 'empty'\r\n else:\r\n if os.path.isdir(name):\r\n return 'directory'\r\n 
else:\r\n return 'empty'\r\n\r\n\r\ndef _get_frame_path(frame, base_dir):\r\n d1 = str(frame // 10000)\r\n d2 = str(frame // 100)\r\n path = os.path.join(d1, d2, str(frame) + '.jpg')\r\n if base_dir:\r\n path = os.path.join(base_dir, path)\r\n\r\n return path\r\n\r\ndef _parse_frameproperties(frameproperties):\r\n parsed_frameprops = []\r\n for row in frameproperties:\r\n if (row['parent'] != '#' and row['parent'] != '$$$'):\r\n parsed_frameprops.append(row['original']['path'].split(\"/\"))\r\n\r\n return parsed_frameprops\r\n \r\ndef _get_frame_watershed_path(frame, base_dir):\r\n d1 = str(frame // 10000)\r\n d2 = str(frame // 100)\r\n path = os.path.join(d1, d2, str(frame) + '_w.png')\r\n if base_dir:\r\n path = os.path.join(base_dir, path)\r\n\r\n return path\r\n\r\ndef _parse_labels(labels):\r\n parsed_labels = OrderedDict()\r\n\r\n last_label = \"\"\r\n for token in shlex.split(labels):\r\n if token[0] != \"~\" and token[0] != \"@\":\r\n if token in parsed_labels:\r\n raise ValueError(\"labels string is not corect. \" +\r\n \"`{}` label is specified at least twice.\".format(token))\r\n\r\n parsed_labels[token] = {}\r\n last_label = token\r\n else:\r\n attr = models.parse_attribute(token)\r\n attr['text'] = token\r\n if not attr['type'] in ['checkbox', 'radio', 'number', 'text', 'select']:\r\n raise ValueError(\"labels string is not corect. \" +\r\n \"`{}` attribute has incorrect type {}.\".format(\r\n attr['name'], attr['type']))\r\n\r\n values = attr['values']\r\n if attr['type'] == 'checkbox': # <prefix>checkbox=name:true/false\r\n if not (len(values) == 1 and values[0] in ['true', 'false']):\r\n raise ValueError(\"labels string is not corect. \" +\r\n \"`{}` attribute has incorrect value.\".format(attr['name']))\r\n elif attr['type'] == 'number': # <prefix>number=name:min,max,step\r\n try:\r\n if len(values) != 3 or float(values[2]) <= 0 or \\\r\n float(values[0]) >= float(values[1]):\r\n raise ValueError\r\n except ValueError:\r\n raise ValueError(\"labels string is not correct. \" +\r\n \"`{}` attribute has incorrect format.\".format(attr['name']))\r\n\r\n if attr['name'] in parsed_labels[last_label]:\r\n raise ValueError(\"labels string is not corect. \" +\r\n \"`{}` attribute is specified at least twice.\".format(attr['name']))\r\n\r\n parsed_labels[last_label][attr['name']] = attr\r\n\r\n return parsed_labels\r\n\r\ndef _parse_db_labels(db_labels):\r\n result = []\r\n for db_label in db_labels:\r\n result += [db_label.name]\r\n result += [attr.text for attr in db_label.attributespec_set.all()]\r\n return _parse_labels(\" \".join(result))\r\n\r\n\r\n'''\r\n Count all files, remove garbage (unknown mime types or extra dirs)\r\n'''\r\ndef _prepare_paths(source_paths, target_paths, storage):\r\n counters = {\r\n \"image\": 0,\r\n \"directory\": 0,\r\n \"video\": 0,\r\n \"archive\": 0\r\n }\r\n\r\n share_dirs_mapping = {}\r\n share_files_mapping = {}\r\n\r\n if storage == 'local':\r\n # Files were uploaded early. Remove trash if it exists. Count them.\r\n for path in target_paths:\r\n mime = _get_mime(path)\r\n if mime in ['video', 'archive', 'image']:\r\n counters[mime] += 1\r\n else:\r\n try:\r\n os.remove(path)\r\n except:\r\n os.rmdir(path)\r\n else:\r\n # Files are available via mount share. 
Count them and separate dirs.\r\n for source_path, target_path in zip(source_paths, target_paths):\r\n mime = _get_mime(source_path)\r\n if mime in ['directory', 'image', 'video', 'archive']:\r\n counters[mime] += 1\r\n if mime == 'directory':\r\n share_dirs_mapping[source_path] = target_path\r\n else:\r\n share_files_mapping[source_path] = target_path\r\n\r\n # Remove directories if other files from them exists in input paths\r\n exclude = []\r\n for dir_name in share_dirs_mapping.keys():\r\n for patch in share_files_mapping.keys():\r\n if dir_name in patch:\r\n exclude.append(dir_name)\r\n break\r\n\r\n for excluded_dir in exclude:\r\n del share_dirs_mapping[excluded_dir]\r\n\r\n counters['directory'] = len(share_dirs_mapping.keys())\r\n\r\n return (counters, share_dirs_mapping, share_files_mapping)\r\n\r\n\r\n'''\r\n Check file set on valid\r\n Valid if:\r\n 1 video, 0 images and 0 dirs (interpolation mode)\r\n 1 archive, 0 images and 0 dirs (annotation mode)\r\n Many images or many dirs with images (annotation mode), 0 archives and 0 videos\r\n'''\r\ndef _valid_file_set(counters):\r\n if (counters['image'] or counters['directory']) and (counters['video'] or counters['archive']):\r\n return False\r\n elif counters['video'] > 1 or (counters['video'] and (counters['archive'] or counters['image'] or counters['directory'])):\r\n return False\r\n elif counters['archive'] > 1 or (counters['archive'] and (counters['video'] or counters['image'] or counters['directory'])):\r\n return False\r\n\r\n return True\r\n\r\n\r\n'''\r\n Copy data from share to local\r\n'''\r\ndef _copy_data_from_share(share_files_mapping, share_dirs_mapping):\r\n for source_path in share_dirs_mapping:\r\n copy_tree(source_path, share_dirs_mapping[source_path])\r\n for source_path in share_files_mapping:\r\n target_path = share_files_mapping[source_path]\r\n target_dir = os.path.dirname(target_path)\r\n if not os.path.exists(target_dir):\r\n os.makedirs(target_dir)\r\n shutil.copyfile(source_path, target_path)\r\n\r\n\r\n'''\r\n Find and unpack archive in upload dir\r\n'''\r\ndef _find_and_unpack_archive(upload_dir):\r\n archive = None\r\n for root, _, files in os.walk(upload_dir):\r\n fullnames = map(lambda f: os.path.join(root, f), files)\r\n archives = list(filter(lambda x: _get_mime(x) == 'archive', fullnames))\r\n if len(archives):\r\n archive = archives[0]\r\n break\r\n if archive:\r\n Archive(archive).extractall(upload_dir)\r\n os.remove(archive)\r\n else:\r\n raise Exception('Type defined as archive, but archives were not found.')\r\n\r\n return archive\r\n\r\n\r\n'''\r\n Search a video in upload dir and split it by frames. 
Copy frames to target dirs\r\n'''\r\ndef _find_and_extract_video(upload_dir, output_dir, db_task, job):\r\n video = None\r\n for root, _, files in os.walk(upload_dir):\r\n fullnames = map(lambda f: os.path.join(root, f), files)\r\n videos = list(filter(lambda x: _get_mime(x) == 'video', fullnames))\r\n if len(videos):\r\n video = videos[0]\r\n break\r\n\r\n if video:\r\n job.meta['status'] = 'Video is being extracted..'\r\n job.save_meta()\r\n _dir, vid_name = os.path.split(video)\r\n uploadFile(video, os.path.join(output_dir, 'video', vid_name))\r\n frame_count = extract_frames(video, output_dir)\r\n db_task.size += frame_count\r\n\r\n else:\r\n raise Exception(\"Video files were not found\")\r\n\r\n return video\r\n\r\ndef count_frames(path):\r\n video = cv2.VideoCapture(path)\r\n total = 0\r\n\r\n # Try to count the frames using the opencv property.\r\n # If opencv can't count the frames, count them manually.\r\n try:\r\n # VideoCapture.get returns a float value, so we need to convert it to int.\r\n total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\r\n except:\r\n total = count_frames_manual(video)\r\n\r\n video.release()\r\n\r\n return total\r\n\r\ndef count_frames_manual(video):\r\n total = 0\r\n\r\n # frameExists is a boolean returned from read that indicates whether or not\r\n # a frame was read.\r\n (frameExists, _) = video.read()\r\n\r\n # Continue to iterate over the video frames until the end of the video.\r\n while frameExists:\r\n total += 1\r\n\r\n # video.read() advances the pointer of the video and\r\n # returns whether or not the frame exists, plus the frame itself.\r\n (frameExists, _) = video.read()\r\n\r\n return total\r\n\r\ndef get_meta_data(source_path):\r\n meta_data = skvideo.io.ffprobe(source_path)['video']\r\n\r\n if '@nb_frames' not in meta_data:\r\n meta_data['@nb_frames'] = count_frames(source_path)\r\n\r\n return meta_data\r\n\r\ndef extract_frames(source_path, output_dir):\r\n count = 0\r\n threads = []\r\n output = tempfile.mkdtemp(prefix='cvat-', suffix='.data')\r\n target_path = os.path.join(output, '%d.jpg')\r\n LocalImagesPath = target_path\r\n\r\n # create a folder for this video and for the entire dataset (if it doesn't exist)\r\n _dir, vid_name = os.path.split(source_path)\r\n name = os.path.splitext(vid_name)[0]\r\n save_dir = os.path.abspath(os.path.join(LocalImagesPath, name))\r\n os.makedirs(save_dir)\r\n\r\n # Parse the video\r\n for frame_count, frame in protected_reader(source_path):\r\n if frame is not False:\r\n img_path = os.path.join(save_dir, str(frame_count) + '.jpg')\r\n\r\n # Remove combing lines effect from image\r\n deint_image = deinterlace(frame)\r\n cv2.imwrite(img_path, deint_image[:, :, ::-1]) # save image (cv2 uses BGR color channels so reverse)\r\n image_dest_path = _get_frame_path(frame_count, output_dir)\r\n count += 1\r\n dirname = os.path.dirname(image_dest_path)\r\n if not os.path.exists(dirname):\r\n os.makedirs(dirname)\r\n t = copyFileToOSByThread(img_path, image_dest_path)\r\n t.start()\r\n threads.append(t)\r\n\r\n else:\r\n break\r\n\r\n threads = [t for t in threads if t.is_alive()]\r\n for t in threads:\r\n t.join()\r\n\r\n return count\r\n\r\ndef protected_reader(src_path, max_frames=None):\r\n \"\"\"A wrapper reader for skvideo.io.FFmpegReader to avoid crashing on a RuntimeError exception.\r\n\r\n :param src_path: Path to the video file to be read.\r\n :param max_frames: (default=None) Number of frames to read. 
If left as None will attempt to read the entire video.\r\n :return: A tuple of frame_count, frame\r\n \"\"\"\r\n frame, reader, count = False, None, 0\r\n metadata = get_meta_data(src_path)\r\n if max_frames is None:\r\n max_frames = metadata['@nb_frames']\r\n\r\n video_codec = metadata['@codec_name']\r\n reader = skvideo.io.FFmpegReader(filename=src_path, inputdict={'-vcodec': video_codec})\r\n gen = reader.nextFrame()\r\n\r\n while count < int(max_frames):\r\n try:\r\n frame = gen.__next__()\r\n except Exception:\r\n frame = False\r\n reader.close()\r\n finally:\r\n yield count, frame\r\n count += 1\r\n try:\r\n reader.close()\r\n except Exception:\r\n pass\r\n\r\n\r\ndef deinterlace(image):\r\n interpolation = cv2.INTER_LINEAR # cv2.INTER_NEAREST - fast, looks ok for tagging | cv2.INTER_LINEAR - slower, looks good\r\n\r\n # if sample of image and tags\r\n h, w, c = image.shape\r\n\r\n # cut image in half\r\n temp = image[::2, :, :] if h % 2 == 0 else image[:h-1:2, :, :]\r\n\r\n return cv2.resize(temp, (w, h), interpolation=interpolation)\r\n\r\n'''\r\n Recursively search for all images in the upload dir and compress them to RGB jpg with the specified quality.\r\n'''\r\ndef _find_and_compress_images(upload_dir, output_dir, db_task, compress_quality, flip_flag, job):\r\n filenames = []\r\n for root, _, files in os.walk(upload_dir):\r\n fullnames = map(lambda f: os.path.join(root, f), files)\r\n images = filter(lambda x: _get_mime(x) == 'image', fullnames)\r\n filenames.extend(images)\r\n filenames.sort()\r\n\r\n _make_image_meta_cache(db_task, filenames)\r\n\r\n if len(filenames):\r\n for idx, name in enumerate(filenames):\r\n job.meta['status'] = 'Images are being compressed.. {}%'.format(idx * 100 // len(filenames))\r\n job.save_meta()\r\n compressed_name = os.path.splitext(name)[0] + '.jpg'\r\n image = Image.open(name).convert('RGB')\r\n if flip_flag:\r\n image = image.transpose(Image.ROTATE_180)\r\n image.save(compressed_name, quality=compress_quality, optimize=True)\r\n image.close()\r\n if compressed_name != name:\r\n os.remove(name)\r\n # PIL::save uses the filename in order to define the image extension.\r\n # We need to save it as jpeg for compression and rename the file afterwards,\r\n # otherwise the annotation file will contain invalid file names (with other extensions).\r\n os.rename(compressed_name, name)\r\n\r\n threads = []\r\n for frame, image_orig_path in enumerate(filenames):\r\n image_dest_path = _get_frame_path(frame, output_dir)\r\n image_orig_path = os.path.abspath(image_orig_path)\r\n db_task.size += 1\r\n dirname = os.path.dirname(image_dest_path)\r\n if not os.path.exists(dirname):\r\n os.makedirs(dirname)\r\n os.rename(image_orig_path, image_dest_path)\r\n # the file was just renamed to image_dest_path, so copy to storage from there\r\n # (the original code copied from image_orig_path, which no longer exists at this point)\r\n t = copyFileToOSByThread(image_dest_path, image_dest_path)\r\n t.start()\r\n threads.append(t)\r\n\r\n threads = [t for t in threads if t.is_alive()]\r\n for t in threads:\r\n t.join()\r\n else:\r\n raise Exception(\"Image files were not found\")\r\n\r\n return filenames\r\n\r\ndef _save_task_to_db(db_task, task_params):\r\n\r\n db_task.overlap = min(db_task.size, task_params['overlap'])\r\n db_task.mode = task_params['mode']\r\n db_task.z_order = task_params['z_order']\r\n db_task.flipped = task_params['flip']\r\n db_task.score = task_params['score'] and task_params['score'] or 0 # Set to task_params['score'] unless it's undefined, then 0.\r\n db_task.video_id = task_params['video_id']\r\n db_task.source = task_params['data']\r\n\r\n segment_step = task_params['segment'] - db_task.overlap\r\n for x in range(0, 
db_task.size, segment_step):\r\n start_frame = x\r\n stop_frame = min(x + task_params['segment'] - 1, db_task.size - 1)\r\n slogger.glob.info(\"New segment for task #{}: start_frame = {}, \\\r\n stop_frame = {}\".format(db_task.id, start_frame, stop_frame))\r\n\r\n db_segment = models.Segment()\r\n db_segment.task = db_task\r\n db_segment.start_frame = start_frame\r\n db_segment.stop_frame = stop_frame\r\n db_segment.save()\r\n\r\n db_job = models.Job()\r\n db_job.segment = db_segment\r\n db_job.save()\r\n\r\n parsed_frameprops = _parse_frameproperties(task_params['frame_properties'])\r\n for frameprop in parsed_frameprops:\r\n db_taskframespec = models.TaskFrameSpec()\r\n db_taskframespec.task = db_task\r\n\r\n db_framepropvals = models.FrameProperties.objects.get(prop=frameprop[0], value=frameprop[1], project__pk=db_task.project.pk)\r\n db_taskframespec.propVal = db_framepropvals\r\n db_taskframespec.save()\r\n\r\n parsed_labels = _parse_labels(task_params['labels'])\r\n for label in parsed_labels:\r\n db_label = models.Label()\r\n db_label.task = db_task\r\n db_label.name = label\r\n db_label.save()\r\n\r\n for attr in parsed_labels[label]:\r\n db_attrspec = models.AttributeSpec()\r\n db_attrspec.label = db_label\r\n db_attrspec.text = parsed_labels[label][attr]['text']\r\n db_attrspec.save()\r\n\r\n db_task.save()\r\n\r\ndef _save_paths_to_db(task, files):\r\n count = 0\r\n for currFile in files:\r\n db_task_source = models.TaskSource()\r\n db_task_source.task = task\r\n db_task_source.source_name = currFile\r\n db_task_source.frame = count\r\n count += 1\r\n db_task_source.save()\r\n\r\ndef parseTxtToXml(fileData, taskId):\r\n try:\r\n # Getting image size\r\n frame_0_url = getFileUrl(get_frame_path(taskId, 0))\r\n width, height = Image.open(frame_0_url).size\r\n except Exception:\r\n raise\r\n\r\n return (formatter.parse_format(fileData, frame_0_url, width, height))\r\n\r\n\r\[email protected]\r\ndef _create_thread(tid, params):\r\n def raise_exception(images, dirs, videos, archives):\r\n raise Exception('Only one archive, one video or many images can be downloaded simultaneously. \\\r\n {} image(s), {} dir(s), {} video(s), {} archive(s) found'.format(images, dirs, videos, archives))\r\n\r\n slogger.glob.info(\"create task #{}\".format(tid))\r\n job = rq.get_current_job()\r\n\r\n db_task = models.Task.objects.select_for_update().get(pk=tid)\r\n upload_dir = db_task.get_upload_dirname()\r\n output_dir = db_task.get_data_dirname()\r\n\r\n counters, share_dirs_mapping, share_files_mapping = _prepare_paths(\r\n params['SOURCE_PATHS'],\r\n params['TARGET_PATHS'],\r\n params['storage']\r\n )\r\n\r\n if (not _valid_file_set(counters)):\r\n raise Exception('Only one archive, one video or many images can be downloaded simultaneously. 
\\\r\n {} image(s), {} dir(s), {} video(s), {} archive(s) found'.format(\r\n counters['image'],\r\n counters['directory'],\r\n counters['video'],\r\n counters['archive']\r\n )\r\n )\r\n\r\n archive = None\r\n if counters['archive']:\r\n job.meta['status'] = 'Archive is being unpacked..'\r\n job.save_meta()\r\n archive = _find_and_unpack_archive(upload_dir)\r\n\r\n # Define task mode and other parameters\r\n task_video_id = -1\r\n print(params)\r\n task_score = params['score']\r\n if 'video_id' in params:\r\n task_video_id = params['video_id']\r\n\r\n task_params = {\r\n 'mode': 'annotation' if counters['image'] or counters['directory'] or counters['archive'] else 'interpolation',\r\n 'flip': params['flip_flag'].lower() == 'true',\r\n 'score': task_score,\r\n 'video_id': task_video_id,\r\n 'z_order': params['z_order'].lower() == 'true',\r\n 'compress': int(params.get('compress_quality', 50)),\r\n 'segment': int(sys.maxsize),\r\n 'labels': params['labels'],\r\n 'frame_properties': json.loads(params['frame_properties'])\r\n }\r\n\r\n task_params['overlap'] = int(params.get('overlap_size', 5 if task_params['mode'] == 'interpolation' else 0))\r\n slogger.glob.info(\"Task #{} parameters: {}\".format(tid, task_params))\r\n\r\n files = []\r\n\r\n if task_params['mode'] == 'interpolation':\r\n video = _find_and_extract_video(upload_dir, output_dir, db_task, job)\r\n task_params['data'] = os.path.relpath(video, upload_dir)\r\n else:\r\n files = _find_and_compress_images(upload_dir, output_dir, db_task,\r\n task_params['compress'], task_params['flip'], job)\r\n if archive:\r\n task_params['data'] = os.path.relpath(archive, upload_dir)\r\n else:\r\n task_params['data'] = '{} images: {}, ...'.format(len(files),\r\n \", \".join([os.path.relpath(x, upload_dir) for x in files[0:2]]))\r\n\r\n slogger.glob.info(\"Found {} frames for task #{}\".format(db_task.size, tid))\r\n\r\n task_params['segment'] = db_task.size + 10\r\n job.meta['status'] = 'Task is being saved in database'\r\n job.save_meta()\r\n\r\n try:\r\n _save_task_to_db(db_task, task_params)\r\n\r\n if task_params['mode'] == 'annotation':\r\n # add source paths to db\r\n _save_paths_to_db(db_task, params['SOURCE_PATHS'])\r\n\r\n # Parse the tags file\r\n if params['storage'] == 'share':\r\n txt = parseTxtToXml(upload_dir, db_task.id)\r\n destDir = r'/home/django/data/' + str(db_task.id) + r'/data/xml/'\r\n os.makedirs(destDir)\r\n with open(destDir + r'annotations.txt', 'w') as annotationFile:\r\n annotationFile.write(txt)\r\n except Exception:\r\n pass\r\n finally:\r\n # Delete the upload dir\r\n shutil.rmtree(upload_dir)\r\n" ]
[ [ "numpy.nditer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gagan3012/metrics
[ "5a2388ccaa97cc3608b1fa28879f77436434a6d6", "5a2388ccaa97cc3608b1fa28879f77436434a6d6" ]
[ "tests/wrappers/test_bootstrapping.py", "integrations/test_lightning.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport operator\n\nimport numpy as np\nimport pytest\nimport torch\nfrom sklearn.metrics import precision_score, recall_score\nfrom torch import Tensor\n\nfrom torchmetrics.classification import Precision, Recall\nfrom torchmetrics.utilities import apply_to_collection\nfrom torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7\nfrom torchmetrics.wrappers.bootstrapping import BootStrapper, _bootstrap_sampler\n\n_preds = torch.randint(10, (10, 32))\n_target = torch.randint(10, (10, 32))\n\n\nclass TestBootStrapper(BootStrapper):\n \"\"\"For testing purpose, we subclass the bootstrapper class so we can get the exact permutation the class is\n creating.\"\"\"\n\n def update(self, *args) -> None:\n self.out = []\n for idx in range(self.num_bootstraps):\n size = len(args[0])\n sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy)\n new_args = apply_to_collection(args, Tensor, torch.index_select, dim=0, index=sample_idx)\n self.metrics[idx].update(*new_args)\n self.out.append(new_args)\n\n\ndef _sample_checker(old_samples, new_samples, op: operator, threshold: int):\n found_one = False\n for os in old_samples:\n cond = op(os, new_samples)\n if cond.sum() > threshold:\n found_one = True\n break\n return found_one\n\n\[email protected](\"sampling_strategy\", [\"poisson\", \"multinomial\"])\ndef test_bootstrap_sampler(sampling_strategy):\n \"\"\"make sure that the bootstrap sampler works as intended.\"\"\"\n old_samples = torch.randn(10, 2)\n\n # make sure that the new samples are only made up of old samples\n idx = _bootstrap_sampler(10, sampling_strategy=sampling_strategy)\n new_samples = old_samples[idx]\n for ns in new_samples:\n assert ns in old_samples\n\n found_one = _sample_checker(old_samples, new_samples, operator.eq, 2)\n assert found_one, \"resampling did not work because no samples were sampled twice\"\n\n found_zero = _sample_checker(old_samples, new_samples, operator.ne, 0)\n assert found_zero, \"resampling did not work because all samples were atleast sampled once\"\n\n\[email protected](\"sampling_strategy\", [\"poisson\", \"multinomial\"])\[email protected](\n \"metric, sk_metric\", [[Precision(average=\"micro\"), precision_score], [Recall(average=\"micro\"), recall_score]]\n)\ndef test_bootstrap(sampling_strategy, metric, sk_metric):\n \"\"\"Test that the different bootstraps gets updated as we expected and that the compute method works.\"\"\"\n _kwargs = {\"base_metric\": metric, \"mean\": True, \"std\": True, \"raw\": True, \"sampling_strategy\": sampling_strategy}\n if _TORCH_GREATER_EQUAL_1_7:\n _kwargs.update(dict(quantile=torch.tensor([0.05, 0.95])))\n\n bootstrapper = TestBootStrapper(**_kwargs)\n\n collected_preds = [[] for _ in range(10)]\n collected_target = [[] for _ in range(10)]\n for p, t in zip(_preds, _target):\n bootstrapper.update(p, t)\n\n for i, o in enumerate(bootstrapper.out):\n\n collected_preds[i].append(o[0])\n 
collected_target[i].append(o[1])\n\n collected_preds = [torch.cat(cp) for cp in collected_preds]\n collected_target = [torch.cat(ct) for ct in collected_target]\n\n sk_scores = [sk_metric(ct, cp, average=\"micro\") for ct, cp in zip(collected_target, collected_preds)]\n\n output = bootstrapper.compute()\n # quantile only avaible for pytorch v1.7 and forward\n if _TORCH_GREATER_EQUAL_1_7:\n assert np.allclose(output[\"quantile\"][0], np.quantile(sk_scores, 0.05))\n assert np.allclose(output[\"quantile\"][1], np.quantile(sk_scores, 0.95))\n\n assert np.allclose(output[\"mean\"], np.mean(sk_scores))\n assert np.allclose(output[\"std\"], np.std(sk_scores, ddof=1))\n assert np.allclose(output[\"raw\"], sk_scores)\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom unittest import mock\n\nimport pytest\nimport torch\nfrom pytorch_lightning import LightningModule, Trainer\nfrom torch import tensor\nfrom torch.utils.data import DataLoader\n\nfrom integrations.lightning.boring_model import BoringModel, RandomDataset\nfrom tests.helpers import _LIGHTNING_GREATER_EQUAL_1_3\nfrom torchmetrics import Accuracy, AveragePrecision, Metric\n\n\nclass SumMetric(Metric):\n def __init__(self):\n super().__init__()\n self.add_state(\"x\", tensor(0.0), dist_reduce_fx=\"sum\")\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n\nclass DiffMetric(Metric):\n def __init__(self):\n super().__init__()\n self.add_state(\"x\", tensor(0.0), dist_reduce_fx=\"sum\")\n\n def update(self, x):\n self.x -= x\n\n def compute(self):\n return self.x\n\n\ndef test_metric_lightning(tmpdir):\n class TestModel(BoringModel):\n def __init__(self):\n super().__init__()\n self.metric = SumMetric()\n self.sum = 0.0\n\n def training_step(self, batch, batch_idx):\n x = batch\n self.metric(x.sum())\n self.sum += x.sum()\n\n return self.step(x)\n\n def training_epoch_end(self, outs):\n if not torch.allclose(self.sum, self.metric.compute()):\n raise ValueError(\"Sum and computed value must be equal\")\n self.sum = 0.0\n self.metric.reset()\n\n model = TestModel()\n model.val_dataloader = None\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=2,\n limit_val_batches=2,\n max_epochs=2,\n log_every_n_steps=1,\n weights_summary=None,\n )\n trainer.fit(model)\n\n\[email protected](not _LIGHTNING_GREATER_EQUAL_1_3, reason=\"test requires lightning v1.3 or higher\")\ndef test_metrics_reset(tmpdir):\n \"\"\"Tests that metrics are reset correctly after the end of the train/val/test epoch.\n\n Taken from:\n https://github.com/PyTorchLightning/pytorch-lightning/pull/7055\n \"\"\"\n\n class TestModel(LightningModule):\n def __init__(self):\n super().__init__()\n self.layer = torch.nn.Linear(32, 1)\n\n for stage in [\"train\", \"val\", \"test\"]:\n acc = Accuracy()\n acc.reset = mock.Mock(side_effect=acc.reset)\n ap = AveragePrecision(num_classes=1, pos_label=1)\n ap.reset = mock.Mock(side_effect=ap.reset)\n self.add_module(f\"acc_{stage}\", acc)\n 
self.add_module(f\"ap_{stage}\", ap)\n\n def forward(self, x):\n return self.layer(x)\n\n def _step(self, stage, batch):\n labels = (batch.detach().sum(1) > 0).float() # Fake some targets\n logits = self.forward(batch)\n loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, labels.unsqueeze(1))\n probs = torch.sigmoid(logits.detach())\n self.log(f\"loss/{stage}\", loss)\n\n acc = self._modules[f\"acc_{stage}\"]\n ap = self._modules[f\"ap_{stage}\"]\n\n labels_int = labels.to(torch.long)\n acc(probs.flatten(), labels_int)\n ap(probs.flatten(), labels_int)\n\n # Metric.forward calls reset so reset the mocks here\n acc.reset.reset_mock()\n ap.reset.reset_mock()\n\n self.log(f\"{stage}/accuracy\", acc)\n self.log(f\"{stage}/ap\", ap)\n\n return loss\n\n def training_step(self, batch, batch_idx, *args, **kwargs):\n return self._step(\"train\", batch)\n\n def validation_step(self, batch, batch_idx, *args, **kwargs):\n return self._step(\"val\", batch)\n\n def test_step(self, batch, batch_idx, *args, **kwargs):\n return self._step(\"test\", batch)\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n @staticmethod\n def train_dataloader():\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n @staticmethod\n def val_dataloader():\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n @staticmethod\n def test_dataloader():\n return DataLoader(RandomDataset(32, 64), batch_size=2)\n\n def _assert_epoch_end(self, stage):\n acc = self._modules[f\"acc_{stage}\"]\n ap = self._modules[f\"ap_{stage}\"]\n\n acc.reset.asset_not_called()\n ap.reset.assert_not_called()\n\n def train_epoch_end(self, outputs):\n self._assert_epoch_end(\"train\")\n\n def validation_epoch_end(self, outputs):\n self._assert_epoch_end(\"val\")\n\n def test_epoch_end(self, outputs):\n self._assert_epoch_end(\"test\")\n\n def _assert_called(model, stage):\n acc = model._modules[f\"acc_{stage}\"]\n ap = model._modules[f\"ap_{stage}\"]\n\n acc.reset.assert_called_once()\n acc.reset.reset_mock()\n\n ap.reset.assert_called_once()\n ap.reset.reset_mock()\n\n model = TestModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=2,\n limit_val_batches=2,\n limit_test_batches=2,\n max_epochs=1,\n progress_bar_refresh_rate=0,\n )\n\n trainer.fit(model)\n _assert_called(model, \"train\")\n _assert_called(model, \"val\")\n\n trainer.validate(model)\n _assert_called(model, \"val\")\n\n trainer.test(model)\n _assert_called(model, \"test\")\n\n\n# todo: reconsider if it make sense to keep here\n# def test_metric_lightning_log(tmpdir):\n# \"\"\" Test logging a metric object and that the metric state gets reset after each epoch.\"\"\"\n# class TestModel(BoringModel):\n# def __init__(self):\n# super().__init__()\n# self.metric_step = SumMetric()\n# self.metric_epoch = SumMetric()\n# self.sum = 0.0\n#\n# def on_epoch_start(self):\n# self.sum = 0.0\n#\n# def training_step(self, batch, batch_idx):\n# x = batch\n# self.metric_step(x.sum())\n# self.sum += x.sum()\n# self.log(\"sum_step\", self.metric_step, on_epoch=True, on_step=False)\n# return {'loss': self.step(x), 'data': x}\n#\n# def training_epoch_end(self, outs):\n# self.log(\"sum_epoch\", self.metric_epoch(torch.stack([o['data'] for o in outs]).sum()))\n#\n# model = TestModel()\n# model.val_dataloader = None\n#\n# trainer = Trainer(\n# default_root_dir=tmpdir,\n# limit_train_batches=2,\n# 
limit_val_batches=2,\n# max_epochs=2,\n# log_every_n_steps=1,\n# weights_summary=None,\n# )\n# trainer.fit(model)\n#\n# logged = trainer.logged_metrics\n# assert torch.allclose(tensor(logged[\"sum_step\"]), model.sum)\n# assert torch.allclose(tensor(logged[\"sum_epoch\"]), model.sum)\n\n# todo: need to be fixed\n# def test_scriptable(tmpdir):\n# class TestModel(BoringModel):\n# def __init__(self):\n# super().__init__()\n# # the metric is not used in the module's `forward`\n# # so the module should be exportable to TorchScript\n# self.metric = SumMetric()\n# self.sum = 0.0\n#\n# def training_step(self, batch, batch_idx):\n# x = batch\n# self.metric(x.sum())\n# self.sum += x.sum()\n# self.log(\"sum\", self.metric, on_epoch=True, on_step=False)\n# return self.step(x)\n#\n# model = TestModel()\n# trainer = Trainer(\n# default_root_dir=tmpdir,\n# limit_train_batches=2,\n# limit_val_batches=2,\n# max_epochs=1,\n# log_every_n_steps=1,\n# weights_summary=None,\n# logger=False,\n# checkpoint_callback=False,\n# )\n# trainer.fit(model)\n# rand_input = torch.randn(10, 32)\n#\n# script_model = model.to_torchscript()\n#\n# # test that we can still do inference\n# output = model(rand_input)\n# script_output = script_model(rand_input)\n# assert torch.allclose(output, script_output)\n\n# def test_metric_collection_lightning_log(tmpdir):\n#\n# class TestModel(BoringModel):\n#\n# def __init__(self):\n# super().__init__()\n# self.metric = MetricCollection([SumMetric(), DiffMetric()])\n# self.sum = 0.0\n# self.diff = 0.0\n#\n# def training_step(self, batch, batch_idx):\n# x = batch\n# metric_vals = self.metric(x.sum())\n# self.sum += x.sum()\n# self.diff -= x.sum()\n# self.log_dict({f'{k}_step': v for k, v in metric_vals.items()})\n# return self.step(x)\n#\n# def training_epoch_end(self, outputs):\n# metric_vals = self.metric.compute()\n# self.log_dict({f'{k}_epoch': v for k, v in metric_vals.items()})\n#\n# model = TestModel()\n# model.val_dataloader = None\n#\n# trainer = Trainer(\n# default_root_dir=tmpdir,\n# limit_train_batches=2,\n# limit_val_batches=2,\n# max_epochs=1,\n# log_every_n_steps=1,\n# weights_summary=None,\n# )\n# trainer.fit(model)\n#\n# logged = trainer.logged_metrics\n# assert torch.allclose(tensor(logged[\"SumMetric_epoch\"]), model.sum)\n# assert torch.allclose(tensor(logged[\"DiffMetric_epoch\"]), model.diff)\n" ]
[ [ "torch.randint", "numpy.allclose", "torch.cat", "torch.randn", "numpy.quantile", "torch.tensor", "numpy.std", "numpy.mean" ], [ "torch.optim.lr_scheduler.StepLR", "torch.nn.Linear", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
idk3/Cirq
[ "1f82a72bda689895753e9a32c8b991fd5057854b" ]
[ "cirq/contrib/acquaintance/gates_test.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom itertools import product\nfrom string import ascii_lowercase as alphabet\nfrom typing import Sequence, Tuple\n\nfrom numpy.random import poisson\nimport pytest\n\nimport cirq\nfrom cirq.contrib.acquaintance.gates import (\n ACQUAINT, SwapNetworkGate, op_acquaintance_size)\nfrom cirq.contrib.acquaintance.shift import CircularShiftGate\nfrom cirq.contrib.acquaintance.permutation import (\n update_mapping, LinearPermutationGate)\n\n\ndef test_acquaintance_gate_repr():\n assert repr(ACQUAINT) == 'Acq'\n\ndef test_acquaintance_gate_text_diagram_info():\n qubits = [cirq.NamedQubit(s) for s in 'xyz']\n circuit = cirq.Circuit([cirq.Moment([ACQUAINT(*qubits)])])\n actual_text_diagram = circuit.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\nx: ───█───\n │\ny: ───█───\n │\nz: ───█───\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\ndef test_acquaintance_gate_unknown_qubit_count():\n g = ACQUAINT\n args = cirq.TextDiagramInfoArgs.UNINFORMED_DEFAULT\n assert g.text_diagram_info(args) == NotImplemented\n\n\ndef test_swap_network_gate():\n qubits = tuple(cirq.NamedQubit(s) for s in alphabet)\n\n acquaintance_size = 3\n n_parts = 3\n part_lens = (acquaintance_size - 1,) * n_parts\n n_qubits = sum(part_lens)\n swap_network_op = SwapNetworkGate(part_lens,\n acquaintance_size=acquaintance_size)(*qubits[:n_qubits])\n swap_network = cirq.Circuit.from_ops(swap_network_op)\n actual_text_diagram = swap_network.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\na: ───×(0,0)───\n │\nb: ───×(0,1)───\n │\nc: ───×(1,0)───\n │\nd: ───×(1,1)───\n │\ne: ───×(2,0)───\n │\nf: ───×(2,1)───\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\n no_decomp = lambda op: isinstance(op.gate,\n (CircularShiftGate, LinearPermutationGate))\n expander = cirq.ExpandComposite(no_decomp=no_decomp)\n expander(swap_network)\n actual_text_diagram = swap_network.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\na: ───█───────╲0╱───█─────────────────█───────────╲0╱───█───────0↦1───\n │ │ │ │ │ │ │\nb: ───█───█───╲1╱───█───█─────────────█───█───────╲1╱───█───█───1↦0───\n │ │ │ │ │ │ │ │ │ │\nc: ───█───█───╱2╲───█───█───█───╲0╱───█───█───█───╱2╲───█───█───0↦1───\n │ │ │ │ │ │ │ │ │ │\nd: ───────█───╱3╲───█───█───█───╲1╱───█───█───█───╱3╲───────█───1↦0───\n │ │ │ │ │\ne: ─────────────────█───────█───╱2╲───█───────█───0↦1─────────────────\n │ │ │ │\nf: ─────────────────█───────────╱3╲───█───────────1↦0─────────────────\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\n no_decomp = lambda op: isinstance(op.gate, CircularShiftGate)\n expander = cirq.ExpandComposite(no_decomp=no_decomp)\n\n acquaintance_size = 3\n n_parts = 6\n part_lens = (1,) * n_parts\n n_qubits = sum(part_lens)\n swap_network_op = SwapNetworkGate(part_lens,\n acquaintance_size=acquaintance_size)(*qubits[:n_qubits])\n swap_network = cirq.Circuit.from_ops(swap_network_op)\n\n 
expander(swap_network)\n actual_text_diagram = swap_network.to_text_diagram().strip()\n expected_text_diagram = \"\"\"\na: ───╲0╱─────────╲0╱─────────╲0╱─────────\n │ │ │\nb: ───╱1╲───╲0╱───╱1╲───╲0╱───╱1╲───╲0╱───\n │ │ │\nc: ───╲0╱───╱1╲───╲0╱───╱1╲───╲0╱───╱1╲───\n │ │ │\nd: ───╱1╲───╲0╱───╱1╲───╲0╱───╱1╲───╲0╱───\n │ │ │\ne: ───╲0╱───╱1╲───╲0╱───╱1╲───╲0╱───╱1╲───\n │ │ │\nf: ───╱1╲─────────╱1╲─────────╱1╲─────────\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\[email protected]('part_lens, acquaintance_size',\n list(((part_len,) * n_parts, acquaintance_size) for\n part_len, acquaintance_size, n_parts in\n product(range(1, 5), range(5), range(2, 5)))\n )\ndef test_swap_network_gate_permutation(part_lens, acquaintance_size):\n n_qubits = sum(part_lens)\n qubits = cirq.LineQubit.range(n_qubits)\n swap_network_gate = SwapNetworkGate(part_lens, acquaintance_size)\n operations = swap_network_gate.default_decompose(qubits)\n operations = list(cirq.flatten_op_tree(operations))\n mapping = {q: i for i, q in enumerate(qubits)}\n update_mapping(mapping, operations)\n assert mapping == {q: i for i, q in enumerate(reversed(qubits))}\n\ndef test_swap_network_gate_from_ops():\n n_qubits = 10\n qubits = cirq.LineQubit.range(n_qubits)\n part_lens = (1, 2, 1, 3, 3)\n operations = [cirq.Z(qubits[0]),\n cirq.CZ(*qubits[1:3]),\n cirq.CCZ(*qubits[4:7]),\n cirq.CCZ(*qubits[7:])]\n acquaintance_size = 3\n swap_network = SwapNetworkGate.from_operations(\n qubits, operations, acquaintance_size)\n assert swap_network.acquaintance_size == acquaintance_size\n assert swap_network.part_lens == part_lens\n\n\ndef test_swap_network_decomposition():\n qubits = cirq.LineQubit.range(8)\n swap_network_gate = SwapNetworkGate((4, 4), 5)\n operations = swap_network_gate.default_decompose(qubits)\n circuit = cirq.Circuit.from_ops(operations)\n actual_text_diagram = circuit.to_text_diagram()\n expected_text_diagram = \"\"\"\n0: ───█─────────────█─────────────╲0╱─────────────█─────────█───────0↦2───\n │ │ │ │ │ │\n1: ───█─────────────█─────────────╲1╱─────────────█─────────█───────1↦3───\n │ │ │ │ │ │\n2: ───█─────────────█───1↦0───────╲2╱───────1↦0───█─────────█───────2↦0───\n │ │ │ │ │ │ │ │\n3: ───█───█─────────█───0↦1───█───╲3╱───█───0↦1───█─────────█───█───3↦1───\n │ │ │ │ │ │ │ │ │\n4: ───█───█───0↦1───█─────────█───╱4╲───█─────────█───0↦1───█───█───0↦2───\n │ │ │ │ │ │ │ │\n5: ───────█───1↦0─────────────█───╱5╲───█─────────────1↦0───────█───1↦3───\n │ │ │ │ │ │\n6: ───────█───────────────────█───╱6╲───█───────────────────────█───2↦0───\n │ │ │ │ │ │\n7: ───────█───────────────────█───╱7╲───█───────────────────────█───3↦1───\n \"\"\".strip()\n assert actual_text_diagram == expected_text_diagram\n\ndef test_swap_network_init_error():\n with pytest.raises(ValueError):\n SwapNetworkGate(())\n with pytest.raises(ValueError):\n SwapNetworkGate((3,))\n\[email protected]('part_lens, acquaintance_size', [\n [[l + 1 for l in poisson(size=n_parts, lam=lam)], poisson(4)]\n for n_parts, lam in product(range(2, 20, 3), range(1, 4))\n ])\ndef test_swap_network_permutation(part_lens, acquaintance_size):\n n_qubits = sum(part_lens)\n gate = SwapNetworkGate(part_lens, acquaintance_size)\n\n expected_permutation = {i: j for i, j in\n zip(range(n_qubits), reversed(range(n_qubits)))}\n assert gate.permutation(n_qubits) == expected_permutation\n\ndef test_swap_network_permutation_error():\n gate = SwapNetworkGate((1, 1))\n with pytest.raises(ValueError):\n gate.permutation(1)\n\nclass 
OtherOperation(cirq.Operation):\n def __init__(self, qubits: Sequence[cirq.QubitId]) -> None:\n self._qubits = tuple(qubits)\n\n @property\n def qubits(self) -> Tuple[cirq.QubitId, ...]:\n return self._qubits\n\n def with_qubits(self, *new_qubits: cirq.QubitId) -> 'OtherOperation':\n return type(self)(self._qubits)\n\n def __eq__(self, other):\n return (isinstance(other, type(self)) and\n self.qubits == other.qubits)\n\ndef test_op_acquaintance_size():\n qubits = cirq.LineQubit.range(5)\n op = OtherOperation(qubits)\n assert op.with_qubits(qubits) == op\n assert op_acquaintance_size(op) == 0\n\n for s, _ in enumerate(qubits):\n op = ACQUAINT(*qubits[:s + 1])\n assert op_acquaintance_size(op) == s + 1\n\n part_lens = (2, 2, 2, 2)\n acquaintance_size = 3\n gate = SwapNetworkGate(part_lens, acquaintance_size)\n op = gate(*qubits[:sum(part_lens)])\n assert op_acquaintance_size(op) == 3\n\n part_lens = (2, 2, 2, 2)\n acquaintance_size = 4\n gate = SwapNetworkGate(part_lens, acquaintance_size)\n op = gate(*qubits[:sum(part_lens)])\n assert op_acquaintance_size(op) == 0\n\n part_lens = (2, 2, 2, 2)\n acquaintance_size = 1\n gate = SwapNetworkGate(part_lens, acquaintance_size)\n op = gate(*qubits[:sum(part_lens)])\n assert op_acquaintance_size(op) == 0\n" ]
[ [ "numpy.random.poisson" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mwsssxu/tutorials
[ "e2742ea7171e3829ea628bac483e8787be60bbd3" ]
[ "sklearnTUT/sk10_cross_validation3.py" ]
[ "# View more python learning tutorial on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n\"\"\"\nPlease note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.\n\"\"\"\nfrom __future__ import print_function\nfrom sklearn.model_selection import validation_curve\nfrom sklearn.datasets import load_digits\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndigits = load_digits()\nX = digits.data\ny = digits.target\nparam_range = np.logspace(-6, -2.3, 5)\ntrain_loss, test_loss = validation_curve(\n SVC(), X, y, param_name='gamma', param_range=param_range, cv=10,\n scoring='neg_mean_squared_error')\ntrain_loss_mean = -np.mean(train_loss, axis=1)\ntest_loss_mean = -np.mean(test_loss, axis=1)\n\nplt.plot(param_range, train_loss_mean, 'o-', color=\"r\",\n label=\"Training\")\nplt.plot(param_range, test_loss_mean, 'o-', color=\"g\",\n label=\"Cross-validation\")\n\nplt.xlabel(\"gamma\")\nplt.ylabel(\"Loss\")\nplt.legend(loc=\"best\")\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.logspace", "matplotlib.pyplot.plot", "numpy.mean", "sklearn.datasets.load_digits", "sklearn.svm.SVC", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rykiprince/Mission-to-Mars
[ "ef800c7d5b1c5a4b4dcd51e371bf0be0e2513ed4" ]
[ "Mission_to_Mars_Challenge.py" ]
[ "# Import Splinter and BeautifulSoup\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nimport pandas as pd\n\n\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome',**executable_path, headless=False)\n\n\n# Visit the mars nasa news site\nurl = 'https://redplanetscience.com'\nbrowser.visit(url)\n# Optional delay for loading the page\nbrowser.is_element_present_by_css('div.list_text', wait_time=1)\n\n\n#Set up the HTML parser\nhtml = browser.html\nnews_soup = soup(html, 'html.parser')\nslide_elem = news_soup.select_one('div.list_text')\n\n# Begin scraping\nslide_elem.find('div', class_='content_title')\n\n\n\n\n# Use the parent element to find the first `a` tag and save it as `news_title`\nnews_title = slide_elem.find('div', class_='content_title').get_text()\nnews_title\n\n\n\n\nslide_elem.find('div', class_='article_teaser_body')\n\n\n\n# Use the parent element to find the paragraph text\nnews_p = slide_elem.find('div', class_='article_teaser_body').get_text()\nnews_p\n\n\n# ### Featured Images\n\n# Visit URL\nurl = 'https://spaceimages-mars.com'\nbrowser.visit(url)\n\n\n# Find and click the full image button\nfull_image_elem = browser.find_by_tag('button')[1]\nfull_image_elem.click()\n\n\n\n\n# Parse the resulting html with soup\nhtml = browser.html\nimg_soup = soup(html, 'html.parser')\n\n\n# Find the relative image url\nimg_url_rel = img_soup.find('img', class_='fancybox-image').get('src')\nimg_url_rel\n\n\n# Use the base URL to create an absolute URL\nimg_url = f'https://spaceimages-mars.com/{img_url_rel}'\nimg_url\n\n\ndf = pd.read_html('https://galaxyfacts-mars.com')[0]\ndf.columns=['description', 'Mars', 'Earth']\ndf.set_index('description', inplace=True)\ndf\n\n\ndf.to_html()\n\nbrowser.quit()\n\n\n# # D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles\n\n# Hemispheres\n\n# Import Splinter and BeautifulSoup\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as soup\nfrom webdriver_manager.chrome import ChromeDriverManager \nimport pandas as pd\n\nexecutable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome',**executable_path, headless=False)\n\n# 1. Use browser to visit the URL \nurl = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\nbrowser.visit(url)\n\n# 2. Create a list to hold the images and titles.\nhemisphere_image_urls = []\n\n# 3. Write code to retrieve the image urls and titles for each hemisphere.\n# links for all hemisphere\n\nlinks = browser.find_by_css('a.product-item h3')\n\n# # Loop through each link\nfor index in range(len(links)):\n\n hemispheres = {}\n browser.find_by_css('a.product-item h3')[index].click()\n # navigate to the full-resolution image page\n# html = browser.html\n# hemi_soup = soup(html, 'html.parser')\n \n sample_rel = browser.links.find_by_text('Sample').first\n\n # retrieve the full-resolution image URL string and title for the hemisphere image\n hemispheres[\"img_url\"] = sample_rel[\"href\"]\n\n hemispheres[\"title\"] = browser.find_by_css(\"h2.title\").value\n \n hemisphere_image_urls.append(hemispheres)\n\n # Use `browser.back()` to navigate back to the beginning to get the mext hemisphere image.\n browser.back()\n\n# 4. Print the list that holds the dictionary of each image url and title.\nhemisphere_image_urls\n\n# 5. Quit the browser\nbrowser.quit()\n\n" ]
[ [ "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
S-Eggers/GraphMask
[ "9e431a541279801ec46a5b38ed57b2033f795240" ]
[ "code/problems/qa/qa_model.py" ]
[ "import scipy\nfrom torch.nn import Dropout, CrossEntropyLoss\n\nfrom code.abstract.abstract_torch_module import AbstractTorchModule\nimport torch\nimport numpy as np\n\nfrom code.gnns.qa_gnn import QaGNN\nfrom code.utils.evaluation.choice_model_output import ChoiceModelOutput\nfrom code.utils.torch_utils.xavier_linear import XavierLinear\n\n\nclass QAModel(AbstractTorchModule):\n n_edge_types = 4\n\n def __init__(self, configuration):\n AbstractTorchModule.__init__(self)\n\n self.layers = configuration[\"model_parameters\"][\"gnn_layers\"]\n self.configuration = configuration\n self.max_nodes = configuration[\"task\"][\"max_nodes\"]\n self.max_query_size = configuration[\"task\"][\"max_query_size\"]\n self.max_candidates = configuration[\"task\"][\"max_candidates\"]\n\n embedding_input_dim = 300\n\n self.gcn = QaGNN(dim=512,\n n_layers=self.layers,\n n_relations=self.n_edge_types,\n share_parameters=True)\n\n self.node_compress_mlp = torch.nn.Sequential(XavierLinear(embedding_input_dim, 256),\n torch.nn.Tanh(),\n torch.nn.Dropout(p=0.2))\n\n self.node_mlp = torch.nn.Sequential(XavierLinear(512, 1024),\n torch.nn.Tanh(),\n torch.nn.Dropout(p=0.2),\n XavierLinear(1024, 512),\n torch.nn.Tanh(),\n torch.nn.Dropout(p=0.2))\n\n # self.lstm = LSTM(3072, 256, 2, batch_first=True, bidirectional=True)\n\n self.lstm1 = torch.nn.LSTM(embedding_input_dim, 256, num_layers=1, batch_first=True, bidirectional=True,\n dropout=0)\n self.lstm2 = torch.nn.LSTM(512, 128, num_layers=1, batch_first=True, bidirectional=True, dropout=0)\n self.query_dropout = Dropout(p=0.2)\n\n self.second_mlp = torch.nn.Sequential(XavierLinear(768, 128),\n torch.nn.Tanh(),\n XavierLinear(128, 1),\n torch.nn.Dropout(p=0.2))\n\n self.loss = CrossEntropyLoss(reduction=\"none\")\n\n def forward(self, batch):\n processed_batch = self.process_batch(batch)\n\n this_batch_max_nodes = max(processed_batch[\"nodes_length_mb\"])\n normalized_batch_adj_mats = torch.FloatTensor(processed_batch[\"adj_mb\"]).to(self.device)[:, :,\n :this_batch_max_nodes, :this_batch_max_nodes]\n\n query = torch.FloatTensor(processed_batch[\"query_mb\"]).to(self.device).view(len(batch), self.max_query_size, -1)\n query_lengths = torch.LongTensor(processed_batch[\"query_length_mb\"]).to(self.device)\n\n packed_representation = torch.nn.utils.rnn.pack_padded_sequence(query, query_lengths.cpu(),\n batch_first=True, enforce_sorted=False)\n\n lstm1_output, _ = self.lstm1(packed_representation)\n _, (query_lasthidden, _) = self.lstm2(lstm1_output)\n\n final_output = query_lasthidden.transpose(1, 0).reshape(len(batch), -1)\n final_output = self.query_dropout(final_output)\n\n query_to_node = final_output.unsqueeze(1).repeat(1, this_batch_max_nodes, 1)\n nodes = torch.FloatTensor(processed_batch[\"nodes_mb\"]).to(self.device).view(len(batch), self.max_nodes, -1)[:,\n :this_batch_max_nodes, :]\n node_lengths = torch.LongTensor(processed_batch[\"nodes_length_mb\"]).to(self.device)\n\n node_mask = torch.arange(this_batch_max_nodes, dtype=torch.long).to(self.device).expand(node_lengths.shape[0],\n this_batch_max_nodes) < node_lengths.unsqueeze(\n 1)\n node_mask = node_mask.unsqueeze(-1).float()\n\n nodes *= node_mask\n query_to_node *= node_mask\n\n nodes = self.node_compress_mlp(nodes)\n\n nodes = torch.cat([query_to_node, nodes], -1)\n nodes = self.node_mlp(nodes)\n\n vertex_embeddings = self.gcn(nodes, normalized_batch_adj_mats, mask=node_mask)\n\n vertex_embeddings = vertex_embeddings.view(len(batch), this_batch_max_nodes, -1)\n final_vertex_embeddings = 
torch.cat([query_to_node, vertex_embeddings], -1)\n final_vertex_embeddings = self.second_mlp(final_vertex_embeddings)\n\n final_vertex_embeddings *= node_mask\n\n bmask = torch.FloatTensor(processed_batch[\"bmask_mb\"]).to(self.device)[:, :, :this_batch_max_nodes]\n\n final_vertex_embeddings = final_vertex_embeddings.squeeze(-1).unsqueeze(1)\n\n candidate_embeddings = bmask * final_vertex_embeddings\n cand_unconnected = candidate_embeddings == 0\n\n cand_n_connections = (1 - cand_unconnected.float()).sum(dim=-1)\n cand_connected = torch.min(cand_n_connections, torch.ones_like(cand_n_connections))\n\n candidate_embeddings = torch.where(cand_unconnected, torch.ones_like(candidate_embeddings) * -1e8,\n candidate_embeddings)\n\n candidate_embeddings, _ = torch.max(candidate_embeddings, dim=-1)\n\n answers = torch.LongTensor(processed_batch[\"answer_positions_mb\"]).to(self.device)\n\n gold_candidate_connected = cand_connected[torch.arange(cand_connected.size(0)), answers]\n\n # This is a bit hacky, might want to refactor.\n # We only see negative targets at test time when the answer is not a mention, so we could actually skip\n # computing the loss entirely in those cases.\n loss_targets = torch.max(answers, torch.zeros_like(answers))\n loss = (self.loss(candidate_embeddings, loss_targets) * gold_candidate_connected).mean()\n\n scores = torch.softmax(candidate_embeddings, dim=-1).detach().cpu().numpy()\n\n predictions = []\n for i, example in enumerate(batch):\n example_scores = scores[i]\n example_gold = example[\"answer_position\"]\n\n example_output = ChoiceModelOutput(example_scores, example_gold)\n predictions.append(example_output)\n\n return loss, predictions\n\n def get_gnn(self):\n return self.gcn\n\n def process_batch(self, data_mb):\n answers_mb = [d[\"answer_position\"] for d in data_mb]\n\n id_mb = [d['id'] for d in data_mb]\n\n candidates_orig_mb = [d['candidates_orig'] for d in data_mb]\n candidates_orig_mb2 = [d['candidates_orig2'] for d in data_mb]\n\n candidates_mb = [d['candidates'] for d in data_mb]\n\n nodes_mb = np.array([np.pad(np.array([c.mean(0) for c in d['nodes_glove']]),\n ((0, self.max_nodes - len(d['nodes_candidates_id'])), (0, 0)),\n mode='constant')\n for d in data_mb])\n\n query_mb = np.stack([np.pad(d['query_glove'],\n ((0, self.max_query_size - d['query_glove'].shape[0]), (0, 0)),\n mode='constant')\n for d in data_mb], 0)\n\n nodes_length_mb = np.stack([len(d['nodes_candidates_id']) for d in data_mb], 0)\n query_length_mb = np.stack([d['query_glove'].shape[0] for d in data_mb], 0)\n\n adj_mb = []\n for d in data_mb:\n\n adj_ = []\n\n if len(d['edges_in']) == 0:\n adj_.append(np.zeros((self.max_nodes, self.max_nodes)))\n else:\n adj = scipy.sparse.coo_matrix((np.ones(len(d['edges_in'])), np.array(d['edges_in']).T),\n shape=(self.max_nodes, self.max_nodes)).toarray()\n\n adj_.append(adj)\n\n if len(d['edges_out']) == 0:\n adj_.append(np.zeros((self.max_nodes, self.max_nodes)))\n else:\n adj = scipy.sparse.coo_matrix((np.ones(len(d['edges_out'])), np.array(d['edges_out']).T),\n shape=(self.max_nodes, self.max_nodes)).toarray()\n\n adj_.append(adj)\n\n if len(d['edges_coref']) == 0:\n adj_.append(np.zeros((self.max_nodes, self.max_nodes)))\n else:\n adj = scipy.sparse.coo_matrix((np.ones(len(d['edges_coref'])), np.array(d['edges_coref']).T),\n shape=(self.max_nodes, self.max_nodes)).toarray()\n\n adj_.append(adj)\n\n adj = np.pad(np.ones((len(d['nodes_candidates_id']), len(d['nodes_candidates_id']))),\n ((0, self.max_nodes - len(d['nodes_candidates_id'])),\n 
(0, self.max_nodes - len(d['nodes_candidates_id']))), mode='constant') \\\n - adj_[0] - adj_[1] - adj_[2] - np.pad(np.eye(len(d['nodes_candidates_id'])),\n ((0, self.max_nodes - len(d['nodes_candidates_id'])),\n (0, self.max_nodes - len(d['nodes_candidates_id']))),\n mode='constant')\n\n adj_.append(np.clip(adj, 0, 1))\n\n adj = np.stack(adj_, 0)\n\n d_ = adj.sum(-1)\n d_[np.nonzero(d_)] **= -1\n adj = adj * np.expand_dims(d_, -1)\n\n adj_mb.append(adj)\n\n adj_mb = np.array(adj_mb)\n\n bmask_mb = np.array([np.pad(np.array([i == np.array(d['nodes_candidates_id'])\n for i in range(len(d['candidates']))]),\n ((0, self.max_candidates - len(d['candidates'])),\n (0, self.max_nodes - len(d['nodes_candidates_id']))), mode='constant')\n for d in data_mb])\n\n return {'id_mb': id_mb, 'nodes_mb': nodes_mb, 'nodes_length_mb': nodes_length_mb,\n 'query_mb': query_mb, 'query_length_mb': query_length_mb, 'bmask_mb': bmask_mb,\n 'adj_mb': adj_mb, 'candidates_mb': candidates_mb, 'candidates_orig_mb': candidates_orig_mb,\n 'candidates_orig_mb2': candidates_orig_mb2, \"answer_positions_mb\": answers_mb}" ]
[ [ "numpy.expand_dims", "torch.max", "torch.cat", "torch.FloatTensor", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout", "torch.softmax", "numpy.pad", "numpy.clip", "numpy.stack", "torch.arange", "numpy.zeros", "torch.ones_like", "torch.LongTensor", "numpy.nonzero", "torch.zeros_like", "numpy.array", "torch.nn.LSTM", "torch.nn.Tanh" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JinLi97/recommender-system-dev-workshop-code
[ "e2673360f3dfaa561f35fa4d50b8726bc46cd80d" ]
[ "src/offline/movie/recall-batch/service_impl.py" ]
[ "import logging\nimport numpy as np\nimport json\nimport itertools\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass ServiceImpl:\n\n def __init__(self,\n recall_per_news_id=10,\n similar_entity_threshold=20,\n recall_threshold=2.0,\n recall_merge_number=20,\n entity_index_l={},\n word_index_l={},\n entity_embedding_l=[]):\n\n logging.info('Initial Service implementation...')\n logging.info(\n 'recall_per_news_id = %s, similar_entity_threshold=%s, recall_threshold=%s, recall_merge_number=%s',\n recall_per_news_id,\n similar_entity_threshold,\n recall_threshold,\n recall_merge_number\n )\n #\n self.recall_per_news_id = int(recall_per_news_id)\n self.similar_entity_threshold = int(similar_entity_threshold)\n self.recall_threshold = float(recall_threshold)\n self.recall_merge_number = int(recall_merge_number)\n self.entity_index = entity_index_l\n self.word_index = word_index_l\n self.entity_embedding = entity_embedding_l\n\n def analyze_shot_record(self, record, id):\n if id in record.keys():\n current_count = record[id]\n record[id] = record[id] + 1\n else:\n record[id] = 1\n\n # 根据召回位置打分;记录命中的次数;去重\n def recall_pos_score(self, src_item, topn_list, param, shot_record):\n list_with_score = []\n for pos, idx in enumerate(topn_list):\n if src_item != str(idx):\n current_idx_with_score = {}\n current_idx_with_score['id'] = str(idx)\n current_idx_with_score['score'] = (len(topn_list) - 1 - pos) * param['w'] + param['b']\n self.analyze_shot_record(shot_record, str(idx))\n list_with_score.append(current_idx_with_score)\n return list_with_score\n\n def recall_by_popularity(self, news_ids, recall_wrap, recall_items, multiple_shot_record):\n # 根据最近阅读的记录召回\n # 1. category: 类别\n # 2. director: 导演\n # 3. actor: 演员\n # 4. language: 语言\n # 5. level: 分级 \n # 6. year: 年限 \n dict_id_content = recall_wrap['content']\n dict_wrap = recall_wrap['dict_wrap']\n topn_wrap = recall_wrap['config']['mt_topn']\n weights = recall_wrap['config']['pos_weights']\n popularity_method_list = recall_wrap['config']['pop_mt_list']\n for news_id in news_ids:\n for mt in popularity_method_list:\n src_item = news_id\n current_prop = dict_id_content[src_item][mt]\n logging.info(\n \"top n {} method with following {}\".format(mt, current_prop))\n single_recall_result = {}\n current_list_with_score = []\n if current_prop[0] != None:\n for prop in current_prop:\n current_list_with_score = current_list_with_score + \\\n self.recall_pos_score(src_item,\n dict_wrap[mt][prop][0:topn_wrap[mt]],\n weights[mt], multiple_shot_record)\n single_recall_result['method'] = mt\n single_recall_result['list'] = current_list_with_score\n logging.info(\"method {} find {} candidates\".format(\n mt, len(current_list_with_score)))\n recall_items.append(single_recall_result)\n\n def recall_by_portrait(self, user_portrait, recall_wrap, recall_items, multiple_shot_record):\n # 根据用户画像做热门召回\n # 1. category: 类别\n # 2. director: 导演\n # 3. actor: 演员\n # 4. language: 语言\n # 5. level: 分级 \n # 6. year: 年限 \n # 7. review: 评论/描述\n # 8. photo: 海报 \n # 9. 
ub: 用户行为/双塔模型\n dict_wrap = recall_wrap['dict_wrap']\n topn_wrap = recall_wrap['config']['mt_topn']\n weights = recall_wrap['config']['pos_weights']\n portrait_method_list = recall_wrap['config']['portrait_mt_list']\n for mt in portrait_method_list:\n current_prop = user_portrait[mt]\n logging.info(\n \"top n user portrait {} method with following {}\".format(mt, current_prop))\n single_recall_result = {}\n current_list_with_score = []\n if current_prop['recent'] != None:\n user_mt = \"portrait_{}\".format(mt)\n for prop in current_prop['recent'][0]:\n if prop and dict_wrap[mt].get(prop):\n current_list_with_score = current_list_with_score + \\\n self.recall_pos_score(None,\n dict_wrap[mt][prop][0:topn_wrap[user_mt]],\n weights[user_mt], multiple_shot_record)\n else:\n logging.warning(\"cannot find '{}' in dict_wrap[{}]\".format(prop, mt))\n\n single_recall_result['method'] = user_mt\n single_recall_result['list'] = current_list_with_score\n logging.info(\"portrait method {} find {} candidates\".format(\n mt, len(current_list_with_score)))\n recall_items.append(single_recall_result)\n # 根据用户画像做相似性召回\n # 1. ub: 用户行为/YoutubeDNN\n user_ub_embedding = user_portrait['ub_embeddding']\n ub_faiss_index = recall_wrap['ub_index']\n ub_idx_mapping = recall_wrap['ub_idx_mapping']\n D, I = ub_faiss_index.search(np.ascontiguousarray(user_ub_embedding), topn_wrap['portrait_ub'])\n # mapping index code to item code\n single_recall_result = {}\n single_recall_result['method'] = 'portrait_ub'\n single_recall_result['list'] = []\n for d, i in zip(D[0], I[0]):\n map_idx = ub_idx_mapping[i]\n current_idx_with_score = {}\n current_idx_with_score['id'] = map_idx\n current_idx_with_score['score'] = d\n self.analyze_shot_record(multiple_shot_record, map_idx)\n single_recall_result['list'].append(current_idx_with_score)\n recall_items.append(single_recall_result)\n\n def merge_recall_result(self, news_ids, **config_dict):\n ########################################\n # 召回融合排序逻辑\n ########################################\n recall_wrap = config_dict['recall_wrap']\n user_portrait = config_dict['user_portrait']\n\n recall_items = []\n multiple_shot_record = {}\n # 根据最近阅读的历史做召回\n self.recall_by_popularity(news_ids, recall_wrap, recall_items, multiple_shot_record)\n # 根据用户画像做召回\n self.recall_by_portrait(user_portrait, recall_wrap, recall_items, multiple_shot_record)\n\n # recall_merge_cnt = 100\n n_last_len = recall_wrap['config']['merge_cnt']\n method_weights = recall_wrap['config']['mt_weights']\n raw_item_list = {}\n\n for mt_list in recall_items:\n mt = mt_list['method']\n list_result = mt_list['list']\n method_weight = method_weights[mt]\n for idx, id_with_score in enumerate(list_result):\n current_id = id_with_score['id']\n current_score = id_with_score['score']\n multiple_shot_score = multiple_shot_record[current_id]\n whole_score = method_weight * \\\n (current_score + multiple_shot_score)\n current_result = []\n current_result.append(current_id)\n current_result.append(mt)\n current_result.append(idx)\n current_result.append(whole_score)\n # update raw list\n if current_id in raw_item_list.keys():\n if whole_score > raw_item_list[current_id][3]:\n raw_item_list[current_id] = current_result\n else:\n raw_item_list[current_id] = current_result\n\n # 根据最终得分进行排序\n sort_item_list = dict(\n sorted(raw_item_list.items(), key=lambda item: item[1][3], reverse=True))\n\n logging.info(\"sort {} result is {}\".format(\n len(sort_item_list), sort_item_list))\n\n recall_result = {}\n\n # 截取前recall_merge_cnt的结果作为recall的结果\n 
recall_result = dict(itertools.islice(sort_item_list.items(), n_last_len))\n\n logging.info('Recall has done & return -> {}'.format(recall_result))\n return recall_result\n" ]
[ [ "numpy.ascontiguousarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tubbz-alt/deep_learning_final
[ "d187c7125170f56c4744c1ba2bdca948c08fe108" ]
[ "models.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Implements SRGAN models: https://arxiv.org/abs/1609.04802\n\nTODO:\n\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\ndef swish(x):\n return x * F.sigmoid(x)\n\nclass FeatureExtractor(nn.Module):\n def __init__(self, cnn, feature_layer=11):\n super(FeatureExtractor, self).__init__()\n self.features = nn.Sequential(*list(cnn.features.children())[:(feature_layer+1)])\n\n def forward(self, x):\n return self.features(x)\n\n\nclass residualBlock(nn.Module):\n def __init__(self, in_channels=64, k=3, n=64, s=1):\n super(residualBlock, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels, n, k, stride=s, padding=1)\n self.bn1 = nn.BatchNorm2d(n)\n self.conv2 = nn.Conv2d(n, n, k, stride=s, padding=1)\n self.bn2 = nn.BatchNorm2d(n)\n\n def forward(self, x):\n y = swish(self.bn1(self.conv1(x)))\n return self.bn2(self.conv2(y)) + x\n\nclass upsampleBlock(nn.Module):\n # Implements resize-convolution\n def __init__(self, in_channels, out_channels):\n super(upsampleBlock, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)\n self.shuffler = nn.PixelShuffle(2)\n\n def forward(self, x):\n return swish(self.shuffler(self.conv(x)))\n\nclass Generator(nn.Module):\n def __init__(self, n_residual_blocks, upsample_factor):\n super(Generator, self).__init__()\n self.n_residual_blocks = n_residual_blocks\n self.upsample_factor = upsample_factor\n\n self.conv1 = nn.Conv2d(3, 64, 9, stride=1, padding=4)\n\n for i in range(self.n_residual_blocks):\n self.add_module('residual_block' + str(i+1), residualBlock())\n\n self.conv2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)\n self.bn2 = nn.BatchNorm2d(64)\n\n for i in range(int(self.upsample_factor/2)):\n self.add_module('upsample' + str(i+1), upsampleBlock(64, 256))\n\n self.conv3 = nn.Conv2d(64, 3, 9, stride=1, padding=4)\n\n def forward(self, x):\n x = swish(self.conv1(x))\n\n y = x.clone()\n for i in range(self.n_residual_blocks):\n y = self.__getattr__('residual_block' + str(i+1))(y)\n\n x = self.bn2(self.conv2(y)) + x\n\n for i in range(int(self.upsample_factor/2)):\n x = self.__getattr__('upsample' + str(i+1))(x)\n\n return self.conv3(x)\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)\n\n self.conv2 = nn.Conv2d(64, 64, 3, stride=2, padding=1)\n self.bn2 = nn.BatchNorm2d(64)\n self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)\n self.bn3 = nn.BatchNorm2d(128)\n self.conv4 = nn.Conv2d(128, 128, 3, stride=2, padding=1)\n self.bn4 = nn.BatchNorm2d(128)\n self.conv5 = nn.Conv2d(128, 256, 3, stride=1, padding=1)\n self.bn5 = nn.BatchNorm2d(256)\n self.conv6 = nn.Conv2d(256, 256, 3, stride=2, padding=1)\n self.bn6 = nn.BatchNorm2d(256)\n self.conv7 = nn.Conv2d(256, 512, 3, stride=1, padding=1)\n self.bn7 = nn.BatchNorm2d(512)\n self.conv8 = nn.Conv2d(512, 512, 3, stride=2, padding=1)\n self.bn8 = nn.BatchNorm2d(512)\n\n # Replaced original paper FC layers with FCN\n self.conv9 = nn.Conv2d(512, 1, 1, stride=1, padding=1)\n\n def forward(self, x):\n x = swish(self.conv1(x))\n\n x = swish(self.bn2(self.conv2(x)))\n x = swish(self.bn3(self.conv3(x)))\n x = swish(self.bn4(self.conv4(x)))\n x = swish(self.bn5(self.conv5(x)))\n x = swish(self.bn6(self.conv6(x)))\n x = swish(self.bn7(self.conv7(x)))\n x = swish(self.bn8(self.conv8(x)))\n\n x = self.conv9(x)\n return F.sigmoid(F.avg_pool2d(x, x.size()[2:])).view(x.size()[0], 
-1)\n" ]
[ [ "torch.nn.functional.sigmoid", "torch.nn.Conv2d", "torch.nn.PixelShuffle", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
act65/mri-reconstruction
[ "2dcf30e10c37a482f1aab2524c5966d03eb72085" ]
[ "scripts/train_bernoulliae.py" ]
[ "import tensorflow as tf\nimport src.bernoulliae as bernoulliae\nimport src.utils as utils\n\nimport os\nimport argparse\n\ndef argumentparser():\n parser = argparse.ArgumentParser(description='Train an SparseAE')\n parser.add_argument('--batch_size', type=int, default=50,\n help='Batch size...')\n parser.add_argument('--epochs', type=int, default=50,\n help='number of epochs')\n parser.add_argument('--logdir', type=str, default='/tmp/sparseae/',\n help='location to save logs')\n parser.add_argument('--n_hidden', type=int, default=12)\n parser.add_argument('--width', type=int, default=16)\n parser.add_argument('--depth', type=int, default=4)\n parser.add_argument('--learning_rate', type=float, default=0.0001)\n parser.add_argument('--beta', type=float, default=1.0)\n return parser.parse_args()\n\ndef main(args):\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(\"/tmp/MNIST_data/\", one_hot=True)\n\n PL = utils.PyramidLoss(32, 3)\n\n with tf.variable_scope('s'):\n nn = bernoulliae.BernoulliAE(args.n_hidden, args.width, args.depth)\n\n x = tf.placeholder(shape=[None, 32, 32, 1], dtype=tf.float32)\n recon_loss, prior_loss = nn.make_losses(x)\n pl_loss = PL((x, nn.y))\n loss = pl_loss + recon_loss #+args.beta*prior_loss\n\n train_summaries = [\n tf.summary.scalar('train/loss/recon', recon_loss),\n tf.summary.scalar('train/loss/latent', prior_loss),\n tf.summary.histogram('latents', nn.z),\n tf.summary.image('train/input', x),\n tf.summary.image('train/recon', tf.nn.sigmoid(nn.y)),\n # tf.summary.scalar('train/Px/real', p_real),\n # tf.summary.scalar('train/Px/fake', p_fake)\n ]\n\n p_real = nn.estimate_density(x)\n p_fake = nn.estimate_density(tf.random_normal(shape=tf.shape(x)))\n test_summaries = [\n tf.summary.scalar('test/loss/recon', recon_loss),\n tf.summary.scalar('test/loss/latent', prior_loss),\n tf.summary.image('test/input', x),\n tf.summary.image('test/recon', tf.nn.sigmoid(nn.y)),\n tf.summary.scalar('test/Px/real', tf.reduce_mean(p_real)),\n tf.summary.scalar('test/Px/fake', tf.reduce_mean(p_fake))\n ]\n\n train_merged = tf.summary.merge(train_summaries)\n test_merged = tf.summary.merge(test_summaries)\n\n global_step = tf.train.get_or_create_global_step()\n learning_rate = args.learning_rate\n\n opt = tf.train.AdamOptimizer(learning_rate)\n gnvs = opt.compute_gradients(loss, var_list=nn.encoder.variables+nn.decoder.variables)\n gnvs += opt.compute_gradients(prior_loss, var_list=nn.prior_variables)\n\n # gnvs = [(tf.clip_by_norm(g, 1), v) for g, v in gnvs]\n train_step = opt.apply_gradients(gnvs, global_step=global_step)\n saver = tf.train.Saver()\n checkpoint = tf.contrib.eager.Checkpoint(**{var.name: var for var in tf.global_variables()})\n\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n writer = tf.summary.FileWriter(args.logdir, sess.graph)\n\n for i in range(args.epochs*1000):\n batch_x, _ = mnist.train.next_batch(args.batch_size)\n _, train_summ = sess.run([train_step, train_merged],\n feed_dict={x: bernoulliae.BernoulliAE.preprocess(batch_x)})\n\n if i % 10 == 0:\n writer.add_summary(train_summ, i)\n\n if i % 100 == 0:\n L, test_summ = sess.run([loss, test_merged],\n feed_dict={x:\n bernoulliae.BernoulliAE.preprocess(mnist.test.images[:100, ...])})\n print('\\rStep: {} Loss: {}'.format(i, L), end='', flush=True)\n writer.add_summary(test_summ, i)\n\n if i % 1000 == 0:\n save_path = checkpoint.save(os.path.join(args.logdir, \"infovae_ckpt.ckpt\"))\n save_path = saver.save(sess, 
os.path.join(args.logdir,\"infovae_saver.ckpt\"))\n print(save_path)\n\nif __name__ == '__main__':\n main(argumentparser())\n" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.nn.sigmoid", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.summary.image", "tensorflow.global_variables", "tensorflow.placeholder", "tensorflow.train.get_or_create_global_step", "tensorflow.global_variables_initializer", "tensorflow.variable_scope", "tensorflow.train.AdamOptimizer", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.summary.scalar", "tensorflow.summary.merge", "tensorflow.summary.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
MarshallKrakauer/DecisionTree
[ "67a0623376a11bc43d94c045519a382b5e46915a" ]
[ "RandomForest.py" ]
[ "from collections import defaultdict\nfrom ClassificationTree import ClassificationTree\nfrom RegressionTree import RegressionTree\nfrom MultiClassTree import MultiClassTree\nfrom AbstractDecisionTree import print_breadth_first, get_dataframe, get_multi_class_dataframe\nimport random\nimport numpy as np\n\nclass RandomForest:\n\n def __init__(self, dataframe, y_col='target', target_type='binary', num_trees=3,\n parent=None, depth=0, random_seed=0.0, max_depth=3,\n min_sample_split=0, min_impurity_decrease=float('-inf')):\n if num_trees > 10:\n raise ValueError(\"Max of 10 trees\")\n elif num_trees < 2:\n raise ValueError(\"At least 2 trees required\")\n else:\n self.num_trees = int(num_trees)\n self.df = dataframe\n self.y_col = y_col\n self.depth = depth\n self.max_depth = max_depth\n self.min_sample_split = min_sample_split\n self.min_impurity_decrease = min_impurity_decrease\n self.parent = parent\n self.random_seed = random_seed\n self.tree_list = []\n self.target_type = target_type\n\n def create_trees(self):\n \"\"\"Initialize and fit all the trees in the random forest\"\"\"\n for i in range(self.num_trees):\n # For the first model, use the random seed. We don't want to use that seed for every model\n # Since it will produce identical copies of the first tree\n model = self.make_classifier(is_first=i==0)\n model.create_tree()\n self.tree_list.append(model)\n\n def make_classifier(self, is_first):\n \"\"\"\n Create regression or classification tree.\n\n :param is_first: bool\n True if the first tree in the forest. The first tree uses the random seed, others use a\n randomly generated one\n :return: DecisionTree\n DecisionTree that will be part of \"forest\"\n \"\"\"\n if is_first:\n seed = self.random_seed\n else:\n seed = random.random()\n\n if self.target_type == 'binary':\n model = ClassificationTree(self.df, self.y_col, None, 0, seed,\n self.max_depth, self.min_sample_split, self.min_impurity_decrease)\n elif self.target_type == 'multi_class':\n model = MultiClassTree(self.df, self.y_col, None, 0, seed,\n self.max_depth, self.min_sample_split, self.min_impurity_decrease)\n else:\n model = RegressionTree(self.df, self.y_col, None, 0, seed,\n self.max_depth, self.min_sample_split, self.min_impurity_decrease)\n\n return model\n\n def predict_proba(self, data_row):\n \"\"\"\n Probability prediction for classification models\n\n :param data_row: series\n Row of data from which to make a prediction\n :return: float or dict\n Returns single float value for binary prediction value. 
For multi class problem,\n returns a dict with probability for each class\n \"\"\"\n if self.target_type == 'continuous':\n raise AttributeError(\"predict_proba not available for regression model\")\n\n if self.target_type == 'binary':\n prediction_list = []\n for decision_tree in self.tree_list:\n percentage = decision_tree.predict_proba(data_row)\n prediction_list.append(percentage)\n\n return np.mean(prediction_list)\n\n elif self.target_type == 'multi_class':\n def default_dict_zero():\n return 0\n predict_dict = defaultdict(default_dict_zero)\n for decision_tree in self.tree_list:\n output_dict = decision_tree.predict_proba(data_row)\n for key, percent in output_dict.items():\n predict_dict[key] += percent / self.num_trees\n\n return dict(predict_dict)\n\n def predict(self, data_row, cutoff=0.5):\n \"\"\"\n Get predicted value for regression, or predicted class for classification\n\n :param data_row: series\n Row of data from which to make a prediction\n :param cutoff: int\n Cutoff value for binary prediction. If above or equal to this value, will predict 1. If below,\n predicts 0. Not used in multi class or regression\n :return: float or int\n Single value of the most likely class (with classification). For regression, produces predicted value.\n \"\"\"\n if self.target_type == 'binary':\n if self.predict_proba(data_row) >= cutoff:\n return 1\n else:\n return 0\n\n elif self.target_type == 'continuous':\n prediction_list = []\n\n for decision_tree in self.tree_list:\n percentage = decision_tree.predict(data_row)\n prediction_list.append(percentage)\n\n return np.mean(prediction_list)\n\n else:\n prediction_dict = self.predict_proba(data_row)\n max_value = float('-inf')\n best_prediction = None\n for key, current_value in prediction_dict.items():\n if prediction_dict[key] > max_value:\n max_value = current_value\n best_prediction = key\n\n return best_prediction\n\nif __name__ == '__main__':\n # Select type of trees: binary, multi_class, or continuous (ie regression)\n prediction_type = 'multi_class'\n print_trees = False\n\n # Different dataframe creation functions for multi_class and binary/continuous\n if prediction_type == 'multi_class':\n df, individual_val, true_value = get_multi_class_dataframe() # get_dataframe(is_classification)\n else:\n df, individual_val, true_value = get_dataframe(prediction_type == 'binary')\n\n rf = RandomForest(dataframe=df, y_col='y',target_type=prediction_type,\n max_depth=3, num_trees=3, random_seed=777, min_impurity_decrease=0.4)\n rf.create_trees()\n\n if print_trees:\n for idx, tree in enumerate(rf.tree_list):\n print('~~~TREE NUMBER {}~~~'.format(idx+1))\n print_breadth_first(tree)\n\n # For classification trees we can print out predicted value and class\n # For regression trees, we only have a predicted value\n if prediction_type in ['binary','multi_class']:\n prob = rf.predict_proba(individual_val)\n class_ = rf.predict(individual_val)\n\n # We have a specific value we can round for binary predictions\n # For multiclass one, we have the entire dictionary\n if prediction_type == 'binary':\n print('predicted:', np.round(prob, 3),',', class_, 'actual:', true_value)\n else:\n print('predicted:', prob , ',', class_, 'actual:', true_value)\n else:\n value = rf.predict(individual_val)\n print('predicted:', np.round(value, 3), 'actual:', true_value)" ]
[ [ "numpy.round", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stopyun-jirong/igrf
[ "951ad3414b2b89e0121faec02e3e194b0e745428" ]
[ "src/igrf/base.py" ]
[ "import xarray\nfrom datetime import datetime\nimport numpy as np\nimport subprocess\nimport shutil\nimport os\nfrom pathlib import Path\nimport importlib.resources\n\nfrom .utils import mag_vector2incl_decl, datetime2yeardec\n\n\ndef cmake(setup_file: Path):\n \"\"\"\n attempt to build using CMake\n \"\"\"\n exe = shutil.which(\"ctest\")\n if not exe:\n raise FileNotFoundError(\"CMake not available\")\n\n subprocess.check_call([exe, \"-S\", str(setup_file), \"-VV\"])\n\n\ndef build_exe(exe_name: str) -> str:\n # build on run\n if os.name == \"nt\":\n exe_name += \".exe\"\n if not importlib.resources.is_resource(__package__, exe_name):\n with importlib.resources.path(__package__, \"setup.cmake\") as setup_file:\n cmake(setup_file)\n if not importlib.resources.is_resource(__package__, exe_name):\n raise ModuleNotFoundError(\"could not build MSISE00 Fortran driver\")\n\n return exe_name\n\n\ndef grid(\n time: datetime,\n glat: np.ndarray,\n glon: np.ndarray,\n alt_km: np.ndarray,\n *,\n isv: int = 0,\n itype: int = 1,\n) -> xarray.Dataset:\n\n glat = np.atleast_1d(glat)\n glon = np.atleast_1d(glon)\n\n yeardec = datetime2yeardec(time)\n\n x = np.empty(glat.size)\n y = np.empty_like(x)\n z = np.empty_like(x)\n f = np.empty_like(x)\n\n with importlib.resources.path(__package__, build_exe(\"igrf13_driver\")) as exe:\n for i, (la, lo) in enumerate(zip(glat.ravel(), glon.ravel())):\n cmd = [str(exe), str(yeardec), str(la), str(lo), str(alt_km), str(isv), str(itype)]\n ret = subprocess.run(\n cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if ret.returncode != 0:\n raise RuntimeError(\n f\"IGRF13 error code {ret.returncode}\\n{ret.stderr}\\n{' '.join(cmd)}\"\n )\n # different compilers throw in extra \\n\n x[i], y[i], z[i], f[i] = list(map(float, ret.stdout.split()))\n\n # %% assemble output\n if glat.ndim == 2 and glon.ndim == 2: # assume meshgrid\n coords = {\"glat\": glat[:, 0], \"glon\": glon[0, :]}\n elif glat.ndim == 1 and glon.ndim == 1:\n coords = {\"glat\": glat, \"glon\": glon}\n else:\n raise ValueError(f\"glat/glon shapes: {glat.shape} {glon.shape}\")\n\n mag = xarray.Dataset(coords=coords, attrs={\"time\": time, \"isv\": isv, \"itype\": itype})\n mag[\"north\"] = ((\"glat\", \"glon\"), x.reshape(glat.shape))\n mag[\"east\"] = ((\"glat\", \"glon\"), y.reshape(glat.shape))\n mag[\"down\"] = ((\"glat\", \"glon\"), z.reshape(glat.shape))\n mag[\"total\"] = ((\"glat\", \"glon\"), f.reshape(glat.shape))\n\n decl, incl = mag_vector2incl_decl(mag.north, mag.east, mag.down)\n\n mag[\"incl\"] = ((\"glat\", \"glon\"), incl)\n mag[\"decl\"] = ((\"glat\", \"glon\"), decl)\n\n return mag\n\n\ndef igrf(\n time: datetime, glat: float, glon: float, alt_km: np.ndarray, *, isv: int = 0, itype: int = 1,\n) -> xarray.Dataset:\n \"\"\"\n\n Parameters\n ----------\n\n date: datetime.date or decimal year yyyy.dddd\n glat, glon: geographic Latitude, Longitude\n alt_km: altitude [km] above sea level for itype==1\n isv: 0 for main geomagnetic field\n itype: 1: altitude is above sea level\n \"\"\"\n\n # decimal year\n yeardec = datetime2yeardec(time)\n\n alt_km = np.atleast_1d(alt_km)\n Bnorth = np.empty(alt_km.size)\n Beast = np.empty_like(Bnorth)\n Bvert = np.empty_like(Bnorth)\n Btotal = np.empty_like(Bnorth)\n\n with importlib.resources.path(__package__, build_exe(\"igrf13_driver\")) as exe:\n for i, a in enumerate(alt_km):\n cmd = [str(exe), str(yeardec), str(glat), str(glon), str(a), str(isv), str(itype)]\n ret = subprocess.run(\n cmd, universal_newlines=True, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if ret.returncode != 0:\n raise RuntimeError(\n f\"IGRF13 error code {ret.returncode}\\n{ret.stderr}\\n{' '.join(cmd)}\"\n )\n # different compilers throw in extra \\n\n\n Bnorth[i], Beast[i], Bvert[i], Btotal[i] = list(map(float, ret.stdout.split()))\n\n # %% assemble output\n decl, incl = mag_vector2incl_decl(Bnorth, Beast, Bvert)\n\n mag = xarray.Dataset(\n {\n \"north\": (\"alt_km\", Bnorth),\n \"east\": (\"alt_km\", Beast),\n \"down\": (\"alt_km\", Bvert),\n \"total\": (\"alt_km\", Btotal),\n \"incl\": (\"alt_km\", incl),\n \"decl\": (\"alt_km\", decl),\n },\n coords={\"alt_km\": alt_km},\n attrs={\"time\": time, \"isv\": isv, \"itype\": itype, \"glat\": glat, \"glon\": glon},\n )\n\n\n return mag\n" ]
[ [ "numpy.atleast_1d", "numpy.empty_like", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CRIPAC-DIG/K-GHRM
[ "5d73ed701b7753ee402ecfc1dbc4b20c578a4656" ]
[ "Data/ent2vec.py" ]
[ "from wikipedia2vec import Wikipedia2Vec\nimport pickle\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='robust04', help='dataset name: robust04/clueweb09')\nargs = parser.parse_args()\n\ndef save_obj(obj, name):\n with open(name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\nwiki2vec = Wikipedia2Vec.load('./enwiki_20180420_100d.pkl')\n# wiki2vec = Wikipedia2Vec.load('./enwiki_20180420_300d.pkl')\nent2id = load_obj('./{}/ent2id'.format(args.dataset))\n\nent2vec = []\nno_pretrain_emd_cnt = 0 \nfor e in ent2id:\n try:\n ent2vec.append(wiki2vec.get_entity_vector(e))\n except:\n no_pretrain_emd_cnt += 1\n ent2vec.append(np.random.randn(100))\n # ent2vec.append(np.random.randn(300))\nprint(no_pretrain_emd_cnt) # clueweb09:22820, robust04:8423\nprint(len(ent2vec)) # clueweb09:226363, robust04:108627\nnp.save('./{}/ent_embedding_100d.npy'.format(args.dataset), ent2vec)\n# np.save('./{}/ent_embedding_300d.npy'.format(args.dataset), ent2vec)\n\n# que_ent = load_obj('./{}/que_entity'.format(args.dataset))\n# with open('./{}/que_entity_list_unique.txt'.format(args.dataset), 'w') as f:\n# for i in que_ent:\n# f.writelines(str(i)+'\\t')\n# for word in que_ent[i]:\n# f.writelines(str(ent2id[word])+' ')\n# f.writelines('\\n')\n" ]
[ [ "numpy.random.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FloydHsiu/Multi-focus-Image-Fusion-Using-Encoder-Decoder-Network
[ "996583e1db60ff1f7e8fc9383e0f0785773f2f54" ]
[ "train.py" ]
[ "import tensorflow as tf\nfrom data import parseDataset\nimport model\nimport datetime\nfrom os import path\nimport tqdm\nimport argparse\n\n\[email protected](experimental_relax_shapes=True)\ndef convertData(A, B, label):\n A = A / 255.0\n B = B / 255.0\n A = A * 2.0 - 1.0\n B = B * 2.0 - 1.0\n label = label * 2.0 - 1.0\n return A, B, label\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\n '--tpu_name', dest='tpu_name', type=str, default='',\n help='Assign tpu that you want to train this code on')\n parser.add_argument(\n '--data_path', type=str, default='',\n help='Assign your training data (tfrecords)')\n parser.add_argument(\n '--logs_dir', tpye=str, default='',\n help='Assisn your directory to keep training logs')\n parser.add_argument(\n '--lytro_dir', type=str, default='',\n help='Assign directory of Lytro Multi-focus Dataset')\n\n args = parser.parse_args()\n\n if parser.tpu_name == '':\n print('Error: have no tpu_name been declared.')\n return\n if parser.data_path == '':\n print('Error: have no training data path been declared.')\n return\n if parser.logs_dir == '':\n print('Error: have no training logs directory been declared')\n return\n if parser.lytro_dir == '':\n print('Error: have no lytro multi-focus dataset directory been declared')\n\n batch_size = 128\n learning_rate = 1e-4\n learning_rate_decay = 1e-1\n epoch = 2\n\n TPU_NAME = parser.tpu_name\n DATA_PATH = parser.data_path\n LOGS_DIR = parser.logs_dir\n LYTRO_DIR = parser.lytro_dir\n\n # TPU distributed computation initialization\n cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n tpu=TPU_NAME)\n tf.config.experimental_connect_to_cluster(cluster_resolver)\n tf.tpu.experimental.initialize_tpu_system(cluster_resolver)\n tpu_strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)\n\n ############### DATAs ###############\n # split training data and valiation data\n dataset = tf.data.TFRecordDataset(DATA_PATH)\n dataset = dataset.map(parseDataset).shuffle(\n buffer_size=10000, reshuffle_each_iteration=False)\n\n train_dataset = dataset.take(90000).batch(batch_size, drop_remainder=False)\n test_dataset = dataset.skip(90000).batch(batch_size, drop_remainder=False)\n\n train_dist_dataset = tpu_strategy.experimental_distribute_dataset(\n train_dataset)\n test_dist_dataset = tpu_strategy.experimental_distribute_dataset(\n test_dataset)\n\n with tpu_strategy.scope():\n mfnet = model.MFNet()\n\n ############### LOGs ###############\n dt_now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n print(f\"############ {dt_now} ############\")\n tensorboard_log_dir = path.join(LOGS_DIR, 'fcn_origin_'+dt_now)\n\n # checkpoint initialize\n checkpoint_dir = path.join(tensorboard_log_dir, 'training_checkpoints')\n makedirs(checkpoint_dir, exist_ok=True)\n checkpoint_prefix = path.join(checkpoint_dir, \"ckpt\")\n checkpoint = tf.train.Checkpoint(mfnet=mfnet)\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint, directory=checkpoint_dir, max_to_keep=5)\n\n # Model directory initialize\n model_dir = path.join(tensorboard_log_dir, 'model')\n\n # tensorboard initialize\n tensorboard_dir = path.join(tensorboard_log_dir, 'tensorboard')\n makedirs(tensorboard_dir, exist_ok=True)\n summary_writer = tf.summary.create_file_writer(tensorboard_dir)\n\n # Read Images for Inference\n fns = [\"lytro-03-A.jpg\", \"lytro-03-B.jpg\",\n \"lytro-05-A.jpg\", \"lytro-05-B.jpg\"]\n paths = [path.join(LYTRO_DIR, fn) for fn in fns]\n\n imgs = []\n for i in 
range(0, len(paths), 2):\n tmp1 = tf.io.read_file(paths[i])\n tmp2 = tf.io.read_file(paths[i+1])\n img1 = tf.cast(\n tf.io.decode_jpeg(tmp1, channels=1),\n tf.float32) / 255.0 * 2.0 - 1\n img2 = tf.cast(\n tf.io.decode_jpeg(tmp2, channels=1),\n tf.float32) / 255.0 * 2.0 - 1\n img1 = tf.reshape(img1, (1, 520, 520, 1))\n img2 = tf.reshape(img2, (1, 520, 520, 1))\n img1 = tf.slice(img1, [0, 0, 0, 0], [1, 512, 512, 1])\n img2 = tf.slice(img2, [0, 0, 0, 0], [1, 512, 512, 1])\n imgs.append([img1, img2])\n\n # stage 1\n with tpu_strategy.scope():\n optimizer_1 = tf.optimizers.Adam(\n learning_rate=learning_rate, beta_1=0.5)\n optimizer_2 = tf.optimizers.Adam(\n learning_rate=learning_rate*learning_rate_decay, beta_1=0.5)\n\n @tf.function\n def validation(dist_inputs):\n def step_fn(inputs):\n p1 = inputs['p1']\n p2 = inputs['p2']\n label = inputs['label']\n p1, p2, label = convertData(p1, p2, label)\n pred = mfnet([p1, p2], training=False)\n loss_fn = tf.keras.losses.Huber(\n delta=0.2, reduction=tf.keras.losses.Reduction.NONE)\n mae = loss_fn(label, pred)\n loss = tf.reduce_sum(mae, keepdims=True) / (320 * 320)\n return loss\n per_example_losses = tpu_strategy.experimental_run_v2(\n step_fn, args=(dist_inputs,))\n mean_loss = tpu_strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_example_losses, axis=0)\n\n return mean_loss\n\n @tf.function\n def train_step_1(dist_inputs):\n # In training step 1, learning rate is set as 1e-4\n def step_fn(inputs):\n p1 = inputs['p1']\n p2 = inputs['p2']\n label = inputs['label']\n p1, p2, label = convertData(p1, p2, label)\n with tf.GradientTape() as g_tape:\n pred = mfnet([p1, p2], training=True)\n loss_fn = tf.keras.losses.Huber(\n delta=0.2, reduction=tf.keras.losses.Reduction.NONE)\n mae = loss_fn(label, pred)\n loss = tf.reduce_sum(\n mae, keepdims=True) / (batch_size * 320 * 320)\n grad = g_tape.gradient(loss, mfnet.trainable_variables)\n optimizer_1.apply_gradients(\n list(zip(grad, mfnet.trainable_variables)))\n return loss\n per_example_losses = tpu_strategy.experimental_run_v2(\n step_fn, args=(dist_inputs,))\n mean_loss = tpu_strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_example_losses, axis=0)\n\n return mean_loss\n\n @tf.function\n def train_step_2(dist_inputs):\n # In training step 1, learning rate is set as 1e-4 * 1e-1\n def step_fn(inputs):\n p1 = inputs['p1']\n p2 = inputs['p2']\n label = inputs['label']\n p1, p2, label = convertData(p1, p2, label)\n with tf.GradientTape() as g_tape:\n pred = mfnet([p1, p2], training=True)\n loss_fn = tf.keras.losses.Huber(\n delta=0.2, reduction=tf.keras.losses.Reduction.NONE)\n mae = loss_fn(label, pred)\n loss = tf.reduce_sum(\n mae, keepdims=True) / (batch_size * 320 * 320)\n grad = g_tape.gradient(loss, mfnet.trainable_variables)\n optimizer_2.apply_gradients(\n list(zip(grad, mfnet.trainable_variables)))\n return loss\n per_example_losses = tpu_strategy.experimental_run_v2(\n step_fn, args=(dist_inputs,))\n mean_loss = tpu_strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_example_losses, axis=0)\n\n return mean_loss\n\n @tf.function\n def inference():\n result = []\n for i_s in imgs:\n img1 = i_s[0]\n img2 = i_s[1]\n alpha = mfnet([img1, img2], training=False)\n alpha = (alpha+1.0)/2.0\n result.append(alpha)\n return result\n\n train_step = train_step_1\n\n i = 0\n for e in range(epoch):\n if e == epoch//2:\n train_step = train_step_2\n for inputs in tqdm.tqdm(train_dist_dataset):\n with tpu_strategy.scope():\n loss = train_step(inputs)\n\n if i % 1000 == 0:\n 
checkpoint.save(file_prefix=checkpoint_prefix)\n if i % 100 == 0:\n total_loss = 0.0\n count = 10000\n for inputs_val in test_dist_dataset:\n with tpu_strategy.scope():\n val_loss = tf.squeeze(validation(inputs_val))\n total_loss += val_loss\n with summary_writer.as_default():\n tf.summary.scalar('val', total_loss/count, step=i)\n result = inference()\n for j in range(len(result)):\n tf.summary.image(\n f\"Test Image {j+1}\", result[j],\n step=i)\n with summary_writer.as_default():\n tf.summary.scalar('loss', tf.squeeze(loss), step=i)\n i = i + 1\n tf.saved_model.save(mfnet, model_dir)\n tf.saved_model.save(mfnet, model_dir)\n checkpoint.save(file_prefix=checkpoint_prefix)\n" ]
[ [ "tensorflow.io.decode_jpeg", "tensorflow.reduce_sum", "tensorflow.distribute.cluster_resolver.TPUClusterResolver", "tensorflow.summary.scalar", "tensorflow.data.TFRecordDataset", "tensorflow.tpu.experimental.initialize_tpu_system", "tensorflow.summary.image", "tensorflow.squeeze", "tensorflow.saved_model.save", "tensorflow.distribute.experimental.TPUStrategy", "tensorflow.train.CheckpointManager", "tensorflow.train.Checkpoint", "tensorflow.keras.losses.Huber", "tensorflow.function", "tensorflow.config.experimental_connect_to_cluster", "tensorflow.optimizers.Adam", "tensorflow.GradientTape", "tensorflow.slice", "tensorflow.reshape", "tensorflow.io.read_file", "tensorflow.summary.create_file_writer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
William-prog/airflow
[ "b2e3e8c0718142d4cb0387f46cd77c15b67cc1e9" ]
[ "tests/core.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import Optional\nimport io\nimport json\nimport multiprocessing\nimport os\nimport pickle # type: ignore\nimport re\nimport signal\nimport subprocess\nimport tempfile\nimport unittest\nimport warnings\nfrom datetime import timedelta\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom tempfile import NamedTemporaryFile\nfrom time import sleep\nfrom unittest import mock\n\nimport sqlalchemy\nfrom dateutil.relativedelta import relativedelta\nfrom numpy.testing import assert_array_almost_equal\nfrom pendulum import utcnow\n\nfrom airflow import configuration, models\nfrom airflow import jobs, DAG, utils, settings, exceptions\nfrom airflow.bin import cli\nfrom airflow.configuration import AirflowConfigException, run_command, conf\nfrom airflow.exceptions import AirflowException\nfrom airflow.executors import SequentialExecutor\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.hooks.sqlite_hook import SqliteHook\nfrom airflow.models import (\n BaseOperator,\n Connection,\n TaskFail,\n DagBag,\n DagRun,\n Pool,\n DagModel,\n TaskInstance,\n Variable,\n)\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.check_operator import CheckOperator, ValueCheckOperator\nfrom airflow.operators.dagrun_operator import TriggerDagRunOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.settings import Session\nfrom airflow.utils import timezone\nfrom airflow.utils.dates import (\n days_ago, infer_time_unit, round_time,\n scale_time_units\n)\nfrom airflow.utils.state import State\nfrom airflow.utils.timezone import datetime\nfrom airflow.hooks import hdfs_hook\nfrom tests.test_utils.config import conf_vars\n\nDEV_NULL = '/dev/null'\nTEST_DAG_FOLDER = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'dags')\nDEFAULT_DATE = datetime(2015, 1, 1)\nDEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()\nDEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]\nTEST_DAG_ID = 'unit_tests'\nEXAMPLE_DAG_DEFAULT_DATE = days_ago(2)\n\n\nclass OperatorSubclass(BaseOperator):\n \"\"\"\n An operator to test template substitution\n \"\"\"\n template_fields = ['some_templated_field']\n\n def __init__(self, some_templated_field, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.some_templated_field = some_templated_field\n\n def execute(self, context):\n pass\n\n\nclass TestCore(unittest.TestCase):\n TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'\n TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \\\n TEST_DAG_ID + 
'test_schedule_dag_fake_scheduled_previous'\n TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \\\n TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'\n TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'\n TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'\n TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'\n\n default_scheduler_args = {\"num_runs\": 1}\n\n def setUp(self):\n self.dagbag = DagBag(\n dag_folder=DEV_NULL, include_examples=True)\n self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}\n self.dag = DAG(TEST_DAG_ID, default_args=self.args)\n self.dag_bash = self.dagbag.dags['example_bash_operator']\n self.runme_0 = self.dag_bash.get_task('runme_0')\n self.run_after_loop = self.dag_bash.get_task('run_after_loop')\n self.run_this_last = self.dag_bash.get_task('run_this_last')\n\n def tearDown(self):\n if os.environ.get('KUBERNETES_VERSION') is not None:\n return\n\n dag_ids_to_clean = [\n TEST_DAG_ID,\n self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,\n self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,\n self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,\n self.TEST_SCHEDULE_ONCE_DAG_ID,\n self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,\n self.TEST_SCHEDULE_START_END_DATES_DAG_ID,\n ]\n session = Session()\n session.query(DagRun).filter(\n DagRun.dag_id.in_(dag_ids_to_clean)).delete(\n synchronize_session=False)\n session.query(TaskInstance).filter(\n TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(\n synchronize_session=False)\n session.query(TaskFail).filter(\n TaskFail.dag_id.in_(dag_ids_to_clean)).delete(\n synchronize_session=False)\n session.commit()\n session.close()\n\n def test_schedule_dag_no_previous_runs(self):\n \"\"\"\n Tests scheduling a dag with no previous runs\n \"\"\"\n dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)\n dag.add_task(BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n datetime(2015, 1, 2, 0, 0),\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n dag.clear()\n\n def test_schedule_dag_relativedelta(self):\n \"\"\"\n Tests scheduling a dag with a relativedelta schedule_interval\n \"\"\"\n delta = relativedelta(hours=+1)\n dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,\n schedule_interval=delta)\n dag.add_task(BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n datetime(2015, 1, 2, 0, 0),\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n self.assertIsNotNone(dag_run2)\n 
self.assertEqual(dag.dag_id, dag_run2.dag_id)\n self.assertIsNotNone(dag_run2.run_id)\n self.assertNotEqual('', dag_run2.run_id)\n self.assertEqual(\n datetime(2015, 1, 2, 0, 0) + delta,\n dag_run2.execution_date,\n msg='dag_run2.execution_date did not match expectation: {0}'\n .format(dag_run2.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run2.state)\n self.assertFalse(dag_run2.external_trigger)\n dag.clear()\n\n def test_schedule_dag_fake_scheduled_previous(self):\n \"\"\"\n Test scheduling a dag where there is a prior DagRun\n which has the same run_id as the next run should have\n \"\"\"\n delta = timedelta(hours=1)\n\n dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,\n schedule_interval=delta,\n start_date=DEFAULT_DATE)\n dag.add_task(BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=DEFAULT_DATE))\n\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),\n execution_date=DEFAULT_DATE,\n state=State.SUCCESS,\n external_trigger=True)\n dag_run = scheduler.create_dag_run(dag)\n self.assertIsNotNone(dag_run)\n self.assertEqual(dag.dag_id, dag_run.dag_id)\n self.assertIsNotNone(dag_run.run_id)\n self.assertNotEqual('', dag_run.run_id)\n self.assertEqual(\n DEFAULT_DATE + delta,\n dag_run.execution_date,\n msg='dag_run.execution_date did not match expectation: {0}'\n .format(dag_run.execution_date)\n )\n self.assertEqual(State.RUNNING, dag_run.state)\n self.assertFalse(dag_run.external_trigger)\n\n def test_schedule_dag_once(self):\n \"\"\"\n Tests scheduling a dag scheduled for @once - should be scheduled the first time\n it is called, and not scheduled the second.\n \"\"\"\n dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)\n dag.schedule_interval = '@once'\n dag.add_task(BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)\n\n self.assertIsNotNone(dag_run)\n self.assertIsNone(dag_run2)\n dag.clear()\n\n def test_fractional_seconds(self):\n \"\"\"\n Tests if fractional seconds are stored in the database\n \"\"\"\n dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')\n dag.schedule_interval = '@once'\n dag.add_task(BaseOperator(\n task_id=\"faketastic\",\n owner='Also fake',\n start_date=datetime(2015, 1, 2, 0, 0)))\n\n start_date = timezone.utcnow()\n\n run = dag.create_dagrun(\n run_id='test_' + start_date.isoformat(),\n execution_date=start_date,\n start_date=start_date,\n state=State.RUNNING,\n external_trigger=False\n )\n\n run.refresh_from_db()\n\n self.assertEqual(start_date, run.execution_date,\n \"dag run execution_date loses precision\")\n self.assertEqual(start_date, run.start_date,\n \"dag run start_date loses precision \")\n\n def test_schedule_dag_start_end_dates(self):\n \"\"\"\n Tests that an attempt to schedule a task after the Dag's end_date\n does not succeed.\n \"\"\"\n delta = timedelta(hours=1)\n runs = 3\n start_date = DEFAULT_DATE\n end_date = start_date + (runs - 1) * delta\n\n dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,\n start_date=start_date,\n end_date=end_date,\n schedule_interval=delta)\n dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))\n\n # Create and schedule the dag runs\n dag_runs = []\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n for _ in range(runs):\n 
dag_runs.append(scheduler.create_dag_run(dag))\n\n additional_dag_run = scheduler.create_dag_run(dag)\n\n for dag_run in dag_runs:\n self.assertIsNotNone(dag_run)\n\n self.assertIsNone(additional_dag_run)\n\n def test_schedule_dag_no_end_date_up_to_today_only(self):\n \"\"\"\n Tests that a Dag created without an end_date can only be scheduled up\n to and including the current datetime.\n\n For example, if today is 2016-01-01 and we are scheduling from a\n start_date of 2015-01-01, only jobs up to, but not including\n 2016-01-01 should be scheduled.\n \"\"\"\n session = settings.Session()\n delta = timedelta(days=1)\n now = utcnow()\n start_date = now.subtract(weeks=1)\n\n runs = (now - start_date).days\n\n dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,\n start_date=start_date,\n schedule_interval=delta)\n dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))\n\n dag_runs = []\n scheduler = jobs.SchedulerJob(**self.default_scheduler_args)\n for _ in range(runs):\n dag_run = scheduler.create_dag_run(dag)\n dag_runs.append(dag_run)\n\n # Mark the DagRun as complete\n dag_run.state = State.SUCCESS\n session.merge(dag_run)\n session.commit()\n\n # Attempt to schedule an additional dag run (for 2016-01-01)\n additional_dag_run = scheduler.create_dag_run(dag)\n\n for dag_run in dag_runs:\n self.assertIsNotNone(dag_run)\n\n self.assertIsNone(additional_dag_run)\n\n def test_confirm_unittest_mod(self):\n self.assertTrue(conf.get('core', 'unit_test_mode'))\n\n def test_pickling(self):\n dp = self.dag.pickle()\n self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)\n\n def test_rich_comparison_ops(self):\n\n class DAGsubclass(DAG):\n pass\n\n dag_eq = DAG(TEST_DAG_ID, default_args=self.args)\n\n dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)\n dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)\n\n dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)\n dag_subclass_diff_name = DAGsubclass(\n TEST_DAG_ID + '2', default_args=self.args)\n\n for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:\n d.last_loaded = self.dag.last_loaded\n\n # test identity equality\n self.assertEqual(self.dag, self.dag)\n\n # test dag (in)equality based on _comps\n self.assertEqual(dag_eq, self.dag)\n self.assertNotEqual(dag_diff_name, self.dag)\n self.assertNotEqual(dag_diff_load_time, self.dag)\n\n # test dag inequality based on type even if _comps happen to match\n self.assertNotEqual(dag_subclass, self.dag)\n\n # a dag should equal an unpickled version of itself\n d = pickle.dumps(self.dag)\n self.assertEqual(pickle.loads(d), self.dag)\n\n # dags are ordered based on dag_id no matter what the type is\n self.assertLess(self.dag, dag_diff_name)\n self.assertGreater(self.dag, dag_diff_load_time)\n self.assertLess(self.dag, dag_subclass_diff_name)\n\n # greater than should have been created automatically by functools\n self.assertGreater(dag_diff_name, self.dag)\n\n # hashes are non-random and match equality\n self.assertEqual(hash(self.dag), hash(self.dag))\n self.assertEqual(hash(dag_eq), hash(self.dag))\n self.assertNotEqual(hash(dag_diff_name), hash(self.dag))\n self.assertNotEqual(hash(dag_subclass), hash(self.dag))\n\n def test_check_operators(self):\n\n conn_id = \"sqlite_default\"\n\n captainHook = BaseHook.get_hook(conn_id=conn_id)\n captainHook.run(\"CREATE TABLE operator_test_table (a, b)\")\n captainHook.run(\"insert into operator_test_table values (1,2)\")\n\n t = CheckOperator(\n task_id='check',\n sql=\"select count(*) 
from operator_test_table\",\n conn_id=conn_id,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n t = ValueCheckOperator(\n task_id='value_check',\n pass_value=95,\n tolerance=0.1,\n conn_id=conn_id,\n sql=\"SELECT 100\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n captainHook.run(\"drop table operator_test_table\")\n\n def test_clear_api(self):\n task = self.dag_bash.tasks[0]\n task.clear(\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,\n upstream=True, downstream=True)\n ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)\n ti.are_dependents_done()\n\n def test_illegal_args(self):\n \"\"\"\n Tests that Operators reject illegal arguments\n \"\"\"\n with warnings.catch_warnings(record=True) as w:\n BashOperator(\n task_id='test_illegal_args',\n bash_command='echo success',\n dag=self.dag,\n illegal_argument_1234='hello?')\n self.assertTrue(\n issubclass(w[0].category, PendingDeprecationWarning))\n self.assertIn(\n ('Invalid arguments were passed to BashOperator '\n '(task_id: test_illegal_args).'),\n w[0].message.args[0])\n\n def test_bash_operator(self):\n t = BashOperator(\n task_id='test_bash_operator',\n bash_command=\"echo success\",\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_bash_operator_multi_byte_output(self):\n t = BashOperator(\n task_id='test_multi_byte_bash_operator',\n bash_command=\"echo \\u2600\",\n dag=self.dag,\n output_encoding='utf-8')\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_bash_operator_kill(self):\n import psutil\n sleep_time = \"100%d\" % os.getpid()\n t = BashOperator(\n task_id='test_bash_operator_kill',\n execution_timeout=timedelta(seconds=1),\n bash_command=\"/bin/bash -c 'sleep %s'\" % sleep_time,\n dag=self.dag)\n self.assertRaises(\n exceptions.AirflowTaskTimeout,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)\n sleep(2)\n pid = -1\n for proc in psutil.process_iter():\n if proc.cmdline() == ['sleep', sleep_time]:\n pid = proc.pid\n if pid != -1:\n os.kill(pid, signal.SIGTERM)\n self.fail(\"BashOperator's subprocess still running after stopping on timeout!\")\n\n def test_on_failure_callback(self):\n # Annoying workaround for nonlocal not existing in python 2\n data = {'called': False}\n\n def check_failure(context, test_case=self):\n data['called'] = True\n error = context.get('exception')\n test_case.assertIsInstance(error, AirflowException)\n\n t = BashOperator(\n task_id='check_on_failure_callback',\n bash_command=\"exit 1\",\n dag=self.dag,\n on_failure_callback=check_failure)\n self.assertRaises(\n exceptions.AirflowException,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n self.assertTrue(data['called'])\n\n def test_trigger_dagrun(self):\n def trigga(_, obj):\n if True:\n return obj\n\n t = TriggerDagRunOperator(\n task_id='test_trigger_dagrun',\n trigger_dag_id='example_bash_operator',\n python_callable=trigga,\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_dryrun(self):\n t = BashOperator(\n task_id='test_dryrun',\n bash_command=\"echo success\",\n dag=self.dag)\n t.dry_run()\n\n def test_sqlite(self):\n import airflow.operators.sqlite_operator\n t = airflow.operators.sqlite_operator.SqliteOperator(\n task_id='time_sqlite',\n sql=\"CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))\",\n dag=self.dag)\n 
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_timeout(self):\n t = PythonOperator(\n task_id='test_timeout',\n execution_timeout=timedelta(seconds=1),\n python_callable=lambda: sleep(5),\n dag=self.dag)\n self.assertRaises(\n exceptions.AirflowTaskTimeout,\n t.run,\n start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_python_op(self):\n def test_py_op(templates_dict, ds, **kwargs):\n if not templates_dict['ds'] == ds:\n raise Exception(\"failure\")\n\n t = PythonOperator(\n task_id='test_py_op',\n python_callable=test_py_op,\n templates_dict={'ds': \"{{ ds }}\"},\n dag=self.dag)\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_complex_template(self):\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field['bar'][1],\n context['ds'])\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field={\n 'foo': '123',\n 'bar': ['baz', '{{ ds }}']\n },\n dag=self.dag)\n t.execute = verify_templated_field\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_template_with_variable(self):\n \"\"\"\n Test the availability of variables in templates\n \"\"\"\n val = {\n 'test_value': 'a test value'\n }\n Variable.set(\"a_variable\", val['test_value'])\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n val['test_value'])\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.value.a_variable }}',\n dag=self.dag)\n t.execute = verify_templated_field\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_template_with_json_variable(self):\n \"\"\"\n Test the availability of variables (serialized as JSON) in templates\n \"\"\"\n val = {\n 'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}\n }\n Variable.set(\"a_variable\", val['test_value'], serialize_json=True)\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n val['test_value']['obj']['v2'])\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.json.a_variable.obj.v2 }}',\n dag=self.dag)\n t.execute = verify_templated_field\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_template_with_json_variable_as_value(self):\n \"\"\"\n Test the availability of variables (serialized as JSON) in templates, but\n accessed as a value\n \"\"\"\n val = {\n 'test_value': {'foo': 'bar'}\n }\n Variable.set(\"a_variable\", val['test_value'], serialize_json=True)\n\n def verify_templated_field(context):\n self.assertEqual(context['ti'].task.some_templated_field,\n '{\\n \"foo\": \"bar\"\\n}')\n\n t = OperatorSubclass(\n task_id='test_complex_template',\n some_templated_field='{{ var.value.a_variable }}',\n dag=self.dag)\n t.execute = verify_templated_field\n t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n\n def test_template_non_bool(self):\n \"\"\"\n Test templates can handle objects with no sense of truthiness\n \"\"\"\n\n class NonBoolObject:\n def __len__(self):\n return NotImplemented\n\n def __bool__(self):\n return NotImplemented\n\n t = OperatorSubclass(\n task_id='test_bad_template_obj',\n some_templated_field=NonBoolObject(),\n dag=self.dag)\n t.resolve_template_files()\n\n def test_task_get_template(self):\n TI = TaskInstance\n ti = TI(\n task=self.runme_0, 
execution_date=DEFAULT_DATE)\n ti.dag = self.dag_bash\n ti.run(ignore_ti_state=True)\n context = ti.get_template_context()\n\n # DEFAULT DATE is 2015-01-01\n self.assertEqual(context['ds'], '2015-01-01')\n self.assertEqual(context['ds_nodash'], '20150101')\n\n # next_ds is 2015-01-02 as the dag interval is daily\n self.assertEqual(context['next_ds'], '2015-01-02')\n self.assertEqual(context['next_ds_nodash'], '20150102')\n\n # prev_ds is 2014-12-31 as the dag interval is daily\n self.assertEqual(context['prev_ds'], '2014-12-31')\n self.assertEqual(context['prev_ds_nodash'], '20141231')\n\n self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')\n self.assertEqual(context['ts_nodash'], '20150101T000000')\n self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')\n\n self.assertEqual(context['yesterday_ds'], '2014-12-31')\n self.assertEqual(context['yesterday_ds_nodash'], '20141231')\n\n self.assertEqual(context['tomorrow_ds'], '2015-01-02')\n self.assertEqual(context['tomorrow_ds_nodash'], '20150102')\n\n def test_local_task_job(self):\n TI = TaskInstance\n ti = TI(\n task=self.runme_0, execution_date=DEFAULT_DATE)\n job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)\n job.run()\n\n def test_raw_job(self):\n TI = TaskInstance\n ti = TI(\n task=self.runme_0, execution_date=DEFAULT_DATE)\n ti.dag = self.dag_bash\n ti.run(ignore_ti_state=True)\n\n def test_variable_set_get_round_trip(self):\n Variable.set(\"tested_var_set_id\", \"Monday morning breakfast\")\n self.assertEqual(\"Monday morning breakfast\", Variable.get(\"tested_var_set_id\"))\n\n def test_variable_set_get_round_trip_json(self):\n value = {\"a\": 17, \"b\": 47}\n Variable.set(\"tested_var_set_id\", value, serialize_json=True)\n self.assertEqual(value, Variable.get(\"tested_var_set_id\", deserialize_json=True))\n\n def test_get_non_existing_var_should_return_default(self):\n default_value = \"some default val\"\n self.assertEqual(default_value, Variable.get(\"thisIdDoesNotExist\",\n default_var=default_value))\n\n def test_get_non_existing_var_should_raise_key_error(self):\n with self.assertRaises(KeyError):\n Variable.get(\"thisIdDoesNotExist\")\n\n def test_get_non_existing_var_with_none_default_should_return_none(self):\n self.assertIsNone(Variable.get(\"thisIdDoesNotExist\", default_var=None))\n\n def test_get_non_existing_var_should_not_deserialize_json_default(self):\n default_value = \"}{ this is a non JSON default }{\"\n self.assertEqual(default_value, Variable.get(\"thisIdDoesNotExist\",\n default_var=default_value,\n deserialize_json=True))\n\n def test_variable_setdefault_round_trip(self):\n key = \"tested_var_setdefault_1_id\"\n value = \"Monday morning breakfast in Paris\"\n Variable.setdefault(key, value)\n self.assertEqual(value, Variable.get(key))\n\n def test_variable_setdefault_round_trip_json(self):\n key = \"tested_var_setdefault_2_id\"\n value = {\"city\": 'Paris', \"Happiness\": True}\n Variable.setdefault(key, value, deserialize_json=True)\n self.assertEqual(value, Variable.get(key, deserialize_json=True))\n\n def test_variable_setdefault_existing_json(self):\n key = \"tested_var_setdefault_2_id\"\n value = {\"city\": 'Paris', \"Happiness\": True}\n Variable.set(key, value, serialize_json=True)\n val = Variable.setdefault(key, value, deserialize_json=True)\n # Check the returned value, and the stored value are handled correctly.\n self.assertEqual(value, val)\n self.assertEqual(value, Variable.get(key, deserialize_json=True))\n\n def test_variable_delete(self):\n key = 
\"tested_var_delete\"\n value = \"to be deleted\"\n\n # No-op if the variable doesn't exist\n Variable.delete(key)\n with self.assertRaises(KeyError):\n Variable.get(key)\n\n # Set the variable\n Variable.set(key, value)\n self.assertEqual(value, Variable.get(key))\n\n # Delete the variable\n Variable.delete(key)\n with self.assertRaises(KeyError):\n Variable.get(key)\n\n def test_parameterized_config_gen(self):\n\n cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)\n\n # making sure some basic building blocks are present:\n self.assertIn(\"[core]\", cfg)\n self.assertIn(\"dags_folder\", cfg)\n self.assertIn(\"sql_alchemy_conn\", cfg)\n self.assertIn(\"fernet_key\", cfg)\n\n # making sure replacement actually happened\n self.assertNotIn(\"{AIRFLOW_HOME}\", cfg)\n self.assertNotIn(\"{FERNET_KEY}\", cfg)\n\n def test_config_use_original_when_original_and_fallback_are_present(self):\n self.assertTrue(conf.has_option(\"core\", \"FERNET_KEY\"))\n self.assertFalse(conf.has_option(\"core\", \"FERNET_KEY_CMD\"))\n\n FERNET_KEY = conf.get('core', 'FERNET_KEY')\n\n with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):\n FALLBACK_FERNET_KEY = conf.get(\n \"core\",\n \"FERNET_KEY\"\n )\n\n self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)\n\n def test_config_throw_error_when_original_and_fallback_is_absent(self):\n self.assertTrue(conf.has_option(\"core\", \"FERNET_KEY\"))\n self.assertFalse(conf.has_option(\"core\", \"FERNET_KEY_CMD\"))\n\n with conf_vars({('core', 'fernet_key'): None}):\n with self.assertRaises(AirflowConfigException) as cm:\n conf.get(\"core\", \"FERNET_KEY\")\n\n exception = str(cm.exception)\n message = \"section/key [core/fernet_key] not found in config\"\n self.assertEqual(message, exception)\n\n def test_config_override_original_when_non_empty_envvar_is_provided(self):\n key = \"AIRFLOW__CORE__FERNET_KEY\"\n value = \"some value\"\n self.assertNotIn(key, os.environ)\n\n os.environ[key] = value\n FERNET_KEY = conf.get('core', 'FERNET_KEY')\n self.assertEqual(value, FERNET_KEY)\n\n # restore the envvar back to the original state\n del os.environ[key]\n\n def test_config_override_original_when_empty_envvar_is_provided(self):\n key = \"AIRFLOW__CORE__FERNET_KEY\"\n value = \"\"\n self.assertNotIn(key, os.environ)\n\n os.environ[key] = value\n FERNET_KEY = conf.get('core', 'FERNET_KEY')\n self.assertEqual(value, FERNET_KEY)\n\n # restore the envvar back to the original state\n del os.environ[key]\n\n def test_round_time(self):\n\n rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))\n self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)\n\n rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))\n self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)\n\n rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)\n\n rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)\n\n rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)\n\n rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(\n 2015, 9, 14, 0, 0))\n self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)\n\n def test_infer_time_unit(self):\n\n self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))\n\n self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))\n\n self.assertEqual('hours', 
infer_time_unit([100000, 50000, 10000, 20000]))\n\n self.assertEqual('days', infer_time_unit([200000, 100000]))\n\n def test_scale_time_units(self):\n\n # use assert_almost_equal from numpy.testing since we are comparing\n # floating point arrays\n arr1 = scale_time_units([130, 5400, 10], 'minutes')\n assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)\n\n arr2 = scale_time_units([110, 50, 10, 100], 'seconds')\n assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)\n\n arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')\n assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],\n decimal=3)\n\n arr4 = scale_time_units([200000, 100000], 'days')\n assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)\n\n def test_bad_trigger_rule(self):\n with self.assertRaises(AirflowException):\n DummyOperator(\n task_id='test_bad_trigger',\n trigger_rule=\"non_existent\",\n dag=self.dag)\n\n def test_terminate_task(self):\n \"\"\"If a task instance's db state get deleted, it should fail\"\"\"\n TI = TaskInstance\n dag = self.dagbag.dags.get('test_utils')\n task = dag.task_dict.get('sleeps_forever')\n\n ti = TI(task=task, execution_date=DEFAULT_DATE)\n job = jobs.LocalTaskJob(\n task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())\n\n # Running task instance asynchronously\n p = multiprocessing.Process(target=job.run)\n p.start()\n sleep(5)\n settings.engine.dispose()\n session = settings.Session()\n ti.refresh_from_db(session=session)\n # making sure it's actually running\n self.assertEqual(State.RUNNING, ti.state)\n ti = session.query(TI).filter_by(\n dag_id=task.dag_id,\n task_id=task.task_id,\n execution_date=DEFAULT_DATE\n ).one()\n\n # deleting the instance should result in a failure\n session.delete(ti)\n session.commit()\n # waiting for the async task to finish\n p.join()\n\n # making sure that the task ended up as failed\n ti.refresh_from_db(session=session)\n self.assertEqual(State.FAILED, ti.state)\n session.close()\n\n def test_task_fail_duration(self):\n \"\"\"If a task fails, the duration should be recorded in TaskFail\"\"\"\n\n p = BashOperator(\n task_id='pass_sleepy',\n bash_command='sleep 3',\n dag=self.dag)\n f = BashOperator(\n task_id='fail_sleepy',\n bash_command='sleep 5',\n execution_timeout=timedelta(seconds=3),\n retry_delay=timedelta(seconds=0),\n dag=self.dag)\n session = settings.Session()\n try:\n p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n except Exception:\n pass\n try:\n f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n except Exception:\n pass\n p_fails = session.query(TaskFail).filter_by(\n task_id='pass_sleepy',\n dag_id=self.dag.dag_id,\n execution_date=DEFAULT_DATE).all()\n f_fails = session.query(TaskFail).filter_by(\n task_id='fail_sleepy',\n dag_id=self.dag.dag_id,\n execution_date=DEFAULT_DATE).all()\n\n self.assertEqual(0, len(p_fails))\n self.assertEqual(1, len(f_fails))\n self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)\n\n def test_run_command(self):\n write = r'sys.stdout.buffer.write(\"\\u1000foo\".encode(\"utf8\"))'\n\n cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)\n\n self.assertEqual(run_command(\"python -c '{0}'\".format(cmd)), '\\u1000foo')\n\n self.assertEqual(run_command('echo \"foo bar\"'), 'foo bar\\n')\n self.assertRaises(AirflowConfigException, run_command, 'bash -c \"exit 1\"')\n\n def test_trigger_dagrun_with_execution_date(self):\n utc_now = timezone.utcnow()\n run_id = 'trig__' + 
utc_now.isoformat()\n\n def payload_generator(context, object): # pylint: disable=unused-argument\n object.run_id = run_id\n return object\n\n task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',\n trigger_dag_id='example_bash_operator',\n python_callable=payload_generator,\n execution_date=utc_now,\n dag=self.dag)\n task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)\n self.assertEqual(len(dag_runs), 1)\n dag_run = dag_runs[0]\n self.assertEqual(dag_run.execution_date, utc_now)\n\n def test_trigger_dagrun_with_str_execution_date(self):\n utc_now_str = timezone.utcnow().isoformat()\n self.assertIsInstance(utc_now_str, (str,))\n run_id = 'trig__' + utc_now_str\n\n def payload_generator(context, object): # pylint: disable=unused-argument\n object.run_id = run_id\n return object\n\n task = TriggerDagRunOperator(\n task_id='test_trigger_dagrun_with_str_execution_date',\n trigger_dag_id='example_bash_operator',\n python_callable=payload_generator,\n execution_date=utc_now_str,\n dag=self.dag)\n task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)\n dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)\n self.assertEqual(len(dag_runs), 1)\n dag_run = dag_runs[0]\n self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)\n\n def test_trigger_dagrun_with_templated_execution_date(self):\n task = TriggerDagRunOperator(\n task_id='test_trigger_dagrun_with_str_execution_date',\n trigger_dag_id='example_bash_operator',\n execution_date='{{ execution_date }}',\n dag=self.dag)\n\n self.assertTrue(isinstance(task.execution_date, str))\n self.assertEqual(task.execution_date, '{{ execution_date }}')\n\n ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)\n ti.render_templates()\n self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)\n\n def test_externally_triggered_dagrun(self):\n TI = TaskInstance\n\n # Create the dagrun between two \"scheduled\" execution dates of the DAG\n EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)\n EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')\n EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')\n\n dag = DAG(\n TEST_DAG_ID,\n default_args=self.args,\n schedule_interval=timedelta(weeks=1),\n start_date=DEFAULT_DATE)\n task = DummyOperator(task_id='test_externally_triggered_dag_context',\n dag=dag)\n dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),\n execution_date=EXECUTION_DATE,\n state=State.RUNNING,\n external_trigger=True)\n task.run(\n start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)\n\n ti = TI(task=task, execution_date=EXECUTION_DATE)\n context = ti.get_template_context()\n\n # next_ds/prev_ds should be the execution date for manually triggered runs\n self.assertEqual(context['next_ds'], EXECUTION_DS)\n self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)\n\n self.assertEqual(context['prev_ds'], EXECUTION_DS)\n self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)\n\n\nclass TestCli(unittest.TestCase):\n\n TEST_USER1_EMAIL = '[email protected]'\n TEST_USER2_EMAIL = '[email protected]'\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls._cleanup()\n\n def setUp(self):\n super().setUp()\n from airflow.www import app as application\n self.app, self.appbuilder = application.create_app(session=Session, testing=True)\n self.app.config['TESTING'] = True\n\n self.parser = cli.CLIFactory.get_parser()\n self.dagbag = 
DagBag(dag_folder=DEV_NULL, include_examples=True)\n settings.configure_orm()\n self.session = Session\n\n def tearDown(self):\n self._cleanup(session=self.session)\n for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:\n test_user = self.appbuilder.sm.find_user(email=email)\n if test_user:\n self.appbuilder.sm.del_register_user(test_user)\n for role_name in ['FakeTeamA', 'FakeTeamB']:\n if self.appbuilder.sm.find_role(role_name):\n self.appbuilder.sm.delete_role(role_name)\n\n super().tearDown()\n\n @staticmethod\n def _cleanup(session=None):\n if session is None:\n session = Session()\n\n session.query(Pool).delete()\n session.query(Variable).delete()\n session.commit()\n session.close()\n\n def test_cli_list_dags(self):\n args = self.parser.parse_args(['dags', 'list', '--report'])\n cli.list_dags(args)\n\n def test_cli_list_dag_runs(self):\n cli.trigger_dag(self.parser.parse_args([\n 'dags', 'trigger', 'example_bash_operator', ]))\n args = self.parser.parse_args(['dags', 'list_runs',\n 'example_bash_operator',\n '--no_backfill'])\n cli.list_dag_runs(args)\n\n def test_cli_create_user_random_password(self):\n args = self.parser.parse_args([\n 'users', 'create', '--username', 'test1', '--lastname', 'doe',\n '--firstname', 'jon',\n '--email', '[email protected]', '--role', 'Viewer', '--use_random_password'\n ])\n cli.users_create(args)\n\n def test_cli_create_user_supplied_password(self):\n args = self.parser.parse_args([\n 'users', 'create', '--username', 'test2', '--lastname', 'doe',\n '--firstname', 'jon',\n '--email', '[email protected]', '--role', 'Viewer', '--password', 'test'\n ])\n cli.users_create(args)\n\n def test_cli_delete_user(self):\n args = self.parser.parse_args([\n 'users', 'create', '--username', 'test3', '--lastname', 'doe',\n '--firstname', 'jon',\n '--email', '[email protected]', '--role', 'Viewer', '--use_random_password'\n ])\n cli.users_create(args)\n args = self.parser.parse_args([\n 'users', 'delete', '--username', 'test3',\n ])\n cli.users_delete(args)\n\n def test_cli_list_users(self):\n for i in range(0, 3):\n args = self.parser.parse_args([\n 'users', 'create', '--username', 'user{}'.format(i), '--lastname',\n 'doe', '--firstname', 'jon',\n '--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',\n '--use_random_password'\n ])\n cli.users_create(args)\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.users_list(self.parser.parse_args(['users', 'list']))\n stdout = mock_stdout.getvalue()\n for i in range(0, 3):\n self.assertIn('user{}'.format(i), stdout)\n\n def test_cli_import_users(self):\n def assertUserInRoles(email, roles):\n for role in roles:\n self.assertTrue(self._does_user_belong_to_role(email, role))\n\n def assertUserNotInRoles(email, roles):\n for role in roles:\n self.assertFalse(self._does_user_belong_to_role(email, role))\n\n assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])\n assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])\n users = [\n {\n \"username\": \"imported_user1\", \"lastname\": \"doe1\",\n \"firstname\": \"jon\", \"email\": self.TEST_USER1_EMAIL,\n \"roles\": [\"Admin\", \"Op\"]\n },\n {\n \"username\": \"imported_user2\", \"lastname\": \"doe2\",\n \"firstname\": \"jon\", \"email\": self.TEST_USER2_EMAIL,\n \"roles\": [\"Public\"]\n }\n ]\n self._import_users_from_file(users)\n\n assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])\n assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])\n\n users = [\n {\n \"username\": \"imported_user1\", \"lastname\": \"doe1\",\n 
\"firstname\": \"jon\", \"email\": self.TEST_USER1_EMAIL,\n \"roles\": [\"Public\"]\n },\n {\n \"username\": \"imported_user2\", \"lastname\": \"doe2\",\n \"firstname\": \"jon\", \"email\": self.TEST_USER2_EMAIL,\n \"roles\": [\"Admin\"]\n }\n ]\n self._import_users_from_file(users)\n\n assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])\n assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])\n assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])\n assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])\n\n def test_cli_export_users(self):\n user1 = {\"username\": \"imported_user1\", \"lastname\": \"doe1\",\n \"firstname\": \"jon\", \"email\": self.TEST_USER1_EMAIL,\n \"roles\": [\"Public\"]}\n user2 = {\"username\": \"imported_user2\", \"lastname\": \"doe2\",\n \"firstname\": \"jon\", \"email\": self.TEST_USER2_EMAIL,\n \"roles\": [\"Admin\"]}\n self._import_users_from_file([user1, user2])\n\n users_filename = self._export_users_to_file()\n with open(users_filename, mode='r') as file:\n retrieved_users = json.loads(file.read())\n os.remove(users_filename)\n\n # ensure that an export can be imported\n self._import_users_from_file(retrieved_users)\n\n def find_by_username(username):\n matches = [u for u in retrieved_users\n if u['username'] == username]\n if not matches:\n self.fail(\"Couldn't find user with username {}\".format(username))\n else:\n matches[0].pop('id') # this key not required for import\n return matches[0]\n\n self.assertEqual(find_by_username('imported_user1'), user1)\n self.assertEqual(find_by_username('imported_user2'), user2)\n\n def _import_users_from_file(self, user_list):\n json_file_content = json.dumps(user_list)\n f = NamedTemporaryFile(delete=False)\n try:\n f.write(json_file_content.encode())\n f.flush()\n\n args = self.parser.parse_args([\n 'users', 'import', f.name\n ])\n cli.users_import(args)\n finally:\n os.remove(f.name)\n\n def _export_users_to_file(self):\n f = NamedTemporaryFile(delete=False)\n args = self.parser.parse_args([\n 'users', 'export', f.name\n ])\n cli.users_export(args)\n return f.name\n\n def _does_user_belong_to_role(self, email, rolename):\n user = self.appbuilder.sm.find_user(email=email)\n role = self.appbuilder.sm.find_role(rolename)\n if user and role:\n return role in user.roles\n\n return False\n\n def test_cli_add_user_role(self):\n args = self.parser.parse_args([\n 'users', 'create', '--username', 'test4', '--lastname', 'doe',\n '--firstname', 'jon',\n '--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'\n ])\n cli.users_create(args)\n\n self.assertFalse(\n self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,\n rolename='Op'),\n \"User should not yet be a member of role 'Op'\"\n )\n\n args = self.parser.parse_args([\n 'users', 'add_role', '--username', 'test4', '--role', 'Op'\n ])\n cli.users_manage_role(args, remove=False)\n\n self.assertTrue(\n self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,\n rolename='Op'),\n \"User should have been added to role 'Op'\"\n )\n\n def test_cli_remove_user_role(self):\n args = self.parser.parse_args([\n 'users', 'create', '--username', 'test4', '--lastname', 'doe',\n '--firstname', 'jon',\n '--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'\n ])\n cli.users_create(args)\n\n self.assertTrue(\n self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,\n rolename='Viewer'),\n \"User should have been created with role 'Viewer'\"\n )\n\n args = self.parser.parse_args([\n 'users', 'remove_role', '--username', 'test4', 
'--role', 'Viewer'\n ])\n cli.users_manage_role(args, remove=True)\n\n self.assertFalse(\n self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,\n rolename='Viewer'),\n \"User should have been removed from role 'Viewer'\"\n )\n\n @mock.patch(\"airflow.bin.cli.DagBag\")\n def test_cli_sync_perm(self, dagbag_mock):\n self.expect_dagbag_contains([\n DAG('has_access_control',\n access_control={\n 'Public': {'can_dag_read'}\n }),\n DAG('no_access_control')\n ], dagbag_mock)\n self.appbuilder.sm = mock.Mock()\n\n args = self.parser.parse_args([\n 'sync_perm'\n ])\n cli.sync_perm(args)\n\n assert self.appbuilder.sm.sync_roles.call_count == 1\n\n self.assertEqual(2,\n len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))\n self.appbuilder.sm.sync_perm_for_dag.assert_any_call(\n 'has_access_control',\n {'Public': {'can_dag_read'}}\n )\n self.appbuilder.sm.sync_perm_for_dag.assert_any_call(\n 'no_access_control',\n None,\n )\n\n def expect_dagbag_contains(self, dags, dagbag_mock):\n dagbag = mock.Mock()\n dagbag.dags = {dag.dag_id: dag for dag in dags}\n dagbag_mock.return_value = dagbag\n\n def test_cli_create_roles(self):\n self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))\n self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))\n\n args = self.parser.parse_args([\n 'roles', 'create', 'FakeTeamA', 'FakeTeamB'\n ])\n cli.roles_create(args)\n\n self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))\n self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))\n\n def test_cli_create_roles_is_reentrant(self):\n self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))\n self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))\n\n args = self.parser.parse_args([\n 'roles', 'create', 'FakeTeamA', 'FakeTeamB'\n ])\n\n cli.roles_create(args)\n\n self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))\n self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))\n\n def test_cli_list_roles(self):\n self.appbuilder.sm.add_role('FakeTeamA')\n self.appbuilder.sm.add_role('FakeTeamB')\n\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.roles_list(self.parser.parse_args(['roles', 'list']))\n stdout = mock_stdout.getvalue()\n\n self.assertIn('FakeTeamA', stdout)\n self.assertIn('FakeTeamB', stdout)\n\n def test_cli_list_tasks(self):\n for dag_id in self.dagbag.dags.keys():\n args = self.parser.parse_args(['tasks', 'list', dag_id])\n cli.list_tasks(args)\n\n args = self.parser.parse_args([\n 'tasks', 'list', 'example_bash_operator', '--tree'])\n cli.list_tasks(args)\n\n def test_cli_list_jobs(self):\n args = self.parser.parse_args(['dags', 'list_jobs'])\n cli.list_jobs(args)\n\n def test_cli_list_jobs_with_args(self):\n args = self.parser.parse_args(['dags', 'list_jobs', '--dag_id',\n 'example_bash_operator',\n '--state', 'success',\n '--limit', '100'])\n cli.list_jobs(args)\n\n @mock.patch(\"airflow.bin.cli.db.initdb\")\n def test_cli_initdb(self, initdb_mock):\n cli.initdb(self.parser.parse_args(['db', 'init']))\n\n initdb_mock.assert_called_once_with()\n\n @mock.patch(\"airflow.bin.cli.db.resetdb\")\n def test_cli_resetdb(self, resetdb_mock):\n cli.resetdb(self.parser.parse_args(['db', 'reset', '--yes']))\n\n resetdb_mock.assert_called_once_with()\n\n def test_cli_connections_list(self):\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.connections_list(self.parser.parse_args(['connections', 'list']))\n stdout = mock_stdout.getvalue()\n conns = [[x.strip(\"'\") for x in re.findall(r\"'\\w+'\", 
line)[:2]]\n for ii, line in enumerate(stdout.split('\\n'))\n if ii % 2 == 1]\n conns = [conn for conn in conns if len(conn) > 0]\n\n # Assert that some of the connections are present in the output as\n # expected:\n self.assertIn(['aws_default', 'aws'], conns)\n self.assertIn(['hive_cli_default', 'hive_cli'], conns)\n self.assertIn(['emr_default', 'emr'], conns)\n self.assertIn(['mssql_default', 'mssql'], conns)\n self.assertIn(['mysql_default', 'mysql'], conns)\n self.assertIn(['postgres_default', 'postgres'], conns)\n self.assertIn(['wasb_default', 'wasb'], conns)\n self.assertIn(['segment_default', 'segment'], conns)\n\n def test_cli_connections_list_redirect(self):\n cmd = ['airflow', 'connections', 'list']\n with tempfile.TemporaryFile() as fp:\n p = subprocess.Popen(cmd, stdout=fp)\n p.wait()\n self.assertEqual(0, p.returncode)\n\n def test_cli_connections_add_delete(self):\n # Add connections:\n uri = 'postgresql://airflow:airflow@host:5432/airflow'\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new1',\n '--conn_uri=%s' % uri]))\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new2',\n '--conn_uri=%s' % uri]))\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new3',\n '--conn_uri=%s' % uri, '--conn_extra', \"{'extra': 'yes'}\"]))\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new4',\n '--conn_uri=%s' % uri, '--conn_extra', \"{'extra': 'yes'}\"]))\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new5',\n '--conn_type=hive_metastore', '--conn_login=airflow',\n '--conn_password=airflow', '--conn_host=host',\n '--conn_port=9083', '--conn_schema=airflow']))\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new6',\n '--conn_uri', \"\", '--conn_type=google_cloud_platform', '--conn_extra', \"{'extra': 'yes'}\"]))\n stdout = mock_stdout.getvalue()\n\n # Check addition stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n (\"\\tSuccessfully added `conn_id`=new1 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new2 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new3 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new4 : \" +\n \"postgresql://airflow:airflow@host:5432/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new5 : \" +\n \"hive_metastore://airflow:airflow@host:9083/airflow\"),\n (\"\\tSuccessfully added `conn_id`=new6 : \" +\n \"google_cloud_platform://:@:\")\n ])\n\n # Attempt to add duplicate\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new1',\n '--conn_uri=%s' % uri]))\n stdout = mock_stdout.getvalue()\n\n # Check stdout for addition attempt\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tA connection with `conn_id`=new1 already exists\",\n ])\n\n # Attempt to add without providing conn_uri\n with self.assertRaises(SystemExit) as exc:\n cli.connections_add(self.parser.parse_args(\n ['connections', 'add', 'new']))\n\n self.assertEqual(\n exc.exception.code,\n \"The following args are required to add a connection: ['conn_uri or conn_type']\"\n )\n\n # Prepare to add connections\n session = settings.Session()\n extra = {'new1': None,\n 
'new2': None,\n 'new3': \"{'extra': 'yes'}\",\n 'new4': \"{'extra': 'yes'}\"}\n\n # Add connections\n for index in range(1, 6):\n conn_id = 'new%s' % index\n result = (session\n .query(Connection)\n .filter(Connection.conn_id == conn_id)\n .first())\n result = (result.conn_id, result.conn_type, result.host,\n result.port, result.get_extra())\n if conn_id in ['new1', 'new2', 'new3', 'new4']:\n self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,\n extra[conn_id]))\n elif conn_id == 'new5':\n self.assertEqual(result, (conn_id, 'hive_metastore', 'host',\n 9083, None))\n elif conn_id == 'new6':\n self.assertEqual(result, (conn_id, 'google_cloud_platform',\n None, None, \"{'extra': 'yes'}\"))\n\n # Delete connections\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'new1']))\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'new2']))\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'new3']))\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'new4']))\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'new5']))\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'new6']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tSuccessfully deleted `conn_id`=new1\",\n \"\\tSuccessfully deleted `conn_id`=new2\",\n \"\\tSuccessfully deleted `conn_id`=new3\",\n \"\\tSuccessfully deleted `conn_id`=new4\",\n \"\\tSuccessfully deleted `conn_id`=new5\",\n \"\\tSuccessfully deleted `conn_id`=new6\"\n ])\n\n # Check deletions\n for index in range(1, 7):\n conn_id = 'new%s' % index\n result = (session.query(Connection)\n .filter(Connection.conn_id == conn_id)\n .first())\n\n self.assertTrue(result is None)\n\n # Attempt to delete a non-existing connection\n with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:\n cli.connections_delete(self.parser.parse_args(\n ['connections', 'delete', 'fake']))\n stdout = mock_stdout.getvalue()\n\n # Check deletion attempt stdout\n lines = [l for l in stdout.split('\\n') if len(l) > 0]\n self.assertListEqual(lines, [\n \"\\tDid not find a connection with `conn_id`=fake\",\n ])\n\n session.close()\n\n def test_cli_test(self):\n cli.test(self.parser.parse_args([\n 'tasks', 'test', 'example_bash_operator', 'runme_0',\n DEFAULT_DATE.isoformat()]))\n cli.test(self.parser.parse_args([\n 'tasks', 'test', 'example_bash_operator', 'runme_0', '--dry_run',\n DEFAULT_DATE.isoformat()]))\n\n def test_cli_test_with_params(self):\n cli.test(self.parser.parse_args([\n 'tasks', 'test', 'example_passing_params_via_test_command', 'run_this',\n '-tp', '{\"foo\":\"bar\"}', DEFAULT_DATE.isoformat()]))\n cli.test(self.parser.parse_args([\n 'tasks', 'test', 'example_passing_params_via_test_command', 'also_run_this',\n '-tp', '{\"foo\":\"bar\"}', DEFAULT_DATE.isoformat()]))\n\n def test_cli_run(self):\n cli.run(self.parser.parse_args([\n 'tasks', 'run', 'example_bash_operator', 'runme_0', '-l',\n DEFAULT_DATE.isoformat()]))\n\n def test_task_state(self):\n cli.task_state(self.parser.parse_args([\n 'tasks', 'state', 'example_bash_operator', 'runme_0',\n DEFAULT_DATE.isoformat()]))\n\n def test_dag_state(self):\n self.assertEqual(None, cli.dag_state(self.parser.parse_args([\n 'dags', 'state', 'example_bash_operator', 
DEFAULT_DATE.isoformat()])))\n\n def test_pause(self):\n args = self.parser.parse_args([\n 'dags', 'pause', 'example_bash_operator'])\n cli.pause(args)\n self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])\n\n args = self.parser.parse_args([\n 'dags', 'unpause', 'example_bash_operator'])\n cli.unpause(args)\n self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])\n\n def test_subdag_clear(self):\n args = self.parser.parse_args([\n 'tasks', 'clear', 'example_subdag_operator', '--no_confirm'])\n cli.clear(args)\n args = self.parser.parse_args([\n 'tasks', 'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])\n cli.clear(args)\n\n def test_parentdag_downstream_clear(self):\n args = self.parser.parse_args([\n 'tasks', 'clear', 'example_subdag_operator.section-1', '--no_confirm'])\n cli.clear(args)\n args = self.parser.parse_args([\n 'tasks', 'clear', 'example_subdag_operator.section-1', '--no_confirm',\n '--exclude_parentdag'])\n cli.clear(args)\n\n def test_get_dags(self):\n dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator',\n '-c']))\n self.assertEqual(len(dags), 1)\n\n dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'subdag', '-dx', '-c']))\n self.assertGreater(len(dags), 1)\n\n with self.assertRaises(AirflowException):\n cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'foobar', '-dx', '-c']))\n\n def test_process_subdir_path_with_placeholder(self):\n self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))\n\n def test_trigger_dag(self):\n cli.trigger_dag(self.parser.parse_args([\n 'dags', 'trigger', 'example_bash_operator',\n '-c', '{\"foo\": \"bar\"}']))\n self.assertRaises(\n ValueError,\n cli.trigger_dag,\n self.parser.parse_args([\n 'dags', 'trigger', 'example_bash_operator',\n '--run_id', 'trigger_dag_xxx',\n '-c', 'NOT JSON'])\n )\n\n def test_delete_dag(self):\n DM = DagModel\n key = \"my_dag_id\"\n session = settings.Session()\n session.add(DM(dag_id=key))\n session.commit()\n cli.delete_dag(self.parser.parse_args([\n 'dags', 'delete', key, '--yes']))\n self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)\n self.assertRaises(\n AirflowException,\n cli.delete_dag,\n self.parser.parse_args([\n 'dags', 'delete',\n 'does_not_exist_dag',\n '--yes'])\n )\n\n def test_pool_create(self):\n cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))\n self.assertEqual(self.session.query(Pool).count(), 1)\n\n def test_pool_get(self):\n cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))\n try:\n cli.pool_get(self.parser.parse_args(['pools', 'get', 'foo']))\n except Exception as e:\n self.fail(\"The 'pool -g foo' command raised unexpectedly: %s\" % e)\n\n def test_pool_delete(self):\n cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))\n cli.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))\n self.assertEqual(self.session.query(Pool).count(), 0)\n\n def test_pool_import_export(self):\n # Create two pools first\n pool_config_input = {\n \"foo\": {\n \"description\": \"foo_test\",\n \"slots\": 1\n },\n \"baz\": {\n \"description\": \"baz_test\",\n \"slots\": 2\n }\n }\n with open('pools_import.json', mode='w') as file:\n json.dump(pool_config_input, file)\n\n # Import json\n try:\n cli.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import.json']))\n except Exception as e:\n self.fail(\"The 'pool import pools_import.json' 
failed: %s\" % e)\n\n # Export json\n try:\n cli.pool_export(self.parser.parse_args(['pools', 'export', 'pools_export.json']))\n except Exception as e:\n self.fail(\"The 'pool export pools_export.json' failed: %s\" % e)\n\n with open('pools_export.json', mode='r') as file:\n pool_config_output = json.load(file)\n self.assertEqual(\n pool_config_input,\n pool_config_output,\n \"Input and output pool files are not same\")\n os.remove('pools_import.json')\n os.remove('pools_export.json')\n\n def test_variables(self):\n # Checks if all subcommands are properly received\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'foo', '{\"foo\":\"bar\"}']))\n cli.variables_get(self.parser.parse_args([\n 'variables', 'get', 'foo']))\n cli.variables_get(self.parser.parse_args([\n 'variables', 'get', 'baz', '-d', 'bar']))\n cli.variables_list(self.parser.parse_args([\n 'variables', 'list']))\n cli.variables_delete(self.parser.parse_args([\n 'variables', 'delete', 'bar']))\n cli.variables_import(self.parser.parse_args([\n 'variables', 'import', DEV_NULL]))\n cli.variables_export(self.parser.parse_args([\n 'variables', 'export', DEV_NULL]))\n\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'bar', 'original']))\n # First export\n cli.variables_export(self.parser.parse_args([\n 'variables', 'export', 'variables1.json']))\n\n first_exp = open('variables1.json', 'r')\n\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'bar', 'updated']))\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'foo', '{\"foo\":\"oops\"}']))\n cli.variables_delete(self.parser.parse_args([\n 'variables', 'delete', 'foo']))\n # First import\n cli.variables_import(self.parser.parse_args([\n 'variables', 'import', 'variables1.json']))\n\n self.assertEqual('original', Variable.get('bar'))\n self.assertEqual('{\\n \"foo\": \"bar\"\\n}', Variable.get('foo'))\n # Second export\n cli.variables_export(self.parser.parse_args([\n 'variables', 'export', 'variables2.json']))\n\n second_exp = open('variables2.json', 'r')\n self.assertEqual(first_exp.read(), second_exp.read())\n second_exp.close()\n first_exp.close()\n # Second import\n cli.variables_import(self.parser.parse_args([\n 'variables', 'import', 'variables2.json']))\n\n self.assertEqual('original', Variable.get('bar'))\n self.assertEqual('{\\n \"foo\": \"bar\"\\n}', Variable.get('foo'))\n\n # Set a dict\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'dict', '{\"foo\": \"oops\"}']))\n # Set a list\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'list', '[\"oops\"]']))\n # Set str\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'str', 'hello string']))\n # Set int\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'int', '42']))\n # Set float\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'float', '42.0']))\n # Set true\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'true', 'true']))\n # Set false\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'false', 'false']))\n # Set none\n cli.variables_set(self.parser.parse_args([\n 'variables', 'set', 'null', 'null']))\n\n # Export and then import\n cli.variables_export(self.parser.parse_args([\n 'variables', 'export', 'variables3.json']))\n cli.variables_import(self.parser.parse_args([\n 'variables', 'import', 'variables3.json']))\n\n # Assert value\n self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))\n 
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))\n self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)\n self.assertEqual(42, models.Variable.get('int', deserialize_json=True))\n self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))\n self.assertEqual(True, models.Variable.get('true', deserialize_json=True))\n self.assertEqual(False, models.Variable.get('false', deserialize_json=True))\n self.assertEqual(None, models.Variable.get('null', deserialize_json=True))\n\n os.remove('variables1.json')\n os.remove('variables2.json')\n os.remove('variables3.json')\n\n def _wait_pidfile(self, pidfile):\n while True:\n try:\n with open(pidfile) as file:\n return int(file.read())\n except Exception:\n sleep(1)\n\n def test_cli_webserver_foreground(self):\n # Confirm that webserver hasn't been launched.\n # pgrep returns exit status 1 if no process matched.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Run webserver in foreground and terminate it.\n p = subprocess.Popen([\"airflow\", \"webserver\"])\n p.terminate()\n p.wait()\n\n # Assert that no process remains.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and bool(os.environ[\"TRAVIS\"]),\n \"Skipping test due to lack of required file permission\")\n def test_cli_webserver_foreground_with_pid(self):\n # Run webserver in foreground with --pid option\n pidfile = tempfile.mkstemp()[1]\n p = subprocess.Popen([\"airflow\", \"webserver\", \"--pid\", pidfile])\n\n # Check the file specified by --pid option exists\n self._wait_pidfile(pidfile)\n\n # Terminate webserver\n p.terminate()\n p.wait()\n\n @unittest.skipIf(\"TRAVIS\" in os.environ and bool(os.environ[\"TRAVIS\"]),\n \"Skipping test due to lack of required file permission\")\n def test_cli_webserver_background(self):\n import psutil\n\n # Confirm that webserver hasn't been launched.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Run webserver in background.\n subprocess.Popen([\"airflow\", \"webserver\", \"-D\"])\n pidfile = cli.setup_locations(\"webserver\")[0]\n self._wait_pidfile(pidfile)\n\n # Assert that gunicorn and its monitor are launched.\n self.assertEqual(0, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(0, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Terminate monitor process.\n pidfile = cli.setup_locations(\"webserver-monitor\")[0]\n pid = self._wait_pidfile(pidfile)\n p = psutil.Process(pid)\n p.terminate()\n p.wait()\n\n # Assert that no process remains.\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"airflow\"]).wait())\n self.assertEqual(1, subprocess.Popen([\"pgrep\", \"-c\", \"gunicorn\"]).wait())\n\n # Patch for causing webserver timeout\n @mock.patch(\"airflow.bin.cli.get_num_workers_running\", return_value=0)\n def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):\n # Shorten timeout so that this test doesn't take too long time\n args = self.parser.parse_args(['webserver'])\n with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):\n with self.assertRaises(SystemExit) as e:\n cli.webserver(args)\n 
self.assertEqual(e.exception.code, 1)\n\n\nclass FakeWebHDFSHook:\n def __init__(self, conn_id):\n self.conn_id = conn_id\n\n def get_conn(self):\n return self.conn_id\n\n def check_for_path(self, hdfs_path):\n return hdfs_path\n\n\nclass FakeSnakeBiteClientException(Exception):\n pass\n\n\nclass FakeSnakeBiteClient:\n\n def __init__(self):\n self.started = True\n\n def ls(self, path, include_toplevel=False):\n \"\"\"\n the fake snakebite client\n :param path: the array of path to test\n :param include_toplevel: to return the toplevel directory info\n :return: a list for path for the matching queries\n \"\"\"\n if path[0] == '/datadirectory/empty_directory' and not include_toplevel:\n return []\n elif path[0] == '/datadirectory/datafile':\n return [{\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/datafile'\n }]\n elif path[0] == '/datadirectory/empty_directory' and include_toplevel:\n return [{\n 'group': 'supergroup',\n 'permission': 493,\n 'file_type': 'd',\n 'access_time': 0,\n 'block_replication': 0,\n 'modification_time': 1481132141540,\n 'length': 0,\n 'blocksize': 0,\n 'owner': 'hdfs',\n 'path': '/datadirectory/empty_directory'\n }]\n elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:\n return [{\n 'group': 'supergroup',\n 'permission': 493,\n 'file_type': 'd',\n 'access_time': 0,\n 'block_replication': 0,\n 'modification_time': 1481132141540,\n 'length': 0,\n 'blocksize': 0,\n 'owner': 'hdfs',\n 'path': '/datadirectory/empty_directory'\n }, {\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/not_empty_directory/test_file'\n }]\n elif path[0] == '/datadirectory/not_empty_directory':\n return [{\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 0,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/not_empty_directory/test_file'\n }]\n elif path[0] == '/datadirectory/not_existing_file_or_directory':\n raise FakeSnakeBiteClientException\n elif path[0] == '/datadirectory/regex_dir':\n return [{\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862, 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/regex_dir/test1file'\n }, {\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/regex_dir/test2file'\n }, {\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/regex_dir/test3file'\n }, {\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 
134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'\n }, {\n 'group': 'supergroup',\n 'permission': 420,\n 'file_type': 'f',\n 'access_time': 1481122343796,\n 'block_replication': 3,\n 'modification_time': 1481122343862,\n 'length': 12582912,\n 'blocksize': 134217728,\n 'owner': 'hdfs',\n 'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'\n }]\n else:\n raise FakeSnakeBiteClientException\n\n\nclass FakeHDFSHook:\n def __init__(self, conn_id=None):\n self.conn_id = conn_id\n\n def get_conn(self):\n client = FakeSnakeBiteClient()\n return client\n\n\nclass TestConnection(unittest.TestCase):\n def setUp(self):\n utils.db.initdb()\n os.environ['AIRFLOW_CONN_TEST_URI'] = (\n 'postgres://username:[email protected]:5432/the_database')\n os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (\n 'postgres://ec2.compute.com/the_database')\n\n def tearDown(self):\n env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']\n for ev in env_vars:\n if ev in os.environ:\n del os.environ[ev]\n\n def test_using_env_var(self):\n c = SqliteHook.get_connection(conn_id='test_uri')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertEqual('username', c.login)\n self.assertEqual('password', c.password)\n self.assertEqual(5432, c.port)\n\n def test_using_unix_socket_env_var(self):\n c = SqliteHook.get_connection(conn_id='test_uri_no_creds')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertIsNone(c.login)\n self.assertIsNone(c.password)\n self.assertIsNone(c.port)\n\n def test_param_setup(self):\n c = Connection(conn_id='local_mysql', conn_type='mysql',\n host='localhost', login='airflow',\n password='airflow', schema='airflow')\n self.assertEqual('localhost', c.host)\n self.assertEqual('airflow', c.schema)\n self.assertEqual('airflow', c.login)\n self.assertEqual('airflow', c.password)\n self.assertIsNone(c.port)\n\n def test_env_var_priority(self):\n c = SqliteHook.get_connection(conn_id='airflow_db')\n self.assertNotEqual('ec2.compute.com', c.host)\n\n os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \\\n 'postgres://username:[email protected]:5432/the_database'\n c = SqliteHook.get_connection(conn_id='airflow_db')\n self.assertEqual('ec2.compute.com', c.host)\n self.assertEqual('the_database', c.schema)\n self.assertEqual('username', c.login)\n self.assertEqual('password', c.password)\n self.assertEqual(5432, c.port)\n del os.environ['AIRFLOW_CONN_AIRFLOW_DB']\n\n def test_dbapi_get_uri(self):\n conn = BaseHook.get_connection(conn_id='test_uri')\n hook = conn.get_hook()\n self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())\n conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')\n hook2 = conn2.get_hook()\n self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())\n\n def test_dbapi_get_sqlalchemy_engine(self):\n conn = BaseHook.get_connection(conn_id='test_uri')\n hook = conn.get_hook()\n engine = hook.get_sqlalchemy_engine()\n self.assertIsInstance(engine, sqlalchemy.engine.Engine)\n self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))\n\n def test_get_connections_env_var(self):\n conns = SqliteHook.get_connections(conn_id='test_uri')\n assert len(conns) == 1\n assert conns[0].host == 'ec2.compute.com'\n assert conns[0].schema == 'the_database'\n assert conns[0].login == 'username'\n assert conns[0].password == 'password'\n assert conns[0].port == 
5432\n\n\nclass TestWebHDFSHook(unittest.TestCase):\n def test_simple_init(self):\n from airflow.hooks.webhdfs_hook import WebHDFSHook\n c = WebHDFSHook()\n self.assertIsNone(c.proxy_user)\n\n def test_init_proxy_user(self):\n from airflow.hooks.webhdfs_hook import WebHDFSHook\n c = WebHDFSHook(proxy_user='someone')\n self.assertEqual('someone', c.proxy_user)\n\n\nHDFSHook = None # type: Optional[hdfs_hook.HDFSHook]\nsnakebite = None # type: None\n\n\[email protected](HDFSHook is None,\n \"Skipping test because HDFSHook is not installed\")\nclass TestHDFSHook(unittest.TestCase):\n def setUp(self):\n os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'\n\n def test_get_client(self):\n client = HDFSHook(proxy_user='foo').get_conn()\n self.assertIsInstance(client, snakebite.client.Client)\n self.assertEqual('localhost', client.host)\n self.assertEqual(8020, client.port)\n self.assertEqual('foo', client.service.channel.effective_user)\n\n @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')\n @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')\n def test_get_autoconfig_client(self, mock_get_connections,\n MockAutoConfigClient):\n c = Connection(conn_id='hdfs', conn_type='hdfs',\n host='localhost', port=8020, login='foo',\n extra=json.dumps({'autoconfig': True}))\n mock_get_connections.return_value = [c]\n HDFSHook(hdfs_conn_id='hdfs').get_conn()\n MockAutoConfigClient.assert_called_once_with(effective_user='foo',\n use_sasl=False)\n\n @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')\n def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):\n HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()\n MockAutoConfigClient.assert_called_once_with(effective_user=None,\n use_sasl=False)\n\n @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')\n def test_get_ha_client(self, mock_get_connections):\n c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',\n host='localhost', port=8020)\n c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',\n host='localhost2', port=8020)\n mock_get_connections.return_value = [c1, c2]\n client = HDFSHook().get_conn()\n self.assertIsInstance(client, snakebite.client.HAClient)\n\n\nsend_email_test = mock.Mock()\n\n\nclass TestEmail(unittest.TestCase):\n def setUp(self):\n conf.remove_option('email', 'EMAIL_BACKEND')\n\n @mock.patch('airflow.utils.email.send_email')\n def test_default_backend(self, mock_send_email):\n res = utils.email.send_email('to', 'subject', 'content')\n mock_send_email.assert_called_once_with('to', 'subject', 'content')\n self.assertEqual(mock_send_email.return_value, res)\n\n @mock.patch('airflow.utils.email.send_email_smtp')\n def test_custom_backend(self, mock_send_email):\n with conf_vars({('email', 'email_backend'): 'tests.core.send_email_test'}):\n utils.email.send_email('to', 'subject', 'content')\n send_email_test.assert_called_once_with(\n 'to', 'subject', 'content', files=None, dryrun=False,\n cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')\n self.assertFalse(mock_send_email.called)\n\n\nclass TestEmailSmtp(unittest.TestCase):\n def setUp(self):\n conf.set('smtp', 'SMTP_SSL', 'False')\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_smtp(self, mock_send_mime):\n attachment = tempfile.NamedTemporaryFile()\n attachment.write(b'attachment')\n attachment.seek(0)\n utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n 
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])\n self.assertEqual(['to'], call_args[1])\n msg = call_args[2]\n self.assertEqual('subject', msg['Subject'])\n self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])\n self.assertEqual(2, len(msg.get_payload()))\n filename = 'attachment; filename=\"' + os.path.basename(attachment.name) + '\"'\n self.assertEqual(filename, msg.get_payload()[-1].get('Content-Disposition'))\n mimeapp = MIMEApplication('attachment')\n self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_smtp_with_multibyte_content(self, mock_send_mime):\n utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n msg = call_args[2]\n mimetext = MIMEText('🔥', 'mixed', 'utf-8')\n self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())\n\n @mock.patch('airflow.utils.email.send_MIME_email')\n def test_send_bcc_smtp(self, mock_send_mime):\n attachment = tempfile.NamedTemporaryFile()\n attachment.write(b'attachment')\n attachment.seek(0)\n utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')\n self.assertTrue(mock_send_mime.called)\n call_args = mock_send_mime.call_args[0]\n self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])\n self.assertEqual(['to', 'cc', 'bcc'], call_args[1])\n msg = call_args[2]\n self.assertEqual('subject', msg['Subject'])\n self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])\n self.assertEqual(2, len(msg.get_payload()))\n self.assertEqual('attachment; filename=\"' + os.path.basename(attachment.name) + '\"',\n msg.get_payload()[-1].get('Content-Disposition'))\n mimeapp = MIMEApplication('attachment')\n self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime(self, mock_smtp, mock_smtp_ssl):\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n msg = MIMEMultipart()\n utils.email.send_MIME_email('from', 'to', msg, dryrun=False)\n mock_smtp.assert_called_once_with(\n conf.get('smtp', 'SMTP_HOST'),\n conf.getint('smtp', 'SMTP_PORT'),\n )\n self.assertTrue(mock_smtp.return_value.starttls.called)\n mock_smtp.return_value.login.assert_called_once_with(\n conf.get('smtp', 'SMTP_USER'),\n conf.get('smtp', 'SMTP_PASSWORD'),\n )\n mock_smtp.return_value.sendmail.assert_called_once_with('from', 'to', msg.as_string())\n self.assertTrue(mock_smtp.return_value.quit.called)\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n with conf_vars({('smtp', 'smtp_ssl'): 'True'}):\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)\n self.assertFalse(mock_smtp.called)\n mock_smtp_ssl.assert_called_once_with(\n conf.get('smtp', 'SMTP_HOST'),\n conf.getint('smtp', 'SMTP_PORT'),\n )\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):\n mock_smtp.return_value = mock.Mock()\n mock_smtp_ssl.return_value = mock.Mock()\n with conf_vars({\n ('smtp', 'smtp_user'): None,\n ('smtp', 'smtp_password'): None,\n }):\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)\n 
self.assertFalse(mock_smtp_ssl.called)\n mock_smtp.assert_called_once_with(\n conf.get('smtp', 'SMTP_HOST'),\n conf.getint('smtp', 'SMTP_PORT'),\n )\n self.assertFalse(mock_smtp.login.called)\n\n @mock.patch('smtplib.SMTP_SSL')\n @mock.patch('smtplib.SMTP')\n def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):\n utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)\n self.assertFalse(mock_smtp.called)\n self.assertFalse(mock_smtp_ssl.called)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mhaiyang/iccv
[ "04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb", "04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb" ]
[ "utils/pascal.py", "utils/compute_overlap.py" ]
[ "\"\"\"\n @Time : 203/10/19 15:31\n @Author : TaylorMei\n @Email : [email protected]\n \n @Project : iccv\n @File : pascal.py\n @Function:\n \n\"\"\"\nimport os\nimport numpy as np\nimport skimage.io\n\ninput_path = \"/home/iccd/data/MSRA10K/PASCAL-S/masks/\"\noutput_path = \"/home/iccd/data/MSRA10K/PASCAL-S/mask/\"\n\nimglist = os.listdir(input_path)\nfor i, imgname in enumerate(imglist):\n print(i, imgname)\n mask = skimage.io.imread(input_path + imgname)\n print(np.max(mask))\n mask = np.where(mask >= 127.5, 255, 0).astype(np.uint8)\n mask = skimage.io.imsave(output_path + imgname, mask)\n", "\"\"\"\n @Time : 202/16/19 16:33\n @Author : TaylorMei\n @Email : [email protected]\n \n @Project : iccv\n @File : compute_overlap.py\n @Function:\n \n\"\"\"\nimport os\nimport numpy as np\nimport skimage.io\nimport skimage.transform\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport seaborn as sns\n\nimage_path = '/home/iccd/data/msd9/test/image/'\nmask_json_path = '/home/iccd/data/msd9/test/mask/'\n# image_path = '/home/iccd/data/2019/msd9_all/all_images/'\n# mask_json_path = '/home/iccd/data/2019/msd9_all/all_masks/'\n\nimglist = os.listdir(image_path)\nprint(len(imglist))\n\noverlap = np.zeros([256, 256], dtype=np.float64)\ntall, wide = 0, 0\n\nfor i, imgname in enumerate(imglist):\n print(i, imgname)\n name = imgname.split('.')[0]\n\n mask = skimage.io.imread(mask_json_path + name + '.png')\n\n height = mask.shape[0]\n width = mask.shape[1]\n if height > width:\n tall += 1\n else:\n wide += 1\n mask = skimage.transform.resize(mask, [256, 256], order=0)\n mask = np.where(mask != 0, 1, 0).astype(np.float64)\n overlap += mask\n\noverlap = overlap / len(imglist)\noverlap_normalized = (overlap - np.min(overlap)) / (np.max(overlap) - np.min(overlap))\nskimage.io.imsave('./msd9_test.png', (overlap*255).astype(np.uint8))\nskimage.io.imsave('./msd9_test_normalized.png', overlap_normalized)\n\nprint(tall, wide)\n\nf, ax = plt.subplots()\nsns.set()\nax = sns.heatmap(overlap, ax=ax, cmap=cm.summer, cbar=False)\nax.set_xticklabels([])\nax.set_yticklabels([])\nplt.xticks([])\nplt.yticks([])\nplt.show()\n\n\n" ]
[ [ "numpy.max", "numpy.where" ], [ "matplotlib.pyplot.yticks", "numpy.min", "matplotlib.pyplot.subplots", "numpy.max", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spencerkent/vision-transform-codes
[ "63258ce698e436ee3ce29def75c89337759fb98b", "63258ce698e436ee3ce29def75c89337759fb98b", "63258ce698e436ee3ce29def75c89337759fb98b", "63258ce698e436ee3ce29def75c89337759fb98b" ]
[ "vision_transform_codes/tests/sparse_coding_2.py", "vision_transform_codes/training/ica.py", "vision_transform_codes/utils/image_processing.py", "vision_transform_codes/dict_update_rules/convolutional/sc_cheap_quadratic_descent.py" ]
[ "\"\"\"\nTest: Sparse coding, fully connected, fista, cheap qaudratic descent\n\"\"\"\nimport _set_the_path\n\nimport math\nimport pickle\nimport torch\n\nfrom training.sparse_coding import train_dictionary\nfrom utils.dataset_generation import OneOutputDset\nfrom utils import defaults\n\nRUN_IDENTIFIER = '_testing_sc_2'\nLOGS_STORED_HERE = defaults.logging_directory\n\nTRAINING_SET_SIZE = 10000\nVALIDATION_SET_SIZE = 5000\nBATCH_SIZE = 1000\nPATCH_HEIGHT = 16\nPATCH_WIDTH = 16\n\nCODE_SIZE = 1 * PATCH_HEIGHT*PATCH_WIDTH # critically sampled\nNUM_EPOCHS = 1\niters_per_epoch = int(math.ceil(TRAINING_SET_SIZE / BATCH_SIZE))\n\ntrn_val_dsets = pickle.load(open(defaults.dataset_directory /\n 'vtc_testing/field_white_16x16.p', 'rb'))\n\nSC_PARAMS = {\n 'mode': 'fully-connected',\n 'num_epochs': NUM_EPOCHS,\n 'code_inference_algorithm': 'fista',\n 'inference_param_schedule': {\n 0: {'sparsity_weight': 0.008, 'num_iters': 5}},\n 'dictionary_update_algorithm': 'sc_cheap_quadratic_descent',\n 'dict_update_param_schedule': {\n 0: {'stepsize': 0.1, 'num_iters': 1},\n 5*iters_per_epoch: {'stepsize': 0.05, 'num_iters': 1}},\n # write various tensorboard logs on the following schedule:\n 'training_visualization_schedule': set([0, 10, 500]),\n 'reshaped_kernel_size': (PATCH_HEIGHT, PATCH_WIDTH),\n # actually store all logs here:\n 'logging_folder_fullpath': LOGS_STORED_HERE / RUN_IDENTIFIER,\n # checkpoint the dictionary on this interval\n 'checkpoint_schedule': set([iters_per_epoch,\n (NUM_EPOCHS*iters_per_epoch)-1])}\nSC_PARAMS['training_visualization_schedule'].update(set(\n [iters_per_epoch*x for x in range(1, NUM_EPOCHS)]))\n\n\n# Now initialize model and begin training\ntorch_device = torch.device('cuda:1')\n# otherwise can put on 'cuda:0' or 'cpu'\n\n# send ALL image patches to the GPU and wrap in a simple dataloader\nimage_patches_gpu_training = torch.utils.data.DataLoader(\n OneOutputDset(torch.from_numpy(\n trn_val_dsets['training']['patches']).to(torch_device)),\n batch_size=BATCH_SIZE, shuffle=True)\nimage_patches_gpu_validation = torch.utils.data.DataLoader(\n OneOutputDset(torch.from_numpy(\n trn_val_dsets['validation']['patches']).to(torch_device)),\n batch_size=BATCH_SIZE*10) # larger batches for validation data\n# if data is too big to all fit on GPU, just omit .to(torch_device) above.\n# Can also add num_workers=x to the DataLoader constructor\n\n# create the dictionary Tensor on the GPU\nsparse_coding_dictionary = torch.randn((CODE_SIZE, PATCH_HEIGHT*PATCH_WIDTH),\n device=torch_device)\n# start out the dictionaries with norm 1\nsparse_coding_dictionary.div_(\n sparse_coding_dictionary.norm(p=2, dim=1)[:, None])\n\ntrain_dictionary(image_patches_gpu_training, image_patches_gpu_validation,\n sparse_coding_dictionary, SC_PARAMS)\n", "\"\"\"\nThis implements ICA dictionary learning\n\"\"\"\n\nimport time\nimport os\nimport pickle\nimport yaml\nfrom matplotlib import pyplot as plt\nimport torch\n\ndef train_dictionary(image_dataset, init_dictionary, all_params):\n \"\"\"\n Train an ICA dictionary\n\n Only works in 'fully-connected' mode\n\n Parameters\n ----------\n image_dataset : torch.Tensor OR torch.Dataloader\n We make __getitem__ calls to either of these iterables and have them\n return us a batch of images. If image_dataset is a torch Tensor, that\n means ALL of the data is stored in the CPU's RAM or in the GPU's RAM. 
The\n choice of which will have already been made when the Tensor is created.\n The tensor is an array of size (k, b, n) where k is the total number of\n batches, n is the (flattened) size of each image, and b is the size of\n an individual batch. If image_dataset is a torch.DataLoader that means\n each time we make a __getitem__ call it will return a batch of images\n that it has fetched and preprocessed from disk. This is done in cpu\n multiprocesses that run asynchronously from the GPU. If the whole dataset\n is too large to be loaded into memory, this is really our only option.\n init_dictionary : torch.Tensor(float32, size=(n, n))\n This is an initial guess for the dictionary of basis functions that\n we can use to descibe the images. n is the size of each image and also\n the size of the code -- in ICA the codes are always the same\n dimensionality as the input signal.\n all_params :\n --- MANDATORY ---\n 'num_epochs': int\n The number of times to cycle over the whole dataset, reshuffling the\n order of the patches.\n 'dictionary_update_algorithm' : str\n One of {'ica_natural_gradient'}\n 'dict_update_param_schedule' : dictionary\n Dictionary containing iteration indexes at which to set/update\n parameters of the dictionary update algorithm. This will be algorithm\n specific. See the docstring for the respective algorithm in\n dictionary_learning/\n --- OPTIONAL ---\n 'checkpoint_schedule' : dictionary, optional\n Specific iterations at which to save the\n parameters of the model (dictionary, codes, etc.) to disk. Values\n associated w/ each of these keys aren't used and can be set to None.\n We're just using the dictionary for its fast hash-based lookup.\n 'training_visualization_schedule' : dictionary, optional\n Specific iterations at which to plot the dictionary and some sample\n codes. Again, dictionary values can be None, we're just using the keys\n 'logging_folder_fullpath' : pathlib.Path, optional\n Tells us where to save any checkpoint files or tensorboard summaries.\n Required if either 'checkpoint_schedule' or\n 'training_visualization_schedule' is set.\n 'stdout_print_interval' : int, optional\n The interval on which we print training progress to the terminal.\n Default 1000.\n \"\"\"\n ################################\n # Visualization Helper functions\n ################################\n def save_checkpoint(where_to_save):\n # In lieu of the torch-specific saver torch.save, we'll just use\n # pythons's standard serialization tool, pickle. 
That way we can mess\n    # with the results without needing PyTorch.\n    pickle.dump(dictionary.cpu().numpy(), open(where_to_save, 'wb'))\n\n  def log_training_progress(current_iteration_number):\n    batch_images_np = batch_images.cpu().numpy()\n    batch_sig_mag = np.max(batch_images_np) - np.min(batch_images_np)\n    #^ psnr depends on the range of the data which we just estimate from\n    #  this batch.\n    recons = torch.mm(codes, dictionary).cpu().numpy()\n    recon_psnr = []\n    for b_idx in range(recons.shape[0]):\n      psnr = compute_pSNR(batch_images_np[b_idx, :],\n                          recons[b_idx, :], manual_sig_mag=batch_sig_mag)\n      if psnr != np.inf:\n        recon_psnr.append(psnr)\n    avg_recon_psnr = np.mean(recon_psnr)\n    # Tensorboard doesn't give you a lot of control for how images look so\n    # i'm going to generate my own pyplot figures and save these as images.\n    # There's probably a more elegant way to do this, but it works for now...\n    tiled_kernel_figs = display_dictionary(\n        dictionary.cpu().numpy(), reshaping=kernel_reshaping,\n        renormalize=True,\n        plot_title='Current dictionary (renormalized), iter{}'.format(\n          total_iter_idx))\n    for fig_idx in range(len(tiled_kernel_figs)):\n      tb_img_caption = ('Current dictionary (renorm), fig ' + str(fig_idx+1) +\n                        ' of ' + str(len(tiled_kernel_figs)))\n      write_pyplot_to_tb_image(tiled_kernel_figs[fig_idx], tb_img_caption)\n    del tiled_kernel_figs\n    tiled_kernel_figs = display_dictionary(\n        dictionary.cpu().numpy(), reshaping=kernel_reshaping,\n        renormalize=False,\n        plot_title='Current dictionary (no renorm), iter {}'.format(\n          total_iter_idx))\n    for fig_idx in range(len(tiled_kernel_figs)):\n      tb_img_caption = ('Current dictionary (no renorm), fig ' +\n                        str(fig_idx+1) + ' of ' + str(len(tiled_kernel_figs)))\n      write_pyplot_to_tb_image(tiled_kernel_figs[fig_idx], tb_img_caption)\n    del tiled_kernel_figs\n\n    #TODO: plot the ICA cost function\n    tb_summary_writer.add_scalar('Average pSNR of reconstructions',\n                                 avg_recon_psnr, total_iter_idx)\n\n  def write_pyplot_to_tb_image(plt_fig, img_caption):\n    buf = io.BytesIO()\n    plt_fig.savefig(buf, format='png')\n    plt.close(plt_fig)\n    # decode the rendered PNG once, then log it to tensorboard\n    the_tensor = torch.tensor(np.array(Image.open(buf))[:, :, :3])\n    tb_summary_writer.add_image(img_caption, the_tensor,\n        global_step=total_iter_idx, dataformats='HWC')\n  ##########################\n  # Done w/ helper functions\n  ##########################\n\n  ##########################\n  # Setup and error checking\n  ##########################\n  assert 0 in all_params['dict_update_param_schedule']\n  assert init_dictionary.size(0) == init_dictionary.size(1)  # critically sample\n  # let's unpack all_params to make things a little less verbose...\n  ### MANDATORY ###\n  num_epochs = all_params['num_epochs']\n  dict_update_alg = all_params['dictionary_update_algorithm']\n  dict_update_param_schedule = all_params['dict_update_param_schedule']\n  assert dict_update_alg in ['ica_natural_gradient']\n  ### OPTIONAL ###\n  if 'logging_folder_fullpath' in all_params:\n    assert type(all_params['logging_folder_fullpath']) != str, (\n        'should be pathlib.Path')\n  logging_path = all_params['logging_folder_fullpath']\n  if logging_path.exists() and ('checkpoint_schedule' in all_params or\n                                'training_visualization_schedule' in all_params):\n    print('-------\\n',\n          'Warning, saving checkpoints and/or tensorboard logs into ',\n          'existing, directory. 
Will overwrite existing files\\n-------')\n if not logging_path.exists() and ('checkpoint_schedule' in all_params or\n 'training_visualization_schedule' in all_params):\n logging_path.mkdir(parents=True)\n if 'checkpoint_schedule' in all_params:\n import os\n import pickle\n ckpt_sched = all_params['checkpoint_schedule']\n else:\n ckpt_sched = None\n if 'training_visualization_schedule' in all_params:\n import io\n import numpy as np\n from matplotlib import pyplot as plt\n from PIL import Image\n from utils.plotting import compute_pSNR\n from utils.plotting import display_dictionary\n from torch.utils.tensorboard import SummaryWriter\n trn_vis_sched = all_params['training_visualization_schedule']\n tb_summary_writer = SummaryWriter(logging_path)\n if 'reshaped_kernel_size' in all_params:\n kernel_reshaping = all_params.pop('reshaped_kernel_size')\n else:\n kernel_reshaping = None\n else:\n trn_vis_sched = None\n if ckpt_sched is not None or trn_vis_sched is not None:\n import yaml\n # dump the parameters of this training session in human-readable JSON\n saved_training_params = {\n k: all_params[k] for k in all_params if k not in\n ['checkpoint_schedule', 'training_visualization_schedule']}\n yaml.dump(saved_training_params,\n open(logging_path / 'training_params.yaml', 'w'))\n if 'stdout_print_interval' in all_params:\n print_interval = all_params['stdout_print_interval']\n else:\n print_interval = 1000\n\n from analysis_transforms.fully_connected import invertible_linear\n if dict_update_alg == 'ica_natural_gradient':\n from dict_update_rules.fully_connected import ica_natural_gradient\n else:\n raise KeyError('Unrecognized dict update algorithm: ' + dict_update_alg)\n ##################################\n # Done w/ setup and error checking\n ##################################\n\n dictionary = init_dictionary # no copying, just a new reference\n\n starttime = time.time()\n total_iter_idx = 0\n for epoch_idx in range(num_epochs):\n for batch_idx, batch_images in enumerate(image_dataset):\n ###########################\n # Status updates to console\n ###########################\n if total_iter_idx % print_interval == 0:\n print('Iteration', total_iter_idx, 'complete')\n print('Time elapsed:', '{:.1f}'.format(time.time() - starttime),\n 'seconds')\n print('-----')\n\n if dictionary.device != batch_images.device:\n batch_images = batch_images.to(dictionary.device)\n\n ####################\n # Run code inference\n ####################\n codes = invertible_linear.run(batch_images, dictionary)\n\n #################################\n # Checkpointing and visualization\n #################################\n if (ckpt_sched is not None and total_iter_idx in ckpt_sched):\n save_checkpoint(logging_path /\n ('checkpoint_dictionary_iter_' + str(total_iter_idx)))\n if (trn_vis_sched is not None and total_iter_idx in trn_vis_sched):\n log_training_progress(total_iter_idx)\n\n #######################\n # Update the dictionary\n #######################\n # check to see if we need to set/update parameters\n if total_iter_idx in dict_update_param_schedule:\n d_upd_stp= dict_update_param_schedule[total_iter_idx]['stepsize']\n d_upd_niters = dict_update_param_schedule[total_iter_idx]['num_iters']\n if dict_update_alg == 'ica_natural_gradient':\n ica_natural_gradient.run(dictionary, codes, d_upd_stp, d_upd_niters)\n\n total_iter_idx += 1\n\n print(\"Epoch\", epoch_idx, \"finished\")\n", "\"\"\"\nSome utilities for wrangling image data.\n\nThese may be relevant for implementing sparse coding or other transform 
coding\nstrategies on images. Just leaving a few relevant references:\n\n.. [1] Olshausen, B. A., & Field, D. J. (1997). Sparse coding with an\n overcomplete basis set: A strategy employed by V1?. Vision research,\n 37(23), 3311-3325.\n\"\"\"\nimport numpy as np\nfrom scipy.signal import convolve\nfrom scipy.signal import convolve2d\nfrom scipy.ndimage import convolve1d\nfrom matplotlib import pyplot as plt\n\n\ndef filter_sd(image, filter_spatial, separable_vert=None, separable_horz=None):\n \"\"\"\n Filters an image using a filter specified in the {s}patial {d}omain\n\n If the filter is \"separable\" then we can get a performance boost by doing\n 1d convolutions separately in each axis.\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n The image to be filtered. The filter is applied to each color\n channel independently.\n filter_spatial : ndarray(float32 or uint8, size=(fh, fw))\n The filter has fh samples in the vertical dimension and fw samples in\n the horizontal dimension\n separable_vert : ndarray(float32 or uint8, size=(fh,)), optional\n The corresponding vertical 'factor' of filter_spatial. Default None.\n separable_horz : ndarray(float32 or uint8, size=(fw,)), optional\n The corresponding horizontal 'factor' of filter_spatial. Default None.\n\n Returns\n -------\n filtered_image : ndarray(float32, size=(h, w, c))\n \"\"\"\n assert image.dtype in ['float32', 'uint8']\n filtered_image = np.zeros(image.shape, dtype='float32')\n for color_channel in range(image.shape[2]):\n if separable_vert is None:\n filtered_image[:, :, color_channel] = convolve2d(\n image[:, :, color_channel], filter_spatial, 'same', boundary='symm')\n #^ using the 'symmetric' boundary condition seems to reduce artifacts at\n # the image boundary. Worth looking into more closely.\n else:\n # separable filter, we can do this faster\n for row_idx in range(image.shape[0]):\n filtered_image[row_idx, :, color_channel] = convolve1d(\n image[row_idx, :, color_channel], separable_horz,\n mode='reflect')\n for col_idx in range(image.shape[1]):\n filtered_image[:, col_idx, color_channel] = convolve1d(\n filtered_image[:, col_idx, color_channel], separable_vert,\n mode='reflect')\n return filtered_image\n\n\ndef filter_fd(image, filter_DFT):\n \"\"\"\n Filters an image using a filter specified in the {f}requency {d}omain\n\n This may optionally pad the image so as to match the number of samples in the\n filter DFT. We should make sure this is greater than or equal to the size of\n the image.\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n The image to be filtered. 
The filter is applied to each color\n channel independently.\n filter_DFT : ndarray(complex128, size=(fh, fw))\n The filter has fh samples in the vertical dimension and fw samples in\n the horizontal dimension\n\n Returns\n -------\n filtered_image : ndarray(float32, size=(h, w, c))\n \"\"\"\n assert image.dtype in ['float32', 'uint8']\n assert filter_DFT.shape[0] >= image.shape[0], \"don't undersample DFT\"\n assert filter_DFT.shape[1] >= image.shape[1], \"don't undersample DFT\"\n filtered_image = np.zeros(image.shape, dtype='float32')\n for color_channel in range(image.shape[2]):\n filtered_image[:, :, color_channel] = np.real(np.fft.ifft2(\n filter_DFT * np.fft.fft2(image[:, :, color_channel], filter_DFT.shape),\n filter_DFT.shape)).astype('float32')[0:image.shape[0], 0:image.shape[1]]\n return filtered_image\n\n\ndef downsample(image, factor=2):\n \"\"\"\n Downsample an image by the provided factor\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n The image to be filtered. The filter is applied to each color\n channel independently.\n factor : int, optional\n Take every Nth sample in each dimension, where N==factor\n\n Returns\n -------\n downsampled_image : ndarray(float32 or uint8, size=(h_d, w_d, c))\n The size of the downsampled image is h_d = ceil(h / factor),\n w_d = ceil(w / factor).\n \"\"\"\n assert type(factor) == int\n return image[::factor, ::factor]\n\n\ndef get_binomial_filter_1d(size):\n \"\"\"\n This produces a 1d spatial filter with binomial coefficients\n \"\"\"\n assert size > 1\n kernel = np.array([0.5, 0.5])\n for i in range(size - 2):\n kernel = convolve(np.array([0.5, 0.5]), kernel)\n return kernel\n\n\ndef get_binomial_filter_2d(height, width):\n \"\"\"\n This produces a 2d binomial spatial filter (to use w/ filter_sd)\n \"\"\"\n return (get_binomial_filter_1d(height)[:, None] *\n get_binomial_filter_1d(width)[None, :])\n\n\ndef get_gaussian_filter_2d(sigma, window_size, normalized=True):\n \"\"\"\n Returns a 2d gaussian filter\n\n Parameters\n ----------\n sigma : float\n The standard deviation of the gaussian\n window_size : tuple(int, int)\n The size of the filter's window.\n normalized : bool, optional\n Make the filter elements sum to 1. Default True.\n\n Returns\n -------\n gfilt : ndarray(float32, size=window_size)\n A gaussian filter\n \"\"\"\n lower_lim = [-int(np.floor((window_size[0]) / 2)),\n -int(np.floor((window_size[1]) / 2))]\n upper_lim = []\n for i in range(2):\n if window_size[i] % 2 != 0: # prefer odd-sized window\n upper_lim.append(int(np.floor(window_size[i] / 2)) + 1)\n else:\n upper_lim.append(int(np.floor(window_size[i] / 2)))\n\n v_coords = np.arange(lower_lim[0], upper_lim[0])\n h_coords = np.arange(lower_lim[1], upper_lim[1])\n kv_coords, kh_coords = np.meshgrid(v_coords, h_coords, indexing='ij')\n gaussian_kernel = np.exp(-0.5*(kv_coords**2 + kh_coords**2) / (sigma**2))\n if normalized:\n return gaussian_kernel / np.sum(gaussian_kernel)\n else:\n return gaussian_kernel\n\n\ndef get_low_pass_filter(DFT_num_samples, filter_parameters,\n norm_and_threshold=True):\n \"\"\"\n Returns the DFT of a lowpass filter that can be applied to an image\n\n Parameters\n ----------\n DFT_num_samples : (int, int)\n The number of samples on the DFT vertical ax and the DFT horizontal ax\n filter_parameters : dictionary\n Parameters of the filter. These may vary depending on the filter shape.\n Smoother filters reduce ringing artifacts but can throw away a lot of\n information in the middle frequencies. 
For now we just have 'exponential'\n but may add other filter shapes in the future. Each shape has some\n specific parameters.\n 'shape' : str\n One of {'exponential'}\n ** if 'shape' == 'exponential', then also:\n 'cutoff' : float \\in [0, 1]\n A fraction of the 2d nyquist frequency at which to set the cutoff.\n 'order' : float \\in [1, np.inf)\n The order of the exponential. In the Vision Research sparse coding\n paper[1] this is 4. We can make the cutoff sharper by increasing\n this number.\n ** elif...\n norm_and_threshold : bool\n If true, make sure the maximum magnitude of the transfer function is 1.0\n and threshold any values below 1e-3\n\n Returns\n -------\n lpf_DFT = ndarray(complex128, size(DFT_num_samples[0], DFT_num_samples[1]))\n The DFT of the low pass filter\n \"\"\"\n if filter_parameters['shape'] == 'exponential':\n assert all([x in filter_parameters for x in ['cutoff', 'order']])\n assert (filter_parameters['cutoff'] >= 0.0 and\n filter_parameters['cutoff'] <= 1.0)\n assert filter_parameters['order'] >= 1.0\n\n freqs_vert = np.fft.fftfreq(DFT_num_samples[0])\n freqs_horz = np.fft.fftfreq(DFT_num_samples[1])\n\n two_d_freq = np.meshgrid(freqs_vert, freqs_horz, indexing='ij')\n spatial_freq_mag = (np.sqrt(np.square(two_d_freq[0]) +\n np.square(two_d_freq[1])))\n lpf_DFT_mag = np.exp(\n -1. * np.power(spatial_freq_mag / (0.5 * filter_parameters['cutoff']),\n filter_parameters['order']))\n #^ 0.5 is the 2d spatial nyquist frequency\n if norm_and_threshold:\n lpf_DFT_mag[lpf_DFT_mag < 1e-3] = 1e-3\n #^ avoid filter magnitudes that are 'too small' because this will make\n # undoing the filter introduce arbitrary high-frequency noise\n lpf_DFT_phase = np.zeros(spatial_freq_mag.shape)\n else:\n raise KeyError('Unrecognized filter shape: ' + filter_parameters['shape'])\n\n return lpf_DFT_mag * np.exp(1j * lpf_DFT_phase)\n\n\ndef get_whitening_ramp_filter(DFT_num_samples, norm_and_threshold=True):\n \"\"\"\n Returns the DFT of a simple 'magnitude ramp' filter that whitens data\n\n Parameters\n ----------\n DFT_num_samples : (int, int)\n The number of samples in the DFT vertical ax and the DFT horizontal ax\n norm_and_threshold : bool, optional.\n If true, make sure the maximum magnitude of the transfer function is 1.0\n and threshold any values below 1e-5. Default True\n\n Returns\n -------\n wf_DFT = ndarray(complex128, size(DFT_num_samples[0], DFT_num_samples[1]))\n The DFT of the whitening filter\n \"\"\"\n freqs_vert = np.fft.fftfreq(DFT_num_samples[0])\n freqs_horz = np.fft.fftfreq(DFT_num_samples[1])\n\n two_d_freq = np.meshgrid(freqs_vert, freqs_horz, indexing='ij')\n spatial_freq_mag = (np.sqrt(np.square(two_d_freq[0]) +\n np.square(two_d_freq[1])))\n if norm_and_threshold:\n wf_DFT_mag = spatial_freq_mag / np.max(spatial_freq_mag)\n wf_DFT_mag[wf_DFT_mag < 1e-5] = 1e-5\n #^ avoid filter magnitudes that are 'too small' because this will make\n # undoing the filter introduce arbitrary high-frequency noise\n else:\n wf_DFT_mag = spatial_freq_mag\n return wf_DFT_mag * np.exp(1j * 0) # zero-phase filter\n\n\ndef whiten_center_surround(image, cutoffs, return_filter=False,\n norm_and_threshold=True):\n \"\"\"\n Slight mod to scheme described in the Vision Research sparse coding paper [1]\n\n We have the composition of a low pass filter with a ramp in spatial frequency\n which together produces a center-surround filter in the image domain. 
The\n wrinkle is that we can flatten off the transfer function in the low frequency\n band so it passes some low frequency information through. This is useful for\n making the unwhitening computation more well-behaved and for passing of the\n responsibility of coding low-freq information to a subsequent step of\n processing, for instance `local_luminance_subtraction` (see below).\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n An image of height h and width w, with c color channels\n cutoffs : dictionary\n 'low' : The low-end cutoff of the whitening filter, clip attenuation\n below this value. These spatial frequencies are what get through.\n 'high' : The upper cutoff of the lowpass filter\n return_filter : bool, optional\n If true, also return the DFT of the used filter. Just for unwhitening,\n visualization, and debugging purposes. Default False.\n norm_and_threshold : bool, optional.\n If true, make sure the maximum magnitude of the transfer function is 1.0\n and threshold any values below 1e-3. Default True\n \"\"\"\n assert image.dtype in ['float32', 'uint8']\n lpf = get_low_pass_filter(image.shape,\n {'shape': 'exponential', 'cutoff': cutoffs['high'], 'order': 8.0},\n norm_and_threshold=False)\n wf = get_whitening_ramp_filter(image.shape, norm_and_threshold=False)\n rolled_off_ramp = np.maximum(wf, cutoffs['low'] * np.ones(wf.shape))\n combined_filter = rolled_off_ramp * lpf\n if norm_and_threshold:\n combined_filter /= np.max(np.abs(combined_filter))\n combined_filter[np.abs(combined_filter) < 1e-3] = 1e-3\n if return_filter:\n return filter_fd(image, combined_filter), combined_filter\n else:\n return filter_fd(image, combined_filter)\n\n\ndef unwhiten_center_surround(image, low_cutoff=None, orig_filter_DFT=None):\n \"\"\"\n Undoes center-surround whitening. If the original filter DFT is provided\n this is used to exactly invert the filter. If not provided, we drop the\n the low pass part and whiten with a rolled-off ramp filter based on the\n specified cutoffs.\n\n Parameters\n ----------\n image : ndarray(float32, size=(h, w, c))\n An image of height h and width w, with c color channels\n low_cutoff : float, optional\n The lowpass cutoff that rolls off the ramp filter to pass DC. Ignored\n if orig_filter_DFT is provided. Default None.\n orig_filter_DFT : ndarray(complex128), optional\n If not None, this is used to invert the whitening exactly. Default None.\n \"\"\"\n assert image.dtype == 'float32'\n assert not ((low_cutoff is None) and (orig_filter_DFT is None))\n if orig_filter_DFT is None:\n # synthesize a whitening filter but don't use the upper low pass filter.\n # inverting this causes problems with noise and instability\n wf = get_whitening_ramp_filter(image.shape, norm_and_threshold=False)\n orig_filter_DFT = np.maximum(wf, low_cutoff * np.ones(wf.shape))\n return filter_fd(image, 1. / orig_filter_DFT)\n\n\ndef whiten_ZCA(flat_data, precomputed_ZCA_parameters=None):\n \"\"\"\n Uses the principal components transformation to whiten data.\n\n We have to use a large dataset to estimate the directions of largest variance\n in vector-valued data, the principal components, and then we normalize the\n variance of each direction in this space of principal components. 
This has\n a similar, *but different* visual affect on images than does the\n whiten_center_surround transformation.\n\n Parameters\n ----------\n flat_data : ndarray(float32 or uint8, size=(D, n))\n n is the dimensionality of a single datapoint and D is the size of the\n dataset to which we are applying the ZCA transform (independently to\n each sample). We may also be using this dataset to estimate the ZCA\n whitening transform in the first place.\n precomputed_ZCA_parameters : dictionary, optional\n The parameters of a ZCA transform that have already been estimated. If\n None, we will compute these based on flat_data. Default None.\n 'PCA_basis' : ndarray(float32, size=(n, n))\n The principal components transform matrix meant to be applied with a\n right inner product to new data\n 'PCA_axis_variances' : ndarray(float32, size=(n,))\n The estimated variance of data on each of the n princpal axes.\n 'subtracted_mean' : float32\n We subtract this from each datapoint to approximately 'zero' the data\n\n Returns\n -------\n whitened_data : ndarray(float32, size=(D, n))\n The data, now whitened\n ZCA_parameters : dictionary, if precomputed_ZCA_parameters is None\n The parameters of a ZCA transform that we estimate from flat_data.\n 'PCA_basis' : ndarray(float32, size=(n, n))\n The principal components transform matrix meant to be applied with a\n left inner product to new data\n 'PCA_axis_variances' : ndarray(float32, size=(n,))\n The estimated variance of data on each of the n princpal axes.\n 'subtracted_mean' : float32\n We subtract this from each datapoint to approximately 'zero' the data\n \"\"\"\n assert flat_data.dtype in ['float32', 'uint8']\n # we could do all this in torch using the functionality defined in\n # ../training/pca.py and ../analysis_transforms/invertible_linear.py, but\n # I'm trying to make this portable and numpy/pythonic, so we'll do it\n # manually here\n num_samples = flat_data.shape[0]\n num_components = flat_data.shape[1]\n if precomputed_ZCA_parameters is None:\n if num_components > 0.1 * num_samples:\n raise RuntimeError('Number of samples is way too small to estimate PCA')\n meanzero_flat_data, component_means = center_each_component(flat_data)\n U, w, _ = np.linalg.svd(\n np.dot(meanzero_flat_data.T, meanzero_flat_data) / num_samples,\n full_matrices=True)\n # ^way faster to estimate based on the n x n covariance matrix\n ZCA_parameters = {'PCA_basis': U, 'PCA_axis_variances': w,\n 'subtracted_mean': np.mean(component_means)}\n # technically speaking, we should subtract from each component its\n # specific mean over the dataset. However, when we patch an image, compute\n # the transform, and then reassemble the patches, using component-specific\n # means can introduce some blocking artifacts in the reassembled image.\n # Assuming there's nothing special about particular components, they will\n # all have approximately the same mean. 
Instead, I will just subtract the\n # mean of these means, approximately zeroing the data while reducing the\n # visual artifacts.\n else:\n # shallow copy just creates a reference, not an honest-to-goodness copy\n ZCA_parameters = precomputed_ZCA_parameters.copy()\n meanzero_flat_data = flat_data - ZCA_parameters['subtracted_mean']\n\n meanzero_white_data = np.dot(\n np.dot(meanzero_flat_data, ZCA_parameters['PCA_basis']) /\n (np.sqrt(ZCA_parameters['PCA_axis_variances']) + 1e-4)[None, :],\n ZCA_parameters['PCA_basis'].T)\n\n white_data = (meanzero_white_data.astype('float32') +\n ZCA_parameters['subtracted_mean'])\n\n if precomputed_ZCA_parameters is None:\n return white_data, ZCA_parameters\n else:\n return white_data\n\n\ndef unwhiten_ZCA(white_flat_data, precomputed_ZCA_parameters):\n \"\"\"\n Undoes the ZCA whitening operation (see above)\n\n Parameters\n ----------\n white_flat_data : ndarray(float32, size=(D, n))\n n is the dimensionality of a single datapoint and D is the size of the\n dataset to which we are applying the ZCA transform (independently to\n each sample).\n precomputed_ZCA_parameters : dictionary\n The parameters of a ZCA transform that have already been estimated.\n 'PCA_basis' : ndarray(float32, size=(n, n))\n The principal components transform matrix meant to be applied with a\n right inner product to new flat data\n 'PCA_axis_variances' : ndarray(float32, size=(n,))\n The estimated variance of data on each of the n princpal axes.\n 'subtracted_mean' : float32\n We subtract this from each datapoint to approximately 'zero' the data\n\n Returns\n -------\n colored_data : ndarray(float32, size=(D, n))\n The data, with whitening operation undone\n \"\"\"\n assert white_flat_data.dtype == 'float32'\n meanzero_white_data = (white_flat_data -\n precomputed_ZCA_parameters['subtracted_mean'])\n meanzero_colored_data = np.dot(\n np.dot(meanzero_white_data, precomputed_ZCA_parameters['PCA_basis']) *\n (np.sqrt(precomputed_ZCA_parameters['PCA_axis_variances']) + 1e-4)[None, :],\n precomputed_ZCA_parameters['PCA_basis'].T)\n\n colored_data = (meanzero_colored_data.astype('float32') +\n precomputed_ZCA_parameters['subtracted_mean'])\n\n return colored_data\n\n\ndef local_contrast_normalization(image, filter_sigma, return_normalizer=False):\n \"\"\"\n Computes an estimate of the local contrast and removes this from an image\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n An image of height h and width w, with c color channels\n filter_sigma : tuple(int, int)\n The standard deviation of the isotropic gaussian kernel that we use to\n compute a local estimate of the variance\n return_normalizer : bool, optional\n If true, return the array used to do the normalization -- this can be\n used to reverse the transform. 
Defualt False.\n\n Returns\n -------\n filtered_image : ndarray(float32, size=(h, w, c))\n normalizer : ndarray(float32, size=(h, w, c)), if return_normalizer=True\n \"\"\"\n gaussian_kernel = get_gaussian_filter_2d(\n filter_sigma, (4*filter_sigma+1, 4*filter_sigma+1))\n\n local_variance = filter_sd(image**2, gaussian_kernel)\n\n # TODO: deal with divide by zero\n local_variance[local_variance == 0] = 1.\n if return_normalizer:\n return image / np.sqrt(local_variance), np.sqrt(local_variance)\n else:\n return image / np.sqrt(local_variance)\n\n\ndef local_luminance_subtraction(image, filter_sigma, return_subtractor=False):\n \"\"\"\n Computes an estimate of the local luminance and removes this from an image\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n An image of height h and width w, with c color channels\n filter_sigma : float\n The standard deviation of the isotropic gaussian kernel that we use to\n compute a local estimate of the luminance\n return_subtractor : bool, optional\n If true, return the array used to do the luminance subtraction -- this\n can be used to reverse the transform. Defualt False.\n\n Returns\n -------\n filtered_image : ndarray(float32, size=(h, w, c))\n subtractor : ndarray(float32, size=(h, w, c))\n \"\"\"\n gaussian_kernel = get_gaussian_filter_2d(\n filter_sigma, (4*filter_sigma+1, 4*filter_sigma+1))\n\n local_luminance = filter_sd(image, gaussian_kernel)\n\n if return_subtractor:\n return image - local_luminance, local_luminance\n else:\n return image - local_luminance\n\n\ndef center_each_component(flat_data):\n \"\"\"\n Makes each component of data have mean zero across the dataset\n\n Parameters\n ----------\n flat_data : ndarray(float32 or uint8, size=(D, n))\n n is the dimensionality of a single datapoint and D is the size of the\n dataset over which we are taking the mean.\n\n Returns\n -------\n centered_data : ndarray(float32, size=(D, n))\n The data, now with mean 0 in each component\n original_means : ndarray(float32, size=(n,))\n The componentwise means of the original data. Can be used to\n uncenter the data later (for instance, after dictionary learning)\n \"\"\"\n assert flat_data.dtype in ['float32', 'uint8']\n original_means = np.mean(flat_data, axis=0)\n return (flat_data - original_means[None, :]).astype('float32'), original_means\n\n\ndef center_each_sample(flat_data):\n \"\"\"\n Makes each sample have an average value of zero\n\n Parameters\n ----------\n flat_data : ndarray(float32 or uint8, size=(D, n))\n n is the dimensionality of a single datapoint and D is the size of the\n dataset over which we are taking the mean.\n\n Returns\n -------\n zero_dc_data : ndarray(float32, size=(D, n))\n The data, now with a DC value of zero\n original_means : ndarray(float32, size=(n,))\n The patch-specific DC values of the original data. 
Can be used to\n uncenter the data later (for instance, after dictionary learning)\n \"\"\"\n assert flat_data.dtype in ['float32', 'uint8']\n original_means = np.mean(flat_data, axis=1)\n return (flat_data - original_means[:, None]).astype('float32'), original_means\n\n\ndef normalize_component_variance(flat_data):\n \"\"\"\n Normalize each component to have a variance of 1 across the dataset\n\n Parameters\n ----------\n flat_data : ndarray(float32 or uint8, size=(D, n))\n n is the dimensionality of a single datapoint and D is the size of the\n dataset over which we are taking the variance.\n\n Returns\n -------\n normalized_data : ndarray(float32 size=(D, n))\n The data, now with variance\n original_variances : ndarray(float32, size=(n,))\n The componentwise variances of the original data. Can be used to\n unnormalize the data later (for instance, after dictionary learning)\n \"\"\"\n assert flat_data.dtype in ['float32', 'uint8']\n original_variances = np.var(flat_data, axis=0)\n return ((flat_data / np.sqrt(original_variances)[None, :]).astype('float32'),\n original_variances)\n\n\ndef patches_from_single_image(image, patch_dimensions, flatten_patches):\n \"\"\"\n Extracts tiled patches from a single image\n\n Parameters\n ----------\n image : ndarray(float32 or uint8, size=(h, w, c))\n An image of height h and width w, with c color channels\n patch_dimensions : tuple(int, int)\n The size in pixels of each patch\n flatten_patches : bool\n Indicates whether to flatten the patches or leave them in\n (ph, pw, c) format.\n\n Returns\n -------\n patches : ndarray(float32, size=(k, ph*pw*c) OR (k, ph, pw, c))\n An array of patches each of height ph and width pw. k is the\n number of total patches that were extracted from the full image\n patch_positions : list(tuple(int, int))\n The position in pixels of the upper-left-hand corner of each patch within\n the full image. Used to re-tile the full image after processing\n the patches\n \"\"\"\n assert image.dtype in ['float32', 'uint8']\n assert image.ndim == 3\n if (image.shape[0] / patch_dimensions[0] % 1 != 0 or\n image.shape[1] / patch_dimensions[1] % 1 != 0):\n print('Warning: image cannot be completely patched with these dimensions.',\n 'Ignoring overflow pixels on the right and bottom of image')\n\n num_patches_vert = image.shape[0] // patch_dimensions[0]\n num_patches_horz = image.shape[1] // patch_dimensions[1]\n patches = np.zeros([num_patches_vert * num_patches_horz,\n patch_dimensions[0], patch_dimensions[1],\n image.shape[2]], dtype=image.dtype)\n patch_positions = [] # keep track of where each patch belongs\n p_idx = 0\n for patch_idx_vert in range(num_patches_vert):\n for patch_idx_horz in range(num_patches_horz):\n pos_vert = patch_idx_vert * patch_dimensions[0]\n pos_horz = patch_idx_horz * patch_dimensions[1]\n patches[p_idx] = image[pos_vert:pos_vert+patch_dimensions[0],\n pos_horz:pos_horz+patch_dimensions[1]]\n patch_positions.append((pos_vert, pos_horz))\n #^ upper left hand corner position in pixels in the original image\n p_idx += 1\n\n if flatten_patches:\n patches = patches.reshape((patches.shape[0], -1))\n\n return patches, patch_positions\n\n\ndef assemble_image_from_patches(patches, patch_dimensions, patch_positions):\n \"\"\"\n Tiles an image from patches\n\n Parameters\n ----------\n patches : ndarray(float32 or uint8, size=(k, ph*pw*c) OR (k, ph, pw, c))\n An array of patches each of height ph and width pw. 
k is the\n number of total patches that were extracted from the full image\n patch_dimensions : tuple(int, int)\n The size in pixels of each patch\n patch_positions : list(tuple(int, int))\n The position in pixels of the upper-left-hand corner of each patch within\n the full image.\n\n Returns\n -------\n image : ndarray(float32 or uint8, size=(h, w, c))\n An image of height h and width w\n \"\"\"\n assert patches.dtype in ['float32', 'uint8']\n\n full_img_height = (np.max([x[0] for x in patch_positions]) +\n patch_dimensions[0])\n full_img_width = (np.max([x[1] for x in patch_positions]) +\n patch_dimensions[1])\n if patches.ndim == 2:\n # flattened patches\n num_color_channels = (patches.shape[1] /\n (patch_dimensions[0]*patch_dimensions[1]))\n assert num_color_channels % 1.0 == 0\n num_color_channels = int(num_color_channels)\n else:\n num_color_channels = patches.shape[-1]\n\n full_img = np.zeros([full_img_height, full_img_width,\n num_color_channels], dtype=patches.dtype)\n for patch_idx in range(patches.shape[0]):\n vert = patch_positions[patch_idx][0]\n horz = patch_positions[patch_idx][1]\n if patches.ndim == 2:\n full_img[vert:vert+patch_dimensions[0], horz:horz+patch_dimensions[1]] = \\\n patches[patch_idx, :].reshape(\n (patch_dimensions[0], patch_dimensions[1], num_color_channels))\n else:\n full_img[vert:vert+patch_dimensions[0], horz:horz+patch_dimensions[1]] = \\\n patches[patch_idx, :]\n\n return full_img\n", "\"\"\"\nUpdates dictionary with a modified descent for convolutional sparse coding.\n\nWhat I mean by convolutional is that the basis functions are convolved with\nthe sparse codes to produce an image. The basis functions will be much smaller\nthan the images. This uses the diagonal of the Hessian to rescale the updates.\nIt's like getting a quadratic update for each coefficient independently, while\nnot computing the full hessian.\n\"\"\"\nimport torch\n\nfrom utils.convolutions import create_mask\n\ndef run(images_padded, dictionary, codes, hessian_diagonal,\n kernel_stride, padding_dims, stepsize=0.001, num_iters=1,\n lowest_code_val=0.001, normalize_dictionary=True):\n \"\"\"\n Runs num_iters steps of an approximate quadratic descent\n\n Parameters\n ----------\n images_padded : torch.Tensor(float32, size=(b, c, h, w))\n A batch of images that we want to find the CONVOLUTIONAL sparse code\n for. b is the number of images. c is the number of image channels, h is\n the (padded) height of the image, while w is the (padded) width.\n dictionary : torch.Tensor(float32, size=(s, c, kh, kw))\n The dictionary of basis functions which we can use to describe the\n images. s is the number of basis functions, the number of channels in the\n resultant code. c is the number of image channels and consequently the\n number of channels for each basis function. kh is the kernel height in\n pixels, while kw is the kernel width.\n codes : torch.Tensor(float32, size=(b, s, sh, sw))\n The inferred convolutional codes for this set of images. b is the number\n of images. s is the number of basis functions, the number of channels in\n the resultant code. sh is the height of the code. sw is the width of the\n code. 
These can both be inferred from the image size and kernel size.\n hessian_diagonal : torch.Tensor(float32, size=(s,))\n An estimate of the diagonal of the hessian that we'll compute outside of\n this function call.\n kernel_stride : tuple(int, int)\n The stride of the kernels in the vertical direction is kernel_stride[0]\n whereas stride in the horizontal direction is kernel_stride[1]\n padding_dims : tuple(tuple(int, int), tuple(int, int))\n The amount of padding that was done to the images -- is used to determine\n the mask. padding_dims[0] is vertical padding and padding_dims[1] is\n horizontal padding. The first component of each of these is the leading\n padding while the second component is the trailing padding.\n stepsize : torch.Tensor(float32), optional\n The step size for each iteration of cheap quadratic descent. Keep this\n small. Default 0.001.\n num_iters : int, optional\n Number of steps of steepest descent to run. Default 1\n lowest_code_val : float, optional\n Used to condition the hessian diagonal to not be too small. Default 0.001\n normalize_dictionary : bool, optional\n If true, we normalize each dictionary element to have l2 norm equal to 1\n before we return. Default True.\n \"\"\"\n reconstruction_mask = create_mask(images_padded, padding_dims)\n codes_temp_transposed = codes.transpose(dim0=1, dim1=0)\n # TODO: Figure out if I can remove the double-transpose in the gradient comp\n for iter_idx in range(num_iters):\n # WARNING: this gradient computation can overflow, adjusting the stepsize\n # or the scale of the data are typical remedies\n gradient = (torch.nn.functional.conv2d(\n (reconstruction_mask * (\n torch.nn.functional.conv_transpose2d(codes, dictionary,\n stride=kernel_stride) - images_padded)).transpose(dim0=1, dim1=0),\n codes_temp_transposed, dilation=kernel_stride) /\n images_padded.shape[0]).transpose(dim0=1, dim1=0)\n # divide each kernel's update by our hessian approximation\n gradient.div_(hessian_diagonal[:, None, None, None] + lowest_code_val)\n # it makes sense to put this update on the same scale as the dictionary\n # so that stepsize is effectively dimensionless\n gradient.mul_(dictionary.norm(p=2) / gradient.norm(p=2))\n dictionary.sub_(stepsize * gradient)\n if normalize_dictionary:\n dictionary.div_(torch.squeeze(dictionary.norm(\n p=2, dim=(1, 2, 3)))[:, None, None, None])\n" ]
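The convolutional update above divides each kernel's gradient by a per-kernel Hessian-diagonal estimate, rescales the step so `stepsize` is dimensionless, and renormalizes the kernels. Below is a minimal non-convolutional sketch of that preconditioning pattern, with flattened kernels and made-up tensors; `quadratic_scaled_step` is an illustrative name, not a function from the repo.

import torch

def quadratic_scaled_step(dictionary, gradient, hessian_diag,
                          stepsize=1e-3, floor=1e-3):
    # Precondition: divide each kernel's gradient by its curvature estimate,
    # conditioned by 'floor' so tiny diagonal entries don't blow up the step.
    g = gradient / (hessian_diag[:, None] + floor)
    # Put the update on the same scale as the dictionary itself so that
    # 'stepsize' is effectively dimensionless (as in the code above).
    g = g * (dictionary.norm(p=2) / g.norm(p=2))
    new_dict = dictionary - stepsize * g
    # Renormalize each kernel to unit l2 norm.
    return new_dict / new_dict.norm(p=2, dim=1, keepdim=True)

torch.manual_seed(0)
D = torch.randn(16, 64)   # 16 flattened kernels
grad = torch.randn(16, 64)
hdiag = torch.rand(16)    # per-kernel Hessian-diagonal estimate
D = quadratic_scaled_step(D, grad, hdiag)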
[ [ "torch.device", "torch.randn", "torch.from_numpy" ], [ "torch.mm", "numpy.min", "numpy.max", "numpy.mean", "torch.utils.tensorboard.SummaryWriter", "matplotlib.pyplot.close" ], [ "numpy.dot", "numpy.sqrt", "numpy.max", "numpy.mean", "numpy.var", "numpy.exp", "scipy.ndimage.convolve1d", "numpy.square", "numpy.arange", "numpy.zeros", "numpy.fft.fft2", "numpy.power", "scipy.signal.convolve2d", "numpy.floor", "numpy.fft.fftfreq", "numpy.meshgrid", "numpy.array", "numpy.sum", "numpy.abs", "numpy.ones" ], [ "torch.nn.functional.conv_transpose2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spacemig/satellite-attitude-simulator
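`get_whitening_ramp_filter` and `filter_fd` above implement zero-phase whitening in the DFT domain, thresholding the filter magnitude so the inverse stays well conditioned. A compact single-channel numpy round trip of the same idea follows (float64 so the inversion is exact to rounding; the helper name is illustrative).

import numpy as np

def ramp_filter_dft(h, w):
    # Magnitude proportional to spatial frequency, floored at 1e-5 so that
    # dividing by the filter (unwhitening) doesn't amplify noise at DC.
    fv, fh = np.fft.fftfreq(h), np.fft.fftfreq(w)
    vv, hh = np.meshgrid(fv, fh, indexing='ij')
    mag = np.sqrt(vv**2 + hh**2)
    mag /= mag.max()
    mag[mag < 1e-5] = 1e-5
    return mag  # zero phase, so the DFT is purely real

rng = np.random.default_rng(0)
image = rng.standard_normal((32, 32))

wf = ramp_filter_dft(*image.shape)
white = np.real(np.fft.ifft2(wf * np.fft.fft2(image)))
recovered = np.real(np.fft.ifft2(np.fft.fft2(white) / wf))
assert np.allclose(recovered, image, atol=1e-8)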
[ "77ad4e05a4e8076aa1e5077023d0ee293ccc4530", "77ad4e05a4e8076aa1e5077023d0ee293ccc4530", "77ad4e05a4e8076aa1e5077023d0ee293ccc4530" ]
[ "thirdparty/pyorbital/orbital.py", "modules/attitude.py", "adcs_sim.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2011, 2012, 2013, 2014, 2015.\n\n# Author(s):\n\n# Esben S. Nielsen <[email protected]>\n# Adam Dybbroe <[email protected]>\n# Martin Raspaud <[email protected]>\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Module for computing the orbital parameters of satellites.\"\"\"\n\nimport warnings\nfrom datetime import datetime, timedelta\n\nfrom scipy import optimize\n\nimport numpy as np\nfrom pyorbital import astronomy, dt2np, tlefile\n\ntry:\n import dask.array as da\n has_dask = True\nexcept ImportError:\n da = None\n has_dask = False\n\ntry:\n import xarray as xr\n has_xarray = True\nexcept ImportError:\n xr = None\n has_xarray = False\n\nECC_EPS = 1.0e-6 # Too low for computing further drops.\nECC_LIMIT_LOW = -1.0e-3\nECC_LIMIT_HIGH = 1.0 - ECC_EPS # Too close to 1\nECC_ALL = 1.0e-4\n\nEPS_COS = 1.5e-12\n\nNR_EPS = 1.0e-12\n\nCK2 = 5.413080e-4\nCK4 = 0.62098875e-6\nE6A = 1.0e-6\nQOMS2T = 1.88027916e-9\nS = 1.01222928\nS0 = 78.0\nXJ3 = -0.253881e-5\nXKE = 0.743669161e-1\nXKMPER = 6378.135\nXMNPDA = 1440.0\n# MFACTOR = 7.292115E-5\nAE = 1.0\nSECDAY = 8.6400E4\n\nF = 1 / 298.257223563 # Earth flattening WGS-84\nA = 6378.137 # WGS84 Equatorial radius\n\n\nSGDP4_ZERO_ECC = 0\nSGDP4_DEEP_NORM = 1\nSGDP4_NEAR_SIMP = 2\nSGDP4_NEAR_NORM = 3\n\nKS = AE * (1.0 + S0 / XKMPER)\nA3OVK2 = (-XJ3 / CK2) * AE**3\n\n\nclass OrbitalError(Exception):\n pass\n\n\ndef get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, lon, lat, alt):\n \"\"\"Calculate observers look angle to a satellite.\n http://celestrak.com/columns/v02n02/\n\n :utc_time: Observation time (datetime object)\n :lon: Longitude of observer position on ground in degrees east\n :lat: Latitude of observer position on ground in degrees north\n :alt: Altitude above sea-level (geoid) of observer position on ground in km\n\n :return: (Azimuth, Elevation)\n \"\"\"\n (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = astronomy.observer_position(\n utc_time, sat_lon, sat_lat, sat_alt)\n\n (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = \\\n astronomy.observer_position(utc_time, lon, lat, alt)\n\n lon = np.deg2rad(lon)\n lat = np.deg2rad(lat)\n\n theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi)\n\n rx = pos_x - opos_x\n ry = pos_y - opos_y\n rz = pos_z - opos_z\n\n sin_lat = np.sin(lat)\n cos_lat = np.cos(lat)\n sin_theta = np.sin(theta)\n cos_theta = np.cos(theta)\n\n top_s = sin_lat * cos_theta * rx + \\\n sin_lat * sin_theta * ry - cos_lat * rz\n top_e = -sin_theta * rx + cos_theta * ry\n top_z = cos_lat * cos_theta * rx + \\\n cos_lat * sin_theta * ry + sin_lat * rz\n\n az_ = np.arctan(-top_e / top_s)\n\n if has_xarray and isinstance(az_, xr.DataArray):\n az_data = az_.data\n else:\n az_data = az_\n\n if has_dask and isinstance(az_data, da.Array):\n az_data = da.where(top_s > 0, az_data + np.pi, az_data)\n az_data = da.where(az_data < 0, az_data + 2 * np.pi, az_data)\n else:\n 
az_data[np.where(top_s > 0)] += np.pi\n az_data[np.where(az_data < 0)] += 2 * np.pi\n\n if has_xarray and isinstance(az_, xr.DataArray):\n az_.data = az_data\n else:\n az_ = az_data\n\n rg_ = np.sqrt(rx * rx + ry * ry + rz * rz)\n el_ = np.arcsin(top_z / rg_)\n\n return np.rad2deg(az_), np.rad2deg(el_)\n\n\nclass Orbital(object):\n\n \"\"\"Class for orbital computations.\n\n The *satellite* parameter is the name of the satellite to work on and is\n used to retrieve the right TLE data for internet or from *tle_file* in case\n it is provided.\n \"\"\"\n\n def __init__(self, satellite, tle_file=None, line1=None, line2=None):\n satellite = satellite.upper()\n self.satellite_name = satellite\n self.tle = tlefile.read(satellite, tle_file=tle_file,\n line1=line1, line2=line2)\n self.orbit_elements = OrbitElements(self.tle)\n self._sgdp4 = _SGDP4(self.orbit_elements)\n\n def __str__(self):\n return self.satellite_name + \" \" + str(self.tle)\n\n def get_last_an_time(self, utc_time):\n \"\"\"Calculate time of last ascending node relative to the\n specified time\n \"\"\"\n\n # Propagate backwards to ascending node\n dt = np.timedelta64(10, 'm')\n t_old = utc_time\n t_new = t_old - dt\n pos0, vel0 = self.get_position(t_old, normalize=False)\n pos1, vel1 = self.get_position(t_new, normalize=False)\n while not (pos0[2] > 0 and pos1[2] < 0):\n pos0 = pos1\n t_old = t_new\n t_new = t_old - dt\n pos1, vel1 = self.get_position(t_new, normalize=False)\n\n # Return if z within 1 km of an\n if np.abs(pos0[2]) < 1:\n return t_old\n elif np.abs(pos1[2]) < 1:\n return t_new\n\n # Bisect to z within 1 km\n while np.abs(pos1[2]) > 1:\n # pos0, vel0 = pos1, vel1\n dt = (t_old - t_new) / 2\n t_mid = t_old - dt\n pos1, vel1 = self.get_position(t_mid, normalize=False)\n if pos1[2] > 0:\n t_old = t_mid\n else:\n t_new = t_mid\n\n return t_mid\n\n def get_position(self, utc_time, normalize=True):\n \"\"\"Get the cartesian position and velocity from the satellite.\n \"\"\"\n\n kep = self._sgdp4.propagate(utc_time)\n pos, vel = kep2xyz(kep)\n\n if normalize:\n pos /= XKMPER\n vel /= XKMPER * XMNPDA / SECDAY\n\n return pos, vel\n\n def get_lonlatalt(self, utc_time):\n \"\"\"Calculate sublon, sublat and altitude of satellite.\n http://celestrak.com/columns/v02n03/\n \"\"\"\n (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = self.get_position(\n utc_time, normalize=True)\n\n lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) - astronomy.gmst(utc_time))\n % (2 * np.pi))\n\n lon = np.where(lon > np.pi, lon - np.pi * 2, lon)\n lon = np.where(lon <= -np.pi, lon + np.pi * 2, lon)\n\n r = np.sqrt(pos_x ** 2 + pos_y ** 2)\n lat = np.arctan2(pos_z, r)\n e2 = F * (2 - F)\n while True:\n lat2 = lat\n c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2)))\n lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r)\n if np.all(abs(lat - lat2) < 1e-10):\n break\n alt = r / np.cos(lat) - c\n alt *= A\n return np.rad2deg(lon), np.rad2deg(lat), alt\n\n def find_aos(self, utc_time, lon, lat):\n pass\n\n def find_aol(self, utc_time, lon, lat):\n pass\n\n def get_observer_look(self, utc_time, lon, lat, alt):\n \"\"\"Calculate observers look angle to a satellite.\n http://celestrak.com/columns/v02n02/\n\n utc_time: Observation time (datetime object)\n lon: Longitude of observer position on ground in degrees east\n lat: Latitude of observer position on ground in degrees north\n alt: Altitude above sea-level (geoid) of observer position on ground in km\n\n Return: (Azimuth, Elevation)\n \"\"\"\n\n utc_time = dt2np(utc_time)\n (pos_x, pos_y, pos_z), (vel_x, vel_y, 
vel_z) = self.get_position(\n utc_time, normalize=False)\n (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = \\\n astronomy.observer_position(utc_time, lon, lat, alt)\n\n lon = np.deg2rad(lon)\n lat = np.deg2rad(lat)\n\n theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi)\n\n rx = pos_x - opos_x\n ry = pos_y - opos_y\n rz = pos_z - opos_z\n\n sin_lat = np.sin(lat)\n cos_lat = np.cos(lat)\n sin_theta = np.sin(theta)\n cos_theta = np.cos(theta)\n\n top_s = sin_lat * cos_theta * rx + \\\n sin_lat * sin_theta * ry - cos_lat * rz\n top_e = -sin_theta * rx + cos_theta * ry\n top_z = cos_lat * cos_theta * rx + \\\n cos_lat * sin_theta * ry + sin_lat * rz\n\n az_ = np.arctan(-top_e / top_s)\n\n az_ = np.where(top_s > 0, az_ + np.pi, az_)\n az_ = np.where(az_ < 0, az_ + 2 * np.pi, az_)\n\n rg_ = np.sqrt(rx * rx + ry * ry + rz * rz)\n el_ = np.arcsin(top_z / rg_)\n\n return np.rad2deg(az_), np.rad2deg(el_)\n\n def get_orbit_number(self, utc_time, tbus_style=False, as_float=False):\n \"\"\"Calculate orbit number at specified time.\n\n Args:\n tbus_style: If True, use TBUS-style orbit numbering (TLE orbit number + 1)\n as_float: Return a continuous orbit number as float.\n \"\"\"\n utc_time = np.datetime64(utc_time)\n try:\n dt = astronomy._days(utc_time - self.orbit_elements.an_time)\n orbit_period = astronomy._days(self.orbit_elements.an_period)\n except AttributeError:\n pos_epoch, vel_epoch = self.get_position(self.tle.epoch,\n normalize=False)\n if np.abs(pos_epoch[2]) > 1 or not vel_epoch[2] > 0:\n # Epoch not at ascending node\n self.orbit_elements.an_time = self.get_last_an_time(\n self.tle.epoch)\n else:\n # Epoch at ascending node (z < 1 km) and positive v_z\n self.orbit_elements.an_time = self.tle.epoch\n\n self.orbit_elements.an_period = self.orbit_elements.an_time - \\\n self.get_last_an_time(self.orbit_elements.an_time\n - np.timedelta64(10, 'm'))\n\n dt = astronomy._days(utc_time - self.orbit_elements.an_time)\n orbit_period = astronomy._days(self.orbit_elements.an_period)\n\n orbit = self.tle.orbit + dt / orbit_period + \\\n self.tle.mean_motion_derivative * dt ** 2 + \\\n self.tle.mean_motion_sec_derivative * dt ** 3\n if not as_float:\n orbit = int(orbit)\n\n if tbus_style:\n orbit += 1\n\n return orbit\n\n def get_next_passes(self, utc_time, length, lon, lat, alt, tol=0.001, horizon=0):\n \"\"\"Calculate passes for the next hours for a given start time and a\n given observer.\n\n Original by Martin.\n\n :utc_time: Observation time (datetime object)\n :length: Number of hours to find passes (int)\n :lon: Longitude of observer position on ground (float)\n :lat: Latitude of observer position on ground (float)\n :alt: Altitude above sea-level (geoid) of observer position on ground (float)\n :tol: precision of the result in seconds\n :horizon: the elevation of horizon to compute risetime and falltime.\n\n :return: [(rise-time, fall-time, max-elevation-time), ...]\n \"\"\"\n\n def elevation(minutes):\n \"\"\"Compute the elevation.\"\"\"\n return self.get_observer_look(utc_time +\n timedelta(\n minutes=np.float64(minutes)),\n lon, lat, alt)[1] - horizon\n\n def elevation_inv(minutes):\n \"\"\"Compute the inverse of elevation.\"\"\"\n return -elevation(minutes)\n\n def get_root(fun, start, end, tol=0.01):\n \"\"\"Root finding scheme\"\"\"\n x_0 = end\n x_1 = start\n fx_0 = fun(end)\n fx_1 = fun(start)\n if abs(fx_0) < abs(fx_1):\n fx_0, fx_1 = fx_1, fx_0\n x_0, x_1 = x_1, x_0\n\n x_n = optimize.brentq(fun, x_0, x_1)\n return x_n\n\n def get_max_parab(fun, start, end, tol=0.01):\n 
\"\"\"Successive parabolic interpolation.\"\"\"\n a = float(start)\n c = float(end)\n b = (a + c) / 2.0\n\n f_a = fun(a)\n f_b = fun(b)\n f_c = fun(c)\n\n x = b\n with np.errstate(invalid='raise'):\n while True:\n try:\n x = x - 0.5 * (((b - a) ** 2 * (f_b - f_c)\n - (b - c) ** 2 * (f_b - f_a)) /\n ((b - a) * (f_b - f_c) - (b - c) * (f_b - f_a)))\n except FloatingPointError:\n return b\n if abs(b - x) <= tol:\n return x\n\n a, b, c = (a + x) / 2.0, x, (x + c) / 2.0\n f_a, f_b, f_c = fun(a), fun(b), fun(c)\n\n # every minute\n times = utc_time + np.array([timedelta(minutes=minutes)\n for minutes in range(length * 60)])\n elev = self.get_observer_look(times, lon, lat, alt)[1] - horizon\n zcs = np.where(np.diff(np.sign(elev)))[0]\n res = []\n risetime = None\n falltime = None\n for guess in zcs:\n horizon_mins = get_root(\n elevation, guess, guess + 1.0, tol=tol / 60.0)\n horizon_time = utc_time + timedelta(minutes=horizon_mins)\n if elev[guess] < 0:\n risetime = horizon_time\n risemins = horizon_mins\n falltime = None\n else:\n falltime = horizon_time\n fallmins = horizon_mins\n if risetime:\n int_start = max(0, int(np.floor(risemins)))\n int_end = min(len(elev), int(np.ceil(fallmins) + 1))\n middle = int_start + np.argmax(elev[int_start:int_end])\n highest = utc_time + \\\n timedelta(minutes=get_max_parab(\n elevation_inv,\n max(risemins, middle - 1), min(fallmins, middle + 1),\n tol=tol / 60.0\n ))\n res += [(risetime, falltime, highest)]\n risetime = None\n return res\n\n def _get_time_at_horizon(self, utc_time, obslon, obslat, **kwargs):\n \"\"\"Get the time closest in time to *utc_time* when the\n satellite is at the horizon relative to the position of an observer on\n ground (altitude = 0)\n\n Note: This is considered deprecated and it's functionality is currently\n replaced by 'get_next_passes'.\n \"\"\"\n warnings.warn(\"_get_time_at_horizon is replaced with get_next_passes\",\n DeprecationWarning)\n if \"precision\" in kwargs:\n precision = kwargs['precision']\n else:\n precision = timedelta(seconds=0.001)\n if \"max_iterations\" in kwargs:\n nmax_iter = kwargs[\"max_iterations\"]\n else:\n nmax_iter = 100\n\n sec_step = 0.5\n t_step = timedelta(seconds=sec_step / 2.0)\n\n # Local derivative:\n def fprime(timex):\n el0 = self.get_observer_look(timex - t_step,\n obslon, obslat, 0.0)[1]\n el1 = self.get_observer_look(timex + t_step,\n obslon, obslat, 0.0)[1]\n return el0, (abs(el1) - abs(el0)) / sec_step\n\n tx0 = utc_time - timedelta(seconds=1.0)\n tx1 = utc_time\n idx = 0\n # eps = 500.\n eps = 100.\n while abs(tx1 - tx0) > precision and idx < nmax_iter:\n tx0 = tx1\n fpr = fprime(tx0)\n # When the elevation is high the scale is high, and when\n # the elevation is low the scale is low\n # var_scale = np.abs(np.sin(fpr[0] * np.pi/180.))\n # var_scale = np.sqrt(var_scale)\n var_scale = np.abs(fpr[0])\n tx1 = tx0 - timedelta(seconds=(eps * var_scale * fpr[1]))\n idx = idx + 1\n # print idx, tx0, tx1, var_scale, fpr\n if abs(tx1 - utc_time) < precision and idx < 2:\n tx1 = tx1 + timedelta(seconds=1.0)\n\n if abs(tx1 - tx0) <= precision and idx < nmax_iter:\n return tx1\n else:\n return None\n\n def utc2local(self, utc_time):\n \"\"\"Convert UTC to local time.\"\"\"\n lon, _, _ = self.get_lonlatalt(utc_time)\n return utc_time + timedelta(hours=lon * 24 / 360.0)\n\n def get_equatorial_crossing_time(self, tstart, tend, node='ascending', local_time=False,\n rtol=1E-9):\n \"\"\"Estimate the equatorial crossing time of an orbit.\n\n The crossing time is determined via the orbit number, which 
increases by one if the\n spacecraft passes the ascending node at the equator. A bisection algorithm is used to find\n the time of that passage.\n\n Args:\n tstart: Start time of the orbit\n tend: End time of the orbit. Orbit number at the end must be at least one greater than\n at the start. If there are multiple revolutions in the given time interval, the\n crossing time of the last revolution in that interval will be computed.\n node: Specifies whether to compute the crossing time at the ascending or descending\n node. Choices: ('ascending', 'descending').\n local_time: By default the UTC crossing time is returned. Use this flag to convert UTC\n to local time.\n rtol: Tolerance of the bisection algorithm. The smaller the tolerance, the more accurate\n the result.\n \"\"\"\n # Determine orbit number at the start and end of the orbit.\n n_start = self.get_orbit_number(tstart, as_float=True)\n n_end = self.get_orbit_number(tend, as_float=True)\n if int(n_end) - int(n_start) == 0:\n # Orbit doesn't cross the equator in the given time interval\n return None\n elif n_end - n_start > 1:\n warnings.warn('Multiple revolutions between start and end time. Computing crossing '\n 'time for the last revolution in that interval.')\n\n # Let n'(t) = n(t) - offset. Determine offset so that n'(tstart) < 0 and n'(tend) > 0 and\n # n'(tcross) = 0.\n offset = int(n_end)\n if node == 'descending':\n offset = offset + 0.5\n\n # Use bisection algorithm to find the root of n'(t), which is the crossing time. The\n # algorithm requires continuous time coordinates, so convert timestamps to microseconds\n # since 1970.\n time_unit = 'us' # same precision as datetime\n\n def _nprime(time_f):\n \"\"\"Continuous orbit number as a function of time.\"\"\"\n time64 = np.datetime64(int(time_f), time_unit)\n n = self.get_orbit_number(time64, as_float=True)\n return n - offset\n\n try:\n tcross = optimize.bisect(_nprime,\n a=np.datetime64(tstart, time_unit).astype(np.int64),\n b=np.datetime64(tend, time_unit).astype(np.int64),\n rtol=rtol)\n except ValueError:\n # Bisection did not converge\n return None\n tcross = np.datetime64(int(tcross), time_unit).astype(datetime)\n\n # Convert UTC to local time\n if local_time:\n tcross = self.utc2local(tcross)\n\n return tcross\n\n\nclass OrbitElements(object):\n\n \"\"\"Class holding the orbital elements.\n \"\"\"\n\n def __init__(self, tle):\n self.epoch = tle.epoch\n self.excentricity = tle.excentricity\n self.inclination = np.deg2rad(tle.inclination)\n self.right_ascension = np.deg2rad(tle.right_ascension)\n self.arg_perigee = np.deg2rad(tle.arg_perigee)\n self.mean_anomaly = np.deg2rad(tle.mean_anomaly)\n\n self.mean_motion = tle.mean_motion * (np.pi * 2 / XMNPDA)\n self.mean_motion_derivative = tle.mean_motion_derivative * \\\n np.pi * 2 / XMNPDA ** 2\n self.mean_motion_sec_derivative = tle.mean_motion_sec_derivative * \\\n np.pi * 2 / XMNPDA ** 3\n self.bstar = tle.bstar * AE\n\n n_0 = self.mean_motion\n k_e = XKE\n k_2 = CK2\n i_0 = self.inclination\n e_0 = self.excentricity\n\n a_1 = (k_e / n_0) ** (2.0 / 3)\n delta_1 = ((3 / 2.0) * (k_2 / a_1**2) * ((3 * np.cos(i_0)**2 - 1) /\n (1 - e_0**2)**(2.0 / 3)))\n\n a_0 = a_1 * (1 - delta_1 / 3 - delta_1**2 - (134.0 / 81) * delta_1**3)\n\n delta_0 = ((3 / 2.0) * (k_2 / a_0**2) * ((3 * np.cos(i_0)**2 - 1) /\n (1 - e_0**2)**(2.0 / 3)))\n\n # original mean motion\n n_0pp = n_0 / (1 + delta_0)\n self.original_mean_motion = n_0pp\n\n # semi major axis\n a_0pp = a_0 / (1 - delta_0)\n self.semi_major_axis = a_0pp\n\n self.period = np.pi * 
2 / n_0pp\n\n self.perigee = (a_0pp * (1 - e_0) / AE - AE) * XKMPER\n\n self.right_ascension_lon = (self.right_ascension\n - astronomy.gmst(self.epoch))\n\n if self.right_ascension_lon > np.pi:\n self.right_ascension_lon -= 2 * np.pi\n\n\nclass _SGDP4(object):\n\n \"\"\"Class for the SGDP4 computations.\n \"\"\"\n\n def __init__(self, orbit_elements):\n self.mode = None\n\n # perigee = orbit_elements.perigee\n self.eo = orbit_elements.excentricity\n self.xincl = orbit_elements.inclination\n self.xno = orbit_elements.original_mean_motion\n # k_2 = CK2\n # k_4 = CK4\n # k_e = XKE\n self.bstar = orbit_elements.bstar\n self.omegao = orbit_elements.arg_perigee\n self.xmo = orbit_elements.mean_anomaly\n self.xnodeo = orbit_elements.right_ascension\n self.t_0 = orbit_elements.epoch\n self.xn_0 = orbit_elements.mean_motion\n # A30 = -XJ3 * AE**3\n\n if not(0 < self.eo < ECC_LIMIT_HIGH):\n raise OrbitalError('Eccentricity out of range: %e' % self.eo)\n elif not((0.0035 * 2 * np.pi / XMNPDA) < self.xn_0 < (18 * 2 * np.pi / XMNPDA)):\n raise OrbitalError('Mean motion out of range: %e' % self.xn_0)\n elif not(0 < self.xincl < np.pi):\n raise OrbitalError('Inclination out of range: %e' % self.xincl)\n\n if self.eo < 0:\n self.mode = self.SGDP4_ZERO_ECC\n return\n\n self.cosIO = np.cos(self.xincl)\n self.sinIO = np.sin(self.xincl)\n theta2 = self.cosIO**2\n theta4 = theta2 ** 2\n self.x3thm1 = 3.0 * theta2 - 1.0\n self.x1mth2 = 1.0 - theta2\n self.x7thm1 = 7.0 * theta2 - 1.0\n\n a1 = (XKE / self.xn_0) ** (2. / 3)\n betao2 = 1.0 - self.eo**2\n betao = np.sqrt(betao2)\n temp0 = 1.5 * CK2 * self.x3thm1 / (betao * betao2)\n del1 = temp0 / (a1**2)\n a0 = a1 * \\\n (1.0 - del1 * (1.0 / 3.0 + del1 * (1.0 + del1 * 134.0 / 81.0)))\n del0 = temp0 / (a0**2)\n self.xnodp = self.xn_0 / (1.0 + del0)\n self.aodp = (a0 / (1.0 - del0))\n self.perigee = (self.aodp * (1.0 - self.eo) - AE) * XKMPER\n self.apogee = (self.aodp * (1.0 + self.eo) - AE) * XKMPER\n self.period = (2 * np.pi * 1440.0 / XMNPDA) / self.xnodp\n\n if self.period >= 225:\n # Deep-Space model\n self.mode = SGDP4_DEEP_NORM\n elif self.perigee < 220:\n # Near-space, simplified equations\n self.mode = SGDP4_NEAR_SIMP\n else:\n # Near-space, normal equations\n self.mode = SGDP4_NEAR_NORM\n\n if self.perigee < 156:\n s4 = self.perigee - 78\n if s4 < 20:\n s4 = 20\n\n qoms24 = ((120 - s4) * (AE / XKMPER))**4\n s4 = (s4 / XKMPER + AE)\n else:\n s4 = KS\n qoms24 = QOMS2T\n\n pinvsq = 1.0 / (self.aodp**2 * betao2**2)\n tsi = 1.0 / (self.aodp - s4)\n self.eta = self.aodp * self.eo * tsi\n etasq = self.eta**2\n eeta = self.eo * self.eta\n psisq = np.abs(1.0 - etasq)\n coef = qoms24 * tsi**4\n coef_1 = coef / psisq**3.5\n\n self.c2 = (coef_1 * self.xnodp * (self.aodp *\n (1.0 + 1.5 * etasq + eeta * (4.0 + etasq)) +\n (0.75 * CK2) * tsi / psisq * self.x3thm1 *\n (8.0 + 3.0 * etasq * (8.0 + etasq))))\n\n self.c1 = self.bstar * self.c2\n\n self.c4 = (2.0 * self.xnodp * coef_1 * self.aodp * betao2 * (\n self.eta * (2.0 + 0.5 * etasq) + self.eo * (0.5 + 2.0 * etasq) - (2.0 * CK2) * tsi /\n (self.aodp * psisq) * (-3.0 * self.x3thm1 * (1.0 - 2.0 * eeta + etasq * (1.5 - 0.5 * eeta)) +\n 0.75 * self.x1mth2 * (2.0 * etasq - eeta * (1.0 + etasq)) *\n np.cos(2.0 * self.omegao))))\n\n self.c5, self.c3, self.omgcof = 0.0, 0.0, 0.0\n\n if self.mode == SGDP4_NEAR_NORM:\n self.c5 = (2.0 * coef_1 * self.aodp * betao2 *\n (1.0 + 2.75 * (etasq + eeta) + eeta * etasq))\n if self.eo > ECC_ALL:\n self.c3 = coef * tsi * A3OVK2 * \\\n self.xnodp * AE * self.sinIO / self.eo\n self.omgcof = 
self.bstar * self.c3 * np.cos(self.omegao)\n\n temp1 = 3.0 * CK2 * pinvsq * self.xnodp\n temp2 = temp1 * CK2 * pinvsq\n temp3 = 1.25 * CK4 * pinvsq**2 * self.xnodp\n\n self.xmdot = (self.xnodp + (0.5 * temp1 * betao * self.x3thm1 + 0.0625 *\n temp2 * betao * (13.0 - 78.0 * theta2 +\n 137.0 * theta4)))\n\n x1m5th = 1.0 - 5.0 * theta2\n\n self.omgdot = (-0.5 * temp1 * x1m5th + 0.0625 * temp2 *\n (7.0 - 114.0 * theta2 + 395.0 * theta4) +\n temp3 * (3.0 - 36.0 * theta2 + 49.0 * theta4))\n\n xhdot1 = -temp1 * self.cosIO\n self.xnodot = (xhdot1 + (0.5 * temp2 * (4.0 - 19.0 * theta2) +\n 2.0 * temp3 * (3.0 - 7.0 * theta2)) * self.cosIO)\n\n if self.eo > ECC_ALL:\n self.xmcof = (-(2. / 3) * AE) * coef * self.bstar / eeta\n else:\n self.xmcof = 0.0\n\n self.xnodcf = 3.5 * betao2 * xhdot1 * self.c1\n self.t2cof = 1.5 * self.c1\n\n # Check for possible divide-by-zero for X/(1+cos(xincl)) when\n # calculating xlcof */\n temp0 = 1.0 + self.cosIO\n if np.abs(temp0) < EPS_COS:\n temp0 = np.sign(temp0) * EPS_COS\n\n self.xlcof = 0.125 * A3OVK2 * self.sinIO * \\\n (3.0 + 5.0 * self.cosIO) / temp0\n\n self.aycof = 0.25 * A3OVK2 * self.sinIO\n\n self.cosXMO = np.cos(self.xmo)\n self.sinXMO = np.sin(self.xmo)\n self.delmo = (1.0 + self.eta * self.cosXMO)**3\n\n if self.mode == SGDP4_NEAR_NORM:\n c1sq = self.c1**2\n self.d2 = 4.0 * self.aodp * tsi * c1sq\n temp0 = self.d2 * tsi * self.c1 / 3.0\n self.d3 = (17.0 * self.aodp + s4) * temp0\n self.d4 = 0.5 * temp0 * self.aodp * tsi * \\\n (221.0 * self.aodp + 31.0 * s4) * self.c1\n self.t3cof = self.d2 + 2.0 * c1sq\n self.t4cof = 0.25 * \\\n (3.0 * self.d3 + self.c1 * (12.0 * self.d2 + 10.0 * c1sq))\n self.t5cof = (0.2 * (3.0 * self.d4 + 12.0 * self.c1 * self.d3 + 6.0 * self.d2**2 +\n 15.0 * c1sq * (2.0 * self.d2 + c1sq)))\n\n elif self.mode == SGDP4_DEEP_NORM:\n raise NotImplementedError('Deep space calculations not supported')\n\n def propagate(self, utc_time):\n kep = {}\n\n # get the time delta in minutes\n # ts = astronomy._days(utc_time - self.t_0) * XMNPDA\n # print utc_time.shape\n # print self.t_0\n utc_time = dt2np(utc_time)\n ts = (utc_time - self.t_0) / np.timedelta64(1, 'm')\n\n em = self.eo\n xinc = self.xincl\n\n xmp = self.xmo + self.xmdot * ts\n xnode = self.xnodeo + ts * (self.xnodot + ts * self.xnodcf)\n omega = self.omegao + self.omgdot * ts\n\n if self.mode == SGDP4_ZERO_ECC:\n raise NotImplementedError('Mode SGDP4_ZERO_ECC not implemented')\n elif self.mode == SGDP4_NEAR_SIMP:\n raise NotImplementedError('Mode \"Near-space, simplified equations\"'\n ' not implemented')\n elif self.mode == SGDP4_NEAR_NORM:\n delm = self.xmcof * \\\n ((1.0 + self.eta * np.cos(xmp))**3 - self.delmo)\n temp0 = ts * self.omgcof + delm\n xmp += temp0\n omega -= temp0\n tempa = 1.0 - \\\n (ts *\n (self.c1 + ts * (self.d2 + ts * (self.d3 + ts * self.d4))))\n tempe = self.bstar * \\\n (self.c4 * ts + self.c5 * (np.sin(xmp) - self.sinXMO))\n templ = ts * ts * \\\n (self.t2cof + ts *\n (self.t3cof + ts * (self.t4cof + ts * self.t5cof)))\n a = self.aodp * tempa**2\n e = em - tempe\n xl = xmp + omega + xnode + self.xnodp * templ\n\n else:\n raise NotImplementedError('Deep space calculations not supported')\n\n if np.any(a < 1):\n raise Exception('Satellite crashed at time %s', utc_time)\n elif np.any(e < ECC_LIMIT_LOW):\n raise ValueError('Satellite modified eccentricity too low: %s < %e'\n % (str(e[e < ECC_LIMIT_LOW]), ECC_LIMIT_LOW))\n\n e = np.where(e < ECC_EPS, ECC_EPS, e)\n e = np.where(e > ECC_LIMIT_HIGH, ECC_LIMIT_HIGH, e)\n\n beta2 = 1.0 - e**2\n\n # Long period 
periodics\n sinOMG = np.sin(omega)\n cosOMG = np.cos(omega)\n\n temp0 = 1.0 / (a * beta2)\n axn = e * cosOMG\n ayn = e * sinOMG + temp0 * self.aycof\n xlt = xl + temp0 * self.xlcof * axn\n\n elsq = axn**2 + ayn**2\n\n if np.any(elsq >= 1):\n raise Exception('e**2 >= 1 at %s', utc_time)\n\n kep['ecc'] = np.sqrt(elsq)\n\n epw = np.fmod(xlt - xnode, 2 * np.pi)\n # needs a copy in case of an array\n capu = np.array(epw)\n maxnr = kep['ecc']\n for i in range(10):\n sinEPW = np.sin(epw)\n cosEPW = np.cos(epw)\n\n ecosE = axn * cosEPW + ayn * sinEPW\n esinE = axn * sinEPW - ayn * cosEPW\n f = capu - epw + esinE\n if np.all(np.abs(f) < NR_EPS):\n break\n\n df = 1.0 - ecosE\n\n # 1st order Newton-Raphson correction.\n nr = f / df\n\n # 2nd order Newton-Raphson correction.\n nr = np.where(np.logical_and(i == 0, np.abs(nr) > 1.25 * maxnr),\n np.sign(nr) * maxnr,\n f / (df + 0.5 * esinE * nr))\n epw += nr\n\n # Short period preliminary quantities\n temp0 = 1.0 - elsq\n betal = np.sqrt(temp0)\n pl = a * temp0\n r = a * (1.0 - ecosE)\n invR = 1.0 / r\n temp2 = a * invR\n temp3 = 1.0 / (1.0 + betal)\n cosu = temp2 * (cosEPW - axn + ayn * esinE * temp3)\n sinu = temp2 * (sinEPW - ayn - axn * esinE * temp3)\n u = np.arctan2(sinu, cosu)\n sin2u = 2.0 * sinu * cosu\n cos2u = 2.0 * cosu**2 - 1.0\n temp0 = 1.0 / pl\n temp1 = CK2 * temp0\n temp2 = temp1 * temp0\n\n # Update for short term periodics to position terms.\n\n rk = r * (1.0 - 1.5 * temp2 * betal * self.x3thm1) + \\\n 0.5 * temp1 * self.x1mth2 * cos2u\n uk = u - 0.25 * temp2 * self.x7thm1 * sin2u\n xnodek = xnode + 1.5 * temp2 * self.cosIO * sin2u\n xinck = xinc + 1.5 * temp2 * self.cosIO * self.sinIO * cos2u\n\n if np.any(rk < 1):\n raise Exception('Satellite crashed at time %s', utc_time)\n\n temp0 = np.sqrt(a)\n temp2 = XKE / (a * temp0)\n rdotk = ((XKE * temp0 * esinE * invR - temp2 * temp1 * self.x1mth2 * sin2u) *\n (XKMPER / AE * XMNPDA / 86400.0))\n rfdotk = ((XKE * np.sqrt(pl) * invR + temp2 * temp1 *\n (self.x1mth2 * cos2u + 1.5 * self.x3thm1)) *\n (XKMPER / AE * XMNPDA / 86400.0))\n\n kep['radius'] = rk * XKMPER / AE\n kep['theta'] = uk\n kep['eqinc'] = xinck\n kep['ascn'] = xnodek\n kep['argp'] = omega\n kep['smjaxs'] = a * XKMPER / AE\n kep['rdotk'] = rdotk\n kep['rfdotk'] = rfdotk\n\n return kep\n\n\ndef kep2xyz(kep):\n sinT = np.sin(kep['theta'])\n cosT = np.cos(kep['theta'])\n sinI = np.sin(kep['eqinc'])\n cosI = np.cos(kep['eqinc'])\n sinS = np.sin(kep['ascn'])\n cosS = np.cos(kep['ascn'])\n\n xmx = -sinS * cosI\n xmy = cosS * cosI\n\n ux = xmx * sinT + cosS * cosT\n uy = xmy * sinT + sinS * cosT\n uz = sinI * sinT\n\n x = kep['radius'] * ux\n y = kep['radius'] * uy\n z = kep['radius'] * uz\n\n vx = xmx * cosT - cosS * sinT\n vy = xmy * cosT - sinS * sinT\n vz = sinI * cosT\n\n v_x = kep['rdotk'] * ux + kep['rfdotk'] * vx\n v_y = kep['rdotk'] * uy + kep['rfdotk'] * vy\n v_z = kep['rdotk'] * uz + kep['rfdotk'] * vz\n\n return np.array((x, y, z)), np.array((v_x, v_y, v_z))\n\n\nif __name__ == \"__main__\":\n obs_lon, obs_lat = np.deg2rad((12.4143, 55.9065))\n obs_alt = 0.02\n o = Orbital(satellite=\"METOP-B\")\n\n t_start = datetime.now()\n t_stop = t_start + timedelta(minutes=20)\n t = t_start\n while t < t_stop:\n t += timedelta(seconds=15)\n lon, lat, alt = o.get_lonlatalt(t)\n lon, lat = np.rad2deg((lon, lat))\n az, el = o.get_observer_look(t, obs_lon, obs_lat, obs_alt)\n ob = o.get_orbit_number(t, tbus_style=True)\n print(az, el, ob)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 25 15:46:56 2013\n\n@author: 
miguelnunes\n\nnotes on the attitude code\n- for books and papers the quaternion representation with scalar last is common\n but for programming it's more common to find scalar first representation\n- to avoid confusion I use: \n q = [q1,q2,q3, q4] for scalar last (q4) \n and \n q = [q0, q1,q2,q3] for scalar first (q0) \n \n notice that q1,q2,q3 are the vector components for both cases\n \n references:\n [1] - Analytical Mechanics of Space Systems, Shaub & Junkins, 2003\n\n\"\"\"\n\nimport numpy as np\nfrom numpy.linalg import inv\nfrom scipy import integrate\n#import matplotlib.pyplot as plt\n#from matplotlib import plot, ion, show\nfrom matplotlib.pylab import *\n#import transformations as tr\n#from transformations import *\n\n#########################################\n# FUNCTIONS\n#########################################\n#def skew(x):\n# # quaternion representation, scalar last, q=(q1,q2,q3,q4_scalar)\n# return array( [ ( 0 , -x[2], x[1]), \n# ( x[2], 0, -x[0]),\n# (-x[1], x[0], 0 ) ] )\n\ndef skew(x):\n # quaternion representation, scalar first, q=(q0,q1,q2,q3)\n # MUST CHECK!!! now it's probably the same as scalar last!\n return np.array( [ ( 0 , -x[2], x[1]), \n ( x[2], 0, -x[0]),\n (-x[1], x[0], 0 ) ] )\n#def skew4(x):\n# # quaternion representation, scalar last, q=(q1,q2,q3,q4_scalar)\n# # eqn 2.1.7, pg 8\n# return array( [ ( 0 , x[2], -x[1], x[0]), \n# ( -x[2], 0 , x[0], x[1]),\n# ( x[1], -x[0], 0 , x[2]),\n# ( -x[0], -x[1], -x[2], 0) ] )\n\ndef skew4(x):\n # quaternion representation, scalar first, q=(q0,q1,q2,q3)\n # reference [1.100]\n w1 = x[0]\n w2 = x[1]\n w3 = x[2]\n return np.array( [ [ 0, -w1, -w2, -w3], \n [ w1, 0 , w3, -w2],\n [ w2, -w3, 0 , w1],\n [ w3, w2, -w1, 0 ] ] )\n\n#def euler2quaternion_test(euler):\n# # from http://www.gamedev.net/topic/597324-quaternion-to-euler-angles-and-back-why-is-the-rotation-changing/\n# eX,eY,eZ = euler[0],euler[1],euler[2]\n# \n# c1 = cos(eX/2);\n# s1 = sin(eX/2);\n# \n# c2 = cos(eY/2);\n# s2 = sin(eY/2);\n# \n# c3 = cos(eZ/2);\n# s3 = sin(eZ/2);\n# \n# qx = s1*c2*c3 + c1*s2*s3;\n# qy = c1*s2*c3 - s1*c2*s3;\n# qz = c1*c2*s3 + s1*s2*c3; \n# \n# qw = c1*c2*c3 - s1*s2*s3;\n# \n# return array([qx,qy,qz,qw])\n \ndef quaternion_from_euler(euler):\n \"\"\"\n Convert euler angles to a quaternion with scalar first representation\n \"\"\"\n # aerospace standard Euler angles sequence Z,Y,X = yaw, pitch, roll\n # 1, psi - z (yaw,heading)\n # 2, theta - y (pitch)\n # 3, phi - x (roll)\n \n # check if pitch is not in [-90, 90] deg domain\n if euler[1] >= pi/2:\n print(\">>> WARNING! Pitch is more than 90 deg. Results may not be accurate\")\n \n if euler[1] <= -pi/2:\n print(\">>> WARNING! Pitch is less than -90 deg. Results may not be accurate\")\n \n \n #angles = array([r, p, y])\n c = np.cos( euler/2. )\n s = np.sin( euler/2. 
)\n \n # formula from Space Vehicle Dynamics and Control, Wie, pg 338\n # q1,q2,q3,q4/scalar\n #q = [s(:,1).*c(:,2).*c(:,3) - c(:,1).*s(:,2).*s(:,3), ...\n # c(:,1).*s(:,2).*c(:,3) + s(:,1).*c(:,2).*s(:,3), ...\n # c(:,1).*c(:,2).*s(:,3) - s(:,1).*s(:,2).*c(:,3), ...\n # c(:,1).*c(:,2).*c(:,3) + s(:,1).*s(:,2).*s(:,3)];\n \n # eqn A.2.15\n# q1 = s[0]*c[1]*c[2] - c[0]*s[1]*s[2]\n# q2 = c[0]*s[1]*c[2] + s[0]*c[1]*s[2]\n# q3 = c[0]*c[1]*s[2] - s[0]*s[1]*c[2]\n# q4 = c[0]*c[1]*c[2] + s[0]*s[1]*s[2]\n\n # from book: Quaternions and Rotation Sequences pg 167 \n # scalar first representation \n q0 = c[0]*c[1]*c[2] + s[0]*s[1]*s[2]\n q1 = c[0]*c[1]*s[2] - s[0]*s[1]*c[2]\n q2 = c[0]*s[1]*c[2] + s[0]*c[1]*s[2]\n q3 = s[0]*c[1]*c[2] - c[0]*s[1]*s[2]\n\n #scalar first\n return np.array([ q0, q1, q2, q3])\n\n\n# before quaternion2euler_aero\ndef euler_from_quaternion(q):\n # from book: Quaternions and Rotation Sequences pg 168\n # assumes q = [q0, q1, q2, q3] (scalar first)\n\n dcm = dcm_from_quaternion(q)\n \n psi = np.arctan2(dcm[0,1],dcm[0,0]) #yaw\n theta = np.arcsin(-dcm[0,2]) #pitch \n phi = np.arctan2(dcm[1,2],dcm[2,2]) #roll \n \n return np.array([psi,theta,phi])\n \n\ndef euler_from_quaternion_scalar_last(q):\n # ref: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\n # assumes q = [q1, q2, q3, q4] (scalar last)\n\n q1,q2,q3,q4 = q \n \n phi = np.arctan2( 2*(q4*q1 + q2*q3), 1-2*(q1*q1 + q2*q2) ) #roll\n theta = np.arcsin( 2*(q4*q2 - q3*q1) ) #pitch \n psi = np.arctan2( 2*(q4*q3 + q1*q2), 1-2*(q2*q2 + q3*q3)) #yaw \n \n return np.array([psi,theta,phi])\n\n#def quaternion2euler_test(q):\n# # from http://www.gamedev.net/topic/597324-quaternion-to-euler-angles-and-back-why-is-the-rotation-changing/\n# \n# qx,qy,qz,qw = q[0],q[1],q[2],q[3]\n# \n# eX = arctan2(-2*(qy*qz-qw*qx), qw*qw-qx*qx-qy*qy+qz*qz)\n# eY = arcsin(2*(qx*qz + qw*qy))\n# eZ = arctan2(-2*(qx*qy-qw*qz), qw*qw+qx*qx-qy*qy-qz*qz)\n# \n# return array([eX,eY,eZ])\n \ndef quatNorm(q):\n '''\n normalize quaternion\n '''\n return q/np.sqrt(np.dot(q,q))\n \n# ------------------------------------------------------------------------------\n# DCM operations\n\ndef dcm_from_euler(euler):\n # from book: Quaternions and Rotation Sequences pg 167\n \n psi, theta, phi = euler\n \n cpsi = np.cos(psi)\n spsi = np.sin(psi)\n \n ctheta = np.cos(theta)\n stheta = np.sin(theta)\n \n cphi = np.cos(phi) \n sphi = np.sin(phi)\n \n return np.array([\n [cpsi*ctheta , spsi*ctheta , -stheta ],\n [cpsi*stheta*sphi - spsi*cphi , spsi*stheta*sphi + cpsi*cphi , ctheta*sphi],\n [cpsi*stheta*cphi + spsi*sphi , spsi*stheta*cphi - cpsi*sphi , ctheta*cphi]\n ])\n \n\n#def dcm_from_quaternion(q):\n# #eqn A.2.13\n# # from Wertz pg 414\n# # q = [q1,q2,q3,q4_scalar)]\n# q1,q2,q3,q4 = q[0],q[1],q[2],q[3]\n# \n# return array([\n# [q1**2 - q2**2 -q3**2 + q4**2, 2*(q1*q2-q3*q4), 2*(q1*q3+q2*q4)],\n# [2*(q1*q2+q3*q4), -q1**2 + q2**2 - q3**2 + q4**2, 2*(q2*q3-q1*q4)],\n# [2*(q1*q3-q2*q4), 2*(q2*q3+q1*q4), -q1**2 - q2**2 + q3**2 + q4**2]\n# ])\n\n# before: quaternion2dcm_aero \ndef dcm_from_quaternion(q):\n # from book: Quaternions and Rotation Sequences pg 168\n q0,q1,q2,q3 = q #[0],q[1],q[2],q[3]\n \n return np.array([\n [2*q0**2-1+2*q1**2, 2*(q1*q2+q0*q3), 2*(q1*q3-q0*q2)],\n [2*(q1*q2-q0*q3), 2*q0**2-1+2*q2**2, 2*(q2*q3+q0*q1)],\n [2*(q1*q3+q0*q2), 2*(q2*q3-q0*q1), 2*q0**2-1+2*q3**2]\n ])\n\n###############################################################################\n# Rotations\n\n# difference between passive and active transformation\n# 
passive transformation - rotates the frame, but the point/vector remains fixed\n# active transformation - rotates the point/vector but the frame remains fixed\n# http://en.wikipedia.org/wiki/Active_and_passive_transformation\ndef rotX3d_passive(angle):\n # coordinate frame transformation (passive transformation/rotation) \n # through the given angle \n cang = np.cos(angle)\n sang = np.sin(angle)\n \n return np.array([\n [1, 0 , 0 ],\n [0, cang, sang ],\n [0 , -sang, cang ],\n ])\ndef rotY3d_passive(angle):\n # coordinate frame transformation (passive transformation/rotation) \n # through the given angle \n cang = np.cos(angle)\n sang = np.sin(angle)\n \n return np.array([\n [ cang , 0 , -sang],\n [ 0 , 1 , 0 ],\n [ sang , 0 , cang]\n ])\ndef rotZ3d_passive(angle):\n # coordinate frame transformation (passive transformation/rotation) \n # through the given angle \n cang = np.cos(angle)\n sang = np.sin(angle)\n \n return np.array([\n [ cang, sang, 0],\n [-sang, cang, 0],\n [ 0 , 0 , 1]\n ])\n\n\ndef rotX3d_active(angle):\n # point transformation (active transformation/rotation)\n # through the given angle\n \n # it's just the inverse or transpose of the passive transformation\n return rotX3d_passive(angle).T\n\ndef rotY3d_active(angle):\n # point transformation (active transformation/rotation)\n # through the given angle\n \n # it's just the inverse or transpose of the passive transformation\n return rotY3d_passive(angle).T\n \ndef rotZ3d_active(angle):\n # point transformation (active transformation/rotation)\n # through the given angle\n \n # it's just the inverse or transpose of the passive transformation\n return rotZ3d_passive(angle).T\n\n\n \n#########################################\n# DYNAMICS + KINEMATICS\n#########################################\n# dynamics\n#omega = array( [1,2,3] )\n#torque = array( [0,0,0] )\n#domega = dot(inv(I), dot( dot(-skew(omega), Inertia), omega) + torque )\n\ndef attitude_dynamics(X, t, Torque, Inertia):\n #global Inertia\n \n #state vector x = (q0,q1,q2,q3, wx,wy,wz)\n q = X[0:4]\n omega_b_i = X[4:7]\n \n #print(omega_b_i)\n\n #satellite kinematics\n #omega_b_o is angular velocity of body frame wrt orbital???? or inertial .. 
frame \n # represented in body frame - IMU\n #omega_b_i = omega\n dq = 0.5*np.dot(skew4(omega_b_i),q)\n \n #print(dq)\n\n #satellite dynamics, pg 6\n # inv(skew_omega * Inertia) * omega + torque\n domega = np.dot(inv(Inertia), np.dot( np.dot(-skew(omega_b_i), Inertia), omega_b_i) + Torque)\n\n #print(domega)\n \n dX = np.concatenate((dq,domega),axis=None)\n #print(dX)\n \n # return state vector q, omega\n return dX\n\n#########################################\n# Linearization\n#########################################\n# linearization for quaternion scalar first formulation\ndef linearSystem_(w):\n #A = zeros([4,4])\n \n '''\n A[0,0] = 0\n A[0,1] = w[0]\n A[0,2] = w[1]\n A[0,3] = w[2]\n \n A[1,0] = w[0]\n A[1,1] = 0\n A[1,2] = w[2]\n A[1,3] = -w[1]\n \n A[2,0] = w[1]\n A[2,1] = -w[2]\n A[2,2] = 0\n A[2,3] = w[0]\n \n A[3,0] = w[2] \n A[3,1] = w[1]\n A[3,2] = -w[0]\n A[3,3] = 0\n '''\n \n A = 0.5*np.array([ ( 0 , -w[0], -w[1], -w[2] ),\n ( w[0], 0 , w[2], -w[1] ),\n ( w[1], -w[2], 0 , w[0] ),\n ( w[2], w[1], -w[0], 0 )])\n \n #A = 0.5*A\n return A\n\n\n# linearization for quaternion scalar last formulation (default!)\ndef linear_dynamics(omega,inertia):\n \n I_x = inertia[0,0]\n I_y = inertia[1,1]\n I_z = inertia[2,2]\n \n omega_x = omega[0]\n omega_y = omega[1]\n omega_z = omega[2]\n \n # attitude section\n A_q = 0.5*np.array([ ( 0 , omega_z, -omega_y, omega_x ),\n (-omega_z, 0 , omega_x, omega_y ),\n ( omega_y, -omega_x, 0 , omega_z ),\n (-omega_x, -omega_y, -omega_z, 0 )])\n \n # append the zeros matrix for the attitude part\n A_q = np.concatenate((A_q,np.zeros([4,3])),axis=1)\n\n\n # dynamics section\n \n k_1 = -(I_x - I_z)/I_y\n k_2 = (I_x - I_y)/I_z\n k_3 = (I_y - I_z)/I_x\n \n A_omega = np.array([ ( 0 , k_3*omega_z , k_3*omega_y), \n ( k_1*omega_z, 0 , k_1*omega_x), \n ( k_2*omega_y, k_2*omega_x , 0 ) ]) \n\n #print A_omega \n # append the zeros matrix for the attitude part\n A_omega = np.concatenate( (np.zeros([3,4]),A_omega), axis=1)\n \n A = np.concatenate ((A_q,A_omega), axis=0)\n #print \"#############################\"\n #print A_omega\n return A", "# -*- coding: utf-8 -*-\n# Python Space Simulator v 0.0.1\n# Test ADCS control\n\n# notes\n# - variables with _data are the collection of all data for that variable,\n# before I used bucket\n# - scalars are lower cap, vector and matrices are all CAPS\n\n# system modules\n#import os\n#import sys\nfrom time import time #, sleep\nimport numpy as np\nfrom scipy.integrate import odeint\n#from math import pow, degrees, radians, pi, sin, cos, sqrt\nimport matplotlib.pyplot as plt\n#import pdb; \n#from mod_attitude import *\nimport modules.attitude as att\n\ntic = time()\n\n# ------------------------------------------------------------------------------\n# GLOBAL CONSTANTS\ninRadians = np.pi/180.0; # conversion to radians\ninDegrees = 180.0/np.pi; # conversion to degrees\nmu = 3.986004e14; # gravitational parameter [m^3/s^2]\nr_earth = 6378.14e3; # Earth Radius [m]\n\n# ------------------------------------------------------------------------------\n# CONTROL VARIABLES\nnOrbits = 0.1; #number of orbits\ndt = 1.0; # time step\nmodel = 'hiakasat';\n\n#-------------------------------------------------------------------------------\n# INITIALIZATION\n# initial angular speed [deg/s]\n\n# euler sequence: yaw, pitch, roll\n# \neuler_0 = np.array([100,0,0]) * inRadians;\n\n# convert euler angles to quaternions\n# q is defined with scalar first [q0 q1 q2 q3]\nq_0 = att.quaternion_from_euler(euler_0);\n\n# \nomega_0 = np.array([0.0, 0.0, 5.0]) * inRadians;\n\n# orbital 
data\nh_sat = 400e3; # height [m]\nr_sat = r_earth + h_sat; # radius [m]\nv = np.sqrt(mu/r_sat); # speed [m/s]\nP = 2*np.pi*r_sat/v; #period [s]\nPminutes = P/60;\n\ntf = nOrbits*P;#sec\n#tf = 1000;\n\n# for gravity gradient stability\n# Iy > Ix > Iz\nI_xx=2.5448;\nI_yy=2.4444;\nI_zz=2.6052;\n\nInertia = np.diag([I_xx,I_yy,I_zz]);\n\n##\n# state vector\nX_0 = np.concatenate([q_0,omega_0]);\n#t = 0:dt:tf; \n# set time points\ntime_data = np.arange(0,tf,dt)\n\nTorque_0 = np.array([0.0, 0.0 , 0.0]);\n\n# initialize data buckets with zeros\nX_data = np.zeros([np.alen(time_data),7]);\nattitude_error_data = np.zeros([np.alen(time_data),3]);\nTorque_data = np.zeros([np.alen(time_data),3]);\n\n# assign initial value for simulation\nX = X_0;\nTorque = Torque_0\n\n# desired attitude\neuler_ref = np.array([0.0,0.0,0.0]);\n\nerror_last = np.array([0.0,0.0,0.0]);\n\n# -----------------------------------------------------------------------------\n# start attitude propagation simulation\n\n#X_data = integrate.odeint(attitude_dynamics, X_0, time_data, args=(Torque,Inertia))\n \nfor k in range(time_data.size):\n #print time_data[k]\n Xk = odeint(att.attitude_dynamics, X, [0,dt/2.,dt], args=(Torque,Inertia))\n X = Xk[-1]\n X_data[k,:] = X \n \n #pdb.set_trace()\n # compute control Torque\n # \n q = X[0:4];\n euler = att.euler_from_quaternion(q)\n \n # compute error\n error = euler_ref - euler\n derror = (error-error_last)/dt\n error_last = error\n \n # for symmetric Inertia = 1\n #kp = 0.01\n #kd = 0.1\n \n # for hiakasat Inertia\n kp = 0.001\n kd = 0.1\n \n tx = kp*error[2] + kd*derror[2]\n ty = kp*error[1] + kd*derror[1]\n tz = kp*error[0] + kd*derror[0]\n \n Torque = np.array([tx,ty,tz])\n Torque_data[k,:] = Torque\n \n\n# assign\nq_data = X_data[:,0:4];\nomega_data = X_data[:,4:7];\n\n# convert quaternions to euler\neuler_data = np.zeros([np.alen(time_data),3]);\nfor k in range(time_data.size):\n euler_data[k,:] = att.euler_from_quaternion(q_data[k,:]);\n\n#-------------------------------------------------------------------------------\n# INIT FIGURE\n#-------------------------------------------------------------------------------\n\n#turn interactive mode on\n#ion()\n\n# to get size: fig.get_size_inches()\nfig = plt.figure(1) #figsize=(6.5, 9.5))\n\n#clear the figure\nfig.clf()\n#fig.canvas.manager.window.move(900,0) # in pixels from top-left corner\n\n#ax = fig.add_subplot(111)\n\ntime_data = time_data/60\n\n# plot measurements and compare with estimated filter measurements\nplt.subplot(311)\nplt.plot(time_data, q_data)\nplt.legend(['$q_0$','$q_1$','$q_2$','$q_3$'])\nplt.ylim(-1.1, 1.1)\n\n#legend(['$\\omega_x$','$\\omega_y$','$\\omega_z$'])\n#ylabel('$\\Omega$ [deg/sec]')\n\nplt.subplot(312)\nplt.plot(time_data, euler_data*180/np.pi) \nplt.legend(['$\\\\psi (yaw)$','$\\\\theta (pitch)$','$\\phi (roll)$'])\nplt.ylim(-200, 200)\n\nplt.subplot(313)\nplt.plot(time_data, Torque_data) \nplt.legend(['$T_x$','$T_y$','$T_z$'])\nplt.ylim(-0.200, 0.0200)\n\n# to redraw\nplt.show()\n#-------------------------------------------------------------------------------\n# ELAPSED TIME\n#-------------------------------------------------------------------------------\ntoc = time()\n\nprint(\"Elapsed time: {:.3f} s\".format(toc-tic))" ]
[ [ "numpy.sqrt", "numpy.arctan", "numpy.rad2deg", "numpy.arctan2", "numpy.any", "numpy.where", "numpy.fmod", "numpy.arcsin", "numpy.sin", "numpy.ceil", "numpy.argmax", "numpy.timedelta64", "numpy.deg2rad", "numpy.floor", "numpy.errstate", "numpy.array", "numpy.abs", "numpy.cos", "numpy.datetime64", "numpy.sign", "numpy.float64", "scipy.optimize.brentq" ], [ "numpy.dot", "numpy.arcsin", "numpy.linalg.inv", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "numpy.diag", "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.alen", "numpy.arange", "matplotlib.pyplot.ylim", "scipy.integrate.odeint", "numpy.concatenate", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
mattkjames7/groundmag
[ "38d66aaad8ad68bae8a42805fde3e92c3180aa84" ]
[ "groundmag/PlotPolarization.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport wavespec as ws\nfrom .Spectrogram3D import Spectrogram3D\nfrom .GetStationInfo import GetStationInfo\n\ndef PlotPolarization(Station,Date,wind,slip,ut=None,high=None,low=None,Freq=None,comps=['x','y'],Threshold=0.0,Method='FFT',WindowFunction=None,Param=None,Detrend=True,FindGaps=True,fig=None,maps=[1,1,0,0],TimeAxisUnits='hh:mm',nox=False,Multiplier=1.0,trange=None,useytitle=False):\n\n\t#create title string\n\tstn = GetStationInfo(Station)\n\ttitle = Station.upper()\n\tpos = '(mlat={:3.1f},mlon={:3.1f})'.format(stn.mlat[0],stn.mlon[0])\n\n\t#check if data are filtered\n\tfilt = 'Filtered: '\n\tif not low is None:\n\t\tfilt += 'low = {:3.1f} s '.format(np.float32(low))\n\tif not high is None:\n\t\tfilt += 'high = {:3.1f} s '.format(np.float32(high))\n\tif high is None and low is None:\n\t\tfilt = None\n\n\n\t#get the spectrogram\n\tNw,LenW,Freq,Spec = Spectrogram3D(Station,Date,wind,slip,ut=ut,high=high,low=low,Freq=Freq,Method=Method,WindowFunction=WindowFunction,Param=Param,Detrend=Detrend,FindGaps=FindGaps,GoodData=None)\n\t\n\t#combine the appropriate components\n\tP = Spec[comps[0]+'Pow'] + Spec[comps[1]+'Pow']\n\t\n\t#now find the most powerful peak along the time axis\n\tpk = ws.DetectWaves.DetectWavePeaks(Spec.Tspec,Freq,P,Threshold,True)\n\t\n\t#get the amplitudes and phases\n\tAx = Spec.xAmp[pk.tind,pk.find]\n\tPx = Spec.xPha[pk.tind,pk.find]\n\t\n\tAy = Spec.yAmp[pk.tind,pk.find]\n\tPy = Spec.yPha[pk.tind,pk.find]\n\t\n\tAz = Spec.zAmp[pk.tind,pk.find]\n\tPz = Spec.zPha[pk.tind,pk.find]\n\t\n\tDir = Spec.kz[pk.tind,pk.find]\n\t\n\tpol = ws.Tools.Polarization2D(Ax**2,Px,Ay**2,Py)\n\n\t#plot the polarization ellipses\n\tax = ws.Tools.PlotPolarization(pk.t,Ax,Ay,Px,Py,Dir,fig=fig,maps=maps,Multiplier=Multiplier,nox=nox,trange=None,TimeAxisUnits=TimeAxisUnits)\n\t\n\t#add the title\n\tif useytitle:\n\t\tax.set_ylabel(title)\t\n\t\tax.text(0.02,0.97,pos,transform=ax.transAxes,va='top')\n\telse:\n\t\tax.text(0.02,0.97,title+' '+pos,transform=ax.transAxes,va='top')\n\t\t\n\tif not filt is None:\n\t\tax.text(0.02,0.03,filt,transform=ax.transAxes,va='bottom')\n\t\n\t\n\treturn ax,Spec,pk,pol\n" ]
[ [ "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
futureseadev/Off-policy-off-line-ranking-evaluation
[ "3f67ed48a759e3ab8788c05f1c31232a4d6e2fd1" ]
[ "src/data_synthesis.py" ]
[ "import pandas as pd\nimport numpy as np\n\n\ndef create_context_vector():\n def parse_line_with_slash(line):\n split_line = line.split()\n return split_line[0], split_line[1].split('|')\n\n descriptionid_tokensid = []\n purchasedkeywordid_tokensid = []\n queryid_tokensid = []\n titleid_tokensid = []\n userid_profile = []\n\n file_1 = open('data/track2/descriptionid_tokensid.txt', 'r')\n file_2 = open('data/track2/purchasedkeywordid_tokensid.txt', 'r')\n file_3 = open('data/track2/queryid_tokensid.txt', 'r')\n file_4 = open('data/track2/titleid_tokensid.txt', 'r')\n file_5 = open('data/track2/userid_profile.txt', 'r')\n\n for line in file_1:\n new_info = parse_line_with_slash(line)\n descriptionid_tokensid.append(new_info)\n\n for line in file_2:\n new_info = parse_line_with_slash(line)\n purchasedkeywordid_tokensid.append(new_info)\n\n for line in file_3:\n new_info = parse_line_with_slash(line)\n queryid_tokensid.append(new_info)\n\n for line in file_4:\n new_info = parse_line_with_slash(line)\n titleid_tokensid.append(new_info)\n\n for line in file_5:\n new_info = line.split()\n userid_profile.append(new_info)\n\n with open('data/track2/training.txt') as file:\n head = [next(file) for _ in range(10)]\n train = np.array(list(map(lambda x: x.split(), head)))\n\n data = []\n for line in train:\n AdID = line(3)\n QueryID = int(line[7])\n KeywordID = int(line[8])\n TitleID = int(line[9])\n DescriptionID = int(line[10])\n UserID = int(line[11])\n assert int(queryid_tokensid[QueryID][0]) == QueryID\n assert int(purchasedkeywordid_tokensid[KeywordID][0]) == KeywordID\n assert int(titleid_tokensid[TitleID][0]) == TitleID\n assert int(descriptionid_tokensid[DescriptionID][0]) == DescriptionID\n user_info = [-1, -1]\n if UserID != 0:\n assert int(userid_profile[UserID][0]) == UserID\n user_info = userid_profile[UserID][1]\n\n data.append([AdID] + user_info + queryid_tokensid[QueryID][1] + purchasedkeywordid_tokensid[KeywordID][1] +\n titleid_tokensid[TitleID][1] + descriptionid_tokensid[DescriptionID][1])\n\n file_1.close()\n file_2.close()\n file_3.close()\n file_4.close()\n file_5.close()\n\n path = 'data/track2/my_data.txt'\n file = open(path, 'w')\n for line in data:\n s = ' '.join(line)\n file.write(s + '\\n')\n file.close()\n return path\n\n\ndef do_binary_vectors(path, size):\n file = open(path, 'r')\n data = []\n for line in file:\n split_line = line.split()\n context = np.zeros(size + 1)\n context[0] = int(split_line[0]) # context[0] = adId, context[1:3] = gender, context[4:10] - age\n gender = int(split_line[1])\n age = int(split_line[2])\n context[gender + 1] = 1\n context[age + 4] = 1\n for num in split_line[3:]:\n context[int(num) + 10] = 1\n data.append(context)\n return data\n\n\n# It is a bad function. It will be deleted later.\n\ndef create_data_from_training_file(path):\n with open(path) as file:\n head = [next(file) for _ in range(1000000)]\n train = np.array(list(map(lambda x: x.split(), head)))\n\n df = pd.DataFrame(train,\n columns=['Click', 'Impression', 'DisplayURL', 'AdID', 'AdvertiserID', 'Depth', 'Position',\n 'QueryID',\n 'KeywordID', 'TitleID', 'DescriptionID', 'UserID'])\n\n pd.options.display.max_columns = 12\n print(df)\n data = []\n\n # for t, row in df.iterrows():\n # x = row['UserID']\n # a = row['AdID']\n # r = compute_reward(x, a, 0.8)\n # data.append((x, a, r))\n\n return data\n" ]
[ [ "numpy.zeros", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
RobinRojowiec/stopword-collection
[ "4fa3665aa9e0e1e34d9d44afc3bf7ee2cb0da5ed" ]
[ "analyze.py" ]
[ "\"\"\"\n\nIDE: PyCharm\nProject: corpus-analysis\nAuthor: Robin\nFilename: analyze\nDate: 13.01.2020\n\n\"\"\"\nimport spacy\n\nfrom util import load_dict\nfrom paths import DIAGRAMS, DICTS\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom tqdm import tqdm\n\n# analyze quality\nnlp = spacy.load(\"de_core_news_sm\")\ntotal_frequency: dict = load_dict(DICTS + '/total_frequency.json')\ndocument_frequency: dict = load_dict(DICTS + '/document_frequency.json')\ntotal_docs = 10000\nmax_freq = sum(list(item[1] for item in total_frequency.items()))\n\n\n# visualize\ndef count_bar_chart(counts, title):\n data: [] = list(counts.items())\n data.sort(key=lambda x: x[1], reverse=True)\n data = data[:20]\n\n objects = [x[0] for x in data]\n y_pos = np.arange(len(objects))\n performance = [x[1] / total_docs for x in data]\n\n plt.bar(y_pos, performance, align='center', alpha=0.5)\n plt.xticks(y_pos, objects, rotation='vertical')\n plt.ylabel('relative frequency')\n plt.title(title)\n\n plt.savefig(DIAGRAMS + '/' + title.replace(' ', '_').lower() + \".png\", format='png')\n plt.clf()\n\n\ncount_bar_chart(total_frequency, \"Total frequency\")\ncount_bar_chart(document_frequency, \"Document frequency\")\n\n# generate lists\ndfs: [] = list([item[0], item[1]] for item in document_frequency.items())\n\nword_pattern = re.compile('\\\\w+', flags=re.IGNORECASE)\ndfs = list(filter(lambda x: word_pattern.match(x[0]), dfs))\n\n# calculate information score\nmax_stop_words = 0\nfor token_df in tqdm(dfs):\n # token_df[1] = (total_frequency[token_df[0]]/max_freq)*math.log(total_docs / token_df[1])\n token_df[1] = math.log(total_docs / token_df[1])\n # token_df[1] = document_frequency[token_df[0]]#/max_freq\n\n if nlp(token_df[0])[0].is_stop:\n max_stop_words += 1\n\ndfs.sort(key=lambda x: x[1], reverse=False)\n\ndfreq_list = [x[1] for x in dfs]\nprint(\"Max: %.05f, Min: %.05f, Median:%.05f \" % (max(dfreq_list), min(dfreq_list), np.median(dfreq_list)))\n\nlimit = 200\nstopword_list = [token_df[0] for token_df in dfs[:limit]]\n\ncorrect = 0\ntotal = 0.0\nfor stopword in stopword_list:\n word = nlp(stopword)\n if word[0].is_stop:\n correct += 1\n total += 1\n\nwith open(DICTS + '/stopword_list_ger.dict', 'w+', encoding='utf-8') as dict_file:\n for word in stopword_list:\n dict_file.write(word + '\\n')\nprint(\"Total Stopwords: %i, Accuracy: %.02f, Recall: %.02f\" % (total, correct / total, correct / max_stop_words))\n" ]
[ [ "matplotlib.pyplot.title", "numpy.median", "matplotlib.pyplot.clf", "matplotlib.pyplot.bar", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
avmehta/frcnntest
[ "1f2cd9b68af942ef3b9b78ec97ce2ff496c546ee" ]
[ "tf_frcnn/lib/layer_utils/generate_anchors.py" ]
[ "# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n\ndef generate_anchors(base_size=16, ratios=[0.5, 1, 2],\n scales=2 ** np.arange(3, 6)):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n \"\"\"\n\n base_anchor = np.array([1, 1, base_size, base_size]) - 1\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)\n for i in range(ratio_anchors.shape[0])])\n return anchors\n\n\ndef _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors\n\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\ndef _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\n\nif __name__ == '__main__':\n import time\n\n t = time.time()\n a = generate_anchors()\n print(time.time() - t)\n print(a)\n from IPython import embed;\n\n embed()\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.arange", "numpy.round", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CarlosHernandezP/BCN-20k
[ "73630c7420d10002f84642e222a77846f0d59161" ]
[ "modelling/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torchvision.models as models\nfrom efficientnet_pytorch import EfficientNet\n\nclass CEEffNet(nn.Module):\n \"\"\"encoder + classifier\"\"\"\n def __init__(self, num_classes=2, model_name='effb0'):\n super(CEEffNet, self).__init__()\n\n self.encoder = choose_model(model_name=model_name)\n dim_in = self.encoder._fc.in_features\n self.encoder._fc = nn.Identity()\n self.fc = nn.Sequential(\n nn.Dropout(p=0.4),\n nn.Linear(dim_in, int(dim_in/2)),\n Swish_Module(),\n nn.Dropout(p=0.4),\n nn.Linear(int(dim_in/2), num_classes))\n\n def forward(self, x):\n feat = self.encoder(x)\n \n return self.fc(feat)\n\n\nclass CEResNet(nn.Module):\n \"\"\"encoder + classifier\"\"\"\n def __init__(self, num_classes=2, model_name='resnet50'):\n super(CEResNet, self).__init__()\n self.encoder = choose_model(model_name=model_name)\n dim_in = self.encoder.fc.in_features\n\n self.encoder.fc = nn.Identity()\n self.fc = nn.Sequential(\n nn.Dropout(p=0.4),\n nn.Linear(dim_in, int(dim_in/2)),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.4),\n nn.Linear(int(dim_in/2), num_classes))\n\n def forward(self, x):\n feat = self.encoder(x)\n return self.fc(self.encoder(x))\n\ndef choose_model(model_name : str) -> nn.Module:\n if 'res' in model_name:\n if '18' in model_name:\n feature_extractor = models.resnet18(pretrained=True)\n elif '34' in model_name:\n feature_extractor = models.resnet34(pretrained=True)\n elif '50' in model_name:\n feature_extractor = models.resnet50(pretrained=True)\n else:\n raise NotImplementedError(\"The feature extractor cannot be instantiated: model asked -> {} does not exist\".format(model_name))\n\n elif 'eff' in model_name:\n if 'b0' in model_name:\n feature_extractor = EfficientNet.from_pretrained('efficientnet-b0', num_classes=2)\n elif 'b1' in model_name:\n feature_extractor = EfficientNet.from_pretrained('efficientnet-b1', num_classes=2)\n elif 'b2' in model_name:\n feature_extractor = EfficientNet.from_pretrained('efficientnet-b2', num_classes=2)\n else:\n raise NotImplementedError(\"The feature extractor cannot be instantiated: model asked -> {} does not exist\".format(model_name))\n else:\n raise NotImplementedError(\"The feature extractor cannot be instantiated: model asked -> {} does not exist\".format(model_name))\n \n return feature_extractor\n\nsigmoid = nn.Sigmoid()\nclass Swish(torch.autograd.Function):\n @staticmethod\n def forward(ctx, i):\n result = i * sigmoid(i)\n ctx.save_for_backward(i)\n return result\n\n @staticmethod \n def backward(ctx, grad_output):\n i = ctx.saved_variables[0]\n sigmoid_i = sigmoid(i)\n return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))\n\nclass Swish_Module(nn.Module):\n def forward(self, x):\n return Swish.apply(x)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Identity", "torch.nn.ReLU", "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zoubaihan/SEGCN
[ "3d5b60e09018b162bcad6f91a11512cd815765cc" ]
[ "segcn/layers.py" ]
[ "import math\n\nimport torch\n\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\n\n\nclass SparseMM(torch.autograd.Function):\n \"\"\"\n Sparse x dense matrix multiplication with autograd support.\n\n Implementation by Soumith Chintala:\n https://discuss.pytorch.org/t/\n does-pytorch-support-autograd-on-sparse-matrix/6156/7\n \"\"\"\n\n def __init__(self, sparse):\n super(SparseMM, self).__init__()\n self.sparse = sparse\n\n def forward(self, dense):\n return torch.mm(self.sparse, dense)\n\n def backward(self, grad_output):\n grad_input = None\n if self.needs_input_grad[0]:\n grad_input = torch.mm(self.sparse.t(), grad_output)\n return grad_input\n\n\nclass GraphConvolution(Module):\n \"\"\"\n Simple GCN layer, similar to https://arxiv.org/abs/1609.02907\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.FloatTensor(in_features, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n support = torch.mm(input, self.weight)\n output = SparseMM(adj)(support)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n" ]
[ [ "torch.mm", "torch.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hassanobeid1994/tr_b_causal_2020
[ "1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5" ]
[ "src/causal2020/observables/distfit.py" ]
[ "\"\"\"\nFunctions used in the fitting of distributions\nto variables to be simulated.\n\"\"\"\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom fitter import Fitter\n\n# Functions to replace code within\n# DistNodeNoParent\n# Function for checking length\n\n\ndef is_unique(var_values):\n \"\"\"\n Checks whether a variable has one unique value.\n \"\"\"\n return len(var_values.unique()) == 1\n\n\n# def is_empirical(var_type):\n# \"\"\"\n# Checks whether the variable type for the\n# variable of interest is to be taken\n# as a constant value or as numerical values.\n# \"\"\"\n# return var_type in ['constant', 'numerical']\n\n\ndef is_constant(var_type): # to be rethought\n \"\"\"\n Checks whether a variable has a constant\n value.\n \"\"\"\n return var_type == \"constant\"\n\n\ndef is_empirical(var_type):\n \"\"\"\n Checks whether the variable type for the\n variable of interest is to be taken\n as a constant value or as numerical values.\n \"\"\"\n return var_type == \"empirical\"\n\n\ndef is_categorical(var_type):\n \"\"\"\n Checks whether the variable type for the\n variable of interest is categorical.\n \"\"\"\n return var_type == \"categorical\"\n\n\ndef get_alt_specific_variable_name(var_name, alt_name):\n \"\"\"\n Gets the alternative specific variable,\n returns a string starting with variable name and\n ending with alternative name.\n \"\"\"\n return var_name + \"_\" + alt_name\n\n\ndef get_constant_dist(var, var_val, alt_name=None):\n \"\"\"\n Retrives the constant 'distribution' of a\n constant variable.\n \"\"\"\n constant_dict = defaultdict(dict)\n # Add name of alternative to variable and store distriburion & parameters\n var_name = (\n var\n if alt_name is None\n else get_alt_specific_variable_name(var, alt_name)\n )\n constant_dict[var_name][\"distribution\"] = \"constant\"\n constant_dict[var_name][\"parameters\"] = var_val.unique()\n return constant_dict\n\n\ndef get_empirical_dist(var, var_val, alt_name=None):\n \"\"\"\n Retrives the empirical values of the alternative\n specific variable of interest as its distribution.\n \"\"\"\n empir_dict = defaultdict(dict)\n # Add name of alternative to variable and store distriburion & parameters\n var_name = (\n var\n if alt_name is None\n else get_alt_specific_variable_name(var, alt_name)\n )\n empir_dict[var_name][\"distribution\"] = \"empirical\"\n empir_dict[var_name][\"parameters\"] = np.array(var_val)\n return empir_dict\n\n\ndef get_categorical_dist(var, var_val, alt_name=None):\n \"\"\"\n Retrives the unique values and the proportions\n of observed values for a categorical alternative\n specific variables.\n \"\"\"\n categ_dict = defaultdict(dict)\n # If more than one category, compute the frequency of values\n # and store as parameters\n # Add name of alternative to variable and store distriburion & parameters\n var_name = (\n var\n if alt_name is None\n else get_alt_specific_variable_name(var, alt_name)\n )\n categ_dict[var_name][\"distribution\"] = \"categorical\"\n np_array_range = np.arange(var_val.max() + 1)\n array_bincount = np.bincount(var_val)\n probs = array_bincount / len(var_val)\n categ_dict[var_name][\"parameters\"] = [np_array_range, probs]\n return categ_dict\n\n\ndef get_continuous_dist(var, var_val, cont_dists, alt_name=None):\n \"\"\"\n Retrives the distribution of continuous alternative\n specific variables using the Fitter package.\n \"\"\"\n cont_dict = defaultdict(dict)\n # Use the Fitter library to fit distributions\n # to the data\n fitter_object = Fitter(\n 
data=var_val, distributions=cont_dists, timeout=60\n )\n fitter_object.fit()\n # Get the best distribution and store in dictionary\n BestDict = fitter_object.get_best()\n # Add name of alternative to variable and store distribution & parameters\n var_name = (\n var\n if alt_name is None\n else get_alt_specific_variable_name(var, alt_name)\n )\n cont_dict[var_name][\"distribution\"] = list(BestDict.items())[0][0]\n cont_dict[var_name][\"parameters\"] = list(BestDict.items())[0][1]\n return cont_dict\n\n\ndef get_distribution_dicts(var, var_type, var_val, cont_dists, alt_name=None):\n \"\"\"\n Helper function to generate a distribution dictionary\n for the variable specified.\n \"\"\"\n # If data is empirical\n if is_empirical(var_type):\n # If only one category\n if is_unique(var_val):\n # Add name of alternative to variable\n # and store distribution & parameters\n dist_dic = get_constant_dist(var, var_val, alt_name)\n else:\n dist_dic = get_empirical_dist(var, var_val, alt_name)\n elif is_categorical(var_type):\n if is_unique(var_val):\n dist_dic = get_constant_dist(var, var_val, alt_name)\n else:\n dist_dic = get_categorical_dist(var, var_val, alt_name)\n else:\n # If data is not categorical but has one unique value\n if is_unique(var_val):\n dist_dic = get_constant_dist(var, var_val, alt_name)\n # If data is not categorical but has more than one unique value\n else:\n dist_dic = get_continuous_dist(var, var_val, cont_dists, alt_name)\n return dist_dic\n\n\n############################################\n# Functions to replace functionality for\n# fitting distributions for variables\n# specific variables that have no parents\n# in the causal graph.\n############################################\n\n\ndef ind_spec_dist(data_long, obs_id_col, ind_spec, var_types, cont_dists):\n \"\"\"\n Function that retrieves distributions for all individual\n specific variables.\n \"\"\"\n ind_spec_dict = defaultdict(dict)\n for ind_var in ind_spec:\n # generate array of values for individual specific variable\n var_val = (\n data_long[[obs_id_col, ind_var]]\n .drop_duplicates(obs_id_col, inplace=False)\n .loc[:, ind_var]\n .reset_index(drop=True)\n )\n # Get distribution of variable\n var_type = var_types[ind_var]\n ind_var_dic = get_distribution_dicts(\n ind_var, var_type, var_val, cont_dists\n )\n ind_spec_dict.update(ind_var_dic)\n return ind_spec_dict\n\n\ndef alt_spec_dist(\n data_long, alt_id_col, alt_spec_dic, var_types, alt_name_dic, cont_dists\n):\n \"\"\"\n Function that retrieves distributions for all alternative\n specific variables.\n \"\"\"\n all_alt_spec_var_dic = defaultdict(dict)\n for alt_id in data_long[alt_id_col].unique():\n # Store data for specific alternative (mode)\n alt_data = data_long.loc[data_long[alt_id_col] == alt_id]\n alt_spec_var_dic = defaultdict(dict)\n # Loop around the alternative specific\n # variables in the input dictionary\n alt_name = alt_name_dic[alt_id]\n for alt_var in alt_spec_dic[alt_id]:\n var_val = alt_data[alt_var]\n var_type = var_types[alt_var]\n alt_spec_var_dist = get_distribution_dicts(\n alt_var, var_type, var_val, cont_dists, alt_name\n )\n alt_spec_var_dic.update(alt_spec_var_dist)\n all_alt_spec_var_dic.update(alt_spec_var_dic)\n return all_alt_spec_var_dic\n\n\ndef trip_spec_dist(data_long, obs_id_col, trip_spec, var_types, cont_dists):\n \"\"\"\n Function that retrieves distributions for all trip\n specific variables.\n \"\"\"\n # Trip Specific Variable (maybe combine with individual specific variables)\n # Loop around trip (observation) 
specific variables\n trip_spec_dict = defaultdict(dict)\n for trip_var in trip_spec:\n # generate array of values for trip specific variable\n var_val = (\n data_long[[obs_id_col, trip_var]]\n .drop_duplicates(obs_id_col, inplace=False)\n .loc[:, trip_var]\n .reset_index(drop=True)\n )\n var_type = var_types[trip_var]\n # If data is to be taken as empirical values\n trip_spec_var_dist = get_distribution_dicts(\n trip_var, var_type, var_val, cont_dists\n )\n trip_spec_dict.update(trip_spec_var_dist)\n return trip_spec_dict\n\n\n# Define the main function\ndef get_dist_node_no_parent(\n data_long,\n alt_id_col,\n obs_id_col,\n alt_spec_dic,\n alt_name_dic,\n ind_spec,\n trip_spec,\n var_types,\n cont_dists=None,\n):\n \"\"\"\n Function to find the distribution of specific variables\n from a long format dataset.\n\n Parameters\n ----------\n data_long: Pandas DataFrame\n Dataset in long format from which variable\n distribution is to be found.\n\n alt_id_col: string\n Name of the column with alternative ids.\n\n obs_id_col: string\n Name of the column with observation ids.\n\n alt_spec_dic: dictionary\n Dictionary with keys as the ordered number\n of alternatives, and the value for each key\n is a list of strings representing the name of\n variables without parents per alternative.\n\n alt_name_dic: dictionary\n Dictionary with keys as the ordered number\n of alternatives, and the value for each key\n is a string representing the name of the\n alternative.\n\n ind_spec: list\n List containing strings of the names of\n individual specific variables.\n\n trip_spec: list\n List containing string of the names of\n trip specific variables.\n\n var_types: dictionary\n Dictionary with keys as strings of names of variables\n from long format dataset, and values for each key are\n the type of variables (e.g.: 'categorical vs. continuous').\n\n cont_dists: list\n List of continuous RVs distribution names from scipy.\n\n Returns\n -------\n a nested dictionary with keys as variable names and values\n as dictionaries containing both the distribution name and\n its parameters.\n \"\"\"\n params_dict = defaultdict(dict)\n\n # Code for Individual Specific Variables\n print(\"Getting Distributions of Individual Specific Variables...\")\n print(\"---------------------------------------------------------\")\n ind_spec_dic_params = ind_spec_dist(\n data_long, obs_id_col, ind_spec, var_types, cont_dists\n )\n params_dict.update(ind_spec_dic_params)\n print(\"Done...\")\n\n # Code for Alternative Specific Variables\n # Loop around the different available alternatives\n print(\"Getting Distributions of Alternative Specific Variables...\")\n print(\"----------------------------------------------------------\")\n alt_spec_dic_params = alt_spec_dist(\n data_long,\n alt_id_col,\n alt_spec_dic,\n var_types,\n alt_name_dic,\n cont_dists,\n )\n params_dict.update(alt_spec_dic_params)\n print(\"Done...\")\n\n # Trip Specific Variable (maybe combine with individual specific variables)\n # Loop around trip (observation) specific variables\n print(\"Getting Distributions of Trip Specific Variables...\")\n print(\"---------------------------------------------------------\")\n trip_spec_dic_params = trip_spec_dist(\n data_long, obs_id_col, trip_spec, var_types, cont_dists\n )\n params_dict.update(trip_spec_dic_params)\n print(\"Done...\")\n\n return params_dict\n" ]
[ [ "numpy.array", "numpy.bincount" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jphacks/D_2003
[ "60a5684d549862e85bdf758069518702d9925a48", "60a5684d549862e85bdf758069518702d9925a48", "60a5684d549862e85bdf758069518702d9925a48", "60a5684d549862e85bdf758069518702d9925a48", "60a5684d549862e85bdf758069518702d9925a48", "60a5684d549862e85bdf758069518702d9925a48" ]
[ "YOLO/.history/pytorch-yolo-v3/video_demo_20201105153325.py", "YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106032031.py", "YOLO/.history/pytorch-yolo-v3/video_demo_20201105145255.py", "YOLO/.history/pytorch-yolo-v3/video_demo_20201105151709.py", "YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106003651.py", "YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106014428.py" ]
[ "from __future__ import division\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random\nimport argparse\nimport pickle as pkl\n\nimport requests\nfrom requests.auth import HTTPDigestAuth\n\nimport io\nfrom PIL import Image, ImageDraw, ImageFilter\n# import play\n\n#from pygame import mixer\n#import winsound\n\ncamera_name = {\n \"north\":0,\n \"south\":2,\n \"east\":1,\n \"west\":3,\n}\n\n\ndef prep_image(img, inp_dim):\n # CNNに通すために画像を加工する\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef count(x, img, count):\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\\n\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n return count\n\ndef write(x, img,camId):\n global count\n global point\n p = [0,0]\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n print(camId, \"_c1:\",c1)\n print(camId, \"_c2:\",c2)\n label = \"{0}\".format(classes[cls])\n print(\"label:\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n\n p[0] = (c2[0]-c1[0])/2\n p[1] = (c2[1]-c1[1])/2\n point[camId].append(p)\n print(\"point0\",point[0])\n print(\"point1\",point[1])\n\n return img\n\ndef arg_parse():\n # モジュールの引数を作成\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\n # confidenceは信頼性\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n # nms_threshは閾値\n\n parser.add_argument(\"--reso\", dest = 'reso', help =\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"160\", type = str)\n # reso is the CNN input resolution; increasing it improves accuracy but reduces speed.\n return parser.parse_args() # parse the arguments and return them\n\ndef cvpaste(img, imgback, x, y, angle, scale):\n # x and y are the distance from the center of the background image\n\n r = img.shape[0]\n c = img.shape[1]\n rb = imgback.shape[0]\n cb = imgback.shape[1]\n hrb=round(rb/2)\n hcb=round(cb/2)\n hr=round(r/2)\n hc=round(c/2)\n\n # Copy the forward image and move to the center of the background image\n imgrot = np.zeros((rb,cb,3),np.uint8)\n imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]\n\n # Rotation and scaling\n M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n # Translation\n M = np.float32([[1,0,x],[0,1,y]])\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n\n # Making mask\n imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of the forward image in the background image\n img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)\n\n # Take only region of the forward image.\n img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)\n\n # Paste the forward image on the background image\n imgpaste = cv2.add(img1_bg,img2_fg)\n\n return imgpaste\n\ndef hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\n h_min = min(im.shape[0] for im in im_list)\n im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)\n for im in im_list]\n return cv2.hconcat(im_list_resize)\n\n# def beep(freq, dur=100):\n# winsound.Beep(freq, dur)\n\nif __name__ == '__main__':\n # stock YOLO (before our training)\n # cfgfile = \"cfg/yolov3.cfg\" # config file\n # weightsfile = \"weight/yolov3.weights\" # weights file\n # classes = load_classes('data/coco.names') # list of detection classes\n\n # YOLO after mask training\n cfgfile = \"cfg/mask.cfg\" # config file\n weightsfile = \"weight/mask_1500.weights\" # weights file\n classes = load_classes('data/mask.names') # list of detection classes\n\n\n num_classes = 80 # number of classes\n\n args = arg_parse() # get the arguments\n confidence = float(args.confidence) # get the confidence setting\n nms_thesh = float(args.nms_thresh) # get the threshold\n start = 0\n CUDA = torch.cuda.is_available() # whether CUDA is available\n\n num_classes = 80 # number of classes\n bbox_attrs = 5 + num_classes\n max = 0 # occupancy limit\n num_camera = 2 # number of cameras\n model = [[] for i in range(num_camera)]\n inp_dim = [[] for i in range(num_camera)]\n cap = [[] for i in range(num_camera)]\n ret = [[] for i in range(num_camera)]\n frame = [[] for i in range(num_camera)]\n img = [[] for i in range(num_camera)]\n orig_im = [[] for i in range(num_camera)]\n dim = [[] for i in range(num_camera)]\n output0 = []\n output1 = []\n output2 = []\n output3 = []\n\n point = [[] for i in range(num_camera)]\n # output = [[] for i in range(num_camera)]\n # output = torch.tensor(output)\n # print(\"output_shape\\n\", output.shape)\n\n for i in range(num_camera):\n model[i] = Darknet(cfgfile) # create the model\n model[i].load_weights(weightsfile) # load the weights into the model\n\n model[i].net_info[\"height\"] = args.reso\n inp_dim[i] = int(model[i].net_info[\"height\"])\n\n assert inp_dim[i] % 32 == 0\n assert inp_dim[i] > 32\n\n #mixer.init() # initialize\n\n if CUDA:\n for i in range(num_camera):\n model[i].cuda() # run on CUDA if available\n\n for i in range(num_camera):\n model[i].eval()\n\n cap[0] = cv2.VideoCapture(0) # select the camera (USB connection)\n cap[1] = cv2.VideoCapture(1) # select the camera (USB connection)\n # cap = cv2.VideoCapture(\"movies/sample.mp4\")\n #cap = cv2.VideoCapture(\"movies/one_v2.avi\")\n\n # 
Use the next line if your camera has a username and password\n # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/1') # (network connection)\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/80')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/video')\n #cap = cv2.VideoCapture('http://admin:[email protected]/camera-cgi/admin/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/snapshot.jpg?user=admin&pwd=admin&strm=0')\n print('-1')\n\n #assert cap.isOpened(), 'Cannot capture source' # check that the camera opened\n\n img1 = cv2.imread(\"images/phase_1.jpg\")\n img2 = cv2.imread(\"images/phase_2.jpg\")\n img3 = cv2.imread(\"images/phase_2_red.jpg\")\n img4 = cv2.imread(\"images/phase_3.jpg\")\n #mixer.music.load(\"voice/voice_3.m4a\")\n #print(img1)\n frames = 0\n count_frame = 0 # frame counter\n flag = 0 # crowding state (0: not crowded, 1: crowded)\n start = time.time()\n print('-1')\n while (cap[i].isOpened() for i in range(num_camera)): # while the cameras are open\n count=0 # count people\n point = [[] for i in range(num_camera)]\n for i in range(num_camera):\n ret[i], frame[i] = cap[i].read() # grab a captured frame\n if (ret[i] for i in range(num_camera)):\n # preprocess the captured frames for analysis\n for i in range(num_camera):\n img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])\n\n if CUDA:\n for i in range(num_camera):\n im_dim[i] = im_dim[i].cuda()\n img[i] = img[i].cuda()\n\n # for i in range(num_camera):\n # output[i] = model[i](Variable(img[i]), CUDA)\n output0 = model[0](Variable(img[0]), CUDA)\n output1 = model[1](Variable(img[1]), CUDA)\n # output2 = model[2](Variable(img[2]), CUDA)\n # output3 = model[3](Variable(img[3]), CUDA)\n\n #print(\"output:\\n\", output)\n # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output0 = write_results(output0, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output1 = write_results(output1, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n # output2 = write_results(output2, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n # output3 = write_results(output3, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n # print(\"output\", i, \":\\n\", output[i])\n # print(output.shape)\n \"\"\"\n # show FPS\n if (type(output[i]) == int for i in range(num_camera)):\n print(\"display\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # press q to stop the FPS display\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[i][:,[1,3]] *= frame[i].shape[1]\n output[i][:,[2,4]] *= frame[i].shape[0]\n \"\"\"\n # # show FPS\n # if type(output0) == int:\n # print(\"display\")\n # frames += 1\n # print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # # press q to stop the FPS display\n # key = cv2.waitKey(1)\n # if key & 0xFF == ord('q'):\n # break\n # continue\n # for i in range(num_camera):\n output0[:,1:5] = torch.clamp(output0[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output0[:,[1,3]] *= frame[0].shape[1]\n output0[:,[2,4]] *= frame[0].shape[0]\n\n output1[:,1:5] = torch.clamp(output1[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output1[:,[1,3]] *= frame[1].shape[1]\n output1[:,[2,4]] *= frame[1].shape[0]\n\n # output2[:,1:5] = torch.clamp(output2[:,1:5], 
0.0, float(inp_dim[i]))/inp_dim[i]\n # output2[:,[1,3]] *= frame[i].shape[1]\n # output2[:,[2,4]] *= frame[i].shape[0]\n\n # output3[:,1:5] = torch.clamp(output3[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n # output3[:,[1,3]] *= frame[i].shape[1]\n # output3[:,[2,4]] *= frame[i].shape[0]\n\n\n\n colors = pkl.load(open(\"pallete\", \"rb\"))\n\n #count = lambda x: count(x, orig_im, count) # count people\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i]), output[i]))\n print(\"count:\\n\",count)\n \"\"\"\n # for i in range(num_camera):\n # list(map(lambda x: write(x, orig_im[i]), output))\n list(map(lambda x0: write(x0, orig_im[0],0), output0))\n list(map(lambda x1: write(x1, orig_im[1],1), output1))\n # print(\"x0\",x0)\n # list(map(lambda x2: write(x2, orig_im[2],2), output2))\n # list(map(lambda x3: write(x3, orig_im[3],3), output3))\n # print(\"point0\",point[0])\n # print(\"point1\",point[1])\n\n\n print(\"count:\\n\",count)\n print(\"count_frame\", count_frame)\n\n\n if count > max:\n count_frame += 1\n #print(\"-1\")\n if count_frame <= 50:\n x=0\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)\n if flag == 1:\n # play.googlehome()\n flag += 1\n #mixer.music.play(1)\n elif count_frame <= 100:\n x=-30\n y=10\n angle=20\n scale=1.1\n if count_frame%2==1:\n for i in range(num_camera):\n imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)\n else:\n for i in range(num_camera):\n imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)\n if flag == 2:\n # play.googlehome()\n flag += 1\n else:\n x=-30\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)\n if count_frame > 101: # <-- shifted by 2 frames\n print(\"\\007\") # warning beep\n time.sleep(3)\n if flag == 3:\n # play.googlehome()\n flag += 1\n # cv2.imshow(\"frame\", imgpaste)\n else:\n count_frame = 0\n flag = 0\n #print(\"-2\")\n # for i in range(num_camera):\n im_h_resize = hconcat_resize_min(orig_im)\n cv2.imshow(\"frame\", im_h_resize )\n # play.googlehome()\n key = cv2.waitKey(1)\n # press q to stop the video display\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"count_frame:\\n\", count_frame)\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n else:\n break\n\n", "from __future__ import division\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random\nimport argparse\nimport pickle as pkl\n\nimport requests\nfrom requests.auth import HTTPDigestAuth\n\nimport io\nfrom PIL import Image, ImageDraw, ImageFilter\n\nimport play\n\nimport csv\nimport itertools\nimport math\n\nwith open('csv/Lidar.csv', 'r', encoding=\"utf-8_sig\", newline = '') as f:\n l = csv.reader(f)\n LiDAR = [row for row in l]\n # for row in LiDAR:\n # print(row)\n\nLiDAR_array = np.array(LiDAR)\nLiDAR_array1, LiDAR_array2 = np.split(LiDAR_array, 2, 1)\n#LiDAR = [[int(n) for n in row] for row in LiDAR]\n# LiDAR = [list(map(int, row)) for row in LiDAR]\nLiDAR_array1_int = LiDAR_array1.astype(np.int64)\nLiDAR_array2_float = LiDAR_array2.astype(np.float32)\nLiDAR_array_cast = np.append(LiDAR_array1_int, LiDAR_array2_float, axis = 1)\nLiDAR_list = LiDAR_array_cast.tolist()\n# for i in LiDAR_list:\n# print(i)\nprint(\"LiDAR_len\", type(LiDAR[0][0]))\n\n\n\n\ndef prep_image(img, inp_dim):\n # 
CNNに通すために画像を加工する\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef count(x, img, count):\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\\n\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n return count\n\ndef write(x, img,camId):\n global count\n global point\n p = [0,0]\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n print(camId, \"_c0:\",c1)\n print(camId, \"_c1:\",c2)\n label = \"{0}\".format(classes[cls])\n print(\"label:\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n p[0] = (c2[0]+c1[0])/2\n p[1] = (c2[1]+c1[1])/2\n point[camId].append(p)\n\n\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n\n return img\n\n\ndef arg_parse():\n # モジュールの引数を作成\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\n # confidenceは信頼性\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n # nms_threshは閾値\n\n parser.add_argument(\"--reso\", dest = 'reso', help =\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"160\", type = str)\n # resoはCNNの入力解像度で、増加させると精度が上がるが、速度が低下する。\n return parser.parse_args() # 引数を解析し、返す\n\ndef cvpaste(img, imgback, x, y, angle, scale):\n # x and y are the distance from the center of the background image\n\n r = img.shape[0]\n c = img.shape[1]\n rb = imgback.shape[0]\n cb = imgback.shape[1]\n hrb=round(rb/2)\n hcb=round(cb/2)\n hr=round(r/2)\n hc=round(c/2)\n\n # Copy the forward image and move to the center of the background image\n imgrot = np.zeros((rb,cb,3),np.uint8)\n imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]\n\n # Rotation and scaling\n M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n # Translation\n M = np.float32([[1,0,x],[0,1,y]])\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n\n # Makeing mask\n imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of the forward image in the background image\n img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)\n\n # Take only region of the forward image.\n img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)\n\n # Paste the forward image on the background image\n imgpaste = cv2.add(img1_bg,img2_fg)\n\n return imgpaste\n\ndef cosineTheorem(Lidar, radian1, radian2):\n theta = abs(radian1-radian2)\n distance = Lidar[radian1][1] ** 2 + Lidar[radian2][1] ** 2 - 2 * Lidar[radian1][1] * Lidar[radian2][1] * math.cos(abs(radian2 - radian1))\n\n return distance\n\ndef combinations_count(n, r):\n return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))\n# def beep(freq, dur=100):\n# winsound.Beep(freq, dur)\n\nif __name__ == '__main__':\n #学習前YOLO\n # cfgfile = \"cfg/yolov3.cfg\" # 設定ファイル\n # weightsfile = \"weight/yolov3.weights\" # 重みファイル\n # classes = load_classes('data/coco.names') # 識別クラスのリスト\n\n #マスク学習後YOLO\n cfgfile = \"cfg/mask.cfg\" # 設定ファイル\n weightsfile = \"weight/mask_1500.weights\" # 重みファイル\n classes = load_classes('data/mask.names') # 識別クラスのリスト\n\n\n num_classes = 80 # クラスの数\n\n args = arg_parse() # 引数を取得\n confidence = float(args.confidence) # 信頼性の設定値を取得\n nms_thesh = float(args.nms_thresh) # 閾値を取得\n start = 0\n CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか\n\n num_classes = 80 # クラスの数\n bbox_attrs = 5 + num_classes\n max = 0 #限界人数\n num_camera = 1 #camera数\n model = [[] for i in range(num_camera)]\n inp_dim = [[] for i in range(num_camera)]\n cap = [[] for i in range(num_camera)]\n ret = [[] for i in range(num_camera)]\n frame = [[] for i in range(num_camera)]\n img = [[] for i in range(num_camera)]\n orig_im = [[] for i in range(num_camera)]\n dim = [[] for i in range(num_camera)]\n # output = [[] for i in range(num_camera)]\n # output = torch.tensor(output)\n # print(\"output_shape\\n\", output.shape)\n\n for i in range(num_camera):\n model[i] = Darknet(cfgfile) #model1の作成\n model[i].load_weights(weightsfile) # model1に重みを読み込む\n\n model[i].net_info[\"height\"] = args.reso\n inp_dim[i] = int(model[i].net_info[\"height\"])\n\n assert inp_dim[i] % 32 == 0\n assert inp_dim[i] > 32\n\n #mixer.init() #初期化\n\n if CUDA:\n for i in range(num_camera):\n model[i].cuda() #CUDAが使用可能であればcudaを起動\n\n for i in range(num_camera):\n model[i].eval()\n\n cap[0] = cv2.VideoCapture(1) #カメラを指定(USB接続)\n # cap[1] = cv2.VideoCapture(1) #カメラを指定(USB接続)\n # cap = cv2.VideoCapture(\"movies/sample.mp4\")\n #cap = cv2.VideoCapture(\"movies/one_v2.avi\")\n\n # Use the next line if your camera has a 
username and password\n # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/1') #(ネットワーク接続)\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/80')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/video')\n #cap = cv2.VideoCapture('http://admin:[email protected]/camera-cgi/admin/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/snapshot.jpg?user=admin&pwd=admin&strm=0')\n print('-1')\n\n #assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認\n\n img1 = cv2.imread(\"images/phase_1.jpg\")\n img2 = cv2.imread(\"images/phase_2.jpg\")\n img3 = cv2.imread(\"images/phase_2_red.jpg\")\n img4 = cv2.imread(\"images/phase_3.jpg\")\n #mixer.music.load(\"voice/voice_3.m4a\")\n #print(img1)\n frames = 0\n count_frame = 0 #フレーム数カウント\n flag = 0 #密状態(0:疎密,1:密入り)\n start = time.time()\n print('-1')\n while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間\n count=0 #人数をカウント\n point = [[] for i in range(num_camera)]\n for i in range(num_camera):\n ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得\n if (ret[i] for i in range(num_camera)):\n # 解析準備としてキャプチャ画像を加工\n for i in range(num_camera):\n img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])\n\n if CUDA:\n for i in range(num_camera):\n im_dim[i] = im_dim[i].cuda()\n img[i] = img[i].cuda()\n\n for i in range(num_camera):\n # output[i] = model[i](Variable(img[i]), CUDA)\n output = model[i](Variable(img[i]), CUDA)\n\n #print(\"output:\\n\", output)\n # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n # print(\"output\", i, \":\\n\", output[i])\n print(output.shape)\n \"\"\"\n # FPSの表示\n if (type(output[i]) == int for i in range(num_camera)):\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[i][:,[1,3]] *= frame[i].shape[1]\n output[i][:,[2,4]] *= frame[i].shape[0]\n \"\"\"\n # FPSの表示\n if type(output) == int:\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[:,[1,3]] *= frame[i].shape[1]\n output[:,[2,4]] *= frame[i].shape[0]\n\n\n\n colors = pkl.load(open(\"pallete\", \"rb\"))\n\n #count = lambda x: count(x, orig_im, count) #人数をカウント\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i]), output[i]))\n print(\"count:\\n\",count)\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i], i), output))\n print(\"count:\\n\",count)\n print(\"count_frame\", count_frame)\n # print(\"framex\", frame[0].shape[1])\n # print(\"framey\", frame[0].shape[0])\n # print(\"point0\",point[0])\n\n #LiDARの情報の人識別\n radian_lists = []\n close_list = [0] * 4\n dense_list = [0] * 4\n for k, (radian, length) in enumerate(LiDAR_list):\n if not type(length) == type('string'):\n radian_cam = [[] for i in 
range(len(point))]\n num_person = 0\n # print(\"k:\", k)\n if k % 90 == 0:\n # print(\"hahahah\")\n if not k == 0:\n radian_lists.append(radian_list)\n radian_list = []\n if k < 90:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n print(\"radian_cam\", radian_cam[0])\n for dif in range(10):\n for radi_num in range(len(radian_cam)):\n if int(radian)+dif-5 == int(radian_cam[radi_num]):\n num_person += 1\n radian_list.append(radian)\n if num_person > 1:\n close_list[0] = 1\n if num_person > 2:\n dense_list[0] = 1\n elif k < 180:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n for radi_num in range(len(radian_cam)):\n if int(radian)+dif-5 == int(radian_cam[radi_num]):\n num_person += 1\n radian_list.append(radian)\n if num_person > 1:\n close_list[1] = 1\n if num_person > 2:\n dense_list[1] = 1\n elif k < 270:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n for radi_num in range(len(radian_cam)):\n if int(radian)+dif-5 == int(radian_cam[radi_num]):\n num_person += 1\n radian_list.append(radian)\n if num_person > 1:\n close_list[2] = 1\n if num_person > 2:\n dense_list[2] = 1\n else:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n for radi_num in range(len(radian_cam)):\n if int(radian)+dif-5 == int(radian_cam[radi_num]):\n num_person += 1\n radian_list.append(radian)\n if num_person > 1:\n close_list[3] = 1\n if num_person > 2:\n dense_list[3] = 1\n radian_lists.append(radian_list)\n # print(\"radian_lists_len\", len(radian_lists))\n\n #距離計算\n dis_list = []\n for direction in range(4):\n if len(radian_lists[direction]) > 1:\n # n = combinations_k(len(radian_lists[direction]), 2)\n dis_combination = list(itertools.combinations(radian_lists[direction], 2))\n distance = [[] for i in range(len(dis_combination))]\n print(type(LiDAR_list[0][0]))\n for num_dis, com_list in enumerate(dis_combination):\n distance[num_dis] = cosineTheorem(LiDAR_list, int(com_list[0]), int(com_list[1]))\n dis_list.append(distance)\n\n #密集判定\n for direction in range(4):\n close = 0 #密接数\n dense = 0 #密集数\n print(type(direction))\n for dis in dis_list[direction]:\n if dis < 2.0:\n close += 1\n close_list[direction] = 1\n if close > 1:\n dense_list[direction] = 1\n\n print(\"close_list\", close_list)\n print(\"dense_list\", dense_list)\n\n # print(\"point1\",point[1])\n\n\n if count > max:\n count_frame += 1\n #print(\"-1\")\n if count_frame <= 50:\n x=0\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)\n if flag == 1:\n play.googlehome()\n flag += 1\n #mixer.music.play(1)\n elif count_frame <= 100:\n x=-30\n y=10\n angle=20\n scale=1.1\n if count_frame%2==1:\n for i in range(num_camera):\n imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)\n else:\n for i in range(num_camera):\n imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)\n if flag == 2:\n play.googlehome()\n flag += 1\n else:\n x=-30\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)\n if count_frame > 101: #<--2フレームずらす\n print(\"\\007\") #警告音\n time.sleep(3)\n if flag == 3:\n play.googlehome()\n flag += 1\n cv2.imshow(\"frame\", imgpaste)\n else:\n count_frame = 0\n flag = 0\n #print(\"-2\")\n for i in range(num_camera):\n cv2.imshow(\"frame\", orig_im[i])\n # play.googlehome()\n key = 
cv2.waitKey(1)\n # qキーを押すと動画表示の終了\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"count_frame:\\n\", count_frame)\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n else:\n break\n\n", "from __future__ import division\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random\nimport argparse\nimport pickle as pkl\n\nimport requests\nfrom requests.auth import HTTPDigestAuth\n\nimport io\nfrom PIL import Image, ImageDraw, ImageFilter\n\nimport play\n\n#from pygame import mixer\n#import winsound\n\ncamera_name = {\n \"north\":0,\n \"south\":2,\n \"east\":1,\n \"west\":3,\n}\n\ndef prep_image(img, inp_dim):\n # CNNに通すために画像を加工する\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef count(x, img, count):\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\\n\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n return count\n\ndef write(x, img):\n global count\n global point\n p = [0,0]\n # 画像に結果を描画\n print(\"x:\\n\", x)\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n p[0] = (c1[0]-c2[0])/2\n p[1] = (c1[1]-c2[1])/2\n point[camId].append(p)\n return img\n\ndef arg_parse():\n # モジュールの引数を作成\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\n # confidenceは信頼性\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n # nms_threshは閾値\n\n parser.add_argument(\"--reso\", dest = 'reso', help =\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"160\", type = str)\n # resoはCNNの入力解像度で、増加させると精度が上がるが、速度が低下する。\n return parser.parse_args() # 引数を解析し、返す\n\ndef cvpaste(img, imgback, x, y, angle, scale):\n # x and y are the distance from the center of the background image\n\n r = img.shape[0]\n c = img.shape[1]\n rb = imgback.shape[0]\n cb = imgback.shape[1]\n hrb=round(rb/2)\n hcb=round(cb/2)\n hr=round(r/2)\n hc=round(c/2)\n\n # Copy the forward image and move to the center of the background image\n imgrot = np.zeros((rb,cb,3),np.uint8)\n imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]\n\n # Rotation and scaling\n M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n # Translation\n M = np.float32([[1,0,x],[0,1,y]])\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n\n # Makeing mask\n imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of the forward image in the background image\n img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)\n\n # Take only region of the forward image.\n img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)\n\n # Paste the forward image on the background image\n imgpaste = cv2.add(img1_bg,img2_fg)\n\n return imgpaste\n\ndef hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\n h_min = min(im.shape[0] for im in im_list)\n im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)\n for im in im_list]\n return cv2.hconcat(im_list_resize)\n\n# def beep(freq, dur=100):\n# winsound.Beep(freq, dur)\n\nif __name__ == '__main__':\n #学習前YOLO\n # cfgfile = \"cfg/yolov3.cfg\" # 設定ファイル\n # weightsfile = \"weight/yolov3.weights\" # 重みファイル\n # classes = load_classes('data/coco.names') # 識別クラスのリスト\n\n #マスク学習後YOLO\n cfgfile = \"cfg/mask.cfg\" # 設定ファイル\n weightsfile = \"weight/mask_1500.weights\" # 重みファイル\n classes = load_classes('data/mask.names') # 識別クラスのリスト\n\n\n num_classes = 80 # クラスの数\n\n args = arg_parse() # 引数を取得\n confidence = float(args.confidence) # 信頼性の設定値を取得\n nms_thesh = float(args.nms_thresh) # 閾値を取得\n start = 0\n CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか\n\n num_classes = 80 # クラスの数\n bbox_attrs = 5 + num_classes\n max = 2 #限界人数\n\n #-------適宜変更------------------------------------\n num_camera = 3 #camera数\n #-------適宜変更------------------------------------\n\n\n # cap = [[] for i in range(num_camera)]\n\n #-------適宜変更------------------------------------\n cap[0] = cv2.VideoCapture(0) #カメラを指定(ivcam)\n cap[1] = cv2.VideoCapture(1) #カメラを指定(native)\n # cap[2] = cv2.VideoCapture(2) #カメラを指定(USB接続)\n # cap[3] = cv2.VideoCapture(3) #カメラを指定(USB接続)\n # cap = cv2.VideoCapture(\"movies/sample.mp4\")\n #cap = cv2.VideoCapture(\"movies/one_v2.avi\")\n #-------適宜変更------------------------------------\n\n for i in range(num_camera):\n if not cap[i].isOpened():\n if i < num_camera - 1:\n for j in range(len(num_camera - i) - 1):\n cap[i + j] = cap[i + j + 1]\n cap.pop()\n num_camera -= 1\n\n # Use the next line if your camera has a username and password\n # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/1') #(ネットワーク接続)\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/80')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/video')\n #cap = cv2.VideoCapture('http://admin:[email protected]/camera-cgi/admin/recorder.cgi?action=start&id=samba')\n #cap = 
cv2.VideoCapture('http://admin:[email protected]/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/snapshot.jpg?user=admin&pwd=admin&strm=0')\n print('-1')\n\n model = [[] for i in range(num_camera)]\n inp_dim = [[] for i in range(num_camera)]\n ret = [[] for i in range(num_camera)]\n frame = [[] for i in range(num_camera)]\n img = [[] for i in range(num_camera)]\n orig_im = [[] for i in range(num_camera)]\n dim = [[] for i in range(num_camera)]\n output0 = []\n output1 = []\n output2 = []\n output3 = []\n\n for i in range(num_camera):\n model[i] = Darknet(cfgfile) #model1の作成\n model[i].load_weights(weightsfile) # model1に重みを読み込む\n\n model[i].net_info[\"height\"] = args.reso\n inp_dim[i] = int(model[i].net_info[\"height\"])\n\n assert inp_dim[i] % 32 == 0\n assert inp_dim[i] > 32\n\n #mixer.init() #初期化\n\n if CUDA:\n for i in range(num_camera):\n model[i].cuda() #CUDAが使用可能であればcudaを起動\n\n for i in range(num_camera):\n model[i].eval()\n\n #assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認\n\n img1 = cv2.imread(\"images/phase_1.jpg\")\n img2 = cv2.imread(\"images/phase_2.jpg\")\n img3 = cv2.imread(\"images/phase_2_red.jpg\")\n img4 = cv2.imread(\"images/phase_3.jpg\")\n\n frames = 0\n count_frame = 0 #フレーム数カウント\n flag = 0 #密状態(0:疎密,1:密入り)\n start = time.time()\n print('-1')\n print(\"num_camera\", num_camera)\n while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間\n print(\"len_cap\", len(cap))\n count=0 #人数をカウント\n point = [[] for i in range(num_camera)]\n for i in range(num_camera):\n ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得\n if (ret[i] for i in range(num_camera)):\n # 解析準備としてキャプチャ画像を加工\n for i in range(num_camera):\n img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])\n\n if CUDA:\n for i in range(num_camera):\n im_dim[i] = im_dim[i].cuda()\n img[i] = img[i].cuda()\n\n \"\"\"\n output = []\n for i in range(num_camera):\n output.append(model[i](Variable(img[i]), CUDA))\n print(\"output\\n\", output)\n # print(\"output\", i, \"\\n\", output[i])\n output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n # print(\"output\\n\", output)\n \"\"\"\n output0 = model[i](Variable(img[i]), CUDA)\n output1 = model[i](Variable(img[i]), CUDA)\n # output2 = model[i](Variable(img[i]), CUDA)\n # output3 = model[i](Variable(img[i]), CUDA)\n\n #print(\"output:\\n\", output)\n # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output0 = write_results(output0, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output1 = write_results(output1, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n # print(\"output\", i, \":\\n\", output[i])\n # print(output.shape)\n\n \"\"\"\n # FPSの表示\n if type(output) == int:#type(output0) == int or type(output1) == int or type(output2) == int\n # print(type(output))\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n \"\"\"\n \"\"\"\n # FPSの表示\n if (type(output[i]) == int for i in range(num_camera)):#type(output0) == int or type(output1) == int or type(output2) == int\n # print(type(output))\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n \"\"\"\n\n\n \"\"\"\n for i in 
range(num_camera):\n print(\"len_output\", len(output))\n print(\"len_output[]\", len(output[0]))\n print(\"len_output[][]\", len(output[0][0]))\n if i == 0:\n output[:1, :,1:5] = torch.clamp(output[:1, :,1:5], 0.0, float(inp_dim[i]) ) / inp_dim[i]\n output[:1, :,[1,3]] *= frame[0].shape[1]\n output[:1, :,[2,4]] *= frame[0].shape[0]\n elif i == num_classes - 1:\n output[i:, :,1:5] = torch.clamp(output[i:, :,1:5], 0.0, float(inp_dim[i]) ) / inp_dim[i]\n output[i:, :,[1,3]] *= frame[0].shape[1]\n output[i:, :,[2,4]] *= frame[0].shape[0]\n else:\n output[i:i+1, :,1:5] = torch.clamp(output[i:i+1, :,1:5], 0.0, float(inp_dim[i]) ) / inp_dim[i]\n output[i:i+1, :,[1,3]] *= frame[0].shape[1]\n output[i:i+1, :,[2,4]] *= frame[0].shape[0]\n \"\"\"\n output0[:,1:5] = torch.clamp(output0[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output0[:,[1,3]] *= frame[i].shape[1]\n output0[:,[2,4]] *= frame[i].shape[0]\n\n output1[:,1:5] = torch.clamp(output1[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output1[:,[1,3]] *= frame[i].shape[1]\n output1[:,[2,4]] *= frame[i].shape[0]\n\n colors = pkl.load(open(\"pallete\", \"rb\"))\n\n #count = lambda x: count(x, orig_im, count) #人数をカウント\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i]), output[i]))\n \"\"\"\n\n list(map(lambda x0: write(x0, orig_im[0],0), output0))\n list(map(lambda x1: write(x1, orig_im[1],1), output1))\n # print(\"x0\",x0)\n # list(map(lambda x2: write(x2, orig_im[2],2), output2))\n # list(map(lambda x3: write(x3, orig_im[3],3), output3))\n print(\"point0\",point[0])\n print(\"point1\",point[1])\n print(\"count:\\n\",count)\n\n\n if count > max:\n count_frame += 1\n #print(\"-1\")\n if count_frame <= 50:\n x=0\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)\n if flag == 1:\n play.googlehome()\n flag += 1\n #mixer.music.play(1)\n # 2000Hzで500ms秒鳴らす\n #beep(2000, 500)\n elif count_frame <= 100:\n x=-30\n y=10\n angle=20\n scale=1.1\n if count_frame%2==1:\n for i in range(num_camera):\n imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)\n else:\n for i in range(num_camera):\n imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)\n if flag == 2:\n play.googlehome()\n flag += 1\n else:\n x=-30\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)\n if count_frame > 101: #<--2フレームずらす\n print(\"\\007\") #警告音\n time.sleep(3)\n if flag == 3:\n play.googlehome()\n flag += 1\n cv2.imshow(\"frame\", imgpaste)\n else:\n count_frame = 0\n #print(\"-2\")\n for i in range(num_camera):\n # print(\"orig_im\\n\", orig_im)\n # print(\"len_orim_im\", len(orig_im))\n im_h_resize = hconcat_resize_min(orig_im)\n cv2.imshow(\"frame\", im_h_resize)\n key = cv2.waitKey(1)\n # qキーを押すと動画表示の終了\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"count_frame:\\n\", count_frame)\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n else:\n break\n\n", "from __future__ import division\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random\nimport argparse\nimport pickle as pkl\n\nimport requests\nfrom requests.auth import HTTPDigestAuth\n\nimport io\nfrom PIL import Image, ImageDraw, ImageFilter\n\nimport play\n\n#from pygame import mixer\n#import winsound\n\ncamera_name = {\n 
\"north\":0,\n \"south\":2,\n \"east\":1,\n \"west\":3,\n}\n\ndef prep_image(img, inp_dim):\n # CNNに通すために画像を加工する\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef count(x, img, count):\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\\n\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n return count\n\ndef write(x, img, cam_id):\n global count\n global point\n\n p = [0,0]\n # 画像に結果を描画\n print(\"x:\\n\", x)\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n p[0] = (c2[0]-c1[0])/2\n p[1] = (c2[1]-c1[1])/2\n point[cam_id].append(p)\n return img\n\ndef arg_parse():\n # モジュールの引数を作成\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\n # confidenceは信頼性\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n # nms_threshは閾値\n\n parser.add_argument(\"--reso\", dest = 'reso', help =\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"160\", type = str)\n # resoはCNNの入力解像度で、増加させると精度が上がるが、速度が低下する。\n return parser.parse_args() # 引数を解析し、返す\n\ndef cvpaste(img, imgback, x, y, angle, scale):\n # x and y are the distance from the center of the background image\n\n r = img.shape[0]\n c = img.shape[1]\n rb = imgback.shape[0]\n cb = imgback.shape[1]\n hrb=round(rb/2)\n hcb=round(cb/2)\n hr=round(r/2)\n hc=round(c/2)\n\n # Copy the forward image and move to the center of the background image\n imgrot = np.zeros((rb,cb,3),np.uint8)\n imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]\n\n # Rotation and scaling\n M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n # Translation\n M = np.float32([[1,0,x],[0,1,y]])\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n\n # Makeing mask\n imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of the forward image in the background image\n img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)\n\n # Take only region of the forward image.\n img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)\n\n # Paste the forward image on the background image\n imgpaste = cv2.add(img1_bg,img2_fg)\n\n return imgpaste\n\ndef hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):\n h_min = min(im.shape[0] for im in im_list)\n im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)\n for im in im_list]\n return cv2.hconcat(im_list_resize)\n\n# def beep(freq, dur=100):\n# winsound.Beep(freq, dur)\n\nif __name__ == '__main__':\n #学習前YOLO\n # cfgfile = \"cfg/yolov3.cfg\" # 設定ファイル\n # weightsfile = \"weight/yolov3.weights\" # 重みファイル\n # classes = load_classes('data/coco.names') # 識別クラスのリスト\n\n #マスク学習後YOLO\n cfgfile = \"cfg/mask.cfg\" # 設定ファイル\n weightsfile = \"weight/mask_1500.weights\" # 重みファイル\n classes = load_classes('data/mask.names') # 識別クラスのリスト\n\n\n num_classes = 80 # クラスの数\n\n args = arg_parse() # 引数を取得\n confidence = float(args.confidence) # 信頼性の設定値を取得\n nms_thesh = float(args.nms_thresh) # 閾値を取得\n start = 0\n CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか\n\n num_classes = 80 # クラスの数\n bbox_attrs = 5 + num_classes\n max = 2 #限界人数\n\n #-------適宜変更------------------------------------\n num_camera = 3 #camera数\n #-------適宜変更------------------------------------\n\n\n cap = [[] for i in range(num_camera)]\n\n #-------適宜変更------------------------------------\n cap[0] = cv2.VideoCapture(0) #カメラを指定(ivcam)\n cap[1] = cv2.VideoCapture(1) #カメラを指定(native)\n cap[2] = cv2.VideoCapture(2) #カメラを指定(USB接続)\n # cap[3] = cv2.VideoCapture(3) #カメラを指定(USB接続)\n # cap = cv2.VideoCapture(\"movies/sample.mp4\")\n #cap = cv2.VideoCapture(\"movies/one_v2.avi\")\n #-------適宜変更------------------------------------\n\n for i in range(num_camera):\n if not cap[i].isOpened():\n if i < num_camera - 1:\n for j in range(len(num_camera - i) - 1):\n cap[i + j] = cap[i + j + 1]\n cap.pop()\n num_camera -= 1\n\n # Use the next line if your camera has a username and password\n # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/1') #(ネットワーク接続)\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/80')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/video')\n #cap = cv2.VideoCapture('http://admin:[email protected]/camera-cgi/admin/recorder.cgi?action=start&id=samba')\n #cap = 
cv2.VideoCapture('http://admin:[email protected]/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/snapshot.jpg?user=admin&pwd=admin&strm=0')\n print('-1')\n\n model = [[] for i in range(num_camera)]\n inp_dim = [[] for i in range(num_camera)]\n ret = [[] for i in range(num_camera)]\n frame = [[] for i in range(num_camera)]\n img = [[] for i in range(num_camera)]\n orig_im = [[] for i in range(num_camera)]\n dim = [[] for i in range(num_camera)]\n output0 = []\n output1 = []\n output2 = []\n output3 = []\n\n for i in range(num_camera):\n model[i] = Darknet(cfgfile) #model1の作成\n model[i].load_weights(weightsfile) # model1に重みを読み込む\n\n model[i].net_info[\"height\"] = args.reso\n inp_dim[i] = int(model[i].net_info[\"height\"])\n\n assert inp_dim[i] % 32 == 0\n assert inp_dim[i] > 32\n\n #mixer.init() #初期化\n\n if CUDA:\n for i in range(num_camera):\n model[i].cuda() #CUDAが使用可能であればcudaを起動\n\n for i in range(num_camera):\n model[i].eval()\n\n #assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認\n\n img1 = cv2.imread(\"images/phase_1.jpg\")\n img2 = cv2.imread(\"images/phase_2.jpg\")\n img3 = cv2.imread(\"images/phase_2_red.jpg\")\n img4 = cv2.imread(\"images/phase_3.jpg\")\n\n frames = 0\n count_frame = 0 #フレーム数カウント\n flag = 0 #密状態(0:疎密,1:密入り)\n start = time.time()\n print('-1')\n print(\"num_camera\", num_camera)\n while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間\n print(\"len_cap\", len(cap))\n count=0 #人数をカウント\n point = [[] for i in range(num_camera)]\n for i in range(num_camera):\n ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得\n if (ret[i] for i in range(num_camera)):\n # 解析準備としてキャプチャ画像を加工\n for i in range(num_camera):\n img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])\n\n if CUDA:\n for i in range(num_camera):\n im_dim[i] = im_dim[i].cuda()\n img[i] = img[i].cuda()\n\n \"\"\"\n output = []\n for i in range(num_camera):\n output.append(model[i](Variable(img[i]), CUDA))\n print(\"output\\n\", output)\n # print(\"output\", i, \"\\n\", output[i])\n output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n # print(\"output\\n\", output)\n \"\"\"\n output0 = model[0](Variable(img[i]), CUDA)\n output1 = model[1](Variable(img[i]), CUDA)\n # output2 = model[i](Variable(img[i]), CUDA)\n # output3 = model[i](Variable(img[i]), CUDA)\n\n #print(\"output:\\n\", output)\n # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output0 = write_results(output0, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output1 = write_results(output1, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n # print(\"output\", i, \":\\n\", output[i])\n # print(output.shape)\n\n \"\"\"\n # FPSの表示\n if type(output) == int:#type(output0) == int or type(output1) == int or type(output2) == int\n # print(type(output))\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n \"\"\"\n \"\"\"\n # FPSの表示\n if (type(output[i]) == int for i in range(num_camera)):#type(output0) == int or type(output1) == int or type(output2) == int\n # print(type(output))\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n \"\"\"\n\n\n \"\"\"\n for i in 
range(num_camera):\n print(\"len_output\", len(output))\n print(\"len_output[]\", len(output[0]))\n print(\"len_output[][]\", len(output[0][0]))\n if i == 0:\n output[:1, :,1:5] = torch.clamp(output[:1, :,1:5], 0.0, float(inp_dim[i]) ) / inp_dim[i]\n output[:1, :,[1,3]] *= frame[0].shape[1]\n output[:1, :,[2,4]] *= frame[0].shape[0]\n elif i == num_classes - 1:\n output[i:, :,1:5] = torch.clamp(output[i:, :,1:5], 0.0, float(inp_dim[i]) ) / inp_dim[i]\n output[i:, :,[1,3]] *= frame[0].shape[1]\n output[i:, :,[2,4]] *= frame[0].shape[0]\n else:\n output[i:i+1, :,1:5] = torch.clamp(output[i:i+1, :,1:5], 0.0, float(inp_dim[i]) ) / inp_dim[i]\n output[i:i+1, :,[1,3]] *= frame[0].shape[1]\n output[i:i+1, :,[2,4]] *= frame[0].shape[0]\n \"\"\"\n output0[:,1:5] = torch.clamp(output0[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output0[:,[1,3]] *= frame[i].shape[1]\n output0[:,[2,4]] *= frame[i].shape[0]\n\n output1[:,1:5] = torch.clamp(output1[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output1[:,[1,3]] *= frame[i].shape[1]\n output1[:,[2,4]] *= frame[i].shape[0]\n\n colors = pkl.load(open(\"pallete\", \"rb\"))\n\n #count = lambda x: count(x, orig_im, count) #人数をカウント\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i]), output[i]))\n \"\"\"\n\n list(map(lambda x0: write(x0, orig_im[0],0), output0))\n list(map(lambda x1: write(x1, orig_im[1],1), output1))\n # print(\"x0\",x0)\n # list(map(lambda x2: write(x2, orig_im[2],2), output2))\n # list(map(lambda x3: write(x3, orig_im[3],3), output3))\n print(\"point0\",point[0])\n print(\"point1\",point[1])\n print(\"count:\\n\",count)\n\n\n if count > max:\n count_frame += 1\n #print(\"-1\")\n if count_frame <= 50:\n x=0\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)\n if flag == 1:\n play.googlehome()\n flag += 1\n #mixer.music.play(1)\n # 2000Hzで500ms秒鳴らす\n #beep(2000, 500)\n elif count_frame <= 100:\n x=-30\n y=10\n angle=20\n scale=1.1\n if count_frame%2==1:\n for i in range(num_camera):\n imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)\n else:\n for i in range(num_camera):\n imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)\n if flag == 2:\n play.googlehome()\n flag += 1\n else:\n x=-30\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)\n if count_frame > 101: #<--2フレームずらす\n print(\"\\007\") #警告音\n time.sleep(3)\n if flag == 3:\n play.googlehome()\n flag += 1\n cv2.imshow(\"frame\", imgpaste)\n else:\n count_frame = 0\n #print(\"-2\")\n for i in range(num_camera):\n # print(\"orig_im\\n\", orig_im)\n # print(\"len_orim_im\", len(orig_im))\n im_h_resize = hconcat_resize_min(orig_im)\n cv2.imshow(\"frame\", im_h_resize)\n key = cv2.waitKey(1)\n # qキーを押すと動画表示の終了\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"count_frame:\\n\", count_frame)\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n else:\n break\n\n", "from __future__ import division\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random\nimport argparse\nimport pickle as pkl\n\nimport requests\nfrom requests.auth import HTTPDigestAuth\n\nimport io\nfrom PIL import Image, ImageDraw, ImageFilter\n\nimport play\n\nimport csv\nimport pprint\n\nwith open('csv/Lidar.csv', 
encoding=\"utf-8_sig\") as f:\n LiDAR = csv.reader(f)\n for row in LiDAR:\n print(row)\n\n\ndef prep_image(img, inp_dim):\n # CNNに通すために画像を加工する\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef count(x, img, count):\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\\n\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n return count\n\ndef write(x, img,camId):\n global count\n global point\n p = [0,0]\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n print(camId, \"_c0:\",c1)\n print(camId, \"_c1:\",c2)\n label = \"{0}\".format(classes[cls])\n print(\"label:\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n p[0] = (c2[0]+c1[0])/2\n p[1] = (c2[1]+c1[1])/2\n point[camId].append(p)\n\n\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n\n return img\n\n\ndef arg_parse():\n # モジュールの引数を作成\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\n # confidenceは信頼性\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n # nms_threshは閾値\n\n parser.add_argument(\"--reso\", dest = 'reso', help =\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"160\", type = str)\n # resoはCNNの入力解像度で、増加させると精度が上がるが、速度が低下する。\n return parser.parse_args() # 引数を解析し、返す\n\ndef cvpaste(img, imgback, x, y, angle, scale):\n # x and y are the distance from the center of the background image\n\n r = img.shape[0]\n c = img.shape[1]\n rb = imgback.shape[0]\n cb = imgback.shape[1]\n hrb=round(rb/2)\n hcb=round(cb/2)\n hr=round(r/2)\n hc=round(c/2)\n\n # Copy the forward image and move to the center of the background image\n imgrot = np.zeros((rb,cb,3),np.uint8)\n imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]\n\n # Rotation and scaling\n M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n # Translation\n M = np.float32([[1,0,x],[0,1,y]])\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n\n # Makeing mask\n imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of the forward image in the background image\n img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)\n\n # Take only region of the forward image.\n img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)\n\n # Paste the forward image on the background image\n imgpaste = cv2.add(img1_bg,img2_fg)\n\n return imgpaste\n\n# def beep(freq, dur=100):\n# winsound.Beep(freq, dur)\n\nif __name__ == '__main__':\n #学習前YOLO\n # cfgfile = \"cfg/yolov3.cfg\" # 設定ファイル\n # weightsfile = \"weight/yolov3.weights\" # 重みファイル\n # classes = load_classes('data/coco.names') # 識別クラスのリスト\n\n #マスク学習後YOLO\n cfgfile = \"cfg/mask.cfg\" # 設定ファイル\n weightsfile = \"weight/mask_1500.weights\" # 重みファイル\n classes = load_classes('data/mask.names') # 識別クラスのリスト\n\n\n num_classes = 80 # クラスの数\n\n args = arg_parse() # 引数を取得\n confidence = float(args.confidence) # 信頼性の設定値を取得\n nms_thesh = float(args.nms_thresh) # 閾値を取得\n start = 0\n CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか\n\n num_classes = 80 # クラスの数\n bbox_attrs = 5 + num_classes\n max = 0 #限界人数\n num_camera = 1 #camera数\n model = [[] for i in range(num_camera)]\n inp_dim = [[] for i in range(num_camera)]\n cap = [[] for i in range(num_camera)]\n ret = [[] for i in range(num_camera)]\n frame = [[] for i in range(num_camera)]\n img = [[] for i in range(num_camera)]\n orig_im = [[] for i in range(num_camera)]\n dim = [[] for i in range(num_camera)]\n # output = [[] for i in range(num_camera)]\n # output = torch.tensor(output)\n # print(\"output_shape\\n\", output.shape)\n\n for i in range(num_camera):\n model[i] = Darknet(cfgfile) #model1の作成\n model[i].load_weights(weightsfile) # model1に重みを読み込む\n\n model[i].net_info[\"height\"] = args.reso\n inp_dim[i] = int(model[i].net_info[\"height\"])\n\n assert inp_dim[i] % 32 == 0\n assert inp_dim[i] > 32\n\n #mixer.init() #初期化\n\n if CUDA:\n for i in range(num_camera):\n model[i].cuda() #CUDAが使用可能であればcudaを起動\n\n for i in range(num_camera):\n model[i].eval()\n\n cap[0] = cv2.VideoCapture(1) #カメラを指定(USB接続)\n # cap[1] = cv2.VideoCapture(1) #カメラを指定(USB接続)\n # cap = cv2.VideoCapture(\"movies/sample.mp4\")\n #cap = cv2.VideoCapture(\"movies/one_v2.avi\")\n\n # Use the next line if your camera has a username and password\n # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/1') #(ネットワーク接続)\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/80')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/video')\n #cap = cv2.VideoCapture('http://admin:[email 
protected]/camera-cgi/admin/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/snapshot.jpg?user=admin&pwd=admin&strm=0')\n print('-1')\n\n #assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認\n\n img1 = cv2.imread(\"images/phase_1.jpg\")\n img2 = cv2.imread(\"images/phase_2.jpg\")\n img3 = cv2.imread(\"images/phase_2_red.jpg\")\n img4 = cv2.imread(\"images/phase_3.jpg\")\n #mixer.music.load(\"voice/voice_3.m4a\")\n #print(img1)\n frames = 0\n count_frame = 0 #フレーム数カウント\n flag = 0 #密状態(0:疎密,1:密入り)\n start = time.time()\n print('-1')\n while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間\n count=0 #人数をカウント\n point = [[] for i in range(num_camera)]\n for i in range(num_camera):\n ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得\n if (ret[i] for i in range(num_camera)):\n # 解析準備としてキャプチャ画像を加工\n for i in range(num_camera):\n img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])\n\n if CUDA:\n for i in range(num_camera):\n im_dim[i] = im_dim[i].cuda()\n img[i] = img[i].cuda()\n\n for i in range(num_camera):\n # output[i] = model[i](Variable(img[i]), CUDA)\n output = model[i](Variable(img[i]), CUDA)\n\n #print(\"output:\\n\", output)\n # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n # print(\"output\", i, \":\\n\", output[i])\n print(output.shape)\n \"\"\"\n # FPSの表示\n if (type(output[i]) == int for i in range(num_camera)):\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[i][:,[1,3]] *= frame[i].shape[1]\n output[i][:,[2,4]] *= frame[i].shape[0]\n \"\"\"\n # FPSの表示\n if type(output) == int:\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[:,[1,3]] *= frame[i].shape[1]\n output[:,[2,4]] *= frame[i].shape[0]\n\n\n\n colors = pkl.load(open(\"pallete\", \"rb\"))\n\n #count = lambda x: count(x, orig_im, count) #人数をカウント\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i]), output[i]))\n print(\"count:\\n\",count)\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i], i), output))\n print(\"count:\\n\",count)\n print(\"count_frame\", count_frame)\n print(\"framex\", frame[0].shape[1])\n print(\"framey\", frame[0].shape[0])\n print(\"point0\",point[0])\n\n num_person = 0\n radian_list = []\n for count, (radian, length) in enumerate(Lidar):\n radian_cam = [[] for i in range(point)]\n\n if count < 90:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p / frame[0].shape[1] * 100\n for dif in range(10):\n if int(radian)+dif-5 == radian_cam:\n num_person += 1\n radian_list.append(radian)\n\n\n # print(\"point1\",point[1])\n\n\n if count > max:\n count_frame += 1\n #print(\"-1\")\n if count_frame <= 50:\n x=0\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img1, 
orig_im[i], x, y, angle, scale)\n if flag == 1:\n play.googlehome()\n flag += 1\n #mixer.music.play(1)\n elif count_frame <= 100:\n x=-30\n y=10\n angle=20\n scale=1.1\n if count_frame%2==1:\n for i in range(num_camera):\n imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)\n else:\n for i in range(num_camera):\n imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)\n if flag == 2:\n play.googlehome()\n flag += 1\n else:\n x=-30\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)\n if count_frame > 101: #<--2フレームずらす\n print(\"\\007\") #警告音\n time.sleep(3)\n if flag == 3:\n play.googlehome()\n flag += 1\n cv2.imshow(\"frame\", imgpaste)\n else:\n count_frame = 0\n flag = 0\n #print(\"-2\")\n for i in range(num_camera):\n cv2.imshow(\"frame\", orig_im[i])\n # play.googlehome()\n key = cv2.waitKey(1)\n # qキーを押すと動画表示の終了\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"count_frame:\\n\", count_frame)\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n else:\n break\n\n", "from __future__ import division\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\nfrom util import *\nfrom darknet import Darknet\nfrom preprocess import prep_image, inp_to_image\nimport pandas as pd\nimport random\nimport argparse\nimport pickle as pkl\n\nimport requests\nfrom requests.auth import HTTPDigestAuth\n\nimport io\nfrom PIL import Image, ImageDraw, ImageFilter\n\nimport play\n\nimport csv\nimport pprint\n\nwith open('csv/Lidar.csv', 'r', encoding=\"utf-8_sig\", newline = '') as f:\n l = csv.reader(f)\n LiDAR = [row for row in l]\n # for row in LiDAR:\n # print(row)\n\n\n\ndef prep_image(img, inp_dim):\n # CNNに通すために画像を加工する\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = cv2.resize(orig_im, (inp_dim, inp_dim))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef count(x, img, count):\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n print(\"label:\\n\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n return count\n\ndef write(x, img,camId):\n global count\n global point\n p = [0,0]\n # 画像に結果を描画\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n print(camId, \"_c0:\",c1)\n print(camId, \"_c1:\",c2)\n label = \"{0}\".format(classes[cls])\n print(\"label:\", label)\n # 人数カウント\n if(label=='no-mask'):\n count+=1\n print(count)\n\n p[0] = (c2[0]+c1[0])/2\n p[1] = (c2[1]+c1[1])/2\n point[camId].append(p)\n\n\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n\n return img\n\n\ndef arg_parse():\n # モジュールの引数を作成\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\n # confidenceは信頼性\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n # nms_threshは閾値\n\n parser.add_argument(\"--reso\", dest = 'reso', help =\n 
\"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed\",\n default = \"160\", type = str)\n # resoはCNNの入力解像度で、増加させると精度が上がるが、速度が低下する。\n return parser.parse_args() # 引数を解析し、返す\n\ndef cvpaste(img, imgback, x, y, angle, scale):\n # x and y are the distance from the center of the background image\n\n r = img.shape[0]\n c = img.shape[1]\n rb = imgback.shape[0]\n cb = imgback.shape[1]\n hrb=round(rb/2)\n hcb=round(cb/2)\n hr=round(r/2)\n hc=round(c/2)\n\n # Copy the forward image and move to the center of the background image\n imgrot = np.zeros((rb,cb,3),np.uint8)\n imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]\n\n # Rotation and scaling\n M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n # Translation\n M = np.float32([[1,0,x],[0,1,y]])\n imgrot = cv2.warpAffine(imgrot,M,(cb,rb))\n\n # Makeing mask\n imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)\n mask_inv = cv2.bitwise_not(mask)\n\n # Now black-out the area of the forward image in the background image\n img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)\n\n # Take only region of the forward image.\n img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)\n\n # Paste the forward image on the background image\n imgpaste = cv2.add(img1_bg,img2_fg)\n\n return imgpaste\n\ndef cosineTheorem(Lidar, radian1, radian2):\n theta = abs(radian1-radian2)\n distance = Lidar[radian1][1] ** 2 + Lidar[radian2][1] ** 2 - 2 * Lidar[radian1][1] * Lidar[radian2][1] * math.cos(abs(radian2 - radian1))\n\n return distance\n\ndef combinations_count(n, r):\n return math.factorial(n) // (math.factorial(n - r) * math.factorial(r))\n# def beep(freq, dur=100):\n# winsound.Beep(freq, dur)\n\nif __name__ == '__main__':\n #学習前YOLO\n # cfgfile = \"cfg/yolov3.cfg\" # 設定ファイル\n # weightsfile = \"weight/yolov3.weights\" # 重みファイル\n # classes = load_classes('data/coco.names') # 識別クラスのリスト\n\n #マスク学習後YOLO\n cfgfile = \"cfg/mask.cfg\" # 設定ファイル\n weightsfile = \"weight/mask_1500.weights\" # 重みファイル\n classes = load_classes('data/mask.names') # 識別クラスのリスト\n\n\n num_classes = 80 # クラスの数\n\n args = arg_parse() # 引数を取得\n confidence = float(args.confidence) # 信頼性の設定値を取得\n nms_thesh = float(args.nms_thresh) # 閾値を取得\n start = 0\n CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか\n\n num_classes = 80 # クラスの数\n bbox_attrs = 5 + num_classes\n max = 0 #限界人数\n num_camera = 1 #camera数\n model = [[] for i in range(num_camera)]\n inp_dim = [[] for i in range(num_camera)]\n cap = [[] for i in range(num_camera)]\n ret = [[] for i in range(num_camera)]\n frame = [[] for i in range(num_camera)]\n img = [[] for i in range(num_camera)]\n orig_im = [[] for i in range(num_camera)]\n dim = [[] for i in range(num_camera)]\n # output = [[] for i in range(num_camera)]\n # output = torch.tensor(output)\n # print(\"output_shape\\n\", output.shape)\n\n for i in range(num_camera):\n model[i] = Darknet(cfgfile) #model1の作成\n model[i].load_weights(weightsfile) # model1に重みを読み込む\n\n model[i].net_info[\"height\"] = args.reso\n inp_dim[i] = int(model[i].net_info[\"height\"])\n\n assert inp_dim[i] % 32 == 0\n assert inp_dim[i] > 32\n\n #mixer.init() #初期化\n\n if CUDA:\n for i in range(num_camera):\n model[i].cuda() #CUDAが使用可能であればcudaを起動\n\n for i in range(num_camera):\n model[i].eval()\n\n cap[0] = cv2.VideoCapture(1) #カメラを指定(USB接続)\n # cap[1] = cv2.VideoCapture(1) #カメラを指定(USB接続)\n # cap = cv2.VideoCapture(\"movies/sample.mp4\")\n #cap = 
cv2.VideoCapture(\"movies/one_v2.avi\")\n\n # Use the next line if your camera has a username and password\n # cap = cv2.VideoCapture('protocol://username:password@IP:port/1')\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/1') #(ネットワーク接続)\n #cap = cv2.VideoCapture('rtsp://admin:[email protected]/80')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/video')\n #cap = cv2.VideoCapture('http://admin:[email protected]/camera-cgi/admin/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]/recorder.cgi?action=start&id=samba')\n #cap = cv2.VideoCapture('http://admin:[email protected]:80/snapshot.jpg?user=admin&pwd=admin&strm=0')\n print('-1')\n\n #assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認\n\n img1 = cv2.imread(\"images/phase_1.jpg\")\n img2 = cv2.imread(\"images/phase_2.jpg\")\n img3 = cv2.imread(\"images/phase_2_red.jpg\")\n img4 = cv2.imread(\"images/phase_3.jpg\")\n #mixer.music.load(\"voice/voice_3.m4a\")\n #print(img1)\n frames = 0\n count_frame = 0 #フレーム数カウント\n flag = 0 #密状態(0:疎密,1:密入り)\n start = time.time()\n print('-1')\n while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間\n count=0 #人数をカウント\n point = [[] for i in range(num_camera)]\n for i in range(num_camera):\n ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得\n if (ret[i] for i in range(num_camera)):\n # 解析準備としてキャプチャ画像を加工\n for i in range(num_camera):\n img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])\n\n if CUDA:\n for i in range(num_camera):\n im_dim[i] = im_dim[i].cuda()\n img[i] = img[i].cuda()\n\n for i in range(num_camera):\n # output[i] = model[i](Variable(img[i]), CUDA)\n output = model[i](Variable(img[i]), CUDA)\n\n #print(\"output:\\n\", output)\n # output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)\n output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n # print(\"output\", i, \":\\n\", output[i])\n print(output.shape)\n \"\"\"\n # FPSの表示\n if (type(output[i]) == int for i in range(num_camera)):\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[i][:,[1,3]] *= frame[i].shape[1]\n output[i][:,[2,4]] *= frame[i].shape[0]\n \"\"\"\n # FPSの表示\n if type(output) == int:\n print(\"表示\")\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n # qキーを押すとFPS表示の終了\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n for i in range(num_camera):\n output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]\n output[:,[1,3]] *= frame[i].shape[1]\n output[:,[2,4]] *= frame[i].shape[0]\n\n\n\n colors = pkl.load(open(\"pallete\", \"rb\"))\n\n #count = lambda x: count(x, orig_im, count) #人数をカウント\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i]), output[i]))\n print(\"count:\\n\",count)\n \"\"\"\n for i in range(num_camera):\n list(map(lambda x: write(x, orig_im[i], i), output))\n print(\"count:\\n\",count)\n print(\"count_frame\", count_frame)\n print(\"framex\", frame[0].shape[1])\n print(\"framey\", frame[0].shape[0])\n print(\"point0\",point[0])\n\n #LiDARの情報の人識別\n radian_lists = []\n close_list = [0] * 4\n dense_list = [0] * 4\n for count, (radian, length) in 
enumerate(LiDAR):\n radian_cam = [[] for i in range(len(point))]\n num_person = 0\n if count % 90 == 0:\n radian_list = []\n if count < 90:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n for radi_num in range(len(radian_cam)):\n if int(radian)+dif-5 == int(radian_cam[radi_num]):\n num_person += 1\n radian_list.append(radian)\n elif count < 180:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n if int(radian)+dif-5 == int(radian_cam):\n num_person += 1\n radian_list.append(radian)\n elif count < 270:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n if int(radian)+dif-5 == int(radian_cam):\n num_person += 1\n radian_list.append(radian)\n else:\n for num, p in enumerate(point[0]):\n radian_cam[num] = p[0] / frame[0].shape[1] * 100\n for dif in range(10):\n if int(radian)+dif-5 == int(radian_cam):\n num_person += 1\n radian_list.append(radian)\n radian_lists.append(radian_list)\n\n #距離計算\n dis_list = []\n for direction in range(4):\n if len(radian_lists[direction]) > 1:\n # n = combinations_count(len(radian_lists[direction]), 2)\n dis_combination = itertools.combinations(radian_lists[direction], 2)\n distance = [[] for i in range(len(dis_combination))]\n for num_dis, com_list in enumerate(dis_combination):\n distance[num_dis] = cosineTheorem(LiDAR,com_list[0], com_list[1])\n dis_list.append(distance)\n\n #密集判定\n for direction in range(4):\n close = 0 #密接数\n dense = 0 #密集数\n for dis in distance[distance]:\n if dis < 2:\n close += 1\n close_list[direction] = 1\n if close > 1:\n dense_list[direction] = 1\n\n print(\"close_list\", close_list)\n print(\"dense_list\", dense_list)\n\n # print(\"point1\",point[1])\n\n\n if count > max:\n count_frame += 1\n #print(\"-1\")\n if count_frame <= 50:\n x=0\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)\n if flag == 1:\n play.googlehome()\n flag += 1\n #mixer.music.play(1)\n elif count_frame <= 100:\n x=-30\n y=10\n angle=20\n scale=1.1\n if count_frame%2==1:\n for i in range(num_camera):\n imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)\n else:\n for i in range(num_camera):\n imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)\n if flag == 2:\n play.googlehome()\n flag += 1\n else:\n x=-30\n y=0\n angle=20\n scale=1.5\n for i in range(num_camera):\n imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)\n if count_frame > 101: #<--2フレームずらす\n print(\"\\007\") #警告音\n time.sleep(3)\n if flag == 3:\n play.googlehome()\n flag += 1\n cv2.imshow(\"frame\", imgpaste)\n else:\n count_frame = 0\n flag = 0\n #print(\"-2\")\n for i in range(num_camera):\n cv2.imshow(\"frame\", orig_im[i])\n # play.googlehome()\n key = cv2.waitKey(1)\n # qキーを押すと動画表示の終了\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"count_frame:\\n\", count_frame)\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n else:\n break\n\n" ]
[ [ "torch.from_numpy", "torch.cuda.is_available", "numpy.float32", "numpy.zeros", "torch.autograd.Variable" ], [ "numpy.split", "torch.from_numpy", "numpy.append", "numpy.float32", "torch.cuda.is_available", "numpy.array", "numpy.zeros", "torch.autograd.Variable" ], [ "torch.from_numpy", "torch.cuda.is_available", "numpy.float32", "numpy.zeros", "torch.autograd.Variable" ], [ "torch.from_numpy", "torch.cuda.is_available", "numpy.float32", "numpy.zeros", "torch.autograd.Variable" ], [ "torch.from_numpy", "torch.cuda.is_available", "numpy.float32", "numpy.zeros", "torch.autograd.Variable" ], [ "torch.from_numpy", "torch.cuda.is_available", "numpy.float32", "numpy.zeros", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XHChen0528/Pytorch-Project-Template
[ "3615fd90eb5ae2eb098b591204f1da087ab1179b" ]
[ "related_scripts/test_dataloader_hdf5.py" ]
[ "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n#############################################################\n# File: test_dataloader_hdf5.py.py\n# Created Date: Tuesday January 12th 2021\n# Author: Chen Xuanhong\n# Email: [email protected]\n# Last Modified: Wednesday, 13th January 2021 10:39:00 am\n# Modified By: Chen Xuanhong\n# Copyright (c) 2021 Shanghai Jiao Tong University\n#############################################################\n\nimport os\nimport glob\nimport h5py\nimport torch\nimport random\n\nfrom pathlib import Path\n\n\nclass TestDataset:\n def __init__(self, \n h5_path,\n batch_size=16):\n \"\"\"Initialize and preprocess the CelebA dataset.\"\"\"\n self.batch_size = batch_size\n self.pointer = 0\n\n self.h5_path= h5_path\n self.h5file = h5py.File(h5_path,'r')\n self.keys = self.h5file[\"__len__\"][()] #86366\n self.length = self.keys\n self.keys = [str(k) for k in range(self.keys)]\n\n def __call__(self):\n \"\"\"Return one batch images.\"\"\"\n \n # if self.pointer>=self.length:\n # self.pointer = 0\n\n if self.pointer>=self.length:\n self.pointer = 0\n a = \"The end of the story!\"\n raise StopIteration(print(a))\n elif (self.pointer+self.batch_size) > self.length:\n end = self.length\n else:\n end = self.pointer+self.batch_size\n for i in range(self.pointer, end):\n iii = self.keys[i]\n hr = torch.from_numpy(self.h5file[iii+\"hr\"][()])\n lr = torch.from_numpy(self.h5file[iii+\"lr\"][()])\n \n if (i-self.pointer) == 0:\n hr_ls = hr.unsqueeze(0)\n lr_ls = lr.unsqueeze(0)\n else:\n hr_ls = torch.cat((hr_ls,hr.unsqueeze(0)),0)\n lr_ls = torch.cat((lr_ls,lr.unsqueeze(0)),0)\n self.pointer = end\n return lr_ls, hr_ls\n \n def __len__(self):\n return self.length\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' + self.h5_path + ')'\n\nif __name__ == \"__main__\":\n \n dataset_path = \"G:\\\\RainNet\\\\RainNet_H5\\\\RainNet_Evaluation.hdf5\"\n\n hdf5_dataloader = TestDataset(dataset_path,64)\n print(len(hdf5_dataloader))\n # hdf5_dataloader = iter(hdf5_dataloader)\n import time\n import datetime\n start_time = time.time()\n for i in range(100):\n lr,hr = hdf5_dataloader()\n # lr,hr = next(hdf5_dataloader)\n print(hr.shape)\n # hr = hr +1\n elapsed = time.time() - start_time\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print(\"Elapsed [{}]\".format(elapsed))" ]
[ [ "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
B-DE/ChatbotMachine
[ "ec5668411c037fe1f0b265f21114b944dc9ba169" ]
[ "kochat/utils/visualizer.py" ]
[ "\"\"\"\n@auther Hyunwoong\n@since {6/23/2020}\n@see : https://github.com/gusdnd852\n\"\"\"\nimport itertools\nimport os\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport six\nfrom matplotlib import pyplot as plt\nfrom pandas import DataFrame\nfrom sklearn.decomposition import IncrementalPCA\nfrom torch import Tensor\n\nfrom kochat.decorators import backend\n\n\n# gui 오류에 대한 코드\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\n\n@backend\nclass Visualizer:\n\n def __init__(self, model_dir: str, model_file: str):\n \"\"\"\n 학습, 검증 결과를 저장하고 시각화하는 클래스입니다.\n \"\"\"\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n if not os.path.exists(model_dir + 'temp'):\n os.makedirs(model_dir + 'temp')\n\n self.model_dir = model_dir\n self.model_file = model_file\n self.train_loss, self.test_loss = [], []\n self.train_accuracy, self.test_accuracy = [], []\n self.train_precision, self.test_precision = [], []\n self.train_recall, self.test_recall = [], []\n self.train_f1_score, self.test_f1_score = [], []\n\n def save_result(self, loss: Tensor, eval_dict: dict, mode: str):\n \"\"\"\n training / test 결과를 저장합니다.\n\n :param loss: loss 리스트\n :param eval_dict: 다양한 메트릭으로 평가한 결과가 저장된 딕셔너리\n :param mode: train or test\n \"\"\"\n if mode == 'train':\n self.train_loss.append(loss.item())\n self.train_accuracy.append(eval_dict['accuracy'].item())\n self.train_precision.append(eval_dict['precision'].item())\n self.train_recall.append(eval_dict['recall'].item())\n self.train_f1_score.append(eval_dict['f1_score'].item())\n self.__save_txt(self.train_accuracy, 'train_accuracy')\n self.__save_txt(self.train_precision, 'train_precision')\n self.__save_txt(self.train_recall, 'train_recall')\n self.__save_txt(self.train_f1_score, 'train_f1_score')\n self.__save_txt(self.train_loss, 'train_loss')\n\n elif mode == 'test':\n self.test_loss.append(loss.item())\n self.test_accuracy.append(eval_dict['accuracy'].item())\n self.test_precision.append(eval_dict['precision'].item())\n self.test_recall.append(eval_dict['recall'].item())\n self.test_f1_score.append(eval_dict['f1_score'].item())\n self.__save_txt(self.test_accuracy, 'test_accuracy')\n self.__save_txt(self.test_precision, 'test_precision')\n self.__save_txt(self.test_recall, 'test_recall')\n self.__save_txt(self.test_f1_score, 'test_f1_score')\n self.__save_txt(self.test_loss, 'test_loss')\n\n else:\n raise Exception('mode는 train과 test만 가능합니다.')\n\n def draw_graphs(self):\n \"\"\"\n 다양한 메트릭 그래프를 그립니다.\n test가 True인 경우 testing 결과도 함께 그립니다.\n \"\"\"\n\n plt.plot(self.__load_txt('train_accuracy'), 'darkgreen', label='train_accuracy')\n if len(self.test_accuracy) != 0:\n plt.plot(self.__load_txt('test_accuracy'), 'limegreen', label='test_accuracy')\n self.__draw_graph('accuracy')\n\n plt.plot(self.__load_txt('train_precision'), 'darkcyan', label='train_precision')\n if len(self.test_precision) != 0:\n plt.plot(self.__load_txt('test_precision'), 'cyan', label='test_precision')\n self.__draw_graph('precision')\n\n plt.plot(self.__load_txt('train_recall'), 'darkred', label='train_recall')\n if len(self.test_recall) != 0:\n plt.plot(self.__load_txt('test_recall'), 'red', label='test_recall')\n self.__draw_graph('recall')\n\n plt.plot(self.__load_txt('train_f1_score'), 'darkmagenta', label='train_f1_score')\n if len(self.test_f1_score) != 0:\n plt.plot(self.__load_txt('test_f1_score'), 'magenta', label='test_f1_score')\n self.__draw_graph('f1_score')\n\n plt.plot(self.__load_txt('train_loss'), 'darkgoldenrod', 
label='train_loss')\n if len(self.test_loss) != 0:\n plt.plot(self.__load_txt('test_loss'), 'gold', label='test_loss')\n self.__draw_graph('loss')\n\n def draw_matrix(self, cm: np.ndarray, target_names: list, mode: str):\n \"\"\"\n metrics에서 출력된 confusion matrix을 시각화해서 그리고 저장합니다.\n\n :param cm: confusion matrix 객체\n :param target_names: 각 클래스들의 이름\n :param mode: train or test 모드\n \"\"\"\n\n label_length = len(target_names)\n figure_base_size = (label_length * 1.5)\n title_font_size = (label_length * 3) + 7\n\n cmap = plt.get_cmap('Blues')\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n plt.figure(figsize=(figure_base_size + 7, figure_base_size + 4))\n im = plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(\n label='{} confusion matrix'.format(mode),\n fontdict={'fontsize': title_font_size},\n pad=35\n )\n\n tick_marks = np.arange(label_length)\n plt.xticks(tick_marks, target_names)\n plt.yticks(tick_marks, target_names)\n plt.colorbar(im, fraction=0.046, pad=0.04)\n thresh = cm.max() / 1.5\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, \"{:0.4f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.savefig(self.model_file + '_confusion_matrix_{}.png'.format(mode))\n plt.close()\n\n def draw_report(self, report: DataFrame, mode: str):\n \"\"\"\n metrics에서 출력된 classification report를 시각화해서 그리고 저장합니다.\n\n :param report: report 데이터 프래임\n :param mode: train or test 모드\n \"\"\"\n row_colors = ['#f0f0f0', 'w']\n col_width, row_height, header_columns = 3.0, 0.625, 0\n\n size = (np.array(report.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n table = ax.table(cellText=report.values,\n bbox=[0, 0, 1, 1],\n colLabels=report.columns,\n rowLabels=report.index)\n\n table.auto_set_font_size(False)\n table.set_fontsize(12)\n\n for k, cell in six.iteritems(table._cells):\n cell.set_edgecolor('white')\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor('#09539d')\n else:\n cell.set_facecolor(row_colors[k[0] % len(row_colors)])\n\n fig = ax.get_figure()\n fig.savefig(self.model_file + '_report_{}.png'.format(mode))\n plt.close()\n\n def draw_feature_space(self, feats: Tensor, labels: Tensor, label_dict: dict, loss_name: str,\n d_loss: int, epoch: int, mode: str):\n \"\"\"\n 모든 데이터의 샘플들의 분포를 시각화합니다.\n\n :param feats: 모델의 출력 features\n :param labels: 라벨 리스트\n :param label_dict: 라벨 딕셔너리\n :param loss_name: loss 함수의 이름\n :param d_loss: loss 시각화 차원\n :param epoch: 현재 진행된 epochs\n :param mode: train or test\n \"\"\"\n\n if not isinstance(labels, np.ndarray):\n labels = labels.detach().cpu().numpy()\n if not isinstance(feats, np.ndarray):\n feats = feats.detach().cpu().numpy()\n\n if d_loss == 2:\n self.__2d_feature_space(feats, labels, label_dict)\n else:\n self.__3d_feature_space(feats, labels, label_dict, d_loss)\n\n if not os.path.exists(self.model_dir + 'feature_space'):\n os.makedirs(self.model_dir + 'feature_space')\n\n plt.legend(loc='upper right')\n plt.savefig(self.model_dir +\n 'feature_space/{loss_name}_{d_loss}D_{mode}_{epoch}.png'\n .format(loss_name=loss_name, d_loss=d_loss, mode=mode, epoch=epoch))\n\n plt.close()\n\n def __draw_graph(self, mode: str):\n \"\"\"\n plot된 정보들을 바탕으로 그래프를 그리고 저장합니다.\n\n :param mode: train or test\n \"\"\"\n\n plt.xlabel('epochs')\n plt.ylabel(mode)\n plt.title('train test 
{}'.format(mode))\n plt.grid(True, which='both', axis='both')\n plt.legend()\n plt.savefig(self.model_file + '_graph_{}.png'.format(mode))\n plt.close()\n\n def __2d_feature_space(self, feats: np.ndarray, labels: np.ndarray, label_dict: dict):\n \"\"\"\n d_loss가 2차원인 경우 2D로 시각화 합니다.\n\n :param feats: 모델의 출력 features\n :param labels: 라벨 리스트\n :param label_dict: 라벨 딕셔너리\n \"\"\"\n\n data = pd.DataFrame(np.c_[feats, labels], columns=['x', 'y', 'label'])\n ax = plt.figure().add_subplot()\n for group in data.groupby('label'):\n group_index, group_table = group\n ax.scatter(group_table['x'], group_table['y'],\n marker='o',\n label=list(label_dict)[int(group_index)])\n\n def __3d_feature_space(self, feats: np.ndarray, labels: np.ndarray, label_dict: dict, d_loss: int):\n \"\"\"\n d_loss가 3차원 이상인 경우 3D로 시각화 합니다.\n\n :param feats: 모델의 출력 features\n :param labels: 라벨 리스트\n :param label_dict: 라벨 딕셔너리\n :param d_loss: loss 시각화 차원\n \"\"\"\n\n if d_loss != 3:\n # 3차원 이상이면 PCA 수행해서 3차원으로 만듬\n pca = IncrementalPCA(n_components=3)\n split_size = (feats.shape[0] // self.batch_size) + 1\n for batch_x in np.array_split(feats, split_size):\n pca.partial_fit(batch_x)\n feats = pca.transform(feats)\n\n ax = plt.figure().gca(projection='3d')\n data = pd.DataFrame(np.c_[feats, labels], columns=['x', 'y', 'z', 'label'])\n for group in data.groupby('label'):\n group_index, group_table = group\n ax.scatter(group_table['x'], group_table['y'], group_table['z'],\n marker='o',\n label=list(label_dict)[int(group_index)])\n\n def __load_txt(self, mode: str):\n \"\"\"\n 저장된 파일을 로드하여 배열로 반환합니다.\n\n :param mode: train or test\n \"\"\"\n\n f = open(self.model_dir + 'temp{_}{mode}.txt'.format(_=self.delimeter, mode=mode), 'r')\n file = f.read()\n file = re.sub('\\\\[', '', file)\n file = re.sub('\\\\]', '', file)\n f.close()\n\n return [float(i) for idx, i in enumerate(file.split(','))]\n\n def __save_txt(self, array: list, mode: str):\n \"\"\"\n 배열을 입력받아서 string 변환해 txt파일로 저장합니다.\n\n :param array: 저장할 배열\n :param mode: train or test\n \"\"\"\n\n f = open(self.model_dir + 'temp{_}{mode}.txt'.format(_=self.delimeter, mode=mode), 'w')\n f.write(str(array))\n f.close()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "sklearn.decomposition.IncrementalPCA", "matplotlib.pyplot.get_cmap", "pandas.DataFrame", "matplotlib.pyplot.tight_layout", "numpy.arange", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.array", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "numpy.array_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
thirono/basil
[ "99052482d9334dd1f5598eb2d2fb4d5399a32291" ]
[ "basil/HL/sitcp_fifo.py" ]
[ "#\n# ------------------------------------------------------------\n# Copyright (c) All rights reserved\n# SiLab, Institute of Physics, University of Bonn\n# ------------------------------------------------------------\n#\nimport struct\nimport array\n\nimport numpy as np\n\nfrom basil.HL.HardwareLayer import HardwareLayer\n\n\nclass sitcp_fifo(HardwareLayer):\n ''' SiTCP driver that mimics the RegisterHardwareLayer BRAM and SRAM FIFO interfaces.\n\n No firmware module is available for this driver.\n This is just a driver that replaces BRAM and SRAM FIFO when the SiTCP transfer layer is used.\n '''\n _version = 0\n\n def __getitem__(self, name):\n if name == \"RESET\":\n self._intf.reset() # returns None\n elif name == 'VERSION':\n return self._version\n elif name == 'FIFO_SIZE':\n return self._intf._get_tcp_data_size()\n else:\n super(sitcp_fifo, self).__getitem__(name)\n\n def __setitem__(self, name, value):\n if name == \"RESET\":\n self._intf.reset()\n else:\n super(sitcp_fifo, self).__setitem__(name, value)\n\n def __getattr__(self, name):\n '''called only on last resort if there are no attributes in the instance that match the name\n '''\n if name.isupper():\n return self[name]\n else:\n def method(*args, **kwargs):\n nsplit = name.split('_', 1)\n if len(nsplit) == 2 and nsplit[0] == 'set' and nsplit[1].isupper() and len(args) == 1 and not kwargs:\n self[nsplit[1]] = args[0] # returns None\n elif len(nsplit) == 2 and nsplit[0] == 'get' and nsplit[1].isupper() and not args and not kwargs:\n return self[nsplit[1]]\n else:\n raise AttributeError(\"%r object has no attribute %r\" % (self.__class__, name))\n return method\n\n def __setattr__(self, name, value):\n if name.isupper():\n self[name] = value\n else:\n super(sitcp_fifo, self).__setattr__(name, value)\n\n def get_data(self):\n ''' Reading data from SiTCP FIFO (via TCP).\n\n Returns\n -------\n array : numpy.ndarray\n Array of unsigned integers (32 bit).\n '''\n fifo_size = self._intf._get_tcp_data_size()\n fifo_int_size = int((fifo_size - (fifo_size % 4)) / 4)\n data = self._intf._get_tcp_data(fifo_int_size * 4)\n return np.frombuffer(data, dtype=np.dtype('<u4'))\n\n def set_data(self, data):\n ''' Sending data to via TCP.\n\n Parameters\n ----------\n data : array\n Array of unsigned integers (32 bit).\n '''\n data = array.array('B', struct.unpack(\"{}B\".format(len(data) * 4), struct.pack(\"{}I\".format(len(data)), *data)))\n self._intf._send_tcp_data(data)\n" ]
[ [ "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
glennhickey/CharlieSandbox
[ "2949f8357433a6219abf192f899ab50e2c8edaba", "2949f8357433a6219abf192f899ab50e2c8edaba" ]
[ "brca_exchange_cooccurrence_analysis/extract_roh_regions_stats.py", "brca_exchange_cooccurrence_analysis/extract_roh_region_distributions.plot.py" ]
[ "import matplotlib\nmatplotlib.use('Agg')\nimport vcf, argparse, sys\nimport numpy as np\nimport pandas as pd\nimport math\nfrom scipy.stats import chisquare\nfrom collections import defaultdict\nimport matplotlib.pyplot\nimport ast\n\ndef parse_args():\n \"\"\" \n Description:\n function 'parse_args' parses arguments from command-line and returns an argparse\n object containing the arguments and their values. Default values are 'False' if option\n is not listed in the command, else the option value is set to True.\n \"\"\"\n parser = argparse.ArgumentParser('Input bcftools roh tab-delimited file and output roh report and histogram.')\n parser.add_argument('-i', '--inROH', type=str,\n help='Input bcftools roh output filepath.')\n parser.add_argument('-o', '--outReport', type=str,\n help='Output report filename.')\n\n options = parser.parse_args()\n return options\n\n# Shamelessly pulled from https://onestopdataanalysis.com/n50-genome/\ndef calculate_N50(list_of_lengths):\n \"\"\"Calculate N50 for a sequence of numbers.\n \n Args:\n list_of_lengths (list): List of numbers.\n \n Returns:\n float: N50 value.\n \n \"\"\"\n tmp = []\n for tmp_number in set(list_of_lengths):\n tmp += [tmp_number] * list_of_lengths.count(tmp_number) * tmp_number\n tmp.sort()\n \n if (len(tmp) % 2) == 0:\n median = (tmp[int(len(tmp) / 2) - 1] + tmp[int(len(tmp) / 2)]) / 2\n else:\n median = tmp[int(len(tmp) / 2)]\n \n return median\n\ndef main(args):\n\n options = parse_args()\n \n roh_region_dict = defaultdict(list)\n roh_region_length_dict = defaultdict(list)\n with open(options.inROH, 'r') as roh_file:\n for line in roh_file:\n parsed_line = line.strip().split('\\t')\n if parsed_line[0] == 'RG':\n print(parsed_line)\n sample_name = parsed_line[1]\n chromosome = parsed_line[2]\n start = parsed_line[3]\n end = parsed_line[4]\n length = parsed_line[5]\n num_markers = parsed_line[6]\n quality = parsed_line[7]\n roh_region_dict[sample_name].append([chromosome,start,end,length,num_markers,quality])\n roh_region_length_dict[sample_name].append(int(length))\n \n \n for sample_id in roh_region_dict.keys():\n with open('{}_{}'.format(sample_id,options.outReport), 'w') as bed_file:\n for roh_region in sorted(roh_region_dict[sample_id],key=lambda region_list:int(region_list[2])):\n region_chr = roh_region[0]\n region_start = roh_region[1]\n region_end = roh_region[2]\n bed_file.write('{}\\t{}\\t{}\\n'.format(region_chr,region_start,region_end))\n \n for sample_id in roh_region_dict.keys():\n sorted_list = sorted(roh_region_length_dict[sample_id])\n num_stat = len(sorted_list)\n min_stat = min(sorted_list)\n Q1_stat = sorted_list[-int(len(sorted_list)*0.75)]\n median_stat = sorted_list[-int(len(sorted_list)*0.5)]\n Q3_stat = sorted_list[-int(len(sorted_list)*0.25)]\n max_stat = max(sorted_list)\n n50 = calculate_N50(sorted_list)\n print(sample_id)\n print('number\\tmin\\tQ1\\tmedian\\tQ3\\tmax\\tn50')\n print('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(num_stat, min_stat, Q1_stat, median_stat, Q3_stat, max_stat, n50))\n with open('{}_roh_lengths_list.{}'.format(sample_id,options.outReport), 'w') as lengths_file:\n for length in sorted_list:\n lengths_file.write('{}\\n'.format(length))\n \n \n \nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n\n", "import matplotlib\nmatplotlib.use('Agg')\nimport vcf, argparse, sys\nimport numpy as np\nimport pandas as pd\nimport math\nfrom scipy.stats import chisquare\nfrom collections import defaultdict\nimport matplotlib\nmatplotlib.use('Agg')\nimport vcf, argparse, sys\nimport 
numpy as np\nimport pandas as pd\nimport math\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef parse_args():\n \"\"\" \n Description:\n function 'parse_args' parses arguments from command-line and returns an argparse\n object containing the arguments and their values. Default values are 'False' if option\n is not listed in the command, else the option value is set to True.\n \"\"\"\n parser = argparse.ArgumentParser('Input bcftools roh tab-delimited file and output roh report and histogram.')\n parser.add_argument('-i', '--inROHdistA', type=str,\n help='Input 1st roh distribution filepath.')\n parser.add_argument('-j', '--inROHdistB', type=str,\n help='Input 2nd roh distribution filepath.')\n parser.add_argument('-l', '--regionLength', type=int,\n help='Input length of region used in calculating SROH.')\n parser.add_argument('-o', '--outReport', type=str,\n help='Output plot filename.')\n\n options = parser.parse_args()\n return options\n\ndef main(args):\n\n options = parse_args()\n \n roh_distribution_dict = defaultdict(list)\n with open(options.inROHdistA, 'r') as roh_file_a, open(options.inROHdistB, 'r') as roh_file_b:\n for line in roh_file_a:\n if 'sample_id' in line: continue\n parsed_line = line.strip().split('\\t')\n roh_distribution_dict['SROH'].extend([float(parsed_line[2]),float(parsed_line[3]),float(parsed_line[4]),float(parsed_line[5]),float(parsed_line[6])])\n roh_distribution_dict['SROH_length'].extend(['all','100kb','1mb', '1500kb', '5mb'])\n roh_distribution_dict['group'].extend(['No']*5)\n for line in roh_file_b:\n if 'sample_id' in line: continue\n parsed_line = line.strip().split('\\t')\n roh_distribution_dict['SROH'].extend([float(parsed_line[2]),float(parsed_line[3]),float(parsed_line[4]),float(parsed_line[5]),float(parsed_line[6])])\n roh_distribution_dict['SROH_length'].extend(['all','100kb','1mb', '1500kb', '5mb'])\n roh_distribution_dict['group'].extend(['Yes']*5)\n \n violin_df = pd.DataFrame(data=roh_distribution_dict) \n sns.set(style=\"whitegrid\", font_scale=1.5)\n \n fig, axes = plt.subplots(figsize=(10, 10)) \n order=[\"all\", \"100kb\", \"1mb\", \"1500kb\", \"5mb\"]\n sns.boxplot(\n x=\"SROH_length\", y=\"SROH\", hue=\"group\", data=violin_df, \n order=order,\n ax=axes\n )\n axes.set_xticklabels([\"All\", \"100 (kb)\", \"1 (mb)\", \"1.5 (mb)\", \"5 (mb)\"])\n axes.set_xlabel(\"Minimum ROH Length\")\n axes.legend(\"\")\n fig.savefig(\"roh_distribution_violin.{}.png\".format(options.outReport)) \n matplotlib.pyplot.close(fig)\n \nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n\n" ]
[ [ "matplotlib.use" ], [ "matplotlib.pyplot.close", "matplotlib.use", "matplotlib.pyplot.subplots", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
orico/DeepMoji
[ "db1bdbd95e299c3fdbef9981e352cea79528b7b4" ]
[ "deepmoji/create_vocab.py" ]
[ "from __future__ import print_function, division\n\nimport glob\nimport json\nimport numpy as np\nimport uuid\nfrom .filter_utils import is_special_token\nfrom .word_generator import WordGenerator\nfrom collections import defaultdict, OrderedDict\nfrom .global_variables import SPECIAL_TOKENS, VOCAB_PATH\nfrom copy import deepcopy\n\n\nclass VocabBuilder():\n \"\"\" Create vocabulary with words extracted from sentences as fed from a\n word generator.\n \"\"\"\n\n def __init__(self, word_gen):\n # initialize any new key with value of 0\n self.word_counts = defaultdict(lambda: 0, {})\n self.word_length_limit = 30\n\n for token in SPECIAL_TOKENS:\n assert len(token) < self.word_length_limit\n self.word_counts[token] = 0\n self.word_gen = word_gen\n\n def count_words_in_sentence(self, words):\n \"\"\" Generates word counts for all tokens in the given sentence.\n\n # Arguments:\n words: Tokenized sentence whose words should be counted.\n \"\"\"\n for word in words:\n if 0 < len(word) and len(word) <= self.word_length_limit:\n try:\n self.word_counts[word] += 1\n except KeyError:\n self.word_counts[word] = 1\n\n def save_vocab(self, path=None):\n \"\"\" Saves the vocabulary into a file.\n\n # Arguments:\n path: Where the vocabulary should be saved. If not specified, a\n randomly generated filename is used instead.\n \"\"\"\n dtype = ([('word', '|S{}'.format(self.word_length_limit)), ('count', 'int')])\n np_dict = np.array(self.word_counts.items(), dtype=dtype)\n\n # sort from highest to lowest frequency\n np_dict[::-1].sort(order='count')\n data = np_dict\n\n if path is None:\n path = str(uuid.uuid4())\n\n np.savez_compressed(path, data=data)\n print(\"Saved dict to {}\".format(path))\n\n def get_next_word(self):\n \"\"\" Returns next tokenized sentence from the word geneerator.\n\n # Returns:\n List of strings, representing the next tokenized sentence.\n \"\"\"\n return self.word_gen.__iter__().next()\n\n def count_all_words(self):\n \"\"\" Generates word counts for all words in all sentences of the word\n generator.\n \"\"\"\n for words, _ in self.word_gen:\n self.count_words_in_sentence(words)\n\n\nclass MasterVocab():\n \"\"\" Combines vocabularies.\n \"\"\"\n\n def __init__(self):\n\n # initialize custom tokens\n self.master_vocab = {}\n\n def populate_master_vocab(self, vocab_path, min_words=1, force_appearance=None):\n \"\"\" Populates the master vocabulary using all vocabularies found in the\n given path. Vocabularies should be named *.npz. Expects the\n vocabularies to be numpy arrays with counts. Normalizes the counts\n and combines them.\n\n # Arguments:\n vocab_path: Path containing vocabularies to be combined.\n min_words: Minimum amount of occurences a word must have in order\n to be included in the master vocabulary.\n force_appearance: Optional vocabulary filename that will be added\n to the master vocabulary no matter what. 
This vocabulary must\n be present in vocab_path.\n \"\"\"\n\n paths = glob.glob(vocab_path + '*.npz')\n sizes = {path: 0 for path in paths}\n dicts = {path: {} for path in paths}\n\n # set up and get sizes of individual dictionaries\n for path in paths:\n np_data = np.load(path)['data']\n\n for entry in np_data:\n word, count = entry\n if count < min_words:\n continue\n if is_special_token(word):\n continue\n dicts[path][word] = count\n\n sizes[path] = sum(dicts[path].values())\n print('Overall word count for {} -> {}'.format(path, sizes[path]))\n print('Overall word number for {} -> {}'.format(path, len(dicts[path])))\n\n vocab_of_max_size = max(sizes, key=sizes.get)\n max_size = sizes[vocab_of_max_size]\n print('Min: {}, {}, {}'.format(sizes, vocab_of_max_size, max_size))\n\n # can force one vocabulary to always be present\n if force_appearance is not None:\n force_appearance_path = [p for p in paths if force_appearance in p][0]\n force_appearance_vocab = deepcopy(dicts[force_appearance_path])\n print(force_appearance_path)\n else:\n force_appearance_path, force_appearance_vocab = None, None\n\n # normalize word counts before inserting into master dict\n for path in paths:\n normalization_factor = max_size / sizes[path]\n print('Norm factor for path {} -> {}'.format(path, normalization_factor))\n\n for word in dicts[path]:\n if is_special_token(word):\n print(\"SPECIAL - \", word)\n continue\n normalized_count = dicts[path][word] * normalization_factor\n\n # can force one vocabulary to always be present\n if force_appearance_vocab is not None:\n try:\n force_word_count = force_appearance_vocab[word]\n except KeyError:\n continue\n # if force_word_count < 5:\n # continue\n\n if word in self.master_vocab:\n self.master_vocab[word] += normalized_count\n else:\n self.master_vocab[word] = normalized_count\n\n print('Size of master_dict {}'.format(len(self.master_vocab)))\n print(\"Hashes for master dict: {}\".format(\n len([w for w in self.master_vocab if '#' in w[0]])))\n\n def save_vocab(self, path_count, path_vocab, word_limit=100000):\n \"\"\" Saves the master vocabulary into a file.\n \"\"\"\n\n # reserve space for 10 special tokens\n words = OrderedDict()\n for token in SPECIAL_TOKENS:\n # store -1 instead of np.inf, which can overflow\n words[token] = -1\n\n # sort words by frequency\n desc_order = OrderedDict(sorted(self.master_vocab.items(),\n key=lambda kv: kv[1], reverse=True))\n words.update(desc_order)\n\n # use encoding of up to 30 characters (no token conversions)\n # use float to store large numbers (we don't care about precision loss)\n np_vocab = np.array(words.items(),\n dtype=([('word', '|S30'), ('count', 'float')]))\n\n # output count for debugging\n counts = np_vocab[:word_limit]\n np.savez_compressed(path_count, counts=counts)\n\n # output the index of each word for easy lookup\n final_words = OrderedDict()\n for i, w in enumerate(words.keys()[:word_limit]):\n final_words.update({w: i})\n with open(path_vocab, 'w') as f:\n f.write(json.dumps(final_words, indent=4, separators=(',', ': ')))\n\n\ndef all_words_in_sentences(sentences):\n \"\"\" Extracts all unique words from a given list of sentences.\n\n # Arguments:\n sentences: List or word generator of sentences to be processed.\n\n # Returns:\n List of all unique words contained in the given sentences.\n \"\"\"\n vocab = []\n if isinstance(sentences, WordGenerator):\n sentences = [s for s, _ in sentences]\n\n for sentence in sentences:\n for word in sentence:\n if word not in vocab:\n vocab.append(word)\n\n return 
vocab\n\n\ndef extend_vocab_in_file(vocab, max_tokens=10000, vocab_path=VOCAB_PATH):\n \"\"\" Extends JSON-formatted vocabulary with words from vocab that are not\n present in the current vocabulary. Adds up to max_tokens words.\n Overwrites file in vocab_path.\n\n # Arguments:\n new_vocab: Vocabulary to be added. MUST have word_counts populated, i.e.\n must have run count_all_words() previously.\n max_tokens: Maximum number of words to be added.\n vocab_path: Path to the vocabulary json which is to be extended.\n \"\"\"\n try:\n with open(vocab_path, 'r') as f:\n current_vocab = json.load(f)\n except IOError:\n print('Vocabulary file not found, expected at ' + vocab_path)\n return\n\n extend_vocab(current_vocab, vocab, max_tokens)\n\n # Save back to file\n with open(vocab_path, 'w') as f:\n json.dump(current_vocab, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n\ndef extend_vocab(current_vocab, new_vocab, max_tokens=10000):\n \"\"\" Extends current vocabulary with words from vocab that are not\n present in the current vocabulary. Adds up to max_tokens words.\n\n # Arguments:\n current_vocab: Current dictionary of tokens.\n new_vocab: Vocabulary to be added. MUST have word_counts populated, i.e.\n must have run count_all_words() previously.\n max_tokens: Maximum number of words to be added.\n\n # Returns:\n How many new tokens have been added.\n \"\"\"\n if max_tokens < 0:\n max_tokens = 10000\n\n words = OrderedDict()\n\n # sort words by frequency\n desc_order = OrderedDict(sorted(new_vocab.word_counts.items(),\n key=lambda kv: kv[1], reverse=True))\n words.update(desc_order)\n\n base_index = len(current_vocab.keys())\n added = 0\n for word in words:\n if added >= max_tokens:\n break\n if word not in current_vocab.keys():\n current_vocab[word] = base_index + added\n added += 1\n\n return added\n" ]
[ [ "numpy.load", "numpy.savez_compressed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RElbers/strotss-pytorch
[ "0c2550d713d9b17711fba3f7f9621d1d7768a5eb" ]
[ "imgs/make_table.py" ]
[ "from math import ceil, floor\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\n\nfrom util import load, save\n\n\n\ndef pad_square(img):\n h, w, c = img.shape\n size = max(h, w)\n pad_h = size - h\n pad_w = size - w\n\n img = np.pad(img, ((floor(pad_h / 2), ceil(pad_h / 2)),\n (floor(pad_w / 2), ceil(pad_w / 2)),\n (0, 0)),\n 'constant',\n constant_values=255)\n return img\n\n\ndef f(img):\n img = pad_square(img)\n\n _, w__, _ = img.shape\n w_ = w__ * r\n pad = w_ - w__\n pad = pad / 2\n\n img = np.pad(img, ((0, 0), (floor(pad), ceil(pad)), (0, 0)), 'constant', constant_values=255)\n\n img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)\n img = np.pad(img, ((8, 8), (8, 8), (0, 0)), 'constant', constant_values=255)\n return img\n\n\npath = Path(rf\"../replicate_style\")\n# path = Path(rf\"./replicate_content\")\n\nh = 440\nw = 658\nr = w / h\n\nstyles = [rf'./styles/style_{i:02}.png' for i in range(5)]\nstyles = [load(f) for f in styles]\nstyles = [f(img) for img in styles]\n\ncontents = [rf'./contents/content_{i:02}.png' for i in range(5)]\ncontents = [load(f) for f in contents]\ncontents = [f(img) for img in contents]\n\nrows = []\nrows.append(np.hstack([np.ones_like(styles[0]) *255, *styles]))\nfor j in range(5):\n row = [contents[j]]\n\n for i in range(5):\n file = path.joinpath(rf'output_{j:02}_{i:02}.png')\n img = load(file)[:h, :w]\n\n img = np.pad(img, ((8, 8), (8, 8), (0, 0)), 'constant', constant_values=255)\n\n row.append(img)\n\n row = np.hstack(row)\n rows.append(row)\n\nimg = np.vstack(rows)\n\nsave('tbl.png', img)\n" ]
[ [ "numpy.hstack", "numpy.ones_like", "numpy.pad", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
facebbook/jina
[ "e8079af3d58f1de0f51f8aef6cdf1eb3d87a9873" ]
[ "tests/integration/evaluation/rank/test_evaluation.py" ]
[ "import os\n\nimport numpy as np\n\nfrom jina import Document\nfrom jina.flow import Flow\n\nfrom tests import validate_callback\n\n\ndef test_evaluation(tmpdir, mocker):\n os.environ['JINA_TEST_RANKING_EVALUATION'] = str(tmpdir)\n\n def index_documents():\n \"\"\"Index Documents:\n doc: tag__id = 0\n tag__dummy_score = 0\n embedding = 0\n doc: tag__id = 1\n tag__dummy_score = -1\n embedding = 1\n doc: tag__id = 2\n tag__dummy_score = -2\n embedding = 2\n \"\"\"\n with Document() as doc0:\n doc0.tags['id'] = '0'\n doc0.tags['dummy_score'] = 0\n doc0.embedding = np.array([0])\n\n with Document() as doc1:\n doc1.tags['id'] = '1'\n doc1.tags['dummy_score'] = -1\n doc1.embedding = np.array([1])\n\n with Document() as doc2:\n doc2.tags['id'] = '2'\n doc2.tags['dummy_score'] = -2\n doc2.embedding = np.array([2])\n\n return [doc0, doc1, doc2]\n\n with Flow.load_config('flow-index.yml') as index_flow:\n index_flow.index(inputs=index_documents)\n\n def validate_evaluation_response(resp):\n assert len(resp.docs) == 2\n for doc in resp.docs:\n assert (\n len(doc.evaluations) == 8\n ) # 2 evaluation Pods with 4 evaluations each\n\n doc = resp.docs[0]\n assert len(doc.matches) == 2\n assert doc.evaluations[0].op_name == 'PrecisionEvaluator@1'\n assert doc.evaluations[0].value == 1.0\n assert doc.evaluations[1].op_name == 'PrecisionEvaluator@2'\n assert doc.evaluations[1].value == 0.5\n assert doc.evaluations[2].op_name == 'RecallEvaluator@1'\n assert doc.evaluations[2].value == 0.5\n assert doc.evaluations[3].op_name == 'RecallEvaluator@2'\n assert doc.evaluations[3].value == 0.5\n\n assert doc.evaluations[4].op_name == 'PrecisionEvaluator@1'\n assert doc.evaluations[4].value == 1.0\n assert doc.evaluations[5].op_name == 'PrecisionEvaluator@2'\n assert doc.evaluations[5].value == 0.5\n assert doc.evaluations[6].op_name == 'RecallEvaluator@1'\n assert doc.evaluations[6].value == 0.5\n assert doc.evaluations[7].op_name == 'RecallEvaluator@2'\n assert doc.evaluations[7].value == 0.5\n\n doc = resp.docs[1]\n assert doc.evaluations[0].op_name == 'PrecisionEvaluator@1'\n assert doc.evaluations[0].value == 1.0\n assert doc.evaluations[1].op_name == 'PrecisionEvaluator@2'\n assert doc.evaluations[1].value == 1.0\n assert doc.evaluations[2].op_name == 'RecallEvaluator@1'\n assert doc.evaluations[2].value == 0.5\n assert doc.evaluations[3].op_name == 'RecallEvaluator@2'\n assert doc.evaluations[3].value == 1.0\n\n assert doc.evaluations[4].op_name == 'PrecisionEvaluator@1'\n assert doc.evaluations[4].value == 1.0\n assert doc.evaluations[5].op_name == 'PrecisionEvaluator@2'\n assert doc.evaluations[5].value == 1.0\n assert doc.evaluations[6].op_name == 'RecallEvaluator@1'\n assert doc.evaluations[6].value == 0.5\n assert doc.evaluations[7].op_name == 'RecallEvaluator@2'\n assert doc.evaluations[7].value == 1.0\n\n def doc_groundtruth_evaluation_pairs():\n with Document() as doc0:\n doc0.embedding = np.array([0])\n\n with Document() as groundtruth0:\n m1 = Document(id='1' * 16)\n m1.score.value = -1\n match0 = groundtruth0.matches.append(m1)\n match0.tags['id'] = '0'\n m2 = Document(id='2' * 16)\n m2.score.value = -1\n match1 = groundtruth0.matches.append(m2)\n match1.tags['id'] = '2'\n # top_k is set to 2 for VectorSearchDriver\n # expects as matches [0, 2] but given [0, 1]\n # Precision@1 = 100%\n # Precision@2 = 50%\n # Recall@1 = 100%\n # Recall@2 = 50%\n\n # expects as ranked [0, 2] but given [0, 1]\n # Precision@1 = 100%\n # Precision@2 = 50%\n # Recall@1 = 100%\n # Recall@2 = 50%\n\n with Document() as 
doc1:\n doc1.embedding = np.array([2])\n\n with Document() as groundtruth1:\n m1 = Document(id='1' * 16)\n m1.score.value = -1\n match0 = groundtruth1.matches.append(m1)\n match0.tags['id'] = '1'\n m2 = Document(id='2' * 16)\n m2.score.value = -1\n match1 = groundtruth1.matches.append(m2)\n match1.tags['id'] = '2'\n # expects as matches [1, 2] but given [2, 1]\n # Precision@1 = 100%\n # Precision@2 = 100%\n # Recall@1 = 100%\n # Recall@2 = 100%\n\n # expects as ranked [1, 2] but given [2, 1]\n # Precision@1 = 100%\n # Precision@2 = 100%\n # Recall@1 = 100%\n # Recall@2 = 100%\n\n return [(doc0, groundtruth0), (doc1, groundtruth1)]\n\n response_mock = mocker.Mock()\n with Flow.load_config('flow-evaluate.yml') as evaluate_flow:\n evaluate_flow.search(\n inputs=doc_groundtruth_evaluation_pairs, on_done=response_mock, top_k=2\n )\n\n del os.environ['JINA_TEST_RANKING_EVALUATION']\n validate_callback(response_mock, validate_evaluation_response)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yousszr/Dji-TelloPy-PoseEstimation-FaceTracking
[ "40ba58c307ea081a6ddef470ca2557b48df33680", "40ba58c307ea081a6ddef470ca2557b48df33680" ]
[ "src/videos.py", "src/facetracking.py" ]
[ "import cv2;\nimport imutils\nimport numpy as np\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml');\nnet = cv2.dnn.readNetFromCaffe('deploy.prototxt.txt', 'res10_300x300_ssd_iter_140000.caffemodel')\nvideo = cv2.VideoCapture(0);\n\nwhile True:\n check, frame = video.read();\n \"\"\"convert frame to cv2 image and show\"\"\"\n frame = imutils.resize(frame, width=400)\n \n #CATTURO I FRAME E GLI CONVERTO IN BLOB\n (h, w) = frame.shape[:2] \n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0))\n \n #passo i blob alla rete neurale per la identificazione delle faccie\n net.setInput(blob)\n detections = net.forward()\n \n \n #loop sulle faccie rilevate\n for i in range(0, detections.shape[2]):\n \n ##probabilità della predizione\n confidence = detections[0, 0, i, 2]\n\n # Filtra le faccie errate\n if confidence < 0.5:\n continue\n \n #calcolo delle coordinate del box\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n \n \n \n \n #disegno sul frame\n text = \"{:.2f}%\".format(confidence * 100)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n (0, 0, 255), 2)\n \n \n cv2.putText(frame, text, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n \n \n\n cv2.imshow('tello', frame)\n _ = cv2.waitKey(1) & 0xFF\n\nvideo.release();\ncv2.destroyAllWindows();\n", "import cv2\nimport numpy as np\nfrom djitellopy import tello\nimport time\n\n\ndef findFace(faceCascade, img):\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(imgGray, 1.2, 8)\n myFaceListC = []\n myFaceListArea = []\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cx = x + w // 2\n cy = y + h // 2\n area = w * h\n cv2.circle(img, (cx, cy), 5, (0, 255, 0), cv2.FILLED)\n myFaceListC.append([cx, cy])\n myFaceListArea.append(area)\n if len(myFaceListArea) != 0:\n i = myFaceListArea.index(max(myFaceListArea))\n return img, [myFaceListC[i], myFaceListArea[i]]\n else:\n return img, [[0, 0], 0]\n\n\ndef trackFace(info, w, pid, pError):\n x, y = info[0]\n area = info[1]\n fb = 0\n error = x - w // 2\n speed = pid[0] * error + pid[1] * (error - pError)\n speed = int(np.clip(speed, -100, 100))\n if fbRange[0] < area < fbRange[1]:\n fb = 0\n elif area > fbRange[1]:\n fb = -10\n elif area < fbRange[0] and area != 0:\n fb = 10\n if x == 0:\n speed = 0\n error = 0\n drone.send_rc_control(0, fb, 0, speed)\n return error\n\n\ndrone = tello.Tello()\ndrone.connect()\nprint(drone.get_battery())\ndrone.streamon()\ndrone.takeoff()\n# drone.send_rc_control(0, 0, 25, 0)\ndrone.send_rc_control(0, 0, 20, 0)\ntime.sleep(2.2)\n\n\nw, h = 360, 240\nfbRange = [6200, 6800]\n# fbRange = [5200, 5800]\npid = [0.4, 0.4, 0]\npError = 0\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nwhile True:\n img = drone.get_frame_read().frame\n img = cv2.resize(img, (w, h))\n img, info = findFace(faceCascade, img)\n pError = trackFace(info, w, pid, pError)\n cv2.imshow(\"Output\", img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n drone.land()\n break\n\ndrone.streamoff()\ndrone.end()\n" ]
[ [ "numpy.array" ], [ "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
danielsuo/toy_flood
[ "471d3c4091d86d4a00fbf910937d4e60fdaf79a1", "471d3c4091d86d4a00fbf910937d4e60fdaf79a1" ]
[ "skgaip/tspdb/tspdb/tests/test_module_local.py", "skgaip/tspdb/tspdb/src/algorithms/pymf/sivm_search.py" ]
[ "\nimport numpy as np\nfrom tspdb.src.pindex.predict import get_prediction_range, get_prediction\nfrom tspdb.src.pindex.pindex_managment import TSPI, load_pindex\nfrom tspdb.src.pindex.pindex_utils import index_ts_mapper\nimport time\nimport timeit\nimport pandas as pd\nfrom tspdb.src.hdf_util import read_data\nfrom tspdb.src.tsUtils import randomlyHideValues\nfrom scipy.stats import norm\nfrom sklearn.metrics import r2_score\nimport tspdb\nimport psycopg2\n\ndef r2_var(y,y_h,X):\n average = np.mean(X**2) - np.mean(X)**2\n return 1 - sum((y-y_h)**2)/sum((y-average)**2)\n\ndef create_table_data():\n\n\tobs = np.arange(10**5).astype('float')\n\tmeans = obs\n\tvar = np.zeros(obs.shape)\n\tobs_9 = randomlyHideValues(np.array(obs), 0.9)[0]\n\tobs_7 = randomlyHideValues(np.array(obs), 0.7)[0]\n\tprint(obs_9)\n\tdf = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var }) \n\tdf.to_csv('testdata/tables/ts_basic_5.csv',index_label = 'time')\n\n\ttimestamps = pd.date_range('2012-10-01 00:00:00', periods = 10**5, freq='5s')\n\tdf.index = timestamps\n\tdf.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label = 'time')\n\t\n\n\t# real time series variance constant\n\tdata = read_data('testdata/MixtureTS2.h5')\n\tobs = data['obs'][:]\n\tmeans = data['means'][:]\n\tvar = np.ones(obs.shape)\n\tobs_9 = randomlyHideValues(np.array(obs), 0.9)[0]\n\tobs_7 = randomlyHideValues(np.array(obs), 0.7)[0]\n\tdf = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7 ,'var': var }) \n\tdf.index_label = 'time'\n\tdf.to_csv('testdata/tables/MixtureTS2.csv', index_label = 'time')\n\t\n\t# real time series variance constant\n\tdata = read_data('testdata/MixtureTS.h5')\n\tobs = data['obs'][:]\n\tmeans = data['means'][:]\n\tvar = np.ones(obs.shape)\n\tobs_9 = randomlyHideValues(np.array(obs), 0.9)[0]\n\tobs_7 = randomlyHideValues(np.array(obs), 0.7)[0]\n\tdf = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7,'var': var }) \n\tdf.to_csv('testdata/tables/MixtureTS.csv', index_label = 'time')\n\t\n\t# real time series varaince harmonics\n\tdata = read_data('testdata/MixtureTS_var.h5')\n\tobs = data['obs'][:]\n\tmeans = data['means'][:]\n\tvar = data['var'][:]\n\tobs_9 = randomlyHideValues(np.array(obs), 0.9)[0]\n\tobs_7 = randomlyHideValues(np.array(obs), 0.7)[0]\n\tdf = pd.DataFrame(data ={'ts':obs, 'means': means, 'ts_9':obs_9, 'ts_7' : obs_7, 'var': var }) \n\tdf.to_csv('testdata/tables/MixtureTS_var.csv', index_label = 'time')\n\ndef create_tables(interface):\n\tdir_ = tspdb.__path__[0]+'/tests/'\t\n\tfor table in ['mixturets2','ts_basic_5','ts_basic_ts_5_5','mixturets_var']:\n\t\tdf = pd.read_csv(dir_+'testdata/tables/%s.csv'%table) \n\t\tif table == 'ts_basic_ts_5_5': df['time'] = df['time'].astype('datetime64[ns]')\n\t\tinterface.create_table(table, df, 'time', include_index = False)\n\t\n\ndef update_test(interface, init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_test_pindex'):\n\tdf = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')}) \n\tinterface.create_table('ts_basic_test', df, 'row_id', index_label='row_id')\n\ttime_series_table = ['ts_basic_test','ts', 'row_id']\n\tT0 = 1000\n\tgamma = 0.5\n\tk = 2\n\tk_var = 1\n\tagg_interval = 1.\n\tconn = interface.engine.raw_connection()\n\tcur = conn.cursor()\n\tcur.execute('''SELECT create_pindex('%s','%s','%s','%s', \"T\" => %s, k => %s, k_var => %s, agg_interval => %s, var_direct => 
%s)'''%('ts_basic_test','row_id','ts', index_name, T, k,k_var, agg_interval, direct_var))\n\tcur.close()\n\tconn.commit()\n\tconn.close()\n\tfor points in update_points:\n\t\tdf = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')}, index = np.arange(init_points,points+init_points) ) \n\t\tinterface.bulk_insert('ts_basic_test', df, index_label='row_id')\n\t\tinit_points += points\n\t\tprint ('successfully updated %s points' %points)\n\t\t\ndef ts_table_tests(init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_ts_pindex'):\n\tinterface = SqlImplementation(driver=\"postgresql\", host=\"localhost\", database=\"querytime_test\",user=\"aalomar\",password=\"AAmit32lids\")\n\t\n\tdf = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')}) \n\ttimestamps = pd.date_range('2012-10-01 00:00:00', periods = init_points+1, freq='5s')\n\tend = timestamps[-1]\n\tdf.index = timestamps[:-1]\n\tinterface.create_table('ts_basic_ts', df, 'timestamp', index_label='timestamp')\n\ttime_series_table = ['ts_basic_ts','ts', 'timestamp']\n\tT0 = 1000\n\tgamma = 0.5\n\tk = 2\n\tk_var = 1\n\tTSPD = TSPI(_dir = 'C:/Program Files/PostgreSQL/10/data/', agg_interval = 5, T = T,T_var = T, rank = k, rank_var = k_var, col_to_row_ratio = 10, index_name = index_name,gamma = gamma, interface= interface ,time_series_table = time_series_table, direct_var = direct_var )\n\tTSPD.create_index()\n\tinterface = SqlImplementation(driver=\"postgresql\", host=\"localhost\", database=\"querytime_test\",user=\"aalomar\",password=\"AAmit32lids\")\n\tfor points in update_points:\n\t\tdf = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')} ) \n\t\ttimestamps = pd.date_range(end, periods = points+1, freq='5s')\n\t\tend = timestamps[-1]\n\t\tdf.index = timestamps[:-1]\n\t\tinterface.bulk_insert('ts_basic_ts', df, index_label='timestamp')\n\t\tinit_points += points\n\t\tprint ('successfully updated %s points' %points)\n\n\t\ndef create_pindex_test(interface,table_name, T,T_var, k ,k_var, direct_var,value_column= ['ts'], index_name = None , agg_interval = 1., col_to_row_ratio= 10, time_column = 'row_id'):\n\n\tT0 = 1000\n\tgamma = 0.5\n\tif index_name is None: index_name = 'pindex'\n\tvalue_column = ','.join(value_column)\n\tinterface.engine.execute('''SELECT create_pindex('%s','%s','{%s}','%s', \"T\" => %s,\"t_var\" =>%s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s, col_to_row_ratio => %s)'''%(table_name,time_column, value_column, index_name, T, T_var, k,k_var, agg_interval, direct_var, col_to_row_ratio))\n\ndef create_pindex_test2(interface,table_name, T,T_var, k ,k_var, direct_var,value_column= ['ts'], index_name = None , agg_interval = 1., col_to_row_ratio= 10, time_column = 'row_id'):\n\tdatabase = 'querytime_test'\n\tuser = 'postgres'\n\tpassword = '0505'\n\thost = 'localhost'\n\tconn_string = \"host='%s' dbname='%s' user='%s' password='%s'\" %(host, database, user, password) \n\tvalue_column = ','.join(value_column)\n\tconn = psycopg2.connect(conn_string)\n\tcursor = conn.cursor()\n\tcursor.execute(\"\"\"SELECT create_pindex('%s','%s','{%s}','%s', \"T\" => %s,\"t_var\" =>%s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s, col_to_row_ratio => %s)\"\"\"%(table_name,time_column, value_column, index_name, T, T_var, k,k_var, agg_interval, direct_var, col_to_row_ratio))\n\tconn.commit()\n\tconn.close()\t\n\n\ndef range_prediction_queries_test(index_name, table_name, max_):\n\t\n\tT1 
= [0,0,max_-10, max_-15000, max_] + list((max_+1000) * np.random.random(10))\n\tT2 = [10, 10**5, max_-1, max_, max_ +10] + list((max_+1000) * np.random.random(10))\n\tT1 = np.array(T1).astype(int)\n\tT2 = np.array(T2).astype(int)\n\tfor t1_,t2_ in zip(T1,T2):\n\t\tt1,t2 = sorted([t1_,t2_])\n\t\t# print (')\n\t\t# try:\n\t\tget_prediction_range(index_name, table_name, 'ts', 'row_id', interface, int(t1),int(t2), uq = True)\n\t\t# except: print('failure to query range between %s and %s' % (t1,t2))\n\ndef prediction_queries_test(index_name, table_name, max_):\n\t\n\tT1 = [0,max_-10, max_-1000, max_+1, max_+10] + list((max_+1000) * np.random.random(50))\n\tT1 = np.array(T1).astype(int)\n\n\tfor t1 in T1:\n\t\t# try: \n\t\t\tget_prediction(index_name, table_name, 'ts', 'row_id', interface, int(t1), uq = True)\n\t\t# except: print('failure to query point %s' %t1)\n\ndef prediction_queries_accuracy_test(max_, index_name = \"tspdb.ts_basic2_pindex2\", table_name = \"ts_basic2\"):\n\tT1 = [100000,max_-1000, max_] + list((max_-1) * np.random.random(100))\n\tT1 = np.array(T1).astype(int)\n\t\n\tfor t1 in T1:\n\t\tprint ('t = '+str(t1))\n\t\tA,_ = get_prediction(index_name, table_name, 'ts', 'row_id', interface, int(t1))\n\t\tprint (t1,A )\n\t\tassert abs(A - t1) < 1e-3\n\t\n\ndef range_prediction_queries_accuracy_test( index_name, file_name, table_name , value_column, max_ ):\n\tmax_ = interface.engine.execute('Select \"last_TS_inc\" from tspdb.'+index_name+'_meta').fetchone()[0]\n\tT1 = [0, max_]\n\tT2 = [max_, 10**5-1]\n\tT1 = np.array(T1).astype(int)\n\tT2 = np.array(T2).astype(int)\n\tdf = pd.read_csv('testdata/tables/%s.csv'%file_name) \n\tmeans = df['means']\n\tvar = df['var']\n\talpha = norm.ppf(1./2 + 95./200)\n\n\tfor t1_,t2_ in zip(T1,T2):\n\t\tt1,t2 = sorted([t1_,t2_])\n\t\tM = np.array(interface.engine.execute(\"select * from predict_range(%s, %s, %s,%s, %s, uq => True)\", (table_name, value_column,int(t1),int(t2),index_name)).fetchall()).astype(np.float)\n\t\tA = M[:,0]\n\t\test_var = (abs(M[:,0] - M[:,1])/alpha)**2\n\t\tprint (t1,t2,' rmse: ',np.sqrt(np.mean(np.square(A - means[t1:t2+1]))), np.sqrt(np.mean(np.square(est_var - var[t1:t2+1]))))\n\t\tprint (t1,t2,'r2: ',r2_score(means[t1:t2+1],A ), r2_var(var[t1:t2+1],est_var, df['ts'][t1:t2+1]))\n\t\t# print('first ten (predicted, actual) points for var: ', [(i,j) for i,j in zip(var[t1: t1+10],est_var[:10])])\n\t\t#assert abs(np.max(A - np.arange(t1,t2+1))) < 1e-3\n\ndef metrics_test(interface):\n\tcreate_pindex_test(interface,'mixturets2', 100000,100000, 3, 1, True, index_name = 'test_pindex', time_column = 'time',agg_interval = 1. 
)\n\tratio = {1:10, 2:20, 4:16, 3:18, 5:20,6:24,8:16,10:20,12:24}\n\tts_time = []\n\tdb_time = []\n\tinsert_time_2 ,insert_time = [], []\n\tpredict_time, select_time,predict_time_var, forecast_time_var, forecast_time, forecast_range_time_var, forecast_range_time, predict_range_time_var, predict_range_time, select_time_range = [],[],[],[],[],[],[],[],[],[]\n\tfor ii, ts in enumerate([1]):\n\t\tprint(ts)\n\t\tdf = pd.DataFrame()\n\t\tfor i in range(ts):\n\t\t\tcol = 'ts%s'%i\n\t\t\tdf[col]= np.arange(10**6) + np.random.normal(0,1,10**6)\n\t\t\n\t\t# Throughput test\n\t\tinterface.create_table('ts_basic', df, 'time', include_index = True, index_label = 'time', load_data=False)\n\t\tdf.to_csv('test.csv', sep='\\t', header=False, index=True, index_label='time')\n\t\tconn = interface.engine.raw_connection()\n\t\tcur = conn.cursor()\n\t\tt = time.time()\n\t\tcur.copy_from(open('test.csv','rb'), 'ts_basic', null=\"\")\n\t\tconn.commit()\n\t\tconn.close()\n\t\tdb_time.append(time.time() - t)\n\t\t\n\t\tinterface.create_table('ts_basic_%s'%ts, df, 'time', include_index = True, index_label = 'time', load_data=False)\n\t\tinterface.bulk_insert('ts_basic_%s'%ts, df, include_index=True, index_label='time')\n\t\tcolumns = ['ts%s'%i for i in range(ts)]\n\t\tt = time.time()\n\t\tcreate_pindex_test2(interface,'ts_basic_%s'%ts, 2500000,2500000, 3, 1, True, index_name = 'test_pindex',value_column= columns, time_column = 'time',agg_interval = 1., col_to_row_ratio = ratio[ts] )\n\t\tts_time.append(time.time() - t)\n\t\t\n\t\t#update test\n\t\tbatch = 100\n\t\tno_batches = 1000\n\t\tinsert_time_2.append(0)\n\t\tinsert_time.append(0)\n\t\tfor i in range(no_batches) :\n\t\t\t\n\t\t\tdf = pd.DataFrame()\n\t\t\tdf['time'] = np.arange(10**6 + i *batch, 10**6 + (i+1)*batch) \n\t\t\tfor n in range(ts):\n\t\t\t\tcol = 'ts%s'%n\n\t\t\t\tdf[col]= np.arange(10**6 + i *batch, 10**6 + (i+1)*batch) + np.random.normal(0,1,batch)\n\t\t\tcols = ['time']+['ts%s'%n for n in range(ts)]\n\t\t\tsql1 = \"INSERT INTO ts_basic\"\n\t\t\tsql = \"(\"+','.join(cols)+\") VALUES \"\n\t\t\tsql2 = \"INSERT INTO ts_basic_%s\"%ts\n\t\t\tfor row in df.values.astype(str):\n\t\t\t\tvalues = '('+','.join(row)+'),'\n\t\t\t\tsql = sql + values\n\t\t\t\n\t\t\tsql = sql[:-1]\n\t\t\t\n\t\t\t# df.to_csv('test.csv', sep='\\t', header=False, index=True, index_label='time')\n\t\t\t\n\t\t\tconn = interface.engine.raw_connection()\n\t\t\t\n\t\t\tt = time.time()\n\t\t\tcur = conn.cursor()\n\t\t\t# cur.copy_from(open('test.csv','rb'), 'ts_basic', null=\"\")\n\t\t\tcur.execute(sql1+sql)\n\t\t\tconn.commit()\n\t\t\tinsert_time[ii]+= (time.time() - t)\n\t\t\t\n\t\t\tt = time.time()\n\t\t\tcur = conn.cursor()\n\t\t\t# cur.copy_from(open('test.csv','rb'), 'ts_basic_%s'%ts, null=\"\")\n\t\t\tcur.execute(sql2+sql)\n\t\t\tconn.commit()\n\t\t\tinsert_time_2[ii] += (time.time() - t)\n\n\t\t\tconn.close()\n\n\n\t\tt_f = interface.engine.execute('select \"last_TS_inc\"/%s from tspdb.test_pindex_meta;'%ts).fetchone()[0]\n\t\t# prediction queries\n\t\tN = 100\n\t\tT = (10**6*np.random.random(N)).astype(int)\n\t\t\n\t\tinterface.engine.execute(\"select * from predict('ts_basic_%s', 'ts0', 0, 'test_pindex', uq => false)\"%ts)\n\t\t\n\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select * from predict('ts_basic_%s', 'ts0', %s, 'test_pindex', uq => false)\"%(ts,t)) \n\t\t\ttt.append((time.time() - t1))\n\t\tpredict_time.append(np.median(tt))\n\n\t\t\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = 
time.time()\n\t\t\tinterface.engine.execute(\"select * from predict('ts_basic_%s', 'ts0', %s, 'test_pindex', uq => true)\"%(ts,t)) \n\t\t\ttt.append(time.time() - t1)\n\t\tpredict_time_var.append(np.median(tt))\n\t\t\n\t\tM = 100\n\t\t\n\t\ttt = []\n\t\tfor t in range(M): \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select * from predict('ts_basic_%s', 'ts0', %s, 'test_pindex', uq => true)\"%(ts,t_f)) \n\t\t\ttt.append((time.time() - t1))\n\t\tforecast_time_var.append(np.median(tt))\n\n\t\ttt = []\n\t\tfor t in range(M): \n\t\t\tt1 = time.time()\t\n\t\t\tinterface.engine.execute(\"select * from predict('ts_basic_%s', 'ts0', %s, 'test_pindex', uq => false)\"%(ts,t_f)) \n\t\t\ttt.append((time.time() - t1))\n\t\tforecast_time.append(np.median(tt))\n\n\t\t\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\ta = interface.engine.execute(\"select ts0 from ts_basic_%s where time = %s\"%(ts,t,))\n\t\t\ttt.append((time.time() - t1))\n\t\tselect_time.append(np.median(tt))\n\n\n\t\tT = ((10**6-1000)*np.random.random(N)).astype(int)\n\t\t\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select ts0 from ts_basic_%s where time >= %s and time <= %s\"%(ts, t,t+1000,))\n\t\t\ttt.append(time.time() - t1)\n\t\tselect_time_range.append(np.median(tt))\n\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select * from predict_range('ts_basic_%s', 'ts0', %s,%s, 'test_pindex', uq => false)\"%(ts,t, t+1000,)) \n\t\t\ttt.append(time.time() - t1)\n\t\tpredict_range_time.append(np.median(tt))\n\t\t\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select * from predict_range('ts_basic_%s', 'ts0', %s,%s, 'test_pindex', uq => true)\"%(ts,t, t+1000,)) \n\t\t\ttt.append(time.time() - t1)\n\t\tpredict_range_time_var.append(np.median(tt))\n\t\t\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select * from predict_range('ts_basic_%s', 'ts0', %s,%s, 'test_pindex', uq => false)\"%(ts,t_f, t_f+1000)) \n\t\t\ttt.append(time.time() - t1)\n\t\tforecast_range_time.append(np.median(tt))\n\t\t\n\t\ttt = []\n\t\tfor t in T: \n\t\t\tt1 = time.time()\n\t\t\tinterface.engine.execute(\"select * from predict_range('ts_basic_%s', 'ts0',%s,%s, 'test_pindex', uq => true)\"%(ts,t_f, t_f+1000))\n\t\t\ttt.append(time.time() - t1)\n\t\tforecast_range_time_var.append(np.median(tt))\n\t\t\n\tdf = pd.DataFrame()\n\tdf['db_time'] = db_time\n\tdf['ts_time'] = ts_time\n\tdf['select_time'] = select_time\n\tdf['predict_time'] = predict_time\n\tdf['forecast_time'] = forecast_time\n\tdf['predict_time_var'] = predict_time_var\n\tdf['forecast_time_var'] = forecast_time_var\n\tdf['select_time_range'] = select_time_range\n\tdf['predict_range_time'] = predict_range_time\n\tdf['forecast_range_time'] = forecast_range_time\n\tdf['predict_range_time_var'] = predict_range_time_var\n\tdf['forecast_range_time_var'] = forecast_range_time_var\n\tdf['insert_time'] = insert_time\n\tdf['insert_time_2'] = insert_time_2\n\n\treturn df\n\n\ndef prediction_queries_latency_test():\n\tsetup = '''import numpy as np\nfrom tspdb.src.database_module.sql_imp import SqlImplementation\nfrom tspdb.src.pindex.predict import get_prediction_range, get_prediction\ninterface = SqlImplementation(driver=\"postgresql\", host=\"localhost\", database=\"querytime_test\",user=\"aalomar\",password=\"AAmit32lids\")\n\t'''\n\tstmt1 = '''interface.engine.execute(\"select * from predict('ts_basic_5', 'ts', 10, 
'ts_basic5_pindex', uq => false)\")'''\n\tstmt2 = '''interface.engine.execute(\"select * from predict('ts_basic_5', 'ts', 10, 'ts_basic5_pindex', uq => true)\")'''\n\tstmt3 = '''interface.engine.execute(\"select * from predict_range('ts_basic_5', 'ts', 10,110, 'ts_basic5_pindex', uq => False)\")'''\n\t\n\t\n\tstmtA = '''interface.engine.execute(\"select ts from ts_basic_5 where time = 10\") '''\n\tstmtB = '''interface.engine.execute(\"select ts from ts_basic_5 where time >= 10 and time <= 110 \") '''\n\t\n\tprint ('(test1 pindex: point query )\t \timp query latency is %s that of SELECT ' %(timeit.timeit(setup = setup,stmt= stmt1, number =10000)/timeit.timeit(setup = setup,stmt= stmtA, number =10000)))\n\tprint ('(test2 pindex: point query with uq ) \timp query latency is %s that of SELECT ' %(timeit.timeit(setup = setup,stmt= stmt2, number =10000)/timeit.timeit(setup = setup,stmt= stmtA, number =10000)))\n\tprint ('(test3 pindex: range query 100 points )\t\timp query latency is %s that of SELECT ' %(timeit.timeit(setup = setup,stmt= stmt3, number =10000)/timeit.timeit(setup = setup,stmt= stmtB, number =10000)))\n\t\t\n\tstmt1 = '''interface.engine.execute(\"select * from predict('ts_basic_5', 'ts', 99995, 'ts_basic5_pindex', uq => false)\")'''\n\tstmt2 = '''interface.engine.execute(\"select * from predict('ts_basic_5', 'ts', 99995, 'ts_basic5_pindex', uq => true)\")'''\n\tstmt3 = '''interface.engine.execute(\"select * from predict_range('ts_basic_5', 'ts', 99995,99995+100, 'ts_basic5_pindex', uq => False)\")'''\n\t\n\tprint ('(test1 pindex: point query)\t \tForecast query latency is %s that of SELECT ' %(timeit.timeit(setup = setup,stmt= stmt1, number =1000)/timeit.timeit(setup = setup,stmt= stmtB, number =1000)))\n\tprint ('(test2 pindex: point query with uq) \tForecast query latency is %s that of SELECT ' %(timeit.timeit(setup = setup,stmt= stmt2, number =1000)/timeit.timeit(setup = setup,stmt= stmtA, number =1000)))\n\tprint ('(test3 pindex: range query 100 points)\t\tForecast query latency is %s that of SELECT ' %(timeit.timeit(setup = setup,stmt= stmt3, number =1000)/timeit.timeit(setup = setup,stmt= stmtB, number =1000)))\n\n", "# Authors: Christian Thurau\r\n# License: BSD 3 Clause\r\n\"\"\" \r\nPyMF Simplex Volume Maximization [1]\r\n\r\n SIVM_SEARCH: class for search-SiVM\r\n\r\n[1] C. Thurau, K. Kersting, and C. Bauckhage. Yes We Can - Simplex Volume \r\nMaximization for Descriptive Web-Scale Matrix Factorization. In Proc. Int. \r\nConf. on Information and Knowledge Management. ACM. 2010.\r\n\"\"\"\r\nimport scipy.sparse\r\nimport numpy as np\r\n\r\nfrom dist import pdist\r\nfrom vol import *\r\nfrom sivm import SIVM\r\n\r\n__all__ = [\"SIVM_SEARCH\"]\r\n\r\nclass SIVM_SEARCH(SIVM):\r\n \"\"\" \r\n SIVM_SEARCH(data, num_bases=4, dist_measure='l2')\r\n \r\n \r\n Simplex Volume Maximization. Factorize a data matrix into two matrices s.t.\r\n F = | data - W*H | is minimal. H is restricted to convexity. W is iteratively\r\n found by maximizing the volume of the resulting simplex (see [1]). A solution\r\n is found by employing a simple A-star like search strategy.\r\n \r\n Parameters\r\n ----------\r\n data : array_like, shape (_data_dimension, _num_samples)\r\n the input data\r\n num_bases: int, optional\r\n Number of bases to compute (column rank of W and row rank of H).\r\n 4 (default) \r\n dist_measure : one of 'l2' ,'cosine', 'l1', 'kl'\r\n Standard is 'l2' which maximizes the volume of the simplex. 
In contrast,\r\n 'cosine' maximizes the volume of a cone (see [1] for details).\r\n init : string (default: 'fastmap')\r\n 'fastmap' or 'origin'. Sets the method used for finding the very first \r\n basis vector. 'Origin' assumes the zero vector, 'Fastmap' picks one of \r\n the two vectors that have the largest pairwise distance.\r\n \r\n Attributes\r\n ----------\r\n W : \"data_dimension x num_bases\" matrix of basis vectors\r\n H : \"num bases x num_samples\" matrix of coefficients\r\n ferr : frobenius norm (after calling .factorize()) \r\n \r\n Example\r\n -------\r\n Applying SIVM to some rather stupid data set:\r\n \r\n >>> import numpy as np\r\n >>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])\r\n >>> sivm_mdl = SIVM_SEARCH(data, num_bases=2)\r\n >>> sivm_mdl.factorize()\r\n \r\n The basis vectors are now stored in sivm_mdl.W, the coefficients in sivm_mdl.H. \r\n To compute coefficients for an existing set of basis vectors simply copy W \r\n to sivm_mdl.W, and set compute_w to False:\r\n \r\n >>> data = np.array([[1.5, 1.3], [1.2, 0.3]])\r\n >>> W = np.array([[1.0, 0.0], [0.0, 1.0]])\r\n >>> sivm_mdl = SIVM_SEARCH(data, num_bases=2)\r\n >>> sivm_mdl.W = W\r\n >>> sivm_mdl.factorize(compute_w=False)\r\n \r\n The result is a set of coefficients sivm_mdl.H, s.t. data = W * sivm_mdl.H.\r\n \"\"\"\r\n \r\n def _update_w(self):\r\n \r\n def h(sel, D, k):\r\n # compute the volume for a selection of sel columns\r\n # and a k-1 simplex (-> k columns have to be selected)\r\n mv = np.max(D)\r\n \r\n # fill the remaining distance by the maximal overall found distance\r\n d = np.zeros((k,k)) + mv\r\n for i in range(k):\r\n d[i,i] = 0.0\r\n \r\n for idx_i,i in enumerate(sel):\r\n for idx_j,j in enumerate(sel):\r\n d[idx_i,idx_j] = D[i, j]\r\n \r\n return d\r\n \r\n # compute distance matrix -> required for the volume\r\n D = pdist(self.data, self.data)\r\n Openset = {} \r\n \r\n for i in range(self._num_samples):\r\n # compute volume for temp selection\r\n d = h([i],D,self._num_bases) \r\n Openset[tuple([i])] = cmdet(d)\r\n \r\n Closedset = {}\r\n finished = False\r\n self._v = []\r\n self.init_sivm()\r\n next_sel = np.array([self.select[0]])\r\n niter = 0\r\n\r\n while not finished:\r\n # add the current selection to closedset\r\n Closedset[(tuple(next_sel))] = []\r\n\r\n for i in range(D.shape[0]): \r\n # create a temp selection\r\n tmp_sel = np.array(next_sel).flatten()\r\n tmp_sel = np.concatenate((tmp_sel, [i]),axis=0)\r\n tmp_sel = np.unique(tmp_sel)\r\n tmp_sel = list(tmp_sel)\r\n hkey = tuple(tmp_sel)\r\n\r\n if len(tmp_sel) > len(next_sel) and (\r\n hkey not in Closedset) and (\r\n hkey not in Openset):\r\n \r\n # compute volume for temp selection\r\n d = h(tmp_sel, D, self._num_bases)\r\n \r\n # add to openset\r\n Openset[hkey] = cmdet(d)\r\n\r\n # get next best tuple\r\n vmax = 0.0\r\n for (k,v) in Openset.items():\r\n if v > vmax:\r\n next_sel = k\r\n vmax = v\r\n\r\n self._logger.info('Iter:' + str(niter))\r\n self._logger.info('Current selection:' + str(next_sel))\r\n self._logger.info('Current volume:' + str(vmax))\r\n self._v.append(vmax)\r\n\r\n # remove next_sel from openset\r\n Openset.pop(next_sel)\r\n\r\n if len(list(next_sel)) == self._num_bases:\r\n finished = True\r\n niter += 1\r\n\r\n # update some values ...\r\n self.select = list(next_sel)\r\n self.W = self.data[:, self.select] \r\n\r\ndef _test():\r\n import doctest\r\n doctest.testmod()\r\n \r\nif __name__ == \"__main__\":\r\n _test()\r\n" ]
[ [ "numpy.square", "scipy.stats.norm.ppf", "pandas.read_csv", "sklearn.metrics.r2_score", "numpy.random.random", "numpy.arange", "numpy.median", "pandas.DataFrame", "numpy.ones", "numpy.random.normal", "numpy.mean", "pandas.date_range", "numpy.array", "numpy.zeros" ], [ "numpy.unique", "numpy.concatenate", "numpy.max", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
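The accuracy test in the record above recovers a predictive variance from tspdb's uncertainty output by inverting a 95% confidence interval: alpha = norm.ppf(1./2 + 95./200) is the two-sided 95% z-score (about 1.96), and est_var = (abs(bound - mean)/alpha)**2 undoes the half-width computation. A minimal, self-contained sketch of that round-trip, with made-up mean and variance values standing in for the database output:

import numpy as np
from scipy.stats import norm

alpha = norm.ppf(1. / 2 + 95. / 200)    # two-sided 95% z-score, ~1.96

mean, var = 10.0, 4.0                   # hypothetical predictive mean and variance
upper = mean + alpha * np.sqrt(var)     # the CI bound a uq => True query would carry

est_var = (abs(upper - mean) / alpha) ** 2
assert abs(est_var - var) < 1e-9        # the half-width inverts back to the variance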
readthedocs-assistant/pangeo-pyinterp
[ "e9dc18445dce36638d5a90f64c8e2f1b53164f90", "e9dc18445dce36638d5a90f64c8e2f1b53164f90" ]
[ "src/pyinterp/tests/core/test_trivariate.py", "src/pyinterp/grid.py" ]
[ "# Copyright (c) 2022 CNES\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\nimport os\nimport pickle\nimport pytest\nimport netCDF4\ntry:\n import matplotlib.pyplot\n import matplotlib.colors\n HAVE_PLT = True\nexcept ImportError:\n HAVE_PLT = False\nimport numpy as np\nfrom ... import core\nfrom .. import grid3d_path, make_or_compare_reference\n\n\ndef plot(x, y, z, filename):\n figure = matplotlib.pyplot.figure(figsize=(15, 15), dpi=150)\n value = z.mean()\n std = z.std()\n normalize = matplotlib.colors.Normalize(vmin=value - 3 * std,\n vmax=value + 3 * std)\n axe = figure.add_subplot(2, 1, 1)\n axe.pcolormesh(x, y, z, cmap='jet', norm=normalize, shading='auto')\n figure.savefig(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n filename),\n bbox_inches='tight',\n pad_inches=0.4)\n\n\ndef load_data(temporal_axis=False):\n with netCDF4.Dataset(grid3d_path()) as ds: # type: ignore\n z = np.flip(ds.variables['tcw'][:].T, axis=1)\n z[z.mask] = float(\"nan\")\n if temporal_axis:\n z_axis = core.TemporalAxis(\n netCDF4.num2date( # type: ignore\n ds.variables['time'][:],\n ds.variables['time'].units,\n only_use_cftime_datetimes=False,\n only_use_python_datetimes=True).astype(\"datetime64[h]\"))\n class_ = (core.TemporalGrid3DFloat64\n if temporal_axis else core.Grid3DFloat64)\n\n return core.TemporalGrid3DFloat64(\n core.Axis(ds.variables['longitude'][:], is_circle=True),\n core.Axis(np.flip(ds.variables['latitude'][:])), z_axis,\n z.data)\n return core.Grid3DFloat64(\n core.Axis(ds.variables['longitude'][:], is_circle=True),\n core.Axis(np.flip(ds.variables['latitude'][:])),\n core.Axis(ds.variables['time'][:]), z.data)\n\n\ndef test_grid3d_accessors():\n \"\"\"Test construction and accessors of the object\"\"\"\n grid = load_data()\n assert isinstance(grid.x, core.Axis)\n assert isinstance(grid.y, core.Axis)\n assert isinstance(grid.z, core.Axis)\n assert isinstance(grid.array, np.ndarray)\n\n\ndef test_grid3d_pickle():\n \"\"\"Serialization test\"\"\"\n grid = load_data()\n other = pickle.loads(pickle.dumps(grid))\n assert grid.x == other.x\n assert grid.y == other.y\n assert grid.z == other.z\n assert np.all(\n np.ma.fix_invalid(grid.array) == np.ma.fix_invalid(other.array))\n\n\ndef run_interpolator(interpolator, filename, visualize, dump):\n \"\"\"Testing an interpolation method.\"\"\"\n grid = load_data()\n lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0\n lat = np.arange(-90, 90, 1 / 3.0) + 1 / 3.0\n time = 898500 + 3\n x, y, t = np.meshgrid(lon, lat, time, indexing=\"ij\")\n z0 = core.trivariate_float64(grid,\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n num_threads=0)\n z1 = core.trivariate_float64(grid,\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n num_threads=1)\n make_or_compare_reference(filename + \".npy\", z1, dump)\n shape = (len(lon), len(lat))\n z0 = np.ma.fix_invalid(z0)\n z1 = np.ma.fix_invalid(z1)\n assert np.all(z1 == z0)\n if HAVE_PLT and visualize:\n plot(x.reshape(shape), y.reshape(shape), z0.reshape(shape), filename)\n return z0\n\n\ndef test_trivariate_spline(pytestconfig):\n \"\"\"Testing of the spline interpolation\"\"\"\n grid = load_data()\n lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0\n lat = np.arange(-80, 80, 1 / 3.0) + 1 / 3.0\n time = 898524 + 3\n x, y, t = np.meshgrid(lon, lat, time, indexing='ij')\n z0 = core.spline_float64(grid,\n x.ravel(),\n y.ravel(),\n t.ravel(),\n fitting_model=\"akima\",\n bounds_error=True,\n num_threads=0)\n z1 = 
core.spline_float64(grid,\n x.ravel(),\n y.ravel(),\n t.ravel(),\n fitting_model=\"akima\",\n bounds_error=True,\n num_threads=1)\n make_or_compare_reference(\"test_trivariate_spline.npy\", z1,\n pytestconfig.getoption(\"dump\"))\n shape = (len(lon), len(lat))\n z0 = np.ma.fix_invalid(z0)\n z1 = np.ma.fix_invalid(z1)\n assert np.all(z1 == z0)\n if HAVE_PLT and pytestconfig.getoption(\"visualize\"):\n plot(x.reshape(shape), y.reshape(shape), z0.reshape(shape),\n \"tcw_spline.png\")\n\n\ndef test_grid3d_bounds_error():\n \"\"\"Test of the detection on interpolation outside bounds\"\"\"\n grid = load_data()\n interpolator = core.Bilinear3D()\n lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0\n lat = np.arange(-90, 90 + 1, 1 / 3.0) + 1 / 3.0\n time = 898500 + 3\n x, y, t = np.meshgrid(lon, lat, time, indexing=\"ij\")\n core.trivariate_float64(\n grid, # type: ignore\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n num_threads=0)\n with pytest.raises(ValueError):\n core.trivariate_float64(\n grid, # type: ignore\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n bounds_error=True,\n num_threads=0)\n\n\ndef test_grid3d_z_method(pytestconfig):\n \"\"\"Test of the interpolation method used on Z-axis\"\"\"\n dump = pytestconfig.getoption(\"dump\")\n grid = load_data(temporal_axis=True)\n interpolator = core.TemporalBilinear3D()\n lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0\n lat = np.arange(-90, 90 + 1, 1 / 3.0) + 1 / 3.0\n time = np.array([\n netCDF4.num2date( # type: ignore\n 898500 + 3,\n \"hours since 1900-01-01 00:00:0.0\",\n only_use_cftime_datetimes=False,\n only_use_python_datetimes=True)\n ]).astype(\"datetime64[h]\").astype(\"int64\")\n x, y, t = np.meshgrid(lon, lat, time, indexing=\"ij\")\n z0 = core.trivariate_float64(\n grid, # type: ignore\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n num_threads=0)\n z1 = core.trivariate_float64(\n grid, # type: ignore\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n z_method=\"linear\",\n num_threads=0)\n make_or_compare_reference(\"test_grid3d_z_method_linear.npy\", z1, dump)\n z0 = np.ma.fix_invalid(z0)\n z1 = np.ma.fix_invalid(z1)\n assert np.all(z0 == z1)\n z1 = core.trivariate_float64(\n grid, # type: ignore\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n z_method=\"nearest\",\n num_threads=0)\n make_or_compare_reference(\"test_grid3d_z_method_nearest.npy\", z1, dump)\n z1 = np.ma.fix_invalid(z1)\n assert np.all(z0 != z1)\n with pytest.raises(ValueError):\n core.trivariate_float64(\n grid, # type: ignore\n x.ravel(),\n y.ravel(),\n t.ravel(),\n interpolator,\n z_method=\"NEAREST\",\n num_threads=0)\n\n\ndef test_grid3d_interpolator(pytestconfig):\n \"\"\"Testing of different interpolation methods\"\"\"\n visualize = pytestconfig.getoption(\"visualize\")\n dump = pytestconfig.getoption(\"dump\")\n a = run_interpolator(core.Nearest3D(), \"tcw_trivariate_nearest\", visualize,\n dump)\n b = run_interpolator(core.Bilinear3D(), \"tcw_trivariate_bilinear\",\n visualize, dump)\n c = run_interpolator(core.InverseDistanceWeighting3D(),\n \"tcw_trivariate_idw\", visualize, dump)\n assert (a - b).std() != 0\n assert (a - c).std() != 0\n assert (b - c).std() != 0\n", "# Copyright (c) 2022 CNES\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\"\"\"\nRegular grids\n=============\n\"\"\"\nfrom typing import Optional, Union\nimport numpy as np\nfrom . import core\nfrom . 
import interface\n\n\nclass Grid2D:\n \"\"\"2D Cartesian Grid.\n \"\"\"\n #: The number of grid dimensions handled by this object\n _DIMENSIONS = 2\n\n def __init__(self, *args, increasing_axes: Optional[str] = None):\n \"\"\"\n Initialize a new 2D Cartesian Grid.\n\n Args:\n x (pyinterp.Axis): X-Axis.\n y (pyinterp.Axis): Y-Axis.\n array (numpy.ndarray): Discrete representation of a continuous\n function on a uniform 2-dimensional grid.\n increasing_axes ({'inplace', 'copy'}, optional): Optional string\n indicating how to ensure that the grid axes are increasing. If\n axes are decreasing, the axes and grid provided will be flipped\n in place or copied before being flipped. By default, the\n decreasing axes are not modified.\n\n Examples:\n\n >>> import numpy as np\n >>> import pyinterp\n >>> x_axis = pyinterp.Axis(np.arange(-180.0, 180.0, 1.0),\n ... is_circle=True)\n >>> y_axis = pyinterp.Axis(np.arange(-80.0, 80.0, 1.0),\n ... is_circle=False)\n >>> array = np.zeros((len(x_axis), len(y_axis)))\n >>> grid = pyinterp.Grid2D(x_axis, y_axis, array)\n >>> grid\n <pyinterp.grid.Grid2D>\n array([[0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.],\n ...,\n [0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.],\n [0., 0., 0., ..., 0., 0., 0.]])\n Axis:\n * x: <pyinterp.axis.Axis>\n min_value: -180.0\n max_value: 179.0\n step: 1.0\n is_circle: True\n * y: <pyinterp.axis.Axis>\n min_value: -80.0\n max_value: 79.0\n step: 1.0\n is_circle: False\n \"\"\"\n prefix = \"\"\n for idx, item in enumerate(args):\n if isinstance(item, core.TemporalAxis):\n prefix = \"Temporal\"\n break\n _class = f\"{prefix}Grid{self._DIMENSIONS}D\" + \\\n interface._core_class_suffix(args[-1], handle_integer=True)\n if increasing_axes is not None:\n if increasing_axes not in ['inplace', 'copy']:\n raise ValueError(\"increasing_axes \"\n f\"{increasing_axes!r} is not defined\")\n inplace = increasing_axes == 'inplace'\n # Tuple does not support item assignment\n args = list(args)\n for idx, item in enumerate(args):\n if isinstance(item,\n (core.Axis,\n core.TemporalAxis)) and not item.is_ascending():\n args[idx] = item.flip(inplace=inplace)\n args[-1] = np.flip(args[-1], axis=idx)\n self._instance = getattr(core, _class)(*args)\n self._prefix = prefix\n\n def __repr__(self):\n \"\"\"Called by the ``repr()`` built-in function to compute the string\n representation of this instance.\n \"\"\"\n pad = lambda s, n: \"\\n\".join([(\" \" * n if ix else \"\") + line for ix,\n line in enumerate(s.split(\"\\n\"))])\n result = [\n f\"<{self.__module__}.{self.__class__.__name__}>\",\n repr(self.array),\n ]\n result.append(\"Axis:\")\n for item in dir(self):\n attr = getattr(self, item)\n if isinstance(attr, (core.Axis, core.TemporalAxis)):\n prefix = f\"* {item}: \"\n result.append(f\" {prefix}{pad(repr(attr), len(prefix))}\")\n return \"\\n\".join(result)\n\n @property\n def x(self) -> core.Axis:\n \"\"\"Gets the X-Axis handled by this instance.\n\n Returns:\n pyinterp.Axis: X-Axis.\n \"\"\"\n return self._instance.x\n\n @property\n def y(self) -> core.Axis:\n \"\"\"Gets the Y-Axis handled by this instance\n\n Returns:\n pyinterp.Axis: Y-Axis.\n \"\"\"\n return self._instance.y\n\n @property\n def array(self) -> np.ndarray:\n \"\"\"Gets the values handled by this instance.\n\n Returns:\n numpy.ndarray: values.\n \"\"\"\n return self._instance.array\n\n\nclass Grid3D(Grid2D):\n \"\"\"3D Cartesian Grid.\n \"\"\"\n _DIMENSIONS = 3\n\n def __init__(self, *args, increasing_axes=None):\n 
\"\"\"\n Initialize a new 3D Cartesian Grid.\n\n Args:\n x (pyinterp.Axis): X-Axis.\n y (pyinterp.Axis): Y-Axis.\n z (pyinterp.Axis, pyinterp.TemporalAxis): Z-Axis.\n array (numpy.ndarray): Discrete representation of a continuous\n function on a uniform 3-dimensional grid.\n increasing_axes (bool, optional): Ensure that the axes of the grid\n are increasing. If this is not the case, the axes and grid\n provided will be flipped. Default to False.\n\n .. note::\n\n If the Z axis is a :py:class:`temporal axis\n <pyinterp.TemporalAxis>`, the grid will handle this axis during\n interpolations as a time axis.\n\n Examples:\n\n >>> import numpy as np\n >>> import pyinterp\n >>> x_axis = pyinterp.Axis(np.arange(-180.0, 180.0, 1.0),\n ... is_circle=True)\n >>> y_axis = pyinterp.Axis(np.arange(-80.0, 80.0, 1.0),\n ... is_circle=False)\n >>> z_axis = pyinterp.TemporalAxis(\n ... np.array(['2000-01-01'], dtype=\"datetime64[s]\"))\n >>> array = np.zeros((len(x_axis), len(y_axis), len(z_axis)))\n >>> grid = pyinterp.Grid3D(x_axis, y_axis, z_axis, array)\n \"\"\"\n super().__init__(*args, increasing_axes=increasing_axes)\n\n @property\n def z(self) -> Union[core.Axis, core.TemporalAxis]:\n \"\"\"\n Gets the Z-Axis handled by this instance.\n\n Returns:\n pyinterp.Axis, pyinterp.TemporalAxis: Z-Axis.\n \"\"\"\n return self._instance.z\n\n\nclass Grid4D(Grid3D):\n \"\"\"4D Cartesian Grid.\n \"\"\"\n _DIMENSIONS = 4\n\n def __init__(self, *args, increasing_axes=None):\n \"\"\"\n Initialize a new 4D Cartesian Grid.\n\n Args:\n x (pyinterp.Axis): X-Axis.\n y (pyinterp.Axis): Y-Axis.\n z (pyinterp.Axis, pyinterp.TemporalAxis): Z-Axis.\n u (pyinterp.Axis): U-Axis.\n array (numpy.ndarray): Discrete representation of a continuous\n function on a uniform 4-dimensional grid.\n increasing_axes (bool, optional): Ensure that the axes of the grid\n are increasing. If this is not the case, the axes and grid\n provided will be flipped. Default to False.\n\n .. note::\n\n If the Z axis is a temporal axis, the grid will handle this axis\n during interpolations as a time axis.\n \"\"\"\n super().__init__(*args, increasing_axes=increasing_axes)\n\n @property\n def u(self) -> core.Axis:\n \"\"\"Gets the U-Axis handled by this instance.\n\n Returns:\n pyinterp.Axis: U-Axis.\n \"\"\"\n return self._instance.u\n\n\ndef _core_variate_interpolator(instance: object, interpolator: str, **kwargs):\n \"\"\"Obtain the interpolator from the string provided.\"\"\"\n if isinstance(instance, Grid2D):\n dimensions = instance._DIMENSIONS\n # 4D interpolation uses the 3D interpolator\n if dimensions > 3:\n dimensions -= 1\n else:\n raise TypeError(\"instance is not an object handling a grid.\")\n\n prefix = instance._prefix\n\n if interpolator == \"bilinear\":\n return getattr(core, f\"{prefix}Bilinear{dimensions}D\")(**kwargs)\n if interpolator == \"nearest\":\n return getattr(core, f\"{prefix}Nearest{dimensions}D\")(**kwargs)\n if interpolator == \"inverse_distance_weighting\":\n return getattr(\n core, f\"{prefix}InverseDistanceWeighting{dimensions}D\")(**kwargs)\n\n raise ValueError(f\"interpolator {interpolator!r} is not defined\")\n" ]
[ [ "numpy.ma.fix_invalid", "numpy.arange", "numpy.all", "numpy.meshgrid", "numpy.flip" ], [ "numpy.flip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
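The trivariate tests in the record above all build their query points the same way: np.meshgrid(..., indexing="ij") expands the lon/lat axes (and a scalar time) into coordinate arrays, and .ravel() flattens each one into the 1-D vectors that core.trivariate_float64 consumes. A standalone sketch of just that construction, using the same axes as the tests and no pyinterp dependency:

import numpy as np

lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0
lat = np.arange(-90, 90, 1 / 3.0) + 1 / 3.0
time = 898500 + 3                        # a single epoch, broadcast over the grid

x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
assert x.shape == (len(lon), len(lat), 1)

# One (lon, lat, time) triple per interpolation request:
coords = x.ravel(), y.ravel(), t.ravel()
assert all(c.shape == (len(lon) * len(lat),) for c in coords)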
johnnykoo84/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "484de1902b3785b68a0f598ca75e32d67e6ba09a" ]
[ "module2-sql-for-analysis/insert_titanic.py" ]
[ "import os\nimport pandas as pd\nimport psycopg2\nfrom psycopg2 import extras\nfrom dotenv import load_dotenv\nimport numpy as np\nload_dotenv()\nprint(__file__)\npsycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)\n\n# load csv to dataframe\nCSV_FILE_PATH = os.path.join(os.path.dirname(__file__), 'titanic.csv')\ndf = pd.read_csv(CSV_FILE_PATH)\n# print(df.head())\n# print(df.columns)\n# print(df.values)\nprint('df.dtypes\\n', df.dtypes)\n# Connect to ElephantSQL-hosted PostgreSQL\nconn = psycopg2.connect(\n dbname=os.getenv(\"dbname\"),\n user=os.getenv(\"user\"),\n password=os.getenv(\"password\"),\n host=os.getenv(\"host\")\n)\n# A \"cursor\", a structure to iterate over db records to perform queries\ncur = conn.cursor()\n\n# Drop the table if exist\ncur.execute('DROP TABLE IF EXISTS Titanic;')\n\n# CREATE TABLE query\nquery_create = \"\"\"CREATE TABLE Titanic (\n Survived INT,\n Pclass INT,\n Name varchar(120),\n Sex varchar(10),\n Age INT,\n SiblingsSpouses INT,\n ParentsChildren INT,\n Fare INT);\n\"\"\"\ncur.execute(query_create)\n\n# test\n\n# query = 'INSERT INTO Titanic VALUES (0, 3, \\'Mr. Owen\\', \\'male\\', 22.0, 1, 0, 7.25);'\n# cur.execute(query)\n# cur.execute('SELECT * FROM Titanic')\n# print('first fetch', cur.fetchall())\n\n# this is a solution from Mike\nlist_of_tuples = list(df.to_records(index=False))\nins_query = 'INSERT INTO Titanic (Survived, Pclass, Name, Sex, Age, SiblingsSpouses, ParentsChildren, Fare) VALUES %s;'\nextras.execute_values(cur, ins_query, list_of_tuples)\n\n# this was my initial code but not working\n# for row in df.values:\n# print('######')\n# print(type(row))\n# print(row)\n# cur.execute(\"INSERT INTO Titanic (Survived, Pclass, Name, Sex, Age, SiblingsSpouses, ParentsChildren, Fare) VALUES %s;\", tuple(row))\n\nconn.commit()\n\ncur.execute('SELECT * FROM Titanic')\nprint('second fetch', cur.fetchall())\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
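The working bulk insert in the record above ("the solution from Mike") relies on psycopg2's execute_values helper: the statement carries a single %s placeholder, and the helper expands a list of row tuples into one multi-row VALUES clause server-side, which is why the row-by-row loop it replaces was unnecessary. A minimal sketch of the same pattern; the connection parameters and the passengers table are hypothetical:

import psycopg2
from psycopg2 import extras

conn = psycopg2.connect(dbname="demo", user="demo", password="demo", host="localhost")
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS passengers (name varchar(120), age int);")

rows = [("Mr. Owen", 22), ("Mrs. Florence", 38)]   # one tuple per DataFrame row
extras.execute_values(cur, "INSERT INTO passengers (name, age) VALUES %s;", rows)

conn.commit()
cur.close()
conn.close()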
rougier/VSOM
[ "78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa", "78e6eb924b5f89a0e6f42eb6bbe7971473a9abaa" ]
[ "attic/vsom.py", "attic/vsom-spatial.py" ]
[ "# -----------------------------------------------------------------------------\n# VSOM (Voronoidal Self Organized Map)\n# Copyright (c) 2019 Nicolas P. Rougier\n#\n# Distributed under the terms of the BSD License.\n# -----------------------------------------------------------------------------\nimport sys\nimport tqdm\nimport numpy as np\nimport scipy.spatial\nfrom math import sqrt, ceil, floor, pi, cos, sin\n\nimport scipy.spatial\nimport networkx as nx\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as path_effects\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nfrom matplotlib.collections import LineCollection, PolyCollection\n\n\n\n# http://stackoverflow.com/questions/28665491/...\n# ...getting-a-bounded-polygon-coordinates-from-voronoi-cells\ndef in_box(points, bbox):\n return np.logical_and(\n np.logical_and(bbox[0] <= points[:, 0], points[:, 0] <= bbox[1]),\n np.logical_and(bbox[2] <= points[:, 1], points[:, 1] <= bbox[3]))\n\n\ndef voronoi(points, bbox):\n # See http://stackoverflow.com/questions/28665491/...\n # ...getting-a-bounded-polygon-coordinates-from-voronoi-cells\n # See also https://gist.github.com/pv/8036995\n \n # Select points inside the bounding box\n i = in_box(points, bbox)\n\n # Mirror points\n points_center = points[i, :]\n points_left = np.copy(points_center)\n points_left[:, 0] = bbox[0] - (points_left[:, 0] - bbox[0])\n points_right = np.copy(points_center)\n points_right[:, 0] = bbox[1] + (bbox[1] - points_right[:, 0])\n points_down = np.copy(points_center)\n points_down[:, 1] = bbox[2] - (points_down[:, 1] - bbox[2])\n points_up = np.copy(points_center)\n points_up[:, 1] = bbox[3] + (bbox[3] - points_up[:, 1])\n points = np.append(points_center,\n np.append(np.append(points_left, points_right, axis=0),\n np.append(points_down, points_up, axis=0),\n axis=0), axis=0)\n # Compute Voronoi\n vor = scipy.spatial.Voronoi(points)\n epsilon = sys.float_info.epsilon\n\n # Filter regions\n regions = []\n for region in vor.regions:\n flag = True\n for index in region:\n if index == -1:\n flag = False\n break\n else:\n x = vor.vertices[index, 0]\n y = vor.vertices[index, 1]\n if not(bbox[0]-epsilon <= x <= bbox[1]+epsilon and\n bbox[2]-epsilon <= y <= bbox[3]+epsilon):\n flag = False\n break\n if region != [] and flag:\n regions.append(region)\n vor.filtered_points = points_center\n vor.filtered_regions = regions\n return vor\n\n\ndef centroid(V):\n \"\"\"\n Given an ordered set of vertices V describing a polygon,\n returns the uniform surface centroid.\n\n See http://paulbourke.net/geometry/polygonmesh/\n \"\"\"\n A = 0\n Cx = 0\n Cy = 0\n for i in range(len(V)-1):\n s = (V[i, 0]*V[i+1, 1] - V[i+1, 0]*V[i, 1])\n A += s\n Cx += (V[i, 0] + V[i+1, 0]) * s\n Cy += (V[i, 1] + V[i+1, 1]) * s\n Cx /= 3*A\n Cy /= 3*A\n return [Cx, Cy]\n\n\ndef blue_noise(shape, radius, k=30, seed=None):\n \"\"\"\n Generate blue noise over a two-dimensional rectangle of size (width,height)\n\n Parameters\n ----------\n\n shape : tuple\n Two-dimensional domain (width x height) \n radius : float\n Minimum distance between samples\n k : int, optional\n Limit of samples to choose before rejection (typically k = 30)\n seed : int, optional\n If provided, this will set the random seed before generating noise,\n for valid pseudo-random comparisons.\n\n References\n ----------\n\n .. [1] Fast Poisson Disk Sampling in Arbitrary Dimensions, Robert Bridson,\n Siggraph, 2007. 
:DOI:`10.1145/1278780.1278807`\n \"\"\"\n\n def sqdist(a, b):\n \"\"\" Squared Euclidean distance \"\"\"\n dx, dy = a[0] - b[0], a[1] - b[1]\n return dx * dx + dy * dy\n\n def grid_coords(p):\n \"\"\" Return index of cell grid corresponding to p \"\"\"\n return int(floor(p[0] / cellsize)), int(floor(p[1] / cellsize))\n\n def fits(p, radius):\n \"\"\" Check whether p can be added to the queue \"\"\"\n\n radius2 = radius*radius\n gx, gy = grid_coords(p)\n for x in range(max(gx - 2, 0), min(gx + 3, grid_width)):\n for y in range(max(gy - 2, 0), min(gy + 3, grid_height)):\n g = grid[x + y * grid_width]\n if g is None:\n continue\n if sqdist(p, g) <= radius2:\n return False\n return True\n\n # When given a seed, we use a private random generator in order to not\n # disturb the default global random generator\n if seed is not None:\n from numpy.random.mtrand import RandomState\n rng = RandomState(seed=seed)\n else:\n rng = np.random\n \n width, height = shape\n cellsize = radius / sqrt(2)\n grid_width = int(ceil(width / cellsize))\n grid_height = int(ceil(height / cellsize))\n grid = [None] * (grid_width * grid_height)\n\n p = rng.uniform(0, shape, 2)\n queue = [p]\n grid_x, grid_y = grid_coords(p)\n grid[grid_x + grid_y * grid_width] = p\n\n while queue:\n qi = rng.randint(len(queue))\n qx, qy = queue[qi]\n queue[qi] = queue[-1]\n queue.pop()\n for _ in range(k):\n theta = rng.uniform(0,2*pi)\n r = radius * np.sqrt(rng.uniform(1, 4))\n p = qx + r * cos(theta), qy + r * sin(theta)\n if not (0 <= p[0] < width and 0 <= p[1] < height) or not fits(p, radius):\n continue\n queue.append(p)\n gx, gy = grid_coords(p)\n grid[gx + gy * grid_width] = p\n\n return np.array([p for p in grid if p is not None])\n\n\n\n\nclass VSOM2:\n \"\"\" Self Organizing Map \"\"\"\n\n def __init__(self, topology=\"regular\", n=1024, n_neighbour=2):\n \"\"\"\n Initialize SOM\n\n type: string\n \"regular\" or \"random\"\n n : int\n number of neurons\n ndim: int\n dimension of data to be fed to the SOM\n \"\"\"\n\n self.topology = topology\n self.n_neighbour = n_neighbour\n \n if self.topology == \"regular\":\n n = int(np.ceil(np.sqrt(n)))\n X, Y = np.meshgrid(np.linspace(0, 1, n+2, endpoint=True)[1:-1],\n np.linspace(0, 1, n+2, endpoint=True)[1:-1])\n P = np.c_[X.ravel(), Y.ravel()]\n D = scipy.spatial.distance.cdist(P,P)\n self.positions = P\n self.distances = D / D.max()\n self.voronoi = voronoi(P, bbox=[0, 1, 0, 1])\n self.edges = np.zeros((n*n*2, 2), dtype=int)\n index = 0\n for i in range(n):\n for j in range(n-1):\n source, target = i*n+j, i*n+j+1\n self.edges[index] = source, target\n index += 1\n source, target = j*n+i, (j+1)*n+i\n self.edges[index] = source, target\n index += 1\n \n else:\n radius = np.sqrt(2/(n*np.pi))\n P = blue_noise((1,1), radius=radius)\n self.voronoi = voronoi(P, bbox=[0, 1, 0, 1])\n \n # for i in range(10):\n # V = voronoi(P, bbox=[0,1,0,1])\n # C = []\n # for region in V.filtered_regions:\n # vertices = V.vertices[region + [region[0]], :]\n # C.append(centroid(vertices))\n # P = np.array(C)\n \n self.positions = P\n self.voronoi = V\n D = scipy.spatial.distance.cdist(P,P)\n sources = np.repeat(np.arange(len(P)),n_neighbour)\n sources = sources.reshape(len(P),n_neighbour)\n targets = np.argsort(D,axis=1)[:,1:n_neighbour+1]\n self.edges = np.c_[sources.ravel(), targets.ravel()]\n C = np.zeros(D.shape, dtype=int)\n C[sources,targets] = 1\n lengths = nx.floyd_warshall_numpy(nx.Graph(C))\n self.distances = np.array(lengths).astype(int)\n self.distances = self.distances/self.distances.max()\n \n\n 
def __len__(self):\n \"\"\" x.__len__() <==> len(x) \"\"\"\n\n return len(self.positions)\n \n \n def learn(self, samples, n=10000,\n sigma=(0.50, 0.01), lrate=(0.50, 0.01), labels=None):\n \"\"\" Learn samples \"\"\"\n\n t = np.linspace(0, 1, n)\n\n # We will reshape the final codebook to keep samples shape\n shape = [len(self)] + list(samples.shape[1:])\n \n samples = samples.reshape(len(samples), -1)\n self.codebook = np.zeros((len(self), samples.shape[-1]))\n self.labels = np.zeros(len(self))\n\n lrate = lrate[0]*(lrate[1]/lrate[0])**t\n sigma = sigma[0]*(sigma[1]/sigma[0])**t\n I = np.random.randint(0, len(samples), n)\n samples = samples[I]\n if labels is not None:\n labels = labels[I]\n \n for i in tqdm.trange(n):\n # Get random sample\n data = samples[i]\n\n # Get index of nearest node (minimum distance)\n winner = np.argmin(((self.codebook - data)**2).sum(axis=-1))\n\n # Gaussian centered on winner\n G = np.exp(-self.distances[winner]**2/sigma[i]**2)\n\n # Move nodes towards sample according to Gaussian \n self.codebook -= lrate[i]*G[...,np.newaxis]*(self.codebook - data)\n\n if labels is not None:\n self.labels -= lrate[i]*G*(self.labels-labels[i])\n # self.labels[winner] = labels[i]\n\n self.codebook = self.codebook.reshape(shape)\n\n\n def test(self, samples, labels=None):\n \"\"\" Compute the mean quantization error over samples \"\"\"\n\n samples = samples.reshape(len(samples), -1)\n codebook = self.codebook.reshape((len(self), -1))\n error = 0\n for i in tqdm.trange(len(samples)):\n sample = samples[i]\n winner = np.argmin(((codebook - sample)**2).sum(axis=-1))\n error += ((codebook[winner] - sample)**2).sum()\n error /= len(samples)\n return error\n\n \n # samples = samples.reshape(len(samples), -1)\n # codebook = self.codebook.reshape((len(self), -1))\n # #self.labels = np.zeros(len(self))\n # s = []\n # z = 0\n # for i in tqdm.trange(len(samples)):\n # sample = samples[i]\n # label = labels[i]\n # winner = np.argmin(((codebook - sample)**2).sum(axis=-1))\n # s.append(np.abs(label - self.labels[winner]))\n\n # if label == int((self.labels[winner])):\n # z += 1\n \n # print(np.mean(s))\n # print(z/len(samples))\n \n\n\n def plot_activation(self, ax, sample, cmap='plasma'):\n\n codebook = self.codebook.reshape(len(self), -1)\n \n D = -np.sqrt(((codebook - sample.ravel())**2).sum(axis=-1))\n P = self.positions\n \n if self.topology == \"random\":\n V = self.voronoi\n cmap = matplotlib.cm.get_cmap(cmap)\n norm = matplotlib.colors.Normalize(vmin=D.min(), vmax=D.max())\n segments = []\n for region in V.filtered_regions:\n segments.append(V.vertices[region + [region[0]], :])\n collection = PolyCollection(segments, linewidth=1.0,\n edgecolors=cmap(norm(D)),\n facecolors=cmap(norm(D)))\n ax.add_collection(collection)\n\n from scipy.interpolate import griddata\n X, Y = np.linspace(0, 1, 512), np.linspace(0, 1, 512)\n Z = griddata(P, D, (X[None,:], Y[:,None]), method='nearest')\n ax.contour(X, Y, Z, 8, linewidths=0.5, colors='k', alpha=0.75)\n\n else: # regular\n n = int(np.ceil(np.sqrt(len(self))))\n Z = D.reshape(n,n)\n X, Y = np.linspace(0, 1, n), np.linspace(0, 1, n)\n ax.imshow(Z, cmap=cmap, interpolation='nearest', extent=[0,1,0,1],\n origin=\"lower\")\n ax.contour(X, Y, Z, 8, linewidths=0.5, colors='k', alpha=0.75)\n\n if len(sample.shape) == 2:\n rows,cols = sample.shape\n image = np.zeros((rows,cols,4))\n image[:,:,0] = image[:,:,1] = image[:,:,2] = 0\n image[:,:,3] = sample\n image = OffsetImage(image, zoom=1.5, zorder=20,\n interpolation=\"nearest\")\n box = AnnotationBbox(image, (0.9,0.9), frameon=True)\n 
ax.add_artist(box)\n\n ax.set_xlim(0,1), ax.set_ylim(0,1)\n ax.set_xticks([]), ax.set_yticks([])\n\n\n\n def plot_network(self, ax):\n size = 50 * 1000/len(self)\n P,V,E = self.positions, self.voronoi, self.edges\n ax.scatter(P[:,0], P[:,1], s=size,\n edgecolor=\"k\", facecolor=\"w\", linewidth=1.)\n segments = np.zeros((len(E), 2, 2))\n for i in range(len(E)):\n segments[i] = P[E[i,0]], P[E[i,1]]\n collection = LineCollection(segments, color=\"k\", zorder=-10, lw=1.)\n ax.add_collection(collection)\n\n segments = []\n for region in V.filtered_regions:\n segments.append(V.vertices[region + [region[0]], :])\n collection = LineCollection(segments, color=\"k\", linewidth=0.5,\n zorder=-20, alpha=0.25)\n ax.add_collection(collection)\n ax.set_xlim(0,1), ax.set_ylim(0,1)\n ax.set_xticks([]), ax.set_yticks([])\n\n\n def plot_weights(self, ax, cmap='magma', samples=None):\n P,V,E = self.positions, self.voronoi, self.edges\n\n # Display weights as localized images\n if len(self.codebook.shape) == 3:\n rows,cols = self.codebook.shape[1:]\n segments = []\n for region in V.filtered_regions:\n segments.append(V.vertices[region + [region[0]], :])\n collection = PolyCollection(segments, linewidth=0.25, alpha=1.0,\n edgecolors=\"k\", facecolors=\"w\")\n ax.add_collection(collection)\n\n \n for position, data in zip(P, self.codebook):\n image = np.zeros((rows,cols,4))\n image[:,:,3] = data.reshape(rows,cols)\n image = OffsetImage(image,\n zoom=0.5, zorder=20, interpolation=\"nearest\")\n box = AnnotationBbox(image, position, frameon=False)\n ax.add_artist(box)\n\n ax.set_xlim(0,1), ax.set_ylim(0,1)\n ax.set_xticks([]), ax.set_yticks([])\n return\n \n codebook = self.codebook.reshape(len(self), -1)\n\n # Display weights as a mesh in data space\n if codebook.shape[-1] == 2:\n size = 50 * 1000/len(self)\n X, Y = codebook[:,0], codebook[:,1]\n ax.scatter(X, Y, s=size, edgecolor=\"w\", facecolor=\"k\", linewidth=1.0)\n ax.scatter(samples[:,0], samples[:,1], s=5,\n edgecolor=\"None\", facecolor=\"blue\",\n alpha=0.25, zorder=-30)\n \n segments = np.zeros((len(self.edges), 2, 2))\n for i in range(len(self.edges)): \n segments[i] = codebook[self.edges[i,0]], codebook[self.edges[i,1]]\n collection = LineCollection(segments, linewidth=0.75,\n color='black', zorder=-10, alpha=1.0)\n ax.add_collection(collection)\n ax.set_xlim(-1,1), ax.set_ylim(-1,1)\n ax.set_xticks([]), ax.set_yticks([])\n return\n\n \n if self.topology == \"random\":\n # Display weights as voronoi cells + cmap\n if codebook.shape[-1] == 1:\n cmap = matplotlib.cm.get_cmap(cmap)\n norm = matplotlib.colors.Normalize(vmin=0, vmax=1)\n facecolors = edgecolors = cmap(norm(self.codebook.ravel()))\n # Display weights as colored voronoi cells\n elif codebook.shape[-1] == 3:\n facecolors = edgecolors = codebook\n segments = []\n for region in V.filtered_regions:\n segments.append(V.vertices[region + [region[0]], :])\n collection = PolyCollection(segments, linewidth=1.0,\n edgecolors = edgecolors, facecolors = facecolors)\n ax.add_collection(collection)\n else:\n n = int(np.ceil(np.sqrt(len(self))))\n # Display weights on a regular grid + cmap\n if codebook.shape[-1] == 1:\n ax.imshow(codebook.reshape(n,n), cmap=cmap, origin=\"lower\",\n interpolation='nearest', extent=[0, 1, 0, 1])\n # Display weights on a colored regular grid\n elif self.codebook.shape[-1] == 3:\n ax.imshow(codebook.reshape(n,n,3), origin=\"lower\",\n interpolation='nearest', extent=[0, 1, 0, 1])\n\n ax.set_xlim(0,1), ax.set_ylim(0,1)\n ax.set_xticks([]), ax.set_yticks([])\n\n \n def 
plot_letter(self, ax, letter):\n text = ax.text(0.05, 0.05, letter, zorder=20,\n fontsize=32, fontweight=\"bold\", transform=ax.transAxes)\n text.set_path_effects(\n [path_effects.Stroke(linewidth=2, foreground='white'),\n path_effects.Normal()])\n\n\n \nclass VSOM:\n \"\"\" Randomized Self Organizing Map \"\"\"\n\n def __init__(self, shape, distance):\n ''' Initialize som '''\n\n self.codebook = np.random.uniform(0, 1, shape)\n self.labels = np.random.uniform(0, 1, len(self.codebook))\n self.distance = distance / distance.max()\n\n \n def learn(self, samples, n=10000, sigma=(0.25, 0.01), lrate=(0.5, 0.01)):\n \"\"\" Learn samples \"\"\"\n\n t = np.linspace(0, 1, n)\n lrate = lrate[0]*(lrate[1]/lrate[0])**t\n sigma = sigma[0]*(sigma[1]/sigma[0])**t\n I = np.random.randint(0, len(samples), n)\n samples = samples[I]\n\n for i in tqdm.trange(n):\n # Get random sample\n data = samples[i]\n\n # Get index of nearest node (minimum distance)\n winner = np.argmin(((self.codebook - data)**2).sum(axis=-1))\n\n # Gaussian centered on winner\n G = np.exp(-self.distance[winner]**2/sigma[i]**2)\n\n # Move nodes towards sample according to Gaussian \n self.codebook -= lrate[i]*G[...,np.newaxis]*(self.codebook - data)\n\n\n", "# -----------------------------------------------------------------------------\n# VSOM (Voronoidal Self Organized Map)\n# Copyright (c) 2019 Nicolas P. Rougier\n#\n# Distributed under the terms of the BSD License.\n# -----------------------------------------------------------------------------\nimport numpy as np\nimport scipy.spatial\nimport networkx as nx\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as path_effects\nfrom matplotlib.collections import LineCollection, PolyCollection\nfrom vsom import VSOM, blue_noise, voronoi, centroid\n\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n\n # Parameters\n # ----------\n seed = 1\n radius = 0.025 # number of neurons ~ 2/(pi*radius**2)\n n_neighbour = 3\n n_samples = 25000\n n_epochs = 25000\n sigma = 0.50, 0.01\n lrate = 0.50, 0.01\n\n \n # Initialization\n # --------------\n if seed is None:\n seed = np.random.randint(0,1000)\n np.random.seed(seed)\n print(\"Random seed: {0}\".format(seed))\n \n \n # Nice uniform random distribution (blue noise)\n # ---------------------------------------------\n P = blue_noise((1,1), radius=radius)\n print(\"Number of neurons: {0}\".format(len(P)))\n\n \n # Centroidal Voronoi Tesselation (10 iterations)\n # ----------------------------------------------\n for i in range(10):\n V = voronoi(P, bbox=[0,1,0,1])\n C = []\n for region in V.filtered_regions:\n vertices = V.vertices[region + [region[0]], :]\n C.append(centroid(vertices))\n P = np.array(C)\n\n\n # Connectivity matrix (C) and distance matrix (D)\n # -----------------------------------------------\n D = scipy.spatial.distance.cdist(P,P)\n sources = np.repeat(np.arange(len(P)),n_neighbour).reshape(len(P),n_neighbour)\n targets = np.argsort(D,axis=1)[:,1:n_neighbour+1]\n edges = np.c_[sources.ravel(), targets.ravel()]\n C = np.zeros(D.shape, dtype=int)\n C[sources,targets] = 1\n lengths = nx.shortest_path_length(nx.Graph(C))\n distance = np.zeros(D.shape, dtype=int)\n for i in range(len(P)):\n for j in range(len(P)):\n distance[i,j] = lengths[i][j]\n\n \n # Train SOM\n # ---------\n som = VSOM((len(P),2), distance)\n\n # samples = np.random.uniform(-1, 1, (n_samples,2))\n # samples = np.random.normal(0,.35,(n_samples,2))\n T = 
np.random.uniform(0.0, 2.0*np.pi, n_samples)\n R = np.sqrt(np.random.uniform(0.50**2, 1.0**2, n_samples))\n samples = np.c_[R*np.cos(T), R*np.sin(T)]\n som.learn(samples, n_epochs, sigma=sigma, lrate=lrate)\n\n\n # Display activation for 6 random points\n # --------------------------------------\n indices = np.random.randint(0,len(samples),12)[-6:]\n fig = plt.figure(figsize=(12,8))\n for i in range(len(indices)):\n ax = plt.subplot(2, 3, i+1, aspect=1)\n data = samples[indices[i]]\n D = -np.sqrt(((som.codebook - data)**2).sum(axis=-1))\n cmap = matplotlib.cm.get_cmap('plasma')\n norm = matplotlib.colors.Normalize(vmin=D.min(), vmax=D.max())\n segments = []\n for region in V.filtered_regions:\n segments.append(V.vertices[region + [region[0]], :])\n collection = PolyCollection(segments, linewidth=1.0,\n edgecolors=cmap(norm(D)),\n facecolors=cmap(norm(D)))\n ax.add_collection(collection)\n text = ax.text(0.05, 0.05, chr(ord(\"C\")+i),\n fontsize=24, fontweight=\"bold\", transform=ax.transAxes)\n text.set_path_effects([path_effects.Stroke(linewidth=2, foreground='white'),\n path_effects.Normal()])\n ax.set_xlim(0,1), ax.set_ylim(0,1)\n ax.set_xticks([]), ax.set_yticks([])\n\n from scipy.interpolate import griddata\n X = np.linspace(0, 1, 512)\n Y = np.linspace(0, 1, 512)\n Z = griddata(P, D, (X[None,:], Y[:,None]), method='nearest')\n ax.contour(X, Y, Z, 8, linewidths=0.5, colors='k', alpha=0.75)\n \n\n plt.tight_layout()\n plt.savefig(\"vsom-spatial-2.pdf\")\n plt.show()\n\n\n # Display neural and weight maps\n # ------------------------------\n fig = plt.figure(figsize=(16,8))\n\n # Neuronal space\n # --------------\n ax = plt.subplot(1, 2, 1, aspect=1)\n \n ax.scatter(P[:,0], P[:,1], s=50, edgecolor=\"k\", facecolor=\"w\", linewidth=1.5)\n segments = np.zeros((len(edges), 2, 2))\n for i in range(len(edges)):\n segments[i] = P[edges[i,0]], P[edges[i,1]]\n collection = LineCollection(segments, color=\"k\", zorder=-10, lw=1.5)\n ax.add_collection(collection)\n\n segments = []\n for region in V.filtered_regions:\n segments.append(V.vertices[region + [region[0]], :])\n collection = LineCollection(segments, color=\"k\", linewidth=0.5,\n zorder=-20, alpha=0.25)\n ax.add_collection(collection)\n ax.set_xlim(0,1), ax.set_ylim(0,1)\n ax.set_xticks([]), ax.set_yticks([])\n text = ax.text(0.05, 0.05, \"A\",\n fontsize=32, fontweight=\"bold\", transform=ax.transAxes)\n text.set_path_effects([path_effects.Stroke(linewidth=2, foreground='white'),\n path_effects.Normal()])\n\n # Weight space\n # ------------\n ax = plt.subplot(1, 2, 2, aspect=1)\n\n X, Y = som.codebook[:,0], som.codebook[:,1]\n\n ax.scatter(X, Y, s=30, edgecolor=\"w\", facecolor=\"k\", linewidth=1.0)\n \n\n ax.scatter(samples[:,0], samples[:,1], s=5,\n edgecolor=\"None\", facecolor=\"blue\", alpha=0.25, zorder=-30)\n\n # Highlight chosen samples\n S = samples[indices]\n ax.scatter(S[:,0], S[:,1], s=1000, linewidth=0, alpha=.75,\n edgecolor=\"None\", facecolor=\"white\", zorder=30)\n ax.scatter(S[:,0], S[:,1], s=50, linewidth=1.5,\n edgecolor=\"red\", facecolor=\"white\", zorder=40)\n for i in range(len(S)):\n text = ax.text(S[i,0], S[i,1]-0.02, chr(ord(\"C\")+i),\n color=\"red\", ha = \"center\", va = \"top\", zorder=100,\n fontsize=12, fontweight=\"bold\", transform=ax.transData)\n text.set_path_effects([path_effects.Stroke(linewidth=1, foreground='white'),\n path_effects.Normal()])\n\n\n \n \n segments = np.zeros((len(edges), 2, 2))\n for i in range(len(edges)): \n segments[i] = som.codebook[edges[i,0]], som.codebook[edges[i,1]]\n 
collection = LineCollection(segments, linewidth=0.75,\n color='black', zorder=-10, alpha=1.0)\n ax.add_collection(collection)\n ax.set_xlim(-1,1), ax.set_ylim(-1,1)\n ax.set_xticks([]), ax.set_yticks([])\n text = ax.text(0.05, 0.05, \"B\",\n fontsize=32, fontweight=\"bold\", transform=ax.transAxes)\n text.set_path_effects([path_effects.Stroke(linewidth=2, foreground='white'),\n path_effects.Normal()])\n\n plt.tight_layout()\n plt.savefig(\"vsom-spatial-1.pdf\")\n plt.show()\n \n" ]
[ [ "matplotlib.patheffects.Normal", "numpy.sqrt", "numpy.linspace", "scipy.interpolate.griddata", "matplotlib.cm.get_cmap", "numpy.exp", "matplotlib.offsetbox.AnnotationBbox", "numpy.copy", "numpy.zeros", "numpy.random.mtrand.RandomState", "matplotlib.offsetbox.OffsetImage", "matplotlib.collections.LineCollection", "numpy.append", "numpy.argsort", "numpy.logical_and", "numpy.array", "matplotlib.colors.Normalize", "matplotlib.patheffects.Stroke", "matplotlib.collections.PolyCollection", "numpy.random.uniform" ], [ "matplotlib.patheffects.Normal", "numpy.linspace", "scipy.interpolate.griddata", "matplotlib.cm.get_cmap", "numpy.random.randint", "matplotlib.pyplot.tight_layout", "numpy.sin", "matplotlib.pyplot.subplot", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.collections.LineCollection", "matplotlib.pyplot.savefig", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "numpy.random.seed", "numpy.cos", "matplotlib.patheffects.Stroke", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
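VSOM.learn in the record above anneals both the neighbourhood width and the learning rate with the same geometric schedule: for t running from 0 to 1 over the training samples, p0 * (p1/p0)**t decays smoothly from p0 at the first sample to p1 at the last. A small sketch isolating just that schedule, with the parameter values used in the script:

import numpy as np

n = 25000                               # n_epochs in the script above
t = np.linspace(0, 1, n)
sigma = 0.50 * (0.01 / 0.50) ** t       # neighbourhood width: 0.50 -> 0.01
lrate = 0.50 * (0.01 / 0.50) ** t       # learning rate: 0.50 -> 0.01

assert np.isclose(sigma[0], 0.50) and np.isclose(sigma[-1], 0.01)
assert np.isclose(lrate[0], 0.50) and np.isclose(lrate[-1], 0.01)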
fanyizhe/aws-reserved-instance-expiration-notification
[ "018c37f7d547937f6390b5184756d199338ba7d6" ]
[ "src/ri_expiration.py" ]
[ "# Copyright <2019> Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nimport json\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom multiprocessing import Process\nfrom dateutil import relativedelta\nfrom datetime import date, datetime, timedelta\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\n\nTARGET_REGION = 'ap-northeast-2'\nSES_REGION = 'us-east-1'\nSENDER = \"blah <[email protected]>\"\n\ndef send_email(info):\n RECIPIENT = info['email']\n attachments = info['attach']\n BODY_HTML = info['msg']\n\n message = MIMEMultipart()\n message['Subject'] = 'Amazon RI Expiration Notification'\n message['From'] = SENDER\n message['To'] = RECIPIENT\n destinations = []\n destinations.append(RECIPIENT)\n\n # message body\n part = MIMEText(BODY_HTML, 'html')\n message.attach(part)\n\n # attachment\n for attachment in attachments:\n with open(attachment, 'rb') as f:\n part = MIMEApplication(f.read())\n part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachment))\n message.attach(part)\n\n client = boto3.client('ses', region_name=SES_REGION)\n response = client.send_raw_email(\n Source=message['From'],\n Destinations=destinations,\n RawMessage={\n 'Data': message.as_string()\n }\n )\n\n\ndef getExpRIList(response: boto3.resources.response, response_name, filter_name, filter_value, select_column=[]):\n if len(response[response_name]) > 0:\n print(\"Exist \" + response_name)\n\n next_month = np.datetime64(datetime.utcnow() + relativedelta.relativedelta(days=31))\n ri_list = list(map(lambda a: a.values(), filter(lambda a: a[filter_name] == filter_value, response[response_name])))\n head = response[response_name][0].keys()\n df = pd.DataFrame(ri_list, columns=head)\n df['StartTime'] = pd.to_datetime(df['StartTime']).dt.tz_convert(None)\n df['End'] = df['StartTime'] + pd.to_timedelta(df['Duration'], 's')\n will_expire = df[df['End'] <= next_month]\n filtered_list = will_expire[select_column].values.tolist()\n return filtered_list, will_expire[select_column].columns.to_list(), df\n\n\ndef to_excel(df, filename, condition):\n writer = pd.ExcelWriter(filename)\n row_style = lambda row: pd.Series('background-color: {}'.format('yellow' if condition(row) else 'green'), row.index)\n df.style.apply(row_style, axis=1).to_excel(writer, )\n writer.save()\n writer.close()\n\n\ndef makeMessage():\n # df\n next_month = np.datetime64(datetime.utcnow() + relativedelta.relativedelta(days=31))\n\n # ec2\n ec2_client = boto3.client('ec2', region_name=TARGET_REGION)\n ec2_response = ec2_client.describe_reserved_instances(Filters=[{'Name': 'state', 'Values': ['active']}])\n if len(ec2_response['ReservedInstances']) > 0:\n print(\"Exist EC2 Reserved Instance\")\n\n ec2_ri_list = list(map(lambda a: a.values(), ec2_response['ReservedInstances']))\n ec2_head = ec2_response['ReservedInstances'][0].keys()\n ec2_df = pd.DataFrame(ec2_ri_list, columns=ec2_head)\n ec2_df['Start'] = pd.to_datetime(ec2_df['Start']).dt.tz_convert(None)\n ec2_df['End'] = pd.to_datetime(ec2_df['End']).dt.tz_convert(None)\n ec2_will_expire = ec2_df[ec2_df['End'] <= next_month]\n select_column = ['ReservedInstancesId', 'Start', 'State', 'End', 'InstanceType', 'InstanceCount']\n ec2_head2 = ec2_will_expire[select_column].columns.to_list()\n ec2_filtered_list = ec2_will_expire[select_column].values.tolist()\n\n # rds\n 
rds_client = boto3.client('rds', region_name=TARGET_REGION)\n rds_filtered_list, rds_head, rds_df = getExpRIList(rds_client.describe_reserved_db_instances(),\n 'ReservedDBInstances',\n 'State',\n 'active',\n ['ReservedDBInstanceId', 'StartTime', 'State', 'End', 'DBInstanceClass',\n 'DBInstanceCount'])\n\n # redshift\n red_client = boto3.client('redshift', region_name=TARGET_REGION)\n red_filtered_list, red_head, red_df = getExpRIList(red_client.describe_reserved_nodes(),\n 'ReservedNodes',\n 'State',\n 'active',\n ['ReservedNodeId', 'StartTime', 'State', 'End', 'NodeType', 'NodeCount'])\n\n # elasticache\n ec_client = boto3.client('elasticache', region_name=TARGET_REGION)\n ec_filtered_list, ec_head, ec_df = getExpRIList(ec_client.describe_reserved_cache_nodes(),\n 'ReservedCacheNodes',\n 'State',\n 'active',\n ['ReservedCacheNodeId', 'StartTime', 'State', 'End', 'CacheNodeType',\n 'CacheNodeCount'])\n\n # elasticsearch\n es_client = boto3.client('es', region_name=TARGET_REGION)\n es_filtered_list, es_head, es_df = getExpRIList(es_client.describe_reserved_elasticsearch_instances(),\n 'ReservedElasticsearchInstances',\n 'State',\n 'active',\n ['ReservationName', 'ReservedElasticsearchInstanceId', 'StartTime',\n 'State', 'End', 'ElasticsearchInstanceType',\n 'ElasticsearchInstanceCount'])\n\n to_excel(ec2_df, '/tmp/ec2_df.xlsx', lambda df: df['End'] <= next_month)\n to_excel(rds_df, '/tmp/rds_df.xlsx', lambda df: df['End'] <= next_month)\n to_excel(red_df, '/tmp/red_df.xlsx', lambda df: df['End'] <= next_month)\n to_excel(ec_df, '/tmp/ec_df.xlsx', lambda df: df['End'] <= next_month)\n to_excel(es_df, '/tmp/es_df.xlsx', lambda df: df['End'] <= next_month)\n\n html = \"<!DOCTYPE html>\\\n <html lang='en'>\\\n <head>\\\n <title>RI Status</title>\\\n <style>\\\n table, th, td {\\\n border: 1px solid black;\\\n border-collapse: collapse;\\\n font-size: 10pt;\\\n width: 1500px;\\\n }\\\n th, td {\\\n padding: 5px;\\\n text-align: left;\\\n }\\\n </style>\\\n </head>\\\n <body>\" +\\\n getHTMLTable(\"EC2\", ec2_head2, ec2_filtered_list) + \\\n getHTMLTable(\"RDS\", rds_head, rds_filtered_list) + \\\n getHTMLTable(\"Redshift\", red_head, red_filtered_list) + \\\n getHTMLTable(\"ElastiCache\", ec_head, ec_filtered_list) + \\\n getHTMLTable(\"ElasticSearch\", es_head, es_filtered_list) + \\\n \"</body></html>\"\n return html\n\n\ndef getHTMLTable(table_name, header, rows):\n html_middle = \"<h3>{}</h3><table>{}</table>\"\n table_head, table = \"\", \"\"\n table_items = []\n end_column = -1\n for index, head in enumerate(header):\n if head == \"End\":\n table_head += \"<th bgcolor='#D45B5B'>{}</th>\".format(head)\n end_column = index\n else:\n table_head += \"<th>{}</th>\".format(head)\n table_items.append(table_head)\n for row in rows:\n table_row = \"\"\n for index, item in enumerate(row):\n if index == end_column:\n table_row += \"<td bgcolor='#D45B5B'>{}</td>\".format(item)\n else:\n table_row += \"<td>{}</td>\".format(item)\n table_items.append(table_row)\n for row in table_items:\n table += \"<tr>{}</tr>\".format(row)\n table = html_middle.format(table_name, table)\n return table\n\n\ndef save_msg_to_s3(msg, bucket, obj):\n s3 = boto3.client('s3')\n s3.put_object(Body=msg, Bucket=bucket, Key=obj+\"ri_exp.html\")\n\n\ndef lambda_handler(event, context):\n # TODO implement\n dn_client = boto3.client('dynamodb', region_name=TARGET_REGION)\n dn_response = dn_client.scan(TableName='ri_exp_mailing')\n email_list = list(map(lambda a: a['email']['S'], dn_response['Items']))\n\n # if there is an email 
address not yet verified in SES, send a verification email.\n ses_client = boto3.client('ses', region_name=SES_REGION)\n ses_list = list(filter(lambda a: '.com' in a, ses_client.list_identities()['Identities']))\n for i in email_list:\n if i not in ses_list:\n try:\n response = ses_client.verify_email_address(EmailAddress=i)\n except ClientError as e:\n print(e.response['Error']['Message'])\n else:\n print(\"Validation Email sent! Message ID:\")\n print(response['MessageId'])\n\n msg = makeMessage()\n save_msg_to_s3(msg, \"ri-exp-contents\", datetime.today().strftime(\"%Y/%m/\"))\n attach = ['/tmp/ec2_df.xlsx', '/tmp/rds_df.xlsx', '/tmp/red_df.xlsx', '/tmp/ec_df.xlsx', '/tmp/es_df.xlsx']\n infos = list(map(lambda a: {'email': a, 'msg': msg, 'attach': attach}, email_list))\n\n procs = []\n for info in infos:\n p = Process(target=send_email, args=(info,))\n procs.append(p)\n p.start()\n\n for p in procs:\n p.join()\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(\"success\", indent=4, sort_keys=True, default=str)\n }\n" ]
[ [ "pandas.to_timedelta", "pandas.to_datetime", "pandas.DataFrame", "pandas.ExcelWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
diam045/chainer-chemistry
[ "aedd64049e7b2480a59c44b186171296ea69e55e", "aedd64049e7b2480a59c44b186171296ea69e55e", "aedd64049e7b2480a59c44b186171296ea69e55e" ]
[ "tests/models_tests/test_relgcn.py", "tests/models_tests/test_relgat.py", "examples/qm9/predict_qm9.py" ]
[ "from chainer import cuda\nfrom chainer import gradient_check\nimport numpy\nimport pytest\n\nfrom chainer_chemistry.config import MAX_ATOMIC_NUM\nfrom chainer_chemistry.models.relgcn import RelGCN\nfrom chainer_chemistry.models.relgcn import rescale_adj\nfrom chainer_chemistry.utils.permutation import permute_adj\nfrom chainer_chemistry.utils.permutation import permute_node\n\natom_size = 5\nout_ch = 4\nbatch_size = 2\nnum_edge_type = 4\n\n\[email protected]\ndef model():\n return RelGCN(out_channels=out_ch)\n\n\[email protected]\ndef data():\n numpy.random.seed(0)\n atom_data = numpy.random.randint(\n 0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)).astype('i')\n adj_data = numpy.random.randint(\n 0, high=2,\n size=(batch_size, num_edge_type, atom_size, atom_size)).astype('f')\n y_grad = numpy.random.uniform(-1, 1, (batch_size, out_ch)).astype('f')\n return atom_data, adj_data, y_grad\n\n\ndef check_forward(model, atom_data, adj_data):\n y_actual = cuda.to_cpu(model(atom_data, adj_data).data)\n assert y_actual.shape == (batch_size, out_ch)\n\n\ndef test_forward_cpu(model, data):\n atom_data, adj_data = data[0], data[1]\n check_forward(model, atom_data, adj_data)\n\n\[email protected]\ndef test_forward_gpu(model, data):\n atom_data, adj_data = cuda.to_gpu(data[0]), cuda.to_gpu(data[1])\n model.to_gpu()\n check_forward(model, atom_data, adj_data)\n\n\ndef test_backward_cpu(model, data):\n atom_data, adj_data, y_grad = data\n gradient_check.check_backward(model, (atom_data, adj_data), y_grad,\n atol=1e-3, rtol=1e-3)\n\n\[email protected]\ndef test_backward_gpu(model, data):\n atom_data, adj_data, y_grad = [cuda.to_gpu(d) for d in data]\n model.to_gpu()\n gradient_check.check_backward(model, (atom_data, adj_data), y_grad,\n atol=1e-3, rtol=1e-3)\n\n\ndef test_forward_cpu_invariant(model, data):\n atom_data, adj_data = data[0], data[1]\n y_actual = cuda.to_cpu(model(atom_data, adj_data).data)\n\n permutation_index = numpy.random.permutation(atom_size)\n permute_atom_data = permute_node(atom_data, permutation_index)\n permute_adj_data = permute_adj(adj_data, permutation_index)\n permute_y_actual = cuda.to_cpu(model(\n permute_atom_data, permute_adj_data).data)\n assert numpy.allclose(y_actual, permute_y_actual, rtol=1e-5, atol=1e-5)\n\n\ndef test_rescale_adj(data):\n adj = data[1]\n numpy.testing.assert_allclose(rescale_adj(adj).data.sum(axis=(1, 2)),\n numpy.ones((batch_size, atom_size)),\n atol=1e-5, rtol=1e-5)\n\n\nif __name__ == '__main__':\n pytest.main((__file__, '-v'))\n", "from chainer import cuda\nfrom chainer import gradient_check\nimport numpy\nimport pytest\n\nfrom chainer_chemistry.config import MAX_ATOMIC_NUM\nfrom chainer_chemistry.models.relgat import RelGAT\nfrom chainer_chemistry.utils.permutation import permute_adj\nfrom chainer_chemistry.utils.permutation import permute_node\n\natom_size = 5\nout_dim = 4\nbatch_size = 2\nnum_edge_type = 4\n\n\[email protected](params=[True, False])\ndef model(request):\n return RelGAT(out_dim=out_dim, concat_heads=request.param)\n\n\[email protected]\ndef data():\n numpy.random.seed(0)\n atom_data = numpy.random.randint(\n 0, high=MAX_ATOMIC_NUM, size=(batch_size, atom_size)\n ).astype(numpy.int32)\n adj_data = numpy.random.randint(\n 0, high=2, size=(batch_size, num_edge_type, atom_size, atom_size)\n ).astype(numpy.float32)\n y_grad = numpy.random.uniform(\n -1, 1, (batch_size, out_dim)).astype(numpy.float32)\n return atom_data, adj_data, y_grad\n\n\ndef check_forward(model, atom_data, adj_data):\n y_actual = 
cuda.to_cpu(model(atom_data, adj_data).data)\n assert y_actual.shape == (batch_size, out_dim)\n\n\ndef test_forward_cpu(model, data):\n atom_data, adj_data = data[0], data[1]\n check_forward(model, atom_data, adj_data)\n\n\[email protected]\ndef test_forward_gpu(model, data):\n atom_data, adj_data = cuda.to_gpu(data[0]), cuda.to_gpu(data[1])\n model.to_gpu()\n check_forward(model, atom_data, adj_data)\n\n\n# TODO(mottodora): check why tolerance is high\ndef test_backward_cpu(model, data):\n atom_data, adj_data, y_grad = data\n params = tuple(model.params())\n gradient_check.check_backward(model, (atom_data, adj_data), y_grad,\n params=params, no_grads=[True, True],\n atol=1e3, rtol=1e3)\n\n\n# TODO(nakago): check why tolerance is high\[email protected]\ndef test_backward_gpu(model, data):\n atom_data, adj_data, y_grad = [cuda.to_gpu(d) for d in data]\n model.to_gpu()\n params = tuple(model.params())\n gradient_check.check_backward(model, (atom_data, adj_data), y_grad,\n params=params, no_grads=[True, True],\n atol=1e3, rtol=1e3)\n\n\ndef test_forward_cpu_graph_invariant(model, data):\n atom_data, adj_data = data[0], data[1]\n y_actual = cuda.to_cpu(model(atom_data, adj_data).data)\n\n permutation_index = numpy.random.permutation(atom_size)\n permute_atom_data = permute_node(atom_data, permutation_index)\n permute_adj_data = permute_adj(adj_data, permutation_index)\n permute_y_actual = cuda.to_cpu(model(\n permute_atom_data, permute_adj_data).data)\n assert numpy.allclose(y_actual, permute_y_actual, rtol=1e-5, atol=1e-6)\n\n\nif __name__ == '__main__':\n pytest.main([__file__, '-v'])\n", "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport numpy\nimport pandas\n\nimport chainer.functions as F\nfrom chainer import cuda\nfrom chainer.datasets import split_dataset_random\nfrom chainer.iterators import SerialIterator\nfrom chainer.training.extensions import Evaluator\n\nfrom chainer_chemistry.utils import save_json\n\ntry:\n import matplotlib\n matplotlib.use('Agg')\nexcept ImportError:\n pass\n\n\n\nfrom chainer_chemistry.dataset.converters import concat_mols\nfrom chainer_chemistry.dataset.preprocessors import preprocess_method_dict\nfrom chainer_chemistry import datasets as D\nfrom chainer_chemistry.datasets import NumpyTupleDataset\nfrom chainer_chemistry.models.prediction import Regressor\n\n# These import is necessary for pickle to work\nfrom chainer_chemistry.links.scaler.standard_scaler import StandardScaler # NOQA\n# from sklearn.preprocessing import StandardScaler # NOQA\nfrom train_qm9 import GraphConvPredictor # NOQA\nfrom train_qm9 import MeanAbsError, RootMeanSqrError # NOQA\n\n\ndef parse_arguments():\n # Lists of supported preprocessing methods/models.\n method_list = ['nfp', 'ggnn', 'schnet', 'weavenet', 'rsgcn', 'relgcn',\n 'relgat']\n label_names = ['A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2',\n 'zpve', 'U0', 'U', 'H', 'G', 'Cv']\n scale_list = ['standardize', 'none']\n\n # Set up the argument parser.\n parser = argparse.ArgumentParser(description='Regression on QM9.')\n parser.add_argument('--method', '-m', type=str, choices=method_list,\n help='method name', default='nfp')\n parser.add_argument('--label', '-l', type=str,\n choices=label_names + ['all'], default='all',\n help='target label for regression; all means '\n 'predicting all properties at once')\n parser.add_argument('--scale', type=str, choices=scale_list,\n help='label scaling method', default='standardize')\n parser.add_argument('--gpu', '-g', 
type=int, default=-1,\n help='id of gpu to use; negative value means running'\n 'the code on cpu')\n parser.add_argument('--seed', '-s', type=int, default=777,\n help='random seed value')\n parser.add_argument('--train-data-ratio', '-r', type=float, default=0.7,\n help='ratio of training data w.r.t the dataset')\n parser.add_argument('--in-dir', '-i', type=str, default='result',\n help='directory to load model data from')\n parser.add_argument('--model-filename', type=str, default='regressor.pkl',\n help='saved model filename')\n parser.add_argument('--num-data', type=int, default=-1,\n help='amount of data to be parsed; -1 indicates '\n 'parsing all data.')\n return parser.parse_args()\n\n\ndef main():\n # Parse the arguments.\n args = parse_arguments()\n device = args.gpu\n\n # Set up some useful variables that will be used later on.\n method = args.method\n if args.label != 'all':\n label = args.label\n cache_dir = os.path.join('input', '{}_{}'.format(method, label))\n labels = [label]\n else:\n labels = D.get_qm9_label_names()\n cache_dir = os.path.join('input', '{}_all'.format(method))\n\n # Get the filename corresponding to the cached dataset, based on the amount\n # of data samples that need to be parsed from the original dataset.\n num_data = args.num_data\n if num_data >= 0:\n dataset_filename = 'data_{}.npz'.format(num_data)\n else:\n dataset_filename = 'data.npz'\n\n # Load the cached dataset.\n dataset_cache_path = os.path.join(cache_dir, dataset_filename)\n\n dataset = None\n if os.path.exists(dataset_cache_path):\n print('Loading cached data from {}.'.format(dataset_cache_path))\n dataset = NumpyTupleDataset.load(dataset_cache_path)\n if dataset is None:\n print('Preprocessing dataset...')\n preprocessor = preprocess_method_dict[method]()\n dataset = D.get_qm9(preprocessor, labels=labels)\n\n # Cache the newly preprocessed dataset.\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n NumpyTupleDataset.save(dataset_cache_path, dataset)\n\n # Use a predictor with scaled output labels.\n model_path = os.path.join(args.in_dir, args.model_filename)\n regressor = Regressor.load_pickle(model_path, device=device)\n scaler = regressor.predictor.scaler\n\n if scaler is not None:\n original_t = dataset.get_datasets()[-1]\n if args.gpu >= 0:\n scaled_t = cuda.to_cpu(scaler.transform(\n cuda.to_gpu(original_t)))\n else:\n scaled_t = scaler.transform(original_t)\n\n dataset = NumpyTupleDataset(*(dataset.get_datasets()[:-1] +\n (scaled_t,)))\n\n # Split the dataset into training and testing.\n train_data_size = int(len(dataset) * args.train_data_ratio)\n _, test = split_dataset_random(dataset, train_data_size, args.seed)\n\n # This callback function extracts only the inputs and discards the labels.\n def extract_inputs(batch, device=None):\n return concat_mols(batch, device=device)[:-1]\n\n def postprocess_fn(x):\n if scaler is not None:\n scaled_x = scaler.inverse_transform(x)\n return scaled_x\n else:\n return x\n\n # Predict the output labels.\n print('Predicting...')\n y_pred = regressor.predict(\n test, converter=extract_inputs,\n postprocess_fn=postprocess_fn)\n\n # Extract the ground-truth labels.\n t = concat_mols(test, device=device)[-1]\n original_t = cuda.to_cpu(scaler.inverse_transform(t))\n\n # Construct dataframe.\n df_dict = {}\n for i, l in enumerate(labels):\n df_dict.update({'y_pred_{}'.format(l): y_pred[:, i],\n 't_{}'.format(l): original_t[:, i], })\n df = pandas.DataFrame(df_dict)\n\n # Show a prediction/ground truth table with 5 random examples.\n 
print(df.sample(5))\n\n n_eval = 10\n for target_label in range(y_pred.shape[1]):\n label_name = labels[target_label]\n diff = y_pred[:n_eval, target_label] - original_t[:n_eval,\n target_label]\n print('label_name = {}, y_pred = {}, t = {}, diff = {}'\n .format(label_name, y_pred[:n_eval, target_label],\n original_t[:n_eval, target_label], diff))\n\n # Run an evaluator on the test dataset.\n print('Evaluating...')\n test_iterator = SerialIterator(test, 16, repeat=False, shuffle=False)\n eval_result = Evaluator(test_iterator, regressor, converter=concat_mols,\n device=device)()\n print('Evaluation result: ', eval_result)\n # Save the evaluation results.\n save_json(os.path.join(args.in_dir, 'eval_result.json'), eval_result)\n\n # Calculate mean abs error for each label\n mae = numpy.mean(numpy.abs(y_pred - original_t), axis=0)\n eval_result = {}\n for i, l in enumerate(labels):\n eval_result.update({l: mae[i]})\n save_json(os.path.join(args.in_dir, 'eval_result_mae.json'), eval_result)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.allclose", "numpy.random.seed", "numpy.ones", "numpy.random.permutation", "numpy.random.uniform", "numpy.random.randint" ], [ "numpy.allclose", "numpy.random.seed", "numpy.random.permutation", "numpy.random.uniform", "numpy.random.randint" ], [ "matplotlib.use", "numpy.abs", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
grantseiter/Tax-Brain
[ "180063a193ff8cb0b56349878110066b012dde6d" ]
[ "cs-config/cs_config/helpers.py" ]
[ "\"\"\"\nFunctions used to help tax-brain configure to COMP\n\"\"\"\nimport os\nimport inspect\nimport time\nimport copy\nimport hashlib\nimport gzip\nimport copy\nfrom pathlib import Path\nimport warnings\n\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom taxbrain.report_utils import convert_params\nfrom taxcalc import (Policy, DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS,\n DIST_TABLE_COLUMNS, DIST_TABLE_LABELS,\n add_income_table_row_variable,\n add_quantile_table_row_variable, STANDARD_INCOME_BINS)\nfrom operator import itemgetter\nfrom .constants import (POLICY_SCHEMA, RESULTS_TABLE_TAGS,\n RESULTS_TABLE_TITLES, RESULTS_TOTAL_ROW_KEY_LABELS,\n MONEY_VARS)\nfrom .tables import (summary_aggregate, summary_diff_xbin, summary_diff_xdec,\n summary_dist_xbin, summary_dist_xdec)\n\ntry:\n from s3fs import S3FileSystem\nexcept ImportError as ie:\n S3FileSystem = None\n\nTCPATH = inspect.getfile(Policy)\nTCDIR = os.path.dirname(TCPATH)\n\n\nAWS_ACCESS_KEY_ID = os.environ.get(\"AWS_ACCESS_KEY_ID\", None)\nAWS_SECRET_ACCESS_KEY = os.environ.get(\"AWS_SECRET_ACCESS_KEY\", None)\n\n\ndef random_seed(user_mods, year):\n \"\"\"\n Compute random seed based on specified user_mods, which is a\n dictionary returned by Calculator.read_json_parameter_files().\n \"\"\"\n def random_seed_from_subdict(subdict):\n \"\"\"\n Compute random seed from one user_mods subdictionary.\n \"\"\"\n assert isinstance(subdict, dict)\n all_vals = []\n for year in sorted(subdict.keys()):\n all_vals.append(str(year))\n params = subdict[year]\n for param in sorted(params.keys()):\n try:\n tple = tuple(params[param])\n except TypeError:\n # params[param] is not an iterable value; make it so\n tple = tuple((params[param],))\n all_vals.append(str((param, tple)))\n txt = u''.join(all_vals).encode('utf-8')\n hsh = hashlib.sha512(txt)\n seed = int(hsh.hexdigest(), 16)\n return seed % np.iinfo(np.uint32).max\n # start of random_seed function\n # modify the user mods to work in the random_seed_from_subdict function\n # TODO: Change all of this to work with new adjustments\n user_mods_copy = copy.deepcopy(user_mods)\n beh_mods_dict = {year: {}}\n for param, value in user_mods_copy[\"behavior\"].items():\n beh_mods_dict[year][param] = [value]\n user_mods_copy[\"behavior\"] = beh_mods_dict\n ans = 0\n for subdict_name in user_mods_copy:\n subdict = user_mods_copy[subdict_name]\n if subdict_name == \"policy\":\n subdict = convert_params(subdict)\n ans += random_seed_from_subdict(subdict)\n return ans % np.iinfo(np.uint32).max\n\n\nNUM_TO_FUZZ = 3 # when using dropq algorithm on puf.csv results\n\n\ndef fuzzed(df1, df2, reform_affected, table_row_type):\n \"\"\"\n Create fuzzed df2 dataframe and corresponding unfuzzed df1 dataframe.\n\n Parameters\n ----------\n df1: Pandas DataFrame\n contains results variables for the baseline policy, which are not\n changed by this function\n\n df2: Pandas DataFrame\n contains results variables for the reform policy, which are not\n changed by this function\n\n reform_affected: boolean numpy array (not changed by this function)\n True for filing units with a reform-induced combined tax difference;\n otherwise False\n\n table_row_type: string\n valid values are 'aggr', 'xbin', and 'xdec'\n\n Returns\n -------\n df1, df2: Pandas DataFrames\n where copied df2 is fuzzed to maintain data privacy and\n where copied df1 has same filing unit order as has the fuzzed df2\n \"\"\"\n assert table_row_type in ('aggr', 'xbin', 'xdec')\n assert len(df1.index) == len(df2.index)\n assert 
reform_affected.size == len(df1.index)\n df1 = copy.deepcopy(df1)\n df2 = copy.deepcopy(df2)\n # add copy of reform_affected to df2\n df2['reform_affected'] = copy.deepcopy(reform_affected)\n # construct table rows, for which filing units in each row must be fuzzed\n if table_row_type == 'xbin':\n df1 = add_income_table_row_variable(df1, 'expanded_income',\n STANDARD_INCOME_BINS)\n df2['expanded_income_baseline'] = df1['expanded_income']\n df2 = add_income_table_row_variable(df2, 'expanded_income_baseline',\n STANDARD_INCOME_BINS)\n del df2['expanded_income_baseline']\n elif table_row_type == 'xdec':\n df1 = add_quantile_table_row_variable(df1, 'expanded_income',\n 10, decile_details=True)\n df2['expanded_income_baseline'] = df1['expanded_income']\n df2 = add_quantile_table_row_variable(df2, 'expanded_income_baseline',\n 10, decile_details=True)\n del df2['expanded_income_baseline']\n elif table_row_type == 'aggr':\n df1['table_row'] = np.ones(reform_affected.shape, dtype=int)\n df2['table_row'] = df1['table_row']\n gdf1 = df1.groupby('table_row', sort=False)\n gdf2 = df2.groupby('table_row', sort=False)\n del df1['table_row']\n del df2['table_row']\n # fuzz up to NUM_TO_FUZZ filing units randomly chosen in each group\n # (or table row), where fuzz means to replace the reform (2) results\n # with the baseline (1) results for each chosen filing unit\n pd.options.mode.chained_assignment = None\n group_list = list()\n for name, group2 in gdf2:\n group2 = copy.deepcopy(group2)\n indices = np.where(group2['reform_affected'])\n num = min(len(indices[0]), NUM_TO_FUZZ)\n if num > 0:\n choices = np.random.choice(indices[0], size=num, replace=False)\n group1 = gdf1.get_group(name)\n for idx in choices:\n group2.iloc[idx] = group1.iloc[idx]\n group_list.append(group2)\n del group2\n df2 = pd.concat(group_list)\n del df2['reform_affected']\n pd.options.mode.chained_assignment = 'warn'\n # reinstate index order of df1 and df2 and return\n df1.sort_index(inplace=True)\n df2.sort_index(inplace=True)\n return (df1, df2)\n\n\ndef nth_year_results(tb, year, user_mods, fuzz, return_html=True):\n \"\"\"\n Function to process taxbrain results for a given year\n \"\"\"\n start_time = time.time()\n dv1 = tb.base_data[year]\n dv2 = tb.reform_data[year]\n sres = {}\n if fuzz:\n # seed random number generator with a seed value based on user_mods\n # (reform-specific seed is used to choose whose results are fuzzed)\n seed = random_seed(user_mods, year)\n np.random.seed(seed)\n # make bool array marking which filing units are affected by the reform\n reform_affected = np.logical_not(\n np.isclose(dv1['combined'], dv2['combined'], atol=0.01, rtol=0.0)\n )\n agg1, agg2 = fuzzed(dv1, dv2, reform_affected, 'aggr')\n sres = summary_aggregate(sres, tb)\n del agg1\n del agg2\n dv1b, dv2b = fuzzed(dv1, dv2, reform_affected, 'xbin')\n sres = summary_dist_xbin(sres, tb, year)\n sres = summary_diff_xbin(sres, tb, year)\n del dv1b\n del dv2b\n dv1d, dv2d = fuzzed(dv1, dv2, reform_affected, 'xdec')\n sres = summary_dist_xdec(sres, tb, year)\n sres = summary_diff_xdec(sres, tb, year)\n del dv1d\n del dv2d\n del reform_affected\n else:\n sres = summary_aggregate(sres, tb)\n sres = summary_dist_xbin(sres, tb, year)\n sres = summary_diff_xbin(sres, tb, year)\n sres = summary_dist_xdec(sres, tb, year)\n sres = summary_diff_xdec(sres, tb, year)\n\n # optionally return non-JSON-like results\n # it would be nice to allow the user to download the full CSV instead\n # of a CSV for each year\n # what if we allowed an aggregate format 
call?\n # - presents project with all data proeduced in a run?\n\n if return_html:\n res = {}\n for id in sres:\n res[id] = [{\n 'dimension': year,\n 'raw': sres[id]\n }]\n elapsed_time = time.time() - start_time\n print('elapsed time for this run: {:.1f}'.format(elapsed_time))\n return res\n else:\n elapsed_time = time.time() - start_time\n print('elapsed time for this run: {:.1f}'.format(elapsed_time))\n return sres\n\n\ndef postprocess(data_to_process):\n \"\"\"\n Receives results from run_nth_year_taxcalc_model over N years,\n formats the results, and combines the aggregate results\n \"\"\"\n labels = {x: DIFF_TABLE_LABELS[i]\n for i, x in enumerate(DIFF_TABLE_COLUMNS)}\n labels.update({x: DIST_TABLE_LABELS[i]\n for i, x in enumerate(DIST_TABLE_COLUMNS)})\n\n # nested functions used below\n def label_columns(pdf):\n pdf.columns = [(labels[str(col)] if str(col) in labels else str(col))\n for col in pdf.columns]\n return pdf\n\n def append_year(pdf, year):\n \"\"\"\n append_year embedded function revises all column names in dframe\n \"\"\"\n pdf.columns = ['{}_{}'.format(col, year)\n for col in pdf.columns]\n return pdf\n\n def year_columns(pdf, year):\n pdf.columns = [str(year)]\n return pdf\n\n def arbitrary_defaultdict():\n \"\"\"\n Return an arbitrary number of defaultdicts. This is used to store all\n of the distribution and differences tables\n \"\"\"\n return defaultdict(arbitrary_defaultdict)\n\n formatted = {\"tbl_outputs\": arbitrary_defaultdict(),\n \"aggr_outputs\": defaultdict(dict)}\n downloadable = []\n year_getter = itemgetter('dimension')\n for id, pdfs in data_to_process.items():\n if id.startswith('aggr'):\n pdfs.sort(key=year_getter)\n tbl = pdfs[0][\"raw\"]\n tbl.index = pd.Index(RESULTS_TOTAL_ROW_KEY_LABELS[i]\n for i in tbl.index)\n # format table\n for col in tbl.columns:\n tbl.update(tbl[col].apply(\"${:,.2f}\".format))\n\n title = RESULTS_TABLE_TITLES[id]\n tags = RESULTS_TABLE_TAGS[id]\n formatted[\"aggr_outputs\"][tags[\"law\"]] = {\n \"title\": title,\n \"renderable\": pdf_to_clean_html(tbl)\n }\n # append a downloadable version of the results\n downloadable.append(\n {\n \"media_type\": \"CSV\",\n \"title\": title + \".csv\",\n \"data\": tbl.to_csv()\n }\n )\n\n else:\n for i in pdfs:\n year = i[\"dimension\"]\n tbl = label_columns(i[\"raw\"])\n title = '{} ({})'.format(RESULTS_TABLE_TITLES[id],\n year)\n # format table\n for col in tbl.columns:\n if col in MONEY_VARS:\n tbl.update(tbl[col].apply(\"${:,.2f}\".format))\n\n tags = RESULTS_TABLE_TAGS[id]\n tbl_type = tags[\"table_type\"]\n group = tags[\"grouping\"]\n if id.startswith(\"dist\"):\n law = tags[\"law\"]\n formatted[\"tbl_outputs\"][tbl_type][law][group][year] = {\n \"title\": title,\n \"renderable\": pdf_to_clean_html(tbl)\n }\n else:\n tax = tags[\"tax_type\"]\n formatted[\"tbl_outputs\"][tbl_type][tax][group][year] = {\n \"title\": title,\n \"renderable\": pdf_to_clean_html(tbl)\n }\n\n # add downloadable information\n downloadable.append(\n {\n \"media_type\": \"CSV\",\n \"title\": title + \".csv\",\n \"data\": tbl.to_csv()\n }\n )\n\n return formatted, downloadable\n\n\ndef pdf_to_clean_html(pdf):\n \"\"\"Takes a PDF and returns an HTML table without any deprecated tags or\n irrelevant styling\"\"\"\n tb_replace = ('<table class=\"table table-striped\"')\n\n return (pdf.to_html()\n .replace('<table ', tb_replace)\n .replace(' border=\"1\"', '')\n .replace('class=\"dataframe\"', ''))\n\n\ndef retrieve_puf(\n aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY\n):\n 
\"\"\"\n Function for retrieving the PUF from the OSPC S3 bucket\n \"\"\"\n s3_reader_installed = S3FileSystem is not None\n has_credentials = (\n aws_access_key_id is not None and aws_secret_access_key is not None\n )\n if has_credentials and s3_reader_installed:\n print(\"Reading puf from S3 bucket.\")\n fs = S3FileSystem(key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY,)\n with fs.open(\"s3://ospc-data-files/puf.csv.gz\") as f:\n # Skips over header from top of file.\n puf_df = pd.read_csv(f, compression=\"gzip\")\n return puf_df\n elif Path(\"puf.csv.gz\").exists():\n print(\"Reading puf from puf.csv.gz.\")\n return pd.read_csv(\"puf.csv.gz\", compression=\"gzip\")\n elif Path(\"puf.csv\").exists():\n print(\"Reading puf from puf.csv.\")\n return pd.read_csv(\"puf.csv\")\n else:\n warnings.warn(\n f\"PUF file not available (has_credentials={has_credentials}, \"\n f\"s3_reader_installed={s3_reader_installed})\"\n )\n return None\n" ]
[ [ "pandas.concat", "pandas.read_csv", "numpy.random.seed", "numpy.random.choice", "pandas.Index", "numpy.ones", "numpy.iinfo", "numpy.where", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
nickgerend/Downtown
[ "165d3143ac69fd661050b73c3580e1221da981f8" ]
[ "linework.py" ]
[ "# Written by: Nick Gerend, @dataoutsider\n# Viz: \"Downtown\", enjoy!\n\nimport matplotlib.pyplot as plt\n\n#region Functions\n\ndef line_slope_intercept(x1, y1, x2, y2):\n m = (y2 - y1) / (x2 - x1)\n b = y1 - m * x1\n return m, b # y = mx + b\n\ndef two_lines_intercept(m1, b1, m2, b2):\n x = (b2 - b1) / (m1 - m2)\n y = m1 * x + b1\n return x, y\n\ndef two_lines_intercept2(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s1_x = p1_x - p0_x \n s1_y = p1_y - p0_y\n s2_x = p3_x - p2_x\n s2_y = p3_y - p2_y\n #s = (-s1_y * (p0_x - p2_x) + s1_x * (p0_y - p2_y)) / (-s2_x * s1_y + s1_x * s2_y)\n t = ( s2_x * (p0_y - p2_y) - s2_y * (p0_x - p2_x)) / (-s2_x * s1_y + s1_x * s2_y)\n i_x = p0_x + (t * s1_x)\n i_y = p0_y + (t * s1_y)\n return i_x, i_y\n\n#endregion\n\n#region Classes\n\nclass building:\n def __init__(self, row, column, path, x, y): \n self.row = row\n self.column = column\n self.path = path\n self.x = x\n self.y = y\n\nclass lines:\n def __init__(self, line, x1, y1, x2, y2): \n self.line = line\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2\n\nclass intercept:\n def __init__(self, intercept, line1, line2): \n self.intercept = intercept\n self.x1 = line1.x1\n self.y1 = line1.y1\n self.x2 = line1.x2\n self.y2 = line1.y2\n self.x3 = line2.x1\n self.y3 = line2.y1\n self.x4 = line2.x2\n self.y4 = line2.y2\n self.iPoint_x = 0.0\n self.iPoint_y = 0.0\n def calc_iPoint(self):\n self.iPoint_x, self.iPoint_y = two_lines_intercept2(self.x1, self.y1, self.x2, self.y2, self.x3, self.y3, self.x4, self.y4)\n\n#endregion\n\n#region Initialize\nwidth_height_ratio = 0.8\nhalf_whr = width_height_ratio / 2\n\nV_x = 10.0\nV_y = 5.0\n\nlines_list = []\n\n# A1:\nlines_list.append(lines('a1', half_whr, 0.0, half_whr, 1.0))\nlines_list.append(lines('a1_', 0.0, 0.0, V_x, V_y))\nA1_i = intercept('A1', lines_list[0], lines_list[1])\nA1_i.calc_iPoint()\nA1_x = A1_i.iPoint_x\nA1_y = A1_i.iPoint_y\n\n# A2:\nlines_list.append(lines('a2', half_whr, 0.0, half_whr, 1.0))\nlines_list.append(lines('a2_', 0.0, 0.5, V_x, V_y))\nA2_i = intercept('A2', lines_list[2], lines_list[3])\nA2_i.calc_iPoint()\nA2_x = A2_i.iPoint_x\nA2_y = A2_i.iPoint_y\n\n# A3:\nlines_list.append(lines('a3', half_whr, 0.0, half_whr, 1.0))\nlines_list.append(lines('a3_', 0.0, 1.0, V_x, V_y))\nA3_i = intercept('A3', lines_list[4], lines_list[5])\nA3_i.calc_iPoint()\nA3_x = A3_i.iPoint_x\nA3_y = A3_i.iPoint_y\n\nbuilding_list = []\nbuilding_list.append(building(0.0, 0.0, -1.0, 0.0, 0.0)) \nbuilding_list.append(building(0.5, 0.0, -1.0, 0.0, 0.5))\nbuilding_list.append(building(1.0, 0.0, -1.0, 0.0, 1.0))\nbuilding_list.append(building(0.0, 0.5, -1.0, A1_x, A1_y))\nbuilding_list.append(building(0.5, 0.5, -1.0, A2_x, A2_y))\nbuilding_list.append(building(1.0, 0.5, -1.0, A3_x, A3_y))\n\n#endregion\n\n#region Loop\n\ncolumns = 8\ncol_counter = 1\ndiag_point = 2\n\nfor j in range(2 * columns - 1):\n \n lines_list.clear()\n\n #region Calc Points\n # A1:\n lines_list.append(lines('a1', building_list[diag_point].x, building_list[diag_point].y, building_list[diag_point + 2].x, building_list[diag_point + 2].y))\n lines_list.append(lines('a1_', 0.0, 0.0, V_x, V_y))\n A1_i = intercept('A1', lines_list[0], lines_list[1])\n A1_i.calc_iPoint()\n A1_x = A1_i.iPoint_x\n A1_y = A1_i.iPoint_y\n\n # A2:\n lines_list.append(lines('a2', A1_x, 0.0, A1_x, 1.0))\n lines_list.append(lines('a2_', 0.0, 0.5, V_x, V_y))\n A2_i = intercept('A2', lines_list[2], lines_list[3])\n A2_i.calc_iPoint()\n A2_x = A2_i.iPoint_x\n A2_y = A2_i.iPoint_y\n\n # A3:\n 
lines_list.append(lines('a3', A1_x, 0.0, A1_x, 1.0))\n lines_list.append(lines('a3_', 0.0, 1.0, V_x, V_y))\n A3_i = intercept('A3', lines_list[4], lines_list[5])\n A3_i.calc_iPoint()\n A3_x = A3_i.iPoint_x\n A3_y = A3_i.iPoint_y\n #endregion\n\n building_list.append(building(0.0, col_counter, -1.0, A1_x, A1_y))\n building_list.append(building(0.5, col_counter, -1.0, A2_x, A2_y))\n building_list.append(building(1.0, col_counter, -1.0, A3_x, A3_y))\n\n col_counter += .5\n diag_point += 3\n\n# Diagram:\n#\n# 1.0 --b----A3----B3--------- \\\n# 0.5 --|---A2,b---B2--------- > (x_offset, y_offset) vanishing point\n# ^ 0.0 --|----A1----B1--------- /\n# y a,c\n# x > 0.0 [*/2] [w/h]*\n\n#endregion\n\n#region Display points\n\nx = [o.x for o in building_list if (o.column % 1 == 0) and (o.row % 1 == 0)]\ny = [o.y for o in building_list if (o.column % 1 == 0) and (o.row % 1 == 0)]\nplt.scatter(x, y)\nplt.show()\n\n#endregion\n\n#region Create polygons\nFFR = [o for o in building_list if (o.column % 1 == 0) and (o.row % 1 == 0)] # First Floor Right\nbuilding_poly = []\nindex = 0\nfor i in range(columns):\n index = 2 * i\n building_poly.append(building(0, index, 0, FFR[index].x, FFR[index].y))\n building_poly.append(building(0, index, 1, FFR[index+2].x, FFR[index+2].y))\n building_poly.append(building(0, index, 2, FFR[index+3].x, FFR[index+3].y))\n building_poly.append(building(0, index, 3, FFR[index+1].x, FFR[index+1].y))\n#endregion\n\n#region Write out file\nimport csv\nimport os\nwith open(os.path.dirname(__file__) + '/building.csv', 'w',) as csvfile:\n writer = csv.writer(csvfile, lineterminator = '\\n')\n writer.writerow(['row', 'column', 'path', 'x', 'y'])\n for window in building_poly:\n writer.writerow([window.row, window.column, window.path, window.x, window.y])\n#endregion" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
grburgess/gammapy
[ "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1", "609e460698caca7223afeef5e71826c7b32728d1" ]
[ "gammapy/scripts/tests/test_image_fit.py", "gammapy/detect/tests/test_lima.py", "gammapy/image/tests/test_catalog.py", "gammapy/utils/coordinates/celestial.py", "gammapy/utils/tests/test_energy.py", "gammapy/data/tests/test_obs_stats.py", "docs/tutorials/flux_point/residuals_images.py", "gammapy/detect/iterfind.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport json\nfrom numpy.testing.utils import assert_allclose\nfrom astropy.stats import gaussian_sigma_to_fwhm\nfrom astropy.tests.helper import pytest\nfrom ...utils.testing import requires_dependency, requires_data, run_cli\nfrom ...datasets import load_poisson_stats_image\nfrom ..image_fit import image_fit\n\n\nEXPECTED = ([9.016526, 99.865985, 100.147877, 1010.824189],\n [4 * gaussian_sigma_to_fwhm, 100, 100, 1E3],\n [5 * gaussian_sigma_to_fwhm, 100, 100, 1E3])\nRTOL = (1E-5, 1E-3, 1E-3)\nPSF = (True, True, False)\nDATA = (True, False, False)\n\n@requires_dependency('sherpa')\n@requires_data('gammapy-extra')\[email protected]('expected, rtol, psf, data',\n zip(EXPECTED, RTOL, PSF, DATA))\ndef test_sherpa_like(tmpdir, expected, rtol, psf, data):\n \"\"\"\n Fit Poisson stats image test data.\n \"\"\"\n\n # load test data\n filenames = load_poisson_stats_image(extra_info=True, return_filenames=True)\n outfile = tmpdir / 'test_sherpa_like.json'\n\n # write test source json file\n sources_data = {}\n sources_data['gaussian'] = {'ampl': 1E3,\n 'xpos': 99,\n 'ypos': 99,\n 'fwhm': 4 * gaussian_sigma_to_fwhm}\n\n filename = tmpdir / 'test_sherpa_like_sources.json'\n with filename.open('w') as fh:\n json.dump(sources_data, fh)\n\n # set up args\n args = {'exposure': str(filenames['exposure']),\n 'background': str(filenames['background']),\n 'sources': str(filename),\n 'roi': None,\n 'outfile': str(outfile)}\n\n if data:\n args['counts'] = str(filenames['counts'])\n else:\n args['counts'] = str(filenames['model'])\n if psf:\n args['psf'] = filenames['psf']\n else:\n args['psf'] = None\n\n image_fit(**args)\n\n with outfile.open() as fh:\n data = json.load(fh)\n\n # This recovers the values from the test dataset documented here:\n # https://github.com/gammapy/gammapy-extra/tree/master/\n # test_datasets/unbundled/poisson_stats_image#data\n actual = data['fit']['parvals']\n assert_allclose(actual, expected, rtol=rtol)\n\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport numpy as np\nfrom numpy.testing.utils import assert_allclose, assert_equal\n\nfrom astropy.convolution import Tophat2DKernel\nfrom astropy.io import fits\n\nfrom ...utils.testing import requires_dependency, requires_data\nfrom ...detect import compute_ts_map, compute_lima_map, compute_lima_on_off_map\nfrom ...datasets import load_poisson_stats_image, gammapy_extra\nfrom ...image import SkyImageCollection, SkyImage\n\nfrom ...extern.pathlib import Path\n\n\n@requires_dependency('scipy')\n@requires_data('gammapy-extra')\ndef test_compute_lima_map():\n \"\"\"\n Test Li&Ma map against TS map for Tophat kernel\n \"\"\"\n filenames = load_poisson_stats_image(extra_info=True, return_filenames=True)\n data = SkyImageCollection()\n data.counts = SkyImage.read(filenames['counts'])\n data.background = SkyImage.read(filenames['background'])\n data.exposure = SkyImage.read(filenames['exposure'])\n\n kernel = Tophat2DKernel(5)\n result_lima = compute_lima_map(data['counts'], data['background'], kernel,\n data['exposure'])\n kernel.normalize('integral')\n result_ts = compute_ts_map(data['counts'], data['background'], data['exposure'],\n kernel)\n\n assert_allclose(result_ts.sqrt_ts, result_lima.significance, atol=1E-3)\n assert_allclose(result_ts.amplitude, result_lima.flux, 
atol=3E-12)\n\n\n@requires_dependency('scipy')\n@requires_data('gammapy-extra')\ndef test_compute_lima_on_off_map():\n \"\"\"\n Test Li&Ma map with snippet from the H.E.S.S. survey data.\n \"\"\"\n filename = gammapy_extra.filename('test_datasets/unbundled/hess/survey/'\n 'hess_survey_snippet.fits.gz')\n maps = SkyImageCollection.read(filename)\n\n kernel = Tophat2DKernel(5)\n\n result_lima = compute_lima_on_off_map(maps.on.data, maps.off.data, maps.onexposure.data,\n maps.offexposure.data, kernel)\n\n # reproduce safe significance threshold from HESS software\n result_lima.significance.data[result_lima.n_on.data < 5] = 0\n\n # Set boundary to NaN in reference image\n maps.significance.data[np.isnan(result_lima.significance)] = np.nan\n assert_allclose(result_lima.significance, maps.significance, atol=1E-5)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom astropy.units import Quantity\nfrom astropy.wcs import WCS\nfrom ...utils.testing import requires_dependency, requires_data\nfrom .. import catalog\nfrom ...image import SkyImage\nfrom ...irf import EnergyDependentTablePSF\nfrom ...cube import SkyCube\nfrom ...datasets import FermiGalacticCenter\n\n\ndef test_extended_image():\n # TODO: implement me\n pass\n\n\n@requires_data('gammapy-extra')\ndef test_source_image():\n reference_hdu = SkyImage.empty(nxpix=10, nypix=10, binsz=1).to_image_hdu()\n reference_wcs = WCS(reference_hdu.header)\n energy = Quantity([10, 500], 'GeV')\n reference = SkyCube(data=reference_hdu.data,\n wcs=reference_wcs, energy=energy)\n\n psf_file = FermiGalacticCenter.filenames()['psf']\n psf = EnergyDependentTablePSF.read(psf_file)\n\n image, energies = catalog._source_image(catalog='1FHL',\n reference_cube=reference,\n total_flux=True)\n\n actual = image.sum()\n # Flux of sources within a 10x10 deg region about Galactic Center\n expected = 1.6098631760996795e-07\n assert_allclose(actual, expected)\n\n\n@requires_dependency('scipy')\n@requires_data('gammapy-extra')\ndef test_catalog_image():\n reference_hdu = SkyImage.empty(nxpix=10, nypix=10, binsz=1).to_image_hdu()\n reference_wcs = WCS(reference_hdu.header)\n energy = Quantity([10, 500], 'GeV')\n\n psf_file = FermiGalacticCenter.filenames()['psf']\n psf = EnergyDependentTablePSF.read(psf_file)\n\n out_cube = catalog.catalog_image(reference_hdu, psf, catalog='1FHL',\n source_type='point', total_flux=True,\n sim_table=None)\n\n actual = out_cube.data.sum()\n\n # Ensures flux is consistent following PSF convolution to within 1%\n expected = 1.6098631760996795e-07\n assert_allclose(actual, expected, rtol=0.01)\n\n\n@requires_data('gammapy-extra')\ndef test_catalog_table():\n # Checks catalogs are loaded correctly\n\n table_1fhl = catalog.catalog_table('1FHL')\n assert len(table_1fhl) == 514\n\n table_2fgl = catalog.catalog_table('2FGL')\n assert len(table_2fgl) == 1873\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Celestial coordinate utility functions.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom numpy import (cos, sin, arccos, arcsin,\n arctan2, radians, degrees, pi)\n\n__all__ = ['galactic_to_radec',\n 'radec_to_galactic',\n 'separation',\n 'minimum_separation',\n 'pair_correlation',\n ]\n\n\ndef galactic_to_radec(glon, glat, unit='deg'):\n \"\"\"Convert Galactic to Equatorial J2000 coordinates.\n\n Parameters\n 
----------\n glon, glat : array_like\n Galactic coordinates\n unit : {'deg', 'rad'}\n Units of input and output coordinates\n\n Returns\n -------\n ra, dec : array_like\n Equatorial coordinates.\n\n Notes\n -----\n This is a standalone implementation that only uses ``numpy`` for testing.\n Use `~astropy.coordinates.SkyCoord` instead.\n Only accurate to ~ 3 digits.\n \"\"\"\n if unit == 'deg':\n glon, glat = radians(glon), radians(glat)\n\n ra_gp = radians(192.85948)\n de_gp = radians(27.12825)\n lcp = radians(122.932)\n\n term1 = cos(glat) * sin(lcp - glon)\n term2 = cos(de_gp) * sin(glat) - sin(de_gp) * cos(glat) * cos(lcp - glon)\n ramragp = arctan2(term1, term2)\n ra = (ramragp + ra_gp + 2 * pi) % (2 * pi)\n\n sin_d = sin(de_gp) * sin(glat) + cos(de_gp) * cos(glat) * cos(lcp - glon)\n dec = arcsin(sin_d)\n\n if unit == 'deg':\n ra, dec = degrees(ra), degrees(dec)\n\n return ra, dec\n\n\ndef radec_to_galactic(ra, dec, unit='deg'):\n \"\"\"Convert Equatorial J2000 to Galactic coordinates.\n\n Parameters\n ----------\n ra, dec : array_like\n Equatorial coordinates.\n unit : {'deg', 'rad'}\n Units of input and output coordinates\n\n Returns\n -------\n glon, glat : array_like\n Galactic coordinates\n\n Notes\n -----\n This is a standalone implementation that only uses ``numpy`` for testing.\n Use `~astropy.coordinates.SkyCoord` instead.\n Only accurate to ~ 3 digits.\n \"\"\"\n if unit == 'deg':\n ra, dec = radians(ra), radians(dec)\n\n ra_gp = radians(192.85948)\n de_gp = radians(27.12825)\n lcp = radians(122.932)\n\n term1 = cos(dec) * sin(ra - ra_gp)\n term2 = cos(de_gp) * sin(dec) - sin(de_gp) * cos(dec) * cos(ra - ra_gp)\n lcpml = arctan2(term1, term2)\n glon = (lcp - lcpml + 2 * pi) % (2 * pi)\n\n sin_b = sin(de_gp) * sin(dec) + cos(de_gp) * cos(dec) * cos(ra - ra_gp)\n glat = arcsin(sin_b)\n\n if unit == 'deg':\n glon, glat = degrees(glon), degrees(glat)\n\n return glon, glat\n\n\ndef separation(lon1, lat1, lon2, lat2, unit='deg'):\n \"\"\"Angular separation between points on the sphere.\n\n Parameters\n ----------\n lon1, lat1, lon2, lat2 : array_like\n Coordinates of the two points\n unit : {'deg', 'rad'}\n Units of input and output coordinates\n\n Returns\n -------\n separation : array_like\n Angular separation\n \"\"\"\n if unit == 'deg':\n lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))\n\n term1 = cos(lat1) * cos(lon1) * cos(lat2) * cos(lon2)\n term2 = cos(lat1) * sin(lon1) * cos(lat2) * sin(lon2)\n term3 = sin(lat1) * sin(lat2)\n mu = term1 + term2 + term3\n separation = arccos(mu)\n\n if unit == 'deg':\n separation = degrees(separation)\n\n return separation\n\n\ndef minimum_separation(lon1, lat1, lon2, lat2, unit='deg'):\n \"\"\"Compute minimum distance of each (lon1, lat1) to any (lon2, lat2).\n\n Parameters\n ----------\n lon1, lat1 : array_like\n Primary coordinates of interest\n lon2, lat2 : array_like\n Counterpart coordinate array\n unit : {'deg', 'rad'}\n Units of input and output coordinates\n\n Returns\n -------\n theta_min : array\n Minimum distance\n \"\"\"\n lon1 = np.asanyarray(lon1)\n lat1 = np.asanyarray(lat1)\n\n theta_min = np.empty_like(lon1, dtype=np.float64)\n\n for i1 in range(lon1.size):\n thetas = separation(lon1[i1], lat1[i1],\n lon2, lat2, unit=unit)\n theta_min[i1] = thetas.min()\n\n return theta_min\n\n\ndef pair_correlation(lon, lat, theta_bins, unit='deg'):\n \"\"\"Compute pair correlation function for points on the sphere.\n\n Parameters\n ----------\n lon, lat : array_like\n Coordinate arrays\n theta_bins : array_like\n Array 
defining the ``theta`` binning.\n ``theta`` is the angular offset between positions.\n unit : {'deg', 'rad'}\n Units of input and output coordinates\n\n Returns\n -------\n counts : array\n Array of point separations per ``theta`` bin.\n \"\"\"\n # TODO: Implement speedups:\n # - use radians\n # - avoid processing each pair twice (distance a to b and b to a)\n counts = np.zeros(shape=len(theta_bins) - 1, dtype=int)\n # If there are many points this should have acceptable performance\n # because the inner loop is in np.histogram, not in Python\n for ii in range(len(lon)):\n theta = separation(lon[ii], lat[ii], lon, lat, unit=unit)\n hist = np.histogram(theta, theta_bins)[0]\n counts += hist\n\n return counts\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_allclose\nimport astropy.units as u\nfrom astropy.io import fits\nfrom ...utils.testing import requires_data\nfrom ...datasets import gammapy_extra\nfrom ...utils.energy import Energy, EnergyBounds\n\n\ndef test_Energy():\n # Explicit constructor call\n energy = Energy([1, 3, 6, 8, 12], 'TeV')\n actual = str(energy.__class__)\n desired = \"<class 'gammapy.utils.energy.Energy'>\"\n assert_equal(actual, desired)\n\n val = u.Quantity([1, 3, 6, 8, 12], 'TeV')\n actual = Energy(val, 'GeV')\n desired = Energy((1, 3, 6, 8, 12), 'TeV')\n assert_equal(actual, desired)\n\n # View casting\n energy = val.view(Energy)\n actual = type(energy).__module__\n desired = 'gammapy.utils.energy'\n assert_equal(actual, desired)\n\n # New from template\n energy = Energy([0, 1, 2, 3, 4, 5], 'eV')\n energy2 = energy[1:3]\n actual = energy2\n desired = Energy([1, 2], 'eV')\n assert_equal(actual, desired)\n\n actual = energy2.nbins\n desired = 2\n assert_equal(actual, desired)\n\n actual = energy2.unit\n desired = u.eV\n assert_equal(actual, desired)\n\n # Equal log spacing\n energy = Energy.equal_log_spacing(1 * u.GeV, 10 * u.TeV, 6)\n actual = energy[0]\n desired = Energy(1 * u.GeV, 'TeV')\n assert_equal(actual, desired)\n\n energy = Energy.equal_log_spacing(2, 6, 3, 'GeV')\n actual = energy.nbins\n desired = 3\n assert_equal(actual, desired)\n\n # range + nbins\n erange = energy.range.value\n bins = energy.nbins\n actual = np.logspace(np.log10(erange[0]), np.log10(erange[1]), bins)\n desired = energy.value\n assert_equal(actual, desired)\n\n # Input string\n e_string = '10 TeV'\n actual = Energy(e_string)\n desired = Energy(10, 'TeV')\n assert_equal(actual, desired)\n\n e_string = u'10 TeV'\n actual = Energy(e_string)\n desired = Energy(10, 'TeV')\n assert_equal(actual, desired)\n\n\ndef test_EnergyBounds():\n val = u.Quantity([1, 2, 3, 4, 5], 'TeV')\n actual = EnergyBounds(val, 'GeV')\n desired = EnergyBounds((1, 2, 3, 4, 5), 'TeV')\n assert_equal(actual, desired)\n\n # View casting\n energy = val.view(EnergyBounds)\n actual = type(energy).__module__\n desired = 'gammapy.utils.energy'\n assert_equal(actual, desired)\n\n # New from template\n energy = EnergyBounds([0, 1, 2, 3, 4, 5], 'keV')\n energy2 = energy[1:4]\n actual = energy2\n desired = EnergyBounds([1, 2, 3], 'keV')\n assert_equal(actual, desired)\n\n actual = energy2.nbins\n desired = 2\n assert_equal(actual, desired)\n\n actual = energy2.unit\n desired = u.keV\n assert_equal(actual, desired)\n\n # Equal log spacing\n energy = EnergyBounds.equal_log_spacing(1 * u.TeV, 10 * u.TeV, 10)\n actual = energy.nbins\n desired = 10\n 
assert_equal(actual, desired)\n\n # Log centers\n center = energy.log_centers\n actual = type(center).__module__\n desired = 'gammapy.utils.energy'\n assert_equal(actual, desired)\n\n # Upper/lower bounds\n actual = energy.upper_bounds\n desired = energy[1:]\n assert_equal(actual, desired)\n\n actual = energy.lower_bounds\n desired = energy[:-1]\n assert_equal(actual, desired)\n\n lower = [1, 3, 4, 5]\n upper = [3, 4, 5, 8]\n actual = EnergyBounds.from_lower_and_upper_bounds(lower, upper, 'TeV')\n desired = EnergyBounds([1, 3, 4, 5, 8], 'TeV')\n assert_equal(actual, desired)\n\n # Range\n erange = energy.range\n actual = erange[0]\n desired = energy[0]\n assert_equal(actual, desired)\n actual = erange[1]\n desired = energy[-1]\n assert_equal(actual, desired)\n\n # Bands\n bands = energy.bands\n actual = bands[0]\n desired = energy[1] - energy[0]\n assert_equal(actual, desired)\n\n\n@requires_data('gammapy-extra')\ndef test_EnergyBounds_read():\n # read EBOUNDS extension\n filename = gammapy_extra.filename('test_datasets/irf/hess/ogip/run_rmf60741.fits')\n\n hdulist = fits.open(filename)\n ebounds = EnergyBounds.from_ebounds(hdulist['EBOUNDS'])\n desired = hdulist['EBOUNDS'].data['E_MAX'][-1]\n actual = ebounds[-1].value\n assert_equal(actual, desired)\n\n # read MATRIX extension\n ebounds = EnergyBounds.from_rmf_matrix(hdulist['MATRIX'])\n desired = hdulist['MATRIX'].data['ENERG_LO'][3]\n actual = ebounds[3].value\n assert_equal(actual, desired)\n\n\ndef test_EnergyBounds_write(tmpdir):\n ebounds = EnergyBounds.equal_log_spacing(1 * u.TeV, 10 * u.TeV, 10)\n writename = str(tmpdir / 'ebounds_test.fits')\n hdu = ebounds.to_ebounds()\n prim_hdu = fits.PrimaryHDU()\n hdulist = fits.HDUList([prim_hdu, hdu])\n hdulist.writeto(writename)\n\n ebounds2 = EnergyBounds.from_ebounds(hdulist[1])\n actual = ebounds2\n desired = ebounds\n assert_allclose(actual, desired)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom numpy.testing import assert_allclose\nfrom astropy.tests.helper import pytest\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom regions import CircleSkyRegion\nfrom ...data import DataStore, ObservationList, ObservationStats, Target\nfrom ...utils.testing import requires_data, requires_dependency\nfrom ...background import reflected_regions_background_estimate as refl\nfrom ...image import SkyMask\n\n\n@requires_data('gammapy-extra')\ndef get_obs_list():\n data_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2/')\n run_list = [23523, 23526]\n obs_list = ObservationList([data_store.obs(_) for _ in run_list])\n return obs_list\n\n\n@requires_data('gammapy-extra')\ndef get_obs(id):\n obs_list = get_obs_list()\n for run in obs_list:\n if run.obs_id == id:\n return run\n\n\[email protected]\ndef target():\n pos = SkyCoord(83.63 * u.deg, 22.01 * u.deg)\n on_size = 0.3 * u.deg\n on_region = CircleSkyRegion(pos, on_size)\n\n target = Target(position=pos,\n on_region=on_region,\n name='Crab Nebula',\n tag='crab')\n return target\n\n\n@requires_data('gammapy-extra')\[email protected]\ndef get_mask():\n mask = SkyMask.read('$GAMMAPY_EXTRA/datasets/exclusion_masks/tevcat_exclusion.fits')\n return mask\n\n\n@requires_data('gammapy-extra')\n@requires_dependency('scipy')\ndef test_str(target):\n run = get_obs(23523)\n bg = refl(target.on_region,\n run.pointing_radec, get_mask(), run.events)\n stats = ObservationStats.from_target(run, target, 
bg)\n text = str(stats)\n assert 'Observation summary report' in text\n\n\n@requires_data('gammapy-extra')\n@requires_dependency('scipy')\ndef test_stack(target):\n obs_list = get_obs_list()\n obs_stats = list()\n for run in obs_list:\n bg = refl(target.on_region,\n run.pointing_radec, get_mask(), run.events)\n obs_stats.append(ObservationStats.from_target(run, target, bg))\n sum_obs_stats = ObservationStats.stack(obs_stats)\n assert_allclose(sum_obs_stats.alpha, 0.284, rtol=1e-2)\n assert_allclose(sum_obs_stats.sigma, 23.5757575757, rtol=1e-3)\n", "\"\"\"Create residuals image based on the two flux point methods\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom gammapy.spectrum.flux_point import compute_differential_flux_points\nfrom gammapy.spectrum.powerlaw import power_law_evaluate, power_law_integral_flux\n\n\ndef compute_flux_error(gamma_true, gamma_reco, method):\n # Let's assume a concrete true spectrum and energy bin.\n # Note that the residuals computed below do *not* depend on\n # these parameters.\n energy_min, energy_max = 1, 10\n energy_ref, diff_flux_ref = 1, 1\n # Compute integral flux in the energy band assuming `gamma_true`\n int_flux = power_law_integral_flux(diff_flux_ref, gamma_true,\n energy_ref, energy_min, energy_max)\n # Compute flux point\n table = compute_differential_flux_points(method, 'power_law',\n spectral_index=gamma_reco,\n energy_min=energy_min, energy_max=energy_max,\n int_flux=int_flux)\n # Compute relative error of the flux point\n energy = table['ENERGY'].data\n flux_reco = table['DIFF_FLUX'].data\n flux_true = power_law_evaluate(energy, diff_flux_ref * np.ones_like(energy),\n np.array(gamma_true).reshape(energy.shape),\n energy_ref * np.ones_like(energy))\n flux_true = flux_true.reshape(gamma_true.shape)\n flux_reco = flux_reco.reshape(gamma_true.shape)\n flux_error = (flux_reco - flux_true) / flux_true\n return flux_error\n\n\ndef residuals_image():\n fig = plt.figure(figsize=(15, 5))\n gamma_true = np.arange(1.01, 7, 1)\n gamma_reco = np.arange(1.01, 7, 1)\n gamma_true, gamma_reco = np.meshgrid(gamma_true, gamma_reco)\n flux_error_lafferty = compute_flux_error(gamma_true, gamma_reco,\n method='lafferty')\n flux_error_log_center = compute_flux_error(gamma_true, gamma_reco,\n method='log_center')\n flux_error_ratio = np.log10(flux_error_lafferty / flux_error_log_center)\n extent = [0.5, 6.5, 0.5, 6.5]\n vmin, vmax = -3, 3\n axes_1 = fig.add_subplot(131)\n axes_1.imshow(np.array(flux_error_lafferty),\n interpolation='nearest', extent=extent,\n origin=\"lower\", vmin=vmin, vmax=vmax,\n cmap='RdBu')\n axes_1.set_ylabel('Assumed Spectral Index', fontsize=14)\n axes_1.set_title('Lafferty Method', fontsize=12)\n\n axes_2 = fig.add_subplot(132)\n axes_2.imshow(np.array(flux_error_log_center),\n interpolation='nearest', extent=extent,\n origin=\"lower\", vmin=vmin, vmax=vmax,\n cmap='RdBu')\n axes_2.set_xlabel('True Spectral Index', fontsize=14)\n axes_2.set_title('Log-Center Method', fontsize=12)\n\n axes_3 = fig.add_subplot(133)\n im = axes_3.imshow(np.array(flux_error_ratio),\n interpolation='nearest', extent=extent,\n origin=\"lower\", vmin=vmin, vmax=vmax,\n cmap='RdBu')\n axes_3.set_title('Residual Log Ratio: \\n Log(Lafferty/Log Center)',\n fontsize=12)\n plt.tight_layout()\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.92, 0.11, 0.025, 0.78])\n fig.colorbar(im, cax=cbar_ax)\n return fig\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"An iterative multi-scale source detection method.\n\nThis 
is a prototype implementation of the following algorithm:\n1. Input is: count, background and exposure map and list of scales\n2. Compute significance maps on multiple scales (disk-correlate)\n3. Largest peak on any scale gives a seed position / extension (the scale)\n4. Fit a 2D Gauss-model source using the seed parameters\n5. Add the source to a list of detected sources and the background model\n6. Restart at 2, but this time with detected sources added to the background\n model, i.e. significance maps will be \"residual significance\" maps.\n\nTODO: tons of things, e.g.\n* Use Sherpa catalog pipeline for `fit_source_parameters step.\n This will automatically take care of these points:\n * Keep parameters of previously found sources free when adding a new source\n * Write more debug maps (e.g. excess)\n and info (e.g. sources_guess positions).\n * Add PSF convolution\n* Use TS maps with Gauss source morphology instead of disk.\n* Make it more modular and more abstract; put in gammapy.detect\n - user should be able to plug-in their significance map computation?\n - support different source models?\n - Separate Iterator, SignificanceMapCalculator, Guesser, Fitter ...\n and implement different methods as sub-classes or via callbacks?\n - e.g. list of peaks should be replaced with some abstract class that\n allows different characteristics / methods to be implemented.\n* Introduce parameters that allow us to vary the procedure\n* Check if Python garbage collection for iter_maps sets in OK\n or if explicit garbage collection is needed.\n* Use photutils aperture photometry for estimate_flux?\n* Introduce FLUX_SCALE = 1e-10 parameter to avoid roundoff error problems?\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport logging\nimport numpy as np\nfrom astropy.io import fits\nfrom ..extern.pathlib import Path\nfrom .. 
import stats\nfrom ..image import disk_correlate\n\n__all__ = [\n 'IterativeSourceDetector',\n]\n\nlog = logging.getLogger(__name__)\n\n\nclass FitFailedError(Exception):\n \"\"\"Fit failed error.\n \"\"\"\n pass\n\n\ndef gauss2d(x, y, xpos, ypos, sigma, flux):\n \"\"\"2D Gaussian source model.\"\"\"\n x = np.asanyarray(x, dtype=np.float64)\n y = np.asanyarray(y, dtype=np.float64)\n theta2 = (x - xpos) ** 2 + (y - ypos) ** 2\n sigma2 = sigma * sigma\n term_a = 1 / (2 * np.pi * sigma2)\n term_b = np.exp(-0.5 * theta2 / sigma2)\n image = term_a * term_b\n return flux * image / image.sum()\n\n\nclass IterativeSourceDetector(object):\n \"\"\"An iterative source detection algorithm.\n\n TODO: document\n\n Parameters\n ----------\n debug_output_folder : str\n Use empty string for no debug output.\n \"\"\"\n\n def __init__(self, maps, scales, max_sources=10, significance_threshold=5,\n max_ncall=300, debug_output_folder='', overwrite=False):\n self.maps = maps\n # Note: FITS convention is to start counting pixels at 1\n y, x = np.indices(maps['counts'].shape, dtype=np.int32) + 1\n self.maps['x'], self.maps['y'] = x, y\n\n # Temp maps that change in each iteration\n self.iter_maps = dict()\n self.peaks = []\n\n self.scales = np.asanyarray(scales)\n self.max_sources = max_sources\n self.significance_threshold = significance_threshold\n self.max_ncall = max_ncall\n self.debug_output_folder = debug_output_folder\n self.overwrite = overwrite\n\n self.sources_guess = []\n self.sources = []\n\n # At the moment we only keep the peaks found in the current iteration\n # self.peaks = np.zeros_like(self.scales)\n\n def run(self):\n \"\"\"Run source detection.\"\"\"\n log.debug('Running source detection')\n\n for _ in range(self.max_sources):\n log.debug('Starting iteration number {0}'.format(_))\n debug_folder = self.debug_output_folder + '/' + str(_)\n if self.debug_output_folder:\n Path(debug_folder).mkdir()\n log.info('mkdir {0}'.format(debug_folder))\n\n self.compute_iter_maps()\n if self.debug_output_folder:\n # Save per iteration maps\n for name in ['background']:\n filename = '{0}/{1}.fits'.format(debug_folder, name)\n log.info('Writing {0}'.format(filename))\n fits.writeto(filename, self.iter_maps[name], clobber=self.overwrite)\n\n # Save per iteration and scale maps\n for name in ['significance']:\n for scale in self.scales:\n filename = '{0}/{1}_{2}.fits'.format(debug_folder, name, scale)\n log.info('Writing {0}'.format(filename))\n fits.writeto(filename, self.iter_maps[name][scale], clobber=self.overwrite)\n\n self.find_peaks()\n # TODO: debug output to JSON here and for later steps\n\n if self.stop_iteration():\n break\n\n self.guess_source_parameters()\n if self.debug_output_folder:\n filename = '{0}/{1}'.format(debug_folder, 'sources_guess.reg')\n self.save_regions(filename, selection='guess')\n\n try:\n self.fit_source_parameters()\n except FitFailedError:\n log.warning('Fit failed. 
Full stop.')\n break\n\n def compute_iter_maps(self):\n \"\"\"Compute maps for this iteration.\"\"\"\n log.debug('Computing maps for this iteration.')\n self.iter_maps = dict()\n\n # Note: add into a fresh array; `background += ...` would modify\n # self.maps['background'] in place and double-count detected\n # sources in later iterations.\n background = self.maps['background'] + self.model_excess(self.sources)\n self.iter_maps['background'] = background\n\n self.iter_maps['significance'] = dict()\n for scale in self.scales:\n counts = disk_correlate(self.maps['counts'], scale)\n background = disk_correlate(self.iter_maps['background'], scale)\n significance = stats.significance(counts, background)\n self.iter_maps['significance'][scale] = significance\n\n def model_excess(self, sources):\n \"\"\"Compute model excess image.\"\"\"\n x, y = self.maps['x'], self.maps['y']\n flux = np.zeros_like(x, dtype=np.float64)\n for source in sources:\n source_flux = gauss2d(x, y, **source)\n flux += source_flux\n excess = flux * self.maps['exposure']\n return excess\n\n def find_peaks(self):\n \"\"\"Find peaks in residual significance image.\"\"\"\n log.debug('Finding peaks.')\n self.peaks = []\n for scale in self.scales:\n image = self.iter_maps['significance'][scale]\n # Note: significance images sometimes contain Inf or NaN values.\n # We set them here to a value so that they will be ignored\n mask = np.invert(np.isfinite(image))\n image[mask] = -1e10\n\n # This is how to find the peak position in a 2D numpy array\n y, x = np.unravel_index(np.nanargmax(image), image.shape)\n val = image[y, x]\n peak = dict()\n peak['xpos'], peak['ypos'] = x, y\n peak['val'], peak['scale'] = val, scale\n self.peaks.append(peak)\n log.debug('Peak on scale {scale:5.2f} is at ({xpos:5d}, {ypos:5d}) with value {val:7.2f}'\n ''.format(**peak))\n\n def stop_iteration(self):\n \"\"\"Criteria to stop the iteration process.\"\"\"\n max_significance = max([_['val'] for _ in self.peaks])\n if max_significance < self.significance_threshold:\n log.debug('Max peak significance of {0:7.2f} is smaller than detection threshold {1:7.2f}'\n ''.format(max_significance, self.significance_threshold))\n log.debug('Stopping iteration.')\n return True\n else:\n return False\n\n def guess_source_parameters(self):\n \"\"\"Guess source start parameters for the fit.\n\n At the moment take the position and scale of the maximum residual peak\n and compute the excess within a circle around that position.\n \"\"\"\n log.debug('Guessing Gauss source parameters:')\n\n # Find the scale with the most significant peak\n peak = self.peaks[0]\n for _ in range(1, len(self.scales)):\n if self.peaks[_]['val'] > peak['val']:\n peak = self.peaks[_]\n\n source = dict()\n source['xpos'], source['ypos'] = peak['xpos'], peak['ypos']\n # TODO: introduce rough scale factor disk -> gauss here\n SIGMA_SCALE_FACTOR = 1\n source['sigma'] = SIGMA_SCALE_FACTOR * peak['scale']\n log.debug('xpos: {xpos}'.format(**source))\n log.debug('ypos: {ypos}'.format(**source))\n log.debug('sigma: {sigma}'.format(**source))\n source['flux'] = self.estimate_flux(source)\n self.sources_guess.append(source)\n\n def fit_source_parameters(self):\n \"\"\"Fit source parameters using the guess as start values.\n\n For this prototype we simply roll our own using iminuit;\n this should probably be changed to astropy or Sherpa.\n \"\"\"\n log.debug('Fitting source parameters')\n from iminuit import Minuit\n\n def fit_stat(xpos, ypos, sigma, flux):\n \"\"\"Define CASH fit statistic for Gauss model\"\"\"\n data = self.maps['counts']\n # Note: No need to re-compute excess model for all previous sources,\n # that is already contained in the 
background in iter_maps.\n background = self.iter_maps['background']\n sources = [dict(xpos=xpos, ypos=ypos, sigma=sigma, flux=flux)]\n model = background + self.model_excess(sources)\n cash = stats.cash(data, model).sum()\n return cash\n\n source = self.sources_guess[-1]\n log.debug('Source parameters before fit: {0}'.format(source))\n pars = source.copy()\n pars['error_xpos'] = 0.01\n pars['error_ypos'] = 0.01\n pars['error_flux'] = 0.1 * source['flux']\n pars['error_sigma'] = 0.1 * source['sigma']\n SIGMA_LIMITS = (0.01, 1e6)\n pars['limit_sigma'] = SIGMA_LIMITS\n minuit = Minuit(fit_stat, pedantic=False, print_level=1, **pars)\n # minuit.print_initial_param()\n minuit.migrad(ncall=self.max_ncall)\n\n source = minuit.values\n log.debug('Source parameters after fit: {0}'.format(source))\n\n if not minuit.migrad_ok():\n # If fit doesn't converge we simply abort\n # TODO: should we use exceptions here or return False as signal?\n minuit.print_fmin()\n raise FitFailedError\n else:\n # Store best-fit source parameters\n self.sources.append(source)\n\n def estimate_flux(self, source, method='sum_and_divide'):\n \"\"\"Estimate flux in a circular region around the source.\n\n Note: It's not clear which is the better flux estimate.\n\n * ``method == 'sum_and_divide'``::\n\n flux = (counts.sum() - background.sum()) / exposure.mean()\n\n * ``method = 'divide_and_sum'``::\n\n flux = ((counts - background) / exposure).sum()\n \"\"\"\n log.debug('Estimating flux')\n SOURCE_RADIUS_FACTOR = 2\n radius = SOURCE_RADIUS_FACTOR * source['sigma']\n r2 = ((self.maps['x'] - source['xpos']) ** 2 +\n (self.maps['y'] - source['ypos']) ** 2)\n mask = (r2 < radius ** 2)\n npix = mask.sum()\n if method == 'sum_and_divide':\n counts = self.maps['counts'][mask].sum()\n background = self.iter_maps['background'][mask].sum()\n # Note: exposure is not per pixel.\n # It has units m^2 s TeV\n exposure = self.maps['exposure'][mask].mean()\n excess = counts - background\n # TODO: check if true:\n # Flux is differential flux at 1 TeV in units m^-2 s^-1 TeV^-1\n # Or is it integral flux above 1 TeV in units of m^-2 s^-1?\n flux = excess / exposure\n elif method == 'divide_and_sum':\n counts = self.maps['counts'][mask].sum()\n background = self.iter_maps['background'][mask].sum()\n exposure = self.maps['exposure'][mask].mean()\n excess_image = self.maps['counts'] - self.iter_maps['background']\n excess = excess_image[mask].sum()\n flux_image = (self.maps['counts'] - self.iter_maps['background']) / self.maps['exposure']\n flux = flux_image[mask].sum()\n log.debug('Flux estimation for source region radius: {0}'.format(radius))\n log.debug('npix: {0}'.format(npix))\n log.debug('counts: {0}'.format(counts))\n log.debug('background: {0}'.format(background))\n log.debug('excess: {0}'.format(excess))\n log.debug('exposure: {0}'.format(exposure))\n log.debug('flux: {0}'.format(flux))\n return flux\n\n def save_fits(self, filename):\n \"\"\"Save source catalog to FITS file.\"\"\"\n log.info('Writing source detections in FITS format to {0}'.format(filename))\n # TODO\n\n def save_regions(self, filename, selection='fit'):\n \"\"\"Save ds9 region file.\"\"\"\n log.info('Writing source detections in ds9 region format to {0}'.format(filename))\n if selection == 'fit':\n sources = self.sources\n color = 'green'\n elif selection == 'guess':\n sources = self.sources_guess\n color = 'magenta'\n else:\n raise ValueError('Unknown selection: {0}'.format(selection))\n with open(filename, 'w') as outfile:\n outfile.write('image\\n')\n for ii, source 
in enumerate(sources):\n fmt = 'circle({xpos:3.3f},{ypos:3.3f},{radius:3.3f}) # text=\"{name}\" color={color}\\n'\n data = dict(xpos=source['xpos'], ypos=source['ypos'])\n N_SIGMA = 3\n data['radius'] = N_SIGMA * source['sigma']\n data['name'] = 'Source {0}'.format(ii)\n data['color'] = color\n text = fmt.format(**data)\n outfile.write(text)\n\n def save_json(self, filename):\n \"\"\"Save source catalog to JSON file.\"\"\"\n log.info('Writing source detections in JSON format to {0}'.format(filename))\n import json\n data = dict(sources=self.sources, sources_guess=self.sources_guess)\n # Note: data can contain np.float32 values, which are not JSON\n # serializable; `default=float` converts them on the fly.\n with open(filename, 'w') as outfile:\n json.dump(data, outfile, indent=4, default=float)\n" ]
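For orientation, steps 2-3 of the algorithm described in the docstring above (disk-correlated significance maps on several scales, largest peak as seed) can be sketched in a few lines of plain numpy/scipy. This is a minimal illustration only, not the gammapy implementation: it approximates `disk_correlate` with a `scipy.ndimage.convolve` against a binary disk kernel and uses the simple `(counts - background) / sqrt(background)` significance estimate in place of `gammapy.stats.significance`; `disk_kernel` and `peak_on_scales` are hypothetical helper names.

```python
# Sketch of the significance-map / peak-finding step, under the
# assumptions stated above (simple significance, convolution-based
# disk correlation). Not the gammapy code.
import numpy as np
from scipy.ndimage import convolve


def disk_kernel(radius):
    """Binary disk kernel with the given pixel radius."""
    y, x = np.indices((2 * radius + 1, 2 * radius + 1)) - radius
    return (x ** 2 + y ** 2 <= radius ** 2).astype(np.float64)


def peak_on_scales(counts, background, scales):
    """Largest significance peak on any scale: (xpos, ypos, scale, significance)."""
    best = None
    for scale in scales:
        kernel = disk_kernel(scale)
        c = convolve(counts, kernel)         # disk-correlated counts
        b = convolve(background, kernel)     # disk-correlated background
        significance = (c - b) / np.sqrt(b)  # simple significance estimate
        # Set Inf / NaN pixels to -inf so they are ignored by argmax
        significance[~np.isfinite(significance)] = -np.inf
        y, x = np.unravel_index(np.argmax(significance), significance.shape)
        if best is None or significance[y, x] > best[3]:
            best = (x, y, scale, significance[y, x])
    return best
```

With `counts` and `background` as 2D numpy arrays, `peak_on_scales(counts, background, scales=[3, 5, 8])` yields the seed position and extension that step 4 of the algorithm would feed into the Gauss-model fit.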
[ [ "numpy.testing.utils.assert_allclose" ], [ "numpy.isnan", "numpy.testing.utils.assert_allclose" ], [ "numpy.testing.assert_allclose" ], [ "numpy.radians", "numpy.arcsin", "numpy.empty_like", "numpy.degrees", "numpy.arccos", "numpy.cos", "numpy.sin", "numpy.arctan2", "numpy.asanyarray", "numpy.histogram" ], [ "numpy.testing.assert_equal", "numpy.log10", "numpy.testing.assert_allclose" ], [ "numpy.testing.assert_allclose" ], [ "matplotlib.pyplot.tight_layout", "numpy.ones_like", "numpy.arange", "numpy.log10", "numpy.array", "numpy.meshgrid", "matplotlib.pyplot.figure" ], [ "numpy.nanargmax", "numpy.isfinite", "numpy.indices", "numpy.asanyarray", "numpy.zeros_like", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
owennewo/kfserving
[ "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "6aa83398ab03bfae822f36772757097bcb98b6ed", "6aa83398ab03bfae822f36772757097bcb98b6ed", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "89f73c87525b8e06ea799f69f2979c4ad272fcb3", "6aa83398ab03bfae822f36772757097bcb98b6ed", "6aa83398ab03bfae822f36772757097bcb98b6ed", "89f73c87525b8e06ea799f69f2979c4ad272fcb3" ]
[ "vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/linalg/linear_operator_kronecker.py", "docs/samples/explanation/alibi/alibi_helper.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/contrib/distribute/python/examples/simple_estimator_example.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/sparse_ops_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/data/experimental/benchmarks/csv_dataset_benchmark.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/autograph/converters/control_flow_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/data/kernel_tests/take_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/contrib/tensorrt/test/base_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/data/kernel_tests/flat_map_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/kernel_tests/linalg_grad_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/keras/engine/training_distributed.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/data/experimental/__init__.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/rnn_cell_impl.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/contrib/kafka/python/kernel_tests/kafka_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/autograph/pyct/pretty_printer_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/python/profiler/model_analyzer_test.py", "vendor/github.com/tensorflow/tensorflow/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Construct the Kronecker product of one or more `LinearOperators`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl as linalg\nfrom tensorflow.python.ops.linalg import linear_operator\nfrom tensorflow.python.util.tf_export import tf_export\n\n__all__ = [\"LinearOperatorKronecker\"]\n\n\ndef _vec(x):\n \"\"\"Stacks column of matrix to form a single column.\"\"\"\n return array_ops.reshape(\n array_ops.matrix_transpose(x),\n array_ops.concat(\n [array_ops.shape(x)[:-2], [-1]], axis=0))\n\n\ndef _unvec_by(y, num_col):\n \"\"\"Unstack vector to form a matrix, with a specified amount of columns.\"\"\"\n return array_ops.matrix_transpose(\n array_ops.reshape(\n y,\n array_ops.concat(\n [array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))\n\n\ndef _rotate_last_dim(x, rotate_right=False):\n \"\"\"Rotate the last dimension either left or right.\"\"\"\n ndims = array_ops.rank(x)\n if rotate_right:\n transpose_perm = array_ops.concat(\n [[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)\n else:\n transpose_perm = array_ops.concat(\n [math_ops.range(1, ndims), [0]], axis=0)\n return array_ops.transpose(x, transpose_perm)\n\n\n@tf_export(\"linalg.LinearOperatorKronecker\")\nclass LinearOperatorKronecker(linear_operator.LinearOperator):\n \"\"\"Kronecker product between two `LinearOperators`.\n\n This operator composes one or more linear operators `[op1,...,opJ]`,\n building a new `LinearOperator` representing the Kronecker product:\n `op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is\n associative).\n\n If `opj` has shape `batch_shape_j` + [M_j, N_j`, then the composed operator\n will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,\n where the product is over all operators.\n\n ```python\n # Create a 4 x 4 linear operator composed of two 2 x 2 operators.\n operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])\n operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])\n operator = LinearOperatorKronecker([operator_1, operator_2])\n\n operator.to_dense()\n ==> [[1., 2., 0., 0.],\n [3., 4., 0., 0.],\n [2., 4., 1., 2.],\n [6., 8., 3., 4.]]\n\n operator.shape\n ==> [4, 4]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... 
Shape [4, 2] Tensor\n operator.matmul(x)\n ==> Shape [4, 2] Tensor\n\n # Create a [2, 3] batch of 4 x 5 linear operators.\n matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])\n operator_45 = LinearOperatorFullMatrix(matrix_45)\n\n # Create a [2, 3] batch of 5 x 6 linear operators.\n matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])\n operator_56 = LinearOperatorFullMatrix(matrix_56)\n\n # Compose to create a [2, 3] batch of 20 x 30 operators.\n operator_large = LinearOperatorKronecker([operator_45, operator_56])\n\n # Create a shape [2, 3, 30, 2] vector.\n x = tf.random_normal(shape=[2, 3, 30, 2])\n operator_large.matmul(x)\n ==> Shape [2, 3, 20, 2] Tensor\n ```\n\n #### Performance\n\n The performance of `LinearOperatorKronecker` on any operation is equal to\n the sum of the individual operators' operations.\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning:\n\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n operators,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=None):\n r\"\"\"Initialize a `LinearOperatorKronecker`.\n\n `LinearOperatorKronecker` is initialized with a list of operators\n `[op_1,...,op_J]`.\n\n Args:\n operators: Iterable of `LinearOperator` objects, each with\n the same `dtype` and composable shape, representing the Kronecker\n factors.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix\\\n #Extension_for_non_symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`. Default is the individual\n operators' names joined with `_x_`.\n\n Raises:\n TypeError: If all operators do not have the same `dtype`.\n ValueError: If `operators` is empty.\n \"\"\"\n # Validate operators.\n check_ops.assert_proper_iterable(operators)\n operators = list(operators)\n if not operators:\n raise ValueError(\n \"Expected a list of >=1 operators. Found: %s\" % operators)\n self._operators = operators\n\n # Validate dtype.\n dtype = operators[0].dtype\n for operator in operators:\n if operator.dtype != dtype:\n name_type = (str((o.name, o.dtype)) for o in operators)\n raise TypeError(\n \"Expected all operators to have the same dtype. 
Found %s\"\n % \" \".join(name_type))\n\n # Auto-set and check hints.\n # A Kronecker product is invertible, if and only if all factors are\n # invertible.\n if all(operator.is_non_singular for operator in operators):\n if is_non_singular is False:\n raise ValueError(\n \"The Kronecker product of non-singular operators is always \"\n \"non-singular.\")\n is_non_singular = True\n\n if all(operator.is_self_adjoint for operator in operators):\n if is_self_adjoint is False:\n raise ValueError(\n \"The Kronecker product of self-adjoint operators is always \"\n \"self-adjoint.\")\n is_self_adjoint = True\n\n # The eigenvalues of a Kronecker product are equal to the products of eigen\n # values of the corresponding factors.\n if all(operator.is_positive_definite for operator in operators):\n if is_positive_definite is False:\n raise ValueError(\"The Kronecker product of positive-definite operators \"\n \"is always positive-definite.\")\n is_positive_definite = True\n\n # Initialization.\n graph_parents = []\n for operator in operators:\n graph_parents.extend(operator.graph_parents)\n\n if name is None:\n name = operators[0].name\n for operator in operators[1:]:\n name += \"_x_\" + operator.name\n with ops.name_scope(name, values=graph_parents):\n super(LinearOperatorKronecker, self).__init__(\n dtype=dtype,\n graph_parents=graph_parents,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n @property\n def operators(self):\n return self._operators\n\n def _shape(self):\n # Get final matrix shape.\n domain_dimension = self.operators[0].domain_dimension\n for operator in self.operators[1:]:\n domain_dimension *= operator.domain_dimension\n\n range_dimension = self.operators[0].range_dimension\n for operator in self.operators[1:]:\n range_dimension *= operator.range_dimension\n\n matrix_shape = tensor_shape.TensorShape([\n range_dimension, domain_dimension])\n\n # Get broadcast batch shape.\n # broadcast_shape checks for compatibility.\n batch_shape = self.operators[0].batch_shape\n for operator in self.operators[1:]:\n batch_shape = common_shapes.broadcast_shape(\n batch_shape, operator.batch_shape)\n\n return batch_shape.concatenate(matrix_shape)\n\n def _shape_tensor(self):\n domain_dimension = self.operators[0].domain_dimension_tensor()\n for operator in self.operators[1:]:\n domain_dimension *= operator.domain_dimension_tensor()\n\n range_dimension = self.operators[0].range_dimension_tensor()\n for operator in self.operators[1:]:\n range_dimension *= operator.range_dimension_tensor()\n\n matrix_shape = [range_dimension, domain_dimension]\n\n # Get broadcast batch shape.\n # broadcast_shape checks for compatibility.\n batch_shape = self.operators[0].batch_shape_tensor()\n for operator in self.operators[1:]:\n batch_shape = array_ops.broadcast_dynamic_shape(\n batch_shape, operator.batch_shape_tensor())\n\n return array_ops.concat((batch_shape, matrix_shape), 0)\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n # Here we heavily rely on Roth's column Lemma [1]:\n # (A x B) * vec X = vec BXA^T,\n # where vec stacks all the columns of the matrix under each other. In our\n # case, x represents a batch of vec X (i.e. we think of x as a batch of\n # column vectors, rather than a matrix). 
Each member of the batch can be\n # reshaped to a matrix (hence we get a batch of matrices).\n # We can iteratively apply this lemma by noting that if B is a Kronecker\n # product, then we can apply the lemma again.\n\n # [1] W. E. Roth, \"On direct product matrices,\"\n # Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,\n # 1934\n\n # Efficiency\n\n # Naively doing the Kronecker product, by calculating the dense matrix and\n # applying it will can take cubic time in the size of domain_dimension\n # (assuming a square matrix). The other issue is that calculating the dense\n # matrix can be prohibitively expensive, in that it can take a large amount\n # of memory.\n #\n # This implementation avoids this memory blow up by only computing matmuls\n # with the factors. In this way, we don't have to realize the dense matrix.\n # In terms of complexity, if we have Kronecker Factors of size:\n # (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \\prod n_i, and we\n # have as input a [N, M] matrix, the naive approach would take O(N^2 M).\n # With this approach (ignoring reshaping of tensors and transposes for now),\n # the time complexity can be O(M * (\\sum n_i) * N). There is also the\n # benefit of batched multiplication (In this example, the batch size is\n # roughly M * N) so this can be much faster. However, not factored in are\n # the costs of the several transposing of tensors, which can affect cache\n # behavior.\n\n # Below we document the shape manipulation for adjoint=False,\n # adjoint_arg=False, but the general case of different adjoints is still\n # handled.\n\n if adjoint_arg:\n x = linalg.adjoint(x)\n\n # Always add a batch dimension to enable broadcasting to work.\n batch_shape = array_ops.concat(\n [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)\n x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)\n\n # x has shape [B, R, C], where B represent some number of batch dimensions,\n # R represents the number of rows, and C represents the number of columns.\n # In order to apply Roth's column lemma, we need to operate on a batch of\n # column vectors, so we reshape into a batch of column vectors. We put it\n # at the front to ensure that broadcasting between operators to the batch\n # dimensions B still works.\n output = _rotate_last_dim(x, rotate_right=True)\n\n # Also expand the shape to be [A, C, B, R]. The first dimension will be\n # used to accumulate dimensions from each operator matmul.\n output = output[array_ops.newaxis, ...]\n\n # In this loop, A is going to refer to the value of the accumulated\n # dimension. A = 1 at the start, and will end up being self.range_dimension.\n # V will refer to the last dimension. 
V = R at the start, and will end up\n # being 1 in the end.\n for operator in self.operators[:-1]:\n # Reshape output from [A, C, B, V] to be\n # [A, C, B, V / op.domain_dimension, op.domain_dimension]\n if adjoint:\n operator_dimension = operator.range_dimension_tensor()\n else:\n operator_dimension = operator.domain_dimension_tensor()\n\n output = _unvec_by(output, operator_dimension)\n\n # We are computing (XA^T) = (AX^T)^T.\n # output has [A, C, B, V / op.domain_dimension, op.domain_dimension],\n # which is being converted to:\n # [A, C, B, V / op.domain_dimension, op.range_dimension]\n output = array_ops.matrix_transpose(output)\n output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)\n output = array_ops.matrix_transpose(output)\n # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=True)\n\n # After the loop, we will have\n # A = self.range_dimension / op[-1].range_dimension\n # V = op[-1].domain_dimension\n\n # We convert that using matvec to get:\n # [A, C, B, op[-1].range_dimension]\n output = self.operators[-1].matvec(output, adjoint=adjoint)\n # Rearrange shape to be [B1, ... Bn, self.range_dimension, C]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=False)\n\n if x.shape.is_fully_defined():\n column_dim = x.shape[-1]\n broadcast_batch_shape = common_shapes.broadcast_shape(\n x.shape[:-2], self.batch_shape)\n if adjoint:\n matrix_dimensions = [self.domain_dimension, column_dim]\n else:\n matrix_dimensions = [self.range_dimension, column_dim]\n\n output.set_shape(broadcast_batch_shape.concatenate(\n matrix_dimensions))\n\n return output\n\n def _determinant(self):\n # Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m\n # matrix, and X2 is an n x n matrix. We can iteratively apply this property\n # to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the\n # domain dimension of all operators, then we have:\n # |X1 x X2 x X3 ...| =\n # |X1| ** (T / m) * |X2 x X3 ... | ** m =\n # |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =\n # |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... 
| ** (m * n)\n # And by doing induction we have product(|X_i| ** (T / dim(X_i))).\n total = self.domain_dimension_tensor()\n determinant = 1.\n for operator in self.operators:\n determinant *= operator.determinant() ** math_ops.cast(\n total / operator.domain_dimension_tensor(),\n dtype=operator.dtype)\n return determinant\n\n def _log_abs_determinant(self):\n # This will be sum((total / dim(x_i)) * log |X_i|)\n total = self.domain_dimension_tensor()\n log_abs_det = 0.\n for operator in self.operators:\n log_abs_det += operator.log_abs_determinant() * math_ops.cast(\n total / operator.domain_dimension_tensor(),\n dtype=operator.dtype)\n return log_abs_det\n\n def _trace(self):\n # tr(A x B) = tr(A) * tr(B)\n trace = 1.\n for operator in self.operators:\n trace *= operator.trace()\n return trace\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n # Here we follow the same use of Roth's column lemma as in `matmul`, with\n # the key difference that we replace all `matmul` instances with `solve`.\n # This follows from the property that inv(A x B) = inv(A) x inv(B).\n\n # Below we document the shape manipulation for adjoint=False,\n # adjoint_arg=False, but the general case of different adjoints is still\n # handled.\n\n if adjoint_arg:\n rhs = linalg.adjoint(rhs)\n\n # Always add a batch dimension to enable broadcasting to work.\n batch_shape = array_ops.concat(\n [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)\n rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)\n\n # rhs has shape [B, R, C], where B represent some number of batch\n # dimensions,\n # R represents the number of rows, and C represents the number of columns.\n # In order to apply Roth's column lemma, we need to operate on a batch of\n # column vectors, so we reshape into a batch of column vectors. We put it\n # at the front to ensure that broadcasting between operators to the batch\n # dimensions B still works.\n output = _rotate_last_dim(rhs, rotate_right=True)\n\n # Also expand the shape to be [A, C, B, R]. The first dimension will be\n # used to accumulate dimensions from each operator matmul.\n output = output[array_ops.newaxis, ...]\n\n # In this loop, A is going to refer to the value of the accumulated\n # dimension. A = 1 at the start, and will end up being self.range_dimension.\n # V will refer to the last dimension. 
V = R at the start, and will end up\n # being 1 in the end.\n for operator in self.operators[:-1]:\n # Reshape output from [A, C, B, V] to be\n # [A, C, B, V / op.domain_dimension, op.domain_dimension]\n if adjoint:\n operator_dimension = operator.range_dimension_tensor()\n else:\n operator_dimension = operator.domain_dimension_tensor()\n\n output = _unvec_by(output, operator_dimension)\n\n # We are computing (XA^-1^T) = (A^-1 X^T)^T.\n # output has [A, C, B, V / op.domain_dimension, op.domain_dimension],\n # which is being converted to:\n # [A, C, B, V / op.domain_dimension, op.range_dimension]\n output = array_ops.matrix_transpose(output)\n output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)\n output = array_ops.matrix_transpose(output)\n # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=True)\n\n # After the loop, we will have\n # A = self.range_dimension / op[-1].range_dimension\n # V = op[-1].domain_dimension\n\n # We convert that using matvec to get:\n # [A, C, B, op[-1].range_dimension]\n output = self.operators[-1].solvevec(output, adjoint=adjoint)\n # Rearrange shape to be [B1, ... Bn, self.range_dimension, C]\n output = _rotate_last_dim(output, rotate_right=False)\n output = _vec(output)\n output = _rotate_last_dim(output, rotate_right=False)\n\n if rhs.shape.is_fully_defined():\n column_dim = rhs.shape[-1]\n broadcast_batch_shape = common_shapes.broadcast_shape(\n rhs.shape[:-2], self.batch_shape)\n if adjoint:\n matrix_dimensions = [self.domain_dimension, column_dim]\n else:\n matrix_dimensions = [self.range_dimension, column_dim]\n\n output.set_shape(broadcast_batch_shape.concatenate(\n matrix_dimensions))\n\n return output\n\n def _diag_part(self):\n diag_part = self.operators[0].diag_part()\n for operator in self.operators[1:]:\n diag_part = diag_part[..., :, array_ops.newaxis]\n op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]\n diag_part *= op_diag_part\n diag_part = array_ops.reshape(\n diag_part,\n shape=array_ops.concat(\n [array_ops.shape(diag_part)[:-2], [-1]], axis=0))\n if self.range_dimension > self.domain_dimension:\n diag_dimension = self.domain_dimension\n else:\n diag_dimension = self.range_dimension\n diag_part.set_shape(\n self.batch_shape.concatenate(diag_dimension))\n return diag_part\n\n def _to_dense(self):\n product = self.operators[0].to_dense()\n for operator in self.operators[1:]:\n # Product has shape [B, R1, 1, C1].\n product = product[\n ..., :, array_ops.newaxis, :, array_ops.newaxis]\n # Operator has shape [B, 1, R2, 1, C2].\n op_to_mul = operator.to_dense()[\n ..., array_ops.newaxis, :, array_ops.newaxis, :]\n # This is now [B, R1, R2, C1, C2].\n product *= op_to_mul\n # Now merge together dimensions to get [B, R1 * R2, C1 * C2].\n product = array_ops.reshape(\n product,\n shape=array_ops.concat(\n [array_ops.shape(product)[:-4],\n [array_ops.shape(product)[-4] * array_ops.shape(product)[-3],\n array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]\n ], axis=0))\n product.set_shape(self.shape)\n return product\n\n def _assert_non_singular(self):\n if all(operator.is_square for operator in self.operators):\n asserts = [operator.assert_non_singular() for operator in self.operators]\n return control_flow_ops.group(asserts)\n else:\n raise errors.InvalidArgumentError(\n node_def=None, op=None, message=\"All Kronecker factors must be \"\n \"square for the product 
to be invertible.\")\n\n def _assert_self_adjoint(self):\n if all(operator.is_square for operator in self.operators):\n asserts = [operator.assert_self_adjoint() for operator in self.operators]\n return control_flow_ops.group(asserts)\n else:\n raise errors.InvalidArgumentError(\n node_def=None, op=None, message=\"All Kronecker factors must be \"\n \"square for the product to be self adjoint.\")\n", "import numpy as np\nimport requests\nfrom alibi.datasets import fetch_adult\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom IPython.display import display, Markdown, display\n\ndef getFeatures(X,cmap):\n return pd.DataFrame(X).replace(cmap).values.squeeze().tolist()\n\ndef predict(X, name, ds, svc_hostname, cluster_ip):\n formData = {\n 'instances': X\n }\n headers = {}\n headers[\"Host\"] = svc_hostname\n res = requests.post('http://'+cluster_ip+'/v1/models/'+name+':predict', json=formData, headers=headers)\n if res.status_code == 200:\n return ds.target_names[np.array(res.json()[\"predictions\"])[0]]\n else:\n print(\"Failed with \",res.status_code)\n return []\n\ndef explain(X, name, svc_hostname, cluster_ip):\n formData = {\n 'instances': X\n }\n headers = {}\n headers[\"Host\"] = svc_hostname\n res = requests.post('http://'+cluster_ip+'/v1/models/'+name+':explain', json=formData, headers=headers)\n if res.status_code == 200:\n return res.json()\n else:\n print(\"Failed with \",res.status_code)\n return []\n\ndef show_bar(X, labels, title):\n fig = go.Figure(go.Bar(x=X,y=labels,orientation='h',width=[0.5]))\n fig.update_layout(autosize=False,width=700,height=300,\n xaxis=dict(range=[0, 1]),\n title_text=title,\n font=dict(family=\"Courier New, monospace\",size=18,color=\"#7f7f7f\"\n ))\n fig.show()\n\n\ndef show_feature_coverage(exp):\n data = []\n for idx, name in enumerate(exp[\"anchor\"]):\n data.append(go.Bar(name=name, x=[\"coverage\"], y=[exp['raw']['coverage'][idx]]))\n fig = go.Figure(data=data)\n fig.update_layout(yaxis=dict(range=[0, 1]))\n fig.show()\n\ndef show_anchors(names):\n display(Markdown('# Explanation:'))\n display(Markdown('## {}'.format(names)))\n\ndef show_examples(exp,fidx,ds,covered=True):\n if covered:\n cname = 'covered_true'\n display(Markdown(\"## Examples covered by Anchors: {}\".format(exp['anchor'][0:fidx+1])))\n else:\n cname = 'covered_false'\n display(Markdown(\"## Examples not covered by Anchors: {}\".format(exp['anchor'][0:fidx+1])))\n if \"feature_names\" in ds:\n return pd.DataFrame(exp['raw']['examples'][fidx][cname],columns=ds.feature_names)\n else:\n return pd.DataFrame(exp['raw']['examples'][fidx][cname])\n\ndef show_prediction(prediction):\n display(Markdown('## Prediction: {}'.format(prediction)))\n\ndef show_row(X,ds):\n display(pd.DataFrame(X,columns=ds.feature_names))\n\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simple example to test the a DistributionStrategy with Estimators.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow.python.keras import metrics as metrics_module\n\n\ndef build_model_fn_optimizer():\n \"\"\"Simple model_fn with optimizer.\"\"\"\n # TODO(anjalisridhar): Move this inside the model_fn once OptimizerV2 is\n # done?\n optimizer = tf.train.GradientDescentOptimizer(0.2)\n\n def model_fn(features, labels, mode): # pylint: disable=unused-argument\n \"\"\"model_fn which uses a single unit Dense layer.\"\"\"\n # You can also use the Flatten layer if you want to test a model without any\n # weights.\n layer = tf.layers.Dense(1, use_bias=True)\n logits = layer(features)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\"logits\": logits}\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n def loss_fn():\n y = tf.reshape(logits, []) - tf.constant(1.)\n return y * y\n\n if mode == tf.estimator.ModeKeys.EVAL:\n acc_obj = metrics_module.BinaryAccuracy()\n acc_obj.update_state(labels, labels)\n return tf.estimator.EstimatorSpec(\n mode, loss=loss_fn(), eval_metric_ops={\"Accuracy\": acc_obj})\n\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n global_step = tf.train.get_global_step()\n train_op = optimizer.minimize(loss_fn(), global_step=global_step)\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn(), train_op=train_op)\n\n return model_fn\n\n\ndef main(_):\n distribution = tf.contrib.distribute.MirroredStrategy(\n [\"/device:GPU:0\", \"/device:GPU:1\"])\n config = tf.estimator.RunConfig(train_distribute=distribution,\n eval_distribute=distribution)\n # Since there are 2 devices and 10 samples, we set steps=5.\n steps = 5\n\n def train_input_fn():\n features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)\n labels = tf.data.Dataset.from_tensors([1.]).repeat(10)\n return tf.data.Dataset.zip((features, labels))\n\n estimator = tf.estimator.Estimator(\n model_fn=build_model_fn_optimizer(), config=config)\n estimator.train(input_fn=train_input_fn, steps=steps)\n\n def eval_input_fn():\n features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)\n labels = tf.data.Dataset.from_tensors([1.]).repeat(10)\n return tf.data.Dataset.zip((features, labels))\n\n eval_result = estimator.evaluate(input_fn=eval_input_fn, steps=steps)\n print(\"Eval result: {}\".format(eval_result))\n assert eval_result[\"Accuracy\"] == 1.0\n\n def predict_input_fn():\n predict_features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)\n return predict_features\n\n predictions = estimator.predict(input_fn=predict_input_fn)\n # TODO(anjalsridhar): This returns a generator object, figure out how to get\n # meaningful results here.\n print(\"Prediction results: {}\".format(predictions))\n\n\nif __name__ == 
\"__main__\":\n tf.app.run()\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for sparse ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import googletest\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass SparseOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def testSparseEye(self):\n def test_one(n, m, as_tensors):\n expected = np.eye(n, m)\n if as_tensors:\n m = constant_op.constant(m)\n n = constant_op.constant(n)\n s = sparse_ops.sparse_eye(n, m)\n d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)\n self.assertAllEqual(self.evaluate(d), expected)\n\n for n in range(2, 10, 2):\n for m in range(2, 10, 2):\n # Test with n and m as both constants and tensors.\n test_one(n, m, True)\n test_one(n, m, False)\n\n def testSparseExpandDims(self):\n for rank in range(1, 4):\n # Create a dummy input. 
When rank=3, shape=[2, 4, 6].\n shape = np.arange(1, rank + 1) * 2\n before = np.arange(np.prod(shape)).reshape(shape)\n\n # Make entries sparse.\n before *= np.random.binomial(1, .2, before.shape)\n dense_shape = before.shape\n indices = np.array(np.where(before)).T\n values = before[before != 0]\n\n # Try every possible valid value of axis.\n for axis in range(-rank - 1, rank):\n expected_after = np.expand_dims(before, axis)\n\n for axis_as_tensor in [False, True]:\n dense_shape_t = constant_op.constant(dense_shape, dtype=dtypes.int64)\n indices_t = constant_op.constant(indices)\n values_t = constant_op.constant(values)\n before_t = sparse_tensor.SparseTensor(\n indices=indices_t, values=values_t, dense_shape=dense_shape_t)\n\n if axis_as_tensor:\n axis = constant_op.constant(axis)\n\n s = sparse_ops.sparse_expand_dims(before_t, axis)\n d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)\n self.assertAllEqual(self.evaluate(d), expected_after)\n\n @parameterized.parameters([\n (math_ops.abs, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 3.0, 4.0]),\n (math_ops.negative, [1.0, -1.0, 3.0, -4.0], [-1.0, 1.0, -3.0, 4.0]),\n (math_ops.sign, [3.0, -2.0, 0.0, -4.0], [1.0, -1.0, 0.0, -1.0]),\n (math_ops.square, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 9.0, 16.0]),\n ])\n def testUnarySparseDispatch(self, op, values, expected):\n st = sparse_tensor.SparseTensor(\n indices=[[0, 0], [0, 1], [2, 0], [2, 4]],\n values=values,\n dense_shape=[3, 6])\n result = op(st)\n result_value = self.evaluate(result)\n self.assertAllEqual(result_value.indices, st.indices)\n self.assertAllEqual(result_value.values, expected)\n self.assertAllEqual(result_value.dense_shape, st.dense_shape)\n\n\nif __name__ == '__main__':\n googletest.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks for `tf.data.experimental.CsvDataset`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport string\nimport tempfile\nimport time\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.experimental.ops import readers\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import readers as core_readers\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import test\n\n\nclass CsvDatasetBenchmark(test.Benchmark):\n \"\"\"Benchmarks for `tf.data.experimental.CsvDataset`.\"\"\"\n\n FLOAT_VAL = '1.23456E12'\n STR_VAL = string.ascii_letters * 10\n\n def _setUp(self, str_val):\n # Since this isn't test.TestCase, have to manually create a test dir\n gfile.MakeDirs(googletest.GetTempDir())\n self._temp_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())\n\n self._num_cols = [4, 64, 256]\n self._num_per_iter = 5000\n self._filenames = []\n for n in self._num_cols:\n fn = os.path.join(self._temp_dir, 'file%d.csv' % n)\n with open(fn, 'wb') as f:\n # Just write 100 rows and use `repeat`... Assumes the cost\n # of creating an iterator is not significant\n row = ','.join([str_val for _ in range(n)])\n f.write('\\n'.join([row for _ in range(100)]))\n self._filenames.append(fn)\n\n def _tearDown(self):\n gfile.DeleteRecursively(self._temp_dir)\n\n def _runBenchmark(self, dataset, num_cols, prefix):\n dataset = dataset.skip(self._num_per_iter - 1)\n deltas = []\n for _ in range(10):\n next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()\n with session.Session() as sess:\n start = time.time()\n # NOTE: This depends on the underlying implementation of skip, to have\n # the net effect of calling `GetNext` num_per_iter times on the\n # input dataset. We do it this way (instead of a python for loop, or\n # batching N inputs in one iter) so that the overhead from session.run\n # or batch doesn't dominate. 
If we eventually optimize skip, this has\n # to change.\n sess.run(next_element)\n end = time.time()\n deltas.append(end - start)\n # Median wall time per CSV record read and decoded\n median_wall_time = np.median(deltas) / self._num_per_iter\n print('%s num_cols: %d Median wall time: %f' % (prefix, num_cols,\n median_wall_time))\n self.report_benchmark(\n iters=self._num_per_iter,\n wall_time=median_wall_time,\n name='%s_with_cols_%d' % (prefix, num_cols))\n\n def benchmarkMapWithFloats(self):\n self._setUp(self.FLOAT_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [[0.0]] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop\n self._runBenchmark(dataset, num_cols, 'csv_float_map_decode_csv')\n self._tearDown()\n\n def benchmarkMapWithStrings(self):\n self._setUp(self.STR_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [['']] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop\n self._runBenchmark(dataset, num_cols, 'csv_strings_map_decode_csv')\n self._tearDown()\n\n def benchmarkCsvDatasetWithFloats(self):\n self._setUp(self.FLOAT_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [[0.0]] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat() # pylint: disable=cell-var-from-loop\n self._runBenchmark(dataset, num_cols, 'csv_float_fused_dataset')\n self._tearDown()\n\n def benchmarkCsvDatasetWithStrings(self):\n self._setUp(self.STR_VAL)\n for i in range(len(self._filenames)):\n num_cols = self._num_cols[i]\n kwargs = {'record_defaults': [['']] * num_cols}\n dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()\n dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat() # pylint: disable=cell-var-from-loop\n self._runBenchmark(dataset, num_cols, 'csv_strings_fused_dataset')\n self._tearDown()\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for control_flow module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.autograph.converters import control_flow\nfrom tensorflow.python.autograph.core import converter_testing\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\nclass ControlFlowTest(converter_testing.TestCase):\n\n def assertTransformedResult(self, test_fn, inputs, expected):\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n with self.converted(test_fn, control_flow, {},\n constant_op.constant) as result:\n with self.cached_session() as sess:\n self.assertEqual(sess.run(result.test_fn(*inputs)), expected)\n\n @test_util.run_deprecated_v1\n def test_while_basic(self):\n\n def test_fn(n):\n i = 0\n s = 0\n while i < n:\n s += i\n i += 1\n return s, i, n\n\n self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))\n\n @test_util.run_deprecated_v1\n def test_while_nested(self):\n\n def test_fn(n):\n i = 0\n j = 0\n s = 0\n while i < n:\n while j < i:\n j += 3\n u = i + j # 'u' is not defined within the inner loop\n s += u\n i += 1\n j = 0\n return s, i, j, n\n\n self.assertTransformedResult(test_fn, constant_op.constant(5),\n (25, 5, 0, 5))\n\n @test_util.run_deprecated_v1\n def test_while_single_output(self):\n\n def test_fn(n):\n while n > 0:\n n -= 1\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(5), 0)\n\n def test_while_variable_defined_in_body(self):\n def bad_while_loop(n):\n while n > 0:\n n -= 1\n s = n\n return s\n\n node, ctx = self.prepare(bad_while_loop, {})\n with self.assertRaises(NameError):\n control_flow.transform(node, ctx)\n\n @test_util.run_deprecated_v1\n def test_if_basic(self):\n\n def test_fn(n):\n a = 0\n b = 0\n if n > 0:\n a = -n\n else:\n b = 2 * n\n return a, b\n\n self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))\n self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))\n\n @test_util.run_deprecated_v1\n def test_if_complex_outputs(self):\n\n class TestClass(object):\n\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\n def test_fn(n, obj):\n obj.a = 0\n obj.b = 0\n if n > 0:\n obj.a = -n\n else:\n obj.b = 2 * n\n return obj\n\n with self.converted(test_fn, control_flow, {}) as result:\n with self.cached_session() as sess:\n res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))\n self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))\n res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))\n self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))\n\n @test_util.run_deprecated_v1\n def 
test_if_single_output(self):\n\n def test_fn(n):\n if n > 0:\n n = -n\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(1), -1)\n\n @test_util.run_deprecated_v1\n def test_if_semi(self):\n\n def test_fn(n):\n if n > 0:\n n = 3\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(2), 3)\n self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)\n\n @test_util.run_deprecated_v1\n def test_if_local_var(self):\n\n def test_fn(n):\n if n > 0:\n b = 4\n n = b + 1\n return n\n\n self.assertTransformedResult(test_fn, constant_op.constant(1), 5)\n self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)\n\n @test_util.run_deprecated_v1\n def test_if_no_outputs(self):\n\n def test_fn(n):\n if n > 0:\n b = 4 # pylint:disable=unused-variable\n return n\n\n # Without side effect guards, the if statement will stage a cond,\n # but that will be pruned at execution.\n self.assertTransformedResult(test_fn, constant_op.constant(1), 1)\n self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)\n\n def test_if_imbalanced_outputs(self):\n\n def test_fn(n):\n if n > 0:\n b = 4\n return b\n\n node, ctx = self.prepare(test_fn, {})\n with self.assertRaises(transformer.AutographParseError):\n control_flow.transform(node, ctx)\n\n @test_util.run_deprecated_v1\n def test_simple_for(self):\n\n def test_fn(l):\n s1 = 0\n s2 = 0\n for e in l:\n s1 += e\n s2 += e * e\n return s1, s2\n\n self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))\n empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)\n self.assertTransformedResult(test_fn, empty_vector, (0, 0))\n\n @test_util.run_deprecated_v1\n def test_for_single_output(self):\n\n def test_fn(l):\n s = 0\n for e in l:\n s += e\n return s\n\n self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)\n empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)\n self.assertTransformedResult(test_fn, empty_vector, 0)\n\n def test_for_iterated_expression(self):\n\n eval_count = [0]\n\n def count_evals(x):\n eval_count[0] += 1\n return x\n\n def test_fn(n):\n s = 0\n for e in count_evals(range(n)):\n s += e\n return s\n\n ns = {'count_evals': count_evals}\n node, ctx = self.prepare(test_fn, ns)\n node = control_flow.transform(node, ctx)\n\n with self.compiled(node, ns) as result:\n self.assertEqual(result.test_fn(5), 10)\n self.assertEqual(eval_count[0], 1)\n\n def test_for_variable_defined_in_body(self):\n def bad_for_loop(n):\n for i in range(n):\n s = i\n return s\n\n node, ctx = self.prepare(bad_for_loop, {})\n with self.assertRaises(NameError):\n control_flow.transform(node, ctx)\n\n @test_util.run_deprecated_v1\n def test_for_tuple_unpacking(self):\n def test_fn(x_list):\n z = tf.constant(0) # pylint:disable=undefined-variable\n for i, x in enumerate(x_list):\n z = z + x + i\n return z\n\n self.assertTransformedResult(test_fn, [3, 3], 7)\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.take()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass TakeTest(test_base.DatasetTestBase):\n\n def testTakeTensorDataset(self):\n components = (np.arange(10),)\n\n def do_test(count):\n dataset = dataset_ops.Dataset.from_tensor_slices(components).take(count)\n self.assertEqual([c.shape[1:] for c in components],\n [shape for shape in dataset.output_shapes])\n num_output = min(count, 10) if count != -1 else 10\n self.assertDatasetProduces(\n dataset, [tuple(components[0][i:i + 1]) for i in range(num_output)])\n\n # Take fewer than input size\n do_test(4)\n\n # Take more than input size\n do_test(25)\n\n # Take all of input\n do_test(-1)\n\n # Take nothing\n do_test(0)\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic tests for TF-TensorRT integration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.contrib.tensorrt.python import trt_convert\nfrom tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.platform import test\n\n\nclass SimpleSingleEngineTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Create a graph containing single segment.\"\"\"\n # TODO(aaroey): test graph with different dtypes.\n dtype = dtypes.float32\n input_name = \"input\"\n input_dims = [100, 24, 24, 2]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtype, shape=[None] + input_dims[1:], name=input_name)\n with g.device(\"/GPU:0\"):\n conv_filter = constant_op.constant(\n [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],\n name=\"weights\",\n dtype=dtype)\n conv = nn.conv2d(\n input=inp,\n filter=conv_filter,\n strides=[1, 2, 2, 1],\n padding=\"SAME\",\n name=\"conv\")\n bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],\n name=\"bias\",\n dtype=dtype)\n added = nn.bias_add(conv, bias, name=\"bias_add\")\n relu = nn.relu(added, \"relu\")\n identity = array_ops.identity(relu, \"identity\")\n pool = nn_ops.max_pool(\n identity, [1, 2, 2, 1], [1, 2, 2, 1], \"VALID\", name=\"max_pool\")\n array_ops.squeeze(pool, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[(100, 6, 6, 6)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n \"TRTEngineOp_0\": [\n \"weights\", \"conv\", \"bias\", \"bias_add\", \"relu\", \"identity\",\n \"max_pool\"\n ]\n }\n\n\nclass SimpleMultiEnginesTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Create a graph containing multiple segment.\"\"\"\n # TODO(aaroey): test graph with different dtypes.\n dtype = dtypes.float32\n input_name = \"input\"\n input_dims = [100, 24, 24, 2]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtype, shape=input_dims, name=input_name)\n with g.device(\"/GPU:0\"):\n conv_filter = constant_op.constant(\n [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],\n name=\"weights\",\n dtype=dtype)\n conv = nn.conv2d(\n input=inp,\n filter=conv_filter,\n 
strides=[1, 2, 2, 1],\n padding=\"SAME\",\n name=\"conv\")\n c1 = constant_op.constant(\n np.random.randn(12, 12, 6), dtype=dtype, name=\"c1\")\n p = math_ops.mul(conv, c1, name=\"mul\")\n c2 = constant_op.constant(\n np.random.randn(12, 12, 6), dtype=dtype, name=\"c2\")\n q = math_ops.div(conv, c2, name=\"div\")\n\n edge = self.trt_incompatible_op(q, name=\"incompatible\")\n edge = math_ops.div(edge, edge, name=\"div1\")\n r = math_ops.add(edge, edge, name=\"add\")\n\n p = math_ops.sub(p, edge, name=\"sub\")\n q = math_ops.mul(q, edge, name=\"mul1\")\n s = math_ops.add(p, q, name=\"add1\")\n s = math_ops.sub(s, r, name=\"sub1\")\n array_ops.squeeze(s, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[(100, 12, 12, 6)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n \"TRTEngineOp_0\": [\n \"add\", \"add1\", \"c1\", \"div1\", \"mul\", \"mul1\", \"sub\", \"sub1\"\n ],\n \"TRTEngineOp_1\": [\"c2\", \"conv\", \"div\", \"weights\"]\n }\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a ConversionParams for test.\"\"\"\n return super(\n SimpleMultiEnginesTest, self\n ).GetConversionParams(run_params)._replace(\n # Disable layout optimizer, since it'll add Transpose(Const, Const) to\n # the graph and breaks the conversion check.\n rewriter_config=trt_test.OptimizerDisabledRewriterConfig())\n\n\nclass PartiallyConvertedTestA(trt_test.TfTrtIntegrationTestBase):\n\n def setUp(self):\n \"\"\"Setup method.\"\"\"\n super(PartiallyConvertedTestA, self).setUp()\n # Let it fail to build the second engine.\n trt_convert.add_test_value(\"TRTEngineOp_1:CreateTRTNode\", \"fail\")\n\n def GetParams(self):\n \"\"\"Create a graph containing two segment.\"\"\"\n input_name = \"input\"\n input_dims = [2, 32, 32, 3]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtypes.float32, shape=input_dims, name=input_name)\n with g.device(\"/GPU:0\"):\n n = inp\n for i in range(2):\n c = constant_op.constant(1.0, name=\"c%d\" % i)\n n = math_ops.add(n, c, name=\"add%d\" % i)\n n = math_ops.mul(n, n, name=\"mul%d\" % i)\n edge = self.trt_incompatible_op(n, name=\"incompatible\")\n with g.control_dependencies([edge]):\n c = constant_op.constant(1.0, name=\"c2\")\n n = math_ops.add(n, c, name=\"add2\")\n n = math_ops.mul(n, n, name=\"mul2\")\n c = constant_op.constant(1.0, name=\"c3\")\n n = math_ops.add(n, c, name=\"add3\")\n n = math_ops.mul(n, n, name=\"mul3\")\n array_ops.squeeze(n, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[tuple(input_dims)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n # Only the first engine is built.\n \"TRTEngineOp_0\": [\"c0\", \"c1\", \"add0\", \"add1\", \"mul0\", \"mul1\"]\n }\n\n def ShouldRunTest(self, run_params):\n \"\"\"Whether to run the test.\"\"\"\n # Disable the test in fp16 mode since multiple matmul and add ops together\n # can cause overflow.\n return ((run_params.precision_mode != \"FP16\") and\n not (trt_test.IsQuantizationMode(run_params.precision_mode) and\n not run_params.use_calibration))\n\n\nclass PartiallyConvertedTestB(PartiallyConvertedTestA):\n\n def setUp(self):\n 
\"\"\"Setup method.\"\"\"\n super(PartiallyConvertedTestB, self).setUp()\n # Let it fail to build the first engine.\n trt_convert.clear_test_values(\"\")\n trt_convert.add_test_value(\"TRTEngineOp_0:CreateTRTNode\", \"fail\")\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n # Only the second engine is built.\n \"TRTEngineOp_1\": [\"c2\", \"c3\", \"add2\", \"add3\", \"mul2\", \"mul3\"]\n }\n\n\nclass ConstInputTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Create a graph containing multiple segment.\"\"\"\n input_name = \"input\"\n input_dims = [2, 32, 32, 3]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtypes.float32, shape=input_dims, name=input_name)\n with g.device(\"/GPU:0\"):\n n = inp\n c = constant_op.constant(1.0, name=\"c\")\n # Adds control dependency from the constant op to a trt incompatible op,\n # and adds control dependency from the trt incompatible op to all other\n # ops, to make sure the constant op cannot be contracted with any trt\n # segment that depends on it.\n with g.control_dependencies([c]):\n d = self.trt_incompatible_op(n, name=\"incompatible\")\n with g.control_dependencies([d]):\n n = math_ops.add(n, c, name=\"add\")\n n = math_ops.mul(n, n, name=\"mul\")\n n = math_ops.add(n, n, name=\"add1\")\n n = self.trt_incompatible_op(n, name=\"incompatible1\")\n with g.control_dependencies([d]):\n n = math_ops.add(n, c, name=\"add2\")\n n = math_ops.mul(n, n, name=\"mul1\")\n n = math_ops.add(n, n, name=\"add3\")\n array_ops.squeeze(n, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[tuple(input_dims)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n \"TRTEngineOp_0\": [\"add\", \"add1\", \"mul\"],\n \"TRTEngineOp_1\": [\"add2\", \"add3\", \"mul1\"]\n }\n\n\nclass ConstDataInputSingleEngineTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Create a graph containing single segment.\"\"\"\n input_name = \"input\"\n input_dims = [2, 32, 32, 3]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtypes.float32, shape=input_dims, name=input_name)\n with g.device(\"/GPU:0\"):\n n = inp\n c = constant_op.constant(1.0, name=\"c\")\n n = math_ops.add(n, c, name=\"add\")\n n = math_ops.mul(n, n, name=\"mul\")\n n = math_ops.add(n, n, name=\"add1\")\n array_ops.squeeze(n, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[tuple(input_dims)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\"TRTEngineOp_0\": [\"c\", \"add\", \"add1\", \"mul\"]}\n\n\nclass ConstDataInputMultipleEnginesTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Create a graph containing multiple segment.\"\"\"\n input_name = \"input\"\n input_dims = [2, 32, 32, 3]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtypes.float32, shape=input_dims, name=input_name)\n with g.device(\"/GPU:0\"):\n n = inp\n c = constant_op.constant(1.0, name=\"c\")\n n = math_ops.add(n, c, 
name=\"add\")\n n = math_ops.mul(n, n, name=\"mul\")\n n = math_ops.add(n, n, name=\"add1\")\n n = self.trt_incompatible_op(n, name=\"incompatible1\")\n n = math_ops.add(n, c, name=\"add2\")\n n = math_ops.mul(n, n, name=\"mul1\")\n n = math_ops.add(n, n, name=\"add3\")\n array_ops.squeeze(n, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[tuple(input_dims)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n \"TRTEngineOp_0\": [\"add2\", \"add3\", \"mul1\"],\n # Why segment [\"add\", \"add1\", \"mul\"] was assigned segment id 1\n # instead of 0: the parent node of this segment is actually const\n # node 'c', but it's removed later since it's const output of the\n # segment which is not allowed.\n \"TRTEngineOp_1\": [\"add\", \"add1\", \"mul\"]\n }\n\n\nclass ControlDependencyTest(trt_test.TfTrtIntegrationTestBase):\n\n def GetParams(self):\n \"\"\"Create a graph containing multiple segment.\"\"\"\n input_name = \"input\"\n input_dims = [2, 32, 32, 3]\n output_name = \"output\"\n g = ops.Graph()\n with g.as_default():\n inp = array_ops.placeholder(\n dtype=dtypes.float32, shape=input_dims, name=input_name)\n with g.device(\"/GPU:0\"):\n c1 = constant_op.constant(1.0, name=\"c1\")\n c2 = constant_op.constant(1.0, name=\"c2\")\n d1 = constant_op.constant(1.0, name=\"d1\")\n d2 = self.trt_incompatible_op(inp, name=\"d2\")\n with g.control_dependencies([d1, d2]):\n add = math_ops.add(inp, c1, name=\"add\")\n with g.control_dependencies([d1, d2]):\n mul = math_ops.mul(add, add, name=\"mul\")\n with g.control_dependencies([d1, d2]):\n add1 = math_ops.add(mul, mul, name=\"add1\")\n edge = self.trt_incompatible_op(add1, name=\"incompatible\")\n with g.control_dependencies([d1, d2, add, mul]):\n add2 = math_ops.add(edge, c2, name=\"add2\")\n with g.control_dependencies([d1, d2, add1, mul]):\n mul1 = math_ops.mul(add2, add2, name=\"mul1\")\n with g.control_dependencies([d1, d2, add, add1]):\n add3 = math_ops.add(mul1, mul1, name=\"add3\")\n array_ops.squeeze(add3, name=output_name)\n return trt_test.TfTrtIntegrationTestParams(\n gdef=g.as_graph_def(),\n input_names=[input_name],\n input_dims=[input_dims],\n output_names=[output_name],\n expected_output_dims=[tuple(input_dims)])\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return {\n \"TRTEngineOp_0\": [\"c1\", \"add\", \"add1\", \"mul\"],\n \"TRTEngineOp_1\": [\"c2\", \"add2\", \"add3\", \"mul1\"]\n }\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.flat_map()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import server_lib\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass FlatMapTest(test_base.DatasetTestBase):\n\n # pylint: disable=g-long-lambda\n def testFlatMapDataset(self):\n repeats = [1, 2, 3, 4, 5, 0, 1]\n components = np.array(repeats, dtype=np.int64)\n dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(\n lambda x: dataset_ops.Dataset.from_tensors([x]).repeat(x))\n expected_output = []\n for i in repeats:\n expected_output.extend([[i]] * i)\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n def testNestedFlatMapDataset(self):\n repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]\n components = np.array(repeats, dtype=np.int64)\n dataset = dataset_ops.Dataset.from_tensor_slices(components).flat_map(\n lambda x: dataset_ops.Dataset.from_tensor_slices(x).flat_map(\n lambda y: dataset_ops.Dataset.from_tensors(y).repeat(y))\n )\n expected_output = []\n for row in repeats:\n for i in row:\n expected_output.extend([i] * i)\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n # Note: no eager mode coverage, session specific test.\n @test_util.run_deprecated_v1\n def testSkipEagerSharedResourceNestedFlatMapDataset(self):\n repeats = [[1, 2], [3, 4], [5, 0], [1, 7]]\n components = np.array(repeats, dtype=np.int64)\n iterator = (\n dataset_ops.Dataset.from_tensor_slices(components)\n .flat_map(lambda x: dataset_ops.Dataset.from_tensor_slices(x)\n .flat_map(lambda y: dataset_ops.Dataset.from_tensors(y)\n .repeat(y))).make_initializable_iterator(\n shared_name=\"shared_flat_map_iterator\"))\n init_op = iterator.initializer\n get_next = iterator.get_next()\n\n # Create two concurrent sessions that share the same iterator\n # resource on the same server, and verify that a random\n # interleaving of `Session.run(get_next)` calls on the two\n # sessions yields the expected result.\n server = server_lib.Server.create_local_server()\n with session.Session(server.target) as sess1:\n with session.Session(server.target) as sess2:\n for _ in range(3):\n sess = random.choice([sess1, sess2])\n sess.run(init_op)\n for row in repeats:\n for i in row:\n for _ in range(i):\n sess = random.choice([sess1, sess2])\n self.assertEqual(i, sess.run(get_next))\n\n with 
self.assertRaises(errors.OutOfRangeError):\n sess = random.choice([sess1, sess2])\n sess.run(get_next)\n\n def testMapDict(self):\n dataset = dataset_ops.Dataset.range(10).map(\n lambda x: {\"foo\": x * 2, \"bar\": x ** 2}).flat_map(\n lambda d: dataset_ops.Dataset.from_tensors(\n d[\"foo\"]).repeat(d[\"bar\"]))\n get_next = self.getNext(dataset)\n for i in range(10):\n for _ in range(i**2):\n self.assertEqual(i * 2, self.evaluate(get_next()))\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(get_next())\n\n def testSparse(self):\n def _map_fn(i):\n return sparse_tensor.SparseTensorValue(\n indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])\n\n def _flat_map_fn(x):\n return dataset_ops.Dataset.from_tensor_slices(\n sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))\n\n dataset = dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)\n expected_output = []\n for i in range(10):\n for j in range(2):\n expected_output.append([i, 0] if j % 2 == 0 else [0, -i])\n self.assertDatasetProduces(dataset, expected_output=expected_output)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.linalg_grad.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.linalg import linalg_impl\nfrom tensorflow.python.platform import test as test_lib\n\n\ndef _AddTest(test, op_name, testcase_name, fn):\n test_name = '_'.join(['test', op_name, testcase_name])\n if hasattr(test, test_name):\n raise RuntimeError('Test %s defined more than once' % test_name)\n setattr(test, test_name, fn)\n\n\nclass ShapeTest(test_lib.TestCase):\n\n @test_util.run_deprecated_v1\n def testBatchGradientUnknownSize(self):\n with self.cached_session():\n batch_size = constant_op.constant(3)\n matrix_size = constant_op.constant(4)\n batch_identity = array_ops.tile(\n array_ops.expand_dims(\n array_ops.diag(array_ops.ones([matrix_size])), 0),\n [batch_size, 1, 1])\n determinants = linalg_ops.matrix_determinant(batch_identity)\n reduced = math_ops.reduce_sum(determinants)\n sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]\n self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))\n\n\nclass MatrixUnaryFunctorGradientTest(test_lib.TestCase):\n pass # Filled in below\n\n\ndef _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):\n\n @test_util.run_v1_only('b/120545219')\n 
def Test(self):\n with self.session(use_gpu=True):\n np.random.seed(1)\n a_np = np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n a = constant_op.constant(a_np)\n if functor_.__name__ == 'matrix_square_root':\n # Square the input matrix to ensure that its matrix square root exists\n a = math_ops.matmul(a, a)\n a_np = self.evaluate(a)\n b = functor_(a, **kwargs_)\n\n # Optimal stepsize for central difference is O(epsilon^{1/3}).\n epsilon = np.finfo(dtype_).eps\n delta = epsilon**(1.0 / 3.0)\n # tolerance obtained by looking at actual differences using\n # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build\n tol = 1e-6 if dtype_ == np.float64 else 0.05\n\n theoretical, numerical = gradient_checker.compute_gradient(\n a,\n a.get_shape().as_list(),\n b,\n b.get_shape().as_list(),\n x_init_value=a_np,\n delta=delta)\n self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)\n\n return Test\n\n\nclass MatrixBinaryFunctorGradientTest(test_lib.TestCase):\n pass # Filled in below\n\n\ndef _GetMatrixBinaryFunctorGradientTest(functor_,\n dtype_,\n shape_,\n float32_tol_fudge=1.0,\n **kwargs_):\n\n @test_util.run_v1_only('b/120545219')\n def Test(self):\n # TODO(rmlarsen): Debug illegal address bug on CUDA and re-enable\n # GPU test for matrix_solve.\n use_gpu = functor_ != linalg_ops.matrix_solve\n\n with self.session(use_gpu=use_gpu):\n np.random.seed(1)\n a_np = np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n a = constant_op.constant(a_np)\n\n b_np = np.random.uniform(\n low=-1.0, high=1.0,\n size=np.prod(shape_)).reshape(shape_).astype(dtype_)\n b = constant_op.constant(b_np)\n c = functor_(a, b, **kwargs_)\n\n # Optimal stepsize for central difference is O(epsilon^{1/3}).\n epsilon = np.finfo(dtype_).eps\n delta = epsilon**(1.0 / 3.0)\n # tolerance obtained by looking at actual differences using\n # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build\n tol = 1e-6 if dtype_ == np.float64 else float32_tol_fudge * 0.05\n # The gradients for a and b may be of very different magnitudes,\n # so to not get spurious failures we test them separately.\n for factor, factor_init in [a, a_np], [b, b_np]:\n theoretical, numerical = gradient_checker.compute_gradient(\n factor,\n factor.get_shape().as_list(),\n c,\n c.get_shape().as_list(),\n x_init_value=factor_init,\n delta=delta)\n self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)\n\n return Test\n\n\nif __name__ == '__main__':\n # Tests for gradients of binary matrix operations.\n for dtype in np.float32, np.float64:\n for size in 2, 5, 10:\n # We skip the rank 4, size 10 case: it is slow and conceptually covered\n # by the other cases.\n for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):\n for adjoint in False, True:\n shape = extra + (size, size)\n name = '%s_%s_adj_%s' % (dtype.__name__, '_'.join(map(str, shape)),\n str(adjoint))\n _AddTest(MatrixBinaryFunctorGradientTest, 'MatrixSolveGradient', name,\n _GetMatrixBinaryFunctorGradientTest(\n linalg_ops.matrix_solve, dtype, shape, adjoint=adjoint))\n\n for lower in True, False:\n # Build the name from the base name each time so the '_low_*'\n # suffix does not accumulate across loop iterations.\n tri_name = '%s_low_%s' % (name, lower)\n _AddTest(MatrixBinaryFunctorGradientTest,\n 'MatrixTriangularSolveGradient', tri_name,\n _GetMatrixBinaryFunctorGradientTest(\n linalg_ops.matrix_triangular_solve,\n dtype,\n shape,\n float32_tol_fudge=4.0,\n adjoint=adjoint,\n lower=lower))\n\n # Tests for gradients of unary matrix operations.\n for dtype in np.float32, np.float64:\n for 
size in 2, 5, 10:\n # We skip the rank 4, size 10 case: it is slow and conceptually covered\n # by the other cases.\n for extra in [(), (2,), (3,)] + [(3, 2)] * (size < 10):\n shape = extra + (size, size)\n name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))\n _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixInverseGradient', name,\n _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_inverse,\n dtype, shape))\n _AddTest(MatrixUnaryFunctorGradientTest, 'MatrixExponentialGradient',\n name,\n _GetMatrixUnaryFunctorGradientTest(\n linalg_impl.matrix_exponential, dtype, shape))\n _AddTest(\n MatrixUnaryFunctorGradientTest, 'MatrixDeterminantGradient', name,\n _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_determinant,\n dtype, shape))\n _AddTest(\n MatrixUnaryFunctorGradientTest, 'LogMatrixDeterminantGradient',\n name,\n _GetMatrixUnaryFunctorGradientTest(\n lambda x: linalg_ops.log_matrix_determinant(x)[1],\n dtype, shape))\n\n # The numerical Jacobian is consistently invalid for these four shapes\n # because the matrix square root of the perturbed input doesn't exist\n if shape in {(2, 5, 5), (3, 5, 5), (3, 10, 10), (3, 2, 5, 5)}:\n # Alternative shape that consistently produces a valid numerical Jacobian\n shape = extra + (size + 1, size + 1)\n name = '%s_%s' % (dtype.__name__, '_'.join(map(str, shape)))\n _AddTest(\n MatrixUnaryFunctorGradientTest, 'MatrixSquareRootGradient', name,\n _GetMatrixUnaryFunctorGradientTest(linalg_ops.matrix_square_root,\n dtype, shape))\n\n # Tests for gradients of matrix_solve_ls\n for dtype in np.float32, np.float64:\n for rows in 2, 5, 10:\n for cols in 2, 5, 10:\n for l2_regularization in 1e-6, 0.001, 1.0:\n shape = (rows, cols)\n name = '%s_%s_%s' % (dtype.__name__, '_'.join(map(str, shape)),\n l2_regularization)\n _AddTest(\n MatrixBinaryFunctorGradientTest,\n 'MatrixSolveLsGradient',\n name,\n # pylint: disable=long-lambda,g-long-lambda\n _GetMatrixBinaryFunctorGradientTest(\n (lambda a, b, l=l2_regularization:\n linalg_ops.matrix_solve_ls(a, b, l)),\n dtype,\n shape,\n float32_tol_fudge=4.0))\n\n test_lib.main()\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Part of the Keras training engine related to distributed training.\n\"\"\"\n# pylint: disable=protected-access\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum # pylint: disable=g-bad-import-order\nimport numpy as np\n\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import reduce_util as ds_reduce_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import callbacks as cbks\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras.engine import distributed_training_utils\nfrom tensorflow.python.keras.utils.generic_utils import Progbar\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import nest\n\n\n# TODO(sourabhbajaj): Check if we can merge the test and prediction graphs\nclass _Mode(enum.Enum):\n TRAIN = 'train'\n TEST = 'test'\n PREDICT = 'predict'\n# TODO(priyag, sourabhbajaj): Refactor this file to address code duplication.\n\n\ndef experimental_fit_loop(model,\n iterator,\n epochs=100,\n verbose=1,\n callbacks=None,\n initial_epoch=0,\n steps_per_epoch=None,\n val_iterator=None,\n validation_steps=None):\n \"\"\"Fit loop for training with TPU DistributionStrategy.\n\n Arguments:\n model: Keras Model instance.\n iterator: Iterator that returns inputs and targets\n epochs: Number of times to iterate over the data\n verbose: Integer, Verbosity mode, 0, 1 or 2\n callbacks: List of callbacks to be called during training\n initial_epoch: Epoch at which to start training\n (useful for resuming a previous training run)\n steps_per_epoch: Total number of steps (batches of samples)\n before declaring one epoch finished and starting the\n next epoch. 
Ignored with the default value of `None`.\n val_iterator: Iterator for validation data.\n validation_steps: Number of steps to run validation for\n (only if doing validation from data tensors).\n Ignored with the default value of `None`.\n\n Returns:\n Returns `None`.\n\n Raises:\n ValueError: in case of invalid arguments.\n \"\"\"\n current_strategy = model._distribution_strategy\n\n K.get_session().run(current_strategy.initialize())\n\n def _per_device_fit_function(model):\n model._make_fit_function()\n return (model._fit_function.inputs, model._fit_function.outputs,\n model._fit_function.updates_op, model._fit_function.session_kwargs)\n\n # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.\n K.set_learning_phase(1)\n out_labels = model.metrics_names or []\n\n def step_fn(ctx, inputs):\n \"\"\"Clones the model and calls make_fit_function.\"\"\"\n # TODO(priyag, sourabhbajaj): The model gets cloned every time\n # fit/test/predict is called. We should look into caching this keyed on\n # input shapes.\n inputs, targets = inputs\n clone_model_on_replicas(\n model,\n current_strategy,\n make_callback_model=True,\n inputs=inputs,\n targets=targets,\n mode=_Mode.TRAIN)\n\n (grouped_inputs, grouped_outputs, grouped_updates,\n grouped_session_args) = current_strategy.extended.call_for_each_replica(\n _per_device_fit_function, args=(model._grouped_model_train,))\n (all_inputs, all_outputs, all_updates,\n all_session_args) = distributed_training_utils.unwrap_values(\n current_strategy, grouped_inputs, grouped_outputs,\n grouped_updates, grouped_session_args)\n combined_fn = K.function(\n all_inputs,\n all_outputs,\n updates=all_updates,\n name='distributed_fit_function',\n **all_session_args)\n\n for label, output in zip(out_labels, combined_fn.outputs):\n if label == 'loss':\n reduce_op = distribute_lib.get_loss_reduction()\n else:\n # We reduce all other metrics using mean for now. This is temporary\n # workaround until new metrics are in place.\n reduce_op = ds_reduce_util.ReduceOp.MEAN\n ctx.set_last_step_output(label, output, reduce_op)\n\n # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:\n # feed_dict, session kwargs, run options, run_metadata for now. 
These should\n # be handled appropriately\n return combined_fn.updates_op\n\n # Add initial dummy values for loss and other metric tensors.\n initial_loop_values = {}\n initial_loop_values['loss'] = constant_op.constant(1e7)\n for name in model.metrics_names[1:]:\n tensor = model._all_stateful_metrics_tensors[name]\n initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)\n\n if steps_per_epoch is None:\n raise ValueError('`steps_per_epoch` should be specified when calling '\n '`fit` on the model.')\n steps_per_run = K.variable(\n value=min(steps_per_epoch, current_strategy.extended.steps_per_run),\n dtype='int32',\n name='steps_per_run')\n\n with current_strategy.scope():\n ctx = current_strategy.extended.experimental_run_steps_on_iterator(\n step_fn, iterator, iterations=steps_per_run,\n initial_loop_values=initial_loop_values)\n\n train_op = ctx.run_op\n output_tensors = ctx.last_step_outputs\n\n do_validation = bool(validation_steps)\n\n # Copy the weights from the original model to each of the replicated models.\n with current_strategy.scope():\n _copy_weights_to_distributed_model(model, model._grouped_model_train)\n\n callbacks = cbks.configure_callbacks(\n callbacks,\n model,\n do_validation=do_validation,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n verbose=verbose)\n\n # Calculate the steps each time on the device.\n steps_to_run = [current_strategy.extended.steps_per_run] * (\n steps_per_epoch // current_strategy.extended.steps_per_run)\n if steps_per_epoch % current_strategy.extended.steps_per_run:\n steps_to_run.append(\n steps_per_epoch % current_strategy.extended.steps_per_run)\n\n callbacks.on_train_begin()\n for epoch in range(initial_epoch, epochs):\n with current_strategy.scope():\n _reset_metrics(model, model._grouped_model_train)\n callbacks.on_epoch_begin(epoch)\n epoch_logs = {}\n step_index = 0\n prev_step_count = None\n for step_count in steps_to_run:\n batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}\n callbacks.on_batch_begin(step_index, batch_logs)\n if prev_step_count is None or step_count != prev_step_count:\n steps_per_run.load(step_count, K.get_session())\n prev_step_count = step_count\n try:\n _, outputs = K.get_session().run([train_op, output_tensors])\n except errors.OutOfRangeError:\n logging.warning('Your dataset iterator ran out of data; '\n 'interrupting training. Make sure that your dataset '\n 'can generate at least `steps_per_epoch * epochs` '\n 'batches (in this case, %d batches).' 
%\n (steps_per_epoch * epochs))\n break\n\n batch_logs.update(outputs)\n callbacks.on_batch_end(step_index, batch_logs)\n step_index = step_index + step_count\n if callbacks.model.stop_training:\n break\n\n if do_validation:\n logging.info('Running validation at fit epoch: %s', epoch)\n\n # Since we create a new clone from the original model we need to copy\n # the weights back to the original model before we can run validation.\n with current_strategy.scope():\n _copy_weights_to_original_model(model, model._grouped_model_train,\n 'train')\n\n val_outs = experimental_test_loop( # pylint: disable=undefined-variable\n model,\n val_iterator,\n steps=validation_steps,\n verbose=verbose,\n initialize_finalize_strategy=False)\n if not isinstance(val_outs, list):\n val_outs = [val_outs]\n # Same labels assumed.\n for label, val_out in zip(out_labels, val_outs):\n epoch_logs['val_' + label] = val_out\n\n callbacks.on_epoch_end(epoch, epoch_logs)\n if callbacks.model.stop_training:\n break\n callbacks.on_train_end()\n\n # Copy the weights back from the replicated model to the original model.\n with current_strategy.scope():\n _copy_weights_to_original_model(model, model._grouped_model_train, 'train')\n\n K.get_session().run(current_strategy.finalize())\n return model.history\n\n\ndef experimental_test_loop(model,\n iterator,\n verbose=0,\n steps=None,\n initialize_finalize_strategy=True):\n \"\"\"Test loop for evaluating with TPU DistributionStrategy.\n\n Arguments:\n model: Keras Model instance.\n iterator: Iterator for input data.\n verbose: Integer, Verbosity mode 0 or 1.\n steps: Total number of steps (batches of samples)\n before declaring predictions finished.\n Ignored with the default value of `None`.\n initialize_finalize_strategy: Whether the strategy's initialize and\n finalize functions should be called.\n\n Returns:\n Scalar loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the outputs.\n \"\"\"\n current_strategy = model._distribution_strategy\n if initialize_finalize_strategy:\n K.get_session().run(current_strategy.initialize())\n\n def _per_device_eval_function(model):\n model._make_eval_function()\n return (model._eval_function.inputs, model._eval_function.outputs,\n model._eval_function.updates_op,\n model._eval_function.session_kwargs)\n\n # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.\n K.set_learning_phase(0)\n\n def step_fn(ctx, inputs):\n \"\"\"Clones the model and calls make_eval_function.\"\"\"\n # TODO(priyag, sourabhbajaj): The model gets cloned every time\n # fit/test/predict is called. 
We should look into caching this keyed on\n # input shapes.\n inputs, targets = inputs\n clone_model_on_replicas(\n model,\n current_strategy,\n make_callback_model=False,\n inputs=inputs,\n targets=targets,\n mode=_Mode.TEST)\n\n (grouped_inputs, grouped_outputs, grouped_updates,\n grouped_session_args) = current_strategy.extended.call_for_each_replica(\n _per_device_eval_function, args=(model._grouped_model_test,))\n\n (all_inputs, all_outputs, all_updates,\n all_session_args) = distributed_training_utils.unwrap_values(\n current_strategy, grouped_inputs, grouped_outputs, grouped_updates,\n grouped_session_args)\n\n combined_fn = K.function(\n all_inputs, all_outputs,\n updates=all_updates,\n name='distributed_test_function',\n **all_session_args)\n\n for label, output in zip(model.metrics_names, combined_fn.outputs):\n if label == 'loss':\n reduce_op = distribute_lib.get_loss_reduction()\n else:\n # We reduce all other metrics using mean for now. This is temporary\n # workaround until new metrics are in place.\n reduce_op = ds_reduce_util.ReduceOp.MEAN\n ctx.set_last_step_output(label, output, reduce_op)\n\n return combined_fn.updates_op\n\n # Add initial dummy values for loss and other metric tensors.\n initial_loop_values = {}\n initial_loop_values['loss'] = constant_op.constant(1e7)\n for name in model.metrics_names[1:]:\n tensor = model._all_stateful_metrics_tensors[name]\n initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)\n\n with current_strategy.scope():\n # TODO(priyag): Use steps_per_run when we use new metrics as they will\n # allow handling metric computation at each step using variables.\n ctx = current_strategy.extended.experimental_run_steps_on_iterator(\n step_fn, iterator, iterations=1,\n initial_loop_values=initial_loop_values)\n\n test_op = ctx.run_op\n output_tensors = ctx.last_step_outputs\n\n if verbose == 1:\n progbar = Progbar(target=steps)\n\n # Copy the weights from the original model to each of the replicated models.\n with current_strategy.scope():\n _copy_weights_to_distributed_model(model, model._grouped_model_test)\n _reset_metrics(model, model._grouped_model_test)\n assert steps is not None\n outs = [0.] 
* len(model.metrics_names)\n for step in range(steps):\n _, batch_outs = K.get_session().run([test_op, output_tensors])\n for i, label in enumerate(model.metrics_names):\n if i == 0:\n # Loss is a stateless metric; accumulate it across steps.\n outs[i] += batch_outs[label]\n else:\n # For all stateful metrics, the aggregation is handled by mirrored vars.\n outs[i] = batch_outs[label]\n\n if verbose == 1:\n progbar.update(step + 1)\n\n # Average the accumulated loss over the number of steps run.\n if outs:\n outs[0] /= steps\n\n if initialize_finalize_strategy:\n K.get_session().run(current_strategy.finalize())\n\n if len(outs) == 1:\n return outs[0]\n return outs\n\n\ndef experimental_predict_loop(model, iterator, verbose=0, steps=None):\n \"\"\"Predict loop for predicting with TPU DistributionStrategy.\n\n Arguments:\n model: Keras Model instance.\n iterator: Iterator for input data.\n verbose: Integer, Verbosity mode 0 or 1.\n steps: Total number of steps (batches of samples)\n before declaring `_predict_loop` finished.\n Ignored with the default value of `None`.\n\n Returns:\n Array of predictions (if the model has a single output)\n or list of arrays of predictions\n (if the model has multiple outputs).\n \"\"\"\n current_strategy = model._distribution_strategy\n K.get_session().run(current_strategy.initialize())\n\n # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.\n K.set_learning_phase(0)\n\n def _per_device_predict_function(model):\n model._make_predict_function()\n return (model.predict_function.inputs,\n model.predict_function.outputs,\n model.predict_function.updates_op,\n model.predict_function.session_kwargs)\n\n def step_fn(ctx, inputs):\n \"\"\"Clones the model and calls make_predict_function.\"\"\"\n\n # TODO(priyag, sourabhbajaj): The model gets cloned every time\n # fit/test/predict is called. 
We should look into caching this keyed on\n # input shapes.\n clone_model_on_replicas(\n model,\n current_strategy,\n make_callback_model=False,\n inputs=inputs,\n mode=_Mode.PREDICT)\n\n (grouped_inputs, grouped_outputs, grouped_updates,\n grouped_session_args) = current_strategy.extended.call_for_each_replica(\n _per_device_predict_function, args=(model._grouped_model_predict,))\n\n (all_inputs, all_outputs, all_updates,\n all_session_args) = distributed_training_utils.unwrap_values(\n current_strategy, grouped_inputs, grouped_outputs, grouped_updates,\n grouped_session_args)\n\n combined_fn = K.function(\n all_inputs, all_outputs,\n updates=all_updates,\n name='distributed_predict_function',\n **all_session_args)\n\n for label, output in zip(model.output_names, combined_fn.outputs):\n ctx.set_last_step_output(label, output)\n\n return combined_fn.updates_op\n\n # Add initial dummy values for outputs.\n initial_loop_values = {}\n batch_dimension = distributed_training_utils.get_batch_dimension(iterator)\n for name, tensor in zip(model.output_names, model.outputs):\n # TODO(priyag): This is a workaround as we do not know the batch dimension\n # of the model's output at this point.\n shape = tensor_shape.TensorShape(tensor.shape.dims)\n shape.dims = [batch_dimension] + shape.dims[1:]\n initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)\n\n with current_strategy.scope():\n # TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.\n ctx = current_strategy.extended.experimental_run_steps_on_iterator(\n step_fn, iterator, iterations=1,\n initial_loop_values=initial_loop_values)\n\n predict_op = ctx.run_op\n output_tensors = ctx.last_step_outputs\n\n if verbose == 1:\n progbar = Progbar(target=steps)\n\n # Copy the weights from the original model to each of the replicated models.\n with current_strategy.scope():\n _copy_weights_to_distributed_model(model, model._grouped_model_predict)\n _reset_metrics(model, model._grouped_model_predict)\n assert steps is not None\n # Since we do not know how many samples we will see, we cannot pre-allocate\n # the returned Numpy arrays. 
Instead, we store one array per batch seen\n # and concatenate them upon returning.\n unconcatenated_outs = [[] for _ in model.outputs]\n for step in range(steps):\n _, batch_outs = K.get_session().run([predict_op, output_tensors])\n # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.\n for i, label in enumerate(model.output_names):\n unconcatenated_outs[i].extend(batch_outs[label])\n if verbose == 1:\n progbar.update(step + 1)\n\n K.get_session().run(current_strategy.finalize())\n\n if len(unconcatenated_outs) == 1:\n return np.concatenate(unconcatenated_outs[0], axis=0)\n return [\n np.concatenate(unconcatenated_outs[i], axis=0)\n for i in range(len(unconcatenated_outs))\n ]\n\n\ndef _custom_compile_for_predict(model):\n \"\"\"Custom compile for TPU predict mode.\"\"\"\n model.total_loss = None\n model._fit_function = None\n model._eval_function = None\n model.train_function = None\n model.test_function = None\n model.predict_function = None\n\n\ndef _clone_and_build_model(model, inputs=None, targets=None, mode=None):\n \"\"\"Clone and build the given keras_model.\"\"\"\n # We need to set the import here since we run into a circular dependency\n # error.\n from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top\n cloned_model = models.clone_model(model, input_tensors=inputs)\n\n # Compile and build model.\n if isinstance(model.optimizer, optimizers.TFOptimizer):\n optimizer = model.optimizer\n else:\n optimizer_config = model.optimizer.get_config()\n optimizer = model.optimizer.__class__.from_config(optimizer_config)\n\n # Recast all low precision outputs back to float32 since we only cast\n # the inputs to bfloat16 and not the targets. This is done so that we can\n # preserve precision when calculating the loss value.\n def _upcast_low_precision_outputs(output):\n if output.dtype == dtypes.bfloat16:\n return math_ops.cast(output, dtypes.float32)\n else:\n return output\n cloned_model.outputs = [_upcast_low_precision_outputs(o)\n for o in cloned_model.outputs]\n\n if isinstance(targets, tuple):\n targets = nest.flatten(targets)\n if mode == _Mode.PREDICT:\n _custom_compile_for_predict(cloned_model)\n else:\n cloned_model.compile(\n optimizer,\n model.loss,\n metrics=metrics_module.clone_metrics(model._compile_metrics),\n loss_weights=model.loss_weights,\n sample_weight_mode=model.sample_weight_mode,\n weighted_metrics=metrics_module.clone_metrics(\n model._compile_weighted_metrics),\n target_tensors=targets)\n return cloned_model\n\n\ndef clone_model_on_replicas(model, strategy, make_callback_model=False,\n inputs=None, targets=None, mode=None):\n \"\"\"Create a cloned model on each replica.\"\"\"\n with K.get_graph().as_default(), strategy.scope():\n grouped_model = strategy.extended.call_for_each_replica(\n _clone_and_build_model, args=(model, inputs, targets, mode))\n if mode is _Mode.TRAIN:\n model._grouped_model_train = grouped_model\n elif mode is _Mode.TEST:\n model._grouped_model_test = grouped_model\n elif mode is _Mode.PREDICT:\n model._grouped_model_predict = grouped_model\n else:\n model._grouped_model = grouped_model\n if make_callback_model:\n model._make_callback_model(grouped_model)\n\n\ndef _get_input_from_iterator(iterator, model):\n \"\"\"Get elements from the iterator and verify the input shape and type.\"\"\"\n next_element = iterator.get_next()\n\n if len(nest.flatten(next_element)) == len(model.inputs):\n x = next_element\n y = None\n sample_weights = None\n elif len(nest.flatten(next_element)) == (len(model.inputs) 
+\n len(model.outputs)):\n x, y = next_element\n sample_weights = None\n else:\n x, y, sample_weights = next_element\n\n # Validate that all the elements in x and y are of the same type and shape.\n # We can then pass the first element of x and y to `_standardize_weights`\n # below and be confident of the output.\n distributed_training_utils.validate_distributed_dataset_inputs(\n model._distribution_strategy, x, y, sample_weights)\n return x, y, sample_weights\n\n\ndef _make_execution_function(model, mode):\n \"\"\"Makes function to run one step of distributed model execution.\"\"\"\n if context.executing_eagerly():\n return _make_eager_execution_function(model, mode)\n\n strategy = model._distribution_strategy\n if not model._grouped_model:\n clone_model_on_replicas(\n model, strategy, make_callback_model=(mode == 'train'))\n\n def _per_device_function(model):\n f = model._make_execution_function(mode)\n return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)\n\n with strategy.scope():\n # Create train ops on each of the devices when we call\n # `_per_device_fit_function`.\n (grouped_inputs, grouped_outputs, grouped_updates,\n grouped_session_args) = strategy.extended.call_for_each_replica(\n _per_device_function, args=(model._grouped_model,))\n\n if mode == 'train':\n # Initialize the variables in the replicated model. This is necessary for\n # multi-worker training because on some workers, initialization is not\n # needed. This method does initialization or waiting for initialization\n # according to the context object of distribute coordinator.\n distributed_training_utils.init_restore_or_wait_for_variables()\n\n # Unwrap all the per device values returned from `call_for_each_replica`.\n # Unwrapping per device values gives you a list of values that can be\n # used to construct a new train function that is composed of update ops on\n # all the devices over which the model is distributed.\n (all_inputs, all_outputs, all_updates,\n all_session_args) = distributed_training_utils.unwrap_values(\n strategy,\n grouped_inputs,\n grouped_outputs,\n grouped_updates,\n grouped_session_args,\n with_loss_tensor=(mode != 'predict'))\n\n return K.function(\n all_inputs,\n all_outputs,\n updates=all_updates,\n name='distributed_{}_function'.format(mode),\n **all_session_args)\n\n\ndef _make_eager_execution_function(model, mode):\n \"\"\"Makes function to run one step of distributed model eager execution.\"\"\"\n strategy = model._distribution_strategy\n if not model._grouped_model:\n clone_model_on_replicas(\n model, strategy, make_callback_model=(mode == 'train'))\n\n def _per_device_function(model):\n f = model._make_execution_function(mode)\n return (f.inputs, f.outputs)\n\n # NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using\n # the global one.\n with K.get_graph().as_default(), strategy.scope():\n # Create train ops on each of the devices when we call\n # `_per_device_fit_function`.\n (grouped_inputs, grouped_outputs) = strategy.call_for_each_replica(\n _per_device_function, args=(model._grouped_model,))\n\n # Unwrap all the per device values returned from `call_for_each_replica`.\n # Unwrapping per device values gives you a list of values that can be\n # used to construct a new train function that is composed of inputs/outputs\n # on all the devices over which the model is distributed.\n (all_inputs, all_outputs, _, _) = distributed_training_utils.unwrap_values(\n strategy,\n grouped_inputs,\n grouped_outputs,\n with_loss_tensor=(mode != 'predict'))\n\n return 
K.function(\n all_inputs,\n all_outputs,\n name='eager_distributed_{}_function'.format(mode))\n\n\ndef _prepare_feed_values(model, inputs, targets, sample_weights, mode):\n \"\"\"Prepare feed values to the model execution function.\n\n Arguments:\n model: Model to prepare feed values for.\n inputs: List or dict of model inputs.\n targets: Optional list of model targets.\n sample_weights: Optional list of sample weight arrays.\n mode: One of 'train'/'test'/'predict'.\n\n Returns:\n Feed values for the model in the given mode.\n \"\"\"\n strategy = model._distribution_strategy\n inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)\n inputs = distributed_training_utils.flatten_perdevice_values(strategy, inputs)\n targets = distributed_training_utils.flatten_perdevice_values(\n strategy, targets)\n if mode == 'predict':\n sample_weights = []\n targets = []\n else:\n sample_weights = [\n None for _ in range(len(model.outputs) * strategy.num_replicas_in_sync)\n ]\n ins = inputs + targets + sample_weights\n if mode == 'train' and not isinstance(K.symbolic_learning_phase(), int):\n ins += [True]\n return ins\n\n\ndef _copy_weights_to_distributed_model(original_model, grouped_model):\n \"\"\"Copies weights from original model to distributed models.\"\"\"\n strategy = original_model._distribution_strategy\n if strategy:\n # Copy the weights from the original model to each of the replicated\n # models.\n orig_model_weights = original_model.get_weights()\n distributed_model = strategy.unwrap(grouped_model)[0]\n distributed_training_utils.set_weights(strategy, distributed_model,\n orig_model_weights)\n\n\ndef _copy_weights_to_original_model(model, grouped_model, mode):\n \"\"\"Copies weights from first distributed model back to original model.\"\"\"\n if model._distribution_strategy and mode == 'train':\n updated_weights = model._distribution_strategy.unwrap(\n grouped_model)[0].get_weights()\n model.set_weights(updated_weights)\n\n\ndef _per_device_aggregate_batch(batch_outs, model, mode):\n \"\"\"Aggregates the per-device batch-level outputs from a distributed step.\"\"\"\n if model._distribution_strategy is not None and mode == 'predict':\n total_batch_outs = []\n for i in range(len(model.outputs)):\n num_replicas = model._distribution_strategy.num_replicas_in_sync\n nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]\n total_batch_outs.append(np.concatenate(nest.flatten(nested_outs)))\n return total_batch_outs\n return batch_outs\n\n\ndef _reset_metrics(model, distributed_model=None):\n if model._distribution_strategy:\n distributed_model = (\n distributed_model or\n model._distribution_strategy.unwrap(model._grouped_model)[0])\n distributed_model.reset_metrics()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Experimental API for building input pipelines.\n\nThis module contains experimental `Dataset` sources and transformations that can\nbe used in conjunction with the `tf.data.Dataset` API. Note that the\n`tf.data.experimental` API is not subject to the same backwards compatibility\nguarantees as `tf.data`, but we will provide deprecation advice in advance of\nremoving existing functionality.\n\nSee [Importing Data](https://tensorflow.org/guide/datasets) for an overview.\n\n@@Counter\n@@CheckpointInputPipelineHook\n@@CsvDataset\n@@DatasetStructure\n@@NestedStructure\n@@OptimizationOptions\n@@Optional\n@@OptionalStructure\n@@RandomDataset\n@@Reducer\n@@SparseTensorStructure\n@@SqlDataset\n@@StatsAggregator\n@@StatsOptions\n@@Structure\n@@TFRecordWriter\n@@TensorStructure\n@@ThreadingOptions\n\n@@bucket_by_sequence_length\n@@cardinality\n@@choose_from_datasets\n@@copy_to_device\n@@dense_to_sparse_batch\n@@enumerate_dataset\n@@filter_for_shard\n@@get_next_as_optional\n@@get_single_element\n@@group_by_reducer\n@@group_by_window\n@@ignore_errors\n@@latency_stats\n@@make_batched_features_dataset\n@@make_csv_dataset\n@@make_saveable_from_iterator\n@@map_and_batch\n@@parallel_interleave\n@@parse_example_dataset\n@@prefetch_to_device\n@@rejection_resample\n@@sample_from_datasets\n@@scan\n@@shuffle_and_repeat\n@@unbatch\n@@unique\n\n@@AUTOTUNE\n@@INFINITE_CARDINALITY\n@@UNKNOWN_CARDINALITY\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=unused-import\n\nfrom tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch\nfrom tensorflow.python.data.experimental.ops.batching import map_and_batch\nfrom tensorflow.python.data.experimental.ops.batching import unbatch\nfrom tensorflow.python.data.experimental.ops.cardinality import cardinality\nfrom tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY\nfrom tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY\nfrom tensorflow.python.data.experimental.ops.counter import Counter\nfrom tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset\nfrom tensorflow.python.data.experimental.ops.error_ops import ignore_errors\nfrom tensorflow.python.data.experimental.ops.filter_for_shard_ops import filter_for_shard\nfrom tensorflow.python.data.experimental.ops.get_single_element import get_single_element\nfrom tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length\nfrom tensorflow.python.data.experimental.ops.grouping import group_by_reducer\nfrom tensorflow.python.data.experimental.ops.grouping import group_by_window\nfrom tensorflow.python.data.experimental.ops.grouping import Reducer\nfrom tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets\nfrom 
tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave\nfrom tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets\nfrom tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook\nfrom tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator\nfrom tensorflow.python.data.experimental.ops.optimization import AUTOTUNE\nfrom tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions\nfrom tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset\nfrom tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device\nfrom tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device\nfrom tensorflow.python.data.experimental.ops.random_ops import RandomDataset\nfrom tensorflow.python.data.experimental.ops.readers import CsvDataset\nfrom tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset\nfrom tensorflow.python.data.experimental.ops.readers import make_csv_dataset\nfrom tensorflow.python.data.experimental.ops.readers import SqlDataset\nfrom tensorflow.python.data.experimental.ops.resampling import rejection_resample\nfrom tensorflow.python.data.experimental.ops.scan_ops import scan\nfrom tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat\nfrom tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator\nfrom tensorflow.python.data.experimental.ops.stats_ops import latency_stats\nfrom tensorflow.python.data.experimental.ops.stats_options import StatsOptions\nfrom tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions\nfrom tensorflow.python.data.experimental.ops.unique import unique\nfrom tensorflow.python.data.experimental.ops.writers import TFRecordWriter\nfrom tensorflow.python.data.ops.dataset_ops import DatasetStructure\nfrom tensorflow.python.data.ops.iterator_ops import get_next_as_optional\nfrom tensorflow.python.data.ops.optional_ops import Optional\nfrom tensorflow.python.data.ops.optional_ops import OptionalStructure\nfrom tensorflow.python.data.util.structure import NestedStructure\nfrom tensorflow.python.data.util.structure import SparseTensorStructure\nfrom tensorflow.python.data.util.structure import Structure\nfrom tensorflow.python.data.util.structure import TensorStructure\n# pylint: enable=unused-import\n\nfrom tensorflow.python.util.all_util import remove_undocumented\nremove_undocumented(__name__)\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module implementing RNN Cells.\n\nThis module provides a number of basic commonly used RNN cells, such as LSTM\n(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of\noperators that allow adding dropouts, projections, or embeddings for inputs.\nConstructing multi-layer cells is supported by the class `MultiRNNCell`, or by\ncalling the `rnn` ops several times.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport hashlib\nimport numbers\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.layers import base as base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n_BIAS_VARIABLE_NAME = \"bias\"\n_WEIGHTS_VARIABLE_NAME = \"kernel\"\n\n# This can be used with self.assertRaisesRegexp for assert_like_rnncell.\nASSERT_LIKE_RNNCELL_ERROR_REGEXP = \"is not an RNNCell\"\n\n\ndef assert_like_rnncell(cell_name, cell):\n \"\"\"Raises a TypeError if cell is not like an RNNCell.\n\n NOTE: Do not rely on the error message (in particular in tests) which can be\n subject to change to increase readability. 
Use\n ASSERT_LIKE_RNNCELL_ERROR_REGEXP.\n\n Args:\n cell_name: A string to give a meaningful error referencing the name\n of the function argument.\n cell: The object which should behave like an RNNCell.\n\n Raises:\n TypeError: A human-friendly exception.\n \"\"\"\n conditions = [\n hasattr(cell, \"output_size\"),\n hasattr(cell, \"state_size\"),\n hasattr(cell, \"get_initial_state\") or hasattr(cell, \"zero_state\"),\n callable(cell),\n ]\n errors = [\n \"'output_size' property is missing\",\n \"'state_size' property is missing\",\n \"either 'zero_state' or 'get_initial_state' method is required\",\n \"is not callable\"\n ]\n\n if not all(conditions):\n errors = [error for error, cond in zip(errors, conditions) if not cond]\n raise TypeError(\"The argument {!r} ({}) is not an RNNCell: {}.\".format(\n cell_name, cell, \", \".join(errors)))\n\n\ndef _concat(prefix, suffix, static=False):\n \"\"\"Concat that enables int, Tensor, or TensorShape values.\n\n This function takes a size specification, which can be an integer, a\n TensorShape, or a Tensor, and converts it into a concatenated Tensor\n (if static = False) or a list of integers (if static = True).\n\n Args:\n prefix: The prefix; usually the batch size (and/or time step size).\n (TensorShape, int, or Tensor.)\n suffix: TensorShape, int, or Tensor.\n static: If `True`, return a python list with possibly unknown dimensions.\n Otherwise return a `Tensor`.\n\n Returns:\n shape: the concatenation of prefix and suffix.\n\n Raises:\n ValueError: if `suffix` is not a scalar or vector (or TensorShape).\n ValueError: if prefix or suffix was `None` and asked for dynamic\n Tensors out.\n \"\"\"\n if isinstance(prefix, ops.Tensor):\n p = prefix\n p_static = tensor_util.constant_value(prefix)\n if p.shape.ndims == 0:\n p = array_ops.expand_dims(p, 0)\n elif p.shape.ndims != 1:\n raise ValueError(\"prefix tensor must be either a scalar or vector, \"\n \"but saw tensor: %s\" % p)\n else:\n p = tensor_shape.as_shape(prefix)\n p_static = p.as_list() if p.ndims is not None else None\n p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)\n if p.is_fully_defined() else None)\n if isinstance(suffix, ops.Tensor):\n s = suffix\n s_static = tensor_util.constant_value(suffix)\n if s.shape.ndims == 0:\n s = array_ops.expand_dims(s, 0)\n elif s.shape.ndims != 1:\n raise ValueError(\"suffix tensor must be either a scalar or vector, \"\n \"but saw tensor: %s\" % s)\n else:\n s = tensor_shape.as_shape(suffix)\n s_static = s.as_list() if s.ndims is not None else None\n s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)\n if s.is_fully_defined() else None)\n\n if static:\n shape = tensor_shape.as_shape(p_static).concatenate(s_static)\n shape = shape.as_list() if shape.ndims is not None else None\n else:\n if p is None or s is None:\n raise ValueError(\"Provided a prefix or suffix of None: %s and %s\"\n % (prefix, suffix))\n shape = array_ops.concat((p, s), 0)\n return shape\n\n\ndef _zero_state_tensors(state_size, batch_size, dtype):\n \"\"\"Create tensors of zeros based on state_size, batch_size, and dtype.\"\"\"\n def get_state_shape(s):\n \"\"\"Combine s with batch_size to get a proper tensor shape.\"\"\"\n c = _concat(batch_size, s)\n size = array_ops.zeros(c, dtype=dtype)\n if not context.executing_eagerly():\n c_static = _concat(batch_size, s, static=True)\n size.set_shape(c_static)\n return size\n return nest.map_structure(get_state_shape, state_size)\n\n\n@tf_export(\"nn.rnn_cell.RNNCell\")\nclass RNNCell(base_layer.Layer):\n \"\"\"Abstract 
object representing an RNN cell.\n\n Every `RNNCell` must have the properties below and implement `call` with\n the signature `(output, next_state) = call(input, state)`. The optional\n third input argument, `scope`, is allowed for backwards compatibility\n purposes, but should be left off for new subclasses.\n\n This definition of cell differs from the definition used in the literature.\n In the literature, 'cell' refers to an object with a single scalar output.\n This definition refers to a horizontal array of such units.\n\n An RNN cell, in the most abstract setting, is anything that has\n a state and performs some operation that takes a matrix of inputs.\n This operation results in an output matrix with `self.output_size` columns.\n If `self.state_size` is an integer, this operation also results in a new\n state matrix with `self.state_size` columns. If `self.state_size` is a\n (possibly nested tuple of) TensorShape object(s), then it should return a\n matching structure of Tensors having shape `[batch_size].concatenate(s)`\n for each `s` in `self.state_size`.\n \"\"\"\n\n def __init__(self, trainable=True, name=None, dtype=None, **kwargs):\n super(RNNCell, self).__init__(\n trainable=trainable, name=name, dtype=dtype, **kwargs)\n # Attribute that indicates whether the cell is a TF RNN cell, due to the\n # slight difference between TF and Keras RNN cells.\n self._is_tf_rnn_cell = True\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run this RNN cell on inputs, starting from the given state.\n\n Args:\n inputs: `2-D` tensor with shape `[batch_size, input_size]`.\n state: if `self.state_size` is an integer, this should be a `2-D Tensor`\n with shape `[batch_size, self.state_size]`. Otherwise, if\n `self.state_size` is a tuple of integers, this should be a tuple\n with shapes `[batch_size, s] for s in self.state_size`.\n scope: VariableScope for the created subgraph; defaults to class name.\n\n Returns:\n A pair containing:\n\n - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.\n - New state: Either a single `2-D` tensor, or a tuple of tensors matching\n the arity and shapes of `state`.\n \"\"\"\n if scope is not None:\n with vs.variable_scope(scope,\n custom_getter=self._rnn_get_variable) as scope:\n return super(RNNCell, self).__call__(inputs, state, scope=scope)\n else:\n scope_attrname = \"rnncell_scope\"\n scope = getattr(self, scope_attrname, None)\n if scope is None:\n scope = vs.variable_scope(vs.get_variable_scope(),\n custom_getter=self._rnn_get_variable)\n setattr(self, scope_attrname, scope)\n with scope:\n return super(RNNCell, self).__call__(inputs, state)\n\n def _rnn_get_variable(self, getter, *args, **kwargs):\n variable = getter(*args, **kwargs)\n if context.executing_eagerly():\n trainable = variable._trainable # pylint: disable=protected-access\n else:\n trainable = (\n variable in tf_variables.trainable_variables() or\n (isinstance(variable, tf_variables.PartitionedVariable) and\n list(variable)[0] in tf_variables.trainable_variables()))\n if trainable and variable not in self._trainable_weights:\n self._trainable_weights.append(variable)\n elif not trainable and variable not in self._non_trainable_weights:\n self._non_trainable_weights.append(variable)\n return variable\n\n @property\n def state_size(self):\n \"\"\"size(s) of state(s) used by this cell.\n\n It can be represented by an Integer, a TensorShape or a tuple of Integers\n or TensorShapes.\n \"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n @property\n def 
output_size(self):\n \"\"\"Integer or TensorShape: size of outputs produced by this cell.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def build(self, _):\n # This tells the parent Layer object that it's OK to call\n # self.add_variable() inside the call() method.\n pass\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n if inputs is not None:\n # Validate the given batch_size and dtype against inputs if provided.\n inputs = ops.convert_to_tensor(inputs, name=\"inputs\")\n if batch_size is not None:\n if tensor_util.is_tensor(batch_size):\n static_batch_size = tensor_util.constant_value(\n batch_size, partial=True)\n else:\n static_batch_size = batch_size\n if inputs.shape.dims[0].value != static_batch_size:\n raise ValueError(\n \"batch size from input tensor is different from the \"\n \"input param. Input tensor batch: {}, batch_size: {}\".format(\n inputs.shape.dims[0].value, batch_size))\n\n if dtype is not None and inputs.dtype != dtype:\n raise ValueError(\n \"dtype from input tensor is different from the \"\n \"input param. Input tensor dtype: {}, dtype: {}\".format(\n inputs.dtype, dtype))\n\n batch_size = inputs.shape.dims[0].value or array_ops.shape(inputs)[0]\n dtype = inputs.dtype\n if None in [batch_size, dtype]:\n raise ValueError(\n \"batch_size and dtype cannot be None while constructing initial \"\n \"state: batch_size={}, dtype={}\".format(batch_size, dtype))\n return self.zero_state(batch_size, dtype)\n\n def zero_state(self, batch_size, dtype):\n \"\"\"Return zero-filled state tensor(s).\n\n Args:\n batch_size: int, float, or unit Tensor representing the batch size.\n dtype: the data type to use for the state.\n\n Returns:\n If `state_size` is an int or TensorShape, then the return value is a\n `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.\n\n If `state_size` is a nested list or tuple, then the return value is\n a nested list or tuple (of the same structure) of `2-D` tensors with\n the shapes `[batch_size, s]` for each s in `state_size`.\n \"\"\"\n # Try to use the last cached zero_state. This is done to avoid recreating\n # zeros, especially when eager execution is enabled.\n state_size = self.state_size\n is_eager = context.executing_eagerly()\n if is_eager and hasattr(self, \"_last_zero_state\"):\n (last_state_size, last_batch_size, last_dtype,\n last_output) = getattr(self, \"_last_zero_state\")\n if (last_batch_size == batch_size and\n last_dtype == dtype and\n last_state_size == state_size):\n return last_output\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n output = _zero_state_tensors(state_size, batch_size, dtype)\n if is_eager:\n self._last_zero_state = (state_size, batch_size, dtype, output)\n return output\n\n\nclass LayerRNNCell(RNNCell):\n \"\"\"Subclass of RNNCells that act like proper `tf.Layer` objects.\n\n For backwards compatibility purposes, most `RNNCell` instances allow their\n `call` methods to instantiate variables via `tf.get_variable`. The underlying\n variable scope thus keeps track of any variables, and returning cached\n versions. This is atypical of `tf.layer` objects, which separate this\n part of layer building into a `build` method that is only called once.\n\n Here we provide a subclass for `RNNCell` objects that act exactly as\n `Layer` objects do. 
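# A hedged usage sketch of the RNNCell contract documented above, assuming
# TF 1.x graph-mode APIs (tf.placeholder, tf.nn.rnn_cell.*); the tensor
# shapes and unit counts are illustrative only.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10])           # [batch_size, input_size]
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=32)
# zero_state builds zeros via _zero_state_tensors, even for a dynamic batch.
state = cell.zero_state(tf.shape(x)[0], tf.float32)
# One recurrent step: output and next_state are both [batch_size, 32] here.
output, next_state = cell(x, state)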
They must provide a `build` method and their\n `call` methods do not access Variables `tf.get_variable`.\n \"\"\"\n\n def __call__(self, inputs, state, scope=None, *args, **kwargs):\n \"\"\"Run this RNN cell on inputs, starting from the given state.\n\n Args:\n inputs: `2-D` tensor with shape `[batch_size, input_size]`.\n state: if `self.state_size` is an integer, this should be a `2-D Tensor`\n with shape `[batch_size, self.state_size]`. Otherwise, if\n `self.state_size` is a tuple of integers, this should be a tuple\n with shapes `[batch_size, s] for s in self.state_size`.\n scope: optional cell scope.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A pair containing:\n\n - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.\n - New state: Either a single `2-D` tensor, or a tuple of tensors matching\n the arity and shapes of `state`.\n \"\"\"\n # Bypass RNNCell's variable capturing semantics for LayerRNNCell.\n # Instead, it is up to subclasses to provide a proper build\n # method. See the class docstring for more details.\n return base_layer.Layer.__call__(self, inputs, state, scope=scope,\n *args, **kwargs)\n\n\n@tf_export(v1=[\"nn.rnn_cell.BasicRNNCell\"])\nclass BasicRNNCell(LayerRNNCell):\n \"\"\"The most basic RNN cell.\n\n Note that this cell is not optimized for performance. Please use\n `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.\n\n Args:\n num_units: int, The number of units in the RNN cell.\n activation: Nonlinearity to use. Default: `tanh`. It could also be string\n that is within Keras activation function names.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). Required when `build` is called before `call`.\n **kwargs: Dict, keyword named properties for common layer attributes, like\n `trainable` etc when constructing the cell from configs of get_config().\n \"\"\"\n\n @deprecated(None, \"This class is equivalent as tf.keras.layers.SimpleRNNCell,\"\n \" and will be replaced by that in Tensorflow 2.0.\")\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n name=None,\n dtype=None,\n **kwargs):\n super(BasicRNNCell, self).__init__(\n _reuse=reuse, name=name, dtype=dtype, **kwargs)\n if context.executing_eagerly() and context.num_gpus() > 0:\n logging.warn(\"%s: Note that this cell is not optimized for performance. 
\"\n \"Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better \"\n \"performance on GPU.\", self)\n\n # Inputs must be 2-dimensional.\n self.input_spec = input_spec.InputSpec(ndim=2)\n\n self._num_units = num_units\n if activation:\n self._activation = activations.get(activation)\n else:\n self._activation = math_ops.tanh\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n @tf_utils.shape_type_conversion\n def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % str(inputs_shape))\n\n input_depth = inputs_shape[-1]\n self._kernel = self.add_variable(\n _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + self._num_units, self._num_units])\n self._bias = self.add_variable(\n _BIAS_VARIABLE_NAME,\n shape=[self._num_units],\n initializer=init_ops.zeros_initializer(dtype=self.dtype))\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Most basic RNN: output = new_state = act(W * input + U * state + B).\"\"\"\n\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, state], 1), self._kernel)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n output = self._activation(gate_inputs)\n return output, output\n\n def get_config(self):\n config = {\n \"num_units\": self._num_units,\n \"activation\": activations.serialize(self._activation),\n \"reuse\": self._reuse,\n }\n base_config = super(BasicRNNCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf_export(v1=[\"nn.rnn_cell.GRUCell\"])\nclass GRUCell(LayerRNNCell):\n \"\"\"Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).\n\n Note that this cell is not optimized for performance. Please use\n `tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or\n `tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU.\n\n Args:\n num_units: int, The number of units in the GRU cell.\n activation: Nonlinearity to use. Default: `tanh`.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n kernel_initializer: (optional) The initializer to use for the weight and\n projection matrices.\n bias_initializer: (optional) The initializer to use for the bias.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). Required when `build` is called before `call`.\n **kwargs: Dict, keyword named properties for common layer attributes, like\n `trainable` etc when constructing the cell from configs of get_config().\n \"\"\"\n\n @deprecated(None, \"This class is equivalent as tf.keras.layers.GRUCell,\"\n \" and will be replaced by that in Tensorflow 2.0.\")\n def __init__(self,\n num_units,\n activation=None,\n reuse=None,\n kernel_initializer=None,\n bias_initializer=None,\n name=None,\n dtype=None,\n **kwargs):\n super(GRUCell, self).__init__(\n _reuse=reuse, name=name, dtype=dtype, **kwargs)\n\n if context.executing_eagerly() and context.num_gpus() > 0:\n logging.warn(\"%s: Note that this cell is not optimized for performance. 
\"\n \"Please use tf.contrib.cudnn_rnn.CudnnGRU for better \"\n \"performance on GPU.\", self)\n # Inputs must be 2-dimensional.\n self.input_spec = input_spec.InputSpec(ndim=2)\n\n self._num_units = num_units\n if activation:\n self._activation = activations.get(activation)\n else:\n self._activation = math_ops.tanh\n self._kernel_initializer = initializers.get(kernel_initializer)\n self._bias_initializer = initializers.get(bias_initializer)\n\n @property\n def state_size(self):\n return self._num_units\n\n @property\n def output_size(self):\n return self._num_units\n\n @tf_utils.shape_type_conversion\n def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % str(inputs_shape))\n\n input_depth = inputs_shape[-1]\n self._gate_kernel = self.add_variable(\n \"gates/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + self._num_units, 2 * self._num_units],\n initializer=self._kernel_initializer)\n self._gate_bias = self.add_variable(\n \"gates/%s\" % _BIAS_VARIABLE_NAME,\n shape=[2 * self._num_units],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)))\n self._candidate_kernel = self.add_variable(\n \"candidate/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + self._num_units, self._num_units],\n initializer=self._kernel_initializer)\n self._candidate_bias = self.add_variable(\n \"candidate/%s\" % _BIAS_VARIABLE_NAME,\n shape=[self._num_units],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.zeros_initializer(dtype=self.dtype)))\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Gated recurrent unit (GRU) with nunits cells.\"\"\"\n\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, state], 1), self._gate_kernel)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)\n\n value = math_ops.sigmoid(gate_inputs)\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n\n candidate = math_ops.matmul(\n array_ops.concat([inputs, r_state], 1), self._candidate_kernel)\n candidate = nn_ops.bias_add(candidate, self._candidate_bias)\n\n c = self._activation(candidate)\n new_h = u * state + (1 - u) * c\n return new_h, new_h\n\n def get_config(self):\n config = {\n \"num_units\": self._num_units,\n \"kernel_initializer\": initializers.serialize(self._kernel_initializer),\n \"bias_initializer\": initializers.serialize(self._bias_initializer),\n \"activation\": activations.serialize(self._activation),\n \"reuse\": self._reuse,\n }\n base_config = super(GRUCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n_LSTMStateTuple = collections.namedtuple(\"LSTMStateTuple\", (\"c\", \"h\"))\n\n\n@tf_export(\"nn.rnn_cell.LSTMStateTuple\")\nclass LSTMStateTuple(_LSTMStateTuple):\n \"\"\"Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.\n\n Stores two elements: `(c, h)`, in that order. 
Where `c` is the hidden state\n and `h` is the output.\n\n Only used when `state_is_tuple=True`.\n \"\"\"\n __slots__ = ()\n\n @property\n def dtype(self):\n (c, h) = self\n if c.dtype != h.dtype:\n raise TypeError(\"Inconsistent internal state: %s vs %s\" %\n (str(c.dtype), str(h.dtype)))\n return c.dtype\n\n\n@tf_export(v1=[\"nn.rnn_cell.BasicLSTMCell\"])\nclass BasicLSTMCell(LayerRNNCell):\n \"\"\"DEPRECATED: Please use `tf.nn.rnn_cell.LSTMCell` instead.\n\n Basic LSTM recurrent network cell.\n\n The implementation is based on: http://arxiv.org/abs/1409.2329.\n\n We add forget_bias (default: 1) to the biases of the forget gate in order to\n reduce the scale of forgetting in the beginning of the training.\n\n It does not allow cell clipping, a projection layer, and does not\n use peep-hole connections: it is the basic baseline.\n\n For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`\n that follows.\n\n Note that this cell is not optimized for performance. Please use\n `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or\n `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for\n better performance on CPU.\n \"\"\"\n\n @deprecated(None, \"This class is equivalent as tf.keras.layers.LSTMCell,\"\n \" and will be replaced by that in Tensorflow 2.0.\")\n def __init__(self,\n num_units,\n forget_bias=1.0,\n state_is_tuple=True,\n activation=None,\n reuse=None,\n name=None,\n dtype=None,\n **kwargs):\n \"\"\"Initialize the basic LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n forget_bias: float, The bias added to forget gates (see above).\n Must set to `0.0` manually when restoring from CudnnLSTM-trained\n checkpoints.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. If False, they are concatenated\n along the column axis. The latter behavior will soon be deprecated.\n activation: Activation function of the inner states. Default: `tanh`. It\n could also be string that is within Keras activation function names.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). Required when `build` is called before `call`.\n **kwargs: Dict, keyword named properties for common layer attributes, like\n `trainable` etc when constructing the cell from configs of get_config().\n\n When restoring from CudnnLSTM-trained checkpoints, must use\n `CudnnCompatibleLSTMCell` instead.\n \"\"\"\n super(BasicLSTMCell, self).__init__(\n _reuse=reuse, name=name, dtype=dtype, **kwargs)\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. Use state_is_tuple=True.\", self)\n if context.executing_eagerly() and context.num_gpus() > 0:\n logging.warn(\"%s: Note that this cell is not optimized for performance. 
\"\n \"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better \"\n \"performance on GPU.\", self)\n\n # Inputs must be 2-dimensional.\n self.input_spec = input_spec.InputSpec(ndim=2)\n\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n if activation:\n self._activation = activations.get(activation)\n else:\n self._activation = math_ops.tanh\n\n @property\n def state_size(self):\n return (LSTMStateTuple(self._num_units, self._num_units)\n if self._state_is_tuple else 2 * self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n @tf_utils.shape_type_conversion\n def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % str(inputs_shape))\n\n input_depth = inputs_shape[-1]\n h_depth = self._num_units\n self._kernel = self.add_variable(\n _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + h_depth, 4 * self._num_units])\n self._bias = self.add_variable(\n _BIAS_VARIABLE_NAME,\n shape=[4 * self._num_units],\n initializer=init_ops.zeros_initializer(dtype=self.dtype))\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Long short-term memory cell (LSTM).\n\n Args:\n inputs: `2-D` tensor with shape `[batch_size, input_size]`.\n state: An `LSTMStateTuple` of state tensors, each shaped\n `[batch_size, num_units]`, if `state_is_tuple` has been set to\n `True`. Otherwise, a `Tensor` shaped\n `[batch_size, 2 * num_units]`.\n\n Returns:\n A pair containing the new hidden state, and the new state (either a\n `LSTMStateTuple` or a concatenated state, depending on\n `state_is_tuple`).\n \"\"\"\n sigmoid = math_ops.sigmoid\n one = constant_op.constant(1, dtype=dtypes.int32)\n # Parameters of gates are concatenated into one multiply for efficiency.\n if self._state_is_tuple:\n c, h = state\n else:\n c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)\n\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, h], 1), self._kernel)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(\n value=gate_inputs, num_or_size_splits=4, axis=one)\n\n forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\n # Note that using `add` and `multiply` instead of `+` and `*` gives a\n # performance improvement. 
So using those at the cost of readability.\n add = math_ops.add\n multiply = math_ops.multiply\n new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),\n multiply(sigmoid(i), self._activation(j)))\n new_h = multiply(self._activation(new_c), sigmoid(o))\n\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return new_h, new_state\n\n def get_config(self):\n config = {\n \"num_units\": self._num_units,\n \"forget_bias\": self._forget_bias,\n \"state_is_tuple\": self._state_is_tuple,\n \"activation\": activations.serialize(self._activation),\n \"reuse\": self._reuse,\n }\n base_config = super(BasicLSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf_export(v1=[\"nn.rnn_cell.LSTMCell\"])\nclass LSTMCell(LayerRNNCell):\n \"\"\"Long short-term memory unit (LSTM) recurrent network cell.\n\n The default non-peephole implementation is based on:\n\n https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf\n\n Felix Gers, Jurgen Schmidhuber, and Fred Cummins.\n \"Learning to forget: Continual prediction with LSTM.\" IET, 850-855, 1999.\n\n The peephole implementation is based on:\n\n https://research.google.com/pubs/archive/43905.pdf\n\n Hasim Sak, Andrew Senior, and Francoise Beaufays.\n \"Long short-term memory recurrent neural network architectures for\n large scale acoustic modeling.\" INTERSPEECH, 2014.\n\n The class uses optional peep-hole connections, optional cell clipping, and\n an optional projection layer.\n\n Note that this cell is not optimized for performance. Please use\n `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or\n `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for\n better performance on CPU.\n \"\"\"\n\n @deprecated(None, \"This class is equivalent as tf.keras.layers.LSTMCell,\"\n \" and will be replaced by that in Tensorflow 2.0.\")\n def __init__(self, num_units,\n use_peepholes=False, cell_clip=None,\n initializer=None, num_proj=None, proj_clip=None,\n num_unit_shards=None, num_proj_shards=None,\n forget_bias=1.0, state_is_tuple=True,\n activation=None, reuse=None, name=None, dtype=None, **kwargs):\n \"\"\"Initialize the parameters for an LSTM cell.\n\n Args:\n num_units: int, The number of units in the LSTM cell.\n use_peepholes: bool, set True to enable diagonal/peephole connections.\n cell_clip: (optional) A float value, if provided the cell state is clipped\n by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is\n provided, then the projected values are clipped elementwise to within\n `[-proj_clip, proj_clip]`.\n num_unit_shards: Deprecated, will be removed by Jan. 2017.\n Use a variable_scope partitioner instead.\n num_proj_shards: Deprecated, will be removed by Jan. 2017.\n Use a variable_scope partitioner instead.\n forget_bias: Biases of the forget gate are initialized by default to 1\n in order to reduce the scale of forgetting at the beginning of\n the training. Must set it manually to `0.0` when restoring from\n CudnnLSTM trained checkpoints.\n state_is_tuple: If True, accepted and returned states are 2-tuples of\n the `c_state` and `m_state`. 
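# The three cell updates defined above (BasicRNNCell, GRUCell, and
# BasicLSTMCell), restated in NumPy as a hedged reference; the function
# names are illustrative and not part of the original module.
import numpy as np

def _sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

def basic_rnn_step(x, h, kernel, bias):
  # output == new state: tanh([x, h] @ kernel + bias)
  return np.tanh(np.concatenate([x, h], 1) @ kernel + bias)

def gru_step(x, h, gate_kernel, gate_bias, cand_kernel, cand_bias):
  # Reset (r) and update (u) gates come from one fused matmul.
  r, u = np.split(
      _sigmoid(np.concatenate([x, h], 1) @ gate_kernel + gate_bias), 2, 1)
  c = np.tanh(np.concatenate([x, r * h], 1) @ cand_kernel + cand_bias)
  return u * h + (1.0 - u) * c

def basic_lstm_step(x, c, h, kernel, bias, forget_bias=1.0):
  # i = input gate, j = new input, f = forget gate, o = output gate.
  i, j, f, o = np.split(np.concatenate([x, h], 1) @ kernel + bias, 4, 1)
  new_c = c * _sigmoid(f + forget_bias) + _sigmoid(i) * np.tanh(j)
  new_h = np.tanh(new_c) * _sigmoid(o)
  return new_h, (new_c, new_h)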
If False, they are concatenated\n along the column axis. This latter behavior will soon be deprecated.\n activation: Activation function of the inner states. Default: `tanh`. It\n could also be string that is within Keras activation function names.\n reuse: (optional) Python boolean describing whether to reuse variables\n in an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will\n share weights, but to avoid mistakes we require reuse=True in such\n cases.\n dtype: Default dtype of the layer (default of `None` means use the type\n of the first input). Required when `build` is called before `call`.\n **kwargs: Dict, keyword named properties for common layer attributes, like\n `trainable` etc when constructing the cell from configs of get_config().\n\n When restoring from CudnnLSTM-trained checkpoints, use\n `CudnnCompatibleLSTMCell` instead.\n \"\"\"\n super(LSTMCell, self).__init__(\n _reuse=reuse, name=name, dtype=dtype, **kwargs)\n if not state_is_tuple:\n logging.warn(\"%s: Using a concatenated state is slower and will soon be \"\n \"deprecated. Use state_is_tuple=True.\", self)\n if num_unit_shards is not None or num_proj_shards is not None:\n logging.warn(\n \"%s: The num_unit_shards and proj_unit_shards parameters are \"\n \"deprecated and will be removed in Jan 2017. \"\n \"Use a variable scope with a partitioner instead.\", self)\n if context.executing_eagerly() and context.num_gpus() > 0:\n logging.warn(\"%s: Note that this cell is not optimized for performance. \"\n \"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better \"\n \"performance on GPU.\", self)\n\n # Inputs must be 2-dimensional.\n self.input_spec = input_spec.InputSpec(ndim=2)\n\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._cell_clip = cell_clip\n self._initializer = initializers.get(initializer)\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._num_unit_shards = num_unit_shards\n self._num_proj_shards = num_proj_shards\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n if activation:\n self._activation = activations.get(activation)\n else:\n self._activation = math_ops.tanh\n\n if num_proj:\n self._state_size = (\n LSTMStateTuple(num_units, num_proj)\n if state_is_tuple else num_units + num_proj)\n self._output_size = num_proj\n else:\n self._state_size = (\n LSTMStateTuple(num_units, num_units)\n if state_is_tuple else 2 * num_units)\n self._output_size = num_units\n\n @property\n def state_size(self):\n return self._state_size\n\n @property\n def output_size(self):\n return self._output_size\n\n @tf_utils.shape_type_conversion\n def build(self, inputs_shape):\n if inputs_shape[-1] is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % str(inputs_shape))\n\n input_depth = inputs_shape[-1]\n h_depth = self._num_units if self._num_proj is None else self._num_proj\n maybe_partitioner = (\n partitioned_variables.fixed_size_partitioner(self._num_unit_shards)\n if self._num_unit_shards is not None\n else None)\n self._kernel = self.add_variable(\n _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + h_depth, 4 * self._num_units],\n initializer=self._initializer,\n partitioner=maybe_partitioner)\n if self.dtype is None:\n initializer = init_ops.zeros_initializer\n else:\n initializer = init_ops.zeros_initializer(dtype=self.dtype)\n self._bias = self.add_variable(\n _BIAS_VARIABLE_NAME,\n shape=[4 * 
self._num_units],\n initializer=initializer)\n if self._use_peepholes:\n self._w_f_diag = self.add_variable(\"w_f_diag\", shape=[self._num_units],\n initializer=self._initializer)\n self._w_i_diag = self.add_variable(\"w_i_diag\", shape=[self._num_units],\n initializer=self._initializer)\n self._w_o_diag = self.add_variable(\"w_o_diag\", shape=[self._num_units],\n initializer=self._initializer)\n\n if self._num_proj is not None:\n maybe_proj_partitioner = (\n partitioned_variables.fixed_size_partitioner(self._num_proj_shards)\n if self._num_proj_shards is not None\n else None)\n self._proj_kernel = self.add_variable(\n \"projection/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[self._num_units, self._num_proj],\n initializer=self._initializer,\n partitioner=maybe_proj_partitioner)\n\n self.built = True\n\n def call(self, inputs, state):\n \"\"\"Run one step of LSTM.\n\n Args:\n inputs: input Tensor, must be 2-D, `[batch, input_size]`.\n state: if `state_is_tuple` is False, this must be a state Tensor,\n `2-D, [batch, state_size]`. If `state_is_tuple` is True, this must be a\n tuple of state Tensors, both `2-D`, with column sizes `c_state` and\n `m_state`.\n\n Returns:\n A tuple containing:\n\n - A `2-D, [batch, output_dim]`, Tensor representing the output of the\n LSTM after reading `inputs` when previous state was `state`.\n Here output_dim is:\n num_proj if num_proj was set,\n num_units otherwise.\n - Tensor(s) representing the new state of LSTM after reading `inputs` when\n the previous state was `state`. Same type and shape(s) as `state`.\n\n Raises:\n ValueError: If input size cannot be inferred from inputs via\n static shape inference.\n \"\"\"\n num_proj = self._num_units if self._num_proj is None else self._num_proj\n sigmoid = math_ops.sigmoid\n\n if self._state_is_tuple:\n (c_prev, m_prev) = state\n else:\n c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])\n m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])\n\n input_size = inputs.get_shape().with_rank(2).dims[1].value\n if input_size is None:\n raise ValueError(\"Could not infer input size from inputs.get_shape()[-1]\")\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n lstm_matrix = math_ops.matmul(\n array_ops.concat([inputs, m_prev], 1), self._kernel)\n lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)\n\n i, j, f, o = array_ops.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n # Diagonal connections\n if self._use_peepholes:\n c = (sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +\n sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))\n else:\n c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *\n self._activation(j))\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n if self._use_peepholes:\n m = sigmoid(o + self._w_o_diag * c) * self._activation(c)\n else:\n m = sigmoid(o) * self._activation(c)\n\n if self._num_proj is not None:\n m = math_ops.matmul(m, self._proj_kernel)\n\n if self._proj_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)\n # pylint: enable=invalid-unary-operand-type\n\n new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else\n array_ops.concat([c, m], 1))\n return m, new_state\n\n def get_config(self):\n config = {\n \"num_units\": self._num_units,\n \"use_peepholes\": 
self._use_peepholes,\n \"cell_clip\": self._cell_clip,\n \"initializer\": initializers.serialize(self._initializer),\n \"num_proj\": self._num_proj,\n \"proj_clip\": self._proj_clip,\n \"num_unit_shards\": self._num_unit_shards,\n \"num_proj_shards\": self._num_proj_shards,\n \"forget_bias\": self._forget_bias,\n \"state_is_tuple\": self._state_is_tuple,\n \"activation\": activations.serialize(self._activation),\n \"reuse\": self._reuse,\n }\n base_config = super(LSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):\n ix = [0]\n def enumerated_fn(*inner_args, **inner_kwargs):\n r = map_fn(ix[0], *inner_args, **inner_kwargs)\n ix[0] += 1\n return r\n return nest.map_structure_up_to(shallow_structure,\n enumerated_fn, *args, **kwargs)\n\n\ndef _default_dropout_state_filter_visitor(substate):\n if isinstance(substate, LSTMStateTuple):\n # Do not perform dropout on the memory state.\n return LSTMStateTuple(c=False, h=True)\n elif isinstance(substate, tensor_array_ops.TensorArray):\n return False\n return True\n\n\n@tf_export(\"nn.rnn_cell.DropoutWrapper\")\nclass DropoutWrapper(RNNCell):\n \"\"\"Operator adding dropout to inputs and outputs of the given cell.\"\"\"\n\n def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,\n state_keep_prob=1.0, variational_recurrent=False,\n input_size=None, dtype=None, seed=None,\n dropout_state_filter_visitor=None):\n \"\"\"Create a cell with added input, state, and/or output dropout.\n\n If `variational_recurrent` is set to `True` (**NOT** the default behavior),\n then the same dropout mask is applied at every step, as described in:\n\n Y. Gal, Z Ghahramani. \"A Theoretically Grounded Application of Dropout in\n Recurrent Neural Networks\". https://arxiv.org/abs/1512.05287\n\n Otherwise a different dropout mask is applied at every time step.\n\n Note, by default (unless a custom `dropout_state_filter` is provided),\n the memory state (`c` component of any `LSTMStateTuple`) passing through\n a `DropoutWrapper` is never modified. This behavior is described in the\n above article.\n\n Args:\n cell: an RNNCell, a projection to output_size is added to it.\n input_keep_prob: unit Tensor or float between 0 and 1, input keep\n probability; if it is constant and 1, no input dropout will be added.\n output_keep_prob: unit Tensor or float between 0 and 1, output keep\n probability; if it is constant and 1, no output dropout will be added.\n state_keep_prob: unit Tensor or float between 0 and 1, output keep\n probability; if it is constant and 1, no output dropout will be added.\n State dropout is performed on the outgoing states of the cell.\n **Note** the state components to which dropout is applied when\n `state_keep_prob` is in `(0, 1)` are also determined by\n the argument `dropout_state_filter_visitor` (e.g. by default dropout\n is never applied to the `c` component of an `LSTMStateTuple`).\n variational_recurrent: Python bool. If `True`, then the same\n dropout pattern is applied across all time steps per run call.\n If this parameter is set, `input_size` **must** be provided.\n input_size: (optional) (possibly nested tuple of) `TensorShape` objects\n containing the depth(s) of the input tensors expected to be passed in to\n the `DropoutWrapper`. 
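# A hedged graph-mode sketch of the LSTMCell options implemented above
# (peepholes, cell clipping, and the num_proj output projection), assuming
# TF 1.x APIs; all sizes are illustrative.
import tensorflow as tf

cell = tf.nn.rnn_cell.LSTMCell(num_units=128, use_peepholes=True,
                               cell_clip=3.0, num_proj=64)
inputs = tf.placeholder(tf.float32, [8, 20, 50])     # [batch, time, depth]
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# With num_proj set, outputs are [8, 20, 64] and final_state.h is [8, 64],
# while the internal cell state final_state.c keeps width 128.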
Required and used **iff**\n `variational_recurrent = True` and `input_keep_prob < 1`.\n dtype: (optional) The `dtype` of the input, state, and output tensors.\n Required and used **iff** `variational_recurrent = True`.\n seed: (optional) integer, the randomness seed.\n dropout_state_filter_visitor: (optional), default: (see below). Function\n that takes any hierarchical level of the state and returns\n a scalar or depth=1 structure of Python booleans describing\n which terms in the state should be dropped out. In addition, if the\n function returns `True`, dropout is applied across this sublevel. If\n the function returns `False`, dropout is not applied across this entire\n sublevel.\n Default behavior: perform dropout on all terms except the memory (`c`)\n state of `LSTMCellState` objects, and don't try to apply dropout to\n `TensorArray` objects:\n ```\n def dropout_state_filter_visitor(s):\n if isinstance(s, LSTMCellState):\n # Never perform dropout on the c state.\n return LSTMCellState(c=False, h=True)\n elif isinstance(s, TensorArray):\n return False\n return True\n ```\n\n Raises:\n TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided\n but not `callable`.\n ValueError: if any of the keep_probs are not between 0 and 1.\n \"\"\"\n super(DropoutWrapper, self).__init__()\n assert_like_rnncell(\"cell\", cell)\n\n if (dropout_state_filter_visitor is not None\n and not callable(dropout_state_filter_visitor)):\n raise TypeError(\"dropout_state_filter_visitor must be callable\")\n self._dropout_state_filter = (\n dropout_state_filter_visitor or _default_dropout_state_filter_visitor)\n with ops.name_scope(\"DropoutWrapperInit\"):\n def tensor_and_const_value(v):\n tensor_value = ops.convert_to_tensor(v)\n const_value = tensor_util.constant_value(tensor_value)\n return (tensor_value, const_value)\n for prob, attr in [(input_keep_prob, \"input_keep_prob\"),\n (state_keep_prob, \"state_keep_prob\"),\n (output_keep_prob, \"output_keep_prob\")]:\n tensor_prob, const_prob = tensor_and_const_value(prob)\n if const_prob is not None:\n if const_prob < 0 or const_prob > 1:\n raise ValueError(\"Parameter %s must be between 0 and 1: %d\"\n % (attr, const_prob))\n setattr(self, \"_%s\" % attr, float(const_prob))\n else:\n setattr(self, \"_%s\" % attr, tensor_prob)\n\n # Set cell, variational_recurrent, seed before running the code below\n self._cell = cell\n if isinstance(cell, checkpointable.CheckpointableBase):\n self._track_checkpointable(self._cell, name=\"cell\")\n self._variational_recurrent = variational_recurrent\n self._seed = seed\n\n self._recurrent_input_noise = None\n self._recurrent_state_noise = None\n self._recurrent_output_noise = None\n\n if variational_recurrent:\n if dtype is None:\n raise ValueError(\n \"When variational_recurrent=True, dtype must be provided\")\n\n def convert_to_batch_shape(s):\n # Prepend a 1 for the batch dimension; for recurrent\n # variational dropout we use the same dropout mask for all\n # batch elements.\n return array_ops.concat(\n ([1], tensor_shape.TensorShape(s).as_list()), 0)\n\n def batch_noise(s, inner_seed):\n shape = convert_to_batch_shape(s)\n return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)\n\n if (not isinstance(self._input_keep_prob, numbers.Real) or\n self._input_keep_prob < 1.0):\n if input_size is None:\n raise ValueError(\n \"When variational_recurrent=True and input_keep_prob < 1.0 or \"\n \"is unknown, input_size must be provided\")\n self._recurrent_input_noise = _enumerated_map_structure_up_to(\n 
input_size,\n lambda i, s: batch_noise(s, inner_seed=self._gen_seed(\"input\", i)),\n input_size)\n self._recurrent_state_noise = _enumerated_map_structure_up_to(\n cell.state_size,\n lambda i, s: batch_noise(s, inner_seed=self._gen_seed(\"state\", i)),\n cell.state_size)\n self._recurrent_output_noise = _enumerated_map_structure_up_to(\n cell.output_size,\n lambda i, s: batch_noise(s, inner_seed=self._gen_seed(\"output\", i)),\n cell.output_size)\n\n def _gen_seed(self, salt_prefix, index):\n if self._seed is None:\n return None\n salt = \"%s_%d\" % (salt_prefix, index)\n string = (str(self._seed) + salt).encode(\"utf-8\")\n return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF\n\n @property\n def wrapped_cell(self):\n return self._cell\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n return self._cell.zero_state(batch_size, dtype)\n\n def _variational_recurrent_dropout_value(\n self, index, value, noise, keep_prob):\n \"\"\"Performs dropout given the pre-calculated noise tensor.\"\"\"\n # uniform [keep_prob, 1.0 + keep_prob)\n random_tensor = keep_prob + noise\n\n # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)\n binary_tensor = math_ops.floor(random_tensor)\n ret = math_ops.div(value, keep_prob) * binary_tensor\n ret.set_shape(value.get_shape())\n return ret\n\n def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob,\n shallow_filtered_substructure=None):\n \"\"\"Decides whether to perform standard dropout or recurrent dropout.\"\"\"\n\n if shallow_filtered_substructure is None:\n # Put something so we traverse the entire structure; inside the\n # dropout function we check to see if leafs of this are bool or not.\n shallow_filtered_substructure = values\n\n if not self._variational_recurrent:\n def dropout(i, do_dropout, v):\n if not isinstance(do_dropout, bool) or do_dropout:\n return nn_ops.dropout(\n v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))\n else:\n return v\n return _enumerated_map_structure_up_to(\n shallow_filtered_substructure, dropout,\n *[shallow_filtered_substructure, values])\n else:\n def dropout(i, do_dropout, v, n):\n if not isinstance(do_dropout, bool) or do_dropout:\n return self._variational_recurrent_dropout_value(i, v, n, keep_prob)\n else:\n return v\n return _enumerated_map_structure_up_to(\n shallow_filtered_substructure, dropout,\n *[shallow_filtered_substructure, values, recurrent_noise])\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell with the declared dropouts.\"\"\"\n def _should_dropout(p):\n return (not isinstance(p, float)) or p < 1\n\n if _should_dropout(self._input_keep_prob):\n inputs = self._dropout(inputs, \"input\",\n self._recurrent_input_noise,\n self._input_keep_prob)\n output, new_state = self._cell(inputs, state, scope=scope)\n if _should_dropout(self._state_keep_prob):\n # Identify which subsets of the state to perform dropout on and\n # which ones to keep.\n shallow_filtered_substructure = nest.get_traverse_shallow_structure(\n self._dropout_state_filter, new_state)\n new_state = self._dropout(new_state, \"state\",\n self._recurrent_state_noise,\n self._state_keep_prob,\n shallow_filtered_substructure)\n if _should_dropout(self._output_keep_prob):\n output = self._dropout(output, \"output\",\n self._recurrent_output_noise,\n self._output_keep_prob)\n 
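# All three dropout sites above are no-ops when their keep probability is
# statically 1.0 (_should_dropout returns False for a float p >= 1).
# Hedged construction example (TF 1.x): tf.nn.rnn_cell.DropoutWrapper(
#     cell, output_keep_prob=0.9, state_keep_prob=0.9,
#     variational_recurrent=True, dtype=tf.float32)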
return output, new_state\n\n\n@tf_export(\"nn.rnn_cell.ResidualWrapper\")\nclass ResidualWrapper(RNNCell):\n \"\"\"RNNCell wrapper that ensures cell inputs are added to the outputs.\"\"\"\n\n def __init__(self, cell, residual_fn=None):\n \"\"\"Constructs a `ResidualWrapper` for `cell`.\n\n Args:\n cell: An instance of `RNNCell`.\n residual_fn: (Optional) The function to map raw cell inputs and raw cell\n outputs to the actual cell outputs of the residual network.\n Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs\n and outputs.\n \"\"\"\n super(ResidualWrapper, self).__init__()\n self._cell = cell\n if isinstance(cell, checkpointable.CheckpointableBase):\n self._track_checkpointable(self._cell, name=\"cell\")\n self._residual_fn = residual_fn\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n return self._cell.zero_state(batch_size, dtype)\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell and then apply the residual_fn on its inputs to its outputs.\n\n Args:\n inputs: cell inputs.\n state: cell state.\n scope: optional cell scope.\n\n Returns:\n Tuple of cell outputs and new state.\n\n Raises:\n TypeError: If cell inputs and outputs have different structure (type).\n ValueError: If cell inputs and outputs have different structure (value).\n \"\"\"\n outputs, new_state = self._cell(inputs, state, scope=scope)\n # Ensure shapes match\n def assert_shape_match(inp, out):\n inp.get_shape().assert_is_compatible_with(out.get_shape())\n def default_residual_fn(inputs, outputs):\n nest.assert_same_structure(inputs, outputs)\n nest.map_structure(assert_shape_match, inputs, outputs)\n return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)\n res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)\n return (res_outputs, new_state)\n\n\n@tf_export(\"nn.rnn_cell.DeviceWrapper\")\nclass DeviceWrapper(RNNCell):\n \"\"\"Operator that ensures an RNNCell runs on a particular device.\"\"\"\n\n def __init__(self, cell, device):\n \"\"\"Construct a `DeviceWrapper` for `cell` with device `device`.\n\n Ensures the wrapped `cell` is called with `tf.device(device)`.\n\n Args:\n cell: An instance of `RNNCell`.\n device: A device string or function, for passing to `tf.device`.\n \"\"\"\n super(DeviceWrapper, self).__init__()\n self._cell = cell\n if isinstance(cell, checkpointable.CheckpointableBase):\n self._track_checkpointable(self._cell, name=\"cell\")\n self._device = device\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n with ops.device(self._device):\n return self._cell.zero_state(batch_size, dtype)\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Run the cell on specified device.\"\"\"\n with ops.device(self._device):\n return self._cell(inputs, state, scope=scope)\n\n\n@tf_export(v1=[\"nn.rnn_cell.MultiRNNCell\"])\nclass MultiRNNCell(RNNCell):\n \"\"\"RNN cell composed sequentially of multiple simple cells.\n\n Example:\n\n ```python\n num_units = [128, 64]\n cells = [BasicLSTMCell(num_units=n) for n in num_units]\n stacked_rnn_cell = MultiRNNCell(cells)\n ```\n \"\"\"\n\n @deprecated(None, 
\"This class is equivalent as \"\n \"tf.keras.layers.StackedRNNCells, and will be replaced by \"\n \"that in Tensorflow 2.0.\")\n def __init__(self, cells, state_is_tuple=True):\n \"\"\"Create a RNN cell composed sequentially of a number of RNNCells.\n\n Args:\n cells: list of RNNCells that will be composed in this order.\n state_is_tuple: If True, accepted and returned states are n-tuples, where\n `n = len(cells)`. If False, the states are all\n concatenated along the column axis. This latter behavior will soon be\n deprecated.\n\n Raises:\n ValueError: if cells is empty (not allowed), or at least one of the cells\n returns a state tuple but the flag `state_is_tuple` is `False`.\n \"\"\"\n super(MultiRNNCell, self).__init__()\n if not cells:\n raise ValueError(\"Must specify at least one cell for MultiRNNCell.\")\n if not nest.is_sequence(cells):\n raise TypeError(\n \"cells must be a list or tuple, but saw: %s.\" % cells)\n\n if len(set([id(cell) for cell in cells])) < len(cells):\n logging.log_first_n(logging.WARN,\n \"At least two cells provided to MultiRNNCell \"\n \"are the same object and will share weights.\", 1)\n\n self._cells = cells\n for cell_number, cell in enumerate(self._cells):\n # Add Checkpointable dependencies on these cells so their variables get\n # saved with this object when using object-based saving.\n if isinstance(cell, checkpointable.CheckpointableBase):\n # TODO(allenl): Track down non-Checkpointable callers.\n self._track_checkpointable(cell, name=\"cell-%d\" % (cell_number,))\n self._state_is_tuple = state_is_tuple\n if not state_is_tuple:\n if any(nest.is_sequence(c.state_size) for c in self._cells):\n raise ValueError(\"Some cells return tuples of states, but the flag \"\n \"state_is_tuple is not set. State sizes are: %s\"\n % str([c.state_size for c in self._cells]))\n\n @property\n def state_size(self):\n if self._state_is_tuple:\n return tuple(cell.state_size for cell in self._cells)\n else:\n return sum(cell.state_size for cell in self._cells)\n\n @property\n def output_size(self):\n return self._cells[-1].output_size\n\n def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n if self._state_is_tuple:\n return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)\n else:\n # We know here that state_size of each cell is not a tuple and\n # presumably does not contain TensorArrays or anything else fancy\n return super(MultiRNNCell, self).zero_state(batch_size, dtype)\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n weights = []\n for cell in self._cells:\n if isinstance(cell, base_layer.Layer):\n weights += cell.trainable_weights\n return weights\n\n @property\n def non_trainable_weights(self):\n weights = []\n for cell in self._cells:\n if isinstance(cell, base_layer.Layer):\n weights += cell.non_trainable_weights\n if not self.trainable:\n trainable_weights = []\n for cell in self._cells:\n if isinstance(cell, base_layer.Layer):\n trainable_weights += cell.trainable_weights\n return trainable_weights + weights\n return weights\n\n def call(self, inputs, state):\n \"\"\"Run this multi-layer cell on inputs, starting from state.\"\"\"\n cur_state_pos = 0\n cur_inp = inputs\n new_states = []\n for i, cell in enumerate(self._cells):\n with vs.variable_scope(\"cell_%d\" % i):\n if self._state_is_tuple:\n if not nest.is_sequence(state):\n raise ValueError(\n \"Expected state to be a tuple of length %d, but received: %s\" %\n (len(self.state_size), 
state))\n cur_state = state[i]\n else:\n cur_state = array_ops.slice(state, [0, cur_state_pos],\n [-1, cell.state_size])\n cur_state_pos += cell.state_size\n cur_inp, new_state = cell(cur_inp, cur_state)\n new_states.append(new_state)\n\n new_states = (tuple(new_states) if self._state_is_tuple else\n array_ops.concat(new_states, 1))\n\n return cur_inp, new_states\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n# ==============================================================================\n\"\"\"Tests for KafkaDataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.kafka.python.ops import kafka_dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.platform import test\n\n\nclass KafkaDatasetTest(test.TestCase):\n\n def setUp(self):\n # The Kafka server has to be set up before the test\n # and torn down after the test manually.\n # The docker engine has to be installed.\n #\n # To set up the Kafka server:\n # $ bash kafka_test.sh start kafka\n #\n # To tear down the Kafka server:\n # $ bash kafka_test.sh stop kafka\n pass\n\n def testKafkaDataset(self):\n topics = array_ops.placeholder(dtypes.string, shape=[None])\n num_epochs = array_ops.placeholder(dtypes.int64, shape=[])\n batch_size = array_ops.placeholder(dtypes.int64, shape=[])\n\n repeat_dataset = kafka_dataset_ops.KafkaDataset(\n topics, group=\"test\", eof=True).repeat(num_epochs)\n batch_dataset = repeat_dataset.batch(batch_size)\n\n iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)\n init_op = iterator.make_initializer(repeat_dataset)\n init_batch_op = iterator.make_initializer(batch_dataset)\n get_next = iterator.get_next()\n\n with self.cached_session() as sess:\n # Basic test: read from topic 0.\n sess.run(init_op, feed_dict={topics: [\"test:0:0:4\"], num_epochs: 1})\n for i in range(5):\n self.assertEqual(\"D\" + str(i), sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Basic test: read from topic 1.\n sess.run(init_op, feed_dict={topics: [\"test:0:5:-1\"], num_epochs: 1})\n for i in range(5):\n self.assertEqual(\"D\" + str(i + 5), sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Basic test: read from both topics.\n sess.run(\n init_op,\n feed_dict={\n topics: [\"test:0:0:4\", \"test:0:5:-1\"],\n num_epochs: 1\n })\n for j in range(2):\n for i in range(5):\n self.assertEqual(\"D\" + str(i + j * 5), sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Test repeated iteration through both topics.\n sess.run(\n init_op,\n feed_dict={\n topics: [\"test:0:0:4\", \"test:0:5:-1\"],\n num_epochs: 10\n })\n for _ in range(10):\n for j in range(2):\n for i in range(5):\n self.assertEqual(\"D\" + str(i + j * 5), sess.run(get_next))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next)\n\n # Test batched and repeated iteration through both topics.\n sess.run(\n init_batch_op,\n feed_dict={\n topics: [\"test:0:0:4\", \"test:0:5:-1\"],\n num_epochs: 10,\n batch_size: 5\n })\n for _ in range(10):\n self.assertAllEqual([\"D\" + str(i) for i in range(5)],\n sess.run(get_next))\n self.assertAllEqual([\"D\" + str(i + 5) for i in range(5)],\n sess.run(get_next))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for pretty_printer module.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ast\n\nfrom tensorflow.python.autograph.pyct import pretty_printer\nfrom tensorflow.python.platform import test\n\n\nclass PrettyPrinterTest(test.TestCase):\n\n def test_format(self):\n node = ast.FunctionDef(\n name='f',\n args=ast.arguments(\n args=[ast.Name(id='a', ctx=ast.Param())],\n vararg=None,\n kwarg=None,\n defaults=[]),\n body=[\n ast.Return(\n ast.BinOp(\n op=ast.Add(),\n left=ast.Name(id='a', ctx=ast.Load()),\n right=ast.Num(1)))\n ],\n decorator_list=[],\n returns=None)\n # Just checking for functionality; the color control characters make it\n # difficult to inspect the result.\n self.assertIsNotNone(pretty_printer.fmt(node))\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport io\nimport os\nimport random\nimport re\n\nimport numpy as np\n\nfrom tensorflow.core.profiler import profile_pb2\nfrom tensorflow.core.profiler import tfprof_log_pb2\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.profiler import model_analyzer\nfrom tensorflow.python.profiler import option_builder\nfrom tensorflow.python.profiler import profile_context\nfrom tensorflow.python.profiler.internal import model_analyzer_testlib as lib\nfrom tensorflow.python.util import compat\n\nbuilder = option_builder.ProfileOptionBuilder\n\n\nclass PrintModelAnalysisTest(test.TestCase):\n\n def _no_rewrite_session_config(self):\n rewriter_config = rewriter_config_pb2.RewriterConfig(\n pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF)\n graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)\n return config_pb2.ConfigProto(graph_options=graph_options)\n\n def testDumpToFile(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n opts = builder(builder.trainable_variables_parameter()\n ).with_file_output(outfile).build()\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n _ = lib.BuildSmallModel()\n model_analyzer.profile(sess.graph, options=opts)\n\n with gfile.Open(outfile, 'r') as f:\n self.assertEqual(u'node name | # parameters\\n'\n '_TFProfRoot (--/451 params)\\n'\n ' DW (3x3x3x6, 162/162 params)\\n'\n ' DW2 (2x2x6x12, 288/288 params)\\n'\n ' ScalarW (1, 1/1 params)\\n',\n lib.CheckAndRemoveDoc(f.read()))\n\n @test_util.run_v1_only('b/120545219')\n def testSelectEverythingDetail(self):\n ops.reset_default_graph()\n dev = '/device:GPU:0' if test.is_gpu_available() else '/device:CPU:0'\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n opts = (builder(builder.trainable_variables_parameter())\n .with_file_output(outfile)\n .with_accounted_types(['.*'])\n .select(['micros', 'bytes', 'params', 'float_ops', 'occurrence',\n 'device', 'op_types', 'input_shapes']).build())\n\n with profile_context.ProfileContext(test.get_temp_dir(),\n trace_steps=[],\n dump_steps=[]) as pctx:\n with session.Session(\n 
config=self._no_rewrite_session_config()) as sess, ops.device(dev):\n x = lib.BuildSmallModel()\n\n self.evaluate(variables.global_variables_initializer())\n pctx.trace_next_step()\n pctx.dump_next_step()\n _ = self.evaluate(x)\n\n pctx.profiler.profile_name_scope(options=opts)\n\n with gfile.Open(outfile, 'r') as f:\n # pylint: disable=line-too-long\n dump_str = lib.CheckAndRemoveDoc(f.read())\n outputs = dump_str.split('\\n')\n\n self.assertEqual(outputs[0],\n 'node name | # parameters | # float_ops | requested bytes | total execution time | accelerator execution time | cpu execution time | assigned devices | op types | op count (run|defined) | input shapes')\n for o in outputs[1:]:\n if o.find('Conv2D ') > 0:\n metrics = o[o.find('(') +1: o.find(')')].split(',')\n # Make sure time is profiled.\n gap = 1 if test.is_gpu_available() else 2\n for i in range(3, 6, gap):\n mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])\n self.assertGreater(float(mat.group(1)), 0.0)\n self.assertGreater(float(mat.group(2)), 0.0)\n # Make sure device is profiled.\n if test.is_gpu_available():\n self.assertTrue(metrics[6].find('gpu') > 0)\n self.assertFalse(metrics[6].find('cpu') > 0)\n else:\n self.assertFalse(metrics[6].find('gpu') > 0)\n self.assertTrue(metrics[6].find('cpu') > 0)\n # Make sure float_ops is profiled.\n mat = re.search('(.*)k/(.*)k flops', metrics[1].strip())\n self.assertGreater(float(mat.group(1)), 0.0)\n self.assertGreater(float(mat.group(2)), 0.0)\n # Make sure op_count is profiled.\n self.assertEqual(metrics[8].strip(), '1/1|1/1')\n # Make sure input_shapes is profiled.\n self.assertEqual(metrics[9].strip(), '0:2x6x6x3|1:3x3x3x6')\n\n if o.find('DW (3x3x3x6') > 0:\n metrics = o[o.find('(') +1: o.find(')')].split(',')\n mat = re.search('(.*)/(.*) params', metrics[1].strip())\n self.assertGreater(float(mat.group(1)), 0.0)\n self.assertGreater(float(mat.group(2)), 0.0)\n # pylint: enable=line-too-long\n\n # Test that profiler restored from profile file gives the same result.\n gfile.Remove(outfile)\n profile_file = os.path.join(test.get_temp_dir(), 'profile_1')\n with lib.ProfilerFromFile(profile_file) as profiler:\n profiler.profile_name_scope(options=opts)\n with gfile.Open(outfile, 'r') as f:\n self.assertEqual(dump_str, lib.CheckAndRemoveDoc(f.read()))\n\n def testSelectEverything(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n opts = (builder(builder.trainable_variables_parameter())\n .with_file_output(outfile)\n .with_accounted_types(['.*'])\n .select(['params', 'float_ops', 'occurrence', 'device', 'op_types',\n 'input_shapes']).build())\n\n with session.Session(config=self._no_rewrite_session_config()\n ) as sess, ops.device('/device:CPU:0'):\n x = lib.BuildSmallModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n model_analyzer.profile(\n sess.graph, run_meta, options=opts)\n\n def testSimpleCodeView(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n # TODO(xpan): Test 'micros'. 
Since the execution time changes each run,\n # it's a bit difficult to test it now.\n opts = (builder(builder.trainable_variables_parameter())\n .with_file_output(outfile)\n .with_accounted_types(['.*'])\n .with_node_names(show_name_regexes=['.*model_analyzer_testlib.*'])\n .account_displayed_op_only(False)\n .select(['bytes', 'params', 'float_ops', 'num_hidden_ops', 'device',\n 'input_shapes']).build())\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildSmallModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n model_analyzer.profile(\n sess.graph, run_meta, cmd='code', options=opts)\n\n with gfile.Open(outfile, 'r') as f:\n # pylint: disable=line-too-long\n self.assertEqual(\n 'node name | requested bytes | # parameters | # float_ops | assigned devices | in',\n lib.CheckAndRemoveDoc(f.read())[0:80])\n # pylint: enable=line-too-long\n\n @test_util.run_v1_only('b/120545219')\n def testComplexCodeView(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n opts = (builder(builder.trainable_variables_parameter())\n .with_file_output(outfile)\n .with_accounted_types(['.*'])\n .with_node_names(show_name_regexes=\n ['.*model_analyzer_testlib.py.*'])\n .account_displayed_op_only(False)\n .select(['params', 'float_ops']).build())\n\n with profile_context.ProfileContext(test.get_temp_dir(),\n trace_steps=[],\n dump_steps=[]) as pctx:\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildFullModel()\n\n self.evaluate(variables.global_variables_initializer())\n pctx.trace_next_step()\n _ = self.evaluate(x)\n tfprof_node = pctx.profiler.profile_python(options=opts)\n\n # pylint: disable=line-too-long\n with gfile.Open(outfile, 'r') as f:\n lines = f.read().split('\\n')\n self.assertGreater(len(lines), 5)\n result = '\\n'.join([l[:min(len(l), 80)] for l in lines])\n self.assertTrue(\n compat.as_text(lib.CheckAndRemoveDoc(result))\n .startswith('node name | # parameters | # float_ops'))\n\n self.assertLess(0, tfprof_node.total_exec_micros)\n self.assertEqual(2844, tfprof_node.total_parameters)\n # The graph is modified when MKL is enabled; total_float_ops will\n # be different.\n if test_util.IsMklEnabled():\n self.assertLess(101600, tfprof_node.total_float_ops)\n else:\n self.assertLess(145660, tfprof_node.total_float_ops)\n self.assertEqual(8, len(tfprof_node.children))\n self.assertEqual('_TFProfRoot', tfprof_node.name)\n self.assertEqual(\n 'model_analyzer_testlib.py:63:BuildFullModel',\n tfprof_node.children[0].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:63:BuildFullModel (gradient)',\n tfprof_node.children[1].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:67:BuildFullModel',\n tfprof_node.children[2].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:67:BuildFullModel (gradient)',\n tfprof_node.children[3].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:69:BuildFullModel',\n tfprof_node.children[4].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:70:BuildFullModel',\n tfprof_node.children[5].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:70:BuildFullModel (gradient)',\n tfprof_node.children[6].name)\n self.assertEqual(\n 'model_analyzer_testlib.py:72:BuildFullModel',\n tfprof_node.children[7].name)\n # pylint: enable=line-too-long\n\n def testCodeViewLeafGraphNode(self):\n 
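# Only the deepest (leaf) code frame is expected to own graph nodes;\n # the intermediate frames walked below should carry none of their own.\n 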
ops.reset_default_graph()\n opts = (builder(builder.trainable_variables_parameter())\n .with_empty_output()\n .with_accounted_types(['.*'])\n .account_displayed_op_only(False)\n .select(['bytes', 'params', 'float_ops', 'device']).build())\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildSmallModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta, cmd='code', options=opts)\n\n leaf = tfprof_node\n while leaf.children:\n self.assertEqual(0, len(leaf.graph_nodes))\n leaf = leaf.children[0]\n self.assertEqual(1, len(leaf.graph_nodes))\n\n def testTimeline(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'timeline')\n opts = (builder(builder.trainable_variables_parameter())\n .with_max_depth(100000)\n .with_step(0)\n .with_timeline_output(outfile)\n .with_accounted_types(['.*']).build())\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildFullModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(\n x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n _ = model_analyzer.profile(\n sess.graph, run_meta, cmd='graph', options=opts)\n\n with gfile.Open(outfile + '_0', 'r') as f:\n # Test that a json file is created.\n # TODO(xpan): tfprof Timeline isn't quite correct on Windows.\n # Investigate why.\n if os.name != 'nt':\n self.assertLess(1000, len(f.read()))\n else:\n self.assertLess(1, len(f.read()))\n\n def testOpView(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n\n opts = (builder(builder.trainable_variables_parameter())\n .with_file_output(outfile)\n .with_accounted_types(['.*'])\n .with_min_occurrence(10)\n .order_by('occurrence')\n .select(['params', 'micros', 'bytes',\n 'peak_bytes', 'residual_bytes',\n 'output_bytes', 'occurrence', 'input_shapes']).build())\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildFullModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta, cmd='op', options=opts)\n\n with gfile.Open(outfile, 'r') as f:\n # pylint: disable=line-too-long\n self.assertEqual(\n 'nodename|requestedbytes|peakbytes|residualbytes|outputbytes|totalexecutiontime|acceleratorexecutiontime|cpuexecutiontime|#parameters|opoccurrence(run|defined)|inputshapes',\n lib.CheckAndRemoveDoc(f.read()).replace('\\t',\n '').replace(' ', '')[0:170])\n # pylint: enable=line-too-long\n\n total_children = 0\n last_occurrence = 1e32\n input_shapes = 0\n last_total_micros = tfprof_node.total_exec_micros\n last_micros = tfprof_node.exec_micros\n while tfprof_node.children:\n for gnode in tfprof_node.graph_nodes:\n input_shapes += len(gnode.input_shapes)\n self.assertEqual(len(tfprof_node.children), 1)\n tfprof_node = tfprof_node.children[0]\n\n self.assertEqual(\n last_total_micros, tfprof_node.total_exec_micros + last_micros)\n last_total_micros = tfprof_node.total_exec_micros\n last_micros = tfprof_node.exec_micros\n\n 
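# Each level of the op view should appear in non-increasing occurrence\n # order, since the profile above was ordered by 'occurrence'.\n 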
total_children += 1\n self.assertLessEqual(len(tfprof_node.graph_nodes), last_occurrence)\n last_occurrence = len(tfprof_node.graph_nodes)\n\n self.assertGreater(input_shapes, 0)\n\n def testAdvisor(self):\n ops.reset_default_graph()\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildFullModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(\n x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n advice_pb = model_analyzer.advise(sess.graph, run_meta)\n self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)\n self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)\n self.assertTrue('OperationChecker' in advice_pb.checkers)\n\n checker = advice_pb.checkers['AcceleratorUtilizationChecker']\n if test.is_gpu_available():\n self.assertGreater(len(checker.reports), 0)\n else:\n self.assertEqual(len(checker.reports), 0)\n checker = advice_pb.checkers['ExpensiveOperationChecker']\n self.assertGreater(len(checker.reports), 0)\n\n def pprof_test_helper(self, attribute, should_fail=False):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), attribute + '_pprof.pb.gz')\n opts = (builder(builder.time_and_memory())\n .select([attribute])\n .with_max_depth(100000)\n .with_node_names(trim_name_regexes=['ops.py.*'])\n .with_pprof_output(outfile).build())\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildFullModel()\n\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(\n x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n _ = model_analyzer.profile(\n sess.graph, run_meta, cmd='code', options=opts)\n\n if should_fail:\n self.assertFalse(gfile.Exists(outfile))\n return\n\n profile_pb = profile_pb2.Profile()\n with gfile.Open(outfile, 'rb') as f:\n with gzip.GzipFile(fileobj=io.BytesIO(f.read())) as gzipf:\n profile_pb.ParseFromString(gzipf.read())\n\n self.assertGreater(len(profile_pb.sample), 10)\n self.assertGreater(len(profile_pb.location), 10)\n self.assertGreater(len(profile_pb.function), 10)\n self.assertGreater(len(profile_pb.string_table), 30)\n\n has_rnn = False\n has_loop = False\n for s in profile_pb.string_table:\n if s.find('rnn') > 0:\n has_rnn = True\n if s.find('while') > 0:\n has_loop = True\n self.assertFalse(s.startswith('ops.py'))\n self.assertTrue(has_rnn)\n self.assertTrue(has_loop)\n\n def testPprof(self):\n for attr in ['micros', 'bytes', 'accelerator_micros', 'cpu_micros',\n 'params', 'float_ops']:\n self.pprof_test_helper(attr)\n for attr in ['op_types', 'device', 'input_shapes']:\n self.pprof_test_helper(attr, True)\n\n def testMinOption(self):\n ops.reset_default_graph()\n\n def check_min(nodes, mm=0, mam=0, mcm=0, mb=0, mpb=0, mrb=0, mob=0):\n for n in nodes:\n if mm > 0:\n self.assertGreaterEqual(n.exec_micros, mm)\n if mam > 0:\n self.assertGreaterEqual(n.accelerator_exec_micros, mam)\n if mcm > 0:\n self.assertGreaterEqual(n.cpu_exec_micros, mcm)\n if mb > 0:\n self.assertGreaterEqual(n.requested_bytes, mb)\n if mpb > 0:\n self.assertGreaterEqual(n.peak_bytes, mpb)\n if mrb > 0:\n self.assertGreaterEqual(n.residual_bytes, mrb)\n if mob > 0:\n self.assertGreaterEqual(n.output_bytes, mob)\n check_min(n.children, mm, mam, mcm, mb, mpb, mrb, mob)\n\n with 
session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildSmallModel()\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n min_val = random.randint(0, 10000)\n\n opts = builder(builder.time_and_memory(min_micros=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mm=min_val)\n\n opts = builder(builder.time_and_memory(min_accelerator_micros=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mam=min_val)\n\n opts = builder(builder.time_and_memory(min_cpu_micros=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mcm=min_val)\n\n opts = builder(builder.time_and_memory(min_bytes=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mb=min_val)\n\n opts = builder(builder.time_and_memory(min_peak_bytes=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mpb=min_val)\n\n opts = builder(builder.time_and_memory(min_residual_bytes=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mrb=min_val)\n\n opts = builder(builder.time_and_memory(min_output_bytes=min_val)\n ).with_empty_output().build()\n tfprof_node = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_min(tfprof_node.children, mob=min_val)\n\n def testSelectOption(self):\n ops.reset_default_graph()\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n\n def check_selection(selected, not_selected):\n with gfile.Open(outfile, 'r') as f:\n s = f.read()\n for attr in selected:\n self.assertTrue(s.find(attr) > 0, s)\n for attr in not_selected:\n self.assertFalse(s.find(attr) > 0, s)\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n x = lib.BuildSmallModel()\n self.evaluate(variables.global_variables_initializer())\n run_meta = config_pb2.RunMetadata()\n _ = sess.run(x,\n options=config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE),\n run_metadata=run_meta)\n\n opts = builder(builder.time_and_memory()\n ).with_file_output(outfile).select(['micros']).build()\n _ = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_selection(['total execution time', 'accelerator execution time'],\n ['bytes'])\n\n opts = builder(builder.time_and_memory()\n ).with_file_output(outfile).select(['bytes']).build()\n _ = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_selection(['requested bytes'],\n ['peak bytes', 'residual bytes', 'output bytes'])\n\n opts = builder(builder.time_and_memory()).with_file_output(\n outfile).select(\n ['peak_bytes', 'residual_bytes', 'output_bytes']).build()\n _ = model_analyzer.profile(\n sess.graph, run_meta=run_meta, options=opts)\n check_selection(['peak bytes', 'residual bytes', 'output bytes'],\n ['requested_bytes'])\n\n def _trainLoop(self, train_op, train_steps, 
time_dir, time_step,\n memory_dir, memory_step, profile_dir, dump_step):\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n self.evaluate(variables.global_variables_initializer())\n # Start from 1 because variable_initializer took one step.\n for i in range(1, train_steps + 1):\n _ = self.evaluate(train_op)\n if i in time_step:\n ret = gfile.ListDirectory(time_dir)\n self.assertEqual(len(ret), 1)\n self.assertTrue(\n gfile.Open(os.path.join(time_dir, ret[0]), 'r').read()\n .find('execution time') > 0)\n _ = [gfile.Remove(os.path.join(time_dir, x)) for x in ret]\n else:\n self.assertEqual(len(gfile.ListDirectory(time_dir)), 0)\n if i in memory_step:\n ret = gfile.ListDirectory(memory_dir)\n self.assertEqual(len(ret), 1)\n self.assertTrue(\n gfile.Open(os.path.join(memory_dir, ret[0]), 'r').read()\n .find('requested bytes') > 0)\n _ = [gfile.Remove(os.path.join(memory_dir, x)) for x in ret]\n else:\n self.assertEqual(len(gfile.ListDirectory(memory_dir)), 0)\n if i in dump_step:\n ret = gfile.ListDirectory(profile_dir)\n self.assertAllEqual(ret, ['profile_%d' % i])\n _ = [gfile.Remove(os.path.join(profile_dir, x)) for x in ret]\n else:\n if i < dump_step[0]:\n self.assertFalse(gfile.Exists(profile_dir))\n else:\n self.assertEqual(len(gfile.ListDirectory(profile_dir)), 0)\n\n @test_util.run_v1_only('b/120545219')\n def testAutoProfiling(self):\n ops.reset_default_graph()\n time_dir = os.path.join(test.get_temp_dir(), 'time')\n memory_dir = os.path.join(test.get_temp_dir(), 'memory')\n profile_dir = os.path.join(test.get_temp_dir(), 'dir/dir2/profile')\n # TODO(xpan): Should we create parent directory for them?\n gfile.MkDir(time_dir)\n gfile.MkDir(memory_dir)\n\n time_opts = (builder(builder.time_and_memory())\n .with_file_output(os.path.join(time_dir, 'profile'))\n .select(['micros']).build())\n memory_opts = (builder(builder.time_and_memory())\n .with_file_output(os.path.join(memory_dir, 'profile'))\n .select(['bytes']).build())\n\n time_steps = [2, 3]\n memory_steps = [1, 3]\n dump_steps = [3, 4]\n\n x = lib.BuildSmallModel()\n with profile_context.ProfileContext(profile_dir,\n trace_steps=[1, 2, 3],\n dump_steps=[3, 4]) as pctx:\n pctx.add_auto_profiling('scope', time_opts, time_steps)\n pctx.add_auto_profiling('scope', memory_opts, memory_steps)\n\n self._trainLoop(x, 10, time_dir, time_steps,\n memory_dir, memory_steps, profile_dir, dump_steps)\n\n def testOOM(self):\n if not test.is_gpu_available():\n return\n ops.reset_default_graph()\n with ops.device('/device:GPU:0'):\n a = random_ops.random_normal([1, 10000, 20000], name='test_random1')\n b = random_ops.random_normal([30000, 10000, 1], name='test_random2')\n c = a * b\n\n try:\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n sess.run(c, options=config_pb2.RunOptions(\n report_tensor_allocations_upon_oom=True))\n except Exception as e: # pylint: disable=broad-except\n exception_str = '%s' % e\n # This trace reports allocations for the two random tensors.\n self.assertTrue(\n 'OOM when allocating tensor with shape[30000,10000,20000]' in\n exception_str)\n mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',\n exception_str)\n self.assertGreater(float(mat.group(1)), 0.0)\n mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',\n exception_str)\n self.assertGreater(float(mat.group(1)), 0.0)\n\n def testDistributedOOM(self):\n if not test.is_gpu_available():\n return\n ops.reset_default_graph()\n\n workers, _ = test_util.create_local_cluster(2, 0)\n\n with 
ops.device('/job:worker/replica:0/task:0/gpu:0'):\n a = random_ops.random_normal([1, 10000, 20000], name='test_random1')\n with ops.device('/job:worker/replica:0/task:1/gpu:0'):\n b = random_ops.random_normal([30000, 10000, 1], name='test_random2')\n c = a * b\n\n try:\n with session.Session(workers[1].target) as sess:\n sess.run(c, options=config_pb2.RunOptions(\n report_tensor_allocations_upon_oom=True))\n except Exception as e: # pylint: disable=broad-except\n exception_str = '%s' % e\n # test_random2 is reported because it's allocated in worker 1.\n self.assertTrue('Current usage from device: '\n '/job:worker/replica:0/task:1/device:GPU:0, '\n 'allocator: GPU_0_bfc' in exception_str)\n mat = re.search('(.*)GiB from test_random2/RandomStandardNormal',\n exception_str)\n self.assertGreater(float(mat.group(1)), 0.0)\n # test_random1 is not reported because it's allocated in worker 0.\n mat = re.search('(.*)MiB from test_random1/RandomStandardNormal',\n exception_str)\n self.assertTrue(mat is None)\n\n @test_util.run_v1_only('b/120545219')\n def testTrackPersistentBytes(self):\n ops.reset_default_graph()\n a = array_ops.constant(np.ones((100, 100)))\n b = array_ops.constant(np.ones((100, 100)))\n c = a * b\n config = config_pb2.ConfigProto()\n config.graph_options.rewrite_options.min_graph_nodes = -1\n\n with session.Session(config=config) as sess:\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n sess.run(c, options=run_options, run_metadata=run_metadata)\n\n options = option_builder.ProfileOptionBuilder.time_and_memory()\n options['min_bytes'] = 0\n options['select'] = ('bytes', 'peak_bytes', 'output_bytes',\n 'residual_bytes')\n ret = model_analyzer.profile(\n sess.graph, run_meta=run_metadata, cmd='scope', options=options)\n\n run_metadata = config_pb2.RunMetadata()\n sess.run(c, options=run_options, run_metadata=run_metadata)\n ret2 = model_analyzer.profile(\n sess.graph, run_meta=run_metadata, cmd='scope', options=options)\n\n n = lib.SearchTFProfNode(ret, 'mul')\n n2 = lib.SearchTFProfNode(ret2, 'mul')\n self.assertGreater(n.peak_bytes, 0)\n self.assertGreater(n.output_bytes, 0)\n self.assertGreater(n.residual_bytes, 0)\n self.assertEqual(n.peak_bytes, n2.peak_bytes)\n self.assertEqual(n.output_bytes, n2.output_bytes)\n self.assertEqual(n.residual_bytes, n2.residual_bytes)\n\n def testTraceLoopBytes(self):\n if not test.is_gpu_available(): return\n ops.reset_default_graph()\n steps = 100\n\n with ops.device('/gpu:0'):\n x = array_ops.ones((100, 100), dtype=dtypes.float32)\n n = array_ops.constant(steps, dtype=dtypes.int32)\n x1 = array_ops.ones((100, 100))\n\n x *= x1\n def loop_body(i, x):\n x *= x\n return i + 1, x\n\n _, y = control_flow_ops.while_loop(\n lambda i, x: i < n, loop_body,\n [array_ops.constant(0), x])\n\n grad = gradients.gradients(y, [x1])\n\n with session.Session(config=self._no_rewrite_session_config()) as sess:\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n sess.run(grad, options=run_options, run_metadata=run_metadata)\n\n options = option_builder.ProfileOptionBuilder.time_and_memory()\n options['min_bytes'] = 0\n options['min_micros'] = 0\n options['select'] = ('bytes', 'peak_bytes', 'output_bytes',\n 'residual_bytes')\n options['output'] = 'none'\n ret_pb = model_analyzer.profile(\n sess.graph, run_meta=run_metadata, cmd='scope', options=options)\n self.assertGreater(ret_pb.total_requested_bytes, 
1000000)\n\n def testEager(self):\n ops.reset_default_graph()\n with context.eager_mode():\n outfile = os.path.join(test.get_temp_dir(), 'dump')\n opts = builder(\n builder.time_and_memory()).with_file_output(outfile).build()\n context.enable_run_metadata()\n lib.BuildSmallModel()\n\n profiler = model_analyzer.Profiler()\n profiler.add_step(0, context.export_run_metadata())\n context.disable_run_metadata()\n profiler.profile_operations(opts)\n with gfile.Open(outfile, 'r') as f:\n out_str = f.read()\n self.assertTrue('Conv2D' in out_str)\n self.assertTrue('VarHandleOp' in out_str)\n\n with gfile.Open('/tmp/eager_profile', 'wb') as f:\n profile_pb = tfprof_log_pb2.ProfileProto()\n profile_pb.ParseFromString(profiler.serialize_to_string())\n profile_pb_str = '%s' % profile_pb\n self.assertTrue('Conv2D' in profile_pb_str)\n self.assertTrue('VarHandleOp' in profile_pb_str)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Cudnn RNN operators.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom tensorflow.contrib.checkpoint.python import split_dependency\nfrom tensorflow.contrib.rnn.python.ops import lstm_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_cudnn_rnn_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import rnn_cell_impl\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training.checkpointable import tracking as checkpointable_lib\n\nCUDNN_RNN_UNIDIRECTION = \"unidirectional\"\nCUDNN_RNN_BIDIRECTION = \"bidirectional\"\nCUDNN_LSTM = \"lstm\"\nCUDNN_GRU = \"gru\"\nCUDNN_RNN_RELU = \"rnn_relu\"\nCUDNN_RNN_TANH = \"rnn_tanh\"\n\n# Half for cell input, half for hidden states.\nCUDNN_LSTM_PARAMS_PER_LAYER = 8\nCUDNN_GRU_PARAMS_PER_LAYER = 6\nCUDNN_RNN_TANH_PARAMS_PER_LAYER = 2\nCUDNN_RNN_RELU_PARAMS_PER_LAYER = 2\n\nCUDNN_INPUT_LINEAR_MODE = \"linear_input\"\nCUDNN_INPUT_SKIP_MODE = \"skip_input\"\nCUDNN_INPUT_AUTO_MODE = \"auto_select\"\n\n# pylint:disable=protected-access\n_BIAS_VARIABLE_NAME = rnn_cell_impl._BIAS_VARIABLE_NAME\n_WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME\n# pylint:enable=protected-access\n\n\nclass CudnnCompatibleLSTMCell(lstm_ops.LSTMBlockCell):\n \"\"\"Cudnn Compatible LSTMCell.\n\n A simple wrapper around `tf.contrib.rnn.LSTMBlockCell` to use along with\n `tf.contrib.cudnn_rnn.CudnnLSTM`. 
The latter's params can be used by\n this cell seamlessly.\n \"\"\"\n\n def __init__(self, num_units, reuse=None):\n super(CudnnCompatibleLSTMCell, self).__init__(\n num_units, forget_bias=0, cell_clip=None, use_peephole=False,\n reuse=reuse, name=\"cudnn_compatible_lstm_cell\")\n self._names.update({\"scope\": \"cudnn_compatible_lstm_cell\"})\n\n\nclass CudnnCompatibleGRUCell(rnn_cell_impl.GRUCell):\n r\"\"\"Cudnn Compatible GRUCell.\n\n A GRU impl akin to `tf.nn.rnn_cell.GRUCell` to use along with\n `tf.contrib.cudnn_rnn.CudnnGRU`. The latter's params can be used by\n it seamlessly.\n\n It differs from platform-independent GRUs in how the new memory gate is\n calculated. Nvidia picks this variant based on the GRU authors'[1] suggestion\n and the fact that it has no accuracy impact[2].\n [1] https://arxiv.org/abs/1406.1078\n [2] http://svail.github.io/diff_graphs/\n\n Cudnn compatible GRU (from Cudnn library user guide):\n ```python\n # reset gate\n $$r_t = \sigma(x_t * W_r + h_{t-1} * R_r + b_{Wr} + b_{Rr})$$\n # update gate\n $$u_t = \sigma(x_t * W_u + h_{t-1} * R_u + b_{Wu} + b_{Ru})$$\n # new memory gate\n $$h'_t = tanh(x_t * W_h + r_t .* (h_{t-1} * R_h + b_{Rh}) + b_{Wh})$$\n $$h_t = (1 - u_t) .* h'_t + u_t .* h_{t-1}$$\n ```\n\n Other GRU (see `tf.nn.rnn_cell.GRUCell` and `tf.contrib.rnn.GRUBlockCell`):\n ```python\n # new memory gate\n \\\\(h'_t = tanh(x_t * W_h + (r_t .* h_{t-1}) * R_h + b_{Wh})\\\\)\n ```\n which is not equivalent to Cudnn GRU: in addition to the extra bias term b_Rh,\n ```python\n \\\\(r .* (h * R) != (r .* h) * R\\\\)\n ```\n \"\"\"\n\n def __init__(self, num_units, reuse=None, kernel_initializer=None):\n super(CudnnCompatibleGRUCell, self).__init__(\n num_units,\n activation=None,\n reuse=reuse,\n kernel_initializer=kernel_initializer)\n\n def build(self, inputs_shape):\n if inputs_shape[1].value is None:\n raise ValueError(\"Expected inputs.shape[-1] to be known, saw shape: %s\"\n % inputs_shape)\n\n input_depth = inputs_shape[1].value\n self._gate_kernel = self.add_variable(\n \"gates/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth + self._num_units, 2 * self._num_units],\n initializer=self._kernel_initializer)\n self._gate_bias = self.add_variable(\n \"gates/%s\" % _BIAS_VARIABLE_NAME,\n shape=[2 * self._num_units],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.constant_initializer(1.0, dtype=self.dtype)))\n\n self._candidate_input_kernel = self.add_variable(\n \"candidate/input_projection/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[input_depth, self._num_units],\n initializer=self._kernel_initializer)\n self._candidate_hidden_kernel = self.add_variable(\n \"candidate/hidden_projection/%s\" % _WEIGHTS_VARIABLE_NAME,\n shape=[self._num_units, self._num_units],\n initializer=self._kernel_initializer)\n\n self._candidate_input_bias = self.add_variable(\n \"candidate/input_projection/%s\" % _BIAS_VARIABLE_NAME,\n shape=[self._num_units],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.zeros_initializer(dtype=self.dtype)))\n self._candidate_hidden_bias = self.add_variable(\n \"candidate/hidden_projection/%s\" % _BIAS_VARIABLE_NAME,\n shape=[self._num_units],\n initializer=(\n self._bias_initializer\n if self._bias_initializer is not None\n else init_ops.zeros_initializer(dtype=self.dtype)))\n\n def call(self, inputs, state):\n \"\"\"Gated recurrent unit (GRU) with num_units cells.\"\"\"\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, state], 1), self._gate_kernel)\n 
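# The joint matmul above computes the reset and update gate pre-activations\n # in one pass; gate_inputs has shape [batch, 2 * num_units] at this point.\n 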
gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)\n\n value = math_ops.sigmoid(gate_inputs)\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n candidate = nn_ops.bias_add(\n math_ops.matmul(inputs, self._candidate_input_kernel),\n self._candidate_input_bias)\n candidate += r * nn_ops.bias_add(\n math_ops.matmul(state, self._candidate_hidden_kernel),\n self._candidate_hidden_bias)\n candidate = self._activation(candidate)\n new_h = (1-u) * candidate + u * state\n return new_h, new_h\n\n\nclass CudnnParamsFormatConverter(object):\n \"\"\"Abstract class that converts between params of Cudnn Rnn and TF Rnn.\"\"\"\n\n def __init__(self,\n num_layers,\n num_units,\n input_size,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION):\n \"\"\"Constructor.\n\n Args:\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input; it can be different from\n num_units.\n input_mode: indicates whether there is a linear projection between the\n input and the actual computation before the first layer. It could be one\n of 'linear_input', 'skip_input' or 'auto_select'. * 'linear_input'\n (default) always applies a linear projection of input onto RNN hidden\n state (standard RNN behavior). * 'skip_input' is only allowed when\n input_size == num_units; * 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\n direction: the direction in which the model operates. Could be either\n 'unidirectional' or 'bidirectional'.\n \"\"\"\n self._num_layers = num_layers\n self._input_size = input_size\n self._num_units = num_units\n self._input_mode = input_mode\n self._direction = direction\n self._num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2\n self._num_params = (\n self._num_params_per_layer * self._num_layers * self._num_dirs)\n\n def tf_canonical_to_opaque(self, tf_canonicals):\n r\"\"\"Converts tf canonical weights to cudnn opaque param.\"\"\"\n cu_weights, cu_biases = self._tf_canonical_to_cu_canonical(tf_canonicals)\n cu_weights = [array_ops.reshape(w, [-1]) for w in cu_weights]\n opaque_params = self._cu_canonical_to_opaque(cu_weights, cu_biases)\n return opaque_params\n\n def opaque_to_tf_canonical(self, opaque_param):\n r\"\"\"Converts cudnn opaque param to tf canonical weights.\"\"\"\n cu_weights, cu_biases = self._opaque_to_cu_canonical(opaque_param)\n weights, biases = self._cu_canonical_to_tf_canonical(cu_weights, cu_biases)\n return weights, biases\n\n def _opaque_to_cu_canonical(self, opaque_param):\n \"\"\"Converts opaque params to Cudnn canonical format.\n\n Args:\n opaque_param: An opaque tensor storing cudnn rnn params (weights and\n biases).\n Returns:\n 2 lists for weights and biases respectively.\n \"\"\"\n with ops.device(\"/gpu:0\"):\n weights, biases = gen_cudnn_rnn_ops.cudnn_rnn_params_to_canonical(\n num_layers=self._num_layers,\n num_units=self._num_units,\n input_size=self._input_size,\n params=opaque_param,\n num_params=self._num_params,\n rnn_mode=self._rnn_mode,\n input_mode=self._input_mode,\n direction=self._direction)\n return (weights, biases)\n\n def _cu_canonical_to_opaque(self, cu_weights, cu_biases):\n \"\"\"Converts from Cudnn canonical format to opaque params.\n\n Args:\n cu_weights: a list of tensors, Cudnn canonical weights.\n cu_biases: a list of tensors, Cudnn canonical biases.\n Returns:\n a single opaque tensor.\n \"\"\"\n with ops.device(\"/gpu:0\"):\n return 
gen_cudnn_rnn_ops.cudnn_rnn_canonical_to_params(\n num_layers=self._num_layers,\n num_units=self._num_units,\n input_size=self._input_size,\n weights=cu_weights,\n biases=cu_biases,\n rnn_mode=self._rnn_mode,\n input_mode=self._input_mode,\n direction=self._direction)\n\n def _cu_canonical_to_tf_canonical(self, cu_weights, cu_biases):\n r\"\"\"Transform from Cudnn canonical to tf canonical.\n\n The elements of argument lists are laid out in the following format:\n ------------------------------------------------------------\n | weights | biases |\n ------------------------------------------------------------\n \\ \\\n \\ \\\n -------------------------------\n | layer1 |layer2 |... |\n -------------------------------\n \\ \\\n ---------------\n |fwd |bak |\n ---------------\n Args:\n cu_weights: a list of tensors of Cudnn canonical weights.\n cu_biases: a list of tensors of Cudnn canonical biases.\n Returns:\n 1 tuple, tf canonical weights and biases.\n \"\"\"\n tf_weights, tf_biases = [], []\n\n layer_weights_num = self._num_params_per_layer * self._num_dirs\n layer_biases_num = layer_weights_num\n\n for i in range(self._num_layers):\n layer_weights = cu_weights[i * layer_weights_num:(i + 1) *\n layer_weights_num]\n layer_biases = cu_biases[i * layer_biases_num:(i + 1) * layer_biases_num]\n if self._direction == CUDNN_RNN_UNIDIRECTION:\n self._cu_canonical_to_tf_canonical_single_layer(\n layer_weights, layer_biases, tf_weights, tf_biases)\n else:\n fw_weights = layer_weights[:len(layer_weights) // 2]\n bw_weights = layer_weights[len(layer_weights) // 2:]\n fw_biases = layer_biases[:len(layer_biases) // 2]\n bw_biases = layer_biases[len(layer_biases) // 2:]\n\n self._cu_canonical_to_tf_canonical_single_layer(\n fw_weights,\n fw_biases,\n tf_weights,\n tf_biases,\n )\n\n self._cu_canonical_to_tf_canonical_single_layer(\n bw_weights,\n bw_biases,\n tf_weights,\n tf_biases,\n )\n return (tf_weights, tf_biases)\n\n def _cu_canonical_to_tf_canonical_single_layer(self, cu_weights, cu_biases,\n tf_weights, tf_biases):\n r\"\"\"Transform single layer Cudnn canonicals to tf canonicals.\n\n The elements of cu_weights, cu_biases are laid out in the following format:\n -------------------------------------------------------------------------\n | gate0 param on inputs | gate0 param on hidden state | gate1 ..........|\n -------------------------------------------------------------------------\n Args:\n cu_weights: a list of tensors, single layer weights.\n cu_biases: a list of tensors, single layer biases.\n tf_weights: a list where transformed weights are stored.\n tf_biases: a list where transformed biases are stored.\n \"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def _tf_canonical_to_cu_canonical(self, tf_canonicals):\n r\"\"\"Transform from tf canonical to Cudnn canonical.\n\n This is the reverse routine of _TransformCanonical().\n Args:\n tf_canonicals: a list of tensors of tf canonical params. The elements are\n laid out in the following format:\n ------------------------------------------------------------\n | weights | biases |\n ------------------------------------------------------------\n \\ \\\n \\ \\\n -------------------------------\n | layer1 |layer2 |... 
|\n -------------------------------\n \\ \\\n ---------------\n |fwd |bak |\n ---------------\n Returns:\n 2 lists: the recovered cudnn canonical weights and biases.\n \"\"\"\n weights = tf_canonicals[:len(tf_canonicals) // 2]\n biases = tf_canonicals[len(tf_canonicals) // 2:]\n\n cu_weights, cu_biases = [], []\n layer_weights_num = len(weights) // self._num_layers\n layer_biases_num = len(biases) // self._num_layers\n for i in range(self._num_layers):\n layer_weights = weights[i * layer_weights_num:(i + 1) * layer_weights_num]\n layer_biases = biases[i * layer_biases_num:(i + 1) * layer_biases_num]\n if self._direction == CUDNN_RNN_UNIDIRECTION:\n cu_weights.extend(self._tf_to_cudnn_weights(i, *layer_weights))\n cu_biases.extend(self._tf_to_cudnn_biases(*layer_biases))\n else:\n fw_weights, bw_weights = layer_weights[:len(\n layer_weights) // 2], layer_weights[len(layer_weights) // 2:]\n fw_biases, bw_biases = layer_biases[:len(\n layer_biases) // 2], layer_biases[len(layer_biases) // 2:]\n cu_weights.extend(self._tf_to_cudnn_weights(i, *fw_weights))\n cu_biases.extend(self._tf_to_cudnn_biases(*fw_biases))\n\n cu_weights.extend(self._tf_to_cudnn_weights(i, *bw_weights))\n cu_biases.extend(self._tf_to_cudnn_biases(*bw_biases))\n return cu_weights, cu_biases\n\n def _cudnn_to_tf_weights(self, *cu_weights):\n r\"\"\"Stitches cudnn canonical weights to generate tf canonical weights.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def _tf_to_cudnn_weights(self, layer, *tf_weights):\n r\"\"\"Reverses the operations in StitchWeights().\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def _cudnn_to_tf_biases(self, *biases):\n r\"\"\"Stitches cudnn canonical biases to generate tf canonical biases.\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n def _tf_to_cudnn_biases(self, *tf_biases):\n r\"\"\"Reverses the operations in StitchBiases().\"\"\"\n raise NotImplementedError(\"Abstract method\")\n\n\nclass CudnnParamsFormatConverterLSTM(CudnnParamsFormatConverter):\n \"\"\"Helper class that converts between params of Cudnn and TF LSTM.\"\"\"\n _rnn_mode = CUDNN_LSTM\n _num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER\n\n def _cudnn_to_tf_gate_params(self, *cu_gate_order):\n i_g, f_g, c_g, o_g = cu_gate_order\n return [i_g, c_g, f_g, o_g]\n\n def _tf_to_cudnn_gate_params(self, *tf_gate_order):\n i_g, c_g, f_g, o_g = tf_gate_order\n return [i_g, f_g, c_g, o_g]\n\n def _cudnn_to_tf_weights(self, *cu_weights):\n r\"\"\"Stitching cudnn canonical weights to generate tf canonical weights.\"\"\"\n w_i, w_f, w_c, w_o, r_i, r_f, r_c, r_o = cu_weights\n\n # pylint: disable=invalid-name\n W_i = array_ops.concat([w_i, r_i], axis=1)\n W_f = array_ops.concat([w_f, r_f], axis=1)\n W_c = array_ops.concat([w_c, r_c], axis=1)\n W_o = array_ops.concat([w_o, r_o], axis=1)\n # pylint: enable=invalid-name\n # Cudnn LSTM weights are in ifco order, other tf LSTMs are in icfo order.\n reordered = self._cudnn_to_tf_gate_params(* [W_i, W_f, W_c, W_o])\n return (array_ops.transpose(array_ops.concat(reordered, axis=0)),)\n\n def _tf_to_cudnn_weights(self, layer, *tf_weights):\n r\"\"\"Reverse the operations in StitchWeights().\"\"\"\n input_size = self._input_size\n num_units = self._num_units\n if layer == 0:\n input_weight_width = input_size\n else:\n input_weight_width = num_units\n if self._direction == CUDNN_RNN_BIDIRECTION:\n input_weight_width *= 2\n\n (tf_weight,) = tf_weights\n w = array_ops.transpose(tf_weight)\n # pylint: disable=invalid-name\n W_i, W_f, W_c, W_o = 
self._tf_to_cudnn_gate_params(*array_ops.split(\n w, 4, axis=0))\n\n w_i, r_i = array_ops.split(W_i, [input_weight_width, num_units], axis=1)\n w_c, r_c = array_ops.split(W_c, [input_weight_width, num_units], axis=1)\n w_f, r_f = array_ops.split(W_f, [input_weight_width, num_units], axis=1)\n w_o, r_o = array_ops.split(W_o, [input_weight_width, num_units], axis=1)\n return w_i, w_f, w_c, w_o, r_i, r_f, r_c, r_o\n # pylint: enable=invalid-name\n\n def _cudnn_to_tf_biases(self, *cu_biases):\n r\"\"\"Stitching cudnn canonical biases to generate tf canonical biases.\"\"\"\n b_wi, b_wf, b_wc, b_wo, b_ri, b_rf, b_rc, b_ro = cu_biases\n # Save only the sum instead of individual biases. When recovering, return\n # two biases each with half the value. Since RNN does not regularize by\n # weight decay, it has no side effect in training or inference.\n # pylint: disable=invalid-name\n B_i = b_wi + b_ri\n B_f = b_wf + b_rf\n B_c = b_wc + b_rc\n B_o = b_wo + b_ro\n # pylint: enable=invalid-name\n reordered = self._cudnn_to_tf_gate_params(* [B_i, B_f, B_c, B_o])\n return (array_ops.concat(reordered, axis=0),)\n\n def _tf_to_cudnn_biases(self, *tf_biases):\n r\"\"\"Reverse the operations in StitchBiases().\"\"\"\n (tf_bias,) = tf_biases\n # pylint: disable=invalid-name\n B_i, B_f, B_c, B_o = self._tf_to_cudnn_gate_params(*array_ops.split(\n tf_bias, 4, axis=0))\n # pylint: enable=invalid-name\n # pylint: disable=unbalanced-tuple-unpacking\n b_wi, b_ri = (B_i * 0.5,) * 2\n b_wf, b_rf = (B_f * 0.5,) * 2\n b_wc, b_rc = (B_c * 0.5,) * 2\n b_wo, b_ro = (B_o * 0.5,) * 2\n # pylint: enable=unbalanced-tuple-unpacking\n # Return ifco order for Cudnn LSTM.\n return b_wi, b_wf, b_wc, b_wo, b_ri, b_rf, b_rc, b_ro\n\n def _cu_canonical_to_tf_canonical_single_layer(self, cu_weights, cu_biases,\n tf_weights, tf_biases):\n (w,) = self._cudnn_to_tf_weights(*cu_weights)\n (b,) = self._cudnn_to_tf_biases(*cu_biases)\n tf_weights.append(w)\n tf_biases.append(b)\n\n\nclass CudnnParamsFormatConverterGRU(CudnnParamsFormatConverter):\n \"\"\"Helper class that converts between params of Cudnn and TF GRU.\"\"\"\n\n _rnn_mode = CUDNN_GRU\n _num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER\n\n _rnn_cell_name = base_layer.to_snake_case(CudnnCompatibleGRUCell.__name__)\n\n def _cudnn_to_tf_weights(self, *cu_weights):\n r\"\"\"Stitching cudnn canonical weights to generate tf canonical weights.\"\"\"\n w_i, w_r, w_h, r_i, r_r, r_h = cu_weights\n\n # pylint: disable=invalid-name\n W_i = array_ops.concat([w_i, r_i], axis=1)\n W_r = array_ops.concat([w_r, r_r], axis=1)\n # pylint: enable=invalid-name\n return (array_ops.transpose(array_ops.concat([W_i, W_r], axis=0)),\n array_ops.transpose(w_h), array_ops.transpose(r_h))\n\n def _tf_to_cudnn_weights(self, layer, *tf_weights):\n r\"\"\"Reverse the operations in StitchWeights().\"\"\"\n input_size = self._input_size\n num_units = self._num_units\n if layer == 0:\n input_weight_width = input_size\n else:\n input_weight_width = num_units\n if self._direction == CUDNN_RNN_BIDIRECTION:\n input_weight_width *= 2\n # pylint: disable=invalid-name\n W_ir, w_h, r_h = tf_weights\n W_ir = array_ops.transpose(W_ir)\n w_h = array_ops.transpose(w_h)\n r_h = array_ops.transpose(r_h)\n\n W_i, W_r = array_ops.split(W_ir, 2, axis=0)\n w_i, r_i = array_ops.split(W_i, [input_weight_width, num_units], axis=1)\n w_r, r_r = array_ops.split(W_r, [input_weight_width, num_units], axis=1)\n # pylint: enable=invalid-name\n return w_i, w_r, w_h, r_i, r_r, r_h\n\n def _cudnn_to_tf_biases(self, *biases):\n 
r\"\"\"Stitching cudnn canonical biases to generate tf canonical biases.\"\"\"\n b_wi, b_wr, b_wh, b_ri, b_rr, b_rh = biases\n return (\n # Save only the sum instead of individual biases. When recovering,\n # return two biases each with half the value. Since RNN does not\n # regularize by weight decay, it has no side effect in training or\n # inference.\n array_ops.concat([b_wi, b_wr], axis=0) + array_ops.concat(\n [b_ri, b_rr], axis=0),\n b_wh,\n b_rh)\n\n def _tf_to_cudnn_biases(self, *tf_biases):\n r\"\"\"Reverse the operations in StitchBiases().\"\"\"\n # b_ir is the summed bias of reset and update gate.\n b_ir, b_wh, b_rh = tf_biases\n bi, br = b_ir * 0.5, b_ir * 0.5\n b_wi, b_wr = array_ops.split(bi, 2, axis=0)\n b_ri, b_rr = array_ops.split(br, 2, axis=0)\n return b_wi, b_wr, b_wh, b_ri, b_rr, b_rh\n\n def _cu_canonical_to_tf_canonical_single_layer(self, cu_weights, cu_biases,\n tf_weights, tf_biases):\n # pylint: disable=invalid-name\n W_ir, w_h, r_h = self._cudnn_to_tf_weights(*cu_weights)\n b_ir, b_wh, b_rh = self._cudnn_to_tf_biases(*cu_biases)\n # pylint: enable=invalid-name\n tf_weights.extend([W_ir, w_h, r_h])\n tf_biases.extend([b_ir, b_wh, b_rh])\n\n\nclass CudnnParamsFormatConverterBasic(CudnnParamsFormatConverterLSTM):\n \"\"\"Helper class that converts between params of Cudnn and TF Relu/Tanh RNN.\"\"\"\n\n def _cudnn_to_tf_weights(self, *cu_weights):\n r\"\"\"Stitching cudnn canonical weights to generate tf canonical weights.\"\"\"\n w_i, w_h = cu_weights\n W = array_ops.concat([w_i, w_h], axis=1) # pylint: disable=invalid-name\n return (array_ops.transpose(W),)\n\n def _tf_to_cudnn_weights(self, layer, *tf_weights):\n r\"\"\"Reverse the operations in StitchWeights().\"\"\"\n input_size = self._input_size\n num_units = self._num_units\n if layer == 0:\n input_weight_width = input_size\n else:\n input_weight_width = num_units\n if self._direction == CUDNN_RNN_BIDIRECTION:\n input_weight_width *= 2\n\n (tf_weight,) = tf_weights\n # pylint: disable=invalid-name\n W = array_ops.transpose(tf_weight)\n w_i, w_h = array_ops.split(W, [input_weight_width, num_units], axis=1)\n return w_i, w_h\n # pylint: enable=invalid-name\n\n def _cudnn_to_tf_biases(self, *cu_biases):\n r\"\"\"Stitching cudnn canonical biases to generate tf canonical biases.\"\"\"\n # Save only the sum instead of individual biases. When recovering, return\n # two biases each with half the value. 
Since RNN does not regularize by\n # weight decay, it has no side effect in training or inference.\n b_wi, b_wh = cu_biases\n return (b_wi + b_wh,)\n\n def _tf_to_cudnn_biases(self, *tf_biases):\n r\"\"\"Reverse the operations in StitchBiases().\"\"\"\n (tf_bias,) = tf_biases\n b_i = tf_bias * 0.5\n b_h = tf_bias * 0.5\n return b_i, b_h\n\n\nclass CudnnParamsFormatConverterTanh(CudnnParamsFormatConverterBasic):\n \"\"\"Helper class that converts between params of Cudnn and TF Tanh RNN.\"\"\"\n _rnn_mode = CUDNN_RNN_TANH\n _num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER\n\n\nclass CudnnParamsFormatConverterRelu(CudnnParamsFormatConverterBasic):\n \"\"\"Helper class that converts between params of Cudnn and TF Relu RNN.\"\"\"\n _rnn_mode = CUDNN_RNN_RELU\n _num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER\n\n\n# TODO(yaozhang): make sure we only save the canonical version of params and\n# don't save the platform-specific version to avoid potential race\n# conditions where params is updated by both versions when being restored.\n# Currently, checkpointing will function properly, even though we save both\n# versions, because Saver restores customized savables after Variables.\n# However, it is good not to rely on this restoring order of Saver and to\n# avoid unnecessary storage. Add a test to check that only the canonical\n# version is saved.\nclass CudnnOpaqueParamsSaveable(saver.BaseSaverBuilder.SaveableObject):\n \"\"\"Abstract SaveableObject implementation handling Cudnn opaque params.\"\"\"\n\n def __init__(self,\n opaque_params,\n num_layers,\n num_units,\n input_size,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n scope=None,\n name=\"cudnn_rnn_saveable\"):\n \"\"\"Creates a CudnnOpaqueParamsSaveable object.\n\n CudnnOpaqueParamsSaveable is saveable/restorable in a checkpoint file\n and is used to save/restore the weights and biases parameters in a\n canonical format which is directly consumable by platform-independent tf\n RNN cells. Parameters are saved as tensors layer by layer with weight\n tensors followed by bias tensors, and forward direction followed by\n backward direction (if applicable). When restoring, a user could name\n param_variables as desired, and restore weight and bias tensors to these\n variables.\n\n For CudnnRNNRelu or CudnnRNNTanh, there are 2 tensors per weight and per\n bias for each layer: tensor 0 is applied to the input from the previous\n layer and tensor 1 to the recurrent input.\n\n For CudnnLSTM, there are 8 tensors per weight and per bias for each\n layer: tensor 0-3 are applied to the input from the previous layer and\n tensor 4-7 to the recurrent input. Tensor 0 and 4 are for the input gate;\n tensor 1 and 5 the forget gate; tensor 2 and 6 the new memory gate;\n tensor 3 and 7 the output gate.\n\n For CudnnGRU, there are 6 tensors per weight and per bias for each layer:\n tensor 0-2 are applied to the input from the previous layer and\n tensor 3-5 to the recurrent input. Tensor 0 and 3 are for the reset gate;\n tensor 1 and 4 the update gate; tensor 2 and 5 the new memory gate.\n\n Args:\n opaque_params: a variable, Cudnn RNN opaque params.\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input; it can be different from\n num_units.\n input_mode: indicates whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'. 'linear_input' (default)\n always applies a linear projection of input onto RNN hidden state\n (standard RNN behavior). 'skip_input' is only allowed when input_size ==\n num_units; 'auto_select' implies 'skip_input' when input_size ==\n num_units; otherwise, it implies 'linear_input'.\n direction: the direction in which the model operates. Could be either\n 'unidirectional' or 'bidirectional'.\n scope: string of VariableScope, the scope of the equivalent subgraph\n consisting of only platform-independent tf RNN cells.\n name: the name of the CudnnOpaqueParamsSaveable object.\n \"\"\"\n # Define in subclasses.\n self._num_layers = num_layers\n self._input_size = input_size\n self._num_units = num_units\n self._input_mode = input_mode\n self._direction = direction\n if scope is not None:\n scope_name = scope.name if isinstance(scope, vs.VariableScope) else scope\n self._scope = scope_name or None\n else:\n self._scope = None\n\n self._variables = opaque_params\n self._num_dirs = 1 if self._direction == CUDNN_RNN_UNIDIRECTION else 2\n # Defined in subclasses.\n self._format_converter = None\n\n tf_weights, tf_biases = (\n self.format_converter.opaque_to_tf_canonical(self._variables))\n tf_weight_names, tf_bias_names = self._tf_canonical_names()\n # We currently don't use slice_spec. It might be useful in a distributed\n # setting where each parameter server node stores a slice of the variable,\n # instead of having the master pull all slices and then save them.\n slice_spec = \"\"\n params = tf_weights + tf_biases\n self._weight_names = tf_weight_names\n self._bias_names = tf_bias_names\n self._param_names = tf_weight_names + tf_bias_names\n prefixed_param_names = tf_weight_names + tf_bias_names\n if self._scope:\n prefixed_param_names = [\n \"%s/%s\" % (self._scope, pn) for pn in prefixed_param_names\n ]\n specs = [\n saver.BaseSaverBuilder.SaveSpec(param, slice_spec, param_name)\n for param, param_name in zip(params, prefixed_param_names)\n ]\n super(CudnnOpaqueParamsSaveable, self).__init__(\n array_ops.identity(self._variables), specs, name)\n\n @property\n def format_converter(self):\n if self._format_converter is None:\n self._format_converter = self._format_converter_cls(\n self._num_layers, self._num_units, self._input_size, self._input_mode,\n self._direction)\n return self._format_converter\n\n def restore(self, restored_tensors, restored_shapes):\n opaque_params = self.format_converter.tf_canonical_to_opaque(\n restored_tensors)\n return state_ops.assign(\n self._variables, opaque_params, validate_shape=False)\n\n def _checkpointable_save(self, save_buffer):\n weights, biases = self.format_converter.opaque_to_tf_canonical(\n self._variables)\n for name, tensor in zip(self._param_names, weights + biases):\n save_buffer[name] = array_ops.identity(tensor)\n\n def _checkpointable_restore(self, restore_buffer):\n tensors = [\n array_ops.identity(restore_buffer[name]) for name in self._param_names\n ]\n return self.restore(\n restored_tensors=tensors,\n restored_shapes=None # Unused\n )\n\n def _add_checkpointable_dependencies(self, checkpointable, dtype):\n \"\"\"Add canonical weight dependencies to `checkpointable`.\n\n When saving or restoring, converts to or from the opaque buffer\n format. Weights are saved and loaded in the configuration expected by\n cuDNN-compatible cells.\n\n Args:\n checkpointable: An object inheriting from `CheckpointableBase` to add\n dependencies to (typically the cuDNN `Layer`).\n dtype: The dtype for the canonical parameter Tensors.\n \"\"\"\n split_dependencies = split_dependency.split_dependency(\n component_names=self._param_names,\n component_dtypes=(dtype,) * len(self._param_names),\n fill_save_buffer_fn=self._checkpointable_save,\n consume_restore_buffer_fn=self._checkpointable_restore)\n self._checkpointable_track_params(checkpointable, split_dependencies)\n\n def _checkpointable_track_params(self, checkpointable, params):\n \"\"\"Tracks parameters in a canonical configuration.\"\"\"\n return # NotImplementedError raised by the Layer.\n\n def _tf_canonical_names(self):\n tf_weights_names, tf_biases_names = [], []\n for i in range(self._num_layers):\n if self._direction == CUDNN_RNN_UNIDIRECTION:\n prefix = self._tf_canonical_name_prefix(i)\n self._tf_canonical_names_single_layer(prefix, tf_weights_names,\n tf_biases_names)\n else:\n fwd_prefix = self._tf_canonical_name_prefix(i, is_fwd=True)\n bak_prefix = self._tf_canonical_name_prefix(i, is_fwd=False)\n\n self._tf_canonical_names_single_layer(fwd_prefix, tf_weights_names,\n tf_biases_names)\n self._tf_canonical_names_single_layer(bak_prefix, tf_weights_names,\n tf_biases_names)\n return tf_weights_names, tf_biases_names\n\n def _tf_canonical_name_prefix(self, layer, is_fwd=True):\n if self._direction == CUDNN_RNN_UNIDIRECTION:\n return \"rnn/multi_rnn_cell/cell_%d/%s\" % (layer, self._rnn_cell_name)\n else:\n if is_fwd:\n return (\"stack_bidirectional_rnn/cell_%d/bidirectional_rnn/fw/%s\" %\n (layer, self._rnn_cell_name))\n else:\n return (\"stack_bidirectional_rnn/cell_%d/bidirectional_rnn/bw/%s\" %\n (layer, self._rnn_cell_name))\n\n def _tf_canonical_names_single_layer(self, prefix, tf_weights_names,\n tf_biases_names):\n raise NotImplementedError(\"Abstract method\")\n\n\nclass CudnnLSTMSaveable(CudnnOpaqueParamsSaveable):\n \"\"\"SaveableObject implementation handling Cudnn LSTM opaque params.\"\"\"\n\n _format_converter_cls = CudnnParamsFormatConverterLSTM\n _rnn_cell_name = base_layer.to_snake_case(CudnnCompatibleLSTMCell.__name__)\n\n def _tf_canonical_names_single_layer(self, prefix, tf_weights_names,\n tf_bias_names):\n tf_weights_names.append(prefix + \"/kernel\")\n tf_bias_names.append(prefix + \"/bias\")\n\n def _checkpointable_track_params(self, checkpointable, params):\n \"\"\"Track parameters for compatibility with CudnnCompatibleLSTMCell.\"\"\"\n biases = []\n weights = []\n for name in self._weight_names:\n weights.append(params[name])\n for name in self._bias_names:\n biases.append(params[name])\n assert len(params) == len(weights) + len(biases)\n if len(weights) == 1 and len(biases) == 1:\n # For single-layer cells, allow substituting a cell with no MultiRNNCell\n # wrapping.\n kernel, = weights # pylint: disable=unbalanced-tuple-unpacking\n bias, = biases # pylint: disable=unbalanced-tuple-unpacking\n checkpointable._track_checkpointable(kernel, name=\"kernel\") # pylint: disable=protected-access\n checkpointable._track_checkpointable(bias, name=\"bias\") # pylint: disable=protected-access\n assert len(biases) == len(weights)\n for cell_index, (bias, kernel) in enumerate(zip(biases, weights)):\n cell = checkpointable_lib.Checkpointable()\n checkpointable._track_checkpointable(cell, name=\"cell-%d\" % cell_index) # pylint: disable=protected-access\n cell.bias = bias\n 
cell.kernel = kernel\n\n\nclass CudnnGRUSaveable(CudnnOpaqueParamsSaveable):\n \"\"\"SaveableObject implementation handling Cudnn GRU opaque params.\"\"\"\n\n _format_converter_cls = CudnnParamsFormatConverterGRU\n _rnn_cell_name = base_layer.to_snake_case(CudnnCompatibleGRUCell.__name__)\n\n def _tf_canonical_names_single_layer(self, prefix, tf_weights_names,\n tf_bias_names):\n tf_weights_names.append(prefix + \"/gates/kernel\")\n tf_weights_names.append(prefix + \"/candidate/input_projection/kernel\")\n tf_weights_names.append(prefix + \"/candidate/hidden_projection/kernel\")\n\n tf_bias_names.append(prefix + \"/gates/bias\")\n tf_bias_names.append(prefix + \"/candidate/input_projection/bias\")\n tf_bias_names.append(prefix + \"/candidate/hidden_projection/bias\")\n\n\nclass CudnnRNNTanhSaveable(CudnnLSTMSaveable):\n _format_converter_cls = CudnnParamsFormatConverterTanh\n _rnn_cell_name = base_layer.to_snake_case(rnn_cell_impl.BasicRNNCell.__name__)\n\n\nclass CudnnRNNReluSaveable(CudnnLSTMSaveable):\n _format_converter_cls = CudnnParamsFormatConverterRelu\n _rnn_cell_name = base_layer.to_snake_case(rnn_cell_impl.BasicRNNCell.__name__)\n\n\n_cudnn_rnn_common_doc_string = \"\"\"\n Cudnn RNN has an opaque parameter buffer that can be used for inference and\n training. But it is possible that the layout of the parameter buffers\n changes between generations. So it is highly recommended to use\n CudnnOpaqueParamsSaveable to save and restore weights and biases in a\n canonical format.\n\n This is a typical use case:\n\n * The user creates a CudnnRNN model.\n * The user queries the parameter buffer size.\n * The user creates a variable of that size that serves as the parameter\n buffers.\n * The user either initializes the parameter buffer, or loads the canonical\n weights into the parameter buffer.\n * The user calls the model with the parameter buffer for inference, or\n training.\n * If training, the user creates a Saver object.\n * If training, the user creates a CudnnOpaqueParamsSaveable object from the\n parameter buffer so that it can later be saved in the canonical format. When\n creating a CudnnOpaqueParamsSaveable object, a name could be provided,\n which is useful in distinguishing the names of multiple\n CudnnOpaqueParamsSaveable objects (e.g. 
for an encoder-decoder model).\n * Once in a while, the user saves the parameter buffer into model checkpoints\n with Saver.save().\n * When restoring, the user creates a CudnnOpaqueParamsSaveable object and\n uses Saver.restore() to restore the parameter buffer from the canonical\n format to a user-defined format, as well as to restore other savable\n objects in the checkpoint file.\n\"\"\"\n\n\ndef _check_rnn_mode(rnn_mode):\n if rnn_mode not in (CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_TANH, CUDNN_RNN_RELU):\n raise ValueError(\"Invalid rnn_mode: %s, expect one of (%s, %s, %s, %s)\" %\n (rnn_mode, CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_TANH,\n CUDNN_RNN_RELU))\n\n\ndef _get_seed(seed):\n seed, seed2 = random_seed.get_seed(seed)\n if seed is None and seed2 is None:\n seed, seed2 = 0, 0\n return seed, seed2\n\n\ndef check_direction(direction):\n \"\"\"Check validity of direction.\"\"\"\n if direction not in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):\n raise ValueError(\"Invalid direction: %s, expecting %s or %s\" %\n (direction, CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION))\n\n\ndef check_input_mode(input_mode):\n if input_mode not in (CUDNN_INPUT_LINEAR_MODE, CUDNN_INPUT_SKIP_MODE,\n CUDNN_INPUT_AUTO_MODE):\n raise ValueError(\"Invalid input_mode: %s, expect one of (%s, %s, %s)\" %\n (input_mode, CUDNN_INPUT_LINEAR_MODE,\n CUDNN_INPUT_SKIP_MODE, CUDNN_INPUT_AUTO_MODE))\n\n\ndef _get_num_params(rnn_mode, num_layers, direction):\n \"\"\"Return num params for given Cudnn config.\"\"\"\n if rnn_mode == CUDNN_LSTM:\n num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER\n elif rnn_mode == CUDNN_GRU:\n num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER\n elif rnn_mode == CUDNN_RNN_RELU:\n num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER\n elif rnn_mode == CUDNN_RNN_TANH:\n num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER\n else:\n raise ValueError(\"Invalid \\'rnn_mode\\': %s\" % rnn_mode)\n num_params = num_layers * num_params_per_layer\n if direction != CUDNN_RNN_UNIDIRECTION:\n num_params *= 2\n return num_params\n\n\ndef _cudnn_rnn(inputs,\n input_h,\n input_c,\n params,\n is_training,\n rnn_mode,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n seed=0,\n name=None):\n \"\"\"Cudnn RNN.\n\n Args:\n inputs: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n input_c: the initial hidden state for c. This is only relevant for LSTM.\n A Tensor of the same shape as input_h.\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference\n rnn_mode: one of ('lstm', 'gru', 'rnn_relu', 'rnn_tanh').\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. 
See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n outputs, output_h, output_c\n \"\"\"\n _check_rnn_mode(rnn_mode)\n check_direction(direction)\n check_input_mode(input_mode)\n seed, seed2 = random_seed.get_seed(seed)\n # TODO(jamesqin): switch default value to \"1\" on May 25th 2018, and get rid\n # of V1 ops.\n use_cudnn_v2 = os.environ.get(\"TF_CUDNN_RNN_USE_V2\", \"0\")\n args = {\n \"input\": inputs,\n \"input_h\": input_h,\n \"input_c\": input_c,\n \"params\": params,\n \"is_training\": is_training,\n \"rnn_mode\": rnn_mode,\n \"input_mode\": input_mode,\n \"direction\": direction,\n \"dropout\": dropout,\n \"seed\": seed,\n \"seed2\": seed2,\n \"name\": name\n }\n if use_cudnn_v2 != \"1\":\n outputs, output_h, output_c, _ = gen_cudnn_rnn_ops.cudnn_rnn(**args)\n else:\n outputs, output_h, output_c, _, _ = gen_cudnn_rnn_ops.cudnn_rnnv2(**args)\n return (outputs, output_h, output_c)\n\n\ndef cudnn_lstm(inputs,\n input_h,\n input_c,\n params,\n is_training,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n seed=0,\n name=None):\n \"\"\"Cudnn LSTM.\n\n Args:\n inputs: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n input_c: the initial hidden state for c. This is only relevant for LSTM.\n A Tensor of the same shape as input_h.\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n outputs, output_h, output_c\n \"\"\"\n return _cudnn_rnn(inputs, input_h, input_c, params, is_training, CUDNN_LSTM,\n input_mode, direction, dropout, seed, name)\n\n\ndef _cudnn_rnn_no_input_c(inputs,\n input_h,\n params,\n is_training,\n rnn_mode,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n seed=0,\n name=None):\n \"\"\"Cudnn RNN w/o input_c.\n\n Args:\n inputs: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference\n rnn_mode: one of ('lstm', 'gru', 'rnn_relu', 'rnn_tanh').\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. 
(standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n outputs, output_h\n \"\"\"\n input_c = array_ops.constant([], dtype=input_h.dtype)\n outputs, output_h, _ = _cudnn_rnn(inputs, input_h, input_c, params,\n is_training, rnn_mode, input_mode,\n direction, dropout, seed, name)\n return outputs, output_h\n\n\ndef cudnn_gru(inputs,\n input_h,\n params,\n is_training,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n seed=0,\n name=None):\n \"\"\"Cudnn GRU.\n\n Args:\n inputs: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n outputs, output_h\n \"\"\"\n return _cudnn_rnn_no_input_c(inputs, input_h, params, is_training, CUDNN_GRU,\n input_mode, direction, dropout, seed, name)\n\n\ndef cudnn_rnn_relu(inputs,\n input_h,\n params,\n is_training,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n seed=0,\n name=None):\n \"\"\"Cudnn RNN Relu.\n\n Args:\n inputs: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. 
See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n outputs, output_h\n \"\"\"\n return _cudnn_rnn_no_input_c(inputs, input_h, params, is_training,\n CUDNN_RNN_RELU, input_mode, direction, dropout,\n seed, name)\n\n\ndef cudnn_rnn_tanh(inputs,\n input_h,\n params,\n is_training,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0.,\n seed=0,\n name=None):\n \"\"\"Cudnn RNN Tanh.\n\n Args:\n inputs: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n outputs, output_h\n \"\"\"\n return _cudnn_rnn_no_input_c(inputs, input_h, params, is_training,\n CUDNN_RNN_TANH, input_mode, direction, dropout,\n seed, name)\n\n\ndef cudnn_rnn_opaque_params_to_canonical(rnn_mode,\n num_layers,\n num_units,\n input_size,\n params,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0,\n seed=0,\n name=None):\n \"\"\"Convert cudnn opaque params to canonical.\n\n Args:\n rnn_mode: a string that specifies the mode, under which this RNN model runs.\n Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input, it could be different from the\n num_units.\n params: opaque cudnn params var.\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. 
See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n weights list and bias list\n Raises:\n ValueError: if rnn_mode or direction is invalid.\n \"\"\"\n\n _check_rnn_mode(rnn_mode)\n check_direction(direction)\n check_input_mode(input_mode)\n num_params = _get_num_params(rnn_mode, num_layers, direction)\n seed, seed2 = random_seed.get_seed(seed)\n weights, biases = gen_cudnn_rnn_ops.cudnn_rnn_params_to_canonical(\n rnn_mode=rnn_mode,\n num_layers=num_layers,\n num_units=num_units,\n input_size=input_size,\n params=params,\n input_mode=input_mode,\n direction=direction,\n dropout=dropout,\n seed=seed,\n seed2=seed2,\n num_params=num_params,\n name=name)\n return weights, biases\n\n\ndef cudnn_rnn_canonical_to_opaque_params(rnn_mode,\n num_layers,\n num_units,\n input_size,\n weights,\n biases,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dropout=0,\n seed=0,\n name=None):\n \"\"\"Converts params from the canonical format to a specific format of cuDNN.\n\n Args:\n rnn_mode: a string that specifies the mode, under which this RNN model runs.\n Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input, it could be different from the\n num_units.\n weights: a Tensor for weight parameters.\n biases: a Tensor for bias parameters.\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n an opaque Cudnn param.\n Raises:\n ValueError: if rnn_mode or direction is invalid.\n \"\"\"\n _check_rnn_mode(rnn_mode)\n check_direction(direction)\n check_input_mode(input_mode)\n seed, seed2 = random_seed.get_seed(seed)\n return gen_cudnn_rnn_ops.cudnn_rnn_canonical_to_params(\n rnn_mode=rnn_mode,\n num_layers=num_layers,\n num_units=num_units,\n input_size=input_size,\n weights=weights,\n biases=biases,\n input_mode=input_mode,\n direction=direction,\n dropout=dropout,\n seed=seed,\n seed2=seed2,\n name=name)\n\n\ndef cudnn_rnn_opaque_params_size(rnn_mode,\n num_layers,\n num_units,\n input_size,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dtype=dtypes.float32,\n dropout=0,\n seed=0,\n name=None):\n \"\"\"Returns opaque params size for specific Cudnn config.\n\n Args:\n rnn_mode: a string that specifies the mode, under which this RNN model runs.\n Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input, it could be different from the\n num_units.\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. 
It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dtype: one of tf.float32 or tf.float64.\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. See `tf.set_random_seed`\n for behavior.\n name: name of the operation.\n Returns:\n an int, size of Cudnn opaque params.\n Raises:\n ValueError: if rnn_mode or direction is invalid.\n \"\"\"\n _check_rnn_mode(rnn_mode)\n check_direction(direction)\n check_input_mode(input_mode)\n seed, seed2 = random_seed.get_seed(seed)\n return gen_cudnn_rnn_ops.cudnn_rnn_params_size(\n rnn_mode=rnn_mode,\n num_layers=num_layers,\n num_units=num_units,\n input_size=input_size,\n T=dtype,\n S=dtypes.int32,\n dropout=dropout,\n seed=seed,\n seed2=seed2,\n input_mode=input_mode,\n direction=direction,\n name=name)[0]\n\n\nclass _CudnnRNN(object):\n \"\"\"Creates an RNN model using the underlying Cudnn implementation.\n\n Note that self._NUM_PARAMS_PER_LAYER is the number of parameter sets of\n weight and bias per layer. It needs to be defined in subclasses.\n \"\"\"\n __doc__ += _cudnn_rnn_common_doc_string\n\n # TODO(jamesqin): support float16 CuDNN RNN\n def __init__(self,\n rnn_mode,\n num_layers,\n num_units,\n input_size,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dtype=dtypes.float32,\n dropout=0.,\n seed=0):\n \"\"\"Creates a CudnnRNN model from model spec.\n\n Args:\n rnn_mode: a string that specifies the mode, under which this RNN model runs.\n Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input, it could be different from the\n num_units.\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'linear_input', 'skip_input' or 'auto_select'.\n 'linear_input' (default) always applies a linear projection of input\n onto RNN hidden state. (standard RNN behavior).\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dtype: dtype of params, tf.float32 or tf.float64.\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the op seed used for initializing dropout. 
See `tf.set_random_seed`\n for behavior.\n Raises:\n ValueError: if direction is invalid.\n \"\"\"\n self._num_layers = num_layers\n self._num_units = num_units\n self._input_size = input_size\n self._rnn_mode = rnn_mode\n self._input_mode = input_mode\n self._direction = direction\n self._dtype = dtype\n self._dropout = dropout\n self._seed = seed\n\n @property\n def input_mode(self):\n return self._input_mode\n\n @property\n def input_size(self):\n return self._input_size\n\n @property\n def num_units(self):\n return self._num_units\n\n @property\n def num_layers(self):\n return self._num_layers\n\n @property\n def rnn_mode(self):\n return self._rnn_mode\n\n @property\n def direction(self):\n return self._direction\n\n def params_size(self):\n \"\"\"Calculates the size of the opaque parameter buffer needed for this model.\n\n Returns:\n The calculated parameter buffer size.\n \"\"\"\n return cudnn_rnn_opaque_params_size(\n rnn_mode=self._rnn_mode,\n num_layers=self._num_layers,\n num_units=self._num_units,\n input_size=self._input_size,\n dtype=self._dtype,\n dropout=self._dropout,\n seed=self._seed,\n input_mode=self._input_mode,\n direction=self._direction)\n\n def __call__(self, input_data, input_h, input_c, params, is_training=True):\n \"\"\"Runs the forward step for the RNN model.\n\n Args:\n input_data: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n input_c: the initial hidden state for c. This is only relevant for LSTM.\n A Tensor of the same shape as input_h.\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference.\n Returns:\n output: the output sequence.\n output_h: the final state for h.\n output_c: the final state for c. 
This is only relevant for LSTM.\n \"\"\"\n return _cudnn_rnn(\n input_data,\n input_h,\n input_c,\n params,\n is_training,\n self._rnn_mode,\n input_mode=self._input_mode,\n direction=self._direction,\n dropout=self._dropout,\n seed=self._seed)\n\n def params_to_canonical(self, params):\n \"\"\"Converts params from a specific format of cuDNN to the canonical format.\n\n Args:\n params: a Variable for weight and bias parameters.\n\n Returns:\n The weights and biases in the canonical format.\n \"\"\"\n return cudnn_rnn_opaque_params_to_canonical(\n rnn_mode=self._rnn_mode,\n num_layers=self._num_layers,\n num_units=self._num_units,\n input_size=self._input_size,\n params=params,\n input_mode=self._input_mode,\n direction=self._direction,\n dropout=self._dropout,\n seed=self._seed)\n\n def canonical_to_params(self, weights, biases):\n \"\"\"Converts params from the canonical format to a specific format of cuDNN.\n\n Args:\n weights: a Tensor for weight parameters.\n biases: a Tensor for bias parameters.\n\n Returns:\n An opaque parameter buffer in the cuDNN-specific format.\n \"\"\"\n return cudnn_rnn_canonical_to_opaque_params(\n rnn_mode=self._rnn_mode,\n num_layers=self._num_layers,\n num_units=self._num_units,\n input_size=self._input_size,\n weights=weights,\n biases=biases,\n input_mode=self._input_mode,\n direction=self._direction,\n dropout=self._dropout,\n seed=self._seed)\n\n\nclass CudnnLSTM(_CudnnRNN):\n \"\"\"Cudnn implementation of the LSTM model.\"\"\"\n __doc__ += _cudnn_rnn_common_doc_string\n # 4 sets of weight and bias parameters for the recurrent input, and 4 for the\n # previous layer input.\n _NUM_PARAMS_PER_LAYER = CUDNN_LSTM_PARAMS_PER_LAYER\n\n def __init__(self,\n num_layers,\n num_units,\n input_size,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dtype=dtypes.float32,\n dropout=0.,\n seed=0):\n \"\"\"Creates a Cudnn LSTM model from model spec.\n\n Args:\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input, it could be different from the\n num_units.\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'skip_input', 'linear_input' or 'auto_select'.\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dtype: dtype of params, tf.float32 or tf.float64.\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the seed used for initializing dropout.\n \"\"\"\n super(CudnnLSTM, self).__init__(\n CUDNN_LSTM,\n num_layers,\n num_units,\n input_size,\n input_mode=input_mode,\n direction=direction,\n dtype=dtype,\n dropout=dropout,\n seed=seed)\n\n def __call__(self, input_data, input_h, input_c, params, is_training=True):\n \"\"\"Runs the forward step for the Cudnn LSTM model.\n\n Args:\n input_data: the input sequence to the LSTM model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. A Tensor of shape [num_layers,\n batch_size, num_units].\n input_c: the initial hidden state for c. 
A Tensor of the same shape as\n input_h.\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference.\n Returns:\n output: the output sequence.\n output_h: the final state for h.\n output_c: the final state for c.\n \"\"\"\n output, output_h, output_c = super(CudnnLSTM, self).__call__(\n input_data, input_h, input_c, params, is_training=is_training)\n return (output, output_h, output_c)\n\n\nclass _CudnnRNNNoInputC(_CudnnRNN):\n \"\"\"Simple CudnnRNN models without input_c.\"\"\"\n __doc__ += _cudnn_rnn_common_doc_string\n\n def __init__(self,\n num_layers,\n num_units,\n input_size,\n input_mode=CUDNN_INPUT_LINEAR_MODE,\n direction=CUDNN_RNN_UNIDIRECTION,\n dtype=dtypes.float32,\n dropout=0.,\n seed=0):\n \"\"\"Creates a Cudnn RNN model from model spec, without the hidden state c.\n\n Args:\n num_layers: the number of layers for the RNN model.\n num_units: the number of units within the RNN model.\n input_size: the size of the input, it could be different from the\n num_units.\n input_mode: indicate whether there is a linear projection between the\n input and the actual computation before the first layer. It could be\n 'skip_input', 'linear_input' or 'auto_select'.\n 'skip_input' is only allowed when input_size == num_units;\n 'auto_select' implies 'skip_input' when input_size == num_units;\n otherwise, it implies 'linear_input'.\n direction: the direction model that the model operates. Could be either\n 'unidirectional' or 'bidirectional'\n dtype: dtype of params, tf.float32 or tf.float64.\n dropout: whether to enable dropout. When it is 0, dropout is disabled.\n seed: the seed used for initializing dropout.\n\n Raises:\n ValueError: if direction is not 'unidirectional' or 'bidirectional'.\n \"\"\"\n\n if direction not in (CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION):\n raise ValueError(\"Invalid direction: %s\" % direction)\n\n super(_CudnnRNNNoInputC, self).__init__(\n self._rnn_mode,\n num_layers,\n num_units,\n input_size,\n input_mode=input_mode,\n direction=direction,\n dtype=dtype,\n dropout=dropout,\n seed=seed)\n\n def __call__(self, input_data, input_h, params, is_training=True):\n \"\"\"Runs the forward step for the Cudnn RNN model.\n\n Args:\n input_data: the input sequence to the RNN model. A Tensor of shape [?,\n batch_size, input_size].\n input_h: the initial hidden state for h. 
A Tensor of shape [num_layers,\n batch_size, num_units].\n params: the parameter buffer created for this model.\n is_training: whether this operation will be used in training or inference.\n Returns:\n output: the output sequence.\n output_h: the final state for h.\n \"\"\"\n return _cudnn_rnn_no_input_c(\n input_data,\n input_h,\n params,\n is_training,\n self._rnn_mode,\n input_mode=self._input_mode,\n direction=self._direction,\n dropout=self._dropout,\n seed=self._seed)\n\n\nclass CudnnGRU(_CudnnRNNNoInputC):\n \"\"\"Cudnn implementation of the GRU model.\"\"\"\n __doc__ += _cudnn_rnn_common_doc_string\n _rnn_mode = CUDNN_GRU\n # 3 sets of weight and bias parameters for the recurrent input, and 3 for the\n # previous layer input.\n _NUM_PARAMS_PER_LAYER = CUDNN_GRU_PARAMS_PER_LAYER\n\n\nclass CudnnRNNTanh(_CudnnRNNNoInputC):\n \"\"\"Cudnn implementation of the RNN-tanh model.\"\"\"\n __doc__ += _cudnn_rnn_common_doc_string\n _rnn_mode = CUDNN_RNN_TANH\n # 1 set of weight and bias parameters for the recurrent input, and 1 for the\n # previous layer input.\n _NUM_PARAMS_PER_LAYER = CUDNN_RNN_TANH_PARAMS_PER_LAYER\n\n\nclass CudnnRNNRelu(_CudnnRNNNoInputC):\n \"\"\"Cudnn implementation of the RNN-relu model.\"\"\"\n __doc__ += _cudnn_rnn_common_doc_string\n _rnn_mode = CUDNN_RNN_RELU\n # 1 set of weight and bias parameters for the recurrent input, and 1 for the\n # previous layer input.\n _NUM_PARAMS_PER_LAYER = CUDNN_RNN_RELU_PARAMS_PER_LAYER\n" ]
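The opaque-params workflow spelled out in `_cudnn_rnn_common_doc_string` above is easier to follow in code. Below is a minimal sketch, not taken from this file: it assumes a TensorFlow 1.x build that still ships `tf.contrib.cudnn_rnn`, a CUDA-capable GPU, and hypothetical layer/shape values.

# Sketch of the documented workflow: build model -> query buffer size ->
# allocate opaque buffer -> run -> register a canonical-format saveable.
import tensorflow as tf
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops

num_layers, num_units, input_size = 2, 128, 64   # hypothetical sizes
seq_len, batch_size = 10, 4

model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, input_size)

# params_size() returns a scalar tensor, hence validate_shape=False below.
params_size_t = model.params_size()
params = tf.Variable(
    tf.random_uniform([params_size_t], -0.1, 0.1), validate_shape=False)

inputs = tf.zeros([seq_len, batch_size, input_size])
input_h = tf.zeros([num_layers, batch_size, num_units])
input_c = tf.zeros([num_layers, batch_size, num_units])
outputs, output_h, output_c = model(
    inputs, input_h, input_c, params, is_training=False)

# Register the saveable so checkpoints store the canonical format.
saveable = cudnn_rnn_ops.CudnnLSTMSaveable(
    params, num_layers, num_units, input_size)
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = tf.train.Saver()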
[ [ "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.check_ops.assert_proper_iterable", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.linalg.linalg_impl.adjoint", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.array_ops.matrix_transpose", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.common_shapes.broadcast_shape", "tensorflow.python.framework.errors.InvalidArgumentError" ], [ "pandas.DataFrame" ], [ "tensorflow.data.Dataset.from_tensors", "tensorflow.constant", "tensorflow.reshape", "tensorflow.layers.Dense", "tensorflow.train.get_global_step", "tensorflow.data.Dataset.zip", "tensorflow.train.GradientDescentOptimizer", "tensorflow.python.keras.metrics.BinaryAccuracy", "tensorflow.estimator.EstimatorSpec", "tensorflow.estimator.RunConfig", "tensorflow.contrib.distribute.MirroredStrategy", "tensorflow.app.run" ], [ "numpy.expand_dims", "tensorflow.python.ops.sparse_ops.sparse_eye", "numpy.arange", "numpy.eye", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.ops.sparse_ops.sparse_to_dense", "tensorflow.python.platform.googletest.main", "tensorflow.python.ops.sparse_ops.sparse_expand_dims", "numpy.random.binomial", "numpy.prod", "numpy.where", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.ops.readers.TextLineDataset", "tensorflow.python.ops.parsing_ops.decode_csv", "numpy.median", "tensorflow.python.platform.gfile.DeleteRecursively", "tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator", "tensorflow.python.data.experimental.ops.readers.CsvDataset", "tensorflow.python.client.session.Session", "tensorflow.python.platform.test.main", "tensorflow.python.platform.googletest.GetTempDir" ], [ "tensorflow.python.autograph.converters.control_flow.transform", "tensorflow.python.platform.test.main", "tensorflow.python.framework.constant_op.constant" ], [ "numpy.arange", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices" ], [ "tensorflow.contrib.tensorrt.test.tf_trt_integration_test_base.IsQuantizationMode", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.squeeze", "numpy.random.randn", "tensorflow.python.ops.nn.conv2d", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.math_ops.sub", "tensorflow.python.ops.math_ops.add", "tensorflow.python.platform.test.main", "tensorflow.contrib.tensorrt.python.trt_convert.clear_test_values", "tensorflow.python.ops.nn.relu", "tensorflow.python.ops.math_ops.div", "tensorflow.python.ops.math_ops.mul", "tensorflow.python.ops.nn.bias_add", "tensorflow.contrib.tensorrt.python.trt_convert.add_test_value", "tensorflow.python.ops.nn_ops.max_pool", "tensorflow.python.framework.ops.Graph", "tensorflow.contrib.tensorrt.test.tf_trt_integration_test_base.OptimizerDisabledRewriterConfig", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.framework.sparse_tensor.SparseTensorValue", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.training.server_lib.Server.create_local_server", "tensorflow.python.platform.test.main", 
"tensorflow.python.client.session.Session", "tensorflow.python.ops.sparse_ops.sparse_to_dense", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "numpy.array" ], [ "numpy.random.seed", "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.ops.linalg_ops.matrix_determinant", "numpy.finfo", "tensorflow.python.platform.test.main", "tensorflow.python.ops.linalg_ops.log_matrix_determinant", "tensorflow.python.ops.array_ops.ones", "numpy.prod", "tensorflow.python.ops.linalg_ops.matrix_solve_ls", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "numpy.concatenate", "tensorflow.python.keras.backend.function", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.keras.backend.symbolic_learning_phase", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras.engine.distributed_training_utils.flatten_perdevice_values", "tensorflow.python.keras.metrics.clone_metrics", "tensorflow.python.keras.models.clone_model", "tensorflow.python.keras.utils.generic_utils.Progbar", "tensorflow.python.keras.engine.distributed_training_utils.validate_distributed_dataset_inputs", "tensorflow.python.keras.engine.distributed_training_utils.set_weights", "tensorflow.python.keras.backend.get_session", "tensorflow.python.keras.engine.distributed_training_utils.get_batch_dimension", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras.engine.distributed_training_utils.init_restore_or_wait_for_variables", "tensorflow.python.keras.callbacks.configure_callbacks", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.distribute.distribute_lib.get_loss_reduction", "tensorflow.python.keras.engine.distributed_training_utils.unwrap_values", "tensorflow.python.keras.backend.set_learning_phase", "tensorflow.python.keras.backend.get_graph", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.random_ops.random_uniform", "tensorflow.python.ops.array_ops.split", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.math_ops.floor", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.partitioned_variables.fixed_size_partitioner", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.platform.tf_logging.log_first_n", "tensorflow.python.ops.clip_ops.clip_by_value", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.nn_ops.bias_add", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.util.nest.map_structure", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.util.nest.is_sequence", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.layers.base.Layer.__call__", "tensorflow.python.util.nest.map_structure_up_to", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.ops.math_ops.div", 
"tensorflow.python.keras.activations.get", "tensorflow.python.util.nest.get_traverse_shallow_structure", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.context.num_gpus", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.keras.activations.serialize", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.util.nest.assert_same_structure", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.keras.engine.input_spec.InputSpec", "tensorflow.python.keras.initializers.get", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.keras.initializers.serialize", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.ops.array_ops.placeholder", "tensorflow.contrib.kafka.python.ops.kafka_dataset_ops.KafkaDataset", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.iterator_ops.Iterator.from_structure" ], [ "tensorflow.python.platform.test.main", "tensorflow.python.autograph.pyct.pretty_printer.fmt" ], [ "tensorflow.python.eager.context.enable_run_metadata", "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.ops.array_ops.constant", "tensorflow.core.protobuf.config_pb2.GraphOptions", "tensorflow.python.profiler.model_analyzer.profile", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.framework.test_util.create_local_cluster", "tensorflow.python.platform.gfile.Exists", "tensorflow.python.profiler.internal.model_analyzer_testlib.BuildFullModel", "tensorflow.python.framework.ops.device", "tensorflow.python.platform.gfile.Remove", "tensorflow.python.profiler.internal.model_analyzer_testlib.ProfilerFromFile", "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.profiler.internal.model_analyzer_testlib.SearchTFProfNode", "tensorflow.python.platform.gfile.MkDir", "tensorflow.core.profiler.tfprof_log_pb2.ProfileProto", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.platform.test.get_temp_dir", "tensorflow.python.framework.test_util.IsMklEnabled", "tensorflow.python.framework.ops.reset_default_graph", "tensorflow.python.profiler.model_analyzer.advise", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.platform.gfile.ListDirectory", "tensorflow.python.profiler.profile_context.ProfileContext", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.client.session.Session", "tensorflow.python.eager.context.export_run_metadata", "tensorflow.python.profiler.internal.model_analyzer_testlib.BuildSmallModel", "tensorflow.python.eager.context.disable_run_metadata", "tensorflow.core.protobuf.rewriter_config_pb2.RewriterConfig", "tensorflow.core.profiler.profile_pb2.Profile", "tensorflow.python.ops.gradients.gradients", "tensorflow.python.profiler.model_analyzer.Profiler", "numpy.ones", "tensorflow.python.profiler.option_builder.ProfileOptionBuilder.time_and_memory", "tensorflow.python.ops.random_ops.random_normal", "tensorflow.python.profiler.internal.model_analyzer_testlib.CheckAndRemoveDoc", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.platform.gfile.Open" ], [ "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.array_ops.split", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.state_ops.assign", 
"tensorflow.python.ops.gen_cudnn_rnn_ops.cudnn_rnn", "tensorflow.python.ops.init_ops.constant_initializer", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.framework.random_seed.get_seed", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.nn_ops.bias_add", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.keras.engine.base_layer.to_snake_case", "tensorflow.python.ops.gen_cudnn_rnn_ops.cudnn_rnn_params_size", "tensorflow.python.ops.gen_cudnn_rnn_ops.cudnn_rnn_canonical_to_params", "tensorflow.python.training.saver.BaseSaverBuilder.SaveSpec", "tensorflow.python.training.checkpointable.tracking.Checkpointable", "tensorflow.python.ops.gen_cudnn_rnn_ops.cudnn_rnnv2", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.init_ops.zeros_initializer", "tensorflow.python.ops.gen_cudnn_rnn_ops.cudnn_rnn_params_to_canonical", "tensorflow.python.ops.array_ops.reshape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.5", "1.7" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.12", "2.6", "2.7", "1.13", "2.3", "2.4", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.10", "1.12" ] } ]
dshahrokhian/handtracking
[ "1003bd5e7a087952f777f04420d0c9785c549bb7" ]
[ "detect_multi_threaded.py" ]
[ "from utils import detector_utils as detector_utils \nimport cv2\nimport tensorflow as tf\nimport multiprocessing\nfrom multiprocessing import Queue, Pool\nimport time\nfrom utils.detector_utils import WebcamVideoStream\nimport datetime\nimport argparse\n\n\nframe_processed = 0\nscore_thresh = 0.2\n\n# Create a worker thread that loads graph and\n# does detection on images in an input queue and puts it on an output queue\n\ndef worker(input_q, output_q, cap_params, frame_processed):\n print(\">> loading frozen model for worker\")\n detection_graph, sess = detector_utils.load_inference_graph()\n sess = tf.Session(graph=detection_graph)\n while True:\n #print(\"> ===== in worker loop, frame \", frame_processed)\n frame = input_q.get()\n if (frame is not None):\n # actual detection\n boxes, scores = detector_utils.detect_objects(\n frame, detection_graph, sess)\n # draw bounding boxes\n detector_utils.draw_box_on_image(\n cap_params['num_hands_detect'], cap_params[\"score_thresh\"], scores, boxes, cap_params['im_width'], cap_params['im_height'], frame)\n # add frame annotated with bounding box to queue\n output_q.put(frame)\n frame_processed += 1\n else:\n output_q.put(frame)\n sess.close()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-src', '--source', dest='video_source', type=int,\n default=0, help='Device index of the camera.')\n parser.add_argument('-nhands', '--num_hands', dest='num_hands', type=int,\n default=2, help='Max number of hands to detect.')\n parser.add_argument('-fps', '--fps', dest='fps', type=int,\n default=1, help='Show FPS on detection/display visualization')\n parser.add_argument('-wd', '--width', dest='width', type=int,\n default=300, help='Width of the frames in the video stream.')\n parser.add_argument('-ht', '--height', dest='height', type=int,\n default=200, help='Height of the frames in the video stream.')\n parser.add_argument('-ds', '--display', dest='display', type=int,\n default=1, help='Display the detected images using OpenCV. 
This reduces FPS')\n parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,\n default=4, help='Number of workers.')\n parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,\n default=5, help='Size of the queue.')\n args = parser.parse_args()\n\n input_q = Queue(maxsize=args.queue_size)\n output_q = Queue(maxsize=args.queue_size)\n\n video_capture = WebcamVideoStream(src=args.video_source,\n width=args.width,\n height=args.height).start()\n\n cap_params = {}\n frame_processed = 0\n cap_params['im_width'], cap_params['im_height'] = video_capture.size()\n cap_params['score_thresh'] = score_thresh\n\n # max number of hands we want to detect/track\n cap_params['num_hands_detect'] = args.num_hands\n\n print(cap_params, args)\n\n # spin up workers to parallelize detection.\n pool = Pool(args.num_workers, worker,\n (input_q, output_q, cap_params, frame_processed))\n\n start_time = datetime.datetime.now()\n num_frames = 0\n fps = 0\n index = 0\n\n cv2.namedWindow('Multi-Threaded Detection', cv2.WINDOW_NORMAL)\n\n while True:\n frame = video_capture.read()\n frame = cv2.flip(frame, 1)\n index += 1\n\n input_q.put(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n output_frame = output_q.get()\n\n output_frame = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)\n\n elapsed_time = (datetime.datetime.now() -\n start_time).total_seconds()\n num_frames += 1\n fps = num_frames / elapsed_time\n # print(\"frame \", index, num_frames, elapsed_time, fps)\n\n if (output_frame is not None):\n if (args.display > 0):\n if (args.fps > 0):\n detector_utils.draw_fps_on_image(\n \"FPS : \" + str(int(fps)), output_frame)\n cv2.imshow('Multi-Threaded Detection', output_frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n if (num_frames == 400):\n num_frames = 0\n start_time = datetime.datetime.now()\n else:\n print(\"frames processed: \", index,\n \"elapsed time: \", elapsed_time, \"fps: \", str(int(fps)))\n else:\n # print(\"video end\")\n break\n elapsed_time = (datetime.datetime.now() -\n start_time).total_seconds()\n fps = num_frames / elapsed_time\n print(\"fps\", fps)\n pool.terminate()\n video_capture.stop()\n cv2.destroyAllWindows()\n" ]
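The script above relies on one idiom worth isolating: `multiprocessing.Pool` is given the worker loop as its initializer, so every pool process spins on the shared input queue until the main process calls `pool.terminate()`. Below is a stripped-down sketch of that pattern; `process_frame` is a hypothetical stand-in for the detection and drawing steps.

# Pool-as-worker-loop pattern: the "initializer" never returns; workers are
# reaped by pool.terminate(), exactly as in the script above.
from multiprocessing import Pool, Queue

def process_frame(frame):
    return frame * 2  # placeholder for detect_objects + draw_box_on_image

def worker(input_q, output_q):
    while True:
        output_q.put(process_frame(input_q.get()))

if __name__ == '__main__':
    input_q, output_q = Queue(maxsize=5), Queue(maxsize=5)
    pool = Pool(4, worker, (input_q, output_q))
    for i in range(20):
        input_q.put(i)
    results = [output_q.get() for _ in range(20)]
    pool.terminate()
    print(sorted(results))  # completion order across workers is nondeterministic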
[ [ "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
tmoopenn/atari-representation-learning
[ "bd5628f77fb4ff421077e74200ebd7b386b87f7c" ]
[ "atariari/methods/utils.py" ]
[ "import argparse\nimport copy\nimport os\nimport subprocess\n\nimport torch\nimport numpy as np\nfrom sklearn.metrics import f1_score as compute_f1_score\nfrom a2c_ppo_acktr.envs import make_vec_envs\nfrom a2c_ppo_acktr.utils import get_vec_normalize\nfrom collections import defaultdict\n\n# methods that need encoder trained before\ntrain_encoder_methods = ['cpc', 'jsd-stdim', 'vae', \"naff\", \"infonce-stdim\", \"global-infonce-stdim\",\n \"global-local-infonce-stdim\", \"dim\"]\nprobe_only_methods = [\"supervised\", \"random-cnn\", \"majority\", \"pretrained-rl-agent\"]\n\n\ndef get_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--env-name', default='MontezumaRevengeNoFrameskip-v4',\n help='environment to train on (default: MontezumaRevengeNoFrameskip-v4)')\n parser.add_argument('--num-frame-stack', type=int, default=1,\n help='Number of frames to stack for a state')\n parser.add_argument('--no-downsample', action='store_true', default=True,\n help='Whether to use a linear classifier')\n parser.add_argument('--pretraining-steps', type=int, default=100000,\n help='Number of steps to pretrain representations (default: 100000)')\n parser.add_argument('--probe-steps', type=int, default=50000,\n help='Number of steps to train probes (default: 30000 )')\n # parser.add_argument('--probe-test-steps', type=int, default=15000,\n # help='Number of steps to train probes (default: 15000 )')\n parser.add_argument('--num-processes', type=int, default=8,\n help='Number of parallel environments to collect samples from (default: 8)')\n parser.add_argument('--method', type=str, default='infonce-stdim',\n choices=train_encoder_methods + probe_only_methods,\n help='Method to use for training representations (default: infonce-stdim)')\n parser.add_argument('--linear', action='store_true', default=True,\n help='Whether to use a linear classifier')\n parser.add_argument('--use_multiple_predictors', action='store_true', default=False,\n help='Whether to use multiple linear classifiers in the contrastive loss')\n\n parser.add_argument('--lr', type=float, default=3e-4,\n help='Learning Rate foe learning representations (default: 5e-4)')\n parser.add_argument('--batch-size', type=int, default=64,\n help='Mini-Batch Size (default: 64)')\n parser.add_argument('--epochs', type=int, default=100,\n help='Number of epochs for (default: 100)')\n parser.add_argument('--cuda-id', type=int, default=0,\n help='CUDA device index')\n parser.add_argument('--seed', type=int, default=42,\n help='Random seed to use')\n parser.add_argument('--encoder-type', type=str, default=\"Nature\", choices=[\"Impala\", \"Nature\"],\n help='Encoder type (Impala or Nature)')\n parser.add_argument('--feature-size', type=int, default=256,\n help='Size of features')\n parser.add_argument(\"--patience\", type=int, default=15)\n parser.add_argument(\"--entropy-threshold\", type=float, default=0.6)\n parser.add_argument(\"--color\", action='store_true', default=False)\n parser.add_argument(\"--end-with-relu\", action='store_true', default=False)\n parser.add_argument(\"--wandb-proj\", type=str, default=\"atari-reps\")\n parser.add_argument(\"--wandb-entity\", type=str, default=None)\n parser.add_argument(\"--num_rew_evals\", type=int, default=10)\n # rl-probe specific arguments\n parser.add_argument(\"--checkpoint-index\", type=int, default=-1)\n\n # naff-specific arguments\n parser.add_argument(\"--naff_fc_size\", type=int, default=2048,\n help=\"fully connected layer width for naff\")\n 
parser.add_argument(\"--pred_offset\", type=int, default=1,\n help=\"how many steps in future to predict\")\n # CPC-specific arguments\n parser.add_argument('--sequence_length', type=int, default=100,\n help='Sequence length.')\n parser.add_argument('--steps_start', type=int, default=0,\n help='Number of immediate future steps to ignore.')\n parser.add_argument('--steps_end', type=int, default=99,\n help='Number of future steps to predict.')\n parser.add_argument('--steps_step', type=int, default=4,\n help='Skip every these many frames.')\n parser.add_argument('--gru_size', type=int, default=256,\n help='Hidden size of the GRU layers.')\n parser.add_argument('--gru_layers', type=int, default=2,\n help='Number of GRU layers.')\n parser.add_argument(\"--collect-mode\", type=str, choices=[\"random_agent\", \"pretrained_ppo\"],\n default=\"random_agent\")\n\n parser.add_argument(\"--beta\", default=1.0)\n # probe arguments\n parser.add_argument(\"--weights-path\", type=str, default=\"None\")\n parser.add_argument(\"--train-encoder\", action='store_true', default=True)\n parser.add_argument('--probe-lr', type=float, default=3e-4)\n parser.add_argument(\"--probe-collect-mode\", type=str, choices=[\"random_agent\", \"pretrained_ppo\"],\n default=\"random_agent\")\n parser.add_argument('--num-runs', type=int, default=1)\n return parser\n\n\ndef set_seeds(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n\n if torch.cuda.is_available():\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\ndef calculate_accuracy(preds, y):\n preds = preds >= 0.5\n labels = y >= 0.5\n acc = preds.eq(labels).sum().float() / labels.numel()\n return acc\n\n\ndef calculate_multiclass_f1_score(preds, labels):\n preds = torch.argmax(preds, dim=1).detach().numpy()\n labels = labels.numpy()\n f1score = compute_f1_score(labels, preds, average=\"weighted\")\n return f1score\n\n\ndef calculate_multiclass_accuracy(preds, labels):\n preds = torch.argmax(preds, dim=1)\n acc = float(torch.sum(torch.eq(labels, preds)).data) / labels.size(0)\n return acc\n\n\ndef save_model(model, envs, save_dir, model_name, use_cuda):\n save_path = os.path.join(save_dir)\n try:\n os.makedirs(save_path)\n except OSError:\n pass\n\n # A really ugly way to save a model to CPU\n save_model = model\n if use_cuda:\n save_model = copy.deepcopy(model).cpu()\n\n save_model = [save_model,\n getattr(get_vec_normalize(envs), 'ob_rms', None)]\n\n torch.save(save_model, os.path.join(save_path, model_name + \".pt\"))\n\n\ndef evaluate_policy(actor_critic, envs, args, eval_log_dir, device):\n eval_envs = make_vec_envs(\n args.env_name, args.seed + args.num_processes, args.num_processes,\n args.gamma, eval_log_dir, args.add_timestep, device, True)\n\n vec_norm = get_vec_normalize(eval_envs)\n if vec_norm is not None:\n vec_norm.eval()\n vec_norm.ob_rms = get_vec_normalize(envs).ob_rms\n\n eval_episode_rewards = []\n\n obs = eval_envs.reset()\n eval_recurrent_hidden_states = torch.zeros(args.num_processes,\n actor_critic.recurrent_hidden_state_size, device=device)\n eval_masks = torch.zeros(args.num_processes, 1, device=device)\n\n while len(eval_episode_rewards) < 10:\n with torch.no_grad():\n _, action, _, eval_recurrent_hidden_states = actor_critic.act(\n obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)\n\n # Obser reward and next obs\n obs, reward, done, infos = eval_envs.step(action)\n\n eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0]\n for done_ in 
done])\n for info in infos:\n if 'episode' in info.keys():\n eval_episode_rewards.append(info['episode']['r'])\n print(\" Evaluation using {} episodes: mean reward {:.5f}\\n\".\n format(len(eval_episode_rewards),\n np.mean(eval_episode_rewards)))\n eval_envs.close()\n return eval_episode_rewards\n\n\ndef generate_video():\n os.chdir(\"act_maps\")\n subprocess.call([\n 'ffmpeg', '-framerate', '8', '-i', 'file%02d.png', '-r', '30', '-pix_fmt', 'yuv420p',\n 'video_name.mp4'\n ])\n\n\nclass appendabledict(defaultdict):\n def __init__(self, type_=list, *args, **kwargs):\n self.type_ = type_\n super().__init__(type_, *args, **kwargs)\n\n # def map_(self, func):\n # for k, v in self.items():\n # self.__setitem__(k, func(v))\n\n def subslice(self, slice_):\n \"\"\"indexes every value in the dict according to a specified slice\n\n Parameters\n ----------\n slice_ : int or slice type\n An indexing slice, e.g., ``slice(2, 20, 2)`` or ``2``.\n\n\n Returns\n -------\n sliced_dict : dict (not appendabledict type!)\n A dictionary with each value from this object's dictionary, but the value is sliced according to slice_\n e.g. if this dictionary has {a:[1,2,3,4], b:[5,6,7,8]}, then self.subslice(2) returns {a:3,b:7}\n self.subslice(slice(1,3)) returns {a:[2,3], b:[6,7]}\n\n \"\"\"\n sliced_dict = {}\n for k, v in self.items():\n sliced_dict[k] = v[slice_]\n return sliced_dict\n\n def append_update(self, other_dict):\n \"\"\"appends current dict's values with values from other_dict\n\n Parameters\n ----------\n other_dict : dict\n A dictionary that you want to append to this dictionary\n\n\n Returns\n -------\n Nothing. The side effect is this dict's values change\n\n \"\"\"\n for k, v in other_dict.items():\n self.__getitem__(k).append(v)\n\n\n# Thanks Bjarten! (https://github.com/Bjarten/early-stopping-pytorch)\nclass EarlyStopping(object):\n \"\"\"Early stops the training if validation accuracy doesn't improve after a given patience.\"\"\"\n\n def __init__(self, patience=7, verbose=False, wandb=None, name=\"\"):\n \"\"\"\n Args:\n patience (int): How long to wait after last time validation accuracy improved.\n Default: 7\n verbose (bool): If True, prints a message for each validation accuracy improvement.\n Default: False\n \"\"\"\n self.patience = patience\n self.verbose = verbose\n self.counter = 0\n self.best_score = None\n self.early_stop = False\n self.val_acc_max = 0.\n self.name = name\n self.wandb = wandb\n\n def __call__(self, val_acc, model):\n\n score = val_acc\n\n if self.best_score is None:\n self.best_score = score\n self.save_checkpoint(val_acc, model)\n elif score <= self.best_score:\n self.counter += 1\n print(f'EarlyStopping for {self.name} counter: {self.counter} out of {self.patience}')\n if self.counter >= self.patience:\n self.early_stop = True\n print(f'{self.name} has stopped')\n\n else:\n self.best_score = score\n self.save_checkpoint(val_acc, model)\n self.counter = 0\n\n def save_checkpoint(self, val_acc, model):\n '''Saves the model when validation accuracy increases.'''\n if self.verbose:\n print(\n f'Validation accuracy increased for {self.name} ({self.val_acc_max:.6f} --> {val_acc:.6f}). 
Saving model ...')\n\n save_dir = self.wandb.run.dir\n torch.save(model.state_dict(), save_dir + \"/\" + self.name + \".pt\")\n self.val_acc_max = val_acc\n\n\nclass Cutout(object):\n \"\"\"Randomly mask out one or more patches from an image.\n Args:\n n_holes (int): Number of patches to cut out of each image.\n length (int): The length (in pixels) of each square patch.\n \"\"\"\n\n def __init__(self, n_holes, length):\n self.n_holes = n_holes\n self.length = length\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image of size (C, H, W).\n Returns:\n Tensor: Image with n_holes of dimension length x length cut out of it.\n \"\"\"\n h = img.size(1)\n w = img.size(2)\n\n mask = np.ones((h, w), np.float32)\n\n for n in range(self.n_holes):\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img = img * mask\n\n return img\n" ]
[ [ "numpy.random.seed", "torch.zeros", "numpy.clip", "torch.manual_seed", "torch.eq", "torch.from_numpy", "numpy.ones", "torch.FloatTensor", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "numpy.mean", "numpy.random.randint", "sklearn.metrics.f1_score", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
crmauceri/dataset_loaders
[ "542401b1a9a1ca98a15b4e529a3e78f9d0f91f52" ]
[ "dataloaders/plots.py" ]
[ "from PIL import Image, ImageDraw, ImageFont\nimport torch, random\nfrom dataloaders.utils import xywh2xyxy\n\n## Modified from https://github.com/ultralytics/yolov5/utils/plots.py under GNU License\ndef plot_bboxes(images, targets, fname='images.jpg', names=None, max_size=640, max_subplots=16, colors=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.float32) # init\n for i, sample in enumerate(zip(images, targets)):\n img, image_targets = sample\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = Image.fromarray(img.transpose(1, 2, 0)[:, :, :3].astype(np.uint8))\n if scale_factor < 1:\n img = img.resize((w, h))\n\n if image_targets.shape[0] > 0:\n boxes = xywh2xyxy(image_targets[:, 1:5]).T\n classes = image_targets[:, 0].astype('int')\n labels = image_targets.shape[1] == 5 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n if colors is not None:\n color = colors[cls % len(colors)]\n else:\n color = None\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, img, label=label, color=color, line_thickness=tl)\n\n # Image border\n draw = ImageDraw.Draw(img)\n draw.rectangle([(0, 0), img.size], outline=(255, 255, 255), width=3)\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = np.array(img)\n\n if fname:\n r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic_img = Image.fromarray(mosaic[:, :, :3].astype(np.uint8))\n mosaic_img = mosaic_img.resize((int(ns * w * r), int(ns * h * r))) # resize returns a new image, so reassign\n mosaic_img.save(fname) # PIL save\n return mosaic\n\ndef plot_one_box(x, img, color=None, label=None, line_thickness=None):\n # Plots one bounding box on image img\n tl = line_thickness or round(0.002 * (img.size[0] + img.size[1]) / 2) + 1 # line/font thickness (PIL images have .size, not .shape)\n color = color or [random.randint(0, 255) for _ in range(3)]\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\n draw = ImageDraw.Draw(img)\n draw.rectangle([c1, c2], outline=color, width=tl) # [(x0, y0), (x1, y1)]\n if label:\n tf = max(tl - 1, 1) # font thickness\n fnt = ImageFont.truetype(\"/Library/Fonts/Arial.ttf\") #, tl / 3)\n t_size = fnt.getsize(label)\n c2 = c1[0] + t_size[0]*1.5, c1[1] - t_size[1] - 3\n draw.rectangle([c1, c2], fill=color) # filled\n draw.text((c1[0], c1[1] - t_size[1]), label, font=fnt)\n\nif __name__ == '__main__':\n from dataloaders import make_dataset\n from torch.utils.data import DataLoader\n import matplotlib.pyplot as plt\n import numpy as np\n import math\n from dataloaders.config.defaults import get_cfg_defaults\n\n cfg = get_cfg_defaults()\n cfg.merge_from_file('configs/sunrgbd.yaml')\n cfg.merge_from_list(['DATASET.ANNOTATION_TYPE', 'bbox',\n 'DATASET.NO_TRANSFORMS', True,\n 'TRAIN.BATCH_SIZE', 1])\n\n # Same as main method of dataloaders.datasets.coco\n val = make_dataset(cfg, split='val')\n dataloader = DataLoader(val, batch_size=16, shuffle=False, num_workers=0)\n names = [x.replace('_', ' ') for x in val.loader.class_names]\n\n for ii, sample in enumerate(dataloader):\n mosaic = plot_bboxes(sample[\"image\"], sample[\"label\"], names=names)\n plt.figure()\n plt.imshow(mosaic.astype(np.uint8))\n plt.show()\n break" ]
[ [ "torch.utils.data.DataLoader", "numpy.ceil", "numpy.max", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Loptt/pong-autoencoder
[ "c5123bb2466f644f7513d807044e11a4a17aae22" ]
[ "latent_tests/generate_latent_dataset.py" ]
[ "import os\r\nos.add_dll_directory(\r\n \"C:\\\\Program Files\\\\NVIDIA GPU Computing Toolkit\\\\CUDA\\\\v11.4\\\\bin\")\r\n\r\nimport tensorflow as tf\r\nfrom keras.preprocessing.image import load_img\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.models import load_model\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\n\r\ndef load_images(path):\r\n files = os.listdir(path)\r\n images = []\r\n\r\n for f in files:\r\n print(\"Loading \" + path + f)\r\n img = load_img(path + f, color_mode='grayscale')\r\n img_arr = img_to_array(img)\r\n images.append(img_arr)\r\n \r\n return tf.constant(images)\r\n\r\ndef find_balls(data):\r\n # Assuming data is tensor shape (batch_size, height, width , 1)\r\n y = tf.math.reduce_max(tf.math.argmax(data, axis=1), axis=1)\r\n x = tf.math.reduce_max(tf.math.argmax(data, axis=2), axis=1)\r\n return tf.concat([x, y], axis=1)\r\n\r\ndef find_paddles(data):\r\n # Assuming data is tensor shape (batch_size, height, width , 1)\r\n left_paddle = data[:, :, :10]\r\n right_paddle = data[:, :, 10:]\r\n\r\n left_y = tf.math.reduce_max(tf.math.argmax(left_paddle, axis=1), axis=1)\r\n left_x = tf.math.reduce_max(tf.math.argmax(left_paddle, axis=2), axis=1)\r\n\r\n right_y = tf.math.reduce_max(tf.math.argmax(right_paddle, axis=1), axis=1)\r\n right_x = tf.math.reduce_max(tf.math.argmax(right_paddle, axis=2), axis=1)\r\n\r\n return tf.concat([left_x, left_y, right_x, right_y], axis=1)\r\n\r\nif __name__ == '__main__':\r\n imgs_paddle = load_images(\"./images_ballless/\")\r\n imgs_ball = load_images(\"./images_paddleless_big/\")\r\n\r\n model_paddle = load_model(\"./prod_models/vae_ballless\")\r\n model_ball = load_model(\"./prod_models/vae_big_paddleless\")\r\n\r\n latents_paddle = tf.constant(model_paddle.encoder.predict(imgs_paddle)[2])\r\n latents_ball = tf.constant(model_ball.encoder.predict(imgs_ball)[2])\r\n\r\n paddles_loc = find_paddles(imgs_paddle)\r\n balls_loc = find_balls(imgs_ball)\r\n\r\n pickle.dump((latents_paddle, paddles_loc), open('./latent_tests/paddle_latent_ds.p', 'wb'))\r\n pickle.dump((latents_ball, balls_loc), open('./latent_tests/balls_latent_ds.p', 'wb'))\r\n\r\n \r\n" ]
[ [ "tensorflow.math.argmax", "tensorflow.concat", "tensorflow.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
JosephineRabbit/TAL_attention
[ "d04ef6ecf4b67a41e4c672f39e811327faf31951" ]
[ "gtad_GA_NL_4_train.py" ]
[ "import os\nimport torch\nimport torch.nn.parallel\nimport torch.optim as optim\nfrom torch import autograd\nimport numpy as np\n\nfrom gtad_lib import opts\nfrom gtad_lib.model_GA_NL_4 import GTAD\nfrom gtad_lib.dataset import VideoDataSet\nfrom gtad_lib.loss_function import get_mask, gtad_loss_func # subgraph_loss_func, node_loss_func\n\n################## fix everything ##################\nimport random\n\nseed = 0\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\n#######################################################\ndef load_checkpoint(model, checkpoint):\n\n print(\"loading checkpoint...\")\n model_dict = model.state_dict()\n modelCheckpoint = torch.load(checkpoint)\n pretrained_dict = modelCheckpoint['state_dict']\n # 过滤操作\n new_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()}\n model_dict.update(new_dict)\n # 打印出来,更新了多少的参数\n print('Total : {}, update: {}'.format(len(pretrained_dict), len(new_dict)))\n model.load_state_dict(model_dict)\n print(\"loaded finished!\")\n\n return model\n\n# keep track of statistics\nclass AverageMeter(object):\n def __init__(self):\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.sum += val\n self.count += n\n\n def avg(self):\n return self.sum / self.count\n\n\ndef get_mem_usage():\n GB = 1024.0 ** 3\n output = [\"device_%d = %.03fGB\" % (device, torch.cuda.max_memory_allocated(torch.device('cuda:%d' % device)) / GB)\n for device in range(opt['n_gpu'])]\n return ' '.join(output)[:-1]\n\n\n# train\ndef train(data_loader, model, optimizer, epoch, bm_mask):\n model.train()\n total_am, subgraph_am, node_am = AverageMeter(), AverageMeter(), AverageMeter()\n for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):\n # forward pass\n confidence_map, start, end = model(input_data.cuda())\n # loss\n loss = gtad_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())\n\n # update step\n optimizer.zero_grad()\n loss[0].backward()\n # torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\n optimizer.step()\n\n # update losses\n total_am.update(loss[0].detach())\n subgraph_am.update(loss[1].detach())\n node_am.update(loss[2].detach())\n\n print(\"[Epoch {0:03d}]\\tLoss {1:.2f} = {2:.2f} + {3:.2f} (train)\".format(\n epoch, total_am.avg(), subgraph_am.avg(), node_am.avg()))\n\n\ndef test(data_loader, model, epoch, bm_mask, best_loss):\n model.eval()\n total_am, subgraph_am, node_am = AverageMeter(), AverageMeter(), AverageMeter()\n with torch.no_grad():\n for n_iter, (input_data, label_confidence, label_start, label_end) in enumerate(data_loader):\n # forward pass\n confidence_map, start, end = model(input_data.cuda())\n # loss\n # gt_iou_map = label_confidence.cuda() * bm_mask\n loss = gtad_loss_func(confidence_map, start, end, label_confidence, label_start, label_end, bm_mask.cuda())\n\n # update losses\n total_am.update(loss[0].detach())\n subgraph_am.update(loss[1].detach())\n node_am.update(loss[2].detach())\n\n print(\"[Epoch {0:03d}]\\tLoss {1:.2f} = {2:.2f} + {3:.2f} (validation)\".format(\n epoch, total_am.avg(), subgraph_am.avg(), node_am.avg()))\n\n state = {'epoch': epoch + 1,\n 'state_dict': model.state_dict()}\n torch.save(state, opt[\"output\"] + \"/GTAD_checkpoint_\"+str(epoch)+\".pth.tar\")\n if total_am.avg() < best_loss:\n\n best_loss = total_am.avg()\n 
torch.save(state, opt[\"output\"] + \"/GTAD_best.pth.tar\")\n\n\n return best_loss\n\n\nif __name__ == '__main__':\n opt = opts.parse_opt()\n opt = vars(opt)\n opt['output'] = './GA_NL_4_output'\n if not os.path.exists(opt[\"output\"]):\n os.makedirs(opt[\"output\"])\n\n # model = GTAD(opt)\n # a = torch.randn(1, 400, 100)\n # b, c = model(a)\n # print(b.shape, c.shape)\n # print(b)\n # print(c)\n\n centers = torch.load('./centers/512_200_centers.rar')\n centers = centers[:, :400]\n device = torch.device(\"cuda:0,1\")\n centers = centers.to(device)\n\n model = GTAD(opt, centers)\n\n policies = model.get_optim_policies()\n\n model = torch.nn.DataParallel(model, device_ids=list(range(opt['n_gpu']))).cuda()\n #path = './output/GTAD_best.pth.tar'\n #model = load_checkpoint(model,path)\n print('use {} gpus to train!'.format(opt['n_gpu']))\n\n optimizer = optim.Adam(policies, lr=opt[\"training_lr\"],\n weight_decay=opt[\"weight_decay\"])\n train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset=\"train\"),\n batch_size=opt[\"batch_size\"], shuffle=True,\n num_workers=8, pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset=\"validation\"),\n batch_size=opt[\"batch_size\"], shuffle=False,\n num_workers=8, pin_memory=True)\n\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opt[\"step_size\"], gamma=opt[\"step_gamma\"])\n mask = get_mask(opt[\"temporal_scale\"])\n best_loss = 1e10\n\n for epoch in range(opt[\"train_epochs\"]):\n with autograd.detect_anomaly():\n train(train_loader, model, optimizer, epoch, mask)\n best_loss = test(test_loader, model, epoch, mask, best_loss)\n scheduler.step()\n\n\n\n" ]
[ [ "torch.optim.Adam", "torch.cuda.manual_seed", "torch.load", "numpy.random.seed", "torch.manual_seed", "torch.no_grad", "torch.save", "torch.cuda.manual_seed_all", "torch.device", "torch.autograd.detect_anomaly", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
L-Zhe/SDISS
[ "0ee3f8614f4d92a3c916d04457784dbdffac8a06" ]
[ "generate.py" ]
[ "import torch\nimport Constants\nfrom Preprocess import translate2word, restore2Ori\nfrom Parameter import device, maxLen\n\ndef generator(model, testData, SrcOriPath, index2word, DictPath, search_method, beam_size):\n model.eval()\n translate = lambda outputs: [translate2word(seq, index2word) for seq in outputs]\n outputs = []\n attn_weight = []\n with torch.no_grad():\n for data in testData:\n if len(data) == 3:\n source, graph, rgraph = data\n else:\n source, graph, rgraph, _, _ = data\n source = source.to(device)\n lentok = torch.LongTensor(source.size(0), 1).fill_(Constants.MIDDLE).to(device)\n source = torch.cat((lentok, source), dim=-1)\n graph = graph.to(device)\n rgraph = rgraph.to(device)\n sentence, attn = model.predict(source, graph, rgraph, maxLen,\n Constants.PAD, Constants.BOS, Constants.EOS,\n search_method=search_method, beam_size=beam_size)\n outputs.extend(translate(sentence))\n attn_weight.extend(attn)\n outputs = restore2Ori(outputs, DictPath)\n return replaceUNK(SrcOriPath, outputs, attn_weight)\n\n\ndef replaceUNK(srcPath, sentence, attn):\n with open(srcPath, 'r') as f:\n source = [[word.lower() for word in line.split()] for line in f.readlines()]\n for i in range(len(sentence)):\n for j in range(len(sentence[i])):\n if sentence[i][j] == Constants.UNK_WORD:\n sentence[i][j] = source[i][attn[i][j] - 1]\n return sentence\n\n\nif __name__ == '__main__':\n pass" ]
[ [ "torch.no_grad", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tpopordanoska/confidence-intervals
[ "6b9325093fdfd9dd903a56d6832e472427a4a24f" ]
[ "cieg/experiments/methods/pmatrix.py" ]
[ "import os\n\nimport numpy as np\nimport torch\n\n\ndef pmatrix_bounds(eigvals_lower, eigvals_upper, eigvects_lower, eigvects_upper, sigma, eig):\n inv_eigvals_lower, inv_eigvals_upper = get_inverse_eigenvalue_bounds(eigvals_lower, eigvals_upper)\n p = eig.eigenvalues.size()[0]\n lower_bound, upper_bound = get_pmatrix_bounds(sigma,\n eigvects_lower,\n eigvects_upper,\n inv_eigvals_upper,\n inv_eigvals_lower,\n p)\n\n non_zero_precision = (lower_bound > 0) + (upper_bound < 0) > 0\n\n return lower_bound, upper_bound, non_zero_precision\n\n\ndef get_inverse_eigenvalue_bounds(lambdas_lower, lambdas_upper):\n inv_lambdas_lower = 1.0 / lambdas_upper\n inv_lambdas_upper = 1.0 / lambdas_lower\n\n return inv_lambdas_lower, inv_lambdas_upper\n\n\ndef get_pmatrix_bounds(sigma, v_lower, v_upper, inv_lambdas_upper, inv_lambdas_lower, p):\n # initialize to zero\n lower_bound = torch.zeros_like(sigma)\n upper_bound = torch.zeros_like(sigma)\n # Now reconstruct the bounds on the precision matrix using interval arithmetic over the matrix multiplication of\n # V*Sig^{-1}*V^T. Doing it naively for now, vectorize it later\n for i in range(p):\n for j in range(p):\n for k in range(p):\n updated_this_round = False\n # CHECK IF ANY OF THE LOWER OR UPPER BOUNDS ARE ZERO! HAVEN'T IMPLEMENTED THESE CASES\n if (v_lower[i, k] == 0) + (v_upper[i, k] == 0) + (v_lower[j, k] == 0) + (v_upper[j, k] == 0):\n print(\"oh no, some value is zero\")\n print(v_lower[i, k])\n print(v_upper[i, k])\n print(v_lower[j, k])\n print(v_upper[j, k])\n continue\n # assert False # die here, fix code for these cases rather than return false result\n # case 1a: i lower is positive; j lower is positive\n if (v_lower[i, k] > 0) * (v_lower[j, k] > 0):\n lower_bound[i, j] += v_lower[i, k] * inv_lambdas_lower[k] * v_lower[j, k]\n upper_bound[i, j] += v_upper[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n updated_this_round = True\n # case 2a: i lower is negative, upper is positive; j lower is positive\n if (v_lower[i, k] < 0) * (v_upper[i, k] > 0) * (v_lower[j, k] > 0):\n lower_bound[i, j] += v_lower[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n upper_bound[i, j] += v_upper[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n updated_this_round = True\n # case 3a: i upper is negative; j lower is positive\n if (v_upper[i, k] < 0) * (v_lower[j, k] > 0):\n lower_bound[i, j] += v_lower[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n upper_bound[i, j] += v_upper[i, k] * inv_lambdas_lower[k] * v_lower[j, k]\n updated_this_round = True\n\n # case 1b: i lower and upper are positive; j lower is negative, upper is positive\n if (v_lower[i, k] > 0) * (v_lower[j, k] < 0) * (v_upper[j, k] > 0):\n lower_bound[i, j] += v_upper[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n upper_bound[i, j] += v_upper[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n updated_this_round = True\n # case 2b: i lower is negative, upper is positive; j lower is negative, upper is positive\n # this is the complicated one where there are a couple possibilities\n if (v_lower[i, k] < 0) * (v_upper[i, k] > 0) * (v_lower[j, k] < 0) * (v_upper[j, k] > 0):\n # Lower bound will be negative (unless i==j), and there are two possibilities for this\n tmp = v_lower[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n if tmp > v_upper[i, k] * inv_lambdas_upper[k] * v_lower[j, k]:\n tmp = v_upper[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n if i == j:\n # in this case, the minimum is actually zero as we are on the diagonal\n if tmp < 0: # this condition should always hold, but just being explicit\n tmp = 0\n 
lower_bound[i, j] += tmp\n # Upper bound will be positive, and there are two possibilities for this\n tmp = v_lower[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n if tmp < v_upper[i, k] * inv_lambdas_upper[k] * v_upper[j, k]:\n tmp = v_upper[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n upper_bound[i, j] += tmp\n updated_this_round = True\n # case 3b: i lower and upper are negative; j lower is negative, upper is positive\n if (v_upper[i, k] < 0) * (v_lower[j, k]<0) * (v_upper[j, k] > 0):\n lower_bound[i, j] += v_lower[i, k] * inv_lambdas_upper[k] * v_upper[j, k]\n upper_bound[i, j] += v_lower[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n updated_this_round = True\n\n # case 1c: i lower and upper are positive; j lower and upper are negative\n if (v_lower[i, k]>0) * (v_upper[j, k]<0):\n lower_bound[i, j] += v_upper[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n upper_bound[i, j] += v_lower[i, k] * inv_lambdas_lower[k] * v_upper[j, k]\n updated_this_round = True\n # case 2c: i lower is negative, upper is positive; j lower and upper are negative\n if (v_lower[i, k] < 0) * (v_upper[i, k] > 0) * (v_upper[j, k] < 0):\n lower_bound[i, j] += v_upper[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n upper_bound[i, j] += v_lower[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n updated_this_round = True\n # case 3c: i lower and upper are negative; j lower and upper are negative\n if (v_upper[i, k] < 0) * (v_upper[j, k] < 0):\n lower_bound[i, j] += v_upper[i, k] * inv_lambdas_lower[k] * v_upper[j, k]\n upper_bound[i, j] += v_lower[i, k] * inv_lambdas_upper[k] * v_lower[j, k]\n updated_this_round = True\n\n if not updated_this_round:\n print(i)\n print(j)\n print(k)\n print(v_lower[i, k])\n print(v_upper[i, k])\n print(v_lower[j, k])\n print(v_upper[j, k])\n print(inv_lambdas_lower[k])\n print(inv_lambdas_upper[k])\n assert False # should have hit at least one of the cases!\n\n return lower_bound, upper_bound\n\n\ndef print_pmatrix_bounds(lower, upper, prec_emp):\n print(\"Verifying bounds\")\n print(f\"Prec. matrix lower: {lower}\")\n print(f\"Prec. matrix: {prec_emp}\")\n print(f\"Prec. 
matrix upper: {upper}\")\n\n\ndef check_pmatrix_bounds(lower, upper, prec_emp):\n print(\"Verifying bounds\")\n assert torch.all(lower.lt(prec_emp))\n assert torch.all(upper.gt(prec_emp))\n\n # upper bounds and lower bounds should bound the empirical precision matrix\n assert torch.min(upper - prec_emp) >= 0\n assert torch.min(prec_emp - lower) >= 0\n\n\ndef save_pmatrix_bounds(lower, upper, prec_emp, path, method):\n np.save(os.path.join(path, f\"pm_lower_{method}.npy\"), lower)\n np.save(os.path.join(path, f\"pm_upper_{method}.npy\"), upper)\n np.save(os.path.join(path, f\"pm_emp_{method}.npy\"), prec_emp)\n\n\ndef save_eig_bounds(eigvals_lower, eigvals_upper, eigvects_lower, eigvects_upper, eig, path, method):\n np.save(os.path.join(path, f\"eigvals_lower_{method}.npy\"), eigvals_lower)\n np.save(os.path.join(path, f\"eigvals_upper_{method}.npy\"), eigvals_upper)\n np.save(os.path.join(path, f\"eigvals_emp_{method}.npy\"), eig.eigenvalues)\n\n np.save(os.path.join(path, f\"eigvects_lower_{method}.npy\"), eigvects_lower)\n np.save(os.path.join(path, f\"eigvects_upper_{method}.npy\"), eigvects_upper)\n np.save(os.path.join(path, f\"eigvects_emp_{method}.npy\"), eig.eigenvectors)\n\n\ndef load_pmatrix_bounds(path, method):\n lower = np.load(os.path.join(path, f\"pm_lower_{method}.npy\"))\n upper = np.load(os.path.join(path, f\"pm_upper_{method}.npy\"))\n emp = np.load(os.path.join(path, f\"pm_emp_{method}.npy\"))\n\n return lower, upper, emp\n\n\ndef load_eig_bounds(path, method):\n eigvals_lower = np.load(os.path.join(path, f\"eigvals_lower_{method}.npy\"))\n eigvals_upper = np.load(os.path.join(path, f\"eigvals_upper_{method}.npy\"))\n eigvals_emp = np.load(os.path.join(path, f\"eigvals_emp_{method}.npy\"))\n\n eigvects_lower = np.load(os.path.join(path, f\"eigvects_lower_{method}.npy\"))\n eigvects_upper = np.load(os.path.join(path, f\"eigvects_upper_{method}.npy\"))\n eigvects_emp = np.load(os.path.join(path, f\"eigvects_emp_{method}.npy\"))\n\n return eigvals_lower, eigvals_upper, eigvals_emp, eigvects_lower, eigvects_upper, eigvects_emp\n" ]
[ [ "torch.min", "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tranduytrung/rgbd-classification
[ "156d829200c4592867af5ade85e8642afbe8580e", "156d829200c4592867af5ade85e8642afbe8580e" ]
[ "train.py", "models/rgbnet.py" ]
[ "import os, json, glob, copy, argparse, shutil\nimport torch\nimport torchvision\nimport numpy as np\nimport PIL.Image\nfrom datetime import datetime\nfrom config import get_train_config\nfrom models import DepthNet, RGBNet, RGBDNet\nimport augmentation\nimport loaders\nfrom tensorboardX import SummaryWriter\nfrom utils import load_last\n\n\ndef save_acc_hist(acc_hist, ckpt_root):\n np.save(f'{ckpt_root}/acc_hist.npy', acc_hist)\n\n\ndef train_model(model, data_loader, criterion, optimizer, scheduler, cfg, resume=True):\n best_acc = 0.0\n best_epoch = 0\n max_epoch = cfg['max_epoch']\n ckpt_root = cfg['ckpt_root']\n batch_size = cfg['batch_size']\n\n summary = SummaryWriter(ckpt_root)\n\n if resume:\n model, last_epoch, val_acc_hist = load_last(model, ckpt_root)\n if len(val_acc_hist) > 0:\n arg_best_acc = np.argmax(val_acc_hist)\n best_epoch = arg_best_acc + 1\n best_acc = val_acc_hist[arg_best_acc]\n for _ in range(last_epoch):\n scheduler.step()\n\n start_epoch = last_epoch + 1\n\n for epoch in range(start_epoch, max_epoch + 1):\n\n print('-' * 60)\n print('Epoch: {} / {}'.format(epoch, max_epoch))\n print('-' * 60)\n\n for phrase in ['train', 'val']:\n\n if phrase == 'train':\n scheduler.step()\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n dataset_size = len(data_loader[phrase].dataset)\n total_steps = int(dataset_size / batch_size)\n\n for i, (images, targets) in enumerate(data_loader[phrase]):\n optimizer.zero_grad()\n\n if torch.cuda.is_available():\n images = images.cuda()\n targets = targets.cuda()\n\n with torch.set_grad_enabled(phrase == 'train'):\n outputs = model(images)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, targets)\n\n if phrase == 'train':\n loss.backward()\n optimizer.step()\n\n batch_loss = loss.item()\n batch_correct = torch.sum(preds == targets.data).item()\n batch_acc = batch_correct / batch_size\n running_loss += batch_loss * batch_size\n running_corrects += batch_correct\n\n print(\n f'{datetime.now()} {phrase} epoch={epoch}/{max_epoch} step={i}/{total_steps} loss={batch_loss:.4f} acc={batch_acc:.4f}')\n summary.add_scalar(\n f'{phrase}/batch/loss', batch_loss, global_step=(epoch - 1)*total_steps + i)\n\n epoch_loss = running_loss / dataset_size\n epoch_acc = running_corrects / dataset_size\n\n if phrase == 'train':\n print('{} {} Loss: {:.4f} Acc: {:.4f}'.format(\n datetime.now(), phrase, epoch_loss, epoch_acc))\n print('================================')\n\n if phrase == 'val':\n val_acc_hist.append(epoch_acc)\n save_acc_hist(val_acc_hist, ckpt_root)\n if epoch_acc > best_acc:\n best_acc = epoch_acc\n best_epoch = epoch\n\n filename = os.path.join(ckpt_root, f'{epoch:04d}.pkl')\n torch.save(copy.deepcopy(model.state_dict()), filename)\n\n print(\n f'{phrase} epoch={epoch} loss={epoch_loss:.4f} acc={epoch_acc:.4f}')\n\n summary.add_scalar(f'{phrase}/epoch/accuracy',\n epoch_acc, global_step=epoch*total_steps)\n summary.add_scalar(f'{phrase}/epoch/loss',\n epoch_loss, global_step=epoch*total_steps)\n\n summary.close()\n return best_epoch, best_acc\n\n\ndef train_depth(cfg):\n imagenet_transform_train = torchvision.transforms.Compose([\n augmentation.CropAndResize((224, 224), scale=(0.4, 1.0)),\n torchvision.transforms.ToTensor(),\n augmentation.DepthTranslate(minmax=(0, .8)),\n augmentation.GaussianNoise(std=0.005),\n augmentation.DepthUniformNoise(p=0.01, minmax=(0.15, 1.0)),\n augmentation.Clamp((0.15, 1.0)),\n torchvision.transforms.Normalize(mean=[0.575], std=[0.425])\n ])\n\n 
imagenet_transform_val = torchvision.transforms.Compose([\n augmentation.CenterCrop((224, 224)),\n torchvision.transforms.ToTensor(),\n augmentation.DepthTranslate(minmax=(0, .8)),\n augmentation.GaussianNoise(std=0.005),\n augmentation.DepthUniformNoise(p=0.01, minmax=(0.15, 1.0)),\n augmentation.Clamp((0.15, 1.0)),\n torchvision.transforms.Normalize(mean=[0.575], std=[0.425])\n ])\n\n datasets = {\n \"train\": torchvision.datasets.DatasetFolder(cfg['data_root']['train'], loaders.depth_from_exr, extensions=('depth.exr'), transform=imagenet_transform_train),\n \"val\": torchvision.datasets.DatasetFolder(cfg['data_root']['val'], loaders.depth_from_exr, extensions=('depth.exr'), transform=imagenet_transform_val),\n }\n\n num_workers = cfg['worker']\n data_loader = {\n \"train\": torch.utils.data.DataLoader(datasets['train'], batch_size=cfg['batch_size'],\n num_workers=num_workers, shuffle=True, pin_memory=True, drop_last=True),\n \"val\": torch.utils.data.DataLoader(datasets['val'], batch_size=cfg['batch_size'],\n num_workers=num_workers, shuffle=True, pin_memory=True, drop_last=True),\n }\n\n assert len(datasets['train'].classes) == len(datasets['val'].classes)\n # save classes name\n classes = datasets['train'].classes\n os.makedirs(cfg['ckpt_root'], exist_ok=True)\n with open(os.path.join(cfg['ckpt_root'], 'classes.json'), 'wt') as f:\n json.dump(classes, f)\n\n cfg_depth = cfg['depth']\n cfg_depth['num_classes'] = len(classes)\n model = DepthNet(cfg_depth)\n # enable cuda if available\n if torch.cuda.is_available():\n model = model.cuda()\n\n criterion = torch.nn.CrossEntropyLoss()\n # optimizer = torch.optim.SGD(model.parameters(), lr=cfg['lr'], momentum=cfg['momentum'], weight_decay=cfg['weight_decay'])\n optimizer = torch.optim.Adam(\n model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['milestones'], gamma=cfg['gamma'])\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, 10, gamma=cfg['gamma'])\n\n train_model(model, data_loader, criterion,\n optimizer, scheduler, cfg, resume=True)\n\n\ndef train_rgb(cfg):\n imagenet_transform_train = torchvision.transforms.Compose([\n augmentation.GaussianBlur(r=1),\n torchvision.transforms.RandomResizedCrop(224, scale=(0.25, 1.0)),\n torchvision.transforms.ToTensor(),\n augmentation.GaussianNoise(),\n augmentation.Clamp((0.0, 1.0)),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n imagenet_transform_val = torchvision.transforms.Compose([\n augmentation.GaussianBlur(r=1),\n torchvision.transforms.CenterCrop(224),\n torchvision.transforms.ToTensor(),\n augmentation.GaussianNoise(),\n augmentation.Clamp((0.0, 1.0)),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n datasets = {\n \"train\": torchvision.datasets.DatasetFolder(cfg['data_root']['train'], loaders.rgb_from_image, extensions=('rgb.png'), transform=imagenet_transform_train),\n \"val\": torchvision.datasets.DatasetFolder(cfg['data_root']['val'], loaders.rgb_from_image, extensions=('rgb.png'), transform=imagenet_transform_val),\n }\n\n num_workers = cfg['worker']\n data_loader = {\n \"train\": torch.utils.data.DataLoader(datasets['train'], batch_size=cfg['batch_size'],\n num_workers=num_workers, shuffle=True, pin_memory=True, drop_last=True),\n \"val\": torch.utils.data.DataLoader(datasets['val'], batch_size=cfg['batch_size'],\n num_workers=num_workers, shuffle=True, pin_memory=True, 
drop_last=True),\n }\n\n assert len(datasets['train'].classes) == len(datasets['val'].classes)\n # save classes name\n classes = datasets['train'].classes\n os.makedirs(cfg['ckpt_root'], exist_ok=True)\n with open(os.path.join(cfg['ckpt_root'], 'classes.json'), 'wt') as f:\n json.dump(classes, f)\n\n cfg_rgb = cfg['rgb']\n cfg_rgb['num_classes'] = len(classes)\n model = RGBNet(cfg_rgb)\n # enable cuda if available\n if torch.cuda.is_available():\n model = model.cuda()\n\n criterion = torch.nn.CrossEntropyLoss()\n # optimizer = torch.optim.SGD(model.parameters(), lr=cfg['lr'], momentum=cfg['momentum'], weight_decay=cfg['weight_decay'])\n optimizer = torch.optim.Adam(\n model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['milestones'], gamma=cfg['gamma'])\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, 10, gamma=cfg['gamma'])\n\n train_model(model, data_loader, criterion,\n optimizer, scheduler, cfg, resume=True)\n\ndef train_rgbd(cfg):\n datasets = {\n \"train\": loaders.RGBDDataset(cfg['data_root']['train'], loader=loaders.RGBDLoader(mode='train')),\n \"val\": loaders.RGBDDataset(cfg['data_root']['val'], loader=loaders.RGBDLoader(mode='val')),\n }\n\n num_workers = cfg['worker']\n data_loader = {\n \"train\": torch.utils.data.DataLoader(datasets['train'], batch_size=cfg['batch_size'],\n num_workers=num_workers, shuffle=True, pin_memory=True, drop_last=True),\n \"val\": torch.utils.data.DataLoader(datasets['val'], batch_size=cfg['batch_size'],\n num_workers=num_workers, shuffle=True, pin_memory=True, drop_last=True),\n }\n\n assert len(datasets['train'].classes) == len(datasets['val'].classes)\n # save classes name\n classes = datasets['train'].classes\n os.makedirs(cfg['ckpt_root'], exist_ok=True)\n with open(os.path.join(cfg['ckpt_root'], 'classes.json'), 'wt') as f:\n json.dump(classes, f)\n\n cfg_rgbd = cfg['rgbd']\n cfg_rgbd['num_classes'] = len(classes)\n model = RGBDNet(cfg_rgbd)\n # enable cuda if available\n if torch.cuda.is_available():\n model = model.cuda()\n\n criterion = torch.nn.CrossEntropyLoss()\n # optimizer = torch.optim.SGD(model.parameters(), lr=cfg['lr'], momentum=cfg['momentum'], weight_decay=cfg['weight_decay'])\n optimizer = torch.optim.Adam(\n model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])\n # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg['milestones'], gamma=cfg['gamma'])\n scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, 10, gamma=cfg['gamma'])\n\n train_model(model, data_loader, criterion,\n optimizer, scheduler, cfg, resume=True)\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('model', type=str)\n parser.add_argument('path', type=str)\n\n args = parser.parse_args()\n return {\n 'model': args.model,\n 'ckpt_root': args.path\n }\n\n\nif __name__ == \"__main__\":\n arg_cfg = parse_args()\n cfg = get_train_config()\n cfg.update(arg_cfg)\n model = cfg['model']\n if model == 'rgb':\n train_rgb(cfg)\n elif model == 'rgbd':\n train_rgbd(cfg)\n elif model == 'depth':\n train_depth(cfg)\n else:\n print(f'Error: unknown model {model}')\n", "from collections import OrderedDict\nimport torch\nimport torchvision\n\nclass RGBNet(torch.nn.Module):\n def __init__(self, cfg):\n super(RGBNet, self).__init__()\n num_classes = cfg['num_classes']\n pretrained = 'pretrained' in cfg and cfg['pretrained']\n refine = 'refine' in cfg and cfg['refine']\n mobilenet = 
torchvision.models.mobilenet_v2(pretrained=pretrained)\n \n if pretrained and not refine:\n for parameter in mobilenet.parameters():\n parameter.requires_grad = False\n\n mobilenet.classifier = torch.nn.Sequential(\n torch.nn.Dropout(0.2),\n torch.nn.Linear(mobilenet.last_channel, num_classes),\n )\n\n self.mobilenet = mobilenet\n \n def forward(self, x):\n out = self.mobilenet(x)\n \n return out" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.utils.data.DataLoader", "torch.sum", "numpy.save", "numpy.argmax", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.optim.lr_scheduler.StepLR" ], [ "torch.nn.Linear", "torch.nn.Dropout" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UpStride/tutorial
[ "cbb8f78c4f70855031ca340b6a18befeded3e7d6" ]
[ "quaternion-transformers/train_and_eval.py" ]
[ "import logging\nimport sys\nimport os\nimport time\nimport datetime\nimport numpy as np\nimport tensorflow as tf\n\nfrom tabulate import tabulate\n\nfrom data_loading.dataset import dataset_factory\nfrom networks.optimization import OptimizationManager, \\\n loss_function, accuracy_function\nfrom networks.network import get_network\nfrom arguments.parser import PARSER\n\n# suppress warnings\nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\n\nargs = PARSER.parse_args()\n\n# Dataset\ndataset = dataset_factory[args.task]()\nvocab_size = dataset.get_vocab_size()\ntrain_batches, test_batches = dataset.get_batched_data(args.batch_size)\n\n# Network\ntransformer = get_network(args, vocab_size)\n\n# Checkpoint management\nexp_name = args.exp_name\ncheckpoint_path = f\"./checkpoints/{dataset.get_dataset_name()}/{exp_name}\"\nif not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\nckpt = tf.train.Checkpoint(transformer=transformer)\nckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)\n\n# if a checkpoint exists, restore the latest checkpoint.\nif ckpt_manager.latest_checkpoint and args.from_pretrained:\n ckpt.restore(ckpt_manager.latest_checkpoint)\n print ('Latest checkpoint restored!!')\n\n# Tensorboard management\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.Mean(name='train_accuracy')\ntest_loss = tf.keras.metrics.Mean(name='test_loss')\ntest_accuracy = tf.keras.metrics.Mean(name='test_accuracy')\n\ncurrent_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntrain_log_dir = 'logs/' + exp_name + '/' + current_time + '/train'\ntest_log_dir = 'logs/' + exp_name + '/' + current_time + '/test'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\ntest_summary_writer = tf.summary.create_file_writer(test_log_dir)\n\n# Optimization management\nopt_manager = OptimizationManager(args.task, transformer, args.d_model,\n train_loss, train_accuracy,\n test_loss, test_accuracy)\ntrain_step = opt_manager.get_train_step()\ntest_step = opt_manager.get_test_step()\n\nfor epoch in range(args.epochs):\n start = time.time()\n\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()\n\n for (batch, (inp, tar)) in enumerate(train_batches):\n train_step(inp, tar)\n\n if epoch == 0 and batch == 0:\n weights = transformer.trainable_weights\n tab_list = [[v.name, v.get_shape(), np.prod(v.get_shape())] for v in weights]\n n_params = np.sum([tl[2] for tl in tab_list])\n n_params_no_embed = np.sum([tl[2]\n for tl in tab_list[:-2]\n if ('embedding' not in tl[0])])\n print(tabulate(tab_list, headers=['Name', 'Shape', 'Params']))\n print(f\"Number of trainable parameters: {n_params}\")\n print(\"Number of trainable parameters w/o the embedding layers\" +\n f\" and w/o the final dense layer: {n_params_no_embed}\")\n del weights\n\n if batch % 50 == 0:\n print(f'Epoch {epoch + 1} Batch {batch} \\\n Loss {train_loss.result():.4f} \\\n Accuracy {train_accuracy.result():.4f}')\n\n # Log training metric on tensorboard\n with train_summary_writer.as_default():\n tf.summary.scalar('loss', train_loss.result(), step=epoch)\n tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n # Evaluation\n for inp, tar in test_batches:\n test_step(inp, tar)\n print(f'--- Test Metrics: Epoch {epoch + 1} \\\n Loss {test_loss.result():.4f} \\\n Accuracy {test_accuracy.result():.4f} ---')\n\n with test_summary_writer.as_default():\n tf.summary.scalar('loss', 
test_loss.result(), step=epoch)\n tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)\n\n if (epoch + 1) % 5 == 0:\n ckpt_save_path = ckpt_manager.save()\n print (f'Saving checkpoint for epoch {epoch+1} at {ckpt_save_path}')\n\n print(f'Epoch {epoch + 1} Loss {train_loss.result():.4f} \\\n Accuracy {train_accuracy.result():.4f}')\n\n print(f'Time taken for 1 epoch: {time.time() - start:.2f} secs\\n')\n" ]
[ [ "tensorflow.train.CheckpointManager", "tensorflow.train.Checkpoint", "numpy.sum", "tensorflow.keras.metrics.Mean", "tensorflow.summary.create_file_writer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marchirschvogel/ambit
[ "9c21852d2c7c562b7accdd34025fc6b829eb1d3e", "9c21852d2c7c562b7accdd34025fc6b829eb1d3e" ]
[ "modules/flow0d/cardiovascular0D.py", "testing/solid_flow0d_monolithicdirect2field_flux_syspulcap_3Dheart_iterative.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) 2019-2022, Dr.-Ing. Marc Hirschvogel\n# All rights reserved.\n\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys, time\nimport numpy as np\nimport sympy as sp\n\nfrom mpiroutines import allgather_vec_entry\nfrom oderoutines import ode\n\n\nclass cardiovascular0Dbase(ode):\n \n def __init__(self, init=True, comm=None):\n \n # initialize base class\n ode.__init__(self, init=init, comm=comm)\n \n self.T_cycl = 0 # duration of one cardiac cycle (gets overridden by derived syspul* classes)\n\n \n # check for cardiac cycle periodicity \n def cycle_check(self, var, varTc, varTc_old, t, cycle, cyclerr, eps_periodic, check='allvar', inioutpath=None, nm='', induce_pert_after_cycl=-1):\n \n if isinstance(varTc, np.ndarray): vs, ve = 0, len(varTc)\n else: vs, ve = var.getOwnershipRange()\n\n is_periodic = False\n \n if self.T_cycl > 0. and np.isclose(t, self.T_cycl):\n \n varTc[vs:ve] = var[vs:ve]\n \n if check is not None: is_periodic = self.check_periodic(varTc, varTc_old, eps_periodic, check, cyclerr)\n \n # definitely should not be True if we've not yet surpassed the \"disease induction\" cycle\n if cycle[0] <= induce_pert_after_cycl:\n is_periodic = False\n \n # write \"periodic\" initial conditions in case we want to restart from this model in another simulation\n if is_periodic and inioutpath is not None:\n self.write_initial(inioutpath, nm, varTc_old, varTc)\n \n varTc_old[vs:ve] = varTc[vs:ve]\n \n # update cycle counter\n cycle[0] += 1\n\n return is_periodic\n\n\n # some perturbations/diseases we want to simulate (mr: mitral regurgitation, ms: mitral stenosis, ar: aortic regurgitation, as: aortic stenosis)\n def induce_perturbation(self, perturb_type, perturb_factor):\n\n if perturb_type=='mr': self.R_vin_l_max *= perturb_factor\n if perturb_type=='ms': self.R_vin_l_min *= perturb_factor\n if perturb_type=='ar': self.R_vout_l_max *= perturb_factor\n if perturb_type=='as': self.R_vout_l_min *= perturb_factor\n\n # arrays need re-initialization, expressions have to be re-set\n self.setup_arrays(), self.set_compartment_interfaces()\n self.equation_map(), self.set_stiffness(), self.lambdify_expressions()\n\n \n # set pressure function for 3D FEM model (FEniCS)\n def set_pressure_fem(self, var, ids, pr0D, p0Da):\n \n # set pressure functions\n for i in range(len(ids)):\n pr0D.val = -allgather_vec_entry(var, ids[i], self.comm)\n p0Da[i].interpolate(pr0D.evaluate)\n\n\n # set valve q(p) relationship\n def valvelaw(self, p, popen, Rmin, Rmax, vparams, topen, tclose):\n\n if vparams[0]=='pwlin_pres': # piecewise linear with resistance depending on pressure difference\n R = sp.Piecewise( (Rmax, p < popen), (Rmin, p >= popen) )\n vl = (popen - p) / R\n elif vparams[0]=='pwlin_time': # piecewise linear with resistance depending on timing\n if topen > tclose: R = sp.Piecewise( (Rmax, sp.And(self.t_ < topen, self.t_ >= tclose)), (Rmin, sp.Or(self.t_ >= topen, self.t_ < tclose)) )\n else: R = sp.Piecewise( (Rmax, sp.Or(self.t_ < topen, self.t_ >= tclose)), (Rmin, sp.And(self.t_ >= topen, self.t_ < tclose)) )\n vl = (popen - p) / R\n elif vparams[0]=='smooth_pres_resistance': # smooth resistance value\n R = 0.5*(Rmax - Rmin)*(sp.tanh((popen - p)/vparams[-1]) + 1.) + Rmin\n vl = (popen - p) / R \n elif vparams[0]=='smooth_pres_momentum': # smooth q(p) relationship\n # interpolation by cubic spline in epsilon interval\n p0 = (popen-vparams[-1]/2. 
- popen)/Rmax\n p1 = (popen+vparams[-1]/2. - popen)/Rmin\n m0 = 1./Rmax\n m1 = 1./Rmin\n s = (p - (popen-vparams[-1]/2.))/vparams[-1]\n # spline ansatz functions\n h00 = 2.*s**3. - 3*s**2. + 1.\n h01 = -2.*s**3. + 3*s**2.\n h10 = s**3. - 2.*s**2. + s\n h11 = s**3. - s**2.\n # spline\n c = h00*p0 + h10*m0*vparams[-1] + h01*p1 + h11*m1*vparams[-1]\n vl = sp.Piecewise( ((popen - p)/Rmax, p < popen-vparams[-1]/2), (-c, sp.And(p >= popen-vparams[-1]/2., p < popen+vparams[-1]/2.)), ((popen - p)/Rmin, p >= popen+vparams[-1]/2.) )\n elif vparams[0]=='pw_pres_regurg':\n vl = sp.Piecewise( (vparams[1]*vparams[2]*sp.sqrt(popen - p), p < popen), ((popen - p) / Rmin, p >= popen) )\n else:\n raise NameError(\"Unknown valve law %s!\" % (vparams[0]))\n \n vlaw = vl\n if popen is not sp.S.Zero:\n res = 1./sp.diff(vl,popen)\n else:\n res = sp.S.One\n \n return vlaw, res\n\n\n # set compartment interfaces according to case and coupling quantity (can be volume, flux, or pressure)\n def set_compartment_interfaces(self):\n \n # loop over chambers\n for i, ch in enumerate(['lv','rv','la','ra', 'ao']):\n \n if ch == 'lv': chn = 'v_l'\n if ch == 'rv': chn = 'v_r'\n if ch == 'la': chn = 'at_l'\n if ch == 'ra': chn = 'at_r'\n if ch == 'ao': chn = 'aort_sys'\n\n if self.chmodels[ch]['type']=='0D_elast' or self.chmodels[ch]['type']=='0D_elast_prescr':\n self.switch_V[i] = 1\n \n elif self.chmodels[ch]['type']=='0D_rigid':\n self.switch_V[i] = 0\n \n elif self.chmodels[ch]['type']=='prescribed':\n if self.cq[i] == 'volume':\n self.switch_V[i] = 1\n self.cname.append('V_'+chn)\n elif self.cq[i] == 'flux':\n self.switch_V[i] = 0\n self.cname.append('Q_'+chn)\n else:\n raise NameError(\"Unknown coupling quantity!\")\n \n elif self.chmodels[ch]['type']=='3D_solid':\n if self.cq[i] == 'volume':\n self.v_ids.append(self.vindex_ch[i]) # variable indices for coupling\n self.c_ids.append(self.cindex_ch[i]) # coupling quantity indices for coupling\n self.cname.append('V_'+chn)\n self.switch_V[i], self.vname[i] = 1, 'p_'+chn\n elif self.cq[i] == 'flux':\n self.cname.append('Q_'+chn)\n self.switch_V[i], self.vname[i] = 0, 'p_'+chn\n self.v_ids.append(self.vindex_ch[i]) # variable indices for coupling\n self.c_ids.append(self.cindex_ch[i]) # coupling quantity indices for coupling\n elif self.cq[i] == 'pressure':\n if self.vq[i] == 'volume':\n self.switch_V[i], self.vname[i] = 1, 'V_'+chn\n elif self.vq[i] == 'flux':\n self.switch_V[i], self.vname[i] = 0, 'Q_'+chn\n else:\n raise ValueError(\"Variable quantity has to be volume or flux!\")\n self.cname.append('p_'+chn)\n self.si[i] = 1 # switch indices of pressure / outflux\n self.v_ids.append(self.vindex_ch[i]-self.si[i]) # variable indices for coupling\n else:\n raise NameError(\"Unknown coupling quantity!\")\n \n # 3D fluid currently only working with Cheart!\n elif self.chmodels[ch]['type']=='3D_fluid':\n assert(self.cq[i] == 'pressure')\n self.switch_V[i], self.vname[i] = 0, 'Q_'+chn\n if ch != 'ao': self.si[i] = 1 # switch indices of pressure / outflux\n #self.v_ids.append(self.vindex_ch[i]-self.si[i]) # variable indices for coupling\n # add inflow pressures to coupling name prefixes\n for m in range(self.chmodels[ch]['num_inflows']):\n self.cname.append('p_'+chn+'_i'+str(m+1)+'')\n # add outflow pressures to coupling name prefixes\n for m in range(self.chmodels[ch]['num_outflows']):\n self.cname.append('p_'+chn+'_o'+str(m+1)+'')\n \n else:\n raise NameError(\"Unknown chamber model for chamber %s!\" % (ch))\n\n\n # set coupling state (populate x and c vectors with Sympy 
symbols) according to case and coupling quantity (can be volume, flux, or pressure)\n def set_coupling_state(self, ch, chvars, chfncs=[]):\n \n if ch == 'lv': V_unstressed, i = self.V_v_l_u, 0\n if ch == 'rv': V_unstressed, i = self.V_v_r_u, 1\n if ch == 'la': V_unstressed, i = self.V_at_l_u, 2\n if ch == 'ra': V_unstressed, i = self.V_at_r_u, 3\n if ch == 'ao': V_unstressed, i = self.V_ar_sys_u, 4\n \n # \"distributed\" p variables\n num_pdist = len(chvars)-1\n\n # time-varying elastances\n if self.chmodels[ch]['type']=='0D_elast' or self.chmodels[ch]['type']=='0D_elast_prescr':\n chvars['VQ'] = chvars['pi1']/chfncs[0] + V_unstressed # V = p/E(t) + V_u\n self.fnc_.append(chfncs[0])\n \n # all \"distributed\" p are equal to \"main\" p of chamber (= pi1)\n for k in range(10): # no more than 10 distributed p's allowed\n if 'pi'+str(k+1)+'' in chvars.keys(): chvars['pi'+str(k+1)+''] = chvars['pi1']\n if 'po'+str(k+1)+'' in chvars.keys(): chvars['po'+str(k+1)+''] = chvars['pi1']\n\n # rigid\n elif self.chmodels[ch]['type']=='0D_rigid':\n chvars['VQ'] = 0\n \n # all \"distributed\" p are equal to \"main\" p of chamber (= pi1)\n for k in range(10): # no more than 10 distributed p's allowed\n if 'pi'+str(k+1)+'' in chvars.keys(): chvars['pi'+str(k+1)+''] = chvars['pi1']\n if 'po'+str(k+1)+'' in chvars.keys(): chvars['po'+str(k+1)+''] = chvars['pi1']\n\n # 3D solid mechanics model, or 0D prescribed volume/flux/pressure (non-primary variables!)\n elif self.chmodels[ch]['type']=='3D_solid' or self.chmodels[ch]['type']=='prescribed':\n\n # all \"distributed\" p are equal to \"main\" p of chamber (= pi1)\n for k in range(10): # no more than 10 distributed p's allowed\n if 'pi'+str(k+1)+'' in chvars.keys(): chvars['pi'+str(k+1)+''] = chvars['pi1']\n if 'po'+str(k+1)+'' in chvars.keys(): chvars['po'+str(k+1)+''] = chvars['pi1']\n\n if self.cq[i] == 'volume' or self.cq[i] == 'flux':\n self.c_.append(chvars['VQ']) # V or Q\n if self.cq[i] == 'pressure':\n self.x_[self.vindex_ch[i]-self.si[i]] = chvars['VQ'] # V or Q\n self.c_.append(chvars['pi1'])\n\n # 3D fluid mechanics model\n elif self.chmodels[ch]['type']=='3D_fluid': # also for 2D FEM models\n \n assert(self.cq[i] == 'pressure' and self.vq[i] == 'flux')\n\n self.x_[self.vindex_ch[i]-self.si[i]] = chvars['VQ'] # Q of chamber is now variable\n\n # all \"distributed\" p that are not coupled are set to first inflow p\n for k in range(self.chmodels[ch]['num_inflows'],10):\n if 'pi'+str(k+1)+'' in chvars.keys(): chvars['pi'+str(k+1)+''] = chvars['pi1']\n\n # if no inflow is present, set to zero\n if self.chmodels[ch]['num_inflows']==0: chvars['pi1'] = sp.S.Zero\n\n # now add inflow pressures to coupling array\n for m in range(self.chmodels[ch]['num_inflows']):\n self.c_.append(chvars['pi'+str(m+1)+''])\n \n # all \"distributed\" p that are not coupled are set to first outflow p\n for k in range(self.chmodels[ch]['num_outflows'],10):\n if 'po'+str(k+1)+'' in chvars.keys(): chvars['po'+str(k+1)+''] = chvars['po1']\n \n # if no outflow is present, set to zero\n if self.chmodels[ch]['num_outflows']==0: chvars['po1'] = sp.S.Zero\n\n # now add outflow pressures to coupling array\n for m in range(self.chmodels[ch]['num_outflows']):\n self.c_.append(chvars['po'+str(m+1)+''])\n\n else:\n raise NameError(\"Unknown chamber model for chamber %s!\" % (ch))\n \n\n # evaluate time-dependent state of chamber (for 0D elastance models)\n def evaluate_chamber_state(self, y, t):\n \n chamber_funcs=[]\n\n ci=0\n for i, ch in enumerate(['lv','rv','la','ra']):\n\n if 
self.chmodels[ch]['type']=='0D_elast':\n \n if ch == 'lv': E_max, E_min = self.E_v_max_l, self.E_v_min_l\n if ch == 'rv': E_max, E_min = self.E_v_max_r, self.E_v_min_r\n if ch == 'la': E_max, E_min = self.E_at_max_l, self.E_at_min_l\n if ch == 'ra': E_max, E_min = self.E_at_max_r, self.E_at_min_r\n\n # time-varying elastance model (y should be normalized activation function provided by user)\n E_ch_t = (E_max - E_min) * y[ci] + E_min\n \n chamber_funcs.append(E_ch_t)\n \n ci+=1\n\n elif self.chmodels[ch]['type']=='0D_elast_prescr':\n \n E_ch_t = y[ci]\n \n chamber_funcs.append(E_ch_t)\n \n ci+=1\n \n else:\n \n pass\n \n return chamber_funcs\n\n\n # initialize Lagrange multipliers for monolithic Lagrange-type coupling (FEniCS)\n def initialize_lm(self, var, iniparam):\n \n for i, ch in enumerate(['lv','rv','la','ra']):\n \n if self.chmodels[ch]['type']=='3D_solid':\n \n if ch=='lv':\n if 'p_v_l_0' in iniparam.keys(): var[i] = iniparam['p_v_l_0']\n if ch=='rv':\n if 'p_v_r_0' in iniparam.keys(): var[i] = iniparam['p_v_r_0']\n if ch=='la':\n if 'p_at_l_0' in iniparam.keys(): var[i] = iniparam['p_at_l_0']\n if ch=='ra':\n if 'p_at_r_0' in iniparam.keys(): var[i] = iniparam['p_at_r_0']\n", "#!/usr/bin/env python3\n\n### 3D biventricular generic heart, testing of:\n# - incompressible Neo-Hookean material (p2p1 interpolation)\n# - 3D-0D monolithic solution of 3D heart w/ syspulcap circulation (flux coupling)\n# - Robin BCs in xyz and normal direction (spring and dashpot)\n# - OST time-integration for solid\n# - 3x3 block iterative method for incompressible solid coupled to 0D model\n# - Rayleigh damping\n\nimport ambit\n\nimport sys, traceback\nimport numpy as np\nfrom pathlib import Path\n\nimport resultcheck\n\ndef main():\n \n basepath = str(Path(__file__).parent.absolute())\n\n # all possible input parameters\n\n IO_PARAMS = {'problem_type' : 'solid_flow0d', # solid, fluid, flow0d, solid_flow0d, fluid_flow0d\n 'mesh_domain' : ''+basepath+'/input/heart3Dcoarse_domain.xdmf',\n 'mesh_boundary' : ''+basepath+'/input/heart3Dcoarse_boundary.xdmf',\n 'write_results_every' : -999,\n 'output_path' : ''+basepath+'/tmp/',\n 'results_to_write' : ['displacement','pressure'], # see io_routines.py for what to write\n 'simname' : 'test'} # how to name the output\n\n SOLVER_PARAMS_SOLID = {'solve_type' : 'iterative', # direct, iterative\n 'tol_res' : 1.0e-8,\n 'tol_inc' : 1.0e-8,\n 'tol_lin' : 1.0e-7,\n 'print_liniter_every' : 50,\n 'divergence_continue' : None, # what to apply when Newton diverges: None, PTC ('ptc' can stay False)\n 'ptc' : False, # if you want to use PTC straight away (independent of divergence_continue)\n 'k_ptc_initial' : 0.1} # initial PTC value that adapts during nonlinear iteration\n \n SOLVER_PARAMS_FLOW0D = {'tol_res' : 1.0e-6,\n 'tol_inc' : 1.0e-6}\n\n TIME_PARAMS_SOLID = {'maxtime' : 1.0,\n 'numstep' : 100,\n 'numstep_stop' : 1,\n 'timint' : 'ost',\n 'theta_ost' : 0.5}\n \n TIME_PARAMS_FLOW0D = {'timint' : 'ost',\n 'theta_ost' : 0.5,\n 'initial_conditions' : init(), # a dictionary\n 'eps_periodic' : 1.0e-3, # cardiac cycle periodicity tolerance\n 'periodic_checktype' : None} # None, 'allvar', 'pQvar'\n\n MODEL_PARAMS_FLOW0D = {'modeltype' : 'syspulcap',\n 'parameters' : param(),\n 'chamber_models' : {'lv' : {'type' : '3D_solid'}, 'rv' : {'type' : '3D_solid'}, 'la' : {'type' : '0D_elast', 'activation_curve' : 1}, 'ra' : {'type' : '0D_elast', 'activation_curve' : 1}}}\n\n FEM_PARAMS = {'order_disp' : 2, # order of displacement interpolation (solid mechanics)\n 'order_pres' : 1, # 
order of pressure interpolation (solid, fluid mechanics)\n 'quad_degree' : 5, # quadrature degree\n 'incompressible_2field' : True} # if we want to use a 2-field functional for pressure dofs (always applies for fluid mechanics, optional for solid)\n \n COUPLING_PARAMS = {'surface_ids' : [[1],[2]], # for syspul* models: order is lv, rv, la, ra (has to be consistent with chamber_models dict)\n 'surface_p_ids' : [[1],[2]],\n 'coupling_quantity' : ['flux','flux'], # volume, flux, pressure (former need 'monolithic_direct', latter needs 'monolithic_lagrange' as coupling_type)\n 'coupling_type' : 'monolithic_direct'} # monolithic_direct, monolithic_lagrange\n\n # see solid_material.py or fluid_material.py for material laws available (and their parameters)\n MATERIALS = {'MAT1' : {'neohooke_dev' : {'mu' : 10.},\n 'inertia' : {'rho0' : 1.0e-6},\n 'rayleigh_damping' : {'eta_m' : 0.0, 'eta_k' : 0.0001}}}\n\n\n\n # define your load curves here (syntax: tcX refers to curve X, to be used in BC_DICT key 'curve' : [X,0,0], or 'curve' : X)\n # some examples... up to 9 possible (tc1 until tc9 - feel free to implement more in timeintegration.py --> timecurves function if needed...)\n class time_curves():\n \n def tc1(self, t): # atrial activation\n \n act_dur = 2.*param()['t_ed']\n t0 = 0.\n \n if t >= t0 and t <= t0 + act_dur:\n return 0.5*(1.-np.cos(2.*np.pi*(t-t0)/act_dur))\n else:\n return 0.0\n \n\n BC_DICT = { 'robin' : [{'type' : 'spring', 'id' : [3], 'dir' : 'normal', 'stiff' : 0.075},\n {'type' : 'dashpot', 'id' : [3], 'dir' : 'normal', 'visc' : 0.005},\n {'type' : 'spring', 'id' : [4], 'dir' : 'normal', 'stiff' : 2.5},\n {'type' : 'dashpot', 'id' : [4], 'dir' : 'normal', 'visc' : 0.0005},\n {'type' : 'spring', 'id' : [4], 'dir' : 'xyz', 'stiff' : 0.25},\n {'type' : 'dashpot', 'id' : [4], 'dir' : 'xyz', 'visc' : 0.0005}] }\n\n # problem setup\n problem = ambit.Ambit(IO_PARAMS, [TIME_PARAMS_SOLID, TIME_PARAMS_FLOW0D], [SOLVER_PARAMS_SOLID, SOLVER_PARAMS_FLOW0D], FEM_PARAMS, [MATERIALS, MODEL_PARAMS_FLOW0D], BC_DICT, time_curves=time_curves(), coupling_params=COUPLING_PARAMS)\n \n # problem solve\n problem.solve_problem()\n\n # --- results check\n tol = 1.0e-6\n \n s_corr = np.zeros(problem.mp.pbf.cardvasc0D.numdof)\n\n # correct 0D results\n s_corr[0] = 1.5095864743040130E+06\n s_corr[1] = 1.2844204058649662E+00\n s_corr[2] = -2.2965082274795714E+00\n s_corr[3] = -2.2516606843904677E-01\n s_corr[4] = 1.2035047944220826E+01\n s_corr[5] = -2.2965082274795789E+00\n s_corr[6] = 1.2035061723270191E+01\n s_corr[7] = 7.9266376779754364E+03\n s_corr[8] = 1.0920576465746846E+01\n s_corr[9] = 4.8757248185773104E+04\n s_corr[10] = 4.5874998121783552E+04\n s_corr[11] = 3.5972406593836102E+04\n s_corr[12] = 2.4558826877860327E+04\n s_corr[13] = 8.1860969826562832E+03\n s_corr[14] = 2.2677989771675335E+00\n s_corr[15] = 8.3769499009227275E+02\n s_corr[16] = 2.2702566758390823E+00\n s_corr[17] = 9.5189334925959599E+02\n s_corr[18] = 2.2702868360248418E+00\n s_corr[19] = 7.5312458499319700E+02\n s_corr[20] = 2.2701207039868367E+00\n s_corr[21] = 5.0027868673701960E+02\n s_corr[22] = 2.2705227157170751E+00\n s_corr[23] = 1.7138025576859900E+02\n s_corr[24] = 2.2541277949292278E+00\n s_corr[25] = 2.0733856020336604E+05\n s_corr[26] = 9.9000307744360238E+04\n s_corr[27] = 2.7306345247149444E-01\n s_corr[28] = -4.3686268354670260E-01\n s_corr[29] = 1.7406314472713416E-01\n s_corr[30] = 2.4017163277669913E+00\n s_corr[31] = 1.1803816043509409E+04\n s_corr[32] = 2.3131877074406706E+00\n s_corr[33] = 
2.0066530960234333E+05\n s_corr[34] = 1.6159475288856615E+00\n s_corr[35] = 3.9878128320907672E+04\n\n check1 = resultcheck.results_check_vec(problem.mp.pbf.s, s_corr, problem.mp.comm, tol=tol)\n success = resultcheck.success_check([check1], problem.mp.comm)\n \n return success\n\n\n\n\n# syspulcap circulation model initial condition and parameter dicts...\n\ndef init():\n \n factor_kPa_mmHg = 7.500615\n\n return {'q_vin_l_0' : 0.0,\n 'p_at_l_0' : 10.0/factor_kPa_mmHg,\n 'q_vout_l_0' : 0.0,\n 'p_v_l_0' : 10.0/factor_kPa_mmHg,\n 'p_ar_sys_0' : 90.29309546/factor_kPa_mmHg,\n 'q_ar_sys_0' : 0.0,\n\n 'p_arperi_sys_0' : 90.29309546/factor_kPa_mmHg,\n 'q_arspl_sys_0' : 0.0,\n 'q_arespl_sys_0' : 0.0,\n 'q_armsc_sys_0' : 0.0,\n 'q_arcer_sys_0' : 0.0,\n 'q_arcor_sys_0' : 0.0,\n 'p_venspl_sys_0' : 17.0/factor_kPa_mmHg,\n 'q_venspl_sys_0' : 0.0,\n 'p_venespl_sys_0' : 17.0/factor_kPa_mmHg,\n 'q_venespl_sys_0' : 0.0,\n 'p_venmsc_sys_0' : 17.0/factor_kPa_mmHg,\n 'q_venmsc_sys_0' : 0.0,\n 'p_vencer_sys_0' : 17.0/factor_kPa_mmHg,\n 'q_vencer_sys_0' : 0.0,\n 'p_vencor_sys_0' : 17.0/factor_kPa_mmHg,\n 'q_vencor_sys_0' : 0.0,\n\n 'p_ven_sys_0' : 17.0/factor_kPa_mmHg,\n 'q_ven_sys_0' : 0.0,\n 'q_vin_r_0' : 0.0,\n 'p_at_r_0' : 10.0/(5.*factor_kPa_mmHg),\n 'q_vout_r_0' : 0.0,\n 'p_v_r_0' : 10.0/(5.*factor_kPa_mmHg),\n 'p_ar_pul_0' : 90.29309546/(5.*factor_kPa_mmHg),\n 'q_ar_pul_0' : 0.0,\n 'p_cap_pul_0' : 90.29309546/(5.*factor_kPa_mmHg),\n 'q_cap_pul_0' : 0.0,\n 'p_ven_pul_0' : 12.0/factor_kPa_mmHg,\n 'q_ven_pul_0' : 0.0}\n\n\n\ndef param():\n\n R_ar_sys = 120.0e-6\n tau_ar_sys = 1.65242332\n tau_ar_pul = 0.3\n \n # Diss Hirschvogel tab. 2.7\n C_ar_sys = tau_ar_sys/R_ar_sys\n Z_ar_sys = R_ar_sys/20.\n R_ven_sys = R_ar_sys/5.\n C_ven_sys = 30.*C_ar_sys\n R_ar_pul = R_ar_sys/8.\n C_ar_pul = tau_ar_pul/R_ar_pul\n Z_ar_pul = 0.\n R_ven_pul = R_ar_pul\n C_ven_pul = 2.5*C_ar_pul\n \n L_ar_sys = 0.667e-6\n L_ven_sys = 0.\n L_ar_pul = 0.\n L_ven_pul = 0.\n \n # atrial elastances\n E_at_A_l, E_at_min_l = 20.0e-6, 9.0e-6\n E_at_A_r, E_at_min_r = 10.0e-6, 8.0e-6\n \n # timings\n t_ed = 0.2\n t_es = 0.53\n T_cycl = 1.0\n\n ## systemic arterial\n # now we have to separate the resistance into a proximal and a peripheral part\n frac_Rprox_Rtotal = 0.06 # Ursino et al. factor: 0.06 - OK\n R_arperi_sys = (1.-frac_Rprox_Rtotal)*R_ar_sys\n R_ar_sys *= frac_Rprox_Rtotal # now R_ar_sys(prox)\n\n frac_Cprox_Ctotal = 0.95#0.07 # Ursino et al. factor: 0.07 - XXXX too small???!!!!! - keep in mind that most compliance lies in the aorta / proximal!\n C_arperi_sys = (1.-frac_Cprox_Ctotal)*C_ar_sys\n C_ar_sys *= frac_Cprox_Ctotal # now C_ar_sys(prox)\n\n # R in parallel:\n # R_arperi_sys = (1/R_arspl_sys + 1/R_arespl_sys + 1/R_armsc_sys + 1/R_arcer_sys + 1/R_arcor_sys)^(-1)\n R_arspl_sys = 3.35 * R_arperi_sys # Ursino et al. factor: 3.35 - OK\n R_arespl_sys = 3.56 * R_arperi_sys # Ursino et al. factor: 3.56 - OK\n R_armsc_sys = 4.54 * R_arperi_sys # Ursino et al. factor: 4.54 - OK\n R_arcer_sys = 6.65 * R_arperi_sys # Ursino et al. factor: 6.65 - OK\n R_arcor_sys = 19.95 * R_arperi_sys # Ursino et al. factor: 19.95 - OK\n\n ## muscular resistance (and hence total systemic arterial resistance!) falls in sportsmode (Hogan 2009)\n #if sportsmode:\n #R_armsc_sys *= 0.3\n\n # C in parallel (fractions have to sum to 1):\n # C_arperi_sys = C_arspl_sys + C_arespl_sys + C_armsc_sys + C_arcer_sys + C_arcor_sys\n C_arspl_sys = 0.55 * C_arperi_sys # Ursino et al. factor: 0.55 - OK\n C_arespl_sys = 0.18 * C_arperi_sys # Ursino et al. 
factor: 0.18 - OK\n C_armsc_sys = 0.14 * C_arperi_sys # Ursino et al. factor: 0.14 - OK\n C_arcer_sys = 0.11 * C_arperi_sys # Ursino et al. factor: 0.11 - OK\n C_arcor_sys = 0.03 * C_arperi_sys # Ursino et al. factor: 0.03 - OK\n\n ## systemic venous\n frac_Rprox_Rtotal = 0.8 # no Ursino et al. factor since they do not have that extra compartment!\n R_venperi_sys = (1.-frac_Rprox_Rtotal) * R_ven_sys\n R_ven_sys *= frac_Rprox_Rtotal # now R_ven_sys(prox)\n\n frac_Cprox_Ctotal = 0.2 # no Ursino et al. factor since they do not have that extra compartment!\n C_venperi_sys = (1.-frac_Cprox_Ctotal)*C_ven_sys\n C_ven_sys *= frac_Cprox_Ctotal # now C_ven_sys(prox)\n\n # R in parallel:\n # R_venperi_sys = (1/R_venspl_sys + 1/R_venespl_sys + 1/R_venmsc_sys + 1/R_vencer_sys + 1/R_vencor_sys)^(-1)\n R_venspl_sys = 3.4 * R_venperi_sys # Ursino et al. factor: 3.4 - OK\n R_venespl_sys = 3.53 * R_venperi_sys # Ursino et al. factor: 3.53 - OK\n R_venmsc_sys = 4.47 * R_venperi_sys # Ursino et al. factor: 4.47 - OK\n R_vencer_sys = 6.66 * R_venperi_sys # Ursino et al. factor: 6.66 - OK\n R_vencor_sys = 19.93 * R_venperi_sys # Ursino et al. factor: 19.93 - OK\n\n # C in parallel (fractions have to sum to 1):\n # C_venperi_sys = C_venspl_sys + C_venespl_sys + C_venmsc_sys + C_vencer_sys + C_vencor_sys\n C_venspl_sys = 0.55 * C_venperi_sys # Ursino et al. factor: 0.55 - OK\n C_venespl_sys = 0.18 * C_venperi_sys # Ursino et al. factor: 0.18 - OK\n C_venmsc_sys = 0.14 * C_venperi_sys # Ursino et al. factor: 0.14 - OK\n C_vencer_sys = 0.1 * C_venperi_sys # Ursino et al. factor: 0.1 - OK\n C_vencor_sys = 0.03 * C_venperi_sys # Ursino et al. factor: 0.03 - OK\n\n ## pulmonary arterial\n frac_Rprox_Rtotal = 0.5#0.72 # Ursino et al. factor: 0.72 - hm... doubt that - stick with 0.5\n R_cap_pul = (1.-frac_Rprox_Rtotal)*R_ar_pul\n R_ar_pul *= frac_Rprox_Rtotal # now R_ar_pul(prox)\n\n ## pulmonary venous\n frac_Cprox_Ctotal = 0.5#0.12 # Ursino et al. factor: 0.12 - XXX?: gives shitty p_puls... - stick with 0.5\n C_cap_pul = (1.-frac_Cprox_Ctotal)*C_ar_pul\n C_ar_pul *= frac_Cprox_Ctotal # now C_ar_pul(prox)\n\n ### unstressed compartment volumes, diffult to estimate - use literature values!\n # these volumes only become relevant for the gas transport models as they determine the capacity of each\n # compartment to store constituents - however, they are also used for postprocessing of the flow models...\n V_at_l_u = 5000.0 # applies only in case of 0D or prescribed atria\n V_at_r_u = 4000.0 # applies only in case of 0D or prescribed atria\n V_v_l_u = 10000.0 # applies only in case of 0D or prescribed ventricles\n V_v_r_u = 8000.0 # applies only in case of 0D or prescribed ventricles\n V_ar_sys_u = 0.0 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_ar_pul_u = 0.0 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_ven_pul_u = 120.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n # peripheral systemic arterial\n V_arspl_sys_u = 274.4e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_arespl_sys_u = 134.64e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_armsc_sys_u = 105.8e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_arcer_sys_u = 72.13e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_arcor_sys_u = 24.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n # peripheral systemic venous\n V_venspl_sys_u = 1121.0e3 # Ursino et al. 
Am J Physiol Heart Circ Physiol (2000), mm^3\n V_venespl_sys_u = 550.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_venmsc_sys_u = 432.14e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_vencer_sys_u = 294.64e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_vencor_sys_u = 98.21e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n V_ven_sys_u = 100.0e3 # estimated (Ursino et al. do not have that extra venous compartment...)\n # pulmonary capillary\n V_cap_pul_u = 123.0e3 # Ursino et al. Am J Physiol Heart Circ Physiol (2000), mm^3\n\n return {'R_ar_sys' : R_ar_sys,\n 'C_ar_sys' : C_ar_sys,\n 'L_ar_sys' : L_ar_sys,\n 'Z_ar_sys' : Z_ar_sys,\n 'R_arspl_sys' : R_arspl_sys,\n 'C_arspl_sys' : C_arspl_sys,\n 'R_arespl_sys' : R_arespl_sys,\n 'C_arespl_sys' : C_arespl_sys,\n 'R_armsc_sys' : R_armsc_sys,\n 'C_armsc_sys' : C_armsc_sys,\n 'R_arcer_sys' : R_arcer_sys,\n 'C_arcer_sys' : C_arcer_sys,\n 'R_arcor_sys' : R_arcor_sys,\n 'C_arcor_sys' : C_arcor_sys,\n 'R_venspl_sys' : R_venspl_sys,\n 'C_venspl_sys' : C_venspl_sys,\n 'R_venespl_sys' : R_venespl_sys,\n 'C_venespl_sys' : C_venespl_sys,\n 'R_venmsc_sys' : R_venmsc_sys,\n 'C_venmsc_sys' : C_venmsc_sys,\n 'R_vencer_sys' : R_vencer_sys,\n 'C_vencer_sys' : C_vencer_sys,\n 'R_vencor_sys' : R_vencor_sys,\n 'C_vencor_sys' : C_vencor_sys,\n 'R_ar_pul' : R_ar_pul,\n 'C_ar_pul' : C_ar_pul,\n 'L_ar_pul' : L_ar_pul,\n 'Z_ar_pul' : Z_ar_pul,\n 'R_cap_pul' : R_cap_pul,\n 'C_cap_pul' : C_cap_pul,\n 'R_ven_sys' : R_ven_sys,\n 'C_ven_sys' : C_ven_sys, \n 'L_ven_sys' : L_ven_sys,\n 'R_ven_pul' : R_ven_pul,\n 'C_ven_pul' : C_ven_pul,\n 'L_ven_pul' : L_ven_pul,\n # atrial elastances\n 'E_at_max_l' : E_at_min_l+E_at_A_l,\n 'E_at_min_l' : E_at_min_l,\n 'E_at_max_r' : E_at_min_r+E_at_A_r,\n 'E_at_min_r' : E_at_min_r,\n # ventricular elastances\n 'E_v_max_l' : 7.0e-5,\n 'E_v_min_l' : 12.0e-6,\n 'E_v_max_r' : 3.0e-5,\n 'E_v_min_r' : 10.0e-6,\n # valve resistances\n 'R_vin_l_min' : 1.0e-6,\n 'R_vin_l_max' : 1.0e1,\n 'R_vout_l_min' : 1.0e-6,\n 'R_vout_l_max' : 1.0e1,\n 'R_vin_r_min' : 1.0e-6,\n 'R_vin_r_max' : 1.0e1,\n 'R_vout_r_min' : 1.0e-6,\n 'R_vout_r_max' : 1.0e1,\n # timings\n 't_ed' : t_ed,\n 't_es' : t_es,\n 'T_cycl' : T_cycl,\n # unstressed compartment volumes (for post-processing)\n 'V_at_l_u' : V_at_l_u,\n 'V_at_r_u' : V_at_r_u,\n 'V_v_l_u' : V_v_l_u,\n 'V_v_r_u' : V_v_r_u,\n 'V_ar_sys_u' : V_ar_sys_u,\n 'V_arspl_sys_u' : V_arspl_sys_u,\n 'V_arespl_sys_u' : V_arespl_sys_u,\n 'V_armsc_sys_u' : V_armsc_sys_u,\n 'V_arcer_sys_u' : V_arcer_sys_u,\n 'V_arcor_sys_u' : V_arcor_sys_u,\n 'V_venspl_sys_u' : V_venspl_sys_u,\n 'V_venespl_sys_u' : V_venespl_sys_u,\n 'V_venmsc_sys_u' : V_venmsc_sys_u,\n 'V_vencer_sys_u' : V_vencer_sys_u,\n 'V_vencor_sys_u' : V_vencor_sys_u,\n 'V_ven_sys_u' : V_ven_sys_u,\n 'V_ar_pul_u' : V_ar_pul_u,\n 'V_cap_pul_u' : V_cap_pul_u,\n 'V_ven_pul_u' : V_ven_pul_u}\n\n\n\n\nif __name__ == \"__main__\":\n \n success = False\n \n try:\n success = main()\n except:\n print(traceback.format_exc())\n \n if success:\n sys.exit(0)\n else:\n sys.exit(1)\n" ]
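[Editor's sketch, not part of the archived record: the 'smooth_pres_momentum' branch of valvelaw() in the first file above joins the open- and closed-valve lines q = (popen - p)/R with a cubic Hermite spline on an interval of width vparams[-1] around popen. Below is a minimal numpy check of that construction, using arbitrary test values (Rmin/Rmax chosen to echo the valve resistances in the second file's param() dict). It verifies that the piecewise flux is continuous at both seam points; the end slopes -1/Rmax and -1/Rmin match the outer branches by construction of the Hermite tangents.]

import numpy as np

popen, Rmax, Rmin, eps = 0.0, 1.0e1, 1.0e-6, 0.1   # arbitrary test values

def q_smooth(p):
    p = np.asarray(p, dtype=float)
    # spline end values (sign-flipped flux, named p0/p1 as in the source)
    p0 = (popen - eps/2. - popen)/Rmax
    p1 = (popen + eps/2. - popen)/Rmin
    m0, m1 = 1./Rmax, 1./Rmin                      # end slopes
    s = (p - (popen - eps/2.))/eps                 # local coordinate in [0,1]
    # cubic Hermite basis functions, as in the source
    h00 = 2.*s**3 - 3.*s**2 + 1.
    h01 = -2.*s**3 + 3.*s**2
    h10 = s**3 - 2.*s**2 + s
    h11 = s**3 - s**2
    c = h00*p0 + h10*m0*eps + h01*p1 + h11*m1*eps
    return np.where(p < popen - eps/2., (popen - p)/Rmax,
                    np.where(p >= popen + eps/2., (popen - p)/Rmin, -c))

# value continuity at the two seam points of the piecewise definition
for pc in (popen - eps/2., popen + eps/2.):
    print(q_smooth(pc - 1e-9), q_smooth(pc + 1e-9))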
[ [ "numpy.isclose" ], [ "numpy.zeros", "numpy.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
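[Editor's sketch, not part of the archived record: the '0D_elast' chamber model in the code above combines evaluate_chamber_state(), which builds E(t) = (E_max - E_min)*y(t) + E_min from a normalized activation y(t), with set_coupling_state(), which sets V = p/E(t) + V_u. A self-contained numpy illustration follows, using the left-atrial values from the test script's param() and init() dicts (units kPa and mm^3). Holding the pressure fixed over the cycle is an assumption made purely for illustration.]

import numpy as np

def activation(t, t_ed=0.2, t0=0.0):
    act_dur = 2.*t_ed                        # mirrors tc1 in the test script
    if t0 <= t <= t0 + act_dur:
        return 0.5*(1. - np.cos(2.*np.pi*(t - t0)/act_dur))
    return 0.

def elastance(t, E_min=9.0e-6, E_max=29.0e-6):
    # E_at_min_l and E_at_min_l + E_at_A_l from param(), kPa/mm^3
    return (E_max - E_min)*activation(t) + E_min

p_at = 10.0/7.500615                         # p_at_l_0 from init(), kPa
V_u = 5000.0                                 # V_at_l_u from param(), mm^3
for t in (0.0, 0.1, 0.2, 0.3, 0.4):
    E = elastance(t)
    print("t=%.1f  E=%.2e kPa/mm^3  V=%.0f mm^3" % (t, E, p_at/E + V_u))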
sophiaas/alphacsc
[ "402b8f6c8ee4ba9c86e9da0e2073d900cf8da207", "402b8f6c8ee4ba9c86e9da0e2073d900cf8da207" ]
[ "alphacsc/other/sporco/examples/scripts/tv/tvl2den_clr.py", "alphacsc/other/sporco/sporco/admm/cbpdntv.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This file is part of the SPORCO package. Details of the copyright\n# and user license can be found in the 'LICENSE.txt' file distributed\n# with the package.\n\n\"\"\"\nColour ℓ2-TV Denoising\n======================\n\nThis example demonstrates the use of class :class:`.tvl2.TVL2Denoise` for removing Gaussian white noise from a colour image using Total Variation regularization with an ℓ2 data fidelity term (ℓ2-TV denoising).\n\"\"\"\n\n\nfrom __future__ import print_function\nfrom builtins import input\nfrom builtins import range\n\nimport numpy as np\n\nfrom sporco.admm import tvl2\nfrom sporco import util\nfrom sporco import metric\nfrom sporco import plot\n\n\n\"\"\"\nLoad reference image.\n\"\"\"\n\nimg = util.ExampleImages().image('monarch.png', scaled=True,\n idxexp=np.s_[:,160:672])\n\n\n\"\"\"\nConstruct test image corrupted by Gaussian white noise with a 0.05 standard deviation.\n\"\"\"\n\nnp.random.seed(12345)\nimgn = img + np.random.normal(0.0, 0.05, img.shape)\n\n\n\"\"\"\nSet regularization parameter and options for ℓ2-TV denoising solver. The regularization parameter used here has been manually selected for good performance.\n\"\"\"\n\nlmbda = 0.04\nopt = tvl2.TVL2Denoise.Options({'Verbose': True, 'MaxMainIter': 200,\n 'gEvalY': False, 'AutoRho': {'Enabled': True}})\n\n\n\"\"\"\nCreate solver object and solve, returning the the denoised image ``imgr``.\n\"\"\"\n\nb = tvl2.TVL2Denoise(imgn, lmbda, opt)\nimgr = b.solve()\n\n\n\"\"\"\nDisplay solve time and denoising performance.\n\"\"\"\n\nprint(\"TVL2Denoise solve time: %5.2f s\" % b.timer.elapsed('solve'))\nprint(\"Noisy image PSNR: %5.2f dB\" % metric.psnr(img, imgn))\nprint(\"Denoised image PSNR: %5.2f dB\" % metric.psnr(img, imgr))\n\n\n\"\"\"\nDisplay reference, corrupted, and denoised images.\n\"\"\"\n\nfig = plot.figure(figsize=(20, 5))\nplot.subplot(1, 3, 1)\nplot.imview(img, fig=fig, title='Reference')\nplot.subplot(1, 3, 2)\nplot.imview(imgn, fig=fig, title='Corrupted')\nplot.subplot(1, 3, 3)\nplot.imview(imgr, fig=fig, title=r'Restored ($\\ell_2$-TV)')\nfig.show()\n\n\n\"\"\"\nGet iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number.\n\"\"\"\n\nits = b.getitstat()\nfig = plot.figure(figsize=(20, 5))\nplot.subplot(1, 3, 1)\nplot.plot(its.ObjFun, fig=fig, xlbl='Iterations', ylbl='Functional')\nplot.subplot(1, 3, 2)\nplot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, fig=fig,\n ptyp='semilogy', xlbl='Iterations', ylbl='Residual',\n lgnd=['Primal', 'Dual'])\nplot.subplot(1, 3, 3)\nplot.plot(its.Rho, fig=fig, xlbl='Iterations', ylbl='Penalty Parameter')\nfig.show()\n\n\n# Wait for enter on keyboard\ninput()\n", "# -*- coding: utf-8 -*-\n# Copyright (C) 2016-2017 by Brendt Wohlberg <[email protected]>\n# All rights reserved. BSD 3-clause License.\n# This file is part of the SPORCO package. 
Details of the copyright\n# and user license can be found in the 'LICENSE.txt' file distributed\n# with the package.\n\n\"\"\"Classes for ADMM algorithm for the variants of the Convolutional BPDN\nproblem with Total Variation regularisation terms\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom builtins import range\nfrom builtins import object\n\nimport numpy as np\nfrom scipy import linalg\nimport copy\n\nfrom sporco.admm import admm\nimport sporco.cnvrep as cr\nfrom sporco.admm import cbpdn\nimport sporco.linalg as sl\nfrom sporco.util import u\n\n\n__author__ = \"\"\"Brendt Wohlberg <[email protected]>\"\"\"\n\n\nclass ConvBPDNScalarTV(admm.ADMM):\n r\"\"\"**Class inheritance structure**\n\n .. inheritance-diagram:: ConvBPDNScalarTV\n :parts: 2\n\n |\n\n ADMM algorithm for an extension of Convolutional BPDN including\n terms penalising the total variation of each coefficient map\n :cite:`wohlberg-2017-convolutional`.\n\n Solve the optimisation problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{x} \\; \\frac{1}{2}\n \\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s}\n \\right\\|_2^2 + \\lambda \\sum_m \\| \\mathbf{x}_m \\|_1 +\n \\mu \\sum_m \\left\\| \\sqrt{\\sum_i (G_i \\mathbf{x}_m)^2} \\right\\|_1\n \\;\\;,\n\n where :math:`G_i` is an operator computing the derivative along index\n :math:`i`, via the ADMM problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\left\\| D \\mathbf{x} -\n \\mathbf{s} \\right\\|_2^2 + \\lambda\n \\| \\mathbf{y}_L \\|_1 + \\mu \\sum_m \\left\\| \\sqrt{\\sum_{i=0}^{L-1}\n \\mathbf{y}_i^2} \\right\\|_1 \\quad \\text{ such that } \\quad\n \\left( \\begin{array}{c} \\Gamma_0 \\\\ \\Gamma_1 \\\\ \\vdots \\\\ I\n \\end{array} \\right) \\mathbf{x} =\n \\left( \\begin{array}{c} \\mathbf{y}_0 \\\\\n \\mathbf{y}_1 \\\\ \\vdots \\\\ \\mathbf{y}_L \\end{array}\n \\right) \\;\\;,\n\n where\n\n .. math::\n D = \\left( \\begin{array}{ccc} D_0 & D_1 & \\ldots \\end{array} \\right)\n \\qquad\n \\mathbf{x} = \\left( \\begin{array}{c} \\mathbf{x}_0 \\\\ \\mathbf{x}_1 \\\\\n \\vdots \\end{array} \\right) \\qquad\n \\Gamma_i = \\left( \\begin{array}{ccc}\n G_i & 0 & \\ldots \\\\ 0 & G_i & \\ldots \\\\ \\vdots & \\vdots & \\ddots\n \\end{array} \\right) \\;\\;.\n\n\n For multi-channel signals with a single-channel dictionary, scalar TV is\n applied independently to each coefficient map for channel :math:`c` and\n filter :math:`m`. Since multi-channel signals with a multi-channel\n dictionary also have one coefficient map per filter, the behaviour is\n the same as for single-channel signals.\n\n\n After termination of the :meth:`solve` method, attribute :attr:`itstat`\n is a list of tuples representing statistics of each iteration. 
The\n fields of the named tuple ``IterationStats`` are:\n\n ``Iter`` : Iteration number\n\n ``ObjFun`` : Objective function value\n\n ``DFid`` : Value of data fidelity term :math:`(1/2) \\| \\sum_m\n \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s} \\|_2^2`\n\n ``RegL1`` : Value of regularisation term :math:`\\sum_m \\|\n \\mathbf{x}_m \\|_1`\n\n ``RegTV`` : Value of regularisation term :math:`\\sum_m \\left\\|\n \\sqrt{\\sum_i (G_i \\mathbf{x}_m)^2} \\right\\|_1`\n\n ``PrimalRsdl`` : Norm of primal residual\n\n ``DualRsdl`` : Norm of dual residual\n\n ``EpsPrimal`` : Primal residual stopping tolerance\n :math:`\\epsilon_{\\mathrm{pri}}`\n\n ``EpsDual`` : Dual residual stopping tolerance\n :math:`\\epsilon_{\\mathrm{dua}}`\n\n ``Rho`` : Penalty parameter\n\n ``XSlvRelRes`` : Relative residual of X step solver\n\n ``Time`` : Cumulative run time\n \"\"\"\n\n\n class Options(cbpdn.ConvBPDN.Options):\n r\"\"\"ConvBPDNScalarTV algorithm options\n\n Options include all of those defined in\n :class:`.admm.cbpdn.ConvBPDN.Options`, together with additional\n options:\n\n ``TVWeight`` : An array of weights :math:`w_m` for the term\n penalising the gradient of the coefficient maps. If this\n option is defined, the regularization term is :math:`\\sum_m w_m\n \\left\\| \\sqrt{\\sum_i (G_i \\mathbf{x}_m)^2} \\right\\|_1`\n where :math:`w_m` is the weight for filter index :math:`m`. The\n array should be an :math:`M`-vector where :math:`M` is the number\n of filters in the dictionary.\n \"\"\"\n\n defaults = copy.deepcopy(cbpdn.ConvBPDN.Options.defaults)\n defaults.update({'TVWeight' : 1.0})\n\n\n def __init__(self, opt=None):\n \"\"\"Initialise ConvBPDNScalarTV algorithm options object\"\"\"\n\n if opt is None:\n opt = {}\n cbpdn.ConvBPDN.Options.__init__(self, opt)\n\n\n\n itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1', 'RegTV')\n itstat_fields_extra = ('XSlvRelRes',)\n hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'), u('RegTV'))\n hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid',\n u('Regℓ1'): 'RegL1', u('RegTV'): 'RegTV'}\n\n\n def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):\n \"\"\"\n Initialise a ConvBPDNScalarTV object with problem parameters.\n\n\n |\n\n **Call graph**\n\n .. 
image:: _static/jonga/cbpdnstv_init.svg\n :width: 20%\n :target: _static/jonga/cbpdnstv_init.svg\n\n |\n\n\n Parameters\n ----------\n D : array_like\n Dictionary matrix\n S : array_like\n Signal vector or matrix\n lmbda : float\n Regularisation parameter (l1)\n mu : float\n Regularisation parameter (gradient)\n opt : :class:`ConvBPDNScalarTV.Options` object\n Algorithm options\n dimK : 0, 1, or None, optional (default None)\n Number of dimensions in input signal corresponding to multiple\n independent signals\n dimN : int, optional (default 2)\n Number of spatial dimensions\n \"\"\"\n\n if opt is None:\n opt = ConvBPDNScalarTV.Options()\n\n # Infer problem dimensions and set relevant attributes of self\n self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)\n\n # Call parent class __init__\n Nx = np.product(self.cri.shpX)\n yshape = self.cri.shpX + (len(self.cri.axisN)+1,)\n super(ConvBPDNScalarTV, self).__init__(Nx, yshape, yshape,\n S.dtype, opt)\n\n # Set l1 term scaling and weight array\n self.lmbda = self.dtype.type(lmbda)\n self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)\n self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))\n\n self.mu = self.dtype.type(mu)\n if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:\n self.Wtv = np.asarray(opt['TVWeight'].reshape((1,)*(dimN+2) +\n opt['TVWeight'].shape), dtype=self.dtype)\n else:\n # Wtv is a scalar: no need to change shape\n self.Wtv = self.dtype.type(opt['TVWeight'])\n\n # Set penalty parameter\n self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),\n dtype=self.dtype)\n\n # Set rho_xi attribute\n self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,\n dtype=self.dtype)\n\n # Reshape D and S to standard layout\n self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)\n self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)\n\n # Compute signal in DFT domain\n self.Sf = sl.rfftn(self.S, None, self.cri.axisN)\n\n self.Gf, GHGf = sl.GradientFilters(self.cri.dimN+3, self.cri.axisN,\n self.cri.Nv, dtype=self.dtype)\n self.GHGf = self.Wtv**2 * GHGf\n\n # Initialise byte-aligned arrays for pyfftw\n self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)\n xfshp = list(self.cri.shpX)\n xfshp[dimN-1] = xfshp[dimN-1]//2 + 1\n self.Xf = sl.pyfftw_empty_aligned(xfshp,\n dtype=sl.complex_dtype(self.dtype))\n\n self.setdict()\n\n\n\n def setdict(self, D=None):\n \"\"\"Set dictionary array.\"\"\"\n\n if D is not None:\n self.D = np.asarray(D, dtype=self.dtype)\n self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)\n # Compute D^H S\n self.DSf = np.conj(self.Df) * self.Sf\n if self.cri.Cd > 1:\n self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)\n if self.opt['HighMemSolve'] and self.cri.Cd == 1:\n self.c = sl.solvedbi_sm_c(self.Df, np.conj(self.Df),\n self.rho*self.GHGf + self.rho, self.cri.axisM)\n else:\n self.c = None\n\n\n\n def rhochange(self):\n \"\"\"Updated cached c array when rho changes.\"\"\"\n\n if self.opt['HighMemSolve'] and self.cri.Cd == 1:\n self.c = sl.solvedbi_sm_c(self.Df, np.conj(self.Df),\n self.rho*self.GHGf + self.rho, self.cri.axisM)\n\n\n\n def xstep(self):\n r\"\"\"Minimise Augmented Lagrangian with respect to\n :math:`\\mathbf{x}`.\"\"\"\n\n self.YU[:] = self.Y - self.U\n YUf = sl.rfftn(self.YU, None, self.cri.axisN)\n\n # The sum is over the extra axis indexing spatial gradient\n # operators G_i, *not* over axisM\n b = self.DSf + self.rho*(YUf[..., -1] + self.Wtv * np.sum(\n np.conj(self.Gf) * YUf[..., 0:-1], axis=-1))\n\n 
if self.cri.Cd == 1:\n self.Xf[:] = sl.solvedbi_sm(self.Df, self.rho*self.GHGf +\n self.rho, b, self.c, self.cri.axisM)\n else:\n self.Xf[:] = sl.solvemdbi_ism(self.Df, self.rho*self.GHGf +\n self.rho, b, self.cri.axisM, self.cri.axisC)\n\n self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)\n\n if self.opt['LinSolveCheck']:\n Dop = lambda x: sl.inner(self.Df, x, axis=self.cri.axisM)\n if self.cri.Cd == 1:\n DHop = lambda x: np.conj(self.Df) * x\n else:\n DHop = lambda x: sl.inner(np.conj(self.Df), x,\n axis=self.cri.axisC)\n ax = DHop(Dop(self.Xf)) + (self.rho*self.GHGf + self.rho)*self.Xf\n self.xrrs = sl.rrs(ax, b)\n else:\n self.xrrs = None\n\n\n\n def ystep(self):\n r\"\"\"Minimise Augmented Lagrangian with respect to\n :math:`\\mathbf{y}`.\"\"\"\n\n AXU = self.AX + self.U\n self.Y[..., 0:-1] = sl.shrink2(AXU[..., 0:-1], self.mu/self.rho)\n self.Y[..., -1] = sl.shrink1(AXU[..., -1],\n (self.lmbda/self.rho) * self.Wl1)\n\n\n\n def obfn_fvarf(self):\n \"\"\"Variable to be evaluated in computing data fidelity term,\n depending on ``fEvalX`` option value.\n \"\"\"\n\n return self.Xf if self.opt['fEvalX'] else \\\n sl.rfftn(self.Y[..., -1], None, self.cri.axisN)\n\n\n\n def var_y0(self):\n r\"\"\"Get :math:`\\mathbf{y}_0` variable, consisting of all blocks of\n :math:`\\mathbf{y}` corresponding to a gradient operator.\"\"\"\n\n return self.Y[..., 0:-1]\n\n\n\n def var_y1(self):\n r\"\"\"Get :math:`\\mathbf{y}_1` variable, the block of\n :math:`\\mathbf{y}` corresponding to the identity operator.\"\"\"\n\n return self.Y[..., -1:]\n\n\n\n def var_yx(self):\n r\"\"\"Get component block of :math:`\\mathbf{y}` that is constrained\n to be equal to :math:`\\mathbf{x}`.\"\"\"\n\n return self.Y[..., -1]\n\n\n\n def var_yx_idx(self):\n r\"\"\"Get index expression for component block of :math:`\\mathbf{y}`\n that is constrained to be equal to :math:`\\mathbf{x}`.\n \"\"\"\n\n return np.s_[..., -1]\n\n\n\n def getmin(self):\n \"\"\"Get minimiser after optimisation.\"\"\"\n\n return self.X if self.opt['ReturnX'] else self.var_y1()[..., 0]\n\n\n\n def getcoef(self):\n \"\"\"Get final coefficient array.\"\"\"\n\n return self.getmin()\n\n\n\n def obfn_g0var(self):\n \"\"\"Variable to be evaluated in computing the TV regularisation\n term, depending on the ``gEvalY`` option value.\n \"\"\"\n\n # Use of self.AXnr[..., 0:-1] instead of self.cnst_A0(None, self.Xf)\n # reduces number of calls to self.cnst_A0\n return self.var_y0() if self.opt['gEvalY'] else \\\n self.AXnr[..., 0:-1]\n\n\n\n def obfn_g1var(self):\n r\"\"\"Variable to be evaluated in computing the :math:`\\ell_1`\n regularisation term, depending on the ``gEvalY`` option value.\n \"\"\"\n\n # Use of self.AXnr[...,-1:] instead of self.cnst_A1(self.X)\n # reduces number of calls to self.cnst_A1\n return self.var_y1() if self.opt['gEvalY'] else \\\n self.AXnr[..., -1:]\n\n\n\n def obfn_gvar(self):\n \"\"\"Method providing compatibility with the interface of\n :class:`.admm.cbpdn.ConvBPDN` and derived classes in order to make\n this class compatible with classes such as :class:`.AddMaskSim`.\n \"\"\"\n\n return self.obfn_g1var()\n\n\n\n def eval_objfn(self):\n \"\"\"Compute components of objective function as well as total\n contribution to objective function.\n \"\"\"\n\n dfd = self.obfn_dfd()\n reg = self.obfn_reg()\n obj = dfd + reg[0]\n return (obj, dfd) + reg[1:]\n\n\n\n def obfn_dfd(self):\n r\"\"\"Compute data fidelity term :math:`(1/2) \\| \\sum_m \\mathbf{d}_m *\n \\mathbf{x}_m - \\mathbf{s} \\|_2^2`.\n \"\"\"\n\n Ef = sl.inner(self.Df, 
self.obfn_fvarf(), axis=self.cri.axisM) \\\n - self.Sf\n return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN)/2.0\n\n\n\n def obfn_reg(self):\n \"\"\"Compute regularisation term and contribution to objective\n function.\n \"\"\"\n\n rl1 = linalg.norm((self.Wl1 * self.obfn_g1var()).ravel(), 1)\n rtv = np.sum(np.sqrt(np.sum(self.obfn_g0var()**2, axis=-1)))\n return (self.lmbda*rl1 + self.mu*rtv, rl1, rtv)\n\n\n\n def itstat_extra(self):\n \"\"\"Non-standard entries for the iteration stats record tuple.\"\"\"\n\n return (self.xrrs,)\n\n\n\n def cnst_A0(self, X, Xf=None):\n r\"\"\"Compute :math:`A_0 \\mathbf{x}` component of ADMM problem\n constraint. In this case :math:`A_0 \\mathbf{x} = (\\Gamma_0^T \\;\\;\n \\Gamma_1^T \\;\\; \\ldots )^T \\mathbf{x}`.\n \"\"\"\n\n if Xf is None:\n Xf = sl.rfftn(X, axes=self.cri.axisN)\n return self.Wtv[..., np.newaxis] * sl.irfftn(self.Gf *\n Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN)\n\n\n\n def cnst_A0T(self, X):\n r\"\"\"Compute :math:`A_0^T \\mathbf{x}` where :math:`A_0 \\mathbf{x}`\n is a component of the ADMM problem constraint. In this case\n :math:`A_0^T \\mathbf{x} = (\\Gamma_0^T \\;\\; \\Gamma_1^T \\;\\; \\ldots )\n \\mathbf{x}`.\n \"\"\"\n\n Xf = sl.rfftn(X, axes=self.cri.axisN)\n return self.Wtv[..., np.newaxis] * sl.irfftn(np.conj(self.Gf) *\n Xf[..., 0:-1], self.cri.Nv, axes=self.cri.axisN)\n\n\n\n def cnst_A1(self, X):\n r\"\"\"Compute :math:`A_1 \\mathbf{x}` component of ADMM problem\n constraint. In this case :math:`A_1 \\mathbf{x} = \\mathbf{x}`.\n \"\"\"\n\n return X[..., np.newaxis]\n\n\n\n def cnst_A1T(self, X):\n r\"\"\"Compute :math:`A_1^T \\mathbf{x}` where :math:`A_1 \\mathbf{x}`\n is a component of the ADMM problem constraint. In this case\n :math:`A_1^T \\mathbf{x} = \\mathbf{x}`.\n \"\"\"\n\n return X[..., -1]\n\n\n\n def cnst_A(self, X, Xf=None):\n r\"\"\"Compute :math:`A \\mathbf{x}` component of ADMM problem\n constraint. In this case :math:`A \\mathbf{x} = (\\Gamma_0^T \\;\\;\n \\Gamma_1^T \\;\\; \\ldots \\;\\; I)^T \\mathbf{x}`.\n \"\"\"\n\n return np.concatenate((self.cnst_A0(X, Xf),\n self.cnst_A1(X)), axis=-1)\n\n\n\n def cnst_AT(self, X):\n r\"\"\"Compute :math:`A^T \\mathbf{x}` where :math:`A \\mathbf{x}` is\n a component of ADMM problem constraint. In this case\n :math:`A^T \\mathbf{x} = (\\Gamma_0^T \\;\\; \\Gamma_1^T \\;\\; \\ldots\n \\;\\; I) \\mathbf{x}`.\n \"\"\"\n\n return np.sum(self.cnst_A0T(X), axis=-1) + self.cnst_A1T(X)\n\n\n\n def cnst_B(self, Y):\n r\"\"\"Compute :math:`B \\mathbf{y}` component of ADMM problem constraint.\n In this case :math:`B \\mathbf{y} = -\\mathbf{y}`.\n \"\"\"\n\n return -Y\n\n\n\n def cnst_c(self):\n r\"\"\"Compute constant component :math:`\\mathbf{c}` of ADMM problem\n constraint. In this case :math:`\\mathbf{c} = \\mathbf{0}`.\n \"\"\"\n\n return 0.0\n\n\n\n def relax_AX(self):\n \"\"\"Implement relaxation if option ``RelaxParam`` != 1.0.\"\"\"\n\n # We need to keep the non-relaxed version of AX since it is\n # required for computation of primal residual r\n self.AXnr = self.cnst_A(self.X, self.Xf)\n if self.rlx == 1.0:\n # If RelaxParam option is 1.0 there is no relaxation\n self.AX = self.AXnr\n else:\n # Avoid calling cnst_c() more than once in case it is expensive\n # (e.g. 
due to allocation of a large block of memory)\n if not hasattr(self, '_cnst_c'):\n self._cnst_c = self.cnst_c()\n # Compute relaxed version of AX\n alpha = self.rlx\n self.AX = alpha*self.AXnr - (1-alpha)*(self.cnst_B(self.Y) -\n self._cnst_c)\n\n\n\n def reconstruct(self, X=None):\n \"\"\"Reconstruct representation.\"\"\"\n\n if X is None:\n Xf = self.Xf\n else:\n Xf = sl.rfftn(X, None, self.cri.axisN)\n Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)\n return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)\n\n\n\n\n\nclass ConvBPDNVectorTV(ConvBPDNScalarTV):\n r\"\"\"**Class inheritance structure**\n\n .. inheritance-diagram:: ConvBPDNVectorTV\n :parts: 2\n\n |\n\n ADMM algorithm for an extension of Convolutional BPDN including\n a term penalising the vector total variation of the coefficient maps\n :cite:`wohlberg-2017-convolutional`.\n\n Solve the optimisation problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{x} \\; \\frac{1}{2}\n \\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s}\n \\right\\|_2^2 + \\lambda \\sum_m \\| \\mathbf{x}_m \\|_1 +\n \\mu \\left\\| \\sqrt{\\sum_m \\sum_i (G_i \\mathbf{x}_m)^2} \\right\\|_1\n \\;\\;,\n\n where :math:`G_i` is an operator computing the derivative along index\n :math:`i`, via the ADMM problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\left\\| D \\mathbf{x} -\n \\mathbf{s} \\right\\|_2^2 + \\lambda\n \\| \\mathbf{y}_L \\|_1 + \\mu \\left\\| \\sqrt{\\sum_{i=0}^{L-1}\n I_B \\mathbf{y}_i^2} \\right\\|_1 \\quad \\text{ such that } \\quad\n \\left( \\begin{array}{c} \\Gamma_0 \\\\ \\Gamma_1 \\\\ \\vdots \\\\ I\n \\end{array} \\right) \\mathbf{x} =\n \\left( \\begin{array}{c} \\mathbf{y}_0 \\\\\n \\mathbf{y}_1 \\\\ \\vdots \\\\ \\mathbf{y}_L \\end{array}\n \\right) \\;\\;,\n\n where\n\n .. math::\n D = \\left( \\begin{array}{ccc} D_0 & D_1 & \\ldots \\end{array} \\right)\n \\qquad\n \\mathbf{x} = \\left( \\begin{array}{c} \\mathbf{x}_0 \\\\ \\mathbf{x}_1 \\\\\n \\vdots \\end{array} \\right) \\qquad\n \\Gamma_i = \\left( \\begin{array}{ccc}\n G_i & 0 & \\ldots \\\\ 0 & G_i & \\ldots \\\\ \\vdots & \\vdots & \\ddots\n \\end{array} \\right) \\qquad\n I_B = \\left( \\begin{array}{ccc} I & I & \\ldots \\end{array} \\right)\n \\;\\;.\n\n\n For multi-channel signals with a single-channel dictionary, vector TV is\n applied jointly over the coefficient maps for channel :math:`c` and\n filter :math:`m`. Since multi-channel signals with a multi-channel\n dictionary also have one coefficient map per filter, the behaviour is\n the same as for single-channel signals.\n\n\n After termination of the :meth:`solve` method, attribute :attr:`itstat`\n is a list of tuples representing statistics of each iteration. 
The\n fields of the named tuple ``IterationStats`` are:\n\n ``Iter`` : Iteration number\n\n ``ObjFun`` : Objective function value\n\n ``DFid`` : Value of data fidelity term :math:`(1/2) \\| \\sum_m\n \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s} \\|_2^2`\n\n ``RegL1`` : Value of regularisation term :math:`\\sum_m \\|\n \\mathbf{x}_m \\|_1`\n\n ``RegTV`` : Value of regularisation term :math:`\\left\\|\n \\sqrt{\\sum_m \\sum_i (G_i \\mathbf{x}_m)^2} \\right\\|_1`\n\n ``PrimalRsdl`` : Norm of primal residual\n\n ``DualRsdl`` : Norm of dual residual\n\n ``EpsPrimal`` : Primal residual stopping tolerance\n :math:`\\epsilon_{\\mathrm{pri}}`\n\n ``EpsDual`` : Dual residual stopping tolerance\n :math:`\\epsilon_{\\mathrm{dua}}`\n\n ``Rho`` : Penalty parameter\n\n ``XSlvRelRes`` : Relative residual of X step solver\n\n ``Time`` : Cumulative run time\n \"\"\"\n\n\n def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):\n \"\"\"\n Initialise a ConvBPDNVectorTV object with problem parameters.\n\n\n |\n\n **Call graph**\n\n .. image:: _static/jonga/cbpdnvtv_init.svg\n :width: 20%\n :target: _static/jonga/cbpdnvtv_init.svg\n\n |\n\n\n Parameters\n ----------\n D : array_like\n Dictionary matrix\n S : array_like\n Signal vector or matrix\n lmbda : float\n Regularisation parameter (l1)\n mu : float\n Regularisation parameter (gradient)\n opt : :class:`ConvBPDNScalarTV.Options` object\n Algorithm options\n dimK : 0, 1, or None, optional (default None)\n Number of dimensions in input signal corresponding to multiple\n independent signals\n dimN : int, optional (default 2)\n Number of spatial dimensions\n \"\"\"\n\n super(ConvBPDNVectorTV, self).__init__(D, S, lmbda, mu, opt,\n dimK, dimN)\n\n\n\n def ystep(self):\n r\"\"\"Minimise Augmented Lagrangian with respect to\n :math:`\\mathbf{y}`.\"\"\"\n\n AXU = self.AX + self.U\n self.Y[..., 0:-1] = sl.shrink2(AXU[..., 0:-1], self.mu/self.rho,\n axis=(self.cri.axisM, -1))\n self.Y[..., -1] = sl.shrink1(AXU[..., -1],\n (self.lmbda/self.rho) * self.Wl1)\n\n\n\n def obfn_reg(self):\n \"\"\"Compute regularisation term and contribution to objective\n function.\n \"\"\"\n\n rl1 = linalg.norm((self.Wl1 * self.obfn_g1var()).ravel(), 1)\n rtv = np.sum(np.sqrt(np.sum(self.obfn_g0var()**2,\n axis=(self.cri.axisM, -1))))\n return (self.lmbda*rl1 + self.mu*rtv, rl1, rtv)\n\n\n\n\n\nclass ConvBPDNRecTV(admm.ADMM):\n r\"\"\"**Class inheritance structure**\n\n .. inheritance-diagram:: ConvBPDNRecTV\n :parts: 2\n\n |\n\n ADMM algorithm for an extension of Convolutional BPDN including\n terms penalising the total variation of the reconstruction from the\n sparse representation :cite:`wohlberg-2017-convolutional`.\n\n Solve the optimisation problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{x} \\; \\frac{1}{2}\n \\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s}\n \\right\\|_2^2 + \\lambda \\sum_m \\| \\mathbf{x}_m \\|_1 +\n \\mu \\left\\| \\sqrt{\\sum_i \\left( G_i \\left( \\sum_m \\mathbf{d}_m *\n \\mathbf{x}_m \\right) \\right)^2} \\right\\|_1 \\;\\;,\n\n where :math:`G_i` is an operator computing the derivative along index\n :math:`i`, via the ADMM problem\n\n .. 
math::\n \\mathrm{argmin}_\\mathbf{x} \\; (1/2) \\left\\| D\n \\mathbf{x} - \\mathbf{s} \\right\\|_2^2 +\n \\lambda \\| \\mathbf{y}_0 \\|_1 + \\mu \\left\\|\n \\sqrt{\\sum_{i=1}^L \\mathbf{y}_i^2} \\right\\|_1 \\quad \\text{ such that }\n \\quad \\left( \\begin{array}{c} I \\\\ \\Gamma_0 \\\\ \\Gamma_1 \\\\ \\vdots \\\\\n \\Gamma_{L-1} \\end{array} \\right) \\mathbf{x} =\n \\left( \\begin{array}{c} \\mathbf{y}_0 \\\\\n \\mathbf{y}_1 \\\\ \\mathbf{y}_2 \\\\ \\vdots \\\\ \\mathbf{y}_L \\end{array}\n \\right) \\;\\;,\n\n where\n\n .. math::\n D = \\left( \\begin{array}{ccc} D_0 & D_1 & \\ldots \\end{array} \\right)\n \\qquad\n \\mathbf{x} = \\left( \\begin{array}{c} \\mathbf{x}_0 \\\\ \\mathbf{x}_1 \\\\\n \\vdots \\end{array} \\right) \\qquad\n \\Gamma_i = \\left( \\begin{array}{ccc} G_{i,0} & G_{i,1} & \\ldots\n \\end{array} \\right) \\;\\;,\n\n and linear operator :math:`G_{i,m}` is defined such that\n\n .. math::\n G_{i,m} \\mathbf{x} = \\mathbf{g}_i * \\mathbf{d}_m * \\mathbf{x}\n \\;\\;,\n\n where :math:`\\mathbf{g}_i` is the filter corresponding to :math:`G_i`,\n i.e. :math:`G_i \\mathbf{x} = \\mathbf{g}_i * \\mathbf{x}`.\n\n\n For multi-channel signals, vector TV is applied jointly over the\n reconstructions of all channels.\n\n\n After termination of the :meth:`solve` method, attribute :attr:`itstat`\n is a list of tuples representing statistics of each iteration. The\n fields of the named tuple ``IterationStats`` are:\n\n ``Iter`` : Iteration number\n\n ``ObjFun`` : Objective function value\n\n ``DFid`` : Value of data fidelity term :math:`(1/2) \\| \\sum_m\n \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s} \\|_2^2`\n\n ``RegL1`` : Value of regularisation term :math:`\\sum_m \\|\n \\mathbf{x}_m \\|_1`\n\n ``RegTV`` : Value of regularisation term :math:`\\left\\|\n \\sqrt{\\sum_i \\left( G_i \\left( \\sum_m \\mathbf{d}_m *\n \\mathbf{x}_m \\right) \\right)^2} \\right\\|_1`\n\n ``PrimalRsdl`` : Norm of primal residual\n\n ``DualRsdl`` : Norm of dual residual\n\n ``EpsPrimal`` : Primal residual stopping tolerance\n :math:`\\epsilon_{\\mathrm{pri}}`\n\n ``EpsDual`` : Dual residual stopping tolerance\n :math:`\\epsilon_{\\mathrm{dua}}`\n\n ``Rho`` : Penalty parameter\n\n ``XSlvRelRes`` : Relative residual of X step solver\n\n ``Time`` : Cumulative run time\n \"\"\"\n\n\n class Options(cbpdn.ConvBPDN.Options):\n r\"\"\"ConvBPDNScalarTV algorithm options\n\n Options include all of those defined in\n :class:`.admm.cbpdn.ConvBPDN.Options`, together with additional\n options:\n\n ``TVWeight`` : An array of weights :math:`w_m` for the term\n penalising the gradient of the coefficient maps. If this\n option is defined, the regularization term is :math:`\\left\\|\n \\sqrt{\\sum_i \\left( G_i \\left( \\sum_m w_m (\\mathbf{d}_m *\n \\mathbf{x}_m) \\right) \\right)^2} \\right\\|_1` where :math:`w_m`\n is the weight for filter index :math:`m`. 
The array should be an\n :math:`M`-vector where :math:`M` is the number of filters in the\n dictionary.\n \"\"\"\n\n defaults = copy.deepcopy(cbpdn.ConvBPDN.Options.defaults)\n defaults.update({'TVWeight' : 1.0})\n\n\n def __init__(self, opt=None):\n \"\"\"Initialise ConvBPDNRecTV algorithm options object\"\"\"\n\n if opt is None:\n opt = {}\n cbpdn.ConvBPDN.Options.__init__(self, opt)\n\n\n\n itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1', 'RegTV')\n itstat_fields_extra = ('XSlvRelRes',)\n hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'), u('RegTV'))\n hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid',\n u('Regℓ1'): 'RegL1', u('RegTV'): 'RegTV'}\n\n\n def __init__(self, D, S, lmbda, mu=0.0, opt=None, dimK=None, dimN=2):\n \"\"\"\n Initialise a ConvBPDNRecTV object with problem parameters.\n\n\n |\n\n **Call graph**\n\n .. image:: _static/jonga/cbpdnrtv_init.svg\n :width: 20%\n :target: _static/jonga/cbpdnrtv_init.svg\n\n |\n\n\n Parameters\n ----------\n D : array_like\n Dictionary matrix\n S : array_like\n Signal vector or matrix\n lmbda : float\n Regularisation parameter (l1)\n mu : float\n Regularisation parameter (gradient)\n opt : :class:`ConvBPDNRecTV.Options` object\n Algorithm options\n dimK : 0, 1, or None, optional (default None)\n Number of dimensions in input signal corresponding to multiple\n independent signals\n dimN : int, optional (default 2)\n Number of spatial dimensions\n \"\"\"\n\n if opt is None:\n opt = ConvBPDNRecTV.Options()\n\n # Infer problem dimensions and set relevant attributes of self\n self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)\n\n # Call parent class __init__\n Nx = np.product(self.cri.shpX)\n yshape = list(self.cri.shpX)\n yshape[self.cri.axisM] += len(self.cri.axisN) * self.cri.Cd\n super(ConvBPDNRecTV, self).__init__(Nx, yshape, yshape,\n S.dtype, opt)\n\n # Set l1 term scaling and weight array\n self.lmbda = self.dtype.type(lmbda)\n self.Wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)\n self.Wl1 = self.Wl1.reshape(cr.l1Wshape(self.Wl1, self.cri))\n\n self.mu = self.dtype.type(mu)\n if hasattr(opt['TVWeight'], 'ndim') and opt['TVWeight'].ndim > 0:\n self.Wtv = np.asarray(opt['TVWeight'].reshape((1,)*(dimN+2) +\n opt['TVWeight'].shape), dtype=self.dtype)\n else:\n # Wtv is a scalar: no need to change shape\n self.Wtv = self.dtype.type(opt['TVWeight'])\n\n # Set penalty parameter\n self.set_attr('rho', opt['rho'], dval=(50.0*self.lmbda + 1.0),\n dtype=self.dtype)\n\n # Set rho_xi attribute\n self.set_attr('rho_xi', opt['AutoRho', 'RsdlTarget'], dval=1.0,\n dtype=self.dtype)\n\n # Reshape D and S to standard layout\n self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)\n self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)\n\n # Compute signal in DFT domain\n self.Sf = sl.rfftn(self.S, None, self.cri.axisN)\n\n self.Gf, GHGf = sl.GradientFilters(self.cri.dimN+3, self.cri.axisN,\n self.cri.Nv, dtype=self.dtype)\n\n # Initialise byte-aligned arrays for pyfftw\n self.YU = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)\n xfshp = list(self.cri.shpX)\n xfshp[dimN-1] = xfshp[dimN-1]//2 + 1\n self.Xf = sl.pyfftw_empty_aligned(xfshp,\n dtype=sl.complex_dtype(self.dtype))\n\n self.setdict()\n\n\n\n def setdict(self, D=None):\n \"\"\"Set dictionary array.\"\"\"\n\n if D is not None:\n self.D = np.asarray(D, dtype=self.dtype)\n self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)\n\n self.GDf = self.Gf * (self.Wtv * self.Df)[..., np.newaxis]\n\n # Compute D^H S\n self.DSf = np.conj(self.Df) * self.Sf\n if self.cri.Cd 
> 1:\n self.DSf = np.sum(self.DSf, axis=self.cri.axisC, keepdims=True)\n\n\n\n def block_sep0(self, Y):\n \"\"\"Separate variable into component corresponding to Y0 in Y.\"\"\"\n\n return Y[..., 0:self.cri.M]\n\n\n\n def block_sep1(self, Y):\n \"\"\"Separate variable into component corresponding to Y1 in Y.\"\"\"\n\n Y1 = Y[..., self.cri.M:]\n\n # If cri.Cd > 1 (multi-channel dictionary), we need to undo the\n # reshape performed in block_cat\n if self.cri.Cd > 1:\n shp = list(Y1.shape)\n shp[self.cri.axisM] = self.cri.dimN\n shp[self.cri.axisC] = self.cri.Cd\n Y1 = Y1.reshape(shp)\n\n # Axes are swapped here for similar reasons to those\n # motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_sep0\n Y1 = np.swapaxes(Y1[..., np.newaxis], self.cri.axisM, -1)\n\n return Y1\n\n\n\n def block_cat(self, Y0, Y1):\n \"\"\"Concatenate components corresponding to Y0 and Y1 blocks\n into Y.\n \"\"\"\n\n # Axes are swapped here for similar reasons to those\n # motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_cat\n Y1sa = np.swapaxes(Y1, self.cri.axisM, -1)[..., 0]\n\n # If cri.Cd > 1 (multi-channel dictionary) Y0 has a singleton\n # channel axis but Y1 has a non-singleton channel axis. To make\n # it possible to concatenate Y0 and Y1, we reshape Y1 by a\n # partial ravel of axisM and axisC onto axisM.\n if self.cri.Cd > 1:\n shp = list(Y1sa.shape)\n shp[self.cri.axisM] *= shp[self.cri.axisC]\n shp[self.cri.axisC] = 1\n Y1sa = Y1sa.reshape(shp)\n\n return np.concatenate((Y0, Y1sa), axis=self.cri.axisM)\n\n\n\n def xstep(self):\n r\"\"\"Minimise Augmented Lagrangian with respect to\n :math:`\\mathbf{x}`.\"\"\"\n\n self.YU[:] = self.Y - self.U\n YUf = sl.rfftn(self.YU, None, self.cri.axisN)\n YUf0 = self.block_sep0(YUf)\n YUf1 = self.block_sep1(YUf)\n\n b = self.rho * np.sum(np.conj(self.GDf) * YUf1, axis=-1)\n if self.cri.Cd > 1:\n b = np.sum(b, axis=self.cri.axisC, keepdims=True)\n b += self.DSf + self.rho*YUf0\n\n # Concatenate multiple GDf components on axisC. For\n # single-channel signals, and multi-channel signals with a\n # single-channel dictionary, we end up with sl.solvemdbi_ism\n # solving a linear system of rank dimN+1 (corresponding to the\n # dictionary and a gradient operator per spatial dimension) plus\n # an identity. For multi-channel signals with a multi-channel\n # dictionary, we end up with sl.solvemdbi_ism solving a linear\n # system of rank C.d (dimN+1) (corresponding to the dictionary\n # and a gradient operator per spatial dimension for each\n # channel) plus an identity.\n\n # The structure of the linear system to be solved depends on the\n # number of channels in the signal and dictionary. Both branches are\n # the same in the single-channel signal case (the choice of handling\n # it via the 'else' branch is somewhat arbitrary).\n if self.cri.C > 1 and self.cri.Cd == 1:\n # Concatenate multiple GDf components on the final axis\n # of GDf (that indexes the number of gradient operators). For\n # multi-channel signals with a single-channel dictionary,\n # sl.solvemdbi_ism has to solve a linear system of rank dimN+1\n # (corresponding to the dictionary and a gradient operator per\n # spatial dimension)\n DfGDf = np.concatenate([self.Df[..., np.newaxis],] +\n [np.sqrt(self.rho)*self.GDf[..., k, np.newaxis] for k\n in range(self.GDf.shape[-1])], axis=-1)\n self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b[..., np.newaxis],\n self.cri.axisM, -1)[..., 0]\n else:\n # Concatenate multiple GDf components on axisC. 
For multi-channel\n # signals with a multi-channel dictionary, sl.solvemdbi_ism has\n # to solve a linear system of rank C.d (dimN+1) (corresponding to\n # the dictionary and a gradient operator per spatial dimension\n # for each channel) plus an identity.\n DfGDf = np.concatenate([self.Df,] +\n [np.sqrt(self.rho)*self.GDf[..., k] for k\n in range(self.GDf.shape[-1])], axis=self.cri.axisC)\n self.Xf[:] = sl.solvemdbi_ism(DfGDf, self.rho, b, self.cri.axisM,\n self.cri.axisC)\n\n self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)\n\n if self.opt['LinSolveCheck']:\n if self.cri.C > 1 and self.cri.Cd == 1:\n Dop = lambda x: sl.inner(DfGDf, x[..., np.newaxis],\n axis=self.cri.axisM)\n DHop = lambda x: sl.inner(np.conj(DfGDf), x, axis=-1)\n ax = DHop(Dop(self.Xf))[..., 0] + self.rho*self.Xf\n else:\n Dop = lambda x: sl.inner(DfGDf, x, axis=self.cri.axisM)\n DHop = lambda x: sl.inner(np.conj(DfGDf), x,\n axis=self.cri.axisC)\n ax = DHop(Dop(self.Xf)) + self.rho*self.Xf\n self.xrrs = sl.rrs(ax, b)\n else:\n self.xrrs = None\n\n\n\n def ystep(self):\n r\"\"\"Minimise Augmented Lagrangian with respect to\n :math:`\\mathbf{y}`.\"\"\"\n\n AXU = self.AX + self.U\n self.block_sep0(self.Y)[:] = sl.shrink1(self.block_sep0(AXU),\n (self.lmbda/self.rho) * self.Wl1)\n self.block_sep1(self.Y)[:] = sl.shrink2(self.block_sep1(AXU),\n self.mu/self.rho, axis=(self.cri.axisC, -1))\n\n\n\n def obfn_fvarf(self):\n \"\"\"Variable to be evaluated in computing data fidelity term,\n depending on ``fEvalX`` option value.\n \"\"\"\n\n return self.Xf if self.opt['fEvalX'] else \\\n sl.rfftn(self.block_sep0(self.Y), None, self.cri.axisN)\n\n\n\n def var_y0(self):\n r\"\"\"Get :math:`\\mathbf{y}_0` variable, the block of\n :math:`\\mathbf{y}` corresponding to the identity operator.\"\"\"\n\n return self.block_sep0(self.Y)\n\n\n\n def var_y1(self):\n r\"\"\"Get :math:`\\mathbf{y}_1` variable, consisting of all blocks of\n :math:`\\mathbf{y}` corresponding to a gradient operator.\"\"\"\n\n return self.block_sep1(self.Y)\n\n\n\n def var_yx(self):\n r\"\"\"Get component block of :math:`\\mathbf{y}` that is constrained to\n be equal to :math:`\\mathbf{x}`\"\"\"\n\n return self.var_y0()\n\n\n\n def var_yx_idx(self):\n r\"\"\"Get index expression for component block of :math:`\\mathbf{y}`\n that is constrained to be equal to :math:`\\mathbf{x}`.\n \"\"\"\n\n return np.s_[..., 0:self.cri.M]\n\n\n\n def getmin(self):\n \"\"\"Get minimiser after optimisation.\"\"\"\n\n return self.X if self.opt['ReturnX'] else self.var_y0()\n\n\n\n def getcoef(self):\n \"\"\"Get final coefficient array.\"\"\"\n\n return self.getmin()\n\n\n\n def obfn_g0var(self):\n \"\"\"Variable to be evaluated in computing the TV regularisation\n term, depending on the ``gEvalY`` option value.\n \"\"\"\n\n # Use of self.block_sep0(self.AXnr) instead of self.cnst_A0(self.X)\n # reduces number of calls to self.cnst_A0\n return self.var_y0() if self.opt['gEvalY'] else \\\n self.block_sep0(self.AXnr)\n\n\n\n def obfn_g1var(self):\n r\"\"\"Variable to be evaluated in computing the :math:`\\ell_1`\n regularisation term, depending on the ``gEvalY`` option value.\n \"\"\"\n\n # Use of self.block_sep1(self.AXnr) instead of self.cnst_A1(self.X)\n # reduces number of calls to self.cnst_A0\n return self.var_y1() if self.opt['gEvalY'] else \\\n self.block_sep1(self.AXnr)\n\n\n\n def obfn_gvar(self):\n \"\"\"Method providing compatibility with the interface of\n :class:`.admm.cbpdn.ConvBPDN` and derived classes in order to make\n this class compatible with classes such as 
:class:`.AddMaskSim`.\n \"\"\"\n\n return self.obfn_g1var()\n\n\n\n def eval_objfn(self):\n \"\"\"Compute components of objective function as well as total\n contribution to objective function.\n \"\"\"\n\n dfd = self.obfn_dfd()\n reg = self.obfn_reg()\n obj = dfd + reg[0]\n return (obj, dfd) + reg[1:]\n\n\n\n def obfn_dfd(self):\n r\"\"\"Compute data fidelity term :math:`(1/2) \\| \\sum_m \\mathbf{d}_m *\n \\mathbf{x}_m - \\mathbf{s} \\|_2^2`.\n \"\"\"\n\n Ef = sl.inner(self.Df, self.obfn_fvarf(), axis=self.cri.axisM) \\\n - self.Sf\n return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN)/2.0\n\n\n\n def obfn_reg(self):\n \"\"\"Compute regularisation term and contribution to objective\n function.\n \"\"\"\n\n rl1 = linalg.norm((self.Wl1 * self.obfn_g0var()).ravel(), 1)\n rtv = np.sum(np.sqrt(np.sum(self.obfn_g1var()**2,\n axis=(self.cri.axisC, -1))))\n return (self.lmbda*rl1 + self.mu*rtv, rl1, rtv)\n\n\n\n def itstat_extra(self):\n \"\"\"Non-standard entries for the iteration stats record tuple.\"\"\"\n\n return (self.xrrs,)\n\n\n def cnst_A0(self, X):\n r\"\"\"Compute :math:`A_0 \\mathbf{x}` component of ADMM problem\n constraint. In this case :math:`A_0 \\mathbf{x} = \\mathbf{x}`.\n \"\"\"\n\n return X\n\n\n\n def cnst_A0T(self, Y0):\n r\"\"\"Compute :math:`A_0^T \\mathbf{y}_0` component of\n :math:`A^T \\mathbf{y}`. In this case :math:`A_0^T \\mathbf{y}_0 =\n \\mathbf{y}_0`, i.e. :math:`A_0 = I`.\n \"\"\"\n\n return Y0\n\n\n\n def cnst_A1(self, X, Xf=None):\n r\"\"\"Compute :math:`A_1 \\mathbf{x}` component of ADMM problem\n constraint. In this case :math:`A_1 \\mathbf{x} = (\\Gamma_0^T \\;\\;\n \\Gamma_1^T \\;\\; \\ldots )^T \\mathbf{x}`.\n \"\"\"\n\n if Xf is None:\n Xf = sl.rfftn(X, axes=self.cri.axisN)\n return sl.irfftn(sl.inner(self.GDf, Xf[..., np.newaxis],\n axis=self.cri.axisM), self.cri.Nv, self.cri.axisN)\n\n\n\n def cnst_A1T(self, Y1):\n r\"\"\"Compute :math:`A_1^T \\mathbf{y}_1` component of\n :math:`A^T \\mathbf{y}`. In this case :math:`A_1^T \\mathbf{y}_1 =\n (\\Gamma_0^T \\;\\; \\Gamma_1^T \\;\\; \\ldots) \\mathbf{y}_1`.\n \"\"\"\n\n Y1f = sl.rfftn(Y1, None, axes=self.cri.axisN)\n return sl.irfftn(np.conj(self.GDf) * Y1f, self.cri.Nv,\n self.cri.axisN)\n\n\n\n def cnst_A(self, X, Xf=None):\n r\"\"\"Compute :math:`A \\mathbf{x}` component of ADMM problem\n constraint. In this case :math:`A \\mathbf{x} = (I \\;\\; \\Gamma_0^T\n \\;\\; \\Gamma_1^T \\;\\; \\ldots)^T \\mathbf{x}`.\n \"\"\"\n\n return self.block_cat(self.cnst_A0(X), self.cnst_A1(X, Xf))\n\n\n\n def cnst_AT(self, Y):\n r\"\"\"Compute :math:`A^T \\mathbf{y}`. In this case\n :math:`A^T \\mathbf{y} = (I \\;\\; \\Gamma_0^T \\;\\; \\Gamma_1^T \\;\\;\n \\ldots) \\mathbf{y}`.\n \"\"\"\n\n return self.cnst_A0T(self.block_sep0(Y)) + \\\n np.sum(self.cnst_A1T(self.block_sep1(Y)), axis=-1)\n\n\n\n def cnst_B(self, Y):\n r\"\"\"Compute :math:`B \\mathbf{y}` component of ADMM problem\n constraint. In this case :math:`B \\mathbf{y} = -\\mathbf{y}`.\n \"\"\"\n\n return -Y\n\n\n\n def cnst_c(self):\n r\"\"\"Compute constant component :math:`\\mathbf{c}` of ADMM problem\n constraint. 
In this case :math:`\\mathbf{c} = \\mathbf{0}`.\n \"\"\"\n\n return 0.0\n\n\n\n def relax_AX(self):\n \"\"\"Implement relaxation if option ``RelaxParam`` != 1.0.\"\"\"\n\n # We need to keep the non-relaxed version of AX since it is\n # required for computation of primal residual r\n self.AXnr = self.cnst_A(self.X, self.Xf)\n if self.rlx == 1.0:\n # If RelaxParam option is 1.0 there is no relaxation\n self.AX = self.AXnr\n else:\n # Avoid calling cnst_c() more than once in case it is expensive\n # (e.g. due to allocation of a large block of memory)\n if not hasattr(self, '_cnst_c'):\n self._cnst_c = self.cnst_c()\n # Compute relaxed version of AX\n alpha = self.rlx\n self.AX = alpha*self.AXnr - (1-alpha)*(self.cnst_B(self.Y) -\n self._cnst_c)\n\n\n\n def reconstruct(self, X=None):\n \"\"\"Reconstruct representation.\"\"\"\n\n if X is None:\n Xf = self.Xf\n else:\n Xf = sl.rfftn(X, None, self.cri.axisN)\n Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)\n return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)\n" ]
[ [ "numpy.random.normal", "numpy.vstack", "numpy.random.seed" ], [ "numpy.swapaxes", "numpy.product", "numpy.conj", "numpy.sqrt", "numpy.asarray", "numpy.concatenate", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
francoispichard/covid_twitter_bert_imbalanced
[ "c122a85bdcd16b051179146397ee94f3e0c597c0" ]
[ "utils/misc.py" ]
[ "import logging\nimport json\nimport os\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nfrom contextlib import contextmanager\nimport fcntl\n\n\nlogger = logging.getLogger(__name__)\n\ndef save_to_json(data, f_name):\n with tf.io.gfile.GFile(f_name, 'w') as writer:\n writer.write(json.dumps(data, cls=JSONEncoder, indent=4))\n\nclass JSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(MyEncoder, self).default(obj)\n\nclass ArgParseDefault(argparse.ArgumentParser):\n \"\"\"Simple wrapper which shows defaults in help\"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\ndef add_bool_arg(parser, name, default=False, help=''):\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument('--' + name, dest=name, action='store_true', help=help)\n group.add_argument('--do_not_' + name, dest=name, action='store_false')\n parser.set_defaults(**{name: default})\n\n@contextmanager\ndef file_lock(fd):\n \"\"\" Locks FD before entering the context, always releasing the lock. \"\"\"\n try:\n fcntl.flock(fd, fcntl.LOCK_EX)\n yield\n finally:\n fcntl.flock(fd, fcntl.LOCK_UN)\n" ]
[ [ "tensorflow.io.gfile.GFile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cemkaraoguz/UFLDLTutorial
[ "4046b7d23642b3af3522af2ef1fce4b79fd9af7e" ]
[ "common/Visualization.py" ]
[ "''' UFL_Visualization.py\n\t\n\tMethods for visualization\n\t\n\tAuthor: Cem Karaoguz\n\tDate: 19.02.2015\n\tVersion: 1.0\n'''\n\nimport os, struct, sys\nimport numpy as np\nimport pylab as pl\n\ndef displayNetwork(A, kernelWidth=-1, kernelHeight=-1, opt_normalize=True, opt_graycolor=True, cols=-1, opt_colmajor=False):\n\t'''\n\t\tThis function visualizes filters in matrix A. Each column of A is a\n\t\tfilter. We will reshape each column into a square image and visualizes\n\t\ton each cell of the visualization panel. \n\t\t\n\t\topt_normalize: whether we need to normalize the filter so that all of\n\t\tthem can have similar contrast. Default value is true.\n\t\topt_graycolor: whether we use gray as the heat map. Default is true.\n\t\tcols: how many columns are there in the display. Default value is 4.\n\t\topt_colmajor: you can switch convention to row major for A. In that\n\t\tcase, each row of A is a filter. Default value is false.\n\t'''\n\n\t# rescale\n\tA = A - np.mean(A[:]);\n\n\t# compute rows, cols\n\t[L, M] = np.shape(A);\n\tif (kernelWidth<0 or kernelHeight<0):\n\t\t#sz = sqrt(L);\n\t\tw = int(np.sqrt(L));\n\t\th = int(np.sqrt(L));\n\telse:\n\t\tw = kernelWidth;\n\t\th = kernelHeight;\n\t\t\n\tbuf = 1;\n\t\t\n\tif cols<=0:\n\t\tif np.floor(np.sqrt(M))**2 != M:\n\t\t\tn = np.ceil(np.sqrt(M));\n\t\t\twhile np.mod(M, n)!=0 and n<1.2*np.sqrt(M):\n\t\t\t\tn = n+1;\n\t\t\tm = int(np.ceil(M/n));\n\t\telse:\n\t\t\tn = int(np.sqrt(M));\n\t\t\tm = int(n);\n\telse:\n\t\tn = int(cols);\n\t\tm = int(np.ceil(M/n));\n\n\tarray = -1 * np.ones([buf+m*(w+buf), buf+n*(h+buf)]);\n\t\n\tif ~opt_graycolor:\n\t\tarray = 0.1 * array;\n\t\n\tm = int(m);\n\tn = int(n);\n\tif ~opt_colmajor:\n\t\tk = 0;\n\t\tfor i in range(m):\n\t\t\tfor j in range(n):\n\t\t\t\tif (k>=M): \n\t\t\t\t\tcontinue; \n\t\t\t\tclim = np.max(abs(A[:,k]));\n\t\t\t\tif opt_normalize:\n\t\t\t\t\tarray[buf+(i)*(w+buf):buf+(i)*(w+buf)+w, buf+(j)*(h+buf):buf+(j)*(h+buf)+h] = np.reshape(A[:,k], [w, h])/clim;\n\t\t\t\telse:\n\t\t\t\t\tarray[buf+(i)*(w+buf):buf+(i)*(w+buf)+w, buf+(j)*(h+buf):buf+(j)*(h+buf)+h] = np.reshape(A[:,k], [w, h])/np.max(abs(A[:]));\n\t\t\t\tk = k+1;\n\t\t\t#end j\n\t\t#end i\n\telse:\n\t\tk = 0;\n\t\tfor j in range(n):\n\t\t\tfor i in range(m):\n\t\t\t\tif k>=M: \n\t\t\t\t\tcontinue; \n\t\t\t\tclim = np.max(abs(A[:,k]));\n\t\t\t\tif opt_normalize:\n\t\t\t\t\tarray[buf+(i)*(w+buf):buf+(i)*(w+buf)+w, buf+(j)*(h+buf):buf+(j)*(h+buf)+h] = np.reshape(A[:,k], [w, h])/clim;\n\t\t\t\telse:\n\t\t\t\t\tarray[buf+(i)*(w+buf):buf+(i)*(w+buf)+w, buf+(j)*(h+buf):buf+(j)*(h+buf)+h] = np.reshape(A[:,k], [w, h])/np.max(abs(A[:]));\n\t\t\t\tk = k+1;\n\t\t\t#end i\n\t\t#end j\n\t#end\n\n\tif opt_graycolor:\n\t\t#h = pl.imshow(array,'EraseMode','none',[-1 1]);\n\t\th = pl.imshow(array, cmap='gray');\n\telse:\n\t\t#h = pl.imshow(array,'EraseMode','none',[-1 1]);\n\t\th = pl.imshow(array);\n\n\tpl.axis('image')\n\tpl.axis('off')\n\n\tpl.show();\n\ndef displayColorNetwork(A):\n\t''' \n\tDisplay receptive field(s) or basis vector(s) for image patches \n\tA\t: the basis, with patches as column vectors\n\tIn case the midpoint is not set at 0, we shift it dynamically\n\t'''\n\tif np.min(A[:]) >= 0:\n\t\tA = A - np.mean(A[:]);\n\t\n\tcols = np.round(np.sqrt(A.shape[1]));\n\n\tchannel_size = A.shape[0]/3;\n\tdim = np.sqrt(channel_size);\n\tdimp = dim+1;\n\trows = np.ceil(A.shape[1]/cols);\n\tB = A[0:channel_size, :];\n\tC = A[channel_size:channel_size*2, :];\n\tD = A[2*channel_size:channel_size*3, :];\n\tB = B/(np.ones((B.shape[0], 1)) * 
np.max(np.abs(B)));\n\tC = C/(np.ones((C.shape[0], 1)) * np.max(np.abs(C)));\n\tD = D/(np.ones((D.shape[0], 1)) * np.max(np.abs(D)));\n\t# Initialization of the image\n\tI = np.ones((dim*rows+rows-1,dim*cols+cols-1,3));\n\n\t#Transfer features to this image matrix\n\trows = int(rows)\n\tcols = int(cols)\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t \n\t\t\tif i*cols+j+1 > B.shape[1]:\n\t\t\t\tbreak\n\t\t\n\t\t# This sets the patch\n\t\tI[i*dimp:i*dimp+dim, j*dimp:j*dimp+dim, 0] = np.reshape(B[:,i*cols+j],[dim, dim]);\n\t\tI[i*dimp:i*dimp+dim, j*dimp:j*dimp+dim, 1] = np.reshape(C[:,i*cols+j],[dim, dim]);\n\t\tI[i*dimp:i*dimp+dim, j*dimp:j*dimp+dim, 2] = np.reshape(D[:,i*cols+j],[dim, dim]);\n\n\tI = I + 1;\n\tI = I / 2;\n\n\tpl.imshow(I);\n\tpl.axis('equal')\n\tpl.axis('off')\n\tpl.show();\n\t\nif __name__ == '__main__':\n\t\n\t#W = np.random.rand(8*8, 16)\n\tW = np.zeros((8,8, 16))\n\tW[4,:,0] = 1;\n\tW[:,4,1] = 1;\n\tW = np.reshape(W, [8*8, 16])\n\tdisplayWeights(W)\n\t\n\t\n" ]
[ [ "numpy.sqrt", "numpy.abs", "numpy.min", "numpy.reshape", "numpy.ones", "numpy.ceil", "numpy.shape", "numpy.mean", "numpy.mod", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
latimerb/sonata
[ "ed14cce11e01c920f722216ae338c16297ebd919" ]
[ "examples/300_cells/build_network.py" ]
[ "import os\nimport numpy as np\n\nfrom bmtk.builder import NetworkBuilder\nfrom bmtk.builder.bionet import SWCReader\nfrom bmtk.utils.io.spike_trains import PoissonSpikesGenerator\nfrom bmtk.builder.aux.node_params import positions_columinar, xiter_random\n\nbuild_recurrent_edges = True\n\nprint('Building internal network')\n# List of non-virtual cell models\ncell_models = [\n {\n 'model_name': 'Scnn1a', 'ei': 'e',\n 'morphology': 'Scnn1a_473845048_m',\n 'model_template': 'nml:Cell_472363762.cell.nml'\n },\n {\n 'model_name': 'Rorb', 'ei': 'e',\n 'morphology': 'Rorb_325404214_m',\n 'model_template': 'nml:Cell_473863510.cell.nml'\n },\n {\n 'model_name': 'Nr5a1', 'ei': 'e',\n 'morphology': 'Nr5a1_471087815_m',\n 'model_template': 'nml:Cell_473863035.cell.nml'\n },\n {\n 'model_name': 'PV1', 'ei': 'i',\n 'morphology': 'Pvalb_470522102_m',\n 'model_template': 'nml:Cell_472912177.cell.nml'\n },\n {\n 'model_name': 'PV2', 'ei': 'i',\n 'morphology': 'Pvalb_469628681_m',\n 'model_template': 'nml:Cell_473862421.cell.nml'\n }\n]\n\nmorphologies = {p['model_name']: SWCReader(os.path.join('../shared_components/morphologies',\n '{}.swc'.format(p['morphology'])))\n for p in cell_models}\ndef build_edges(src, trg, sections=['basal', 'apical'], dist_range=[50.0, 150.0]):\n \"\"\"Function used to randomly assign a synaptic location based on the section (soma, basal, apical) and an\n arc-length dist_range from the soma. This function should be passed into the network and called during the build\n process.\n\n :param src: source cell (dict)\n :param trg: target cell (dict)\n :param sections: list of target cell sections to synapse onto\n :param dist_range: range (distance from soma center) to place\n :return:\n \"\"\"\n # Get morphology and soma center for the target cell\n swc_reader = morphologies[trg['model_name']]\n target_coords = [trg['x'], trg['y'], trg['z']]\n\n sec_ids, sec_xs = swc_reader.choose_sections(sections, dist_range) # randomly choose sec_ids\n coords = swc_reader.get_coord(sec_ids, sec_xs, soma_center=target_coords) # get coords of sec_ids\n dist = swc_reader.get_dist(sec_ids)\n swctype = swc_reader.get_type(sec_ids)\n return sec_ids, sec_xs, coords[0][0], coords[0][1], coords[0][2], dist[0], swctype[0]\n\n\n# Build a network of 300 biophysical cells to simulate\ninternal = NetworkBuilder(\"internal\")\nfor i, model_props in enumerate(cell_models):\n n_cells = 80 if model_props['ei'] == 'e' else 30 # 80% excitatory, 20% inhib\n\n # Randomly get positions uniformly distributed in a column\n positions = positions_columinar(N=n_cells, center=[0, 10.0, 0], max_radius=50.0, height=200.0)\n\n internal.add_nodes(N=n_cells,\n x=positions[:, 0], y=positions[:, 1], z=positions[:, 2],\n rotation_angle_yaxis=xiter_random(N=n_cells, min_x=0.0, max_x=2 * np.pi), # randomly rotate y axis\n model_type='biophysical',\n model_processing='aibs_perisomatic',\n **model_props)\n\nif build_recurrent_edges:\n def n_connections(src, trg, prob=0.5, min_syns=2, max_syns=7):\n return 0 if np.random.uniform() > prob else np.random.randint(min_syns, max_syns)\n\n # exc --> exc connections\n cm = internal.add_edges(source={'ei': 'e'}, target={'ei': 'e'},\n connection_rule=n_connections,\n connection_params={'prob': 0.2},\n #connection_rule=lambda *_: np.random.randint(0, 7),\n dynamics_params='AMPA_ExcToExc.json',\n model_template='Exp2Syn',\n delay=2.0)\n cm.add_properties('syn_weight', rule=6.0e-05, dtypes=np.float)\n cm.add_properties(['sec_id', 'sec_x', 'pos_x', 'pos_y', 'pos_z', 'dist', 'type'],\n 
rule=build_edges,\n rule_params={'sections': ['basal', 'apical'], 'dist_range': [30.0, 150.0]},\n dtypes=[np.int32, np.float, np.float, np.float, np.float, np.float, np.uint8])\n\n # exc --> inh connections\n cm = internal.add_edges(source={'ei': 'e'}, target={'ei': 'i'},\n connection_rule=n_connections,\n dynamics_params='AMPA_ExcToInh.json',\n model_template='Exp2Syn',\n delay=2.0)\n cm.add_properties('syn_weight', rule=0.0006, dtypes=np.float)\n cm.add_properties(['sec_id', 'sec_x', 'pos_x', 'pos_y', 'pos_z', 'dist', 'type'],\n rule=build_edges,\n rule_params={'sections': ['somatic', 'basal'], 'dist_range': [0.0, 1.0e+20]},\n dtypes=[np.int32, np.float, np.float, np.float, np.float, np.float, np.uint8])\n\n # inh --> exc connections (target must be excitatory to match the comment\n # and the GABA_InhToExc dynamics; apical sections only exist on exc cells)\n cm = internal.add_edges(source={'ei': 'i'}, target={'ei': 'e'},\n connection_rule=n_connections,\n #connection_rule=lambda *_: np.random.randint(0, 4),\n dynamics_params='GABA_InhToExc.json',\n model_template='Exp2Syn',\n delay=2.0)\n cm.add_properties('syn_weight', rule=0.002, dtypes=np.float)\n cm.add_properties(['sec_id', 'sec_x', 'pos_x', 'pos_y', 'pos_z', 'dist', 'type'],\n rule=build_edges,\n rule_params={'sections': ['somatic', 'basal', 'apical'], 'dist_range': [0.0, 50.0]},\n dtypes=[np.int32, np.float, np.float, np.float, np.float, np.float, np.uint8])\n\n # inh --> inh connections\n cm = internal.add_edges(source={'ei': 'i'}, target={'ei': 'i'},\n connection_rule=n_connections,\n dynamics_params='GABA_InhToInh.json',\n model_template='Exp2Syn',\n delay=2.0)\n cm.add_properties('syn_weight', rule=0.00015, dtypes=np.float)\n cm.add_properties(['sec_id', 'sec_x', 'pos_x', 'pos_y', 'pos_z', 'dist', 'type'],\n rule=build_edges,\n rule_params={'sections': ['somatic', 'basal'], 'dist_range': [0.0, 1.0e+20]},\n dtypes=[np.int32, np.float, np.float, np.float, np.float, np.float, np.uint8])\n\n\ninternal.build()\n\nprint('Saving internal')\ninternal.save(output_dir='network')\n\n\nprint('Building external connections')\nexternal = NetworkBuilder(\"external\")\nexternal.add_nodes(N=100, model_type='virtual', ei='e')\ncm = external.add_edges(target=internal.nodes(ei='e'), source=external.nodes(),\n connection_rule=lambda *_: np.random.randint(0, 5),\n dynamics_params='AMPA_ExcToExc.json',\n model_template='Exp2Syn',\n delay=2.0)\ncm.add_properties('syn_weight', rule=2.1e-4, dtypes=np.float)\ncm.add_properties(['sec_id', 'sec_x', 'pos_x', 'pos_y', 'pos_z', 'dist', 'type'],\n rule=build_edges,\n dtypes=[np.int32, np.float, np.float, np.float, np.float, np.float, np.uint8])\n\ncm = external.add_edges(target=internal.nodes(ei='i'), source=external.nodes(),\n connection_rule=lambda *_: np.random.randint(0, 5),\n dynamics_params='AMPA_ExcToInh.json',\n model_template='Exp2Syn',\n delay=2.0)\ncm.add_properties('syn_weight', rule=0.0015, dtypes=np.float)\ncm.add_properties(['sec_id', 'sec_x', 'pos_x', 'pos_y', 'pos_z', 'dist', 'type'],\n rule=build_edges,\n dtypes=[np.int32, np.float, np.float, np.float, np.float, np.float, np.uint8])\n\n\nexternal.build()\n\nprint('Saving external')\nexternal.save(output_dir='network')\n\n\n" ]
[ [ "numpy.random.uniform", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PonderaLab/estrategia-control-covid
[ "dfeb311b42baf9aa0b7652ed1472b7fd78335f82" ]
[ "coupled_dynamics/ext_params.py" ]
[ "import numpy as np\nfrom helper_functions import *\n# from arenas_params import pg, ξ, kg\n\ndef get_ext_params( n_ig, s_i, R_ij, pg, one_minus_pg, ξ, kg ):\n\n # number of patches\n try:\n NP = np.array(n_ig).shape[0]\n except:\n NP = 1\n\n # effective population given the mobility parameters\n n_i_eff = get_n_i_eff(n_ig, R_ij, pg)\n n_ig_eff = get_n_ig_eff(n_ig, R_ij, pg)\n n_g = get_n_g(n_ig)\n\n # patch related effective density\n f_i = f(n_i_eff/s_i, ξ)\n if NP != 1:\n f_i = f_i.reshape(NP,1)\n\n # age related normalization factor\n z_g = n_g / np.dot( np.transpose(f_i), n_ig_eff )\n # precoputation of age related fixed params (number of contacts could enter the bayesian formalism later...)\n zk_g = z_g * kg\n\n ## EXTERNAL FIXED PARAMETERS\n return [zk_g, f_i, n_ig_eff]\n" ]
[ [ "numpy.array", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
phy-ml/neural-solver
[ "4f5f5e8ab84fa2f6d759f8278683c6a70b16caec" ]
[ "NET_Solver/geometry/meshgen.py" ]
[ "import numpy as np\r\nfrom geometry import Annulus_Boundary\r\nfrom utils import Plot_Grid\r\nimport torch\r\nfrom geometry import *\r\nfrom utils import *\r\n\r\n\r\nclass EllipticGrid(Annulus_Boundary):\r\n def __init__(self, r_outer, r_inner, eccentricity, cg, rg, h):\r\n super().__init__(r_outer, r_inner, eccentricity)\r\n self.nx = cg\r\n self.ny = rg\r\n self.h = h\r\n self.boundary = Annulus_Boundary(r_outer, r_inner, eccentricity).__call__(cg, rg)\r\n\r\n\r\n\r\n def __call__(self):\r\n # generate the mesh for physical space\r\n x,y = self.physical_grid(tol=1e-10)\r\n\r\n # generate the computational space\r\n xi, eta = self.computation_grid()\r\n\r\n # generate derivative of the physical plane\r\n dx_dxi, dy_dxi, dx_deta, dy_deta, jac, jac_inv = self.grid_derivative(x,y)\r\n\r\n return {'x': x,\r\n 'y': y,\r\n 'xi': xi,\r\n 'eta': eta,\r\n 'dx_dxi':dx_dxi,\r\n 'dy_dxi':dy_dxi,\r\n 'dx_deta': dx_deta,\r\n 'dy_deta': dy_deta,\r\n 'jac': jac,\r\n 'jac_inv': jac_inv}\r\n\r\n def pre_processing(self):\r\n xl, yl = self.boundary['left'][0, :], self.boundary['left'][1, :]\r\n xr, yr = self.boundary['right'][0, :], self.boundary['right'][1, :]\r\n xlow, ylow = self.boundary['low'][0, :], self.boundary['low'][1, :]\r\n xtop, ytop = self.boundary['top'][0, :], self.boundary['top'][1, :]\r\n\r\n ######################################################################\r\n # combine the x and y into a 2d matrix\r\n x = np.zeros([self.ny, self.nx])\r\n y = np.zeros([self.ny, self.nx])\r\n x[:, 0] = xl\r\n y[:, 0] = yl\r\n x[:, -1] = xr\r\n y[:, -1] = yr\r\n x[0, :] = xlow\r\n y[0, :] = ylow\r\n x[-1, :] = xtop\r\n y[-1, :] = ytop\r\n\r\n return x, y\r\n\r\n def physical_grid(self, tol=1e-10):\r\n x, y = self.pre_processing()\r\n err = 2.2e-16\r\n assert x.shape == y.shape, f'Shape of X and Y does not match'\r\n count = 1\r\n A = np.ones([self.ny - 2, self.nx - 2])\r\n B = np.ones([self.ny - 2, self.nx - 2])\r\n C = np.ones([self.ny - 2, self.nx - 2])\r\n err_total = []\r\n while True:\r\n X = (A * (x[2:, 1:-1] + x[0:-2, 1:-1]) + C * (x[1:-1, 2:] + x[1:-1, 0:-2]) -\r\n B / 2 * (x[2:, 2:] + x[0:-2, 0:-2] - x[2:, 0:-2] - x[0:-2, 2:])) / 2 / (A + C)\r\n Y = (A * (y[2:, 1:-1] + y[0:-2, 1:-1]) + C * (y[1:-1, 2:] + y[1:-1, 0:-2]) -\r\n B / 2 * (y[2:, 2:] + y[0:-2, 0:-2] - y[2:, 0:-2] - y[0:-2, 2:])) / 2 / (A + C)\r\n\r\n error = np.max(np.max(np.abs(x[1:-1, 1:-1] - X)) + np.max(np.abs(y[1:-1, 1:-1] - Y)))\r\n err_total.append(error)\r\n x[1:-1, 1:-1] = X\r\n y[1:-1, 1:-1] = Y\r\n A = ((x[1:-1, 2:] - x[1:-1, 0:-2]) / 2 / self.h) ** 2 + (\r\n (y[1:-1, 2:] - y[1:-1, 0:-2]) / 2 / self.h) ** 2 + err\r\n B = (x[2:, 1:-1] - x[0:-2, 1:-1]) / 2 / self.h * (x[1:-1, 2:] - x[1:-1, 0:-2]) / 2 / self.h + (\r\n y[2:, 1:-1] - y[0:-2, 1:-1]) / 2 / self.h * (y[1:-1, 2:] - y[1:-1, 0:-2]) / 2 / self.h + err\r\n C = ((x[2:, 1:-1] - x[0:-2, 1:-1]) / 2 / self.h) ** 2 + (\r\n (y[2:, 1:-1] - y[0:-2, 1:-1]) / 2 / self.h) ** 2 + err\r\n\r\n if error < tol:\r\n #print('Mesh Converged')\r\n break\r\n pass\r\n if count > 50000:\r\n print('Mesh did not reach convergence')\r\n break\r\n pass\r\n count += 1\r\n return x, y\r\n\r\n def computation_grid(self):\r\n xi_ = np.linspace(0, self.nx - 1, self.nx)\r\n eta_ = np.linspace(0, self.ny - 1, self.ny)\r\n xi, eta = np.meshgrid(xi_, eta_)\r\n xi = xi * self.h\r\n eta = eta * self.h\r\n return xi, eta\r\n\r\n def grid_derivative(self, x, y):\r\n # create array to store the derivatives\r\n dx_dxi = np.zeros(x.shape)\r\n dx_deta = np.zeros(x.shape)\r\n dy_dxi = np.zeros(y.shape)\r\n 
dy_deta = np.zeros(y.shape)\r\n\r\n # compute the derivatives\r\n dx_dxi_central = (-x[:, 4:] + 8 * x[:, 3:-1] - 8 * x[:, 1:-3] + x[:, 0:-4]) / 12 / self.h\r\n dx_dxi_left = (-11 * x[:, 0:-3] + 18 * x[:, 1:-2] - 9 * x[:, 2:-1] + 2 * x[:, 3:]) / 6 / self.h\r\n dx_dxi_right = (11 * x[:, 3:] - 18 * x[:, 2:-1] + 9 * x[:, 1:-2] - 2 * x[:, 0:-3]) / 6 / self.h\r\n\r\n dy_dxi_central = (-y[:, 4:] + 8 * y[:, 3:-1] - 8 * y[:, 1:-3] + y[:, 0:-4]) / 12 / self.h\r\n dy_dxi_left = (-11 * y[:, 0:-3] + 18 * y[:, 1:-2] - 9 * y[:, 2:-1] + 2 * y[:, 3:]) / 6 / self.h\r\n dy_dxi_right = (11 * y[:, 3:] - 18 * y[:, 2:-1] + 9 * y[:, 1:-2] - 2 * y[:, 0:-3]) / 6 / self.h\r\n\r\n dx_deta_central = (-x[4:, :] + 8 * x[3:-1, :] - 8 * x[1:-3, :] + x[0:-4, :]) / 12 / self.h\r\n dx_deta_low = (-11 * x[0:-3, :] + 18 * x[1:-2, :] - 9 * x[2:-1, :] + 2 * x[3:, :]) / 6 / self.h\r\n dx_deta_up = (11 * x[3:, :] - 18 * x[2:-1, :] + 9 * x[1:-2, :] - 2 * x[0:-3, :]) / 6 / self.h\r\n\r\n dy_deta_central = (-y[4:, :] + 8 * y[3:-1, :] - 8 * y[1:-3, :] + y[0:-4, :]) / 12 / self.h\r\n dy_deta_low = (-11 * y[0:-3, :] + 18 * y[1:-2, :] - 9 * y[2:-1, :] + 2 * y[3:, :]) / 6 / self.h\r\n dy_deta_up = (11 * y[3:, :] - 18 * y[2:-1, :] + 9 * y[1:-2, :] - 2 * y[0:-3, :]) / 6 / self.h\r\n\r\n # store the central, forward and backward derivatives in a single array\r\n dx_dxi[:, 2:-2] = dx_dxi_central\r\n dx_dxi[:, 0:2] = dx_dxi_left[:, 0:2]\r\n dx_dxi[:, -2:] = dx_dxi_right[:, -2:]\r\n\r\n dy_dxi[:, 2:-2] = dy_dxi_central\r\n dy_dxi[:, 0:2] = dy_dxi_left[:, 0:2]\r\n dy_dxi[:, -2:] = dy_dxi_right[:, -2:]\r\n\r\n dx_deta[2:-2, :] = dx_deta_central\r\n dx_deta[0:2, :] = dx_deta_low[0:2, :]\r\n dx_deta[-2:, :] = dx_deta_up[-2:, :]\r\n\r\n dy_deta[2:-2, :] = dy_deta_central\r\n dy_deta[0:2, :] = dy_deta_low[0:2, :]\r\n dy_deta[-2:, :] = dy_deta_up[-2:, :]\r\n\r\n # compute jacobian\r\n jac = dx_dxi * dy_deta - dx_deta * dy_dxi\r\n\r\n # inverse of jacobian\r\n jac_inv = 1 / jac\r\n\r\n return dx_dxi, dy_dxi, dx_deta, dy_deta, jac, jac_inv\r\n\r\n\r\nif __name__ == '__main__':\r\n cg, rg = 70, 40\r\n h = 0.01\r\n anulus = EllipticGrid(1, 0.4, -0.99, cg, rg, h)()\r\n x, y = anulus['x'], anulus['y']\r\n print(x.shape, y.shape)\r\n\r\n Plot_Grid(x, y, cg, rg)\r\n\r\n#\r\n# class TFI:\r\n# def __init__(self, xi, eta, annulus, Boundary=False):\r\n# \"\"\"\r\n# Transfinite Interpolation for generating grid using analytical function\r\n# This Function maps the complex physical space into cartesian rectangular domain\r\n#\r\n# # NOTE the input Xi and Eta should be meshed before using in this function\r\n#\r\n# xi_, eta_ = np.linspace(0,1,nx), np.linspace(0,1,ny)\r\n# xi, eta = np.meshgrid(xi_, eta_)\r\n# annulus = {some function with implemented boundary conditions}\r\n# x = TFI(xi, eta, annulus).X()\r\n# y = TFI(xi, eta, annulus).Y()\r\n#\r\n# :param xi: Xi is the computational space in the x axis\r\n# :param eta: Eta is the computational space in the y axis\r\n# :param annulus: Annulus is a function which computes the grid in physical plane wrt boundary conditions\r\n# :param Boundary: Boundary a boolean parameter to specify if TFI is used for generating boundary or satisfying\r\n# boundary conditions for computation\r\n# \"\"\"\r\n# self.xi = xi\r\n# self.eta = eta\r\n# self.annulus = annulus\r\n# self.dx = (xi.max() - xi.min()) / len(xi)\r\n# self.dy = (eta.max() - eta.min()) / len(eta)\r\n# self.bound = Boundary\r\n#\r\n# def __call__(self):\r\n# # get all the x related values\r\n# x = self.X()['x']\r\n# dxdxi = self.X()['dxdxi']\r\n# dxdeta = 
self.X()['dxdeta']\r\n#\r\n# # get all the y related values\r\n# y = self.Y()['y']\r\n# dydxi = self.Y()['dydxi']\r\n# dydeta = self.Y()['dydeta']\r\n#\r\n# # calculate the jacobian\r\n# jac = dxdxi*dydeta - dxdeta*dydxi\r\n#\r\n#\r\n#\r\n# return {'x':x, 'dxdxi': dxdxi, 'dxdeta':dxdeta, 'y':y, 'dydxi':dydxi, 'dydeta':dydeta}\r\n#\r\n# def X(self):\r\n# \"\"\"\r\n# X returns the interpolated x-axis values from xi and eta\r\n# :return: x axis values for physical plane in a dict format\r\n# \"\"\"\r\n# out = ((1 - self.eta) * self.annulus.Xr(self.xi) + self.eta * self.annulus.Xl(self.xi) + (1 - self.xi)\r\n# * self.annulus.Xt(self.eta) + self.xi * self.annulus.Xb(self.eta) -\r\n# (self.xi * self.eta * self.annulus.Xl(np.array([1])) + self.xi * (1 - self.eta) *\r\n# self.annulus.Xr(np.array([1])) + self.eta * (1 - self.xi) * self.annulus.Xl(np.array([0])) +\r\n# (1 - self.xi) * (1 - self.eta) * self.annulus.Xr(np.array([0]))))\r\n#\r\n#\r\n# dxdxi = np.gradient(out, self.dx)[0]\r\n# dxdeta = np.gradient(out, self.dx)[1]\r\n#\r\n# # testing np.gradient\r\n# #test_dx_dxi = np.gradient(out, self.xi)\r\n# #print(dxdxi)\r\n#\r\n# return {'x': out, 'dxdxi': dxdxi, 'dxdeta': dxdeta}\r\n# #return out\r\n#\r\n# def Y(self):\r\n# \"\"\"\r\n# Y returns the interpolated y-axis values from xi and eta\r\n# :return: Y axis values for physical plane in a dict format\r\n# \"\"\"\r\n# out = ((1 - self.eta) * self.annulus.Yr(self.xi) + self.eta * self.annulus.Yl(self.xi) + (1 - self.xi)\r\n# * self.annulus.Yt(self.eta) + self.xi * self.annulus.Yb(self.eta) -\r\n# (self.xi * self.eta * self.annulus.Yl(np.array([1])) + self.xi * (1 - self.eta) *\r\n# self.annulus.Yr(np.array([1])) + self.eta * (1 - self.xi) * self.annulus.Yl(np.array([0])) +\r\n# (1 - self.xi) * (1 - self.eta) * self.annulus.Yr(np.array([0]))))\r\n#\r\n# dydxi = np.gradient(out, self.dy)[0]\r\n# dydeta = np.gradient(out, self.dy)[1]\r\n#\r\n# return {'y': out, 'dydxi': dydxi, 'dydeta': dydeta}\r\n# #return out\r\n#\r\n# if __name__ == '__main__':\r\n# xi_ = np.linspace(0,1,40)\r\n# eta_ = np.linspace(0,1,40)\r\n# xi, eta = np.meshgrid(xi_, eta_)\r\n# anulus = Analytical_Annulus(1., 0.6, 0.)\r\n# grid = TFI(xi, eta, anulus)\r\n# print(grid())" ]
[ [ "numpy.abs", "numpy.linspace", "numpy.ones", "numpy.meshgrid", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
voodoohop/magenta
[ "23c9bc51038643c2c7365aac9abcca430e8cccc1" ]
[ "magenta/music/sequences_lib.py" ]
[ "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines sequence of notes objects for creating datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport itertools\nimport math\nimport operator\nimport random\n\nfrom magenta.music import chord_symbols_lib\nfrom magenta.music import constants\nfrom magenta.protobuf import music_pb2\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# Set the quantization cutoff.\n# Note events before this cutoff are rounded down to nearest step. Notes\n# above this cutoff are rounded up to nearest step. The cutoff is given as a\n# fraction of a step.\n# For example, with quantize_cutoff = 0.75 using 0-based indexing,\n# if .75 < event <= 1.75, it will be quantized to step 1.\n# If 1.75 < event <= 2.75 it will be quantized to step 2.\n# A number close to 1.0 gives less wiggle room for notes that start early,\n# and they will be snapped to the previous step.\nQUANTIZE_CUTOFF = 0.5\n\n# Shortcut to text annotation types.\nBEAT = music_pb2.NoteSequence.TextAnnotation.BEAT\nCHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL\nUNKNOWN_PITCH_NAME = music_pb2.NoteSequence.UNKNOWN_PITCH_NAME\n\n# The amount to upweight note-on events vs note-off events.\nONSET_UPWEIGHT = 5.0\n\n# The size of the frame extension for onset event.\n# Frames in [onset_frame-ONSET_WINDOW, onset_frame+ONSET_WINDOW]\n# are considered to contain onset events.\nONSET_WINDOW = 1\n\n\nclass BadTimeSignatureError(Exception):\n pass\n\n\nclass MultipleTimeSignatureError(Exception):\n pass\n\n\nclass MultipleTempoError(Exception):\n pass\n\n\nclass NegativeTimeError(Exception):\n pass\n\n\nclass QuantizationStatusError(Exception):\n \"\"\"Exception for when a sequence was unexpectedly quantized or unquantized.\n\n Should not happen during normal operation and likely indicates a programming\n error.\n \"\"\"\n pass\n\n\nclass InvalidTimeAdjustmentError(Exception):\n pass\n\n\nclass RectifyBeatsError(Exception):\n pass\n\n\ndef trim_note_sequence(sequence, start_time, end_time):\n \"\"\"Trim notes from a NoteSequence to lie within a specified time range.\n\n Notes starting before `start_time` are not included. 
Notes ending after\n `end_time` are truncated.\n\n Args:\n sequence: The NoteSequence for which to trim notes.\n start_time: The float time in seconds after which all notes should begin.\n end_time: The float time in seconds before which all notes should end.\n\n Returns:\n A copy of `sequence` with all notes trimmed to lie between `start_time` and\n `end_time`.\n\n Raises:\n QuantizationStatusError: If the sequence has already been quantized.\n \"\"\"\n if is_quantized_sequence(sequence):\n raise QuantizationStatusError(\n 'Can only trim notes and chords for unquantized NoteSequence.')\n\n subsequence = music_pb2.NoteSequence()\n subsequence.CopyFrom(sequence)\n\n del subsequence.notes[:]\n for note in sequence.notes:\n if note.start_time < start_time or note.start_time >= end_time:\n continue\n new_note = subsequence.notes.add()\n new_note.CopyFrom(note)\n new_note.end_time = min(note.end_time, end_time)\n\n subsequence.total_time = min(sequence.total_time, end_time)\n\n return subsequence\n\n\nDEFAULT_SUBSEQUENCE_PRESERVE_CONTROL_NUMBERS = (\n 64, # sustain\n 66, # sostenuto\n 67, # una corda\n)\n\n\ndef _extract_subsequences(sequence, split_times,\n preserve_control_numbers=None):\n \"\"\"Extracts multiple subsequences from a NoteSequence.\n\n Args:\n sequence: The NoteSequence to extract subsequences from.\n split_times: A Python list of subsequence boundary times. The first\n subsequence will start at `split_times[0]` and end at `split_times[1]`,\n the next subsequence will start at `split_times[1]` and end at\n `split_times[2]`, and so on with the last subsequence ending at\n `split_times[-1]`.\n preserve_control_numbers: List of control change numbers to preserve as\n pedal events. The most recent event before the beginning of the\n subsequence will be inserted at the beginning of the subsequence.\n If None, will use DEFAULT_SUBSEQUENCE_PRESERVE_CONTROL_NUMBERS.\n\n Returns:\n A Python list of new NoteSequence containing the subsequences of `sequence`.\n\n Raises:\n QuantizationStatusError: If the sequence has already been quantized.\n ValueError: If there are fewer than 2 split times, or the split times are\n unsorted, or if any of the subsequences would start past the end of the\n sequence.\n \"\"\"\n if is_quantized_sequence(sequence):\n raise QuantizationStatusError(\n 'Can only extract subsequences from unquantized NoteSequence.')\n\n if len(split_times) < 2:\n raise ValueError('Must provide at least a start and end time.')\n if any(t1 > t2 for t1, t2 in zip(split_times[:-1], split_times[1:])):\n raise ValueError('Split times must be sorted.')\n if any(time >= sequence.total_time for time in split_times[:-1]):\n raise ValueError('Cannot extract subsequence past end of sequence.')\n\n if preserve_control_numbers is None:\n preserve_control_numbers = DEFAULT_SUBSEQUENCE_PRESERVE_CONTROL_NUMBERS\n\n subsequence = music_pb2.NoteSequence()\n subsequence.CopyFrom(sequence)\n\n subsequence.total_time = 0.0\n\n del subsequence.notes[:]\n del subsequence.time_signatures[:]\n del subsequence.key_signatures[:]\n del subsequence.tempos[:]\n del subsequence.text_annotations[:]\n del subsequence.control_changes[:]\n del subsequence.pitch_bends[:]\n\n subsequences = [\n copy.deepcopy(subsequence) for _ in range(len(split_times) - 1)\n ]\n\n # Extract notes into subsequences.\n subsequence_index = -1\n for note in sorted(sequence.notes, key=lambda note: note.start_time):\n if note.start_time < split_times[0]:\n continue\n while (subsequence_index < len(split_times) - 1 and\n note.start_time 
>= split_times[subsequence_index + 1]):\n subsequence_index += 1\n if subsequence_index == len(split_times) - 1:\n break\n subsequences[subsequence_index].notes.extend([note])\n subsequences[subsequence_index].notes[-1].start_time -= (\n split_times[subsequence_index])\n subsequences[subsequence_index].notes[-1].end_time = min(\n note.end_time,\n split_times[subsequence_index + 1]) - split_times[subsequence_index]\n if (subsequences[subsequence_index].notes[-1].end_time >\n subsequences[subsequence_index].total_time):\n subsequences[subsequence_index].total_time = (\n subsequences[subsequence_index].notes[-1].end_time)\n\n # Extract time signatures, key signatures, tempos, and chord changes (beats\n # are handled below, other text annotations and pitch bends are deleted).\n # Additional state events will be added to the beginning of each subsequence.\n\n events_by_type = [\n sequence.time_signatures, sequence.key_signatures, sequence.tempos,\n [\n annotation for annotation in sequence.text_annotations\n if annotation.annotation_type == CHORD_SYMBOL\n ]\n ]\n new_event_containers = [[s.time_signatures for s in subsequences],\n [s.key_signatures for s in subsequences],\n [s.tempos for s in subsequences],\n [s.text_annotations for s in subsequences]]\n\n for events, containers in zip(events_by_type, new_event_containers):\n previous_event = None\n subsequence_index = -1\n for event in sorted(events, key=lambda event: event.time):\n if event.time <= split_times[0]:\n previous_event = event\n continue\n while (subsequence_index < len(split_times) - 1 and\n event.time > split_times[subsequence_index + 1]):\n subsequence_index += 1\n if subsequence_index == len(split_times) - 1:\n break\n if previous_event is not None:\n # Add state event to the beginning of the subsequence.\n containers[subsequence_index].extend([previous_event])\n containers[subsequence_index][-1].time = 0.0\n if subsequence_index == len(split_times) - 1:\n break\n # Only add the event if it's actually inside the subsequence (and not on\n # the boundary with the next one).\n if event.time < split_times[subsequence_index + 1]:\n containers[subsequence_index].extend([event])\n containers[subsequence_index][-1].time -= split_times[subsequence_index]\n previous_event = event\n # Add final state event to the beginning of all remaining subsequences.\n while subsequence_index < len(split_times) - 2:\n subsequence_index += 1\n if previous_event is not None:\n containers[subsequence_index].extend([previous_event])\n containers[subsequence_index][-1].time = 0.0\n\n # Copy stateless events to subsequences. Unlike the stateful events above,\n # stateless events do not have an effect outside of the subsequence in which\n # they occur.\n stateless_events_by_type = [[\n annotation for annotation in sequence.text_annotations\n if annotation.annotation_type in (BEAT,)\n ]]\n new_stateless_event_containers = [[s.text_annotations for s in subsequences]]\n for events, containers in zip(stateless_events_by_type,\n new_stateless_event_containers):\n subsequence_index = -1\n for event in sorted(events, key=lambda event: event.time):\n if event.time < split_times[0]:\n continue\n while (subsequence_index < len(split_times) - 1 and\n event.time >= split_times[subsequence_index + 1]):\n subsequence_index += 1\n if subsequence_index == len(split_times) - 1:\n break\n containers[subsequence_index].extend([event])\n containers[subsequence_index][-1].time -= split_times[subsequence_index]\n\n # Extract piano pedal events (other control changes are deleted). 
Pedal state\n # is maintained per-instrument and added to the beginning of each\n # subsequence.\n pedal_events = [\n cc for cc in sequence.control_changes\n if cc.control_number in preserve_control_numbers\n ]\n previous_pedal_events = {}\n subsequence_index = -1\n for pedal_event in sorted(pedal_events, key=lambda event: event.time):\n if pedal_event.time <= split_times[0]:\n previous_pedal_events[\n (pedal_event.instrument, pedal_event.control_number)] = pedal_event\n continue\n while (subsequence_index < len(split_times) - 1 and\n pedal_event.time > split_times[subsequence_index + 1]):\n subsequence_index += 1\n if subsequence_index == len(split_times) - 1:\n break\n # Add the current pedal pedal state to the beginning of the subsequence.\n for previous_pedal_event in previous_pedal_events.values():\n subsequences[subsequence_index].control_changes.extend(\n [previous_pedal_event])\n subsequences[subsequence_index].control_changes[-1].time = 0.0\n if subsequence_index == len(split_times) - 1:\n break\n # Only add the pedal event if it's actually inside the subsequence (and\n # not on the boundary with the next one).\n if pedal_event.time < split_times[subsequence_index + 1]:\n subsequences[subsequence_index].control_changes.extend([pedal_event])\n subsequences[subsequence_index].control_changes[-1].time -= (\n split_times[subsequence_index])\n previous_pedal_events[\n (pedal_event.instrument, pedal_event.control_number)] = pedal_event\n # Add final pedal pedal state to the beginning of all remaining\n # subsequences.\n while subsequence_index < len(split_times) - 2:\n subsequence_index += 1\n for previous_pedal_event in previous_pedal_events.values():\n subsequences[subsequence_index].control_changes.extend(\n [previous_pedal_event])\n subsequences[subsequence_index].control_changes[-1].time = 0.0\n\n # Set subsequence info for all subsequences.\n for subsequence, start_time in zip(subsequences, split_times[:-1]):\n subsequence.subsequence_info.start_time_offset = start_time\n subsequence.subsequence_info.end_time_offset = (\n sequence.total_time - start_time - subsequence.total_time)\n\n return subsequences\n\n\ndef extract_subsequence(sequence,\n start_time,\n end_time,\n preserve_control_numbers=None):\n \"\"\"Extracts a subsequence from a NoteSequence.\n\n Notes starting before `start_time` are not included. Notes ending after\n `end_time` are truncated. Time signature, tempo, key signature, chord changes,\n and sustain pedal events outside the specified time range are removed;\n however, the most recent event of each of these types prior to `start_time` is\n included at `start_time`. This means that e.g. if a time signature of 3/4 is\n specified in the original sequence prior to `start_time` (and is not followed\n by a different time signature), the extracted subsequence will include a 3/4\n time signature event at `start_time`. Pitch bends and control changes other\n than sustain are removed entirely.\n\n The extracted subsequence is shifted to start at time zero.\n\n Args:\n sequence: The NoteSequence to extract a subsequence from.\n start_time: The float time in seconds to start the subsequence.\n end_time: The float time in seconds to end the subsequence.\n preserve_control_numbers: List of control change numbers to preserve as\n pedal events. 
The most recent event before the beginning of the\n subsequence will be inserted at the beginning of the subsequence.\n If None, will use DEFAULT_SUBSEQUENCE_PRESERVE_CONTROL_NUMBERS.\n\n\n Returns:\n A new NoteSequence containing the subsequence of `sequence` from the\n specified time range.\n\n Raises:\n QuantizationStatusError: If the sequence has already been quantized.\n ValueError: If `start_time` is past the end of `sequence`.\n \"\"\"\n return _extract_subsequences(\n sequence,\n split_times=[start_time, end_time],\n preserve_control_numbers=preserve_control_numbers)[0]\n\n\ndef shift_sequence_times(sequence, shift_seconds):\n \"\"\"Shifts times in a notesequence.\n\n Only forward shifts are supported.\n\n Args:\n sequence: The NoteSequence to shift.\n shift_seconds: The amount to shift.\n\n Returns:\n A new NoteSequence with shifted times.\n\n Raises:\n ValueError: If the shift amount is invalid.\n QuantizationStatusError: If the sequence has already been quantized.\n \"\"\"\n if shift_seconds <= 0:\n raise ValueError('Invalid shift amount: {}'.format(shift_seconds))\n if is_quantized_sequence(sequence):\n raise QuantizationStatusError(\n 'Can shift only unquantized NoteSequences.')\n\n shifted = music_pb2.NoteSequence()\n shifted.CopyFrom(sequence)\n\n # Delete subsequence_info because our frame of reference has shifted.\n shifted.ClearField('subsequence_info')\n\n # Shift notes.\n for note in shifted.notes:\n note.start_time += shift_seconds\n note.end_time += shift_seconds\n\n events_to_shift = [\n shifted.time_signatures, shifted.key_signatures, shifted.tempos,\n shifted.pitch_bends, shifted.control_changes, shifted.text_annotations,\n shifted.section_annotations\n ]\n\n for event in itertools.chain(*events_to_shift):\n event.time += shift_seconds\n\n shifted.total_time += shift_seconds\n\n return shifted\n\n\ndef remove_redundant_data(sequence):\n \"\"\"Returns a copy of the sequence with redundant data removed.\n\n An event is considered redundant if it is a time signature, a key signature,\n or a tempo that differs from the previous event of the same type only by time.\n For example, a tempo mark of 120 qpm at 5 seconds would be considered\n redundant if it followed a tempo mark of 120 qpm and 4 seconds.\n\n Fields in sequence_metadata are considered redundant if the same string is\n repeated.\n\n Args:\n sequence: The sequence to process.\n\n Returns:\n A new sequence with redundant events removed.\n \"\"\"\n fixed_sequence = copy.deepcopy(sequence)\n for events in [\n fixed_sequence.time_signatures, fixed_sequence.key_signatures,\n fixed_sequence.tempos\n ]:\n events.sort(key=lambda e: e.time)\n for i in range(len(events) - 1, 0, -1):\n tmp_ts = copy.deepcopy(events[i])\n tmp_ts.time = events[i - 1].time\n # If the only difference between the two events is time, then delete the\n # second one.\n if tmp_ts == events[i - 1]:\n del events[i]\n\n if fixed_sequence.HasField('sequence_metadata'):\n # Add composers and genres, preserving order, but dropping duplicates.\n del fixed_sequence.sequence_metadata.composers[:]\n added_composer = set()\n for composer in sequence.sequence_metadata.composers:\n if composer not in added_composer:\n fixed_sequence.sequence_metadata.composers.append(composer)\n added_composer.add(composer)\n\n del fixed_sequence.sequence_metadata.genre[:]\n added_genre = set()\n for genre in sequence.sequence_metadata.genre:\n if genre not in added_genre:\n fixed_sequence.sequence_metadata.genre.append(genre)\n added_genre.add(genre)\n\n return 
fixed_sequence\n\n\ndef concatenate_sequences(sequences, sequence_durations=None):\n \"\"\"Concatenate a series of NoteSequences together.\n\n Individual sequences will be shifted using shift_sequence_times and then\n merged together using the protobuf MergeFrom method. This means that any\n global values (e.g., ticks_per_quarter) will be overwritten by each sequence\n and only the final value will be used. After this, redundant data will be\n removed with remove_redundant_data.\n\n Args:\n sequences: A list of sequences to concatenate.\n sequence_durations: An optional list of sequence durations to use. If not\n specified, the total_time value will be used. Specifying durations is\n useful if the sequences to be concatenated are effectively longer than\n their total_time (e.g., a sequence that ends with a rest).\n\n Returns:\n A new sequence that is the result of concatenating *sequences.\n\n Raises:\n ValueError: If the length of sequences and sequence_durations do not match\n or if a specified duration is less than the total_time of the sequence.\n \"\"\"\n if sequence_durations and len(sequences) != len(sequence_durations):\n raise ValueError(\n 'sequences and sequence_durations must be the same length.')\n current_total_time = 0\n cat_seq = music_pb2.NoteSequence()\n for i in range(len(sequences)):\n sequence = sequences[i]\n if sequence_durations and sequence_durations[i] < sequence.total_time:\n raise ValueError(\n 'Specified sequence duration ({}) must not be less than the '\n 'total_time of the sequence ({})'.format(sequence_durations[i],\n sequence.total_time))\n if current_total_time > 0:\n cat_seq.MergeFrom(shift_sequence_times(sequence, current_total_time))\n else:\n cat_seq.MergeFrom(sequence)\n\n if sequence_durations:\n current_total_time += sequence_durations[i]\n else:\n current_total_time = cat_seq.total_time\n\n # Delete subsequence_info because we've joined several subsequences.\n cat_seq.ClearField('subsequence_info')\n\n return remove_redundant_data(cat_seq)\n\n\ndef repeat_sequence_to_duration(sequence, duration, sequence_duration=None):\n \"\"\"Repeat a sequence until it is a given duration, trimming any extra.\n\n Args:\n sequence: the sequence to repeat\n duration: the desired duration\n sequence_duration: If provided, will be used instead of sequence.total_time\n\n Returns:\n The repeated and possibly trimmed sequence.\n \"\"\"\n if not sequence_duration:\n sequence_duration = sequence.total_time\n num_repeats = int(math.ceil(duration / sequence_duration))\n repeated_ns = concatenate_sequences(\n [sequence] * num_repeats,\n sequence_durations=[sequence_duration] * num_repeats)\n\n trimmed = extract_subsequence(repeated_ns, start_time=0, end_time=duration)\n trimmed.ClearField('subsequence_info') # Not relevant in this case.\n return trimmed\n\n\ndef expand_section_groups(sequence):\n \"\"\"Expands a NoteSequence based on its section_groups.\n\n Args:\n sequence: The sequence to expand.\n\n Returns:\n A copy of the original sequence, expanded based on its section_groups. 
If\n the sequence has no section_groups, a copy of the original sequence will be\n returned.\n \"\"\"\n if not sequence.section_groups:\n return copy.deepcopy(sequence)\n\n sections = {}\n section_durations = {}\n for i in range(len(sequence.section_annotations)):\n section_id = sequence.section_annotations[i].section_id\n start_time = sequence.section_annotations[i].time\n if i < len(sequence.section_annotations) - 1:\n end_time = sequence.section_annotations[i + 1].time\n else:\n end_time = sequence.total_time\n\n subsequence = extract_subsequence(sequence, start_time, end_time)\n # This is a subsequence, so the section_groups no longer make sense.\n del subsequence.section_groups[:]\n # This subsequence contains only 1 section and it has been shifted to time\n # 0.\n del subsequence.section_annotations[:]\n subsequence.section_annotations.add(time=0, section_id=section_id)\n\n sections[section_id] = subsequence\n section_durations[section_id] = end_time - start_time\n\n # Recursively expand section_groups.\n def sections_in_group(section_group):\n sections = []\n for section in section_group.sections:\n field = section.WhichOneof('section_type')\n if field == 'section_id':\n sections.append(section.section_id)\n elif field == 'section_group':\n sections.extend(sections_in_group(section.section_group))\n return sections * section_group.num_times\n\n sections_to_concat = []\n for section_group in sequence.section_groups:\n sections_to_concat.extend(sections_in_group(section_group))\n\n return concatenate_sequences(\n [sections[i] for i in sections_to_concat],\n [section_durations[i] for i in sections_to_concat])\n\n\ndef _is_power_of_2(x):\n return x and not x & (x - 1)\n\n\ndef is_quantized_sequence(note_sequence):\n \"\"\"Returns whether or not a NoteSequence proto has been quantized.\n\n Args:\n note_sequence: A music_pb2.NoteSequence proto.\n\n Returns:\n True if `note_sequence` is quantized, otherwise False.\n \"\"\"\n # If the QuantizationInfo message has a non-zero steps_per_quarter or\n # steps_per_second, assume that the proto has been quantized.\n return (note_sequence.quantization_info.steps_per_quarter > 0 or\n note_sequence.quantization_info.steps_per_second > 0)\n\n\ndef is_relative_quantized_sequence(note_sequence):\n \"\"\"Returns whether a NoteSequence proto has been quantized relative to tempo.\n\n Args:\n note_sequence: A music_pb2.NoteSequence proto.\n\n Returns:\n True if `note_sequence` is quantized relative to tempo, otherwise False.\n \"\"\"\n # If the QuantizationInfo message has a non-zero steps_per_quarter, assume\n # that the proto has been quantized relative to tempo.\n return note_sequence.quantization_info.steps_per_quarter > 0\n\n\ndef is_absolute_quantized_sequence(note_sequence):\n \"\"\"Returns whether a NoteSequence proto has been quantized by absolute time.\n\n Args:\n note_sequence: A music_pb2.NoteSequence proto.\n\n Returns:\n True if `note_sequence` is quantized by absolute time, otherwise False.\n \"\"\"\n # If the QuantizationInfo message has a non-zero steps_per_second, assume\n # that the proto has been quantized by absolute time.\n return note_sequence.quantization_info.steps_per_second > 0\n\n\ndef assert_is_quantized_sequence(note_sequence):\n \"\"\"Confirms that the given NoteSequence proto has been quantized.\n\n Args:\n note_sequence: A music_pb2.NoteSequence proto.\n\n Raises:\n QuantizationStatusError: If the sequence is not quantized.\n \"\"\"\n if not is_quantized_sequence(note_sequence):\n raise QuantizationStatusError(\n 
'NoteSequence %s is not quantized.' % note_sequence.id)\n\n\ndef assert_is_relative_quantized_sequence(note_sequence):\n \"\"\"Confirms that a NoteSequence proto has been quantized relative to tempo.\n\n Args:\n note_sequence: A music_pb2.NoteSequence proto.\n\n Raises:\n QuantizationStatusError: If the sequence is not quantized relative to\n tempo.\n \"\"\"\n if not is_relative_quantized_sequence(note_sequence):\n raise QuantizationStatusError(\n 'NoteSequence %s is not quantized or is '\n 'quantized based on absolute timing.' % note_sequence.id)\n\n\ndef assert_is_absolute_quantized_sequence(note_sequence):\n \"\"\"Confirms that a NoteSequence proto has been quantized by absolute time.\n\n Args:\n note_sequence: A music_pb2.NoteSequence proto.\n\n Raises:\n QuantizationStatusError: If the sequence is not quantized by absolute\n time.\n \"\"\"\n if not is_absolute_quantized_sequence(note_sequence):\n raise QuantizationStatusError(\n 'NoteSequence %s is not quantized or is '\n 'quantized based on relative timing.' % note_sequence.id)\n\n\ndef steps_per_bar_in_quantized_sequence(note_sequence):\n \"\"\"Calculates steps per bar in a NoteSequence that has been quantized.\n\n Args:\n note_sequence: The NoteSequence to examine.\n\n Returns:\n Steps per bar as a floating point number.\n \"\"\"\n assert_is_relative_quantized_sequence(note_sequence)\n\n quarters_per_beat = 4.0 / note_sequence.time_signatures[0].denominator\n quarters_per_bar = (\n quarters_per_beat * note_sequence.time_signatures[0].numerator)\n steps_per_bar_float = (\n note_sequence.quantization_info.steps_per_quarter * quarters_per_bar)\n return steps_per_bar_float\n\n\ndef split_note_sequence(note_sequence,\n hop_size_seconds,\n skip_splits_inside_notes=False):\n \"\"\"Split one NoteSequence into many at specified time intervals.\n\n If `hop_size_seconds` is a scalar, this function splits a NoteSequence into\n multiple NoteSequences, all of fixed size (unless `split_notes` is False, in\n which case splits that would have truncated notes will be skipped; i.e. each\n split will either happen at a multiple of `hop_size_seconds` or not at all).\n Each of the resulting NoteSequences is shifted to start at time zero.\n\n If `hop_size_seconds` is a list, the NoteSequence will be split at each time\n in the list (unless `split_notes` is False as above).\n\n Args:\n note_sequence: The NoteSequence to split.\n hop_size_seconds: The hop size, in seconds, at which the NoteSequence will\n be split. Alternatively, this can be a Python list of times in seconds at\n which to split the NoteSequence.\n skip_splits_inside_notes: If False, the NoteSequence will be split at all\n hop positions, regardless of whether or not any notes are sustained across\n the potential split time, thus sustained notes will be truncated. 
If True,\n the NoteSequence will not be split at positions that occur within\n sustained notes.\n\n Returns:\n A Python list of NoteSequences.\n \"\"\"\n notes_by_start_time = sorted(\n list(note_sequence.notes), key=lambda note: note.start_time)\n note_idx = 0\n notes_crossing_split = []\n\n if isinstance(hop_size_seconds, list):\n split_times = sorted(hop_size_seconds)\n else:\n split_times = np.arange(hop_size_seconds, note_sequence.total_time,\n hop_size_seconds)\n\n valid_split_times = [0.0]\n\n for split_time in split_times:\n # Update notes crossing potential split.\n while (note_idx < len(notes_by_start_time) and\n notes_by_start_time[note_idx].start_time < split_time):\n notes_crossing_split.append(notes_by_start_time[note_idx])\n note_idx += 1\n notes_crossing_split = [\n note for note in notes_crossing_split if note.end_time > split_time\n ]\n\n if not (skip_splits_inside_notes and notes_crossing_split):\n valid_split_times.append(split_time)\n\n # Handle the final subsequence.\n if note_sequence.total_time > valid_split_times[-1]:\n valid_split_times.append(note_sequence.total_time)\n\n if len(valid_split_times) > 1:\n return _extract_subsequences(note_sequence, valid_split_times)\n else:\n return []\n\n\ndef split_note_sequence_on_time_changes(note_sequence,\n skip_splits_inside_notes=False):\n \"\"\"Split one NoteSequence into many around time signature and tempo changes.\n\n This function splits a NoteSequence into multiple NoteSequences, each of which\n contains only a single time signature and tempo, unless `split_notes` is False\n in which case all time signature and tempo changes occur within sustained\n notes. Each of the resulting NoteSequences is shifted to start at time zero.\n\n Args:\n note_sequence: The NoteSequence to split.\n skip_splits_inside_notes: If False, the NoteSequence will be split at all\n time changes, regardless of whether or not any notes are sustained across\n the time change. 
If True, the NoteSequence will not be split at time\n changes that occur within sustained notes.\n\n Returns:\n A Python list of NoteSequences.\n \"\"\"\n current_numerator = 4\n current_denominator = 4\n current_qpm = constants.DEFAULT_QUARTERS_PER_MINUTE\n\n time_signatures_and_tempos = sorted(\n list(note_sequence.time_signatures) + list(note_sequence.tempos),\n key=lambda t: t.time)\n time_signatures_and_tempos = [\n t for t in time_signatures_and_tempos if t.time < note_sequence.total_time\n ]\n\n notes_by_start_time = sorted(\n list(note_sequence.notes), key=lambda note: note.start_time)\n note_idx = 0\n notes_crossing_split = []\n\n valid_split_times = [0.0]\n\n for time_change in time_signatures_and_tempos:\n if isinstance(time_change, music_pb2.NoteSequence.TimeSignature):\n if (time_change.numerator == current_numerator and\n time_change.denominator == current_denominator):\n # Time signature didn't actually change.\n continue\n else:\n if time_change.qpm == current_qpm:\n # Tempo didn't actually change.\n continue\n\n # Update notes crossing potential split.\n while (note_idx < len(notes_by_start_time) and\n notes_by_start_time[note_idx].start_time < time_change.time):\n notes_crossing_split.append(notes_by_start_time[note_idx])\n note_idx += 1\n notes_crossing_split = [\n note for note in notes_crossing_split\n if note.end_time > time_change.time\n ]\n\n if time_change.time > valid_split_times[-1]:\n if not (skip_splits_inside_notes and notes_crossing_split):\n valid_split_times.append(time_change.time)\n\n # Even if we didn't split here, update the current time signature or tempo.\n if isinstance(time_change, music_pb2.NoteSequence.TimeSignature):\n current_numerator = time_change.numerator\n current_denominator = time_change.denominator\n else:\n current_qpm = time_change.qpm\n\n # Handle the final subsequence.\n if note_sequence.total_time > valid_split_times[-1]:\n valid_split_times.append(note_sequence.total_time)\n\n if len(valid_split_times) > 1:\n return _extract_subsequences(note_sequence, valid_split_times)\n else:\n return []\n\n\ndef quantize_to_step(unquantized_seconds,\n steps_per_second,\n quantize_cutoff=QUANTIZE_CUTOFF):\n \"\"\"Quantizes seconds to the nearest step, given steps_per_second.\n\n See the comments above `QUANTIZE_CUTOFF` for details on how the quantizing\n algorithm works.\n\n Args:\n unquantized_seconds: Seconds to quantize.\n steps_per_second: Quantizing resolution.\n quantize_cutoff: Value to use for quantizing cutoff.\n\n Returns:\n The input value quantized to the nearest step.\n \"\"\"\n unquantized_steps = unquantized_seconds * steps_per_second\n return int(unquantized_steps + (1 - quantize_cutoff))\n\n\ndef steps_per_quarter_to_steps_per_second(steps_per_quarter, qpm):\n \"\"\"Calculates steps per second given steps_per_quarter and a qpm.\"\"\"\n return steps_per_quarter * qpm / 60.0\n\n\ndef _quantize_notes(note_sequence, steps_per_second):\n \"\"\"Quantize the notes and chords of a NoteSequence proto in place.\n\n Note start and end times, and chord times are snapped to a nearby quantized\n step, and the resulting times are stored in a separate field (e.g.,\n quantized_start_step). See the comments above `QUANTIZE_CUTOFF` for details on\n how the quantizing algorithm works.\n\n Args:\n note_sequence: A music_pb2.NoteSequence protocol buffer. 
Will be modified in\n place.\n steps_per_second: Each second will be divided into this many quantized time\n steps.\n\n Raises:\n NegativeTimeError: If a note or chord occurs at a negative time.\n \"\"\"\n for note in note_sequence.notes:\n # Quantize the start and end times of the note.\n note.quantized_start_step = quantize_to_step(note.start_time,\n steps_per_second)\n note.quantized_end_step = quantize_to_step(note.end_time, steps_per_second)\n if note.quantized_end_step == note.quantized_start_step:\n note.quantized_end_step += 1\n\n # Do not allow notes to start or end in negative time.\n if note.quantized_start_step < 0 or note.quantized_end_step < 0:\n raise NegativeTimeError(\n 'Got negative note time: start_step = %s, end_step = %s' %\n (note.quantized_start_step, note.quantized_end_step))\n\n # Extend quantized sequence if necessary.\n if note.quantized_end_step > note_sequence.total_quantized_steps:\n note_sequence.total_quantized_steps = note.quantized_end_step\n\n # Also quantize control changes and text annotations.\n for event in itertools.chain(note_sequence.control_changes,\n note_sequence.text_annotations):\n # Quantize the event time, disallowing negative time.\n event.quantized_step = quantize_to_step(event.time, steps_per_second)\n if event.quantized_step < 0:\n raise NegativeTimeError(\n 'Got negative event time: step = %s' % event.quantized_step)\n\n\ndef quantize_note_sequence(note_sequence, steps_per_quarter):\n \"\"\"Quantize a NoteSequence proto relative to tempo.\n\n The input NoteSequence is copied and quantization-related fields are\n populated. Sets the `steps_per_quarter` field in the `quantization_info`\n message in the NoteSequence.\n\n Note start and end times, and chord times are snapped to a nearby quantized\n step, and the resulting times are stored in a separate field (e.g.,\n quantized_start_step). See the comments above `QUANTIZE_CUTOFF` for details on\n how the quantizing algorithm works.\n\n Args:\n note_sequence: A music_pb2.NoteSequence protocol buffer.\n steps_per_quarter: Each quarter note of music will be divided into this many\n quantized time steps.\n\n Returns:\n A copy of the original NoteSequence, with quantized times added.\n\n Raises:\n MultipleTimeSignatureError: If there is a change in time signature\n in `note_sequence`.\n MultipleTempoError: If there is a change in tempo in `note_sequence`.\n BadTimeSignatureError: If the time signature found in `note_sequence`\n has a 0 numerator or a denominator which is not a power of 2.\n NegativeTimeError: If a note or chord occurs at a negative time.\n \"\"\"\n qns = copy.deepcopy(note_sequence)\n\n qns.quantization_info.steps_per_quarter = steps_per_quarter\n\n if qns.time_signatures:\n time_signatures = sorted(qns.time_signatures, key=lambda ts: ts.time)\n # There is an implicit 4/4 time signature at 0 time. So if the first time\n # signature is something other than 4/4 and it's at a time other than 0,\n # that's an implicit time signature change.\n if time_signatures[0].time != 0 and not (\n time_signatures[0].numerator == 4 and\n time_signatures[0].denominator == 4):\n raise MultipleTimeSignatureError(\n 'NoteSequence has an implicit change from initial 4/4 time '\n 'signature to %d/%d at %.2f seconds.' 
%\n          (time_signatures[0].numerator, time_signatures[0].denominator,\n           time_signatures[0].time))\n\n    for time_signature in time_signatures[1:]:\n      if (time_signature.numerator != qns.time_signatures[0].numerator or\n          time_signature.denominator != qns.time_signatures[0].denominator):\n        raise MultipleTimeSignatureError(\n            'NoteSequence has at least one time signature change from %d/%d to '\n            '%d/%d at %.2f seconds.' %\n            (time_signatures[0].numerator, time_signatures[0].denominator,\n             time_signature.numerator, time_signature.denominator,\n             time_signature.time))\n\n    # Make it clear that there is only 1 time signature and it starts at the\n    # beginning.\n    qns.time_signatures[0].time = 0\n    del qns.time_signatures[1:]\n  else:\n    time_signature = qns.time_signatures.add()\n    time_signature.numerator = 4\n    time_signature.denominator = 4\n    time_signature.time = 0\n\n  if not _is_power_of_2(qns.time_signatures[0].denominator):\n    raise BadTimeSignatureError(\n        'Denominator is not a power of 2. Time signature: %d/%d' %\n        (qns.time_signatures[0].numerator, qns.time_signatures[0].denominator))\n\n  if qns.time_signatures[0].numerator == 0:\n    raise BadTimeSignatureError(\n        'Numerator is 0. Time signature: %d/%d' %\n        (qns.time_signatures[0].numerator, qns.time_signatures[0].denominator))\n\n  if qns.tempos:\n    tempos = sorted(qns.tempos, key=lambda t: t.time)\n    # There is an implicit 120.0 qpm tempo at 0 time. So if the first tempo is\n    # something other than 120.0 and it's at a time other than 0, that's an\n    # implicit tempo change.\n    if tempos[0].time != 0 and (tempos[0].qpm !=\n                                constants.DEFAULT_QUARTERS_PER_MINUTE):\n      raise MultipleTempoError(\n          'NoteSequence has an implicit tempo change from initial %.1f qpm to '\n          '%.1f qpm at %.2f seconds.' % (constants.DEFAULT_QUARTERS_PER_MINUTE,\n                                         tempos[0].qpm, tempos[0].time))\n\n    for tempo in tempos[1:]:\n      if tempo.qpm != qns.tempos[0].qpm:\n        raise MultipleTempoError(\n            'NoteSequence has at least one tempo change from %.1f qpm to %.1f '\n            'qpm at %.2f seconds.' % (tempos[0].qpm, tempo.qpm, tempo.time))\n\n    # Make it clear that there is only 1 tempo and it starts at the beginning.\n    qns.tempos[0].time = 0\n    del qns.tempos[1:]\n  else:\n    tempo = qns.tempos.add()\n    tempo.qpm = constants.DEFAULT_QUARTERS_PER_MINUTE\n    tempo.time = 0\n\n  # Compute quantization steps per second.\n  steps_per_second = steps_per_quarter_to_steps_per_second(\n      steps_per_quarter, qns.tempos[0].qpm)\n\n  qns.total_quantized_steps = quantize_to_step(qns.total_time, steps_per_second)\n  _quantize_notes(qns, steps_per_second)\n\n  return qns\n\n\ndef quantize_note_sequence_absolute(note_sequence, steps_per_second):\n  \"\"\"Quantize a NoteSequence proto using absolute event times.\n\n  The input NoteSequence is copied and quantization-related fields are\n  populated. Sets the `steps_per_second` field in the `quantization_info`\n  message in the NoteSequence.\n\n  Note start and end times, and chord times are snapped to a nearby quantized\n  step, and the resulting times are stored in a separate field (e.g.,\n  quantized_start_step). 
See the comments above `QUANTIZE_CUTOFF` for details on\n  how the quantizing algorithm works.\n\n  Tempos and time signatures will be copied but ignored.\n\n  Args:\n    note_sequence: A music_pb2.NoteSequence protocol buffer.\n    steps_per_second: Each second will be divided into this many quantized time\n      steps.\n\n  Returns:\n    A copy of the original NoteSequence, with quantized times added.\n\n  Raises:\n    NegativeTimeError: If a note or chord occurs at a negative time.\n  \"\"\"\n  qns = copy.deepcopy(note_sequence)\n  qns.quantization_info.steps_per_second = steps_per_second\n\n  qns.total_quantized_steps = quantize_to_step(qns.total_time, steps_per_second)\n  _quantize_notes(qns, steps_per_second)\n\n  return qns\n\n\ndef transpose_note_sequence(ns,\n                            amount,\n                            min_allowed_pitch=constants.MIN_MIDI_PITCH,\n                            max_allowed_pitch=constants.MAX_MIDI_PITCH,\n                            transpose_chords=True,\n                            in_place=False):\n  \"\"\"Transposes note sequence by the specified amount, deleting out-of-bound notes.\n\n  Args:\n    ns: The NoteSequence proto to be transposed.\n    amount: Number of half-steps to transpose up or down.\n    min_allowed_pitch: Minimum pitch allowed in transposed NoteSequence. Notes\n      assigned lower pitches will be deleted.\n    max_allowed_pitch: Maximum pitch allowed in transposed NoteSequence. Notes\n      assigned higher pitches will be deleted.\n    transpose_chords: If True, also transpose chord symbol text annotations. If\n      False, chord symbols will be removed.\n    in_place: If True, the input note_sequence is edited directly.\n\n  Returns:\n    The transposed NoteSequence and a count of how many notes were deleted.\n\n  Raises:\n    ChordSymbolError: If a chord symbol is unable to be transposed.\n  \"\"\"\n  if not in_place:\n    new_ns = music_pb2.NoteSequence()\n    new_ns.CopyFrom(ns)\n    ns = new_ns\n\n  new_note_list = []\n  deleted_note_count = 0\n  end_time = 0\n\n  for note in ns.notes:\n    new_pitch = note.pitch + amount\n    if (min_allowed_pitch <= new_pitch <= max_allowed_pitch) or note.is_drum:\n      end_time = max(end_time, note.end_time)\n\n      if not note.is_drum:\n        note.pitch += amount\n\n        # The pitch name, if present, will no longer be valid.\n        note.pitch_name = UNKNOWN_PITCH_NAME\n\n      new_note_list.append(note)\n    else:\n      deleted_note_count += 1\n\n  if deleted_note_count > 0:\n    del ns.notes[:]\n    ns.notes.extend(new_note_list)\n\n    # Since notes were deleted, we may need to update the total time.\n    ns.total_time = end_time\n\n  if transpose_chords:\n    # Also update the chord symbol text annotations. 
This can raise a\n    # ChordSymbolError if a chord symbol cannot be interpreted.\n    for ta in ns.text_annotations:\n      if ta.annotation_type == CHORD_SYMBOL and ta.text != constants.NO_CHORD:\n        ta.text = chord_symbols_lib.transpose_chord_symbol(ta.text, amount)\n  else:\n    # Remove chord symbol text annotations.\n    text_annotations_to_keep = []\n    for ta in ns.text_annotations:\n      if ta.annotation_type != CHORD_SYMBOL:\n        text_annotations_to_keep.append(ta)\n    if len(text_annotations_to_keep) < len(ns.text_annotations):\n      del ns.text_annotations[:]\n      ns.text_annotations.extend(text_annotations_to_keep)\n\n  # Also transpose key signatures.\n  for ks in ns.key_signatures:\n    ks.key = (ks.key + amount) % 12\n\n  return ns, deleted_note_count\n\n\ndef _clamp_transpose(transpose_amount, ns_min_pitch, ns_max_pitch,\n                     min_allowed_pitch, max_allowed_pitch):\n  \"\"\"Clamps the specified transpose amount to keep a ns in the desired bounds.\n\n  Args:\n    transpose_amount: Number of steps to transpose up or down.\n    ns_min_pitch: The lowest pitch in the target note sequence.\n    ns_max_pitch: The highest pitch in the target note sequence.\n    min_allowed_pitch: The lowest pitch that should be allowed in the transposed\n      note sequence.\n    max_allowed_pitch: The highest pitch that should be allowed in the\n      transposed note sequence.\n\n  Returns:\n    A new transpose amount that, if applied to the target note sequence, will\n    keep all notes within the range [MIN_PITCH, MAX_PITCH].\n  \"\"\"\n  if transpose_amount < 0:\n    transpose_amount = -min(ns_min_pitch - min_allowed_pitch,\n                            abs(transpose_amount))\n  else:\n    transpose_amount = min(max_allowed_pitch - ns_max_pitch, transpose_amount)\n  return transpose_amount\n\n\ndef augment_note_sequence(ns,\n                          min_stretch_factor,\n                          max_stretch_factor,\n                          min_transpose,\n                          max_transpose,\n                          min_allowed_pitch=constants.MIN_MIDI_PITCH,\n                          max_allowed_pitch=constants.MAX_MIDI_PITCH,\n                          delete_out_of_range_notes=False):\n  \"\"\"Modifies a NoteSequence with random stretching and transposition.\n\n  This method can be used to augment a dataset for training neural nets.\n  Note that the provided ns is modified in place.\n\n  Args:\n    ns: A NoteSequence proto to be augmented.\n    min_stretch_factor: Minimum amount to stretch/compress the NoteSequence.\n    max_stretch_factor: Maximum amount to stretch/compress the NoteSequence.\n    min_transpose: Minimum number of steps to transpose the NoteSequence.\n    max_transpose: Maximum number of steps to transpose the NoteSequence.\n    min_allowed_pitch: The lowest pitch permitted (i.e., for regular piano this\n      should be set to 21.)\n    max_allowed_pitch: The highest pitch permitted (i.e., for regular piano this\n      should be set to 108.)\n    delete_out_of_range_notes: If true, a transposition amount will be chosen on\n      the interval [min_transpose, max_transpose], and any out-of-bounds notes\n      will be deleted. 
If false, the interval [min_transpose, max_transpose]\n will be truncated such that no out-of-bounds notes will ever be created.\n TODO(dei): Add support for specifying custom distributions over possible\n values of note stretch and transposition amount.\n\n Returns:\n The randomly augmented NoteSequence.\n\n Raises:\n ValueError: If mins in ranges are larger than maxes.\n \"\"\"\n if min_stretch_factor > max_stretch_factor:\n raise ValueError('min_stretch_factor should be <= max_stretch_factor')\n if min_allowed_pitch > max_allowed_pitch:\n raise ValueError('min_allowed_pitch should be <= max_allowed_pitch')\n if min_transpose > max_transpose:\n raise ValueError('min_transpose should be <= max_transpose')\n\n if ns.notes:\n # Choose random factor by which to stretch or compress note sequence.\n stretch_factor = random.uniform(min_stretch_factor, max_stretch_factor)\n ns = stretch_note_sequence(ns, stretch_factor, in_place=True)\n\n # Choose amount by which to translate the note sequence.\n if delete_out_of_range_notes:\n # If transposition takes a note outside of the allowed note bounds,\n # we will just delete it.\n transposition_amount = random.randint(min_transpose, max_transpose)\n else:\n # Prevent transposition from taking a note outside of the allowed note\n # bounds by clamping the range we sample from.\n ns_min_pitch = min(ns.notes, key=lambda note: note.pitch).pitch\n ns_max_pitch = max(ns.notes, key=lambda note: note.pitch).pitch\n\n if ns_min_pitch < min_allowed_pitch:\n tf.logging.warn(\n 'A note sequence has some pitch=%d, which is less '\n 'than min_allowed_pitch=%d' % (ns_min_pitch, min_allowed_pitch))\n if ns_max_pitch > max_allowed_pitch:\n tf.logging.warn(\n 'A note sequence has some pitch=%d, which is greater '\n 'than max_allowed_pitch=%d' % (ns_max_pitch, max_allowed_pitch))\n\n min_transpose = _clamp_transpose(min_transpose, ns_min_pitch,\n ns_max_pitch, min_allowed_pitch,\n max_allowed_pitch)\n max_transpose = _clamp_transpose(max_transpose, ns_min_pitch,\n ns_max_pitch, min_allowed_pitch,\n max_allowed_pitch)\n transposition_amount = random.randint(min_transpose, max_transpose)\n\n ns, _ = transpose_note_sequence(\n ns,\n transposition_amount,\n min_allowed_pitch,\n max_allowed_pitch,\n in_place=True)\n\n return ns\n\n\ndef stretch_note_sequence(note_sequence, stretch_factor, in_place=False):\n \"\"\"Apply a constant temporal stretch to a NoteSequence proto.\n\n Args:\n note_sequence: The NoteSequence to stretch.\n stretch_factor: How much to stretch the NoteSequence. Values greater than\n one increase the length of the NoteSequence (making it \"slower\"). Values\n less than one decrease the length of the NoteSequence (making it\n \"faster\").\n in_place: If True, the input note_sequence is edited directly.\n\n Returns:\n A stretched copy of the original NoteSequence.\n\n Raises:\n QuantizationStatusError: If the `note_sequence` is quantized. 
Only\n unquantized NoteSequences can be stretched.\n \"\"\"\n if is_quantized_sequence(note_sequence):\n raise QuantizationStatusError(\n 'Can only stretch unquantized NoteSequence.')\n\n if in_place:\n stretched_sequence = note_sequence\n else:\n stretched_sequence = music_pb2.NoteSequence()\n stretched_sequence.CopyFrom(note_sequence)\n\n if stretch_factor == 1.0:\n return stretched_sequence\n\n # Stretch all notes.\n for note in stretched_sequence.notes:\n note.start_time *= stretch_factor\n note.end_time *= stretch_factor\n stretched_sequence.total_time *= stretch_factor\n\n # Stretch all other event times.\n events = itertools.chain(\n stretched_sequence.time_signatures, stretched_sequence.key_signatures,\n stretched_sequence.tempos, stretched_sequence.pitch_bends,\n stretched_sequence.control_changes, stretched_sequence.text_annotations)\n for event in events:\n event.time *= stretch_factor\n\n # Stretch tempos.\n for tempo in stretched_sequence.tempos:\n tempo.qpm /= stretch_factor\n\n return stretched_sequence\n\n\ndef adjust_notesequence_times(ns, time_func, minimum_duration=None):\n \"\"\"Adjusts notesequence timings given an adjustment function.\n\n Note that only notes, control changes, and pitch bends are adjusted. All other\n events are ignored.\n\n If the adjusted version of a note ends before or at the same time it begins,\n it will be skipped.\n\n Args:\n ns: The NoteSequence to adjust.\n time_func: A function that takes a time (in seconds) and returns an adjusted\n version of that time. This function is expected to be monotonic, i.e. if\n `t1 <= t2` then `time_func(t1) <= time_func(t2)`. In addition, if\n `t >= 0` then it should also be true that `time_func(t) >= 0`. The\n monotonicity property is not checked for all pairs of event times, only\n the start and end times of each note, but you may get strange results if\n `time_func` is non-monotonic.\n minimum_duration: If time_func results in a duration of 0, instead\n substitute this duration and do not increment the skipped_notes counter.\n If None, the note will be skipped.\n\n Raises:\n InvalidTimeAdjustmentError: If a note has an adjusted end time that is\n before its start time, or if any event times are shifted before zero.\n\n Returns:\n adjusted_ns: A new NoteSequence with adjusted times.\n skipped_notes: A count of how many notes were skipped.\n \"\"\"\n adjusted_ns = copy.deepcopy(ns)\n\n # Iterate through the original NoteSequence notes to make it easier to drop\n # skipped notes from the adjusted NoteSequence.\n adjusted_ns.total_time = 0\n skipped_notes = 0\n del adjusted_ns.notes[:]\n for note in ns.notes:\n start_time = time_func(note.start_time)\n end_time = time_func(note.end_time)\n\n if start_time == end_time:\n if minimum_duration:\n tf.logging.warn(\n 'Adjusting note duration of 0 to new minimum duration of %f. '\n 'Original start: %f, end %f. New start %f, end %f.',\n minimum_duration, note.start_time, note.end_time, start_time,\n end_time)\n end_time += minimum_duration\n else:\n tf.logging.warn(\n 'Skipping note that ends before or at the same time it begins. '\n 'Original start: %f, end %f. New start %f, end %f.',\n note.start_time, note.end_time, start_time, end_time)\n skipped_notes += 1\n continue\n\n if end_time < start_time:\n raise InvalidTimeAdjustmentError(\n 'Tried to adjust end time to before start time. '\n 'Original start: %f, end %f. New start %f, end %f.' 
%\n          (note.start_time, note.end_time, start_time, end_time))\n\n    if start_time < 0:\n      raise InvalidTimeAdjustmentError(\n          'Tried to adjust note start time to before 0 '\n          '(original: %f, adjusted: %f)' % (note.start_time, start_time))\n\n    if end_time < 0:\n      raise InvalidTimeAdjustmentError(\n          'Tried to adjust note end time to before 0 '\n          '(original: %f, adjusted: %f)' % (note.end_time, end_time))\n\n    if end_time > adjusted_ns.total_time:\n      adjusted_ns.total_time = end_time\n\n    adjusted_note = adjusted_ns.notes.add()\n    adjusted_note.MergeFrom(note)\n    adjusted_note.start_time = start_time\n    adjusted_note.end_time = end_time\n\n  events = itertools.chain(\n      adjusted_ns.control_changes,\n      adjusted_ns.pitch_bends,\n      adjusted_ns.time_signatures,\n      adjusted_ns.key_signatures,\n      adjusted_ns.text_annotations\n  )\n\n  for event in events:\n    time = time_func(event.time)\n    if time < 0:\n      raise InvalidTimeAdjustmentError(\n          'Tried to adjust event time to before 0 '\n          '(original: %f, adjusted: %f)' % (event.time, time))\n    event.time = time\n\n  # Adjusting tempos to accommodate arbitrary time adjustments is too\n  # complicated. Just delete them.\n  del adjusted_ns.tempos[:]\n\n  return adjusted_ns, skipped_notes\n\n\ndef rectify_beats(sequence, beats_per_minute):\n  \"\"\"Warps a NoteSequence so that beats happen at regular intervals.\n\n  Args:\n    sequence: The source NoteSequence. Will not be modified.\n    beats_per_minute: Desired BPM of the rectified sequence.\n\n  Returns:\n    rectified_sequence: A copy of `sequence` with times adjusted so that beats\n      occur at regular intervals with BPM `beats_per_minute`.\n    alignment: An N-by-2 array where each row contains the original and\n      rectified times for a beat.\n\n  Raises:\n    QuantizationStatusError: If `sequence` is quantized.\n    RectifyBeatsError: If `sequence` has no beat annotations.\n  \"\"\"\n  if is_quantized_sequence(sequence):\n    raise QuantizationStatusError(\n        'Cannot rectify beat times for quantized NoteSequence.')\n\n  beat_times = [\n      ta.time for ta in sequence.text_annotations\n      if ta.annotation_type == music_pb2.NoteSequence.TextAnnotation.BEAT\n      and ta.time <= sequence.total_time\n  ]\n\n  if not beat_times:\n    raise RectifyBeatsError('No beats in NoteSequence.')\n\n  # Add a beat at the very beginning and end of the sequence and dedupe.\n  sorted_beat_times = [0.0] + sorted(beat_times) + [sequence.total_time]\n  unique_beat_times = np.array([\n      sorted_beat_times[i] for i in range(len(sorted_beat_times))\n      if i == 0 or sorted_beat_times[i] > sorted_beat_times[i - 1]\n  ])\n  num_beats = len(unique_beat_times)\n\n  # Use linear interpolation to map original times to rectified times.\n  seconds_per_beat = 60.0 / beats_per_minute\n  rectified_beat_times = seconds_per_beat * np.arange(num_beats)\n  def time_func(t):\n    return np.interp(t, unique_beat_times, rectified_beat_times,\n                     left=0.0, right=sequence.total_time)\n\n  rectified_sequence, _ = adjust_notesequence_times(sequence, time_func)\n\n  # Sequence probably shouldn't have time signatures but delete them just to be\n  # sure, and add a single tempo.\n  del rectified_sequence.time_signatures[:]\n  rectified_sequence.tempos.add(qpm=beats_per_minute)\n\n  return rectified_sequence, np.array([unique_beat_times,\n                                       rectified_beat_times]).T\n\n\n# Constants for processing the note/sustain stream.\n# The order here matters because we want to process 'on' events before we\n# process 'off' events, and we want to process sustain events before note\n# events.\n_SUSTAIN_ON = 0\n_SUSTAIN_OFF = 1\n_NOTE_ON = 2\n_NOTE_OFF = 
3\n\n\ndef apply_sustain_control_changes(note_sequence, sustain_control_number=64):\n \"\"\"Returns a new NoteSequence with sustain pedal control changes applied.\n\n Extends each note within a sustain to either the beginning of the next note of\n the same pitch or the end of the sustain period, whichever happens first. This\n is done on a per instrument basis, so notes are only affected by sustain\n events for the same instrument.\n\n Args:\n note_sequence: The NoteSequence for which to apply sustain. This object will\n not be modified.\n sustain_control_number: The MIDI control number for sustain pedal. Control\n events with this number and value 0-63 will be treated as sustain pedal\n OFF events, and control events with this number and value 64-127 will be\n treated as sustain pedal ON events.\n\n Returns:\n A copy of `note_sequence` but with note end times extended to account for\n sustain.\n\n Raises:\n QuantizationStatusError: If `note_sequence` is quantized. Sustain can\n only be applied to unquantized note sequences.\n \"\"\"\n if is_quantized_sequence(note_sequence):\n raise QuantizationStatusError(\n 'Can only apply sustain to unquantized NoteSequence.')\n\n sequence = copy.deepcopy(note_sequence)\n\n # Sort all note on/off and sustain on/off events.\n events = []\n events.extend([(note.start_time, _NOTE_ON, note) for note in sequence.notes])\n events.extend([(note.end_time, _NOTE_OFF, note) for note in sequence.notes])\n\n for cc in sequence.control_changes:\n if cc.control_number != sustain_control_number:\n continue\n value = cc.control_value\n if value < 0 or value > 127:\n tf.logging.warn('Sustain control change has out of range value: %d',\n value)\n if value >= 64:\n events.append((cc.time, _SUSTAIN_ON, cc))\n elif value < 64:\n events.append((cc.time, _SUSTAIN_OFF, cc))\n\n # Sort, using the event type constants to ensure the order events are\n # processed.\n events.sort(key=operator.itemgetter(0))\n\n # Lists of active notes, keyed by instrument.\n active_notes = collections.defaultdict(list)\n # Whether sustain is active for a given instrument.\n sus_active = collections.defaultdict(lambda: False)\n\n # Iterate through all sustain on/off and note on/off events in order.\n time = 0\n for time, event_type, event in events:\n if event_type == _SUSTAIN_ON:\n sus_active[event.instrument] = True\n elif event_type == _SUSTAIN_OFF:\n sus_active[event.instrument] = False\n # End all notes for the instrument that were being extended.\n new_active_notes = []\n for note in active_notes[event.instrument]:\n if note.end_time < time:\n # This note was being extended because of sustain.\n # Update the end time and don't keep it in the list.\n note.end_time = time\n if time > sequence.total_time:\n sequence.total_time = time\n else:\n # This note is actually still active, keep it.\n new_active_notes.append(note)\n active_notes[event.instrument] = new_active_notes\n elif event_type == _NOTE_ON:\n if sus_active[event.instrument]:\n # If sustain is on, end all previous notes with the same pitch.\n new_active_notes = []\n for note in active_notes[event.instrument]:\n if note.pitch == event.pitch:\n note.end_time = time\n if note.start_time == note.end_time:\n # This note now has no duration because another note of the same\n # pitch started at the same time. 
Only one of these notes should\n # be preserved, so delete this one.\n # TODO(fjord): A more correct solution would probably be to\n # preserve both notes and make the same duration, but that is a\n # little more complicated to implement. Will keep this solution\n # until we find that we need the more complex one.\n sequence.notes.remove(note)\n else:\n new_active_notes.append(note)\n active_notes[event.instrument] = new_active_notes\n # Add this new note to the list of active notes.\n active_notes[event.instrument].append(event)\n elif event_type == _NOTE_OFF:\n if sus_active[event.instrument]:\n # Note continues until another note of the same pitch or sustain ends.\n pass\n else:\n # Remove this particular note from the active list.\n # It may have already been removed if a note of the same pitch was\n # played when sustain was active.\n if event in active_notes[event.instrument]:\n active_notes[event.instrument].remove(event)\n else:\n raise AssertionError('Invalid event_type: %s' % event_type)\n\n # End any notes that were still active due to sustain.\n for instrument in active_notes.values():\n for note in instrument:\n note.end_time = time\n sequence.total_time = time\n\n return sequence\n\n\ndef infer_dense_chords_for_sequence(sequence,\n instrument=None,\n min_notes_per_chord=3):\n \"\"\"Infers chords for a NoteSequence and adds them as TextAnnotations.\n\n For each set of simultaneously-active notes in a NoteSequence (optionally for\n only one instrument), infers a chord symbol and adds it to NoteSequence as a\n TextAnnotation. Every change in the set of active notes will result in a new\n chord symbol unless the new set is smaller than `min_notes_per_chord`.\n\n If `sequence` is quantized, simultaneity will be determined by quantized steps\n instead of time.\n\n Not to be confused with the chord inference in magenta.music.chord_inference\n that attempts to infer a more natural chord sequence with changes at regular\n metric intervals.\n\n Args:\n sequence: The NoteSequence for which chords will be inferred. Will be\n modified in place.\n instrument: The instrument number whose notes will be used for chord\n inference. 
If None, all instruments will be used.\n min_notes_per_chord: The minimum number of simultaneous notes for which to\n infer a chord.\n\n Raises:\n ChordSymbolError: If a chord cannot be determined for a set of\n simultaneous notes in `sequence`.\n \"\"\"\n notes = [\n note for note in sequence.notes if not note.is_drum and\n (instrument is None or note.instrument == instrument)\n ]\n sorted_notes = sorted(notes, key=lambda note: note.start_time)\n\n # If the sequence is quantized, use quantized steps instead of time.\n if is_quantized_sequence(sequence):\n note_start = lambda note: note.quantized_start_step\n note_end = lambda note: note.quantized_end_step\n else:\n note_start = lambda note: note.start_time\n note_end = lambda note: note.end_time\n\n # Sort all note start and end events.\n onsets = [\n (note_start(note), idx, False) for idx, note in enumerate(sorted_notes)\n ]\n offsets = [\n (note_end(note), idx, True) for idx, note in enumerate(sorted_notes)\n ]\n events = sorted(onsets + offsets)\n\n current_time = 0\n current_figure = constants.NO_CHORD\n active_notes = set()\n\n for time, idx, is_offset in events:\n if time > current_time:\n active_pitches = set(sorted_notes[idx].pitch for idx in active_notes)\n if len(active_pitches) >= min_notes_per_chord:\n # Infer a chord symbol for the active pitches.\n figure = chord_symbols_lib.pitches_to_chord_symbol(active_pitches)\n\n if figure != current_figure:\n # Add a text annotation to the sequence.\n text_annotation = sequence.text_annotations.add()\n text_annotation.text = figure\n text_annotation.annotation_type = CHORD_SYMBOL\n if is_quantized_sequence(sequence):\n text_annotation.time = (\n current_time * sequence.quantization_info.steps_per_quarter)\n text_annotation.quantized_step = current_time\n else:\n text_annotation.time = current_time\n\n current_figure = figure\n\n current_time = time\n if is_offset:\n active_notes.remove(idx)\n else:\n active_notes.add(idx)\n\n assert not active_notes\n\n\nPianoroll = collections.namedtuple( # pylint:disable=invalid-name\n 'Pianoroll',\n ['active', 'weights', 'onsets', 'onset_velocities', 'active_velocities',\n 'offsets', 'control_changes'])\n\n\ndef sequence_to_pianoroll(\n sequence,\n frames_per_second,\n min_pitch,\n max_pitch,\n # pylint: disable=unused-argument\n min_velocity=constants.MIN_MIDI_PITCH,\n # pylint: enable=unused-argument\n max_velocity=constants.MAX_MIDI_PITCH,\n add_blank_frame_before_onset=False,\n onset_upweight=ONSET_UPWEIGHT,\n onset_window=ONSET_WINDOW,\n onset_length_ms=0,\n offset_length_ms=0,\n onset_mode='window',\n onset_delay_ms=0.0,\n min_frame_occupancy_for_label=0.0,\n onset_overlap=True):\n \"\"\"Transforms a NoteSequence to a pianoroll assuming a single instrument.\n\n This function uses floating point internally and may return different results\n on different platforms or with different compiler settings or with\n different compilers.\n\n Args:\n sequence: The NoteSequence to convert.\n frames_per_second: How many frames per second.\n min_pitch: pitches in the sequence below this will be ignored.\n max_pitch: pitches in the sequence above this will be ignored.\n min_velocity: minimum velocity for the track, currently unused.\n max_velocity: maximum velocity for the track, not just the local sequence,\n used to globally normalize the velocities between [0, 1].\n add_blank_frame_before_onset: Always have a blank frame before onsets.\n onset_upweight: Factor by which to increase the weight assigned to onsets.\n onset_window: Fixed window size to 
activate around onsets in `onsets` and\n      `onset_velocities`. Used only if `onset_mode` is 'window'.\n    onset_length_ms: Length in milliseconds for the onset. Used only if\n      onset_mode is 'length_ms'.\n    offset_length_ms: Length in milliseconds for the offset. Used only if\n      offset_mode is 'length_ms'.\n    onset_mode: Either 'window', to use onset_window, or 'length_ms' to use\n      onset_length_ms.\n    onset_delay_ms: Number of milliseconds to delay the onset. Can be negative.\n    min_frame_occupancy_for_label: floating point value in range [0, 1]; a note\n      must occupy at least this fraction of a frame for the frame to be given\n      a label with the note.\n    onset_overlap: Whether or not the onsets overlap with the frames.\n\n  Raises:\n    ValueError: When an unknown onset_mode is supplied.\n\n  Returns:\n    active: Active note pianoroll as a 2D array.\n    weights: Weights to be used when calculating loss against roll.\n    onsets: An onset-only pianoroll as a 2D array.\n    onset_velocities: Velocities of onsets scaled from [0, 1].\n    active_velocities: Velocities of active notes scaled from [0, 1].\n    offsets: An offset-only pianoroll as a 2D array.\n    control_changes: Control change onsets as a 2D array (time, control number)\n      with 0 when there is no onset and (control_value + 1) when there is.\n  \"\"\"\n  roll = np.zeros((int(sequence.total_time * frames_per_second + 1),\n                   max_pitch - min_pitch + 1),\n                  dtype=np.float32)\n\n  roll_weights = np.ones_like(roll)\n\n  onsets = np.zeros_like(roll)\n  offsets = np.zeros_like(roll)\n\n  control_changes = np.zeros(\n      (int(sequence.total_time * frames_per_second + 1), 128), dtype=np.int32)\n\n  def frames_from_times(start_time, end_time):\n    \"\"\"Converts start/end times to start/end frames.\"\"\"\n    # Will round down because note may start or end in the middle of the frame.\n    start_frame = int(start_time * frames_per_second)\n    start_frame_occupancy = (start_frame + 1 - start_time * frames_per_second)\n    # check for > 0.0 to avoid possible numerical issues\n    if (min_frame_occupancy_for_label > 0.0 and\n        start_frame_occupancy < min_frame_occupancy_for_label):\n      start_frame += 1\n\n    end_frame = int(math.ceil(end_time * frames_per_second))\n    end_frame_occupancy = end_time * frames_per_second - start_frame - 1\n    if (min_frame_occupancy_for_label > 0.0 and\n        end_frame_occupancy < min_frame_occupancy_for_label):\n      end_frame -= 1\n      # can be a problem for very short notes\n      end_frame = max(start_frame, end_frame)\n\n    return start_frame, end_frame\n\n  velocities_roll = np.zeros_like(roll, dtype=np.float32)\n\n  for note in sorted(sequence.notes, key=lambda n: n.start_time):\n    if note.pitch < min_pitch or note.pitch > max_pitch:\n      tf.logging.warn('Skipping out of range pitch: %d', note.pitch)\n      continue\n    start_frame, end_frame = frames_from_times(note.start_time, note.end_time)\n\n    # label onset events. 
Use a window size of onset_window to account for\n    # rounding issues in the start_frame computation.\n    onset_start_time = note.start_time + onset_delay_ms / 1000.\n    onset_end_time = note.end_time + onset_delay_ms / 1000.\n    if onset_mode == 'window':\n      onset_start_frame_without_window, _ = frames_from_times(\n          onset_start_time, onset_end_time)\n\n      onset_start_frame = max(0,\n                              onset_start_frame_without_window - onset_window)\n      onset_end_frame = min(onsets.shape[0],\n                            onset_start_frame_without_window + onset_window + 1)\n    elif onset_mode == 'length_ms':\n      onset_end_time = min(onset_end_time,\n                           onset_start_time + onset_length_ms / 1000.)\n      onset_start_frame, onset_end_frame = frames_from_times(\n          onset_start_time, onset_end_time)\n    else:\n      raise ValueError('Unknown onset mode: {}'.format(onset_mode))\n\n    # label offset events.\n    offset_start_time = min(note.end_time,\n                            sequence.total_time - offset_length_ms / 1000.)\n    offset_end_time = offset_start_time + offset_length_ms / 1000.\n    offset_start_frame, offset_end_frame = frames_from_times(\n        offset_start_time, offset_end_time)\n    offset_end_frame = max(offset_end_frame, offset_start_frame + 1)\n\n    if not onset_overlap:\n      start_frame = onset_end_frame\n      end_frame = max(start_frame + 1, end_frame)\n\n    offsets[offset_start_frame:offset_end_frame, note.pitch - min_pitch] = 1.0\n    onsets[onset_start_frame:onset_end_frame, note.pitch - min_pitch] = 1.0\n    roll[start_frame:end_frame, note.pitch - min_pitch] = 1.0\n\n    if note.velocity > max_velocity:\n      raise ValueError('Note velocity exceeds max velocity: %d > %d' %\n                       (note.velocity, max_velocity))\n\n    velocities_roll[start_frame:end_frame, note.pitch -\n                    min_pitch] = note.velocity / max_velocity\n    roll_weights[onset_start_frame:onset_end_frame, note.pitch - min_pitch] = (\n        onset_upweight)\n    roll_weights[onset_end_frame:end_frame, note.pitch - min_pitch] = [\n        onset_upweight / x for x in range(1, end_frame - onset_end_frame + 1)\n    ]\n\n    if add_blank_frame_before_onset:\n      if start_frame > 0:\n        roll[start_frame - 1, note.pitch - min_pitch] = 0.0\n        roll_weights[start_frame - 1, note.pitch - min_pitch] = 1.0\n\n  for cc in sequence.control_changes:\n    frame, _ = frames_from_times(cc.time, 0)\n    if frame < len(control_changes):\n      control_changes[frame, cc.control_number] = cc.control_value + 1\n\n  return Pianoroll(\n      active=roll,\n      weights=roll_weights,\n      onsets=onsets,\n      onset_velocities=velocities_roll * onsets,\n      active_velocities=velocities_roll,\n      offsets=offsets,\n      control_changes=control_changes)\n\n\ndef pianoroll_to_note_sequence(frames,\n                               frames_per_second,\n                               min_duration_ms,\n                               velocity=70,\n                               instrument=0,\n                               program=0,\n                               qpm=constants.DEFAULT_QUARTERS_PER_MINUTE,\n                               min_midi_pitch=constants.MIN_MIDI_PITCH,\n                               onset_predictions=None,\n                               offset_predictions=None,\n                               velocity_values=None):\n  \"\"\"Convert frames to a NoteSequence.\"\"\"\n  frame_length_seconds = 1 / frames_per_second\n\n  sequence = music_pb2.NoteSequence()\n  sequence.tempos.add().qpm = qpm\n  sequence.ticks_per_quarter = constants.STANDARD_PPQ\n\n  pitch_start_step = {}\n  onset_velocities = velocity * np.ones(\n      constants.MAX_MIDI_PITCH, dtype=np.int32)\n\n  # Add silent frame at the end so we can do a final loop and terminate any\n  # notes that are still active.\n  frames = np.append(frames, [np.zeros(frames[0].shape)], 0)\n  if velocity_values is None:\n    velocity_values = velocity * np.ones_like(frames, dtype=np.int32)\n\n  if onset_predictions is not None:\n    onset_predictions = np.append(onset_predictions,\n                                  [np.zeros(onset_predictions[0].shape)], 
0)\n # Ensure that any frame with an onset prediction is considered active.\n frames = np.logical_or(frames, onset_predictions)\n\n if offset_predictions is not None:\n offset_predictions = np.append(offset_predictions,\n [np.zeros(offset_predictions[0].shape)], 0)\n # If the frame and offset are both on, then turn it off\n frames[np.where(np.logical_and(frames > 0, offset_predictions > 0))] = 0\n\n def end_pitch(pitch, end_frame):\n \"\"\"End an active pitch.\"\"\"\n start_time = pitch_start_step[pitch] * frame_length_seconds\n end_time = end_frame * frame_length_seconds\n\n if (end_time - start_time) * 1000 >= min_duration_ms:\n note = sequence.notes.add()\n note.start_time = start_time\n note.end_time = end_time\n note.pitch = pitch + min_midi_pitch\n note.velocity = onset_velocities[pitch]\n note.instrument = instrument\n note.program = program\n\n del pitch_start_step[pitch]\n\n def unscale_velocity(velocity):\n \"\"\"Translates a velocity estimate to a MIDI velocity value.\"\"\"\n unscaled = max(min(velocity, 1.), 0) * 80. + 10.\n if math.isnan(unscaled):\n return 0\n return int(unscaled)\n\n def process_active_pitch(pitch, i):\n \"\"\"Process a pitch being active in a given frame.\"\"\"\n if pitch not in pitch_start_step:\n if onset_predictions is not None:\n # If onset predictions were supplied, only allow a new note to start\n # if we've predicted an onset.\n if onset_predictions[i, pitch]:\n pitch_start_step[pitch] = i\n onset_velocities[pitch] = unscale_velocity(velocity_values[i, pitch])\n else:\n # Even though the frame is active, the onset predictor doesn't\n # say there should be an onset, so ignore it.\n pass\n else:\n pitch_start_step[pitch] = i\n else:\n if onset_predictions is not None:\n # pitch is already active, but if this is a new onset, we should end\n # the note and start a new one.\n if (onset_predictions[i, pitch] and\n not onset_predictions[i - 1, pitch]):\n end_pitch(pitch, i)\n pitch_start_step[pitch] = i\n onset_velocities[pitch] = unscale_velocity(velocity_values[i, pitch])\n\n for i, frame in enumerate(frames):\n for pitch, active in enumerate(frame):\n if active:\n process_active_pitch(pitch, i)\n elif pitch in pitch_start_step:\n end_pitch(pitch, i)\n\n sequence.total_time = len(frames) * frame_length_seconds\n if sequence.notes:\n assert sequence.total_time >= sequence.notes[-1].end_time\n\n return sequence\n" ]
[ [ "numpy.ones_like", "numpy.logical_and", "numpy.arange", "tensorflow.logging.warn", "numpy.ones", "numpy.logical_or", "numpy.zeros_like", "numpy.interp", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
TACC/GravIT
[ "0a79dc74036c11669075198e01b30a92a8150693" ]
[ "pygvt/cosmology_plus.py" ]
[ "#\n# cosmology_plus.py\n#\n# read and render the first two levels of the cosmology plus\n# enzo dataset. There are actually 5 levels of refinement\n# but the first two levels contain 37 grids. \n#\n# This script is rough. It is only intended to test the python\n# wrappers for amr grids. \n# to run this from inside the interpreter do\n# exec(open('cosmology_plus.py').read())\n#\n# Import the required libs.\n#\nimport gvt\nimport h5py\nimport os\nfrom mpi4py import MPI\nimport numpy as np\n#from vtk import vtkStructuredPointsReader, vtkStructuredPoints\n#\n# initialize GraviT\n#\ngvt.gvtInit()\n#\n# MPI business\n#\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnumprocs = comm.size\n#\nprint(\" numprocs \" + str(numprocs) + \" rank \" + str(rank))\n#\n#\n# where are the data\n#\ndata_dir = os.path.join(os.environ['WORK'],\"Projects/GraviT/data/enzo_cosmology_plus\")\ngravit_dir = os.path.join(os.environ['WORK'],\"Projects/GraviT\")\n# going to want to run this from the data directory\n# so all the relative links work\nimagedir = os.getcwd()\nos.chdir(data_dir)\n# input files \nvolumefile = os.path.join(data_dir,\"DD0046/DD0046.hierarchy.hdf5\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/Grayscale.orig.cmap\")\n#otffile = os.path.join(gravit_dir,\"data/colormaps/Grayscale.orig.omap\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/CoolWarm.cmap\")\notffile = os.path.join(gravit_dir,\"data/colormaps/blue2cyan.omap\")\n#otffile = os.path.join(gravit_dir,\"data/colormaps/ramp.omap\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/blue2cyan.cmap\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/IceFire.cmap\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/Jet.cmap\")\nctffile = os.path.join(gravit_dir,\"data/colormaps/coldhot.cmap\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/orange-5.cmap\")\n#otffile = os.path.join(gravit_dir,\"data/colormaps/orange-5.omap\")\n#ctffile = os.path.join(gravit_dir,\"data/colormaps/Balls.cmap\")\n#otffile = os.path.join(gravit_dir,\"data/colormaps/Balls.omap\")\n#\nroot=h5py.File(volumefile)\n# the number of domains is the number of grids in level 0\nlevel0 = root['Level0']\nnumberofdomains = level0.attrs[\"NumberOfGrids\"]\nlevel0grids = list(level0.keys())\nlow_scalar = np.finfo('float32').max\nhigh_scalar = np.finfo('float32').min\nsamplingrate = 1.0\nk = 0\n#for domain in range(1):\nfor domain in range(numberofdomains):\n level = 0 \n if(domain%numprocs == rank): # read the domain (grid)\n nodename = \"enzo_cosmology_plus_domain_\" + repr(domain)\n# print(\" creating node \" + nodename)\n gvt.createVolume(nodename,True)\n gridname = level0grids[domain]\n grid = level0[gridname]\n griddata = grid.get('GridData')\n density = griddata['Density']\n with density.astype('float32'):\n scalars = density[()]\n scalardims = np.array(scalars.shape,dtype=np.int32)\n low_scalar= min(low_scalar,scalars.min())\n high_scalar= max(high_scalar,scalars.max())\n #dimensions = grid['GridDimension'].value\n startindex = grid['GridStartIndex'][()]\n endindex = grid['GridEndIndex'][()]\n dimensions = (endindex - startindex)+1 \n #dimensions = scalardims\n origin = grid['GridGlobalPosition'][()]\n left = grid['GridLeftEdge'][()]\n right = grid['GridRightEdge'][()]\n spacing = (right - left)/(dimensions)\n right = left + spacing*(dimensions)\n bounds = np.array([left[0],right[0],left[1],right[1],left[2],right[2]])\n # stuff the level grid full\n# print(\"\\tdims \"+repr(dimensions[:]))\n# print(\"\\tsdims \"+repr(scalardims[:]))\n# 
print(\"\\tleft \" + repr(left[:]))\n# print(\"\\tspacing \" + repr(spacing))\n# print(\"\\tsampling \" + repr(samplingrate))\n# print(\"\\tbounds \" + repr(bounds))\n #fltptr = scalars.flatten()\n fltptr = np.ravel(scalars,order='C')\n# print(\"\\tfloats \" + repr(fltptr[0]) + \" \" +repr(fltptr[1] ))\n# print(\"level \" + repr(level) + \" gridname \" + gridname +\" nodename \"+ nodename)\n gvt.addVolumeSamples(nodename,fltptr.astype(np.float32),dimensions.astype(np.int32),left.astype(np.float32),spacing.astype(np.float32),samplingrate,bounds.astype(np.float64))\n # grab the subgrids or daughters of this grid\n daughtergrids = grid['DaughterGrids']\n dglist = list(daughtergrids.keys())\n numsubs = len(dglist)\n #for l in range(0):\n for dgname in daughtergrids.keys():\n #dgname = dglist[l]\n level = 1\n k = k + 1\n grid = daughtergrids[dgname]\n griddata = grid.get('GridData')\n density = griddata['Density']\n with density.astype('float32'):\n scalars = density[()]\n scalardims = np.array(scalars.shape,dtype=np.int32) -1\n low_scalar= min(low_scalar,scalars.min())\n high_scalar= max(high_scalar,scalars.max())\n #dimensions = grid['GridDimension'].value\n startindex = grid['GridStartIndex'][()]\n endindex = grid['GridEndIndex'][()]\n dimensions = endindex - startindex \n origin = grid['GridGlobalPosition'][()]\n left = grid['GridLeftEdge'][()]\n right = grid['GridRightEdge'][()]\n bounds = np.array([left[0],right[0],left[1],right[1],left[2],right[2]])\n spacing = (right - left)/(endindex-startindex +1)\n# print(\"\\t\"+dgname)\n# print(\"\\t\\tdims \"+repr(dimensions[:]))\n# print(\"\\t\\tleft \" + repr(left[:]))\n# print(\"\\t\\tright \" + repr(right[:]))\n# print(\"\\t\\tspacing \" + repr(spacing))\n# print(\"\\tbounds \" + repr(bounds))\n fltptr = scalars.flatten()\n# print(\"level \"+repr(level)+\" gridname \"+dgname+\" nodename \"+nodename)\n gvt.addAmrSubgrid(nodename,k,level,fltptr.astype(np.float32),dimensions.astype(np.int32),left.astype(np.float32),spacing.astype(np.float32))\n print(\" add transfer functions \" + nodename)\n print(\" ctffile : \" + ctffile)\n print(\" otffile : \" + otffile)\n low_scalar = 0.10\n high_scalar = 42.0\n print(\" scalar range : \" + repr(low_scalar) + \" \" + repr(high_scalar))\n gvt.addVolumeTransferFunctions(nodename,ctffile,otffile,low_scalar,high_scalar)\n # add an instance for this level 0 grid\n mf = np.identity(4,dtype=np.float32).flatten()\n myinstance = \"inst\" + repr(domain)\n gvt.addInstance(myinstance,nodename,mf)\n# and now camera etc.\n#\neyept = np.array([2.0,2.0,2.0],dtype=np.float32)\nfocus = np.array([0.4,0.6,0.5],dtype=np.float32)\nfov = 10.0*np.pi/180.0\nupVector = np.array([0.0,1.0,0.0],dtype=np.float32)\nrayMaxDepth = 1\nraysamples = 1\njitterWindowSize = 0.5\ncamname = \"conecam\"\ngvt.addCamera(camname,eyept,focus,upVector,fov,rayMaxDepth,raysamples,jitterWindowSize)\n#film\nwsize = np.array([640,640],dtype=np.int32)\nfilmname = \"conefilm\"\nimagename = \"EnzoImage\"\ngvt.addFilm(filmname,wsize[0],wsize[1],imagename)\n#render\nrendername = \"EnzoVolRenderer\"\nschedtype = 1\nadaptertype = 6\ngvt.addRenderer(rendername,adaptertype,schedtype,camname,filmname,True)\ngvt.render(rendername)\nos.chdir(imagedir)\ngvt.writeimage(rendername,imagename)\n" ]
[ [ "numpy.ravel", "numpy.array", "numpy.identity", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Rick-960123/centermask-mdf-master
[ "72147e8aae673fcaf4103ee90a6a6b73863e7fa1" ]
[ "maskrcnn_benchmark/modeling/roi_heads/mask_head/inference.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport numpy as np\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\n\n\n# TODO check if want to return a single BoxList or a composite\n# object\nclass MaskPostProcessor(nn.Module):\n \"\"\"\n From the results of the CNN, post process the masks\n by taking the mask corresponding to the class with max\n probability (which are of fixed size and directly output\n by the CNN) and return the masks in the mask field of the BoxList.\n\n If a masker object is passed, it will additionally\n project the masks in the image according to the locations in boxes,\n \"\"\"\n\n def __init__(self, masker=None):\n super(MaskPostProcessor, self).__init__()\n self.masker = masker\n\n def forward(self, x, boxes, miou_logits=None):\n \"\"\"\n Arguments:\n x (Tensor): the mask logits\n boxes (list[BoxList]): bounding boxes that are used as\n reference, one for ech image\n\n Returns:\n results (list[BoxList]): one BoxList for each image, containing\n the extra field mask\n \"\"\"\n if miou_logits is not None:\n # mask_prob = (x + miou_logits).sigmoid() #50k iters : 31.7/27.6\n mask_prob = (x * miou_logits).sigmoid() #50k iters : 31.7/5.6\n else:\n mask_prob = x.sigmoid()\n\n # select masks coresponding to the predicted classes\n num_masks = x.shape[0]\n labels = [bbox.get_field(\"labels\") for bbox in boxes]\n labels = torch.cat(labels)\n index = torch.arange(num_masks, device=labels.device)\n mask_prob = mask_prob[index, labels][:, None]\n\n boxes_per_image = [len(box) for box in boxes]\n mask_prob = mask_prob.split(boxes_per_image, dim=0)\n\n if self.masker:\n mask_prob = self.masker(mask_prob, boxes)\n\n results = []\n for prob, box in zip(mask_prob, boxes):\n bbox = BoxList(box.bbox, box.size, mode=\"xyxy\")\n for field in box.fields():\n bbox.add_field(field, box.get_field(field))\n bbox.add_field(\"mask\", prob)\n results.append(bbox)\n\n return results\n\n\nclass MaskPostProcessorCOCOFormat(MaskPostProcessor):\n \"\"\"\n From the results of the CNN, post process the results\n so that the masks are pasted in the image, and\n additionally convert the results to COCO format.\n \"\"\"\n\n def forward(self, x, boxes):\n import pycocotools.mask as mask_util\n import numpy as np\n\n results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)\n for result in results:\n masks = result.get_field(\"mask\").cpu()\n rles = [\n mask_util.encode(np.array(mask[0, :, :, np.newaxis], order=\"F\"))[0]\n for mask in masks\n ]\n for rle in rles:\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")\n result.add_field(\"mask\", rles)\n return results\n\n\n# the next two functions should be merged inside Masker\n# but are kept here for the moment while we need them\n# temporarily gor paste_mask_in_image\ndef expand_boxes(boxes, scale):\n w_half = (boxes[:, 2] - boxes[:, 0]) * .5\n h_half = (boxes[:, 3] - boxes[:, 1]) * .5\n x_c = (boxes[:, 2] + boxes[:, 0]) * .5\n y_c = (boxes[:, 3] + boxes[:, 1]) * .5\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = torch.zeros_like(boxes)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n return boxes_exp\n\n\ndef expand_masks(mask, padding):\n N = mask.shape[0]\n M = mask.shape[-1]\n pad2 = 2 * padding\n scale = float(M + pad2) / M\n padded_mask = mask.new_zeros((N, 1, M + pad2, M + pad2))\n padded_mask[:, :, padding:-padding, 
padding:-padding] = mask\n return padded_mask, scale\n\n\ndef paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):\n padded_mask, scale = expand_masks(mask[None], padding=padding)\n mask = padded_mask[0, 0]\n box = expand_boxes(box[None], scale)[0]\n box = box.to(dtype=torch.int32)\n\n TO_REMOVE = 1\n w = box[2] - box[0] + TO_REMOVE\n h = box[3] - box[1] + TO_REMOVE\n w = max(w, 1)\n h = max(h, 1)\n\n # Set shape to [batchxCxHxW]\n mask = mask.expand((1, 1, -1, -1))\n\n # Resize mask\n mask = mask.to(torch.float32)\n mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)\n mask = mask[0][0]\n\n if thresh >= 0:\n mask = mask > thresh\n else:\n # for visualization and debugging, we also\n # allow it to return an unmodified mask\n mask = (mask * 255).to(torch.uint8)\n\n im_mask = torch.zeros((im_h, im_w), dtype=torch.uint8)\n x_0 = max(box[0], 0)\n x_1 = min(box[2] + 1, im_w)\n y_0 = max(box[1], 0)\n y_1 = min(box[3] + 1, im_h)\n\n im_mask[y_0:y_1, x_0:x_1] = mask[\n (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])\n ]\n return im_mask\n\n\nclass Masker(object):\n \"\"\"\n Projects a set of masks in an image on the locations\n specified by the bounding boxes\n \"\"\"\n\n def __init__(self, threshold=0.5, padding=1):\n self.threshold = threshold\n self.padding = padding\n\n def forward_single_image(self, masks, boxes):\n boxes = boxes.convert(\"xyxy\")\n im_w, im_h = boxes.size\n res = [\n paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)\n for mask, box in zip(masks, boxes.bbox)\n ]\n if len(res) > 0:\n res = torch.stack(res, dim=0)[:, None]\n else:\n res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))\n return res\n\n def __call__(self, masks, boxes):\n if isinstance(boxes, BoxList):\n boxes = [boxes]\n\n # Make some sanity check\n assert len(boxes) == len(masks), \"Masks and boxes should have the same length.\"\n\n # TODO: Is this JIT compatible?\n # If not we should make it compatible.\n results = []\n for mask, box in zip(masks, boxes):\n assert mask.shape[0] == len(box), \"Number of objects should be the same.\"\n result = self.forward_single_image(mask, box)\n results.append(result)\n return results\n\n\ndef make_roi_mask_post_processor(cfg):\n if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:\n mask_threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD\n masker = Masker(threshold=mask_threshold, padding=1)\n else:\n masker = None\n mask_post_processor = MaskPostProcessor(masker)\n return mask_post_processor\n" ]
[ [ "torch.cat", "torch.zeros", "torch.zeros_like", "torch.nn.functional.interpolate", "torch.arange", "torch.stack", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
soniaai/pyAudioAnalysis
[ "47414c324114a9064d30637fdf1e508f12a7387e" ]
[ "pyAudioAnalysis/audacityAnnotation2WAVs.py" ]
[ "import glob\nimport os\nfrom . import audioBasicIO\nimport sys\nimport csv\nimport scipy.io.wavfile as wavfile\n\n\ndef annotation2files(wavFile, csvFile):\n '''\n Break an audio stream to segments of interest, \n defined by a csv file\n \n - wavFile: path to input wavfile\n - csvFile: path to csvFile of segment limits\n \n Input CSV file must be of the format <T1>\\t<T2>\\t<Label>\n ''' \n \n [Fs, x] = audioBasicIO.readAudioFile(wavFile)\n with open(csvFile, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for j, row in enumerate(reader):\n T1 = float(row[0].replace(\",\",\".\"))\n T2 = float(row[1].replace(\",\",\".\")) \n label = \"%s_%s_%.2f_%.2f.wav\" % (wavFile, row[2], T1, T2)\n label = label.replace(\" \", \"_\")\n xtemp = x[int(round(T1*Fs)):int(round(T2*Fs))] \n print(T1, T2, label, xtemp.shape)\n wavfile.write(label, Fs, xtemp) \n\ndef main(argv):\n if argv[1] == \"-f\":\n wavFile = argv[2]\n annotationFile = argv[3]\n annotation2files(wavFile, annotationFile)\n elif argv[1] == \"-d\":\n inputFolder = argv[2]\n types = ('*.txt', '*.csv')\n annotationFilesList = []\n for files in types:\n annotationFilesList.extend(glob.glob(os.path.join(inputFolder, files)))\n for anFile in annotationFilesList:\n wavFile = os.path.splitext(anFile)[0] + \".wav\"\n if not os.path.isfile(wavFile):\n wavFile = os.path.splitext(anFile)[0] + \".mp3\"\n if not os.path.isfile(wavFile):\n print(\"Audio file not found!\")\n return\n annotation2files(wavFile, anFile)\n\n\nif __name__ == '__main__':\n # Used to extract a series of annotated WAV files based on (a) an audio file (mp3 or wav) and \n # (b) a segment annotation file e.g. a \"label\" file generated in audacity\n #\n # usage 1:\n # python audacityAnnotation2WAVs.py -f <audiofilepath> <annotationfilepath>\n # The <annotationfilepath> is actually a tab-seperated file where each line has the format <startTime>\\t<entTime>\\t<classLabel>\n # The result of this process is a series of WAV files with a file name <audiofilepath>_<startTime>_<endTime>_<classLabel>\n # \n # usage 2:\n # python audacityAnnotation2WAVs.py -d <annotationfolderpath>\n # Same but searches all .txt and .csv annotation files. Audio files are supposed to be in the same path / filename with a WAV extension\n\n main(sys.argv)\n " ]
[ [ "scipy.io.wavfile.write" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tonyman1008/RADAR
[ "b2fc944230c2fd445528a9827eea42e1a94957b8", "b2fc944230c2fd445528a9827eea42e1a94957b8" ]
[ "data/syn_vases/scripts/generate_synvase.py", "render_animation.py" ]
[ "import math\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport os\nfrom glob import glob\nimport torch\nimport torchvision\nimport neural_renderer as nr\n\n\nEPS = 1e-7\n\n\ndef get_renderer(world_ori=[0,0,1], image_size=128, fov=30, renderer_min_depth=0.1, renderer_max_depth=10, fill_back=True, device='cuda:0'):\n #### camera intrinsics\n # (u) (x)\n # d * K^-1 (v) = (y)\n # (1) (z)\n\n ## renderer for visualization\n R = [[[1.,0.,0.],\n [0.,1.,0.],\n [0.,0.,1.]]]\n R = torch.FloatTensor(R).to(device)\n t = torch.FloatTensor(world_ori).to(device)\n fx = (image_size)/2/(math.tan(fov/2 *math.pi/180))\n fy = (image_size)/2/(math.tan(fov/2 *math.pi/180))\n cx = (image_size)/2\n cy = (image_size)/2\n K = [[fx, 0., cx],\n [0., fy, cy],\n [0., 0., 1.]]\n K = torch.FloatTensor(K).to(device)\n inv_K = torch.inverse(K).unsqueeze(0)\n K = K.unsqueeze(0)\n renderer = nr.Renderer(camera_mode='projection',\n light_intensity_ambient=1.0,\n light_intensity_directional=0.,\n K=K, R=R, t=t,\n near=renderer_min_depth, far=renderer_max_depth,\n image_size=image_size, orig_size=image_size,\n fill_back=fill_back,\n background_color=[1.,1.,1.])\n return renderer\n\n\ndef get_grid(H, W, normalize=True):\n if normalize:\n h_range = torch.linspace(-1,1,H)\n w_range = torch.linspace(-1,1,W)\n else:\n h_range = torch.arange(0,H)\n w_range = torch.arange(0,W)\n grid = torch.stack(torch.meshgrid([h_range, w_range]), -1).flip(2).float() # flip h,w to x,y\n return grid\n\n\ndef get_sphere_vtx(n_elev, n_azim):\n elevs = ((torch.arange(n_elev).view(n_elev, 1) +0.5) /n_elev *2 -1) *np.pi/2 # -pi/2~pi/2\n azims = ((torch.arange(n_azim).view(1, n_azim) +0.5) /n_azim *2 -1) *np.pi # -pi~pi\n xs = elevs.cos() * azims.cos()\n ys = elevs.repeat(1, n_azim).sin()\n zs = elevs.cos() * azims.sin()\n vtx = torch.stack([xs, ys, zs], 2) # ExAx3\n return vtx\n\n\ndef get_sor_vtx(sor_curve, T):\n b, h, _ = sor_curve.shape\n rs, hs = sor_curve.unbind(2) # BxH\n y = hs.view(b,h,1).repeat(1,1,T) # BxHxT\n thetas = torch.linspace(-math.pi, math.pi, T+1)[:-1].to(sor_curve.device) # T\n x = rs.unsqueeze(2) * thetas.cos().view(1,1,T) # BxHxT\n z = rs.unsqueeze(2) * thetas.sin().view(1,1,T) # BxHxT\n sor_vtx = torch.stack([x, y, z], 3) # BxHxTx3\n return sor_vtx\n\n\ndef get_sor_full_face_idx(h, w):\n idx_map = torch.arange(h*w).reshape(h,w) # HxW\n idx_map = torch.cat([idx_map, idx_map[:,:1]], 1) # Hx(W+1), connect last column to first\n faces1 = torch.stack([idx_map[:h-1,:w], idx_map[1:,:w], idx_map[:h-1,1:w+1]], -1) # (H-1)xWx3\n faces2 = torch.stack([idx_map[1:,1:w+1], idx_map[:h-1,1:w+1], idx_map[1:,:w]], -1) # (H-1)xWx3\n return torch.stack([faces1, faces2], 0).int() # 2x(H-1)xWx3\n\n\ndef get_sor_front_face_idx(h, w):\n sor_full_face_idx = get_sor_full_face_idx(h, w) # 2x(H-1)x(W//2)x3\n return sor_full_face_idx[:,:,:w//2,:]\n\n\ndef get_sor_back_face_idx(h, w):\n sor_full_face_idx = get_sor_full_face_idx(h, w) # 2x(H-1)x(W//2)x3\n return sor_full_face_idx[:,:,w//2:,:]\n\n\ndef get_tex_uv_grid(ts, h, w):\n uv_grid = get_grid(h, w, normalize=True) # -1~1, HxWx(x,y)\n ab_grid = get_grid(ts, ts, normalize=False) / (ts-1) # 0~1, txtx(x,y)\n ab_grid_uv_offsets = ab_grid * torch.FloatTensor([2/(w-1), 2/(h-1)]).view(1,1,2)\n\n tex_uv_grid1 = uv_grid[:-1,:-1,:].view(h-1, w-1, 1, 1, 2) + ab_grid_uv_offsets.view(1, 1, ts, ts, 2) # (H-1)x(W-1)xtxtx2\n tex_uv_grid2 = uv_grid[1:,1:,:].view(h-1, w-1, 1, 1, 2) - ab_grid_uv_offsets.view(1, 1, ts, ts, 2) # (H-1)x(W-1)xtxtx2\n tex_uv_grid = torch.stack([tex_uv_grid1, tex_uv_grid2], 0) # 
2x(H-1)x(W-1)xtxtx2\n return tex_uv_grid\n\n\ndef get_sor_vtx_normal(sor_vtx):\n sor_vtx = torch.nn.functional.pad(sor_vtx.permute(0,3,1,2), (1,1,0,0), mode='circular').permute(0,2,3,1) # BxHx(T+2)x3\n\n tu = sor_vtx[:,1:-1,2:] - sor_vtx[:,1:-1,:-2]\n tv = sor_vtx[:,2:,1:-1] - sor_vtx[:,:-2,1:-1]\n normal = tu.cross(tv, dim=3) # Bx(H-2)xTx3\n normal = torch.nn.functional.pad(normal.permute(0,3,1,2), (0,0,1,1), mode='replicate').permute(0,2,3,1) # BxHxTx3\n normal = normal / (((normal**2).sum(3, keepdim=True))**0.5 + EPS)\n return normal # BxHxTx3\n\n\ndef get_sor_quad_center_vtx(sor_vtx):\n ## shift to quad center for shading\n sor_vtx = torch.cat([sor_vtx, sor_vtx[:,:,:1]], 2) # Hx(T+1), connect last column to first\n sor_quad_center_vtx = torch.nn.functional.avg_pool2d(sor_vtx.permute(0,3,1,2), kernel_size=2, stride=1, padding=0).permute(0,2,3,1)\n return sor_quad_center_vtx # Bx(H-1)xTx3\n\n\ndef get_sor_quad_center_normal(sor_vtx):\n ## shift to quad center for shading\n sor_vtx = torch.cat([sor_vtx, sor_vtx[:,:,:1]], 2) # Hx(T+1), connect last column to first\n\n tu = sor_vtx[:,:-1,1:] - sor_vtx[:,1:,:-1]\n tv = sor_vtx[:,1:,1:] - sor_vtx[:,:-1,:-1]\n normal = tu.cross(tv, dim=3) # Bx(H-1)xTx3\n normal = normal / (((normal**2).sum(3, keepdim=True))**0.5 + EPS)\n return normal # Bx(H-1)xTx3\n\n\ndef get_rotation_matrix(tx, ty, tz):\n m_x = torch.zeros((len(tx), 3, 3)).to(tx.device)\n m_y = torch.zeros((len(tx), 3, 3)).to(tx.device)\n m_z = torch.zeros((len(tx), 3, 3)).to(tx.device)\n\n m_x[:, 1, 1], m_x[:, 1, 2] = tx.cos(), -tx.sin()\n m_x[:, 2, 1], m_x[:, 2, 2] = tx.sin(), tx.cos()\n m_x[:, 0, 0] = 1\n\n m_y[:, 0, 0], m_y[:, 0, 2] = ty.cos(), ty.sin()\n m_y[:, 2, 0], m_y[:, 2, 2] = -ty.sin(), ty.cos()\n m_y[:, 1, 1] = 1\n\n m_z[:, 0, 0], m_z[:, 0, 1] = tz.cos(), -tz.sin()\n m_z[:, 1, 0], m_z[:, 1, 1] = tz.sin(), tz.cos()\n m_z[:, 2, 2] = 1\n return torch.matmul(m_z, torch.matmul(m_y, m_x))\n\n\ndef rotate_pts(pts, rotmat):\n return pts.matmul(rotmat.transpose(2,1))\n\n\ndef transform_sor(sor_vtx, rxyz=None, txy=None):\n if rxyz is not None:\n rx, ry, rz = rxyz.unbind(1)\n rotmat = get_rotation_matrix(rx, ry, rz).to(sor_vtx.device)\n sor_vtx = rotate_pts(sor_vtx, rotmat) # BxNx3\n if txy is not None:\n tz = torch.zeros(len(txy), 1).to(txy.device)\n txyz = torch.cat([txy, tz], 1)\n sor_vtx = sor_vtx + txyz.unsqueeze(1) # BxNx3\n return sor_vtx\n\n\ndef render_sor(renderer, sor_vtx, sor_faces, tex_im, tx_size=4, dim_inside=False):\n b, H, T, _ = sor_vtx.shape\n tex_uv_grid = get_tex_uv_grid(tx_size, H, T+1).to(sor_vtx.device) # Bx2x(H-1)x(W-1)xtxtx2\n\n tx_cube = torch.nn.functional.grid_sample(tex_im, tex_uv_grid.view(1,-1,tx_size*tx_size,2).repeat(b,1,1,1), mode='bilinear', padding_mode=\"reflection\", align_corners=False) # Bx3xFxT^2\n tx_cube = tx_cube.permute(0,2,3,1).view(b,-1,1,tx_size,tx_size,3).repeat(1,1,tx_size,1,1,1) # BxFxtxtxtx3\n\n sor_vtx = sor_vtx.view(b,-1,3)\n sor_faces = sor_faces.view(b,-1,3)\n if dim_inside:\n fill_back = renderer.fill_back\n renderer.fill_back = False\n sor_faces = torch.cat([sor_faces, sor_faces.flip(2)], 1)\n tx_cube = torch.cat([tx_cube, tx_cube*0.5], 1)\n im_rendered = renderer.render_rgb(sor_vtx, sor_faces, tx_cube)\n renderer.fill_back = fill_back\n else:\n im_rendered = renderer.render_rgb(sor_vtx, sor_faces, tx_cube)\n return im_rendered\n\n\ndef save_results(root, res, name=''):\n b = res['batch_size']\n keys = res.keys()\n os.makedirs(root, exist_ok=True)\n for i in range(b):\n idx = len(os.listdir(root)) +1\n fname = '%04d' %idx + name\n 
out_folder = os.path.join(root, fname)\n os.makedirs(out_folder)\n for key in keys:\n fpath = os.path.join(out_folder, fname+'_'+key)\n if key == 'batch_size':\n pass\n elif res[key].dim() == 4:\n im = np.uint8(res[key][i].permute(1,2,0).flip(2).numpy() *255)\n cv2.imwrite(fpath+'.png', im)\n else:\n np.savetxt(fpath+'.txt', res[key][i].numpy(), fmt='%.6f', delimiter=', ')\n\n\nGAMMA = 2.2\ndef HDR2LDR(img):\n return img.clamp(min=EPS) **(1/GAMMA)\n\n\ndef LDR2HDR(img):\n return img.clamp(min=EPS) **GAMMA\n\n\ndef envmap_phong_shading(point3d, albedo, spec_albedo, normal, cam_loc, ambient, env_map, spec_alpha):\n b, c, tex_h, tex_w = albedo.shape\n _, h, w, _ = point3d.shape\n\n view_dirs = cam_loc.view(1,1,1,3) - point3d\n view_dirs = torch.nn.functional.normalize(view_dirs, p=2, dim=3, eps=EPS)\n\n _, n_elev, n_azim = env_map.shape\n l_dirs = get_sphere_vtx(n_elev, n_azim).unsqueeze(0).to(point3d.device) # BxExAx3\n l_elevs = ((torch.arange(n_elev) +0.5) /n_elev *2 -1) *np.pi/2\n l_norm = l_elevs.cos().view(1, n_elev, 1).to(point3d.device) / n_elev / n_azim # 1xEx1\n l_ints = env_map * l_norm *50\n\n cos_theta = ((-normal.unsqueeze(1)) * l_dirs.view(1,n_elev*n_azim,1,1,3)).sum(4, keepdim=True)\n diffuse = l_ints.view(b,n_elev*n_azim,1,1,-1) *cos_theta.clamp(0,1) # BxLxHxWx1\n diffuse = torch.nn.functional.interpolate(diffuse.view(b*n_elev*n_azim,h,w,1).permute(0,3,1,2), (tex_h, tex_w), mode='bilinear', align_corners=False).view(b,n_elev*n_azim,1,tex_h,tex_w)\n\n reflect_dirs = -l_dirs.view(1,n_elev*n_azim,1,1,3) + 2*cos_theta*(-normal.unsqueeze(1))\n specular = (view_dirs.unsqueeze(1) * reflect_dirs).sum(4,keepdim=True).clamp(min=0) * (cos_theta>0)\n specular = (spec_alpha.view(b,1,1,1,-1)+1)/2/math.pi *l_ints.view(b,n_elev*n_azim,1,1,-1) * specular.clamp(min=EPS).pow(spec_alpha.view(b,1,1,1,-1)) # BxLxHxWx1\n specular = torch.nn.functional.interpolate(specular.view(b*n_elev*n_azim,h,w,1).permute(0,3,1,2), (tex_h, tex_w), mode='bilinear', align_corners=False).view(b,n_elev*n_azim,1,tex_h,tex_w)\n\n colors = (ambient.view(b,-1,1,1) + diffuse.sum(1)) *albedo + specular.sum(1) *spec_albedo\n return colors, diffuse, specular\n\n\ndef sg_to_env_map(sg_lights, n_elev=8, n_azim=16):\n b, n_sgls, _ = sg_lights.shape\n\n sgl_dirs = sg_lights[:,:,:3]\n sgl_lams = sg_lights[:,:,3:4]\n sgl_Fs = sg_lights[:,:,4:5]\n\n l_dirs = get_sphere_vtx(n_elev, n_azim).unsqueeze(0).to(sg_lights.device) # BxExAx3\n exps = sgl_lams.view(b,n_sgls,1,1)* ((sgl_dirs.view(b,n_sgls,1,1,3)*l_dirs.view(1,1,n_elev,n_azim,3)).sum(4)-1) # BxLxExA\n l_ints = sgl_Fs.view(b,n_sgls,1,1) * exps.exp() # BxLxExA\n env_map = l_ints.sum(1) / n_sgls # BxExA\n return env_map\n\n\ndef get_random_sor_curve(b, H):\n t1_bottom = torch.rand(b) *math.pi -math.pi # -pi~0\n t1_top = torch.rand(b) *1.5*math.pi + 0.5*math.pi # pi/2~pi*2\n amp1 = torch.rand(b) *0.3 # 0~0.3\n t2_bottom = torch.rand(b) *2*math.pi # 0~pi*2\n t2_top = t2_bottom + torch.rand(b) *1.5*math.pi + 0.5*math.pi # bottom + pi/2~pi*2\n amp2 = torch.rand(b) *0.1 # 0~0.1\n r0 = torch.rand(b) *0.2 + 0.1 # 0.02~0.3\n\n ts = torch.linspace(0,1,H)\n t1s = (1-ts.view(1,H)) * t1_bottom.view(b,1) + ts.view(1,H) * t1_top.view(b,1)\n sin1 = amp1.view(b,1) * (t1s.sin() +1)\n t2s = (1-ts.view(1,H)) * t2_bottom.view(b,1) + ts.view(1,H) * t2_top.view(b,1)\n sin2 = amp2.view(b,1) * (t2s.sin() +1)\n # r_col = sin1 + sin2 + r0.view(b,1) # 0.1~1\n # r_col = r_col / r_col.max(1)[0].view(b,1) *0.75 # normalize to 0.75\n\n ## TODO:straight axis testing\n r_col = 
(torch.rand(b).clamp(0.4,1)*0.75).repeat(1,32) \n\n h_scale = torch.zeros(b) + 0.75\n h_col = torch.linspace(-1,1,H).view(1,H) *h_scale.view(b,1)\n\n sor_curve = torch.stack([r_col, h_col], 2) # BxHx(r,h)\n return sor_curve\n\n\ndef get_random_pitch(b):\n return torch.rand(b) *20 /180*math.pi # 0~20\n\n\ndef get_random_ambient(b):\n return torch.rand(b) *0.4 +0.1 # 0.1~0.5\n\n\ndef get_random_spec_albedo(b):\n return torch.rand(b) *0.9 +0.1 # 0.3~1\n\n\ndef get_random_spec_alpha(b):\n return (torch.rand(b) *13+1)**2 # 1~196\n\n\ndef get_random_sg_lights(b):\n n_sgls = 3\n sgl_dirs = torch.rand(b, n_sgls, 3) *2-1\n\n sgl_dirs[:,:,1] = -sgl_dirs[:,:,1].abs() # upper only\n sgl_dirs[:,:,2] = -sgl_dirs[:,:,2].abs() # front only\n\n sgl_dirs = torch.nn.functional.normalize(sgl_dirs, p=2, dim=2, eps=EPS)\n sgl_lams = torch.rand(b, n_sgls) *30+10\n sgl_Fs = (torch.rand(b, n_sgls) *0.3+0.1) *sgl_lams**0.5\n sg_lights = torch.cat([sgl_dirs, sgl_lams.unsqueeze(2), sgl_Fs.unsqueeze(2)], 2)\n return sg_lights\n\n\ndef get_random_env_map_ambient(b):\n return torch.rand(b) *0.03 # 0~0.1\n\n\ndef random_crop(im_pil, size, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333)):\n tfs_crop = torchvision.transforms.RandomResizedCrop(size, scale=scale, ratio=ratio)\n return tfs_crop(im_pil)\n\n\ndef random_color_jitter(im_pil, brightness=0, contrast=0, saturation=0, hue=0):\n tfs_jitter = torchvision.transforms.ColorJitter(brightness, contrast, saturation, hue)\n return tfs_jitter(im_pil)\n\n\ndef generate(cc0_tex_dir, out_dir):\n H = 32\n T = 96\n image_size = 256\n fov = 10\n envmap_n_elev = 32\n envmap_n_azim = 96\n tex_im_h = 256\n tex_im_w = 768\n num_im_per_tex = 5\n\n device = 'cuda:0'\n b = 1\n oriz = 5\n cam_loc = torch.FloatTensor([0,0,-oriz]).to(device)\n lim = math.tan(fov/2/180*math.pi) *oriz\n max_depth = oriz + lim\n min_depth = oriz - lim\n renderer = get_renderer(world_ori=[0,0,oriz], image_size=image_size, fov=fov, fill_back=True)\n sor_faces = get_sor_full_face_idx(H, T).repeat(b,1,1,1,1).to(device) # Bx2x(H-1)xWx3\n tx_size = 8\n tex_uv_grid = get_tex_uv_grid(tx_size, H, T+1).repeat(b,1,1,1,1,1,1).to(device) # Bx2x(H-1)x(W-1)xtxtx2\n\n tex_im_list = sorted(glob(os.path.join(cc0_tex_dir, '**/*Color.jpg'), recursive=True))\n num_tex_im = len(tex_im_list)\n for i, tex_fpath in enumerate(tex_im_list):\n print(f'\\n\\nTex {i}/{num_tex_im}:')\n\n tex_im_pil = Image.open(tex_fpath).convert('RGB')\n\n for j in range(num_im_per_tex):\n tex_im_crop_pil = random_crop(tex_im_pil, size=(tex_im_h, tex_im_w//2), scale=(0.04, 1), ratio=(0.75, 1.3333333333333333))\n tex_im_crop_pil = random_color_jitter(tex_im_crop_pil, brightness=(1.0,1.5), contrast=(1.0,2.0), saturation=0., hue=0.5)\n tex_im_crop = torch.FloatTensor(np.array(tex_im_crop_pil) / 255).to(device)\n tex_im_crop = tex_im_crop.permute(2,0,1).unsqueeze(0)\n tex_im_crop = LDR2HDR(tex_im_crop)\n\n sor_curve = get_random_sor_curve(b,H).to(device) *lim # BxHx2\n sor_vtx = get_sor_vtx(sor_curve, T) # BxHxTx3\n\n ## render rotation pitch rotation\n pitch = get_random_pitch(b).to(device)\n rxyz = torch.stack([pitch, torch.zeros_like(pitch), torch.zeros_like(pitch)], 1)\n posed_sor_vtx = transform_sor(sor_vtx.view(b,-1,3), rxyz, txy=None).view(b,H,T,3)\n\n depth_rendered = renderer.render_depth(posed_sor_vtx.view(b,-1,3), sor_faces.view(b,-1,3)).clamp(min_depth, max_depth)\n mask_rendered = renderer.render_silhouettes(posed_sor_vtx.view(b,-1,3), sor_faces.view(b,-1,3))\n\n normal_map = get_sor_quad_center_normal(posed_sor_vtx) # Bx(H-1)xTx3\n 
normal_tx_cube = torch.nn.functional.grid_sample(normal_map.permute(0,3,1,2), tex_uv_grid.view(b,-1,tx_size*tx_size,2), mode='bilinear', padding_mode=\"border\", align_corners=False) # Bx3xFxT^2\n normal_tx_cube = normal_tx_cube / (normal_tx_cube**2).sum(1,keepdim=True)**0.5 /2+0.5\n normal_tx_cube = normal_tx_cube.permute(0,2,3,1).view(b,-1,1,tx_size,tx_size,3).repeat(1,1,tx_size,1,1,1) # BxFxtxtxtx3\n normal_rendered = renderer.render_rgb(posed_sor_vtx.view(b,-1,3), sor_faces.view(b,-1,3), normal_tx_cube).clamp(0, 1)\n\n ## sample lighting\n albedo = torch.cat([tex_im_crop, tex_im_crop.flip(3)], 3)\n ambient = get_random_ambient(b).to(device) *0\n spec_albedo = get_random_spec_albedo(b).to(device)\n spec_alpha = get_random_spec_alpha(b).to(device)\n sg_lights = get_random_sg_lights(b).to(device)\n\n posed_sor_vtx_map = get_sor_quad_center_vtx(posed_sor_vtx) # Bx(H-1)xTx3\n env_map = sg_to_env_map(sg_lights, n_elev=envmap_n_elev, n_azim=envmap_n_azim)\n env_map_ambient = get_random_env_map_ambient(b).to(device)\n env_map = env_map + env_map_ambient.view(b,1,1)\n colors, diffuse, specular = envmap_phong_shading(posed_sor_vtx_map, albedo, spec_albedo.view(1,1,1,1), normal_map, cam_loc, ambient, env_map, spec_alpha)\n colors = HDR2LDR(colors)\n colors = colors.clamp(0,1)\n albedo = HDR2LDR(albedo)\n specular = specular * spec_albedo.view(1,1,1,1,1)\n\n im_rendered = render_sor(renderer, posed_sor_vtx, sor_faces, colors, tx_size=tx_size, dim_inside=True).clamp(0, 1)\n albedo_rendered = render_sor(renderer, posed_sor_vtx, sor_faces, albedo, tx_size=tx_size, dim_inside=True).clamp(0, 1)\n diffuse_map = diffuse.sum(1).clamp(0, 1).repeat(1,3,1,1)\n diffuse_rendered = render_sor(renderer, posed_sor_vtx, sor_faces, diffuse_map, tx_size=tx_size).clamp(0, 1)\n specular_map = specular.sum(1).clamp(0, 1).repeat(1,3,1,1)\n specular_rendered = render_sor(renderer, posed_sor_vtx, sor_faces, specular_map, tx_size=tx_size).clamp(0, 1)\n\n results = {}\n results['batch_size'] = b\n results['sor_curve'] = sor_curve.detach().cpu()\n results['pitch'] = pitch.detach().cpu().unsqueeze(1) /math.pi*180\n results['depth_rendered'] = (depth_rendered.detach().cpu().unsqueeze(1).repeat(1,3,1,1) - min_depth) / (max_depth-min_depth)\n results['mask_rendered'] = mask_rendered.detach().cpu().unsqueeze(1).repeat(1,3,1,1)\n\n results['im_rendered'] = im_rendered.detach().cpu()\n results['normal_map'] = normal_map.detach().cpu().permute(0,3,1,2) /2+0.5\n results['normal_rendered'] = normal_rendered.detach().cpu()\n results['albedo_map'] = albedo.detach().cpu()\n results['albedo_rendered'] = albedo_rendered.detach().cpu()\n results['texture_map'] = colors.detach().cpu()\n results['diffuse_map'] = diffuse_map.detach().cpu()\n results['diffuse_rendered'] = diffuse_rendered.detach().cpu()\n results['specular_map'] = specular_map.detach().cpu()\n results['specular_rendered'] = specular_rendered.detach().cpu()\n results['sg_lights'] = sg_lights.detach().cpu()\n results['ambient'] = ambient.detach().cpu().unsqueeze(1)\n results['spec_albedo'] = spec_albedo.detach().cpu().unsqueeze(1)\n results['spec_alpha'] = spec_alpha.detach().cpu().unsqueeze(1)\n results['env_map'] = env_map.clamp(0, 1).detach().cpu().repeat(1,3,1,1)\n results['env_map_ambient'] = env_map_ambient.detach().cpu().unsqueeze(1)\n\n tex_id = '_' + os.path.basename(os.path.dirname(tex_fpath)) + '_%02d' %j\n save_results(out_dir, results, name=tex_id)\n\n\nif __name__ == '__main__':\n cc0_tex_dir = '../cc0_textures/PhotoTexturePBR'\n out_dir = 
'../syn_curv_sgl5_tex_straight_20220220/rendering'\n\n for split in ['train', 'test']:\n print(f'Generating {split} set...')\n generate(os.path.join(cc0_tex_dir, split), os.path.join(out_dir, split))\n\n os.symlink('test', os.path.join(out_dir, 'val'))\n", "from operator import truediv\nimport os\nfrom glob import glob\nfrom pickle import TRUE\nimport numpy as np\nimport cv2\nimport torch\nfrom derender import utils, rendering\nimport neural_renderer as nr\n\ndef load_imgs(flist):\n return torch.stack([torch.FloatTensor(cv2.imread(f) /255.).flip(2) for f in flist], 0).permute(0,3,1,2)\n\ndef load_txts(flist):\n return torch.stack([torch.FloatTensor(np.loadtxt(f, delimiter=',')) for f in flist], 0)\n\ndef load_sor_curve_txt(flist):\n sor_curve_all = torch.tensor([])\n radcol_height_list = []\n for f in flist:\n ## sor_curve of each components\n sor_curve_component = torch.FloatTensor(np.loadtxt(f, delimiter=','))\n sor_curve_all = torch.cat([sor_curve_all,sor_curve_component],0)\n \n ## radcol height of each components\n radcol_height_component = sor_curve_component.size(0)\n radcol_height_list.append(radcol_height_component)\n return sor_curve_all, radcol_height_list\n\ndef load_obj(flist):\n vertices_all = torch.tensor([]).cuda()\n faces_all = torch.tensor([]).int().cuda()\n for f in flist:\n vertices,faces= nr.load_obj(f,normalization=False, load_texture=False, texture_size=8)\n vertices_all = torch.cat([vertices_all,vertices],0)\n faces_all = torch.cat([faces_all,faces],0)\n return vertices_all,faces_all\n\ndef render_views_multiObject(renderer, cam_loc, canon_sor_vtx, sor_faces, albedo_list, env_map_all, spec_alpha_list, spec_albedo_list,radcol_height_list, tx_size):\n b = canon_sor_vtx.size(0)\n print(\"====render novel view animation====\",)\n s = 80 # sample number\n rxs = torch.linspace(0, np.pi/3, s//2) # rotation x axis (roll)\n rxs = torch.cat([rxs, rxs.flip(0)], 0) # rotation x axis back to origin pose\n rys = torch.linspace(0, 2*np.pi, s) # rotation y axis (pitch)\n\n ims = []\n for i, (rx, ry) in enumerate(zip(rxs, rys)):\n\n ## rotate y-axis first then rotate x-axis\n rxyz = torch.stack([rx*0, ry, rx*0], 0).unsqueeze(0).to(canon_sor_vtx.device)\n sor_vtx = rendering.transform_pts(canon_sor_vtx, rxyz, None)\n # rxyz = torch.stack([rx, ry*0, rx*0], 0).unsqueeze(0).to(canon_sor_vtx.device)\n # sor_vtx = rendering.transform_pts(sor_vtx, rxyz, None)\n\n ## render each components texture\n tex_im_list = []\n for j in range(len(radcol_height_list)):\n env_map = env_map_all[j:j+1].to(canon_sor_vtx.device)\n spec_alpha = spec_alpha_list[j]\n spec_albedo = spec_albedo_list[j]\n h_start = sum(radcol_height_list[:j])\n h_end = sum(radcol_height_list[:j+1])\n sor_vtx_map = rendering.get_sor_quad_center_vtx(sor_vtx[:,h_start:h_end,:,:]) # Bx(H-1)xTx3\n normal_map = rendering.get_sor_quad_center_normal(sor_vtx[:,h_start:h_end,:,:]) # Bx(H-1)xTx3\n diffuse, specular = rendering.envmap_phong_shading(sor_vtx_map, normal_map, cam_loc, env_map, spec_alpha)\n tex_im = rendering.compose_shading(albedo_list[j], diffuse, spec_albedo.view(b,1,1,1), specular).clamp(0,1)\n tex_im_list.append(tex_im)\n\n ## render each components reconstrction image\n im_rendered = rendering.render_sor_multiObject(renderer, sor_vtx, sor_faces.repeat(b,1,1,1,1),radcol_height_list, tex_im_list, tx_size=tx_size, dim_inside=False).clamp(0, 1)\n ims += [im_rendered]\n ims = torch.stack(ims, 1) # BxTxCxHxW\n return ims\n\n\ndef render_relight_multiObject(renderer, cam_loc, sor_vtx, sor_faces, albedo_list, spec_alpha_list, 
spec_albedo_list,radcol_height_list, tx_size):\n b = sor_vtx.size(0)\n lam = 20\n F = 0.15\n env_amb = 0.015\n n_sgls = 1\n sgl_lams = torch.FloatTensor([lam]).repeat(b, n_sgls).to(sor_vtx.device)\n sgl_Fs = torch.FloatTensor([F]).repeat(b, n_sgls).to(sor_vtx.device) *sgl_lams**0.5\n\n s = 80\n azims = torch.linspace(0, 4*np.pi, s)\n elevs = torch.linspace(0, np.pi/2, s//2)\n elevs = torch.cat([elevs, elevs.flip(0)], 0)\n\n ims = []\n for i, (azim, elev) in enumerate(zip(azims, elevs)):\n dy = -elev.sin()\n dx = elev.cos() * azim.sin()\n dz = -elev.cos() * azim.cos()\n sgl_dirs = torch.stack([dx, dy, dz], 0).repeat(b, n_sgls, 1).to(sor_vtx.device)\n sg_lights = torch.cat([sgl_dirs, sgl_lams.unsqueeze(2), sgl_Fs.unsqueeze(2)], 2).to(sor_vtx.device)\n\n env_map = rendering.sg_to_env_map(sg_lights, n_elev=16, n_azim=48)\n env_map_ambient = torch.FloatTensor([env_amb]).repeat(b).to(sor_vtx.device)\n env_map = env_map + env_map_ambient.view(b,1,1)\n\n tex_im_list = []\n for j in range(len(radcol_height_list)):\n spec_alpha = spec_alpha_list[j]\n spec_albedo = spec_albedo_list[j]\n h_start = sum(radcol_height_list[:j])\n h_end = sum(radcol_height_list[:j+1])\n sor_vtx_map = rendering.get_sor_quad_center_vtx(sor_vtx[:,h_start:h_end,:,:]) # Bx(H-1)xTx3\n normal_map = rendering.get_sor_quad_center_normal(sor_vtx[:,h_start:h_end,:,:]) # Bx(H-1)xTx3\n diffuse, specular = rendering.envmap_phong_shading(sor_vtx_map, normal_map, cam_loc, env_map, spec_alpha)\n tex_im = rendering.compose_shading(albedo_list[j], diffuse, spec_albedo.view(b,1,1,1), specular).clamp(0,1)\n tex_im_list.append(tex_im)\n\n im_rendered = rendering.render_sor_multiObject(renderer, sor_vtx, sor_faces.repeat(b,1,1,1,1),radcol_height_list, tex_im_list, tx_size=tx_size, dim_inside=False).clamp(0, 1)\n ims += [im_rendered]\n ims = torch.stack(ims, 1) # BxTxCxHxW\n return ims\n\ndef render_original_shape_multiObject(renderer, canon_sor_vtx, sor_faces):\n b = canon_sor_vtx.size(0)\n s = 80 # sample number\n rxs = torch.linspace(0, np.pi/3, s//2) # rotation x axis (roll)\n rxs = torch.cat([rxs, rxs.flip(0)], 0) # rotation x axis back to origin pose\n rys = torch.linspace(0, 2*np.pi, s) # rotation y axis (pitch)\n\n ims = []\n for i, (rx, ry) in enumerate(zip(rxs, rys)):\n\n ## rotate y-axis first then rotate x-axis\n rxyz = torch.stack([rx*0, ry, rx*0], 0).unsqueeze(0).to(canon_sor_vtx.device)\n sor_vtx = rendering.transform_pts(canon_sor_vtx, rxyz, None)\n rxyz = torch.stack([rx*0, ry*0, rx*0], 0).unsqueeze(0).to(canon_sor_vtx.device)\n\n ## rendering multiple objects test\n sor_vtx = rendering.transform_pts(sor_vtx, rxyz, None)\n\n im_rendered = rendering.render_object_shape(renderer, sor_vtx, sor_faces.repeat(b,1,1,1,1),False).clamp(0, 1)\n ims += [im_rendered]\n ims = torch.stack(ims, 1) # BxTxCxHxW\n return ims\n\ndef main(in_dir, out_dir):\n device = 'cuda:0'\n\n sor_circum = 48 # set sor_circum to 48 fit 3sweep object(after subdivision)\n\n image_size = 256\n tex_im_h = 256\n tex_im_w = 768 ## 256*3 => 3 times width of texture\n env_map_h = 16\n env_map_w = 48\n fov = 10 # in degrees\n ori_z = 12.5 # camera z-axis orientation\n world_ori = [0,0,ori_z] \n tx_size = 16 # texture sample grid size (Neural renderer)\n cam_loc = torch.FloatTensor([0,0,-ori_z]).to(device) # camera position\n\n apply_origin_vertices = True # apply the origin vertices from 3-Sweep\n batch_size = 1 # for multiObject, so fix 1\n\n renderer = rendering.get_renderer(world_ori=world_ori, image_size=image_size,fov=fov, fill_back=True, device='cuda:0')\n \n ## 
load sor,rad_height. (data type: sor_curve_all => tensor, radcol_height_list => list)\n sor_curve_all,radcol_height_list = load_sor_curve_txt(sorted(glob(os.path.join(in_dir, 'sor_curve/*_sor_curve.txt'), recursive=True)))\n\n ## load data (tensor type)\n material_all = load_txts(sorted(glob(os.path.join(in_dir, 'material/*_material.txt'), recursive=True)))\n pose_all = load_txts(sorted(glob(os.path.join(in_dir, 'pose/*_pose.txt'), recursive=True)))\n albedo_all = load_imgs(sorted(glob(os.path.join(in_dir, 'albedo_map/*_albedo_map.png'), recursive=True)))\n mask_gt_all = load_imgs(sorted(glob(os.path.join(in_dir, 'mask_gt/*_mask_gt.png'), recursive=True)))\n env_map_all = load_imgs(sorted(glob(os.path.join(in_dir, 'env_map/*_env_map.png'), recursive=True)))[:,0,:,:]\n vertices_obj_all,faces_obj_all = load_obj(sorted(glob(os.path.join(in_dir, 'obj_parsed/*_obj_parsed.obj'), recursive=True)))\n\n component_num = len(radcol_height_list)\n print(\"total components num of this object\",component_num)\n\n vertices_size_list = []\n spec_alpha_list = []\n spec_albedo_list = []\n albedo_replicated_list = []\n\n canon_sor_vtx = torch.empty(0).to(device)\n canon_sor_vtx_obj = torch.empty(0).to(device)\n\n mask_gt = mask_gt_all[:1].to(device)\n pose = pose_all[:1].to(device) # => 0,0,0\n \n ## set multi-object data list\n for i in range(0, component_num):\n \n ## set the index of components tensor\n index_start = sum(radcol_height_list[:i])\n index_end = sum(radcol_height_list[:i+1])\n\n ## get different sor,material,albedo of each components\n sor_curve = sor_curve_all[index_start:index_end].to(device)\n material = material_all[i:i+1].to(device)\n albedo = albedo_all[i:i+1].to(device)\n \n ## calculate paramter\n vertices_size = radcol_height_list[i]*sor_circum # for indexing\n vertices_size_list.append(vertices_size)\n\n ## set sor_vtx map\n canon_sor_vtx_component = rendering.get_sor_vtx(sor_curve.repeat(batch_size,1,1), sor_circum)\n canon_sor_vtx = torch.cat([canon_sor_vtx,canon_sor_vtx_component],1)\n\n ## specular\n spec_alpha, spec_albedo = material.unbind(1)\n spec_alpha_list.append(spec_alpha)\n spec_albedo_list.append(spec_albedo)\n\n ## replicate albedo (method 1)\n # albedo = rendering.gamma(albedo)\n # wcrop_ratio = 1/6\n # wcrop_tex_im = int(wcrop_ratio * tex_im_w//2)\n # p = 8 # padding\n # front_albedo = torch.cat([albedo[:,:,:,p:2*p].flip(3), albedo[:,:,:,p:-p], albedo[:,:,:,-2*p:-p].flip(3)], 3) \n # albedo_replicated = torch.cat([front_albedo[:,:,:,:wcrop_tex_im].flip(3), front_albedo, front_albedo.flip(3), front_albedo[:,:,:,:-wcrop_tex_im]], 3)\n \n ## symmetry replicate albedo (method 2)\n albedo = rendering.gamma(albedo)\n wcrop_ratio = 1/6\n wcrop_tex_im = int(wcrop_ratio * tex_im_w//2)\n p = 8 # padding => to avoid the albedo image boundary line\n front_albedo = torch.cat([albedo[:,:,:,p:p+wcrop_tex_im].flip(3), albedo[:,:,:,p:-p], albedo[:,:,:,-(wcrop_tex_im+p):-p].flip(3)], 3) # 256+64+64\n # front_albedo = torch.cat([albedo[:,:,:,p:2*p].flip(3), albedo[:,:,:,p:-p], albedo[:,:,:,-2*p:-p].flip(3)], 3) \n albedo_replicated = torch.cat([ front_albedo, front_albedo.flip(3)], 3) \n\n albedo_replicated_list.append(albedo_replicated)\n utils.save_images(out_dir, albedo_replicated.cpu().numpy(), suffix='albedo_replicated', sep_folder=True)\n utils.save_images(out_dir, front_albedo.cpu().numpy(), suffix='front_albedo', sep_folder=True)\n\n ## get sor_faces with all component\n sor_faces = rendering.get_sor_full_face_idx_multiObject(radcol_height_list, sor_circum).to(device) # 
2x(H-1)xWx3\n\n ## normalize from NR loadObj\n vertices_obj_all_normalized = utils.normalizeObjVertices(vertices_obj_all)\n\n ## get sor vertices data\n for i in range(len(radcol_height_list)):\n ## re-assign the normalize vertices\n index_vertices_start = sum(vertices_size_list[:i])\n index_vertices_end = sum(vertices_size_list[:i+1])\n vertices_component_normalized = vertices_obj_all_normalized[index_vertices_start:index_vertices_end].reshape(1,radcol_height_list[i],sor_circum,3)\n\n ## concate to fit RADAR data dimension\n canon_sor_vtx_obj = torch.cat([canon_sor_vtx_obj,vertices_component_normalized],1)\n\n ## test for relighting\n rxyz = pose[:,:3] / 180 * np.pi # 1x3\n txy = pose[:,3:] # 1x2\n tz = torch.zeros(len(txy), 1).to(txy.device) ## set z-transform to zero\n txyz = torch.cat([txy, tz], 1)\n \n ## apply the original vertices for relighting\n if apply_origin_vertices == True:\n sor_vtx_relighting = rendering.transform_pts(canon_sor_vtx_obj, rxyz, txyz)\n else:\n sor_vtx_relighting = rendering.transform_pts(canon_sor_vtx, rxyz, txyz)\n\n with torch.no_grad():\n if apply_origin_vertices == True :\n novel_views = render_views_multiObject(renderer, cam_loc, canon_sor_vtx_obj, sor_faces, albedo_replicated_list, env_map_all, spec_alpha_list, spec_albedo_list,radcol_height_list, tx_size)\n novel_view_original_shape = render_original_shape_multiObject(renderer,canon_sor_vtx_obj,sor_faces)\n else:\n novel_views = render_views_multiObject(renderer, cam_loc, canon_sor_vtx, sor_faces, albedo_replicated_list, env_map_all, spec_alpha_list, spec_albedo_list,radcol_height_list, tx_size)\n relightings = render_relight_multiObject(renderer, cam_loc, sor_vtx_relighting, sor_faces, albedo_replicated_list, spec_alpha_list, spec_albedo_list,radcol_height_list, tx_size)\n [utils.save_images(out_dir, novel_views[:,i].cpu().numpy(), suffix='novel_views_%d'%i, sep_folder=True) for i in range(0, novel_views.size(1), novel_views.size(1)//10)]\n utils.save_videos(out_dir, novel_views.cpu().numpy(), suffix='novel_view_videos', sep_folder=True, fps=25)\n [utils.save_images(out_dir, novel_view_original_shape[:,i].cpu().numpy(), suffix='novel_views_original_shape_%d'%i, sep_folder=True) for i in range(0, novel_view_original_shape.size(1), novel_view_original_shape.size(1)//10)]\n utils.save_videos(out_dir, novel_view_original_shape.cpu().numpy(), suffix='novel_view_original_shape_videos', sep_folder=True, fps=25)\n [utils.save_images(out_dir, relightings[:,i].cpu().numpy(), suffix='relight_%d'%i, sep_folder=True) for i in range(0, relightings.size(1), relightings.size(1)//10)]\n utils.save_videos(out_dir, relightings.cpu().numpy(), suffix='relight_videos', sep_folder=True, fps=25)\n print(\"====render novel view animation finished!====\")\n\nif __name__ == '__main__':\n\n ## auto batch test\n for in_dir in glob('results/TestResults_20220613_noSubdivide*'):\n print(\"===Run data dir: \"+in_dir+\" ===\")\n out_dir = os.path.join(in_dir,'animations_horizontalRotate')\n main(in_dir, out_dir)\n print(\"===Finished data dir: \"+in_dir+\" ===\")\n print(\"===All rendering is finished !!!! ===\")\n ## single test\n # in_dir = 'results/TestResults_20220508_ADA6051'\n # out_dir = os.path.join(in_dir,'animations')\n # out_dir = 'results/TestResults_20220425_horn_1/animations'\n # main(in_dir, out_dir)\n" ]
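For reference, a minimal standalone sketch of the surface-of-revolution construction that get_sor_vtx above implements, with the batch dimension dropped for brevity (the function and argument names here are illustrative, not from the source):

import math
import torch

def sor_vtx_sketch(radii, heights, T=8):
    # Sweep a profile curve (radius r_i at height h_i) around the y-axis
    # at T evenly spaced angles, matching get_sor_vtx's use of
    # linspace(-pi, pi, T+1)[:-1].
    H = radii.shape[0]
    thetas = torch.linspace(-math.pi, math.pi, T + 1)[:-1]  # T angles; duplicate endpoint dropped
    x = radii.view(H, 1) * thetas.cos().view(1, T)          # HxT
    z = radii.view(H, 1) * thetas.sin().view(1, T)          # HxT
    y = heights.view(H, 1).expand(H, T)                     # HxT; height is angle-invariant
    return torch.stack([x, y, z], dim=2)                    # HxTx3 vertex grid

# e.g. a straight cylinder of radius 0.5:
# vtx = sor_vtx_sketch(torch.full((32,), 0.5), torch.linspace(-1, 1, 32))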
[ [ "torch.nn.functional.normalize", "torch.linspace", "torch.cat", "torch.zeros", "torch.zeros_like", "torch.inverse", "torch.matmul", "torch.FloatTensor", "torch.rand", "torch.arange", "torch.stack", "numpy.array", "torch.meshgrid" ], [ "torch.linspace", "torch.empty", "torch.cat", "torch.tensor", "torch.no_grad", "torch.FloatTensor", "torch.stack", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
boaryang/quantum
[ "ef3a34341d997d485a7e43335a8ed61a8e7c6ea6" ]
[ "tensorflow_quantum/core/serialize/serializer.py" ]
[ "# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A basic serializer used to serialize/deserialize Cirq circuits for tfq.\"\"\"\n# TODO(pmassey / anyone): determine if this should be kept as globals.\nimport copy\nimport numbers\nimport sympy\nimport numpy as np\n\nimport cirq\nimport cirq.google.api.v2 as v2\nfrom tensorflow_quantum.core.proto import pauli_sum_pb2\n\n# Needed to allow autograph to crawl AST without erroring.\n_CONSTANT_TRUE = lambda x: True\n\n\ndef _round(x):\n return np.round(x, 6) if isinstance(x, float) else x\n\n\ndef _parse_mul(expr):\n \"\"\"Returns the lhs and rhs of a sympy.Mul. This is written\n to prevent autograph from going into sympy library code and having\n conflicts with the @cacheit decorator.\"\"\"\n if len(expr.args) == 1:\n return sympy.S.One, expr.args[0]\n if len(expr.args) == 2:\n return expr.args[0], expr.args[1]\n\n raise ValueError(\"Arithmetic expression outside of simple \"\n \"scalar multiplication is currently not \"\n \"supported. See serializer.py for more \"\n \"information.\")\n\n\ndef _scalar_extractor(x):\n \"\"\"This is a workaround to support symbol scalar multiplication.\n In the future we should likely get rid of this in favor of proper\n expression parsing once cirq supports it. See cirq.op_serializer\n and cirq's program protobuf for details. This is needed for things\n like cirq.rx('alpha').\n \"\"\"\n if not isinstance(x, (numbers.Real, sympy.Expr)):\n raise TypeError(\"Invalid input argument for exponent.\")\n\n if isinstance(x, (numbers.Real, sympy.Symbol)):\n return 1.0\n\n expr = x.evalf()\n if isinstance(expr, sympy.mul.Mul):\n lhs_eval, rhs_eval = _parse_mul(expr)\n\n if isinstance(lhs_eval, sympy.Symbol) and isinstance(\n rhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):\n # lhs contains symbol rhs contains number.\n return _round(float(rhs_eval))\n\n if isinstance(rhs_eval, sympy.Symbol) and isinstance(\n lhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):\n # lhs contains number.\n return _round(float(lhs_eval))\n\n raise ValueError(\"Arithmetic expression outside of simple \"\n \"scalar multiplication is currently not \"\n \"supported. 
See serializer.py for more \"\n \"information.\")\n\n\ndef _symbol_extractor(x):\n \"\"\"This is the second extractor for above.\"\"\"\n if not isinstance(x, (numbers.Real, sympy.Expr)):\n raise TypeError(\"Invalid input argument for exponent.\")\n\n if isinstance(x, numbers.Real):\n return _round(float(x))\n if isinstance(x, sympy.Symbol):\n return x\n\n expr = x.evalf()\n if isinstance(expr, sympy.mul.Mul):\n lhs_eval, rhs_eval = _parse_mul(expr)\n\n if isinstance(lhs_eval, sympy.Symbol) and isinstance(\n rhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):\n # lhs contains symbol rhs contains number.\n return lhs_eval\n\n if isinstance(rhs_eval, sympy.Symbol) and isinstance(\n lhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)):\n # lhs contains number.\n return rhs_eval\n\n raise ValueError(\"Arithmetic expression outside of simple \"\n \"scalar multiplication is currently not \"\n \"supported. See serializer.py for more \"\n \"information.\")\n\n\ndef _serialize_controls(gate):\n \"\"\"Helper to serialize control qubits if applicable.\"\"\"\n if hasattr(gate, '_tfq_control_qubits'):\n return ','.join(\n v2.qubit_to_proto_id(q) for q in gate._tfq_control_qubits)\n return ''\n\n\ndef _serialize_control_vals(gate):\n \"\"\"Helper to serialize control values if applicable..\"\"\"\n if hasattr(gate, '_tfq_control_values'):\n return ','.join(str(v[0]) for v in gate._tfq_control_values)\n return ''\n\n\nclass DelayedAssignmentGate(cirq.Gate):\n \"\"\"Class to do control qubit assignment before sub_gate qubit assignment.\"\"\"\n\n def __init__(self, gate_callable, control_qubits, control_values):\n self._gate_callable = gate_callable\n self._control_qubits = control_qubits\n self._control_values = control_values\n\n def _qid_shape_(self):\n raise ValueError(\"Called qid_shape on workaround class.\")\n\n # pylint: disable=invalid-name\n def on(self, *qubits):\n \"\"\"Returns gate_callable on qubits controlled by contol_qubits.\"\"\"\n return self._gate_callable(*qubits).controlled_by(\n *self._control_qubits, control_values=self._control_values)\n\n # pylint: enable=invalid-name\n\n\ndef _optional_control_promote(gate, qubits_message, values_message):\n \"\"\"Optionally promote to controlled gate based on serialized control msg.\"\"\"\n if qubits_message == '' and values_message == '':\n return gate\n qbs = [v2.qubit_from_proto_id(qb) for qb in qubits_message.split(',')]\n vals = [int(cv) for cv in values_message.split(',')]\n\n return DelayedAssignmentGate(gate, qbs, vals)\n\n\ndef _depolarize_channel_serializer():\n \"\"\"Make standard serializer for depolarization channel.\"\"\"\n\n args = [\n # cirq channels can't contain symbols.\n cirq.google.SerializingArg(serialized_name=\"p\",\n serialized_type=float,\n op_getter=lambda x: x.gate.p),\n cirq.google.SerializingArg(serialized_name=\"control_qubits\",\n serialized_type=str,\n op_getter=lambda x: ''),\n cirq.google.SerializingArg(serialized_name=\"control_values\",\n serialized_type=str,\n op_getter=lambda x: '')\n ]\n return cirq.google.GateOpSerializer(gate_type=cirq.DepolarizingChannel,\n serialized_gate_id=\"DP\",\n args=args,\n can_serialize_predicate=_CONSTANT_TRUE)\n\n\ndef _depolarize_channel_deserializer():\n \"\"\"Make standard deserializer for depolarization channel.\"\"\"\n\n args = [\n cirq.google.DeserializingArg(serialized_name=\"p\",\n constructor_arg_name=\"p\")\n ]\n return cirq.google.GateOpDeserializer(\n serialized_gate_id=\"DP\",\n gate_constructor=cirq.DepolarizingChannel,\n args=args)\n\n\ndef 
_eigen_gate_serializer(gate_type, serialized_id):\n \"\"\"Make standard serializer for eigen gates.\"\"\"\n\n args = [\n cirq.google.SerializingArg(\n serialized_name=\"exponent\",\n serialized_type=float,\n op_getter=lambda x: _symbol_extractor(x.gate.exponent)),\n cirq.google.SerializingArg(\n serialized_name=\"exponent_scalar\",\n serialized_type=float,\n op_getter=lambda x: _scalar_extractor(x.gate.exponent)),\n cirq.google.SerializingArg(\n serialized_name=\"global_shift\",\n serialized_type=float,\n op_getter=lambda x: float(x.gate._global_shift)),\n cirq.google.SerializingArg(serialized_name=\"control_qubits\",\n serialized_type=str,\n op_getter=lambda x: _serialize_controls(x)),\n cirq.google.SerializingArg(\n serialized_name=\"control_values\",\n serialized_type=str,\n op_getter=lambda x: _serialize_control_vals(x))\n ]\n return cirq.google.GateOpSerializer(gate_type=gate_type,\n serialized_gate_id=serialized_id,\n args=args,\n can_serialize_predicate=_CONSTANT_TRUE)\n\n\ndef _eigen_gate_deserializer(gate_type, serialized_id):\n \"\"\"Make standard deserializer for eigen gates.\"\"\"\n\n def _scalar_combiner(exponent, global_shift, exponent_scalar,\n control_qubits, control_values):\n \"\"\"This is a workaround to support symbol scalar multiplication.\n In the future we should likely get rid of this in favor of proper\n expression parsing once cirq supports it. See cirq.op_serializer\n and cirq's program protobuf for details. This is needed for things\n like cirq.rx('alpha').\n \"\"\"\n if exponent_scalar == 1.0:\n return _optional_control_promote(\n gate_type(exponent=_round(exponent),\n global_shift=_round(global_shift)), control_qubits,\n control_values)\n return _optional_control_promote(\n gate_type(exponent=_round(exponent) * _round(exponent_scalar),\n global_shift=_round(global_shift)), control_qubits,\n control_values)\n\n args = [\n cirq.google.DeserializingArg(serialized_name=\"exponent\",\n constructor_arg_name=\"exponent\"),\n cirq.google.DeserializingArg(serialized_name=\"global_shift\",\n constructor_arg_name=\"global_shift\"),\n cirq.google.DeserializingArg(serialized_name=\"exponent_scalar\",\n constructor_arg_name=\"exponent_scalar\"),\n cirq.google.DeserializingArg(serialized_name=\"control_qubits\",\n constructor_arg_name=\"control_qubits\"),\n cirq.google.DeserializingArg(serialized_name=\"control_values\",\n constructor_arg_name=\"control_values\")\n ]\n return cirq.google.GateOpDeserializer(serialized_gate_id=serialized_id,\n gate_constructor=_scalar_combiner,\n args=args)\n\n\ndef _fsim_gate_serializer():\n \"\"\"Make standard serializer for fsim gate.\"\"\"\n\n args = [\n cirq.google.SerializingArg(\n serialized_name=\"theta\",\n serialized_type=float,\n op_getter=lambda x: _symbol_extractor(x.gate.theta)),\n cirq.google.SerializingArg(\n serialized_name=\"phi\",\n serialized_type=float,\n op_getter=lambda x: _symbol_extractor(x.gate.phi)),\n cirq.google.SerializingArg(\n serialized_name=\"theta_scalar\",\n serialized_type=float,\n op_getter=lambda x: _scalar_extractor(x.gate.theta)),\n cirq.google.SerializingArg(\n serialized_name=\"phi_scalar\",\n serialized_type=float,\n op_getter=lambda x: _scalar_extractor(x.gate.phi)),\n cirq.google.SerializingArg(serialized_name=\"control_qubits\",\n serialized_type=str,\n op_getter=lambda x: _serialize_controls(x)),\n cirq.google.SerializingArg(\n serialized_name=\"control_values\",\n serialized_type=str,\n op_getter=lambda x: _serialize_control_vals(x))\n ]\n return 
cirq.google.GateOpSerializer(gate_type=cirq.FSimGate,\n serialized_gate_id=\"FSIM\",\n args=args,\n can_serialize_predicate=_CONSTANT_TRUE)\n\n\ndef _fsim_gate_deserializer():\n \"\"\"Make standard deserializer for fsim gate.\"\"\"\n\n def _scalar_combiner(theta, theta_scalar, phi, phi_scalar, control_qubits,\n control_values):\n \"\"\"This is a workaround to support symbol scalar multiplication.\n See `_eigen_gate_deserializer` for details.\n \"\"\"\n return _optional_control_promote(\n cirq.FSimGate(theta=_round(theta) * _round(theta_scalar),\n phi=_round(phi) * _round(phi_scalar)), control_qubits,\n control_values)\n\n args = [\n cirq.google.DeserializingArg(serialized_name=\"theta\",\n constructor_arg_name=\"theta\"),\n cirq.google.DeserializingArg(serialized_name=\"phi\",\n constructor_arg_name=\"phi\"),\n cirq.google.DeserializingArg(serialized_name=\"theta_scalar\",\n constructor_arg_name=\"theta_scalar\"),\n cirq.google.DeserializingArg(serialized_name=\"phi_scalar\",\n constructor_arg_name=\"phi_scalar\"),\n cirq.google.DeserializingArg(serialized_name=\"control_qubits\",\n constructor_arg_name=\"control_qubits\"),\n cirq.google.DeserializingArg(serialized_name=\"control_values\",\n constructor_arg_name=\"control_values\")\n ]\n return cirq.google.GateOpDeserializer(serialized_gate_id=\"FSIM\",\n gate_constructor=_scalar_combiner,\n args=args)\n\n\ndef _identity_gate_serializer():\n \"\"\"Make a standard serializer for the single qubit identity.\"\"\"\n\n def _identity_check(x):\n if x.gate.num_qubits() != 1:\n raise ValueError(\"Multi-Qubit identity gate not supported.\"\n \"Given: {}. To work around this, use \"\n \"cirq.I.on_each instead.\".format(str(x)))\n return True\n\n # Here `args` is used for two reasons. 1. GateOpSerializer doesn't work well\n # with empty arg lists. 2. 
It is a nice way to check identity gate size.\n args = [\n cirq.google.SerializingArg(serialized_name=\"unused\",\n serialized_type=bool,\n op_getter=_identity_check),\n cirq.google.SerializingArg(serialized_name=\"control_qubits\",\n serialized_type=str,\n op_getter=lambda x: _serialize_controls(x)),\n cirq.google.SerializingArg(\n serialized_name=\"control_values\",\n serialized_type=str,\n op_getter=lambda x: _serialize_control_vals(x))\n ]\n return cirq.google.GateOpSerializer(gate_type=cirq.IdentityGate,\n serialized_gate_id=\"I\",\n args=args,\n can_serialize_predicate=_CONSTANT_TRUE)\n\n\ndef _identity_gate_deserializer():\n \"\"\"Make a standard deserializer for the single qubit identity.\"\"\"\n args = [\n cirq.google.DeserializingArg(serialized_name=\"unused\",\n constructor_arg_name=\"unused\"),\n cirq.google.DeserializingArg(serialized_name=\"control_qubits\",\n constructor_arg_name=\"control_qubits\"),\n cirq.google.DeserializingArg(serialized_name=\"control_values\",\n constructor_arg_name=\"control_values\")\n ]\n\n def _cirq_i_workaround(unused, control_qubits, control_values):\n return _optional_control_promote(cirq.I, control_qubits, control_values)\n\n return cirq.google.GateOpDeserializer(serialized_gate_id=\"I\",\n gate_constructor=_cirq_i_workaround,\n args=args)\n\n\ndef _phased_eigen_gate_serializer(gate_type, serialized_id):\n \"\"\"Make a standard serializer for phased eigen gates.\"\"\"\n\n args = [\n cirq.google.SerializingArg(\n serialized_name=\"phase_exponent\",\n serialized_type=float,\n op_getter=lambda x: _symbol_extractor(x.gate.phase_exponent)),\n cirq.google.SerializingArg(\n serialized_name=\"phase_exponent_scalar\",\n serialized_type=float,\n op_getter=lambda x: _scalar_extractor(x.gate.phase_exponent)),\n cirq.google.SerializingArg(\n serialized_name=\"exponent\",\n serialized_type=float,\n op_getter=lambda x: _symbol_extractor(x.gate.exponent)),\n cirq.google.SerializingArg(\n serialized_name=\"exponent_scalar\",\n serialized_type=float,\n op_getter=lambda x: _scalar_extractor(x.gate.exponent)),\n cirq.google.SerializingArg(\n serialized_name=\"global_shift\",\n serialized_type=float,\n op_getter=lambda x: float(x.gate.global_shift)),\n cirq.google.SerializingArg(serialized_name=\"control_qubits\",\n serialized_type=str,\n op_getter=lambda x: _serialize_controls(x)),\n cirq.google.SerializingArg(\n serialized_name=\"control_values\",\n serialized_type=str,\n op_getter=lambda x: _serialize_control_vals(x))\n ]\n return cirq.google.GateOpSerializer(gate_type=gate_type,\n serialized_gate_id=serialized_id,\n args=args,\n can_serialize_predicate=_CONSTANT_TRUE)\n\n\ndef _phased_eigen_gate_deserializer(gate_type, serialized_id):\n \"\"\"Make a standard deserializer for phased eigen gates.\"\"\"\n\n def _scalar_combiner(exponent, global_shift, exponent_scalar,\n phase_exponent, phase_exponent_scalar, control_qubits,\n control_values):\n \"\"\"This is a workaround to support symbol scalar multiplication.\n In the future we should likely get rid of this in favor of proper\n expression parsing once cirq supports it. See cirq.op_serializer\n and cirq's program protobuf for details. 
This is needed for things\n like cirq.rx('alpha').\n \"\"\"\n exponent = _round(exponent)\n phase_exponent = _round(phase_exponent)\n exponent = exponent if exponent_scalar == 1.0 \\\n else exponent * _round(exponent_scalar)\n phase_exponent = phase_exponent if phase_exponent_scalar == 1.0 \\\n else phase_exponent * _round(phase_exponent_scalar)\n if global_shift != 0:\n # needed in case this specific phasedeigengate doesn't\n # have a global_phase in constructor.\n return _optional_control_promote(\n gate_type(exponent=exponent,\n global_shift=_round(global_shift),\n phase_exponent=phase_exponent), control_qubits,\n control_values)\n return _optional_control_promote(\n gate_type(exponent=exponent, phase_exponent=phase_exponent),\n control_qubits, control_values)\n\n args = [\n cirq.google.DeserializingArg(serialized_name=\"phase_exponent\",\n constructor_arg_name=\"phase_exponent\"),\n cirq.google.DeserializingArg(\n serialized_name=\"phase_exponent_scalar\",\n constructor_arg_name=\"phase_exponent_scalar\"),\n cirq.google.DeserializingArg(serialized_name=\"exponent\",\n constructor_arg_name=\"exponent\"),\n cirq.google.DeserializingArg(serialized_name=\"exponent_scalar\",\n constructor_arg_name=\"exponent_scalar\"),\n cirq.google.DeserializingArg(serialized_name=\"global_shift\",\n constructor_arg_name=\"global_shift\"),\n cirq.google.DeserializingArg(serialized_name=\"control_qubits\",\n constructor_arg_name=\"control_qubits\"),\n cirq.google.DeserializingArg(serialized_name=\"control_values\",\n constructor_arg_name=\"control_values\")\n ]\n return cirq.google.GateOpDeserializer(serialized_gate_id=serialized_id,\n gate_constructor=_scalar_combiner,\n args=args)\n\n\nEIGEN_GATES_DICT = {\n cirq.XPowGate: \"XP\",\n cirq.XXPowGate: \"XXP\",\n cirq.YPowGate: \"YP\",\n cirq.YYPowGate: \"YYP\",\n cirq.ZPowGate: \"ZP\",\n cirq.ZZPowGate: \"ZZP\",\n cirq.HPowGate: \"HP\",\n cirq.CZPowGate: \"CZP\",\n cirq.CNotPowGate: \"CNP\",\n cirq.SwapPowGate: \"SP\",\n cirq.ISwapPowGate: \"ISP\",\n}\n\nPHASED_EIGEN_GATES_DICT = {\n cirq.PhasedXPowGate: \"PXP\",\n cirq.PhasedISwapPowGate: \"PISP\",\n}\n\nSERIALIZERS = [\n _eigen_gate_serializer(g, g_name) for g, g_name in EIGEN_GATES_DICT.items()\n] + [\n _fsim_gate_serializer(),\n] + [\n _identity_gate_serializer(),\n] + [\n _phased_eigen_gate_serializer(g, g_name)\n for g, g_name in PHASED_EIGEN_GATES_DICT.items()\n] + [_depolarize_channel_serializer()]\n\nDESERIALIZERS = [\n _eigen_gate_deserializer(g, g_name)\n for g, g_name in EIGEN_GATES_DICT.items()\n] + [\n _fsim_gate_deserializer(),\n] + [\n _identity_gate_deserializer(),\n] + [\n _phased_eigen_gate_deserializer(g, g_name)\n for g, g_name in PHASED_EIGEN_GATES_DICT.items()\n] + [_depolarize_channel_deserializer()]\n\nSERIALIZER = cirq.google.SerializableGateSet(gate_set_name=\"tfq_gate_set\",\n serializers=SERIALIZERS,\n deserializers=DESERIALIZERS)\n\n\ndef serialize_circuit(circuit_inp):\n \"\"\"Returns a `cirq.Program` proto representing the `cirq.Circuit`.\n\n Note that the circuit must use gates valid in the tfq_gate_set.\n Currently we only support scalar multiplication of symbols and\n no other more complex arithmetic expressions. This means\n we can support things like X**(3*alpha), and Rx(alpha). 
Because\n we use the `cirq.Program` proto, we only support `cirq.GridQubit` instances\n during serialization of circuits.\n\n Note: once serialized terminal measurements are removed.\n\n Args:\n circuit_inp: A `cirq.Circuit`.\n\n Returns:\n A `cirq.google.api.v2.Program` proto.\n \"\"\"\n circuit = copy.deepcopy(circuit_inp)\n if not isinstance(circuit, cirq.Circuit):\n raise TypeError(\"serialize requires cirq.Circuit objects.\"\n \" Given: \" + str(type(circuit)))\n\n # This code is intentionally written to avoid using cirq functions\n # as this get analyzed by tensorflow-autograph.\n\n # Gives a map from moment index to measure qubits in moment\n measured_moments = dict()\n\n # Tracks qubits that have been measured already.\n all_measured_qubits = set()\n for i, moment in enumerate(circuit.moments):\n measured_qubits = set()\n for op in moment:\n for qubit in op.qubits:\n if not isinstance(qubit, cirq.GridQubit):\n raise ValueError(\n \"Attempted to serialize circuit that don't use \"\n \"only cirq.GridQubits.\")\n\n if isinstance(op.gate, cirq.MeasurementGate):\n for qubit in op.qubits:\n if qubit in all_measured_qubits:\n raise ValueError(\"Serialization of circuit failed. \"\n \"Circuits with non-terminal \"\n \"measurement operations are not \"\n \"supported.\")\n measured_qubits.add(qubit)\n all_measured_qubits.add(qubit)\n\n if len(measured_qubits) > 0:\n measured_moments[i] = measured_qubits\n\n # Remove terminal measurements.\n for moment_ind in measured_moments:\n old_moment = circuit[moment_ind]\n measured_qubits = measured_moments[moment_ind]\n new_moment = cirq.Moment(\n filter(lambda x: not any(y in measured_qubits for y in x.qubits),\n old_moment.operations))\n circuit[moment_ind] = new_moment\n\n # Demote cirq.controlled_operations (controlled gates) to their sub_gate\n # types with _tfq_control_qubits and _tfq_control_values fields so that\n # the gates can still get picked up by the serializer. There would be no way\n # to discern controlledgates from one another otherwise. 
This\n # \"momentary demotion\" occurs with the help of the DelayedAssignmentGate.\n for i, moment in enumerate(circuit):\n controlled_ops = [\n op for op in moment if isinstance(op, cirq.ControlledOperation)\n ]\n new_ops = dict()\n for op in controlled_ops:\n tfq_compatible = op.sub_operation\n tfq_compatible._tfq_control_qubits = op.controls\n tfq_compatible._tfq_control_values = op.control_values\n new_ops[op.qubits] = tfq_compatible\n\n circuit[i] = cirq.Moment(\n new_ops[op.qubits] if op.qubits in new_ops else op for op in moment)\n\n return SERIALIZER.serialize(circuit)\n\n\ndef deserialize_circuit(proto):\n \"\"\"Constructs a `cirq.Circuit` from a `cirq.Program` proto.\n\n Note that the proto must use gates valid in the tfq_gate_set.\n\n Args:\n proto: A `cirq.google.api.v2.Program` proto\n\n Returns:\n A `cirq.Circuit`.\n \"\"\"\n if not isinstance(proto, cirq.google.api.v2.program_pb2.Program):\n raise TypeError(\"deserialize requires \"\n \"cirq.google.api.v2.program_pb2.Program object.\"\n \" Given: \" + str(type(proto)))\n\n return SERIALIZER.deserialize(proto)\n\n\ndef serialize_paulisum(paulisum):\n \"\"\"Constructs a pauli_sum proto from `cirq.PauliSum` or `cirq.PauliString`.\n\n Args:\n paulisum: A `cirq.PauliSum` object.\n\n Returns:\n A pauli_sum proto object.\n \"\"\"\n if isinstance(paulisum, cirq.PauliString):\n paulisum = cirq.PauliSum.from_pauli_strings(paulisum)\n\n if not isinstance(paulisum, cirq.PauliSum):\n raise TypeError(\"serialize requires a cirq.PauliSum object.\"\n \" Given: \" + str(type(paulisum)))\n\n if any(not isinstance(qubit, cirq.GridQubit) for qubit in paulisum.qubits):\n raise ValueError(\"Attempted to serialize a paulisum that doesn't use \"\n \"only cirq.GridQubits.\")\n\n paulisum_proto = pauli_sum_pb2.PauliSum()\n for term in paulisum:\n pauliterm_proto = pauli_sum_pb2.PauliTerm()\n\n pauliterm_proto.coefficient_real = term.coefficient.real\n pauliterm_proto.coefficient_imag = term.coefficient.imag\n for t in sorted(term.items()): # sort to keep qubits ordered.\n pauliterm_proto.paulis.add(\n qubit_id=v2.qubit_to_proto_id(t[0]),\n pauli_type=str(t[1]),\n )\n paulisum_proto.terms.extend([pauliterm_proto])\n\n return paulisum_proto\n\n\ndef deserialize_paulisum(proto):\n \"\"\"Constructs a `cirq.PauliSum` from pauli_sum proto.\n\n Args:\n proto: A pauli_sum proto object.\n\n Returns:\n A `cirq.PauliSum` object.\n \"\"\"\n if not isinstance(proto, pauli_sum_pb2.PauliSum):\n raise TypeError(\"deserialize requires a pauli_sum_pb2 object.\"\n \" Given: \" + str(type(proto)))\n\n res = cirq.PauliSum()\n for term_proto in proto.terms:\n coef = float(_round(term_proto.coefficient_real)) + \\\n 1.0j * float(_round(term_proto.coefficient_imag))\n term = coef * cirq.PauliString()\n for pauli_qubit_pair in term_proto.paulis:\n op = _process_pauli_type(pauli_qubit_pair.pauli_type)\n term *= op(v2.grid_qubit_from_proto_id(pauli_qubit_pair.qubit_id))\n res += term\n\n return res\n\n\ndef _process_pauli_type(char):\n if char == 'Z':\n return cirq.Z\n if char == 'X':\n return cirq.X\n if char == 'Y':\n return cirq.Y\n raise ValueError(\"Invalid pauli type.\")\n" ]
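The _scalar_extractor/_symbol_extractor pair above only ever has to split a two-factor product of a number and a Symbol; a minimal sketch of that decomposition (assuming sympy's usual Mul argument ordering, handled symmetrically here just as _parse_mul does):

import sympy

alpha = sympy.Symbol('alpha')
expr = (3 * alpha).evalf()            # a two-argument sympy.Mul: (3.0, alpha)
lhs, rhs = expr.args
scalar = float(lhs) if lhs.is_number else float(rhs)
symbol = rhs if lhs.is_number else lhs
print(scalar, symbol)                 # 3.0 alpha -- serialized as separate proto args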
[ [ "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alazarchuk/scipy
[ "7124fc982ea9b0ea961c65db550c0703abcb9bfd" ]
[ "scipy/signal/tests/test_windows.py" ]
[ "import pickle\n\nimport numpy as np\nfrom numpy import array\nfrom numpy.testing import (assert_array_almost_equal, assert_array_equal,\n assert_allclose,\n assert_equal, assert_, assert_array_less,\n suppress_warnings)\nfrom pytest import raises as assert_raises\n\nfrom scipy.fft import fft\nfrom scipy.signal import windows, get_window, resample, hann as dep_hann\n\n\nwindow_funcs = [\n ('boxcar', ()),\n ('triang', ()),\n ('parzen', ()),\n ('bohman', ()),\n ('blackman', ()),\n ('nuttall', ()),\n ('blackmanharris', ()),\n ('flattop', ()),\n ('bartlett', ()),\n ('hanning', ()),\n ('barthann', ()),\n ('hamming', ()),\n ('kaiser', (1,)),\n ('dpss', (2,)),\n ('gaussian', (0.5,)),\n ('general_gaussian', (1.5, 2)),\n ('chebwin', (1,)),\n ('cosine', ()),\n ('hann', ()),\n ('exponential', ()),\n ('tukey', (0.5,)),\n ]\n\n\nclass TestBartHann(object):\n\n def test_basic(self):\n assert_allclose(windows.barthann(6, sym=True),\n [0, 0.35857354213752, 0.8794264578624801,\n 0.8794264578624801, 0.3585735421375199, 0])\n assert_allclose(windows.barthann(7),\n [0, 0.27, 0.73, 1.0, 0.73, 0.27, 0])\n assert_allclose(windows.barthann(6, False),\n [0, 0.27, 0.73, 1.0, 0.73, 0.27])\n\n\nclass TestBartlett(object):\n\n def test_basic(self):\n assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0])\n assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0])\n assert_allclose(windows.bartlett(6, False),\n [0, 1/3, 2/3, 1.0, 2/3, 1/3])\n\n\nclass TestBlackman(object):\n\n def test_basic(self):\n assert_allclose(windows.blackman(6, sym=False),\n [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)\n assert_allclose(windows.blackman(7, sym=False),\n [0, 0.09045342435412804, 0.4591829575459636,\n 0.9203636180999081, 0.9203636180999081,\n 0.4591829575459636, 0.09045342435412804], atol=1e-8)\n assert_allclose(windows.blackman(6),\n [0, 0.2007701432625305, 0.8492298567374694,\n 0.8492298567374694, 0.2007701432625305, 0],\n atol=1e-14)\n assert_allclose(windows.blackman(7, True),\n [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14)\n\n\nclass TestBlackmanHarris(object):\n\n def test_basic(self):\n assert_allclose(windows.blackmanharris(6, False),\n [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645])\n assert_allclose(windows.blackmanharris(7, sym=False),\n [6.0e-05, 0.03339172347815117, 0.332833504298565,\n 0.8893697722232837, 0.8893697722232838,\n 0.3328335042985652, 0.03339172347815122])\n assert_allclose(windows.blackmanharris(6),\n [6.0e-05, 0.1030114893456638, 0.7938335106543362,\n 0.7938335106543364, 0.1030114893456638, 6.0e-05])\n assert_allclose(windows.blackmanharris(7, sym=True),\n [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645,\n 6.0e-05])\n\n\nclass TestBohman(object):\n\n def test_basic(self):\n assert_allclose(windows.bohman(6),\n [0, 0.1791238937062839, 0.8343114522576858,\n 0.8343114522576858, 0.1791238937062838, 0])\n assert_allclose(windows.bohman(7, sym=True),\n [0, 0.1089977810442293, 0.6089977810442293, 1.0,\n 0.6089977810442295, 0.1089977810442293, 0])\n assert_allclose(windows.bohman(6, False),\n [0, 0.1089977810442293, 0.6089977810442293, 1.0,\n 0.6089977810442295, 0.1089977810442293])\n\n\nclass TestBoxcar(object):\n\n def test_basic(self):\n assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1])\n assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1])\n assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1])\n\n\ncheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,\n 0.198891, 0.235450, 0.274846, 0.316836,\n 0.361119, 0.407338, 0.455079, 0.503883,\n 
0.553248, 0.602637, 0.651489, 0.699227,\n 0.745266, 0.789028, 0.829947, 0.867485,\n 0.901138, 0.930448, 0.955010, 0.974482,\n 0.988591, 0.997138, 1.000000, 0.997138,\n 0.988591, 0.974482, 0.955010, 0.930448,\n 0.901138, 0.867485, 0.829947, 0.789028,\n 0.745266, 0.699227, 0.651489, 0.602637,\n 0.553248, 0.503883, 0.455079, 0.407338,\n 0.361119, 0.316836, 0.274846, 0.235450,\n 0.198891, 0.165348, 0.134941, 0.107729,\n 0.200938])\n\ncheb_even_true = array([0.203894, 0.107279, 0.133904,\n 0.163608, 0.196338, 0.231986,\n 0.270385, 0.311313, 0.354493,\n 0.399594, 0.446233, 0.493983,\n 0.542378, 0.590916, 0.639071,\n 0.686302, 0.732055, 0.775783,\n 0.816944, 0.855021, 0.889525,\n 0.920006, 0.946060, 0.967339,\n 0.983557, 0.994494, 1.000000,\n 1.000000, 0.994494, 0.983557,\n 0.967339, 0.946060, 0.920006,\n 0.889525, 0.855021, 0.816944,\n 0.775783, 0.732055, 0.686302,\n 0.639071, 0.590916, 0.542378,\n 0.493983, 0.446233, 0.399594,\n 0.354493, 0.311313, 0.270385,\n 0.231986, 0.196338, 0.163608,\n 0.133904, 0.107279, 0.203894])\n\n\nclass TestChebWin(object):\n\n def test_basic(self):\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n assert_allclose(windows.chebwin(6, 100),\n [0.1046401879356917, 0.5075781475823447, 1.0, 1.0,\n 0.5075781475823447, 0.1046401879356917])\n assert_allclose(windows.chebwin(7, 100),\n [0.05650405062850233, 0.316608530648474,\n 0.7601208123539079, 1.0, 0.7601208123539079,\n 0.316608530648474, 0.05650405062850233])\n assert_allclose(windows.chebwin(6, 10),\n [1.0, 0.6071201674458373, 0.6808391469897297,\n 0.6808391469897297, 0.6071201674458373, 1.0])\n assert_allclose(windows.chebwin(7, 10),\n [1.0, 0.5190521247588651, 0.5864059018130382,\n 0.6101519801307441, 0.5864059018130382,\n 0.5190521247588651, 1.0])\n assert_allclose(windows.chebwin(6, 10, False),\n [1.0, 0.5190521247588651, 0.5864059018130382,\n 0.6101519801307441, 0.5864059018130382,\n 0.5190521247588651])\n\n def test_cheb_odd_high_attenuation(self):\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n cheb_odd = windows.chebwin(53, at=-40)\n assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)\n\n def test_cheb_even_high_attenuation(self):\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n cheb_even = windows.chebwin(54, at=40)\n assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)\n\n def test_cheb_odd_low_attenuation(self):\n cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405,\n 0.610151, 0.586405, 0.519052,\n 1.000000])\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n cheb_odd = windows.chebwin(7, at=10)\n assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4)\n\n def test_cheb_even_low_attenuation(self):\n cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027,\n 0.541338, 0.541338, 0.51027,\n 0.451924, 1.000000])\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n cheb_even = windows.chebwin(8, at=-10)\n assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4)\n\n\nexponential_data = {\n (4, None, 0.2, False):\n array([4.53999297624848542e-05,\n 6.73794699908546700e-03, 1.00000000000000000e+00,\n 6.73794699908546700e-03]),\n (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988,\n 0.0820849986238988, 0.00055308437014783]),\n (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,\n 
0.36787944117144233]),\n (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342,\n 0.60653065971263342, 0.22313016014842982]),\n (4, 2, 0.2, False):\n array([4.53999297624848542e-05, 6.73794699908546700e-03,\n 1.00000000000000000e+00, 6.73794699908546700e-03]),\n (4, 2, 0.2, True): None,\n (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1.,\n 0.36787944117144233]),\n (4, 2, 1.0, True): None,\n (5, None, 0.2, True):\n array([4.53999297624848542e-05,\n 6.73794699908546700e-03, 1.00000000000000000e+00,\n 6.73794699908546700e-03, 4.53999297624848542e-05]),\n (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1.,\n 0.36787944117144233, 0.1353352832366127]),\n (5, 2, 0.2, True): None,\n (5, 2, 1.0, True): None\n}\n\n\ndef test_exponential():\n for k, v in exponential_data.items():\n if v is None:\n assert_raises(ValueError, windows.exponential, *k)\n else:\n win = windows.exponential(*k)\n assert_allclose(win, v, rtol=1e-14)\n\n\nclass TestFlatTop(object):\n\n def test_basic(self):\n assert_allclose(windows.flattop(6, sym=False),\n [-0.000421051, -0.051263156, 0.19821053, 1.0,\n 0.19821053, -0.051263156])\n assert_allclose(windows.flattop(7, sym=False),\n [-0.000421051, -0.03684078115492348,\n 0.01070371671615342, 0.7808739149387698,\n 0.7808739149387698, 0.01070371671615342,\n -0.03684078115492348])\n assert_allclose(windows.flattop(6),\n [-0.000421051, -0.0677142520762119, 0.6068721525762117,\n 0.6068721525762117, -0.0677142520762119,\n -0.000421051])\n assert_allclose(windows.flattop(7, True),\n [-0.000421051, -0.051263156, 0.19821053, 1.0,\n 0.19821053, -0.051263156, -0.000421051])\n\n\nclass TestGaussian(object):\n\n def test_basic(self):\n assert_allclose(windows.gaussian(6, 1.0),\n [0.04393693362340742, 0.3246524673583497,\n 0.8824969025845955, 0.8824969025845955,\n 0.3246524673583497, 0.04393693362340742])\n assert_allclose(windows.gaussian(7, 1.2),\n [0.04393693362340742, 0.2493522087772962,\n 0.7066482778577162, 1.0, 0.7066482778577162,\n 0.2493522087772962, 0.04393693362340742])\n assert_allclose(windows.gaussian(7, 3),\n [0.6065306597126334, 0.8007374029168081,\n 0.9459594689067654, 1.0, 0.9459594689067654,\n 0.8007374029168081, 0.6065306597126334])\n assert_allclose(windows.gaussian(6, 3, False),\n [0.6065306597126334, 0.8007374029168081,\n 0.9459594689067654, 1.0, 0.9459594689067654,\n 0.8007374029168081])\n\n\nclass TestGeneralCosine(object):\n\n def test_basic(self):\n assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]),\n [0.4, 0.3, 1, 0.3, 0.4])\n assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False),\n [0.4, 0.3, 1, 0.3])\n\nclass TestGeneralHamming(object):\n\n def test_basic(self):\n assert_allclose(windows.general_hamming(5, 0.7),\n [0.4, 0.7, 1.0, 0.7, 0.4])\n assert_allclose(windows.general_hamming(5, 0.75, sym=False),\n [0.5, 0.6727457514, 0.9522542486,\n 0.9522542486, 0.6727457514])\n assert_allclose(windows.general_hamming(6, 0.75, sym=True),\n [0.5, 0.6727457514, 0.9522542486,\n 0.9522542486, 0.6727457514, 0.5])\n\n\nclass TestHamming(object):\n\n def test_basic(self):\n assert_allclose(windows.hamming(6, False),\n [0.08, 0.31, 0.77, 1.0, 0.77, 0.31])\n assert_allclose(windows.hamming(7, sym=False),\n [0.08, 0.2531946911449826, 0.6423596296199047,\n 0.9544456792351128, 0.9544456792351128,\n 0.6423596296199047, 0.2531946911449826])\n assert_allclose(windows.hamming(6),\n [0.08, 0.3978521825875242, 0.9121478174124757,\n 0.9121478174124757, 0.3978521825875242, 0.08])\n 
assert_allclose(windows.hamming(7, sym=True),\n [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08])\n\n\nclass TestHann(object):\n\n def test_basic(self):\n assert_allclose(windows.hann(6, sym=False),\n [0, 0.25, 0.75, 1.0, 0.75, 0.25])\n assert_allclose(windows.hann(7, sym=False),\n [0, 0.1882550990706332, 0.6112604669781572,\n 0.9504844339512095, 0.9504844339512095,\n 0.6112604669781572, 0.1882550990706332])\n assert_allclose(windows.hann(6, True),\n [0, 0.3454915028125263, 0.9045084971874737,\n 0.9045084971874737, 0.3454915028125263, 0])\n assert_allclose(windows.hann(7),\n [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0])\n\n\nclass TestKaiser(object):\n\n def test_basic(self):\n assert_allclose(windows.kaiser(6, 0.5),\n [0.9403061933191572, 0.9782962393705389,\n 0.9975765035372042, 0.9975765035372042,\n 0.9782962393705389, 0.9403061933191572])\n assert_allclose(windows.kaiser(7, 0.5),\n [0.9403061933191572, 0.9732402256999829,\n 0.9932754654413773, 1.0, 0.9932754654413773,\n 0.9732402256999829, 0.9403061933191572])\n assert_allclose(windows.kaiser(6, 2.7),\n [0.2603047507678832, 0.6648106293528054,\n 0.9582099802511439, 0.9582099802511439,\n 0.6648106293528054, 0.2603047507678832])\n assert_allclose(windows.kaiser(7, 2.7),\n [0.2603047507678832, 0.5985765418119844,\n 0.8868495172060835, 1.0, 0.8868495172060835,\n 0.5985765418119844, 0.2603047507678832])\n assert_allclose(windows.kaiser(6, 2.7, False),\n [0.2603047507678832, 0.5985765418119844,\n 0.8868495172060835, 1.0, 0.8868495172060835,\n 0.5985765418119844])\n\n\nclass TestNuttall(object):\n\n def test_basic(self):\n assert_allclose(windows.nuttall(6, sym=False),\n [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,\n 0.0613345])\n assert_allclose(windows.nuttall(7, sym=False),\n [0.0003628, 0.03777576895352025, 0.3427276199688195,\n 0.8918518610776603, 0.8918518610776603,\n 0.3427276199688196, 0.0377757689535203])\n assert_allclose(windows.nuttall(6),\n [0.0003628, 0.1105152530498718, 0.7982580969501282,\n 0.7982580969501283, 0.1105152530498719, 0.0003628])\n assert_allclose(windows.nuttall(7, True),\n [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,\n 0.0613345, 0.0003628])\n\n\nclass TestParzen(object):\n\n def test_basic(self):\n assert_allclose(windows.parzen(6),\n [0.009259259259259254, 0.25, 0.8611111111111112,\n 0.8611111111111112, 0.25, 0.009259259259259254])\n assert_allclose(windows.parzen(7, sym=True),\n [0.00583090379008747, 0.1574344023323616,\n 0.6501457725947521, 1.0, 0.6501457725947521,\n 0.1574344023323616, 0.00583090379008747])\n assert_allclose(windows.parzen(6, False),\n [0.00583090379008747, 0.1574344023323616,\n 0.6501457725947521, 1.0, 0.6501457725947521,\n 0.1574344023323616])\n\n\nclass TestTriang(object):\n\n def test_basic(self):\n\n assert_allclose(windows.triang(6, True),\n [1/6, 1/2, 5/6, 5/6, 1/2, 1/6])\n assert_allclose(windows.triang(7),\n [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4])\n assert_allclose(windows.triang(6, sym=False),\n [1/4, 1/2, 3/4, 1, 3/4, 1/2])\n\n\ntukey_data = {\n (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]),\n (4, 0.9, True): array([0.0, 0.84312081893436686,\n 0.84312081893436686, 0.0]),\n (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]),\n (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]),\n (4, 0.9, False): array([0.0, 0.58682408883346526,\n 1.0, 0.58682408883346526]),\n (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]),\n (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]),\n (5, 0.8, True): array([0.0, 0.69134171618254492,\n 1.0, 0.69134171618254492, 0.0]),\n (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]),\n\n (6, 0): 
[1, 1, 1, 1, 1, 1],\n (7, 0): [1, 1, 1, 1, 1, 1, 1],\n (6, .25): [0, 1, 1, 1, 1, 0],\n (7, .25): [0, 1, 1, 1, 1, 1, 0],\n (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0],\n (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0],\n (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0],\n (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0,\n 0.9698463103929542, 0.4131759111665347, 0],\n (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737,\n 0.3454915028125263, 0],\n (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0],\n}\n\n\nclass TestTukey(object):\n\n def test_basic(self):\n # Test against hardcoded data\n for k, v in tukey_data.items():\n if v is None:\n assert_raises(ValueError, windows.tukey, *k)\n else:\n win = windows.tukey(*k)\n assert_allclose(win, v, rtol=1e-14)\n\n def test_extremes(self):\n # Test extremes of alpha correspond to boxcar and hann\n tuk0 = windows.tukey(100, 0)\n box0 = windows.boxcar(100)\n assert_array_almost_equal(tuk0, box0)\n\n tuk1 = windows.tukey(100, 1)\n han1 = windows.hann(100)\n assert_array_almost_equal(tuk1, han1)\n\n\ndpss_data = {\n # All values from MATLAB:\n # * taper[1] of (3, 1.4, 3) sign-flipped\n # * taper[3] of (5, 1.5, 5) sign-flipped\n (4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]), # noqa\n (3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]), # noqa\n (5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]), # noqa: E501\n (100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 
0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, -0.04770068, -0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, 
-0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]), # noqa: E501\n}\n\n\nclass TestDPSS(object):\n\n def test_basic(self):\n # Test against hardcoded data\n for k, v in dpss_data.items():\n win, ratios = windows.dpss(*k, return_ratios=True)\n assert_allclose(win, v[0], atol=1e-7, err_msg=k)\n assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k)\n\n def test_unity(self):\n # Test unity value handling (gh-2221)\n for M in range(1, 21):\n # corrected w/approximation (default)\n win = windows.dpss(M, M / 2.1)\n expected = M % 2 # one for odd, none for even\n assert_equal(np.isclose(win, 1.).sum(), expected,\n err_msg='%s' % (win,))\n # corrected w/subsample delay (slower)\n win_sub = windows.dpss(M, M / 2.1, norm='subsample')\n if M > 2:\n # @M=2 the subsample doesn't do anything\n assert_equal(np.isclose(win_sub, 1.).sum(), expected,\n err_msg='%s' % (win_sub,))\n assert_allclose(win, win_sub, rtol=0.03) # within 3%\n # not the same, l2-norm\n win_2 = windows.dpss(M, M / 2.1, norm=2)\n expected = 1 if M == 1 else 0\n assert_equal(np.isclose(win_2, 1.).sum(), expected,\n err_msg='%s' % (win_2,))\n\n def test_extremes(self):\n # Test extremes of alpha\n lam = windows.dpss(31, 6, 4, return_ratios=True)[1]\n assert_array_almost_equal(lam, 1.)\n lam = windows.dpss(31, 7, 4, return_ratios=True)[1]\n assert_array_almost_equal(lam, 1.)\n lam = windows.dpss(31, 8, 4, return_ratios=True)[1]\n assert_array_almost_equal(lam, 1.)\n\n def test_degenerate(self):\n # Test failures\n assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax\n assert_raises(ValueError, windows.dpss, 4, 1.5, -5)\n assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1)\n assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2.\n assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos\n assert_raises(ValueError, windows.dpss, 3, 0, 3)\n assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M\n\n\nclass TestGetWindow(object):\n\n def test_boxcar(self):\n w = windows.get_window('boxcar', 12)\n assert_array_equal(w, np.ones_like(w))\n\n # window is a tuple of len 1\n w = windows.get_window(('boxcar',), 16)\n assert_array_equal(w, np.ones_like(w))\n\n def test_cheb_odd(self):\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n w = windows.get_window(('chebwin', -40), 53, fftbins=False)\n assert_array_almost_equal(w, cheb_odd_true, decimal=4)\n\n def test_cheb_even(self):\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"This window is not suitable\")\n w = windows.get_window(('chebwin', 40), 54, fftbins=False)\n assert_array_almost_equal(w, cheb_even_true, decimal=4)\n\n def test_kaiser_float(self):\n win1 = windows.get_window(7.2, 64)\n win2 = windows.kaiser(64, 7.2, False)\n assert_allclose(win1, win2)\n\n def test_invalid_inputs(self):\n # Window is not a float, tuple, or string\n assert_raises(ValueError, windows.get_window, set('hann'), 8)\n\n # Unknown window type error\n assert_raises(ValueError, windows.get_window, 'broken', 4)\n\n def test_array_as_window(self):\n # github issue 3603\n osfactor = 128\n sig = np.arange(128)\n\n win = windows.get_window(('kaiser', 8.0), osfactor // 2)\n with assert_raises(ValueError, match='must have the same length'):\n resample(sig, len(sig) * osfactor, window=win)\n\n\ndef test_windowfunc_basics():\n for window_name, params in window_funcs:\n window = getattr(windows, window_name)\n with suppress_warnings() as sup:\n 
sup.filter(UserWarning, \"This window is not suitable\")\n if window_name in ('hanning',):\n sup.filter(DeprecationWarning)\n # Check symmetry for odd and even lengths\n w1 = window(8, *params, sym=True)\n w2 = window(7, *params, sym=False)\n assert_array_almost_equal(w1[:-1], w2)\n\n w1 = window(9, *params, sym=True)\n w2 = window(8, *params, sym=False)\n assert_array_almost_equal(w1[:-1], w2)\n\n # Check that functions run and output lengths are correct\n assert_equal(len(window(6, *params, sym=True)), 6)\n assert_equal(len(window(6, *params, sym=False)), 6)\n assert_equal(len(window(7, *params, sym=True)), 7)\n assert_equal(len(window(7, *params, sym=False)), 7)\n\n # Check invalid lengths\n assert_raises(ValueError, window, 5.5, *params)\n assert_raises(ValueError, window, -7, *params)\n\n # Check degenerate cases\n assert_array_equal(window(0, *params, sym=True), [])\n assert_array_equal(window(0, *params, sym=False), [])\n assert_array_equal(window(1, *params, sym=True), [1])\n assert_array_equal(window(1, *params, sym=False), [1])\n\n # Check dtype\n assert_(window(0, *params, sym=True).dtype == 'float')\n assert_(window(0, *params, sym=False).dtype == 'float')\n assert_(window(1, *params, sym=True).dtype == 'float')\n assert_(window(1, *params, sym=False).dtype == 'float')\n assert_(window(6, *params, sym=True).dtype == 'float')\n assert_(window(6, *params, sym=False).dtype == 'float')\n\n # Check normalization\n assert_array_less(window(10, *params, sym=True), 1.01)\n assert_array_less(window(10, *params, sym=False), 1.01)\n assert_array_less(window(9, *params, sym=True), 1.01)\n assert_array_less(window(9, *params, sym=False), 1.01)\n\n # Check that DFT-even spectrum is purely real for odd and even\n assert_allclose(fft(window(10, *params, sym=False)).imag,\n 0, atol=1e-14)\n assert_allclose(fft(window(11, *params, sym=False)).imag,\n 0, atol=1e-14)\n\n\ndef test_needs_params():\n for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',\n 'general gaussian', 'general_gaussian',\n 'general gauss', 'general_gauss', 'ggs',\n 'dss', 'dpss',\n 'chebwin', 'cheb', 'exponential', 'poisson', 'tukey',\n 'tuk', 'dpss']:\n assert_raises(ValueError, get_window, winstr, 7)\n\n\ndef test_deprecation():\n if dep_hann.__doc__ is not None: # can be None with `-OO` mode\n assert_('signal.hann is deprecated' in dep_hann.__doc__)\n assert_('deprecated' not in windows.hann.__doc__)\n\n\ndef test_deprecated_pickleable():\n dep_hann2 = pickle.loads(pickle.dumps(dep_hann))\n assert_(dep_hann2 is dep_hann)\n" ]
[ [ "scipy.signal.windows.general_cosine", "scipy.signal.windows.boxcar", "scipy.signal.windows.bohman", "scipy.signal.windows.flattop", "scipy.signal.windows.parzen", "numpy.ones_like", "numpy.testing.suppress_warnings", "numpy.arange", "scipy.signal.windows.gaussian", "scipy.signal.windows.nuttall", "scipy.signal.windows.get_window", "numpy.testing.assert_array_almost_equal", "numpy.isclose", "scipy.signal.windows.dpss", "scipy.signal.windows.hann", "scipy.signal.windows.exponential", "numpy.testing.assert_", "numpy.testing.assert_allclose", "scipy.signal.windows.triang", "numpy.array", "scipy.signal.windows.blackmanharris", "scipy.signal.windows.kaiser", "scipy.signal.windows.hamming", "scipy.signal.windows.blackman", "scipy.signal.windows.bartlett", "scipy.signal.windows.tukey", "scipy.signal.windows.general_hamming", "scipy.signal.windows.chebwin", "scipy.signal.windows.barthann" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.2", "1.8" ], "tensorflow": [] } ]
Liambeguin/git-pandas
[ "e56b817b1d66b8296d1d5e703d5db0e181d25899" ]
[ "gitpandas/project.py" ]
[ "\"\"\"\n.. module:: projectdirectory\n :platform: Unix, Windows\n :synopsis: A module for examining collections of git repositories as a whole\n\n.. moduleauthor:: Will McGinnis <[email protected]>\n\n\n\"\"\"\n\nimport math\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport requests\nimport warnings\nfrom git import GitCommandError\nfrom gitpandas.repository import Repository\n\ntry:\n from joblib import delayed, Parallel\n\n _has_joblib = True\nexcept ImportError as e:\n _has_joblib = False\n\n__author__ = 'willmcginnis'\n\n\n# Functions for joblib.\ndef _branches_func(r):\n return r.branches()\n\n\ndef _revs_func(repo, branch, limit, skip, num_datapoints):\n return repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)\n\n\ndef _tags_func(repo):\n return repo.tags()\n\n\nclass ProjectDirectory(object):\n \"\"\"\n An object that refers to a directory full of git repositories, for bulk analysis. It contains a collection of\n git-pandas repository objects, created by os.walk-ing a directory to file all child .git subdirectories.\n\n :param working_dir: (optional, default=None), the working directory to search for repositories in, None for cwd, or an explicit list of directories containing git repositories\n :param ignore_repos: (optional, default=None), a list of directories to ignore when searching for git repos.\n :param verbose: (default=True), if True, will print out verbose logging to terminal\n :param verbose: optional, verbosity level of output, bool\n :param tmp_dir: optional, a path to clone the repo into if necessary. Will create one if none passed.\n :param cache_backend: optional, an instantiated cache backend from gitpandas.cache\n :return:\n \"\"\"\n def __init__(self, working_dir=None, ignore_repos=None, verbose=True, tmp_dir=None, cache_backend=None):\n if working_dir is None:\n self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(os.getcwd()) if '.git' in x[0]])\n elif isinstance(working_dir, list):\n self.repo_dirs = working_dir\n else:\n self.repo_dirs = set([x[0].split('.git')[0] for x in os.walk(working_dir) if '.git' in x[0]])\n\n self.repos = [Repository(r, verbose=verbose, tmp_dir=tmp_dir, cache_backend=cache_backend) for r in self.repo_dirs]\n\n if ignore_repos is not None:\n self.repos = [x for x in self.repos if x.repo_name not in ignore_repos]\n\n def _repo_name(self):\n warnings.warn('please use repo_name() now instead of _repo_name()', DeprecationWarning)\n return self.repo_name()\n\n def repo_name(self):\n \"\"\"\n Returns a DataFrame of the repo names present in this project directory\n\n :return: DataFrame\n\n \"\"\"\n\n ds = [[x.repo_name] for x in self.repos]\n df = pd.DataFrame(ds, columns=['repository'])\n return df\n\n def is_bare(self):\n \"\"\"\n Returns a dataframe of repo names and whether or not they are bare.\n\n :return: DataFrame\n \"\"\"\n\n ds = [[x.repo_name, x.is_bare()] for x in self.repos]\n df = pd.DataFrame(ds, columns=['repository', 'is_bare'])\n return df\n\n def has_coverage(self):\n \"\"\"\n Returns a DataFrame of repo names and whether or not they have a .coverage file that can be parsed\n\n :return: DataFrame\n \"\"\"\n\n ds = [[x.repo_name, x.has_coverage()] for x in self.repos]\n df = pd.DataFrame(ds, columns=['repository', 'has_coverage'])\n return df\n\n def coverage(self):\n \"\"\"\n Will return a DataFrame with coverage information (if available) for each repo in the project).\n\n If there is a .coverage file available, this will attempt to form a DataFrame with that 
information in it, which\n will contain the columns:\n\n * repository\n * filename\n * lines_covered\n * total_lines\n * coverage\n\n If it can't be found or parsed, an empty DataFrame of that form will be returned.\n\n :return: DataFrame\n \"\"\"\n\n df = pd.DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage', 'repository'])\n\n for repo in self.repos:\n try:\n cov = repo.coverage()\n cov['repository'] = repo.repo_name\n df = df.append(cov)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have coverage' % (repo, ))\n\n df.reset_index()\n\n return df\n\n def file_change_rates(self, branch='master', limit=None, coverage=False, days=None, ignore_globs=None, include_globs=None):\n \"\"\"\n This function will return a DataFrame containing some basic aggregations of the file change history data, and\n optionally test coverage data from a coverage_data.py .coverage file. The aim here is to identify files in the\n project which have abnormal edit rates, or the rate of changes without growing the files size. If a file has\n a high change rate and poor test coverage, then it is a great candidate for writing more tests.\n\n :param branch: (optional, default=master) the branch to return commits for\n :param limit: (optional, default=None) a maximum number of commits to return, None for no limit\n :param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.\n :param days: (optional, default=None) number of days to return if limit is None\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n \"\"\"\n\n columns = ['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change', 'edit_rate', 'repository']\n if coverage:\n columns += ['lines_covered', 'total_lines', 'coverage']\n df = pd.DataFrame(columns=columns)\n\n for repo in self.repos:\n try:\n fcr = repo.file_change_rates(\n branch=branch,\n limit=limit,\n coverage=coverage,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n fcr['repository'] = repo.repo_name\n df = df.append(fcr)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df\n\n def hours_estimate(self, branch='master', grouping_window=0.5, single_commit_hours=0.5, limit=None, days=None, committer=True, by=None, ignore_globs=None, include_globs=None):\n \"\"\"\n inspired by: https://github.com/kimmobrunfeldt/git-hours/blob/8aaeee237cb9d9028e7a2592a25ad8468b1f45e4/index.js#L114-L143\n\n Iterates through the commit history of repo to estimate the time commitement of each author or committer over\n the course of time indicated by limit/extensions/days/etc.\n\n :param branch: the branch to return commits for\n :param limit: (optional, default=None) a maximum number of commits to return, None for no limit\n :param grouping_window: (optional, default=0.5 hours) the threhold for how close two commits need to be to consider them part of one coding session\n :param single_commit_hours: (optional, default 0.5 hours) the time range to associate with one single commit\n :param days: (optional, default=None) number of days to return, if limit is None\n :param committer: (optional, default=True) whether to use committer vs. 
author\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n \"\"\"\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n if committer:\n com = 'committer'\n else:\n com = 'author'\n\n df = pd.DataFrame(columns=[com, 'hours', 'repository'])\n\n for repo in self.repos:\n try:\n ch = repo.hours_estimate(\n branch,\n grouping_window=grouping_window,\n single_commit_hours=single_commit_hours,\n limit=limit,\n days=days,\n committer=committer,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n if by == 'committer' or by == 'author':\n df = df.groupby(com).agg({'hours': sum})\n df = df.reset_index()\n elif by == 'repository':\n df = df.groupby('repository').agg({'hours': sum})\n df = df.reset_index()\n\n return df\n\n def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None):\n \"\"\"\n Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories\n are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is\n divided by the number of repositories in the project directory to find out how many commits to pull from each\n project. Future implementations will use date ordering across all projects to get the true most recent N commits\n across the project.\n\n Included in that DataFrame will be the columns:\n\n * repository\n * date (index)\n * author\n * committer\n * message\n * lines\n * insertions\n * deletions\n * net\n\n :param branch: the branch to return commits for\n :param limit: (optional, default=None) a maximum number of commits to return, None for no limit\n :param days: (optional, default=None) number of days to return if limit is None\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n \"\"\"\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net'])\n\n for repo in self.repos:\n try:\n ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs)\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df\n\n def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):\n \"\"\"\n Returns a DataFrame of all file changes (via the commit history) for the specified branch. This is similar to\n the commit history DataFrame, but is one row per file edit rather than one row per commit (which may encapsulate\n many file changes). 
Included in the DataFrame will be the columns:\n\n * repository\n * date (index)\n * author\n * committer\n * message\n * filename\n * insertions\n * deletions\n\n :param branch: the branch to return commits for\n :param limit: (optional, default=None) a maximum number of commits to return, None for no limit\n :param days: (optional, default=None) number of days to return if limit is None\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n \"\"\"\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])\n\n for repo in self.repos:\n try:\n ch = repo.file_change_history(\n branch,\n limit=limit,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df\n\n def blame(self, committer=True, by='repository', ignore_globs=None, include_globs=None):\n \"\"\"\n Returns the blame from the current HEAD of the repositories as a DataFrame. The DataFrame is grouped by committer\n name, so it will be the sum of all contributions to all repositories by each committer. As with the commit history\n method, extensions and ignore_dirs parameters can be passed to exclude certain directories, or focus on certain\n file extensions. The DataFrame will have the columns:\n\n * committer\n * loc\n\n :param committer: (optional, default=True) true if committer should be reported, false if author\n :param by: (optional, default=repository) whether to group by repository or by file\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n \"\"\"\n\n df = None\n\n for repo in self.repos:\n try:\n if df is None:\n df = repo.blame(committer=committer, by=by, ignore_globs=ignore_globs, include_globs=include_globs)\n else:\n df = df.append(repo.blame(committer=committer, by=by, ignore_globs=ignore_globs, include_globs=include_globs))\n except GitCommandError as err:\n print('Warning! 
Repo: %s couldnt be blamed' % (repo, ))\n pass\n\n df = df.reset_index(level=1)\n df = df.reset_index(level=1)\n if committer:\n if by == 'repository':\n df = df.groupby('committer').agg({'loc': np.sum})\n elif by == 'file':\n df = df.groupby(['committer', 'file']).agg({'loc': np.sum})\n else:\n if by == 'repository':\n df = df.groupby('author').agg({'loc': np.sum})\n elif by == 'file':\n df = df.groupby(['author', 'file']).agg({'loc': np.sum})\n\n df = df.sort_values(by=['loc'], ascending=False)\n\n return df\n\n def file_detail(self, rev='HEAD', committer=True, ignore_globs=None, include_globs=None):\n \"\"\"\n Returns a table of all current files in the repos, with some high level information about each file (total LOC,\n file owner, extension, most recent edit date, etc.).\n\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :param committer: (optional, default=True) true if committer should be reported, false if author\n :return:\n \"\"\"\n\n df = None\n\n for repo in self.repos:\n try:\n if df is None:\n df = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n df['repository'] = repo.repo_name\n else:\n chunk = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev)\n chunk['repository'] = repo.repo_name\n df = df.append(chunk)\n except GitCommandError:\n print('Warning! Repo: %s couldnt be inspected' % (repo, ))\n\n df = df.reset_index(level=1)\n df = df.set_index(['file', 'repository'])\n return df\n\n def branches(self):\n \"\"\"\n Returns a data frame of all branches in origin. The DataFrame will have the columns:\n\n * repository\n * local\n * branch\n\n :returns: DataFrame\n \"\"\"\n\n df = pd.DataFrame(columns=['repository', 'local', 'branch'])\n\n if _has_joblib:\n ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(\n delayed(_branches_func)\n (x) for x in self.repos\n )\n for d in ds:\n df = df.append(d)\n else:\n for repo in self.repos:\n try:\n df = df.append(_branches_func(repo))\n except GitCommandError:\n print('Warning! Repo: %s couldn\\'t be inspected' % (repo, ))\n\n df.reset_index()\n\n return df\n\n def revs(self, branch='master', limit=None, skip=None, num_datapoints=None):\n \"\"\"\n Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:\n\n * date\n * repository\n * rev\n\n :param branch: (optional, default 'master') the branch to work in\n :param limit: (optional, default None), the maximum number of revisions to return, None for no limit\n :param skip: (optional, default None), the number of revisions to skip. 
Ex: skip=2 returns every other revision, None for no skipping.\n :param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used\n\n :return: DataFrame\n \"\"\"\n\n if limit is not None:\n limit = math.floor(float(limit) / len(self.repos))\n\n if num_datapoints is not None:\n num_datapoints = math.floor(float(num_datapoints) / len(self.repos))\n\n df = pd.DataFrame(columns=['repository', 'rev'])\n\n if _has_joblib:\n ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(\n delayed(_revs_func)\n (x, branch, limit, skip, num_datapoints) for x in self.repos\n )\n for d in ds:\n df = df.append(d)\n else:\n for repo in self.repos:\n try:\n revs = repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)\n revs['repository'] = repo.repo_name\n df = df.append(revs)\n except GitCommandError:\n print('Warning! Repo: %s couldn\\'t be inspected' % (repo, ))\n\n df.reset_index()\n\n return df\n\n def cumulative_blame(self, branch='master', by='committer', limit=None, skip=None, num_datapoints=None, committer=True, ignore_globs=None, include_globs=None):\n \"\"\"\n Returns a time series of cumulative blame for a collection of projects. The goal is to return a dataframe for a\n collection of projects with the LOC attached to an entity at each point in time. The returned dataframe can be\n returned in 3 forms (switched with the by parameter, default 'committer'):\n\n * committer: one column per committer\n * project: one column per project\n * raw: one column per committed per project\n\n :param branch: (optional, default 'master') the branch to work in\n :param limit: (optional, default None), the maximum number of revisions to return, None for no limit\n :param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.\n :param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used\n :param committer: (optional, default=True) true if committer should be reported, false if author\n :param by: (optional, default='committer') whether to arrange the output by committer or project\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n\n \"\"\"\n\n blames = []\n for repo in self.repos:\n try:\n blame = repo.cumulative_blame(\n branch=branch,\n limit=limit,\n skip=skip,\n num_datapoints=num_datapoints,\n committer=committer,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n blames.append((repo.repo_name, blame))\n except GitCommandError:\n print('Warning! 
Repo: %s couldn\\'t be inspected' % (repo, ))\n pass\n\n global_blame = blames[0][1]\n global_blame.columns = [x + '__' + str(blames[0][0]) for x in global_blame.columns.values]\n blames = blames[1:]\n for reponame, blame in blames:\n blame.columns = [x + '__' + reponame for x in blame.columns.values]\n global_blame = pd.merge(global_blame, blame, left_index=True, right_index=True, how='outer')\n\n global_blame.fillna(method='pad', inplace=True)\n global_blame.fillna(0.0, inplace=True)\n\n if by == 'committer':\n committers = [(str(x).split('__')[0].lower().strip(), x) for x in global_blame.columns.values]\n\n if sys.version_info.major == 2:\n committer_mapping = dict([(c, [x[1] for x in committers if x[0] == c]) for c in set([x[0] for x in committers])])\n else:\n committer_mapping = {c: [x[1] for x in committers if x[0] == c] for c in {x[0] for x in committers}}\n\n for committer in committer_mapping.keys():\n global_blame[committer] = 0\n for col in committer_mapping.get(committer, []):\n global_blame[committer] += global_blame[col]\n\n global_blame = global_blame.reindex(columns=list(committer_mapping.keys()))\n elif by == 'project':\n projects = [(str(x).split('__')[1].lower().strip(), x) for x in global_blame.columns.values]\n\n if sys.version_info.major == 2:\n project_mapping = dict([(c, [x[1] for x in projects if x[0] == c]) for c in set([x[0] for x in projects])])\n else:\n project_mapping = {c: [x[1] for x in projects if x[0] == c] for c in {x[0] for x in projects}}\n\n for project in project_mapping.keys():\n global_blame[project] = 0\n for col in project_mapping.get(project, []):\n global_blame[project] += global_blame[col]\n\n global_blame = global_blame.reindex(columns=list(project_mapping.keys()))\n\n global_blame = global_blame[~global_blame.index.duplicated()]\n\n return global_blame\n\n def tags(self):\n \"\"\"\n Returns a data frame of all tags in origin. The DataFrame will have the columns:\n\n * repository\n * tag\n\n :returns: DataFrame\n \"\"\"\n\n df = pd.DataFrame(columns=['repository', 'tag'])\n\n if _has_joblib:\n ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(\n delayed(_tags_func)\n (x) for x in self.repos\n )\n for d in ds:\n df = df.append(d)\n else:\n for repo in self.repos:\n try:\n df = df.append(repo.tags())\n except GitCommandError:\n print('Warning! Repo: %s couldn\\'t be inspected' % (repo, ))\n\n df.reset_index()\n\n return df\n\n def repo_information(self):\n \"\"\"\n Returns a DataFrame with the properties of all repositories in the project directory. The returned DataFrame\n will have the columns:\n\n * local_directory\n * branches\n * bare\n * remotes\n * description\n * references\n * heads\n * submodules\n * tags\n * active_branch\n\n :return: DataFrame\n \"\"\"\n\n data = [[repo.git_dir,\n repo.repo.branches,\n repo.repo.bare,\n repo.repo.remotes,\n repo.repo.description,\n repo.repo.references,\n repo.repo.heads,\n repo.repo.submodules,\n repo.repo.tags,\n repo.repo.active_branch] for repo in self.repos]\n\n df = pd.DataFrame(data, columns=[\n 'local_directory',\n 'branches',\n 'bare',\n 'remotes',\n 'description',\n 'references',\n 'heads',\n 'submodules',\n 'tags',\n 'active_branch'\n ])\n\n return df\n\n def bus_factor(self, ignore_globs=None, include_globs=None, by='projectd'):\n \"\"\"\n An experimental heuristic for truck factor of a repository calculated by the current distribution of blame in\n the repository's primary branch. 
The factor is the fewest number of contributors whose contributions make up at\n least 50% of the codebase's LOC\n\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n\n :return:\n \"\"\"\n\n if by == 'file':\n raise NotImplementedError('File-wise bus factor')\n elif by == 'projectd':\n blame = self.blame(ignore_globs=ignore_globs, include_globs=include_globs, by='repository')\n blame = blame.sort_values(by=['loc'], ascending=False)\n\n total = blame['loc'].sum()\n cumulative = 0\n tc = 0\n for idx in range(blame.shape[0]):\n cumulative += blame.ix[idx, 'loc']\n tc += 1\n if cumulative >= total / 2:\n break\n\n return pd.DataFrame([['projectd', tc]], columns=['projectd', 'bus factor'])\n elif by == 'repository':\n df = pd.DataFrame(columns=['repository', 'bus factor'])\n for repo in self.repos:\n try:\n df = df.append(repo.bus_factor(ignore_globs=include_globs, include_globs=include_globs, by=by))\n except GitCommandError:\n print('Warning! Repo: %s couldn\\'t be inspected' % (repo, ))\n\n df.reset_index()\n return df\n\n def punchcard(self, branch='master', limit=None, days=None, by=None, normalize=None, ignore_globs=None, include_globs=None):\n \"\"\"\n Returns a pandas DataFrame containing all of the data for a punchcard.\n\n * day_of_week\n * hour_of_day\n * author / committer\n * lines\n * insertions\n * deletions\n * net\n\n :param branch: the branch to return commits for\n :param limit: (optional, default=None) a maximum number of commits to return, None for no limit\n :param days: (optional, default=None) number of days to return, if limit is None\n :param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author', 'repository'\n :param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting)\n :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing\n :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything.\n :return: DataFrame\n \"\"\"\n\n df = pd.DataFrame()\n\n if by == 'repository':\n repo_by = None\n else:\n repo_by = by\n\n for repo in self.repos:\n try:\n chunk = repo.punchcard(\n branch=branch,\n limit=limit,\n days=days,\n by=repo_by,\n normalize=None,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n chunk['repository'] = repo.repo_name\n df = df.append(chunk)\n except GitCommandError:\n print('Warning! 
Repo: %s couldn\\'t be inspected' % (repo, ))\n\n df.reset_index()\n\n aggs = ['hour_of_day', 'day_of_week']\n if by is not None:\n aggs.append(by)\n\n punch_card = df.groupby(aggs).agg({\n 'lines': np.sum,\n 'insertions': np.sum,\n 'deletions': np.sum,\n 'net': np.sum\n })\n punch_card.reset_index(inplace=True)\n\n # normalize all cols\n if normalize is not None:\n for col in ['lines', 'insertions', 'deletions', 'net']:\n punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize\n\n return punch_card\n\n def __del__(self):\n \"\"\"\n\n :return:\n \"\"\"\n\n for repo in self.repos:\n repo.__del__()\n\n\nclass GitHubProfile(ProjectDirectory):\n \"\"\"\n An extension of the ProjectDirectory object that is based off of a single github.com user's public profile.\n \"\"\"\n def __init__(self, username, ignore_forks=False, ignore_repos=None, verbose=False):\n \"\"\"\n\n :param username:\n :return:\n \"\"\"\n\n # pull the git urls from github's api\n uri = 'https://api.github.com/users/%s/repos' % username\n data = requests.get(uri)\n repos = []\n for chunk in data.json():\n # if we are skipping forks\n if ignore_forks:\n if not chunk['fork']:\n repos.append(chunk['git_url'])\n else:\n repos.append(chunk['git_url'])\n\n ProjectDirectory.__init__(self, working_dir=repos, ignore_repos=ignore_repos, verbose=verbose)\n" ]
[ [ "pandas.merge", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Guillemdb/fragile
[ "ea8203508752e76b80495cfe057985268564bf4a" ]
[ "fragile/optimize/env.py" ]
[ "from typing import Callable, Dict, Tuple, Union\n\nimport judo\nfrom judo import Backend, Bounds, tensor, typing\nimport numpy\nfrom scipy.optimize import Bounds as ScipyBounds\nfrom scipy.optimize import minimize\n\nfrom fragile.core.env import Environment\nfrom fragile.core.states import StatesEnv, StatesModel\n\n\nclass Function(Environment):\n \"\"\"\n Environment that represents an arbitrary mathematical function bounded in a \\\n given interval.\n \"\"\"\n\n def __init__(\n self,\n function: Callable[[typing.Tensor], typing.Tensor],\n bounds: Bounds,\n custom_domain_check: Callable[[typing.Tensor, typing.Tensor, int], typing.Tensor] = None,\n actions_as_perturbations: bool = True,\n ):\n \"\"\"\n Initialize a :class:`Function`.\n\n Args:\n function: Callable that takes a batch of vectors (batched across \\\n the first dimension of the array) and returns a vector of \\\n typing.Scalar. This function is applied to a batch of walker \\\n observations.\n bounds: :class:`Bounds` that defines the domain of the function.\n custom_domain_check: Callable that checks points inside the bounds \\\n to know if they are in a custom domain when it is not just \\\n a set of rectangular bounds. It takes a batch of points as \\\n input and returns an array of booleans. Each ``True`` value \\\n indicates that the corresponding point is **outside** the \\\n ``custom_domain_check``.\n actions_as_perturbations: If ``True`` the actions are interpreted as \\\n perturbations that will be applied to the past states. \\\n If ``False`` the actions are interpreted as the new states to \\\n be evaluated.\n\n \"\"\"\n if not isinstance(bounds, Bounds):\n raise TypeError(\"Bounds needs to be an instance of Bounds, found {}\".format(bounds))\n self.function = function\n self.bounds = bounds\n self.custom_domain_check = custom_domain_check\n self._actions_as_perturbations = actions_as_perturbations\n super(Function, self).__init__(observs_shape=self.shape, states_shape=self.shape)\n\n @property\n def n_dims(self) -> int:\n \"\"\"Return the number of dimensions of the function to be optimized.\"\"\"\n return len(self.bounds)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"Return the shape of the environment.\"\"\"\n return self.bounds.shape\n\n @classmethod\n def from_bounds_params(\n cls,\n function: Callable,\n shape: tuple = None,\n high: Union[int, float, typing.Tensor] = numpy.inf,\n low: Union[int, float, typing.Tensor] = numpy.NINF,\n custom_domain_check: Callable[[typing.Tensor], typing.Tensor] = None,\n ) -> \"Function\":\n \"\"\"\n Initialize a function defining its shape and bounds without using a :class:`Bounds`.\n\n Args:\n function: Callable that takes a batch of vectors (batched across \\\n the first dimension of the array) and returns a vector of \\\n typing.Scalar. This function is applied to a batch of walker \\\n observations.\n shape: Input shape of the solution vector without taking into account \\\n the batch dimension. For example, a two dimensional function \\\n applied to a batch of 5 walkers will have shape=(2,), even though\n the observations will have shape (5, 2)\n high: Upper bound of the function domain. If it's an typing.Scalar it will \\\n be the same for all dimensions. If its a numpy array it will \\\n be the upper bound for each dimension.\n low: Lower bound of the function domain. If it's an typing.Scalar it will \\\n be the same for all dimensions. 
If its a numpy array it will \\\n be the lower bound for each dimension.\n custom_domain_check: Callable that checks points inside the bounds \\\n to know if they are in a custom domain when it is not just \\\n a set of rectangular bounds.\n\n Returns:\n :class:`Function` with its :class:`Bounds` created from the provided arguments.\n\n \"\"\"\n if (\n not (judo.is_tensor(high) or isinstance(high, (list, tuple)))\n and not (judo.is_tensor(low) or isinstance(low, (list, tuple)))\n and shape is None\n ):\n raise TypeError(\"Need to specify shape or high or low must be a numpy array.\")\n bounds = Bounds(high=high, low=low, shape=shape)\n return Function(function=function, bounds=bounds, custom_domain_check=custom_domain_check)\n\n def __repr__(self):\n text = \"{} with function {}, obs shape {},\".format(\n self.__class__.__name__, self.function.__name__, self.shape,\n )\n return text\n\n def states_to_data(\n self, model_states: StatesModel, env_states: StatesEnv\n ) -> Dict[str, typing.Tensor]:\n \"\"\"\n Extract the data that will be used to make the state transitions.\n\n Args:\n model_states: :class:`StatesModel` representing the data to be used \\\n to act on the environment.\n env_states: :class:`StatesEnv` representing the data to be set in \\\n the environment.\n\n Returns:\n Dictionary containing:\n\n ``{\"observs\": np.array, \"actions\": np.array}``\n\n \"\"\"\n data = {\"observs\": env_states.states, \"actions\": model_states.actions}\n return data\n\n def make_transitions(\n self, observs: typing.Tensor, actions: typing.Tensor\n ) -> Dict[str, typing.Tensor]:\n \"\"\"\n\n Sum the target action to the observations to obtain the new points, and \\\n evaluate the reward and boundary conditions.\n\n Args:\n observs: Batch of points returned in the last step.\n actions: Perturbation that will be applied to ``observs``.\n\n Returns:\n Dictionary containing the information of the new points evaluated.\n\n ``{\"states\": new_points, \"observs\": new_points, \"rewards\": typing.Scalar array, \\\n \"oobs\": boolean array}``\n\n \"\"\"\n new_points = actions + observs if self._actions_as_perturbations else actions\n rewards = self.function(new_points).flatten()\n oobs = self.calculate_oobs(points=new_points, rewards=rewards)\n data = {\"states\": new_points, \"observs\": new_points, \"rewards\": rewards, \"oobs\": oobs}\n return data\n\n def reset(self, batch_size: int = 1, **kwargs) -> StatesEnv:\n \"\"\"\n Reset the :class:`Function` to the start of a new episode and returns \\\n an :class:`StatesEnv` instance describing its internal state.\n\n Args:\n batch_size: Number of walkers that the returned state will have.\n **kwargs: Ignored. This environment resets without using any external data.\n\n Returns:\n :class:`EnvStates` instance describing the state of the :class:`Function`. 
\\\n The first dimension of the data tensors (number of walkers) will be \\\n equal to batch_size.\n\n \"\"\"\n oobs = judo.zeros(batch_size, dtype=judo.bool)\n new_points = self.sample_bounds(batch_size=batch_size)\n rewards = self.function(new_points).flatten()\n new_states = self.states_from_data(\n states=new_points,\n observs=new_points,\n rewards=rewards,\n oobs=oobs,\n batch_size=batch_size,\n )\n return new_states\n\n def calculate_oobs(self, points: typing.Tensor, rewards: typing.Tensor) -> typing.Tensor:\n \"\"\"\n Determine if a given batch of vectors lie inside the function domain.\n\n Args:\n points: Array of batched vectors that will be checked to lie inside \\\n the :class:`Function` bounds.\n rewards: Array containing the rewards of the current walkers.\n\n Returns:\n Array of booleans of length batch_size (points.shape[0]) that will \\\n be ``True`` if a given point of the batch lies outside the bounds, \\\n and ``False`` otherwise.\n\n \"\"\"\n oobs = judo.logical_not(self.bounds.points_in_bounds(points)).flatten()\n if self.custom_domain_check is not None:\n points_in_bounds = judo.logical_not(oobs)\n oobs[points_in_bounds] = self.custom_domain_check(\n points[points_in_bounds], rewards[points_in_bounds], len(rewards)\n )\n return oobs\n\n def sample_bounds(self, batch_size: int) -> typing.Tensor:\n \"\"\"\n Return a matrix of points sampled uniformly from the :class:`Function` \\\n domain.\n\n Args:\n batch_size: Number of points that will be sampled.\n\n Returns:\n Array containing ``batch_size`` points that lie inside the \\\n :class:`Function` domain, stacked across the first dimension.\n\n \"\"\"\n new_points = judo.zeros(tuple([batch_size]) + self.shape, dtype=judo.float32)\n for i in range(batch_size):\n values = self.random_state.uniform(\n low=judo.astype(self.bounds.low, judo.float),\n high=judo.astype(self.bounds.high, judo.float32),\n )\n values = judo.astype(values, self.bounds.low.dtype)\n new_points[i, :] = values\n\n return new_points\n\n\nclass Minimizer:\n \"\"\"Apply ``scipy.optimize.minimize`` to a :class:`Function`.\"\"\"\n\n def __init__(self, function: Function, bounds=None, *args, **kwargs):\n \"\"\"\n Initialize a :class:`Minimizer`.\n\n Args:\n function: :class:`Function` that will be minimized.\n bounds: :class:`Bounds` defining the domain of the minimization \\\n process. 
If it is ``None`` the :class:`Function` :class:`Bounds` \\\n will be used.\n *args: Passed to ``scipy.optimize.minimize``.\n **kwargs: Passed to ``scipy.optimize.minimize``.\n\n \"\"\"\n self.env = function\n self.function = function.function\n self.bounds = self.env.bounds if bounds is None else bounds\n self.args = args\n self.kwargs = kwargs\n\n def minimize(self, x: typing.Tensor):\n \"\"\"\n Apply ``scipy.optimize.minimize`` to a single point.\n\n Args:\n x: Array representing a single point of the function to be minimized.\n\n Returns:\n Optimization result object returned by ``scipy.optimize.minimize``.\n\n \"\"\"\n\n def _optimize(_x):\n try:\n _x = _x.reshape((1,) + _x.shape)\n y = self.function(_x)\n except (ZeroDivisionError, RuntimeError):\n y = numpy.inf\n return y\n\n bounds = ScipyBounds(\n ub=judo.to_numpy(self.bounds.high) if self.bounds is not None else None,\n lb=judo.to_numpy(self.bounds.low) if self.bounds is not None else None,\n )\n return minimize(_optimize, x, bounds=bounds, *self.args, **self.kwargs)\n\n def minimize_point(self, x: typing.Tensor) -> Tuple[typing.Tensor, typing.Scalar]:\n \"\"\"\n Minimize the target function passing one starting point.\n\n Args:\n x: Array representing a single point of the function to be minimized.\n\n Returns:\n Tuple containing a numpy array representing the best solution found, \\\n and the numerical value of the function at that point.\n\n \"\"\"\n optim_result = self.minimize(x)\n point = tensor(optim_result[\"x\"])\n reward = tensor(float(optim_result[\"fun\"]))\n return point, reward\n\n def minimize_batch(self, x: typing.Tensor) -> Tuple[typing.Tensor, typing.Tensor]:\n \"\"\"\n Minimize a batch of points.\n\n Args:\n x: Array representing a batch of points to be optimized, stacked \\\n across the first dimension.\n\n Returns:\n Tuple of arrays containing the local optimum found for each point, \\\n and an array with the values assigned to each of the points found.\n\n \"\"\"\n x = judo.to_numpy(judo.copy(x))\n with Backend.use_backend(\"numpy\"):\n result = judo.zeros_like(x)\n rewards = judo.zeros((x.shape[0], 1))\n for i in range(x.shape[0]):\n new_x, reward = self.minimize_point(x[i, :])\n result[i, :] = new_x\n rewards[i, :] = float(reward)\n self.bounds.high = tensor(self.bounds.high)\n self.bounds.low = tensor(self.bounds.low)\n result, rewards = tensor(result), tensor(rewards)\n return result, rewards\n\n\nclass MinimizerWrapper(Function):\n \"\"\"\n Wrapper that applies a local minimization process to the observations \\\n returned by a :class:`Function`.\n \"\"\"\n\n def __init__(self, function: Function, *args, **kwargs):\n \"\"\"\n Initialize a :class:`MinimizerWrapper`.\n\n Args:\n function: :class:`Function` to be minimized after each step.\n *args: Passed to the internal :class:`Optimizer`.\n **kwargs: Passed to the internal :class:`Optimizer`.\n\n \"\"\"\n self.env = function\n self.minimizer = Minimizer(function=self.env, *args, **kwargs)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"Return the shape of the wrapped environment.\"\"\"\n return self.env.shape\n\n @property\n def function(self) -> Callable:\n \"\"\"Return the function of the wrapped environment.\"\"\"\n return self.env.function\n\n @property\n def bounds(self) -> Bounds:\n \"\"\"Return the bounds of the wrapped environment.\"\"\"\n return self.env.bounds\n\n @property\n def custom_domain_check(self) -> Callable:\n \"\"\"Return the custom_domain_check of the wrapped environment.\"\"\"\n return self.env.custom_domain_check\n\n 
def __getattr__(self, item):\n return self.env.__getattribute__(item)\n\n def __repr__(self):\n return self.env.__repr__()\n\n def step(self, model_states: StatesModel, env_states: StatesEnv) -> StatesEnv:\n \"\"\"\n Perform a local optimization process to the observations returned after \\\n calling ``step`` on the wrapped :class:`Function`.\n\n Args:\n model_states: :class:`StatesModel` corresponding to the :class:`Model` data.\n env_states: :class:`StatesEnv` containing the data where the function \\\n will be evaluated.\n\n Returns:\n States containing the information that describes the new state of \\\n the :class:`Function`.\n\n \"\"\"\n env_states = super(MinimizerWrapper, self).step(\n model_states=model_states, env_states=env_states\n )\n new_points, rewards = self.minimizer.minimize_batch(env_states.observs)\n # new_points, rewards = tensor(new_points), tensor(rewards)\n oobs = self.calculate_oobs(new_points, rewards)\n updated_states = self.states_from_data(\n states=new_points,\n observs=new_points,\n rewards=rewards.flatten(),\n oobs=oobs,\n batch_size=model_states.n,\n )\n return updated_states\n" ]
[ [ "scipy.optimize.minimize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
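The `Minimizer` in the record above delegates to `scipy.optimize.minimize`, translating the environment's `Bounds` into a `scipy.optimize.Bounds` object and optimizing one start point at a time. A minimal sketch of that pattern outside the fragile/judo stack (the quadratic objective and start point below are illustrative stand-ins, not part of the record):

    import numpy as np
    from scipy.optimize import Bounds, minimize

    def objective(x):
        # toy convex objective standing in for the record's self.function
        return float(np.sum(x ** 2))

    # per-dimension box constraints, as Minimizer builds from bounds.low/high
    bounds = Bounds(lb=np.full(3, -1.0), ub=np.full(3, 1.0))
    result = minimize(objective, x0=np.array([0.5, -0.3, 0.9]), bounds=bounds)
    print(result.x, result.fun)  # best point and its value, cf. minimize_point

Calling this once per row of a batch, as `minimize_batch` does, is simple but serial; the record restores tensor types afterwards because scipy works on numpy arrays.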
avivt/pytorch-a2c-ppo-acktr-gail
[ "ea18861b7d09bfb7c9ca2fda37ef08204969036c" ]
[ "grad_tools/mediangrad.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport pdb\nimport numpy as np\nimport copy\nimport random\n\n\nclass MedianGrad():\n def __init__(self, optimizer, noise_ratio=1.0):\n self._optim = optimizer\n self._noise_ratio = noise_ratio\n return\n\n @property\n def optimizer(self):\n return self._optim\n\n def zero_grad(self):\n '''\n clear the gradient of the parameters\n '''\n\n return self._optim.zero_grad(set_to_none=True)\n\n def step(self):\n '''\n update the parameters with the gradient\n '''\n\n return self._optim.step()\n\n def median_backward(self, objectives):\n '''\n calculate the gradient of the parameters\n\n input:\n - objectives: a list of objectives\n '''\n\n grads, shapes, has_grads = self._pack_grad(objectives)\n noisy_grad = self._clip_and_add_noise(grads, has_grads)\n noisy_grad = self._unflatten_grad(noisy_grad, shapes[0])\n self._set_grad(noisy_grad)\n return\n\n def _clip_and_add_noise(self, grads, has_grads, shapes=None):\n shared = torch.stack(has_grads).prod(0).bool()\n pc_grad, num_task = copy.deepcopy(grads), len(grads)\n\n merged_grad = torch.zeros_like(grads[0]).to(grads[0].device)\n stacked_grads = torch.stack([g[shared] for g in grads])\n merged_grad[shared] = torch.median(stacked_grads, dim=0)[0]\n\n u = torch.rand(merged_grad.shape)\n top_quantile = np.minimum((num_task + 1) / (2 * num_task), 1.0)\n bottom_quantile = np.maximum((num_task - 1) / (2 * num_task), 0.0)\n noise_max = torch.quantile(stacked_grads.abs(), top_quantile, dim=0) - merged_grad\n noise_min = merged_grad - torch.quantile(stacked_grads.abs(), bottom_quantile, dim=0)\n noise = (u * (noise_max - noise_min) + noise_min) * self._noise_ratio\n merged_grad += noise\n\n merged_grad[~shared] = torch.stack([g[~shared]\n for g in pc_grad]).sum(dim=0)\n return merged_grad\n\n def _set_grad(self, grads):\n '''\n set the modified gradients to the network\n '''\n\n idx = 0\n for group in self._optim.param_groups:\n for p in group['params']:\n # if p.grad is None: continue\n p.grad = grads[idx]\n idx += 1\n return\n\n def _pack_grad(self, objectives):\n '''\n pack the gradient of the parameters of the network for each objective\n \n output:\n - grad: a list of the gradients of the parameters\n - shape: a list of the shapes of the parameters\n - has_grad: a list of masks representing whether each parameter has a gradient\n '''\n\n grads, shapes, has_grads = [], [], []\n for obj in objectives:\n self._optim.zero_grad(set_to_none=True)\n obj.backward(retain_graph=True)\n grad, shape, has_grad = self._retrieve_grad()\n grads.append(self._flatten_grad(grad, shape))\n has_grads.append(self._flatten_grad(has_grad, shape))\n shapes.append(shape)\n return grads, shapes, has_grads\n\n def _unflatten_grad(self, grads, shapes):\n unflatten_grad, idx = [], 0\n for shape in shapes:\n length = np.prod(shape)\n unflatten_grad.append(grads[idx:idx + length].view(shape).clone())\n idx += length\n return unflatten_grad\n\n def _flatten_grad(self, grads, shapes):\n flatten_grad = torch.cat([g.flatten() for g in grads])\n return flatten_grad\n\n def _retrieve_grad(self):\n '''\n get the gradient of the parameters of the network with a specific \n objective\n \n output:\n - grad: a list of the gradients of the parameters\n - shape: a list of the shapes of the parameters\n - has_grad: a list of masks representing whether each parameter has a gradient\n '''\n\n grad, shape, has_grad = [], [], []\n for group in self._optim.param_groups:\n for p in group['params']:\n # if p.grad is None: 
continue\n # tackle the multi-head scenario\n if p.grad is None:\n shape.append(p.shape)\n grad.append(torch.zeros_like(p).to(p.device))\n has_grad.append(torch.zeros_like(p).to(p.device))\n continue\n shape.append(p.grad.shape)\n grad.append(p.grad.clone())\n has_grad.append(torch.ones_like(p).to(p.device))\n return grad, shape, has_grad\n\n\nclass TestNet(nn.Module):\n def __init__(self):\n super().__init__()\n self._linear = nn.Linear(3, 4)\n\n def forward(self, x):\n return self._linear(x)\n\n\nclass MultiHeadTestNet(nn.Module):\n def __init__(self):\n super().__init__()\n self._linear = nn.Linear(3, 2)\n self._head1 = nn.Linear(2, 4)\n self._head2 = nn.Linear(2, 4)\n\n def forward(self, x):\n feat = self._linear(x)\n return self._head1(feat), self._head2(feat)\n\n\nif __name__ == '__main__':\n\n # fully shared network test\n torch.manual_seed(4)\n x, y = torch.randn(2, 3), torch.randn(2, 4)\n net = TestNet()\n y_pred = net(x)\n median_adam = MedianGrad(optim.Adam(net.parameters()))\n median_adam.zero_grad()\n loss1_fn, loss2_fn = nn.L1Loss(), nn.MSELoss()\n loss1, loss2 = loss1_fn(y_pred, y), loss2_fn(y_pred, y)\n\n median_adam.median_backward([loss1, loss2])\n for p in net.parameters():\n print(p.grad)\n\n print('-' * 80)\n # separated shared network test\n\n torch.manual_seed(4)\n x, y = torch.randn(2, 3), torch.randn(2, 4)\n net = MultiHeadTestNet()\n y_pred_1, y_pred_2 = net(x)\n median_adam = MedianGrad(optim.Adam(net.parameters()))\n median_adam.zero_grad()\n loss1_fn, loss2_fn = nn.MSELoss(), nn.MSELoss()\n loss1, loss2 = loss1_fn(y_pred_1, y), loss2_fn(y_pred_2, y)\n\n median_adam.median_backward([loss1, loss2])\n for p in net.parameters():\n print(p.grad)\n" ]
[ [ "torch.nn.MSELoss", "torch.ones_like", "numpy.maximum", "numpy.minimum", "torch.manual_seed", "torch.randn", "torch.median", "torch.zeros_like", "torch.nn.Linear", "torch.rand", "numpy.prod", "torch.stack", "torch.nn.L1Loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
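The heart of `MedianGrad` above is `median_backward`: per-objective gradients are flattened, stacked, and reduced with an elementwise median before being written back to `p.grad`. A stripped-down sketch of just that aggregation step (standalone toy losses; the quantile-based noise and the shared/unshared masking from the record are omitted):

    import torch

    param = torch.nn.Parameter(torch.randn(4, 3))
    losses = [(param ** 2).sum(), (param - 1.0).abs().sum()]  # two toy tasks

    flat_grads = []
    for loss in losses:
        (grad,) = torch.autograd.grad(loss, [param], retain_graph=True)
        flat_grads.append(grad.flatten())

    # elementwise median across tasks, as in torch.median(stacked_grads, dim=0)
    merged = torch.median(torch.stack(flat_grads), dim=0)[0]
    param.grad = merged.view_as(param)

The median makes the merged direction robust to a single task whose gradient is an outlier, which is the motivation for preferring it over a plain sum of task gradients.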
I-Doctor/fix_train
[ "3ddd812cd9d6c3dd14ffa059841078f033c48856" ]
[ "code/model/module/test_qq.py" ]
[ "import torch\nimport numpy as np\nimport h5py\nimport matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nfrom quantize_functions import Quantize_A\n\nprefix = '../../../checkpoint/fix_experiment/imgnet/log_quantize_20200926_12-32-39/checkpoint_'\nepochnum = '28'\nmannum = 4\n\ndef plotexp(data, epoch, man):\n f = h5py.File(prefix+data+'_'+epoch+'.h5','r')\n keys = f.keys()\n fresults = np.array([])\n nresults = np.array([])\n cresults = np.array([])\n ncresults = np.array([])\n count = 0\n for layer_name in keys:\n layer_data = f[layer_name][:]\n layer_name = layer_name[0:-8] if layer_name[-1]=='b' else layer_name[0:-6]\n if layer_name[0:-1].endswith('conv'):\n if count % 8 == 0:\n print(' ',layer_name)\n shape = layer_data.shape\n print(' shape',shape)\n print(' range',layer_data.min(), layer_data.max())\n x_in = torch.Tensor(layer_data)\n\n q_xf = Quantize_A.apply(x_in, \n man, \n 0, \n False, \n False, \n False, \n 'complex', \n 'progress')\n q_xn = Quantize_A.apply(x_in, \n man, \n 1, \n False, \n False, \n False, \n 'complex', \n 'progress')\n q_xc = Quantize_A.apply(x_in, \n man, \n 2, \n False, \n False, \n False, \n 'complex', \n 'progress')\n q_xnc = Quantize_A.apply(x_in, \n man, \n 3, \n False, \n False, \n False, \n 'complex', \n 'progress')\n ef = np.average(np.abs((q_xf - x_in)/(x_in+1e-20)))\n en = np.average(np.abs((q_xn - x_in)/(x_in+1e-20)))\n ec = np.average(np.abs((q_xc - x_in)/(x_in+1e-20)))\n enc = np.average(np.abs((q_xnc - x_in)/(x_in+1e-20)))\n print('average: ',layer_name, ef,en,ec,enc)\n\n fresults = np.append(fresults,ef)\n nresults = np.append(nresults,en)\n cresults = np.append(cresults,ec)\n ncresults = np.append(ncresults,enc)\n count += 1\n\n print(fresults)\n print(fresults.shape)\n nn = len(fresults)\n x = 1 + np.arange(0,nn)\n\n plt.figure()\n plt.tick_params(labelsize=10)\n ax=plt.gca()\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n f, = plt.plot(x[1:], fresults[1:], alpha = 0.25, linewidth=3)\n n, = plt.plot(x[1:], nresults[1:], alpha = 0.5, linewidth=3)\n c, = plt.plot(x[1:], cresults[1:], alpha = 0.75, linewidth=3)\n nc, = plt.plot(x[1:],ncresults[1:],alpha = 1, linewidth=3)\n plt.legend([f,n,c,nc],['E_x=0', 'E_x=1', 'E_x=2','E_x=3'], prop={'size':12})\n plt.xlabel('layer id',{'size':15})\n plt.ylabel('average relative error',{'size':15})\n\n plt.savefig(data+'_'+epoch+'exp'+'.png')\n \n \ndef plotnc(data, epoch, man):\n f = h5py.File(prefix+data+'_'+epoch+'.h5','r')\n keys = f.keys()\n fresults = np.array([])\n nresults = np.array([])\n cresults = np.array([])\n ncresults = np.array([])\n count = 0\n for layer_name in keys:\n layer_data = f[layer_name][:]\n layer_name = layer_name[0:-8] if layer_name[-1]=='b' else layer_name[0:-6]\n if layer_name[0:-1].endswith('conv'):\n if count % 8 == 0:\n print(' ',layer_name)\n shape = layer_data.shape\n print(' shape',shape)\n print(' range',layer_data.min(), layer_data.max())\n x_in = torch.Tensor(layer_data)\n\n q_xf = Quantize_A.apply(x_in, \n man, \n 0, \n False, \n False, \n False, \n 'complex', \n 'progress')\n q_xn = Quantize_A.apply(x_in, \n man, \n 0, \n False, \n False, \n 'n', \n 'complex', \n 'progress')\n q_xc = Quantize_A.apply(x_in, \n man, \n 0, \n False, \n False, \n 'c', \n 'complex', \n 'progress')\n q_xnc = Quantize_A.apply(x_in, \n man, \n 0, \n False, \n False, \n 'nc', \n 'complex', \n 'progress')\n ef = np.average(np.abs((q_xf - x_in)/(x_in+1e-20)))\n en = np.average(np.abs((q_xn - x_in)/(x_in+1e-20)))\n ec = np.average(np.abs((q_xc - 
x_in)/(x_in+1e-20)))\n enc = np.average(np.abs((q_xnc - x_in)/(x_in+1e-20)))\n print('average: ',layer_name, ef,en,ec,enc)\n\n fresults = np.append(fresults,ef)\n nresults = np.append(nresults,en)\n cresults = np.append(cresults,ec)\n ncresults = np.append(ncresults,enc)\n count += 1\n\n print(fresults)\n print(fresults.shape)\n nn = len(fresults)\n x = 1+np.arange(0,nn)\n\n plt.figure()\n plt.tick_params(labelsize=10)\n ax=plt.gca()\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n f, = plt.plot(x[1:], fresults[1:], alpha = 0.25, linewidth=3)\n n, = plt.plot(x[1:], nresults[1:], alpha = 0.5, linewidth=3)\n c, = plt.plot(x[1:], cresults[1:], alpha = 0.75, linewidth=3)\n nc, = plt.plot(x[1:],ncresults[1:],alpha = 1, linewidth=3)\n plt.legend([f,n,c,nc],['1 group', 'N groups', 'C groups','NxC groups'], prop={'size':12})\n plt.xlabel('layer id',{'size':15})\n plt.ylabel('average relative error',{'size':15})\n\n plt.savefig(data+'_'+epoch+'nc'+'.png')\n\n\ndef plotncexp(data, epoch, man):\n f = h5py.File(prefix+data+'_'+epoch+'.h5','r')\n keys = f.keys()\n fresults = np.array([])\n nresults = np.array([])\n cresults = np.array([])\n ncresults = np.array([])\n count = 0\n for layer_name in keys:\n layer_data = f[layer_name][:]\n layer_name = layer_name[0:-8] if layer_name[-1]=='b' else layer_name[0:-6]\n if layer_name[0:-1].endswith('conv'):\n if count % 8 == 0:\n print(' ',layer_name)\n shape = layer_data.shape\n print(' shape',shape)\n print(' range',layer_data.min(), layer_data.max())\n x_in = torch.Tensor(layer_data)\n\n q_xf = Quantize_A.apply(x_in, \n man, \n 0, \n False, \n False, \n 'nc', \n 'complex', \n 'progress')\n q_xn = Quantize_A.apply(x_in, \n man, \n 1, \n False, \n False, \n 'nc', \n 'complex', \n 'progress')\n q_xc = Quantize_A.apply(x_in, \n man, \n 2, \n False, \n False, \n 'nc', \n 'complex', \n 'progress')\n q_xnc = Quantize_A.apply(x_in, \n man, \n 3, \n False, \n False, \n 'nc', \n 'complex', \n 'progress')\n ef = np.average(np.abs((q_xf - x_in)/(x_in+1e-20)))\n en = np.average(np.abs((q_xn - x_in)/(x_in+1e-20)))\n ec = np.average(np.abs((q_xc - x_in)/(x_in+1e-20)))\n enc = np.average(np.abs((q_xnc - x_in)/(x_in+1e-20)))\n print('average: ',layer_name, ef,en,ec,enc)\n\n fresults = np.append(fresults,ef)\n nresults = np.append(nresults,en)\n cresults = np.append(cresults,ec)\n ncresults = np.append(ncresults,enc)\n count += 1\n\n print(fresults)\n print(fresults.shape)\n nn = len(fresults)\n x = np.arange(0,nn)\n\n plt.figure()\n plt.tick_params(labelsize=10)\n ax=plt.gca()\n ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n f, = plt.plot(x[1:], fresults[1:], alpha = 0.25, linewidth=3)\n n, = plt.plot(x[1:], nresults[1:], alpha = 0.5, linewidth=3)\n c, = plt.plot(x[1:], cresults[1:], alpha = 0.75, linewidth=3)\n nc, = plt.plot(x[1:],ncresults[1:],alpha = 1, linewidth=3)\n plt.legend([f,n,c,nc],['E_x=0', 'E_x=1', 'E_x=2','E_x=3'], prop={'size':12})\n plt.xlabel('layer id',{'size':15})\n plt.ylabel('average relative error',{'size':15})\n\n plt.savefig(data+'_'+epoch+'ncexp'+'.png')\n\n \nplotnc('e',epochnum, mannum)\nplotexp('e', epochnum, mannum)\nplotncexp('e',epochnum, mannum)\n\nplotnc('a',epochnum, mannum)\nplotexp('a', epochnum, mannum)\nplotncexp('a',epochnum, mannum)\n\nplotnc('w',epochnum, mannum)\nplotexp('w', epochnum, mannum)\nplotncexp('w',epochnum, mannum)\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.legend", "numpy.abs", "torch.Tensor", "numpy.arange", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "numpy.append", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tnakaicode/python-cfd
[ "174176bdcb1c31e021fefd8fd54e2b3dd898dc62" ]
[ "lessons_src/13_Step_10.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n# # Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.\n# [@LorenaABarba](https://twitter.com/LorenaABarba)\n\n# 12 steps to Navier–Stokes\n# =====\n# ***\n\n# For a moment, recall the Navier–Stokes equations for an incompressible fluid, where $\\vec{v}$ represents the velocity field:\n#\n# $$\n# \\begin{eqnarray*}\n# \\nabla \\cdot\\vec{v} &=& 0 \\\\\n# \\frac{\\partial \\vec{v}}{\\partial t}+(\\vec{v}\\cdot\\nabla)\\vec{v} &=& -\\frac{1}{\\rho}\\nabla p + \\nu \\nabla^2\\vec{v}\n# \\end{eqnarray*}\n# $$\n#\n# The first equation represents mass conservation at constant density. The second equation is the conservation of momentum. But a problem appears: the continuity equation for incompressible flow does not have a dominant variable and there is no obvious way to couple the velocity and the pressure. In the case of compressible flow, in contrast, mass continuity would provide an evolution equation for the density $\\rho$, which is coupled with an equation of state relating $\\rho$ and $p$.\n#\n# In incompressible flow, the continuity equation $\\nabla \\cdot\\vec{v}=0$ provides a *kinematic constraint* that requires the pressure field to evolve so that the rate of expansion $\\nabla \\cdot\\vec{v}$ should vanish everywhere. A way out of this difficulty is to *construct* a pressure field that guarantees continuity is satisfied; such a relation can be obtained by taking the divergence of the momentum equation. In that process, a Poisson equation for the pressure shows up!\n\n# Step 10: 2D Poisson Equation\n# ----\n# ***\n\n# Poisson's equation is obtained from adding a source term to the right-hand-side of Laplace's equation:\n#\n# $$\\frac{\\partial ^2 p}{\\partial x^2} + \\frac{\\partial ^2 p}{\\partial y^2} = b$$\n#\n# So, unlike the Laplace equation, there is some finite value inside the field that affects the solution. Poisson's equation acts to \"relax\" the initial sources in the field.\n#\n# In discretized form, this looks almost the same as [Step 9](./12_Step_9.ipynb), except for the source term:\n#\n# $$\\frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\\Delta x^2}+\\frac{p_{i,j+1}^{n}-2 p_{i,j}^{n}+p_{i,j-1}^{n}}{\\Delta y^2}=b_{i,j}^{n}$$\n#\n# As before, we rearrange this so that we obtain an equation for $p$ at point $i,j$. Thus, we obtain:\n#\n# $$p_{i,j}^{n}=\\frac{(p_{i+1,j}^{n}+p_{i-1,j}^{n})\\Delta y^2+(p_{i,j+1}^{n}+p_{i,j-1}^{n})\\Delta x^2-b_{i,j}^{n}\\Delta x^2\\Delta y^2}{2(\\Delta x^2+\\Delta y^2)}$$\n#\n\n# We will solve this equation by assuming an initial state of $p=0$ everywhere, and applying boundary conditions as follows:\n#\n# $p=0$ at $x=0, \\ 2$ and $y=0, \\ 1$\n#\n# and the source term consists of two initial spikes inside the domain, as follows:\n#\n# $b_{i,j}=100$ at $i=\\frac{1}{4}nx, j=\\frac{1}{4}ny$\n#\n# $b_{i,j}=-100$ at $i=\\frac{3}{4}nx, j=\\frac{3}{4}ny$\n#\n# $b_{i,j}=0$ everywhere else.\n#\n# The iterations will advance in pseudo-time to relax the initial spikes. The relaxation under Poisson's equation gets slower and slower as the iterations progress. *Why?*\n\n# Let's look at one possible way to write the code for Poisson's equation. As always, we load our favorite Python libraries. We also want to make some lovely plots in 3D. Let's get our parameters defined and the initialization out of the way. 
What do you notice about the approach below?\n\n# In[1]:\n\n\nimport numpy\nfrom matplotlib import pyplot, cm\nfrom mpl_toolkits.mplot3d import Axes3D\n## get_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n# Parameters\nnx = 50\nny = 50\nnt = 100\nxmin = 0\nxmax = 2\nymin = 0\nymax = 1\n\ndx = (xmax - xmin) / (nx - 1)\ndy = (ymax - ymin) / (ny - 1)\n\n# Initialization\np = numpy.zeros((ny, nx))\npd = numpy.zeros((ny, nx))\nb = numpy.zeros((ny, nx))\nx = numpy.linspace(xmin, xmax, nx)\ny = numpy.linspace(ymin, ymax, ny)\n\n# Source\nb[int(ny / 4), int(nx / 4)] = 100\nb[int(3 * ny / 4), int(3 * nx / 4)] = -100\n\n\n# With that, we are ready to advance the initial guess in pseudo-time. How is the code below different from the function used in [Step 9](./12_Step_9.ipynb) to solve Laplace's equation?\n\n# In[3]:\n\n\nfor it in range(nt):\n\n pd = p.copy()\n\n p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +\n (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -\n b[1:-1, 1:-1] * dx**2 * dy**2) /\n (2 * (dx**2 + dy**2)))\n\n p[0, :] = 0\n p[ny - 1, :] = 0\n p[:, 0] = 0\n p[:, nx - 1] = 0\n\n\n# Maybe we could reuse our plotting function from [Step 9](./12_Step_9.ipynb), don't you think?\n\n# In[4]:\n\n\ndef plot2D(x, y, p):\n fig = pyplot.figure(figsize=(11, 7), dpi=100)\n ax = fig.gca(projection='3d')\n X, Y = numpy.meshgrid(x, y)\n surf = ax.plot_surface(X, Y, p[:], rstride=1, cstride=1, cmap=cm.viridis,\n linewidth=0, antialiased=False)\n ax.view_init(30, 225)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$')\n\n\n# In[5]:\n\n\nplot2D(x, y, p)\npyplot.show()\n\n# Ah! The wonders of code reuse! Now, you probably think: \"Well, if I've written this neat little function that does something so useful, I want to use it over and over again. How can I do this without copying and pasting it each time?\" —If you are very curious about this, you'll have to learn about *packaging*. But this goes beyond the scope of our CFD lessons. You'll just have to Google it if you really want to know.\n\n# ***\n\n# ## Learn More\n\n# To learn more about the role of the Poisson equation in CFD, watch **Video Lesson 11** on YouTube:\n\n# In[6]:\n\n\n#from IPython.display import YouTubeVideo\n# YouTubeVideo('ZjfxA3qq2Lg')\n\n\n# In[7]:\n\n\n#from IPython.core.display import HTML\n# def css_styling():\n# styles = open(\"../styles/custom.css\", \"r\").read()\n# return HTML(styles)\n# css_styling()\n\n\n# > (The cell above executes the style for this notebook.)\n" ]
[ [ "numpy.linspace", "numpy.meshgrid", "numpy.zeros", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
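The notebook above runs a fixed `nt = 100` pseudo-time steps, and its closing question points at why that can be unsatisfying: Jacobi-style relaxation of the Poisson equation slows down as the solution smooths out. A common variant (a sketch reusing the record's grid variables `p`, `b`, `dx`, `dy`; the tolerance value is arbitrary) iterates until the relative L1 change of `p` falls below a target instead:

    import numpy

    def poisson_relax(p, b, dx, dy, l1_target=1e-4, max_iter=10000):
        """Jacobi relaxation until the relative L1 change drops below l1_target."""
        l1norm, it = 1.0, 0
        while l1norm > l1_target and it < max_iter:
            pd = p.copy()
            p[1:-1, 1:-1] = (((pd[1:-1, 2:] + pd[1:-1, :-2]) * dy**2 +
                              (pd[2:, 1:-1] + pd[:-2, 1:-1]) * dx**2 -
                              b[1:-1, 1:-1] * dx**2 * dy**2) /
                             (2 * (dx**2 + dy**2)))
            p[0, :] = p[-1, :] = p[:, 0] = p[:, -1] = 0  # Dirichlet boundaries
            l1norm = (numpy.sum(numpy.abs(p - pd)) /
                      (numpy.sum(numpy.abs(pd)) + 1e-12))
            it += 1
        return p, it

Watching the returned iteration count grow as `l1_target` shrinks makes the slowing convergence of the relaxation directly visible.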
My-UIT-Students/VIPUsingFPGA
[ "e4afadb8def58f0c956c5399a6e9003a15ec43e8" ]
[ "CNNs/lenet/main.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom lenet import *\nWEIGHT_PATH = './weights/'\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nnet = LeNet(WEIGHT_PATH)\n\nif device == 'cuda':\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n\nnet = net.to(device)\ncheckpoint = torch.load(WEIGHT_PATH + 'lenet.pth',map_location=device)\nnet.load_state_dict(checkpoint['net'])\n# print(net.eval())\n# net.load_state_dic()\ndata = torch.randn([1,3,32,32],dtype=torch.float32)\ndata = data.to(device)\nnet.module.export_weights()\nout = net.module.conv1(data)\n\nprint(out)" ]
[ [ "torch.randn", "torch.nn.DataParallel", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
puyamirkarimi/quantum-walks
[ "eb41146cc22e32b2f4d5a6119cc892f45062764c", "eb41146cc22e32b2f4d5a6119cc892f45062764c", "eb41146cc22e32b2f4d5a6119cc892f45062764c", "eb41146cc22e32b2f4d5a6119cc892f45062764c" ]
[ "Max2SAT_quantum/plot_av_inf_time_prob_n.py", "Max2SAT/mixsat_runner.py", "Max2SAT/mixsat_runtime_plot_compare.py", "Max2SAT_quantum/opt_gamma_vs_p_infty.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot_graph(x, y, fit):\n plt.figure()\n plt.scatter(x, y)\n if fit is not None:\n plt.plot(x, fit, '--')\n plt.tick_params(direction='in', top=True, right=True)\n plt.xlim(5, 9)\n plt.ylim(2, 16)\n plt.xticks(range(5, 10, 1))\n # plt.yticks(range(start, end+1, step))\n plt.xlabel(\"$n$\")\n plt.ylabel(r\"$1 / \\langle P_{\\infty} \\rangle$\")\n plt.yscale('log', basey=2)\n plt.show()\n\n\ndef fit_and_plot(x_array, y_array):\n m_log, c_log = np.polyfit(x_array[0:], np.log2(y_array), 1, w=np.sqrt(y_array))\n exp_fit = np.exp2(m_log * x_array + c_log)\n print(str(np.exp2(c_log))+\" * 2^(\" + str(m_log) + \" * n)\")\n plot_graph(x_array, y_array, exp_fit)\n\n\nif __name__ == '__main__':\n plt.rc('text', usetex=True)\n plt.rc('font', size=14)\n\n n_array = np.array([5, 6, 7, 8])\n av_probs = np.zeros(len(n_array))\n\n for i, n in enumerate(n_array):\n probs = np.loadtxt(\"inf_time_probs_n_\" + str(n) + \".txt\")\n av_probs[i] = 1/np.mean(probs)\n\n fit_and_plot(n_array, av_probs)\n", "import subprocess\nimport time\nimport numpy as np\n\n\ndef get_instances():\n \"\"\"returns array of instance names, array of corresponding n\"\"\"\n instance_data = np.genfromtxt('m2s_nqubits.csv', delimiter=',', skip_header=1, dtype=str) # can add _noGT_nondg on end\n return instance_data[:, 0], instance_data[:, 1]\n\n\nif __name__ == '__main__':\n instance_names, instance_n_bits_str = get_instances()\n # qubits_array = np.array(range(5, 21)) # Adam's instances range from n=5 to n=20\n runtimes = np.zeros(10000)\n states_count = np.zeros(10000)\n\n n = 20\n n_shifted = n-5 # n_shifted runs from 0 to 15 instead of 5 to 20\n\n for loop, i in enumerate(range(n_shifted*10000, (n_shifted+1)*10000)): # 10000 instances per value of n\n instance_name = instance_names[i]\n time_start_inst = time.time()\n result = subprocess.run(['./../../mixsat/complete', './../../instances_dimacs/'+instance_name+'.txt'], stdout=subprocess.PIPE) # can add _noGT_nondg on end\n time_end_inst = time.time()\n runtime = time_end_inst - time_start_inst\n runtimes[loop] = runtime\n output = str(result.stdout)\n\n string_start_index = output.find('state_visited ') + 14\n string_end_index = output.find(' pruned')\n states_visited = int(output[string_start_index: string_end_index])\n states_count[loop] = states_visited\n\n with open(\"adam_runtimes_\"+str(n)+\".txt\", \"ab\") as f: # saves runtimes using time.time() # can add _noGT_nondg in middle\n f.write(b\"\\n\")\n np.savetxt(f, runtimes)\n\n with open(\"adam_counts_\"+str(n)+\".txt\", \"ab\") as f: # saves counts # can add _noGT_nondg in middle\n f.write(b\"\\n\")\n np.savetxt(f, states_count)\n", "import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef average_data(data):\n num_repeats = len(data[:, 0])\n num_x_vals = len(data[0, :])\n y_av = np.zeros(num_x_vals)\n y_std_error = np.zeros(num_x_vals)\n\n for x in range(num_x_vals):\n y_av[x] = np.mean(data[:, x])\n y_std_error[x] = np.std(data[:, x], ddof=1) / np.sqrt(num_repeats)\n\n return y_av, y_std_error\n\n\n# def plot_graph(x, y, y_std_error, fit_1, fit_2):\n# fig, ax = plt.subplots()\n# plt.scatter(x[4:], y[4:])\n# plt.scatter(x[:4], y[:4], color=\"gray\")\n# plt.plot(x, fit_1, '--', label=\"$y=0.0005x + 0.0012$\", color=\"red\")\n# plt.plot(x, fit_2, label=r\"$y=0.0036 \\times 2^{0.0871x}$\", color=\"green\")\n# #plt.errorbar(x, y, y_std_error)\n# ax.set_xlabel(\"Number of variables, $n$\")\n# ax.set_ylabel(\"Average runtime ($s$)\")\n# ax.set_xlim([5, 20])\n# 
ax.set_xticks(range(5, 21, 3))\n# ax.set_ylim([0.004, 0.012])\n# ax.set_yscale('log')\n# plt.legend()\n# plt.tight_layout()\n# plt.show()\n\n\ndef zero_to_nan(array):\n \"\"\"Replace every 0 with 'nan' and return a copy.\"\"\"\n return [float('nan') if x==0 else x for x in array]\n\n\ndef counts_data_crosson():\n counts_crosson = np.loadtxt(\"crosson_counts.txt\").reshape((-1, 137))\n return average_data(counts_crosson)\n\n\ndef runtimes_data_crosson():\n runtimes_crosson = np.loadtxt(\"crosson_runtimes.txt\").reshape((-1, 137))\n return average_data(runtimes_crosson)\n\n\ndef counts_data_adam(n):\n counts_adam = np.loadtxt(\"adam_counts_\"+str(n)+\".txt\").reshape((-1, 10000))\n return average_data(counts_adam)\n\n\ndef runtimes_data_adam(n):\n runtimes_adam = np.loadtxt(\"adam_runtimes_\"+str(n)+\".txt\").reshape((-1, 10000))\n return average_data(runtimes_adam)\n\n\ndef counts_data_adam_noGT(n):\n counts_adam = np.loadtxt(\"adam_noGT_counts_\"+str(n)+\".txt\").reshape((-1, 10000))\n return average_data(counts_adam)\n\n\ndef runtimes_data_adam_noGT(n):\n runtimes_adam = np.loadtxt(\"adam_noGT_runtimes_\"+str(n)+\".txt\").reshape((-1, 10000))\n return average_data(runtimes_adam)\n\n\ndef counts_data_adam_noGT_nondg(n):\n counts_adam = np.loadtxt(\"adam_noGT_nondg_counts_\"+str(n)+\".txt\").reshape((-1, 10000))\n return average_data(counts_adam)\n\n\ndef runtimes_data_adam_noGT_nondg(n):\n runtimes_adam = np.loadtxt(\"adam_noGT_nondg_runtimes_\"+str(n)+\".txt\").reshape((-1, 10000))\n return average_data(runtimes_adam)\n\n\nif __name__ == '__main__':\n plt.rc('text', usetex=True)\n plt.rc('font', size=10)\n\n n_list = [9]\n counts_list_adam = []\n runtimes_list_adam = []\n colors = ['blue', 'orange', 'green']\n\n ################## COUNTS ##################\n counts_crosson_average, counts_crosson_standard_error = counts_data_crosson()\n min_count = np.min(counts_crosson_average)\n max_count = np.max(counts_crosson_average)\n\n for n in n_list:\n counts_adam_average, counts_adam_standard_error = counts_data_adam(n)\n counts_list_adam.append(counts_adam_average)\n min_count_temp = np.min(counts_adam_average)\n max_count_temp = np.max(counts_adam_average)\n if min_count_temp < min_count:\n min_count = min_count_temp\n if max_count_temp > max_count:\n max_count = max_count_temp\n\n for n in n_list:\n counts_adam_average, counts_adam_standard_error = counts_data_adam_noGT(n)\n counts_list_adam.append(counts_adam_average)\n min_count_temp = np.min(counts_adam_average)\n max_count_temp = np.max(counts_adam_average)\n if min_count_temp < min_count:\n min_count = min_count_temp\n if max_count_temp > max_count:\n max_count = max_count_temp\n\n for n in n_list:\n counts_adam_average, counts_adam_standard_error = counts_data_adam_noGT_nondg(n)\n counts_list_adam.append(counts_adam_average)\n min_count_temp = np.min(counts_adam_average)\n max_count_temp = np.max(counts_adam_average)\n if min_count_temp < min_count:\n min_count = min_count_temp\n if max_count_temp > max_count:\n max_count = max_count_temp\n\n x = np.arange(np.floor(min_count), np.ceil(max_count)+1)\n y_adam = np.zeros((len(n_list)*3, len(x)))\n y_crosson = np.zeros(len(x))\n\n for i, count in enumerate(x):\n for i_adam in range(len(n_list)*3):\n y_adam[i_adam, i] = np.count_nonzero(counts_list_adam[i_adam] == count) / 10000 # division by 10000 is to normalise\n y_crosson[i] = np.count_nonzero(counts_crosson_average == count) / 137\n\n for i_adam in range(len(n_list)*3):\n y_adam[i_adam] = zero_to_nan(y_adam[i_adam]) # replace zero 
elements in list with NaN so they aren't plotted\n\n y_crosson = zero_to_nan(y_crosson)\n\n fig1, ax1 = plt.subplots()\n for i_adam, n in enumerate(n_list):\n plt.scatter(x, y_adam[i_adam], label=\"n=\"+str(n)+\" transformed\", marker='+')\n for i_adam, n in enumerate(n_list):\n plt.scatter(x, y_adam[i_adam+len(n_list)], label=\"n=\"+str(n)+\" untransformed\", marker='+')\n for i_adam, n in enumerate(n_list):\n plt.scatter(x, y_adam[i_adam+2*len(n_list)], label=\"n=\"+str(n)+\" untransformed, degen\", marker='+')\n #plt.scatter(x, y_crosson, label=\"n=20 (Crosson)\")\n #plt.errorbar(x, counts_average, counts_standard_error)\n plt.xlim([0, max_count])\n #plt.ylim([0, 0.3])\n plt.yscale('log')\n plt.legend()\n plt.xlabel(\"Number of states visited by MIXSAT algorithm\")\n plt.ylabel(\"Number of instances (normalised)\")\n plt.show()\n\n ################## RUNTIMES ##################\n runtimes_crosson_average, runtimes_crosson_standard_error = runtimes_data_crosson()\n runtimes_crosson_average = np.around(runtimes_crosson_average, 4) # binning\n min_runtime = np.min(runtimes_crosson_average)\n max_runtime = np.max(runtimes_crosson_average)\n\n for n in n_list:\n runtimes_adam_average, runtimes_adam_standard_error = runtimes_data_adam(n)\n runtimes_adam_average = np.around(runtimes_adam_average, 5) # binning\n runtimes_list_adam.append(runtimes_adam_average)\n min_runtime_temp = np.min(runtimes_adam_average)\n max_runtime_temp = np.max(runtimes_adam_average)\n if min_runtime_temp < min_runtime:\n min_runtime = min_runtime_temp\n if max_runtime_temp > max_runtime:\n max_runtime = max_runtime_temp\n\n for n in n_list:\n runtimes_adam_average, runtimes_adam_standard_error = runtimes_data_adam_noGT(n)\n runtimes_adam_average = np.around(runtimes_adam_average, 5) # binning\n runtimes_list_adam.append(runtimes_adam_average)\n min_runtime_temp = np.min(runtimes_adam_average)\n max_runtime_temp = np.max(runtimes_adam_average)\n if min_runtime_temp < min_runtime:\n min_runtime = min_runtime_temp\n if max_runtime_temp > max_runtime:\n max_runtime = max_runtime_temp\n\n for n in n_list:\n runtimes_adam_average, runtimes_adam_standard_error = runtimes_data_adam_noGT_nondg(n)\n runtimes_adam_average = np.around(runtimes_adam_average, 5) # binning\n runtimes_list_adam.append(runtimes_adam_average)\n min_runtime_temp = np.min(runtimes_adam_average)\n max_runtime_temp = np.max(runtimes_adam_average)\n if min_runtime_temp < min_runtime:\n min_runtime = min_runtime_temp\n if max_runtime_temp > max_runtime:\n max_runtime = max_runtime_temp\n\n x = np.arange(np.floor(min_runtime), np.ceil(max_runtime) + 1, step=0.00001)\n x_crosson = np.arange(np.floor(min_runtime), np.ceil(max_runtime) + 1, step=0.0001)\n y_adam = np.zeros((len(n_list)*3, len(x)))\n y_crosson = np.zeros(len(x_crosson))\n\n for i, runtime in enumerate(x):\n for i_adam in range(len(n_list)*3):\n y_adam[i_adam, i] = np.count_nonzero(runtimes_list_adam[i_adam] == runtime) / 10000 # division by 10000 is to normalise\n for i, runtime in enumerate(x_crosson):\n y_crosson[i] = np.count_nonzero(runtimes_crosson_average == runtime) / 1370\n\n for i_adam in range(len(n_list)*3):\n y_adam[i_adam] = zero_to_nan(y_adam[i_adam]) # replace zero elements in list with NaN so they aren't plotted\n\n y_crosson = zero_to_nan(y_crosson)\n\n fig2, ax2 = plt.subplots()\n for i_adam, n in enumerate(n_list):\n plt.scatter(x, y_adam[i_adam], label=\"n=\"+str(n)+\" transformed\", marker='+')\n for i_adam, n in enumerate(n_list):\n plt.scatter(x, 
y_adam[i_adam+len(n_list)], label=\"n=\" + str(n)+\" untransformed\", marker='+')\n for i_adam, n in enumerate(n_list):\n plt.scatter(x, y_adam[i_adam+2*len(n_list)], label=\"n=\" + str(n)+\" untransformed, degen\", marker='+')\n #plt.scatter(x_crosson, y_crosson, label=\"n=20 (Crosson)\")\n # plt.errorbar(x, runtimes_average, runtimes_standard_error)\n plt.xlim([0, max_runtime])\n # plt.ylim([0, 0.3])\n plt.yscale('log')\n plt.xlabel(\"Rounded runtime of MIXSAT algorithm\")\n plt.ylabel(\"Number of instances (normalised)\")\n plt.legend()\n plt.show()\n\n", "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef heuristic_gamma(n):\n out = \"haven't defined heuristic gamma for given n\"\n if n == 5:\n out = 0.56503\n if n == 6:\n out = 0.587375\n if n == 7:\n out = 0.5984357142857143\n if n == 8:\n out = 0.60751875\n if n == 9:\n out = 0.6139833333333333\n if n == 10:\n out = 0.619345\n if n == 11:\n out = 0.6220136363636364\n print(\"heuristic gamma: \", out)\n return out\n\n\ndef quantum_data_unopt(n):\n probs = np.loadtxt(\"./../Max2SAT_quantum/inf_time_probs_n_\" + str(n) + \".txt\")\n return probs\n\n\ndef quantum_data_opt(n):\n probs = np.loadtxt(\"./../Max2SAT_quantum/opt_inf_time_probs_n_\" + str(n) + \".txt\")\n return probs\n\n\nif __name__ == '__main__':\n plt.rc('text', usetex=True)\n plt.rc('font', size=16)\n plt.rcParams[\"figure.figsize\"] = (9.6, 4.8)\n\n n = 9\n gamma_limit = 1.5\n gamma_step = 0.01\n\n fig, (ax1, ax2) = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1, 1.25]})\n axes = (ax1, ax2)\n ax1.tick_params(direction='in', top=True, right=True, which='both')\n ax2.tick_params(direction='in', top=True, right=True, which='both', labelleft=False)\n\n ax1.set_xlabel(r\"$P_\\infty$\")\n ax2.set_xlabel(r\"$P_\\infty$\")\n ax1.set_ylabel(r\"$\\gamma_{opt}$\")\n for ax in axes:\n ax.set_xlim([0, 0.2])\n ax.set_ylim([0.15, 1.4])\n\n probs1 = quantum_data_unopt(n)\n probs2 = quantum_data_opt(n)\n delta_probs = probs2 - probs1\n cm = plt.get_cmap(\"seismic\")\n\n opt_gammas = np.loadtxt(\"new_opt_gammas_\"+str(n)+\".txt\")\n heur_gam = heuristic_gamma(n)\n axes[0].scatter(probs1, opt_gammas, c=delta_probs, cmap=cm, vmin = -0.1, vmax=0.1, linewidths=0.075, marker='.', s=4, edgecolors='black')\n axes[0].hlines(heur_gam, 0, 0.2, colors='yellow')\n\n im = axes[1].scatter(probs2, opt_gammas, c=delta_probs, cmap=cm, vmin = -0.1, vmax=0.1, linewidths=0.075, marker='.', s=4, edgecolors='black')\n cbar = fig.colorbar(im, ax=axes[1])\n cbar.ax.set_ylabel('$\\Delta P_\\infty$')\n axes[1].hlines(heur_gam, 0, 0.2, colors='yellow')\n\n # plt.savefig('opt_gamma_vs_p_infty_n_'+ str(n) +'.png', dpi=200)\n plt.show()" ]
[ [ "numpy.exp2", "numpy.log2", "numpy.sqrt", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "matplotlib.pyplot.yscale", "matplotlib.pyplot.rc", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylabel", "numpy.mean", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.figure" ], [ "numpy.savetxt", "numpy.zeros", "numpy.genfromtxt" ], [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.around", "matplotlib.pyplot.rc", "numpy.max", "numpy.mean", "numpy.ceil", "numpy.std", "numpy.count_nonzero", "numpy.zeros", "numpy.min", "numpy.floor", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ], [ "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.show", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
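The first file in the record above (`plot_av_inf_time_prob_n.py`) fits the scaling 1/⟨P∞⟩ ≈ A · 2^(m·n) by a weighted straight-line fit of log2(y) against n, then exponentiates the intercept back. A self-contained sketch of that log-space trick (the values of A and m below are synthetic, for illustration only):

    import numpy as np

    n = np.array([5, 6, 7, 8])
    y = 0.9 * np.exp2(0.45 * n)  # synthetic data following A * 2**(m * n)

    # linear fit in log2 space: log2(y) = m * n + log2(A)
    m, c = np.polyfit(n, np.log2(y), 1, w=np.sqrt(y))
    print(str(np.exp2(c)) + " * 2^(" + str(m) + " * n)")  # recovers A and m

Fitting in log space turns the exponential into a line, which is why the record's `fit_and_plot` draws the fitted curve as a dashed line on a log-scaled y axis.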
syybata/tf_dqn_fx
[ "71061f0c0bb15d647a13d3e2c0d549a5c2d1780a" ]
[ "tensorboard_sample.py" ]
[ "import tensorflow as tf\n\ncross_entropy = tf.placeholder(tf.float32)\n\n# set up the session\nsess = tf.Session()\ntf.global_variables_initializer().run(session=sess)\n#sess.run(tf.global_variables_initializer())\n\n# configure the summary ops\ntf.summary.scalar('cross_entropy', cross_entropy)\nsummaries = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter('shiba_train', sess.graph)\n\n# run 100 steps and record a summary of cross_entropy at each step\nfor step in range(100):\n summary_str = sess.run(summaries, {cross_entropy: step})\n train_writer.add_summary(summary_str, step)\n" ]
[ [ "tensorflow.summary.FileWriter", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.Session", "tensorflow.summary.scalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
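The record above uses the TensorFlow 1.x `Session`/`merge_all`/`FileWriter` summary pattern, consistent with the detected `"tensorflow": [ "1.10" ]`. For comparison, the same scalar logging in TensorFlow 2 looks roughly like the following sketch (the log directory name is arbitrary):

    import tensorflow as tf

    writer = tf.summary.create_file_writer('shiba_train_tf2')
    with writer.as_default():
        for step in range(100):
            # one scalar point per step, viewable in TensorBoard
            tf.summary.scalar('cross_entropy', float(step), step=step)
    writer.flush()

No session, graph, or `merge_all` is needed in eager mode; each `tf.summary.scalar` call writes directly through the active writer.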
JoseAlanis/supplementary_dpx_tt
[ "2b5a94c35753dc9dbf51357c840a87380e40fe3c", "2b5a94c35753dc9dbf51357c840a87380e40fe3c", "2b5a94c35753dc9dbf51357c840a87380e40fe3c" ]
[ "bads.py", "mvpa_stats.py", "15_plot_mvpa_results.py" ]
[ "\"\"\"\n=================\nFind bad channels\n=================\n\nMethods for finding bad (e.g., noisy) channels in EEG data.\n\nAuthors: José C. García Alanis <[email protected]>\n\nLicense: BSD (3-clause)\n\"\"\"\nimport warnings\n\nimport numpy as np\nfrom scipy.stats import median_abs_deviation as mad\n\nfrom mne.io.base import BaseRaw\n\n\n# main function which implements different methods\ndef find_bad_channels(inst, picks='eeg',\n method='correlation',\n mad_threshold=1,\n std_threshold=1,\n r_threshold=0.4,\n percent_threshold=0.1,\n time_step=1.0,\n sfreq=None,\n return_z_scores=False,\n channels=None):\n\n # arguments to be passed to pick_types\n kwargs = {pick: True for pick in [picks]}\n\n # check that the input data can be handled by the function\n if isinstance(inst, BaseRaw):\n # only keep data from desired channels\n inst = inst.copy().pick_types(**kwargs)\n dat = inst.get_data() * 1e6 # to microvolt\n channels = inst.ch_names\n sfreq = inst.info['sfreq']\n elif isinstance(inst, np.ndarray):\n dat = inst\n if not channels:\n raise ValueError('If \"inst\" is not an instance of BaseRaw a list '\n 'of channel names must be provided')\n else:\n raise ValueError('inst must be an instance of BaseRaw or a numpy array')\n\n # save shape of data\n n_channels, n_samples = dat.shape\n if n_channels != len(channels):\n raise ValueError(\"Number of channels and data dimensions don't match\")\n\n # make sure method arguments are in a list\n if not isinstance(method, list):\n method = [method]\n\n # placeholder for results\n bad_channels = dict()\n\n # 1) find channels with zero or near zero activity\n if 'flat' in method:\n # compute estimates of channel activity\n mad_flats = mad(dat, scale=1, axis=1) < mad_threshold\n std_flats = np.std(dat, axis=1) < std_threshold\n\n # flat channels identified\n flats = np.argwhere(np.logical_or(mad_flats, std_flats))\n flats = np.asarray([channels[int(flat)] for flat in flats])\n\n # warn user if too many channels were identified as flat\n if flats.shape[0] > (n_channels / 2):\n warnings.warn('Too many channels have been identified as \"flat\"! '\n 'Make sure the input values in \"inst\" are provided '\n 'on a volt scale. '\n 'Otherwise try choosing another (meaningful) '\n 'threshold for identification.')\n\n bad_channels.update(flat=flats)\n\n # 2) find bad channels by deviation (high variability in amplitude)\n if 'deviation' in method:\n\n # mean absolute deviation (MAD) scores for each channel\n mad_scores = \\\n [mad(dat[i, :], scale=1) for i in range(n_channels)]\n\n # compute robust z-scores for each channel\n rz_scores = \\\n 0.6745 * (mad_scores - np.nanmedian(mad_scores)) / mad(mad_scores,\n scale=1)\n\n # channels identified by deviation criterion\n bad_deviation = \\\n [channels[i] for i in np.where(np.abs(rz_scores) >= 5.0)[0]]\n\n bad_channels.update(deviation=np.asarray(bad_deviation))\n\n if return_z_scores:\n bad_channels.update(deviation_z_scores=rz_scores)\n\n # 3) find channels with low correlation to other channels\n if 'correlation' in method:\n\n # check that sampling frequency argument was provided\n if not sfreq:\n raise ValueError('If \"inst\" is not an instance of BaseRaw a '\n 'sampling frequency must be provided. 
Usually '\n 'the sampling frequency of the EEG recording in'\n 'question.')\n\n # based on the length of the provided data,\n # determine size and amount of time windows for analyses\n corr_frames = time_step * sfreq\n corr_window = np.arange(corr_frames)\n\n # sample indices (i.e., time offsets) for each time window\n # to use for the correlation analysis\n corr_offsets = np.arange(1, (n_samples - corr_frames), corr_frames)\n n_corr_steps = corr_offsets.shape[0]\n # placeholders for correlation coefficients\n max_r = np.ones((n_channels, n_corr_steps))\n channel_r = np.ones((n_corr_steps, n_channels))\n\n # create time windows for analysis\n dat_t = np.transpose(dat)\n dat_windowed = np.reshape(\n np.transpose(dat_t[0: corr_window.shape[0] * n_corr_steps, :]),\n (n_channels, corr_window.shape[0], n_corr_steps),\n order=\"F\",)\n\n # compute (Pearson) correlation coefficients across channels\n # (for each channel and analysis time window);\n # take the absolute value of the 98th percentile of the correlations with\n # the other channels as a measure of how well that channel is correlated\n # to other channels\n for k in range(0, n_corr_steps):\n eeg_portion = np.transpose(np.squeeze(dat_windowed[:, :, k]))\n window_correlation = np.corrcoef(np.transpose(eeg_portion))\n abs_corr = \\\n np.abs(\n np.subtract(\n window_correlation, np.diag(np.diag(window_correlation))\n )\n )\n channel_r[k, :] = np.quantile(abs_corr, 0.98, axis=0)\n\n # fill in the actual correlations\n max_r[np.arange(0, n_channels), :] = np.transpose(channel_r)\n\n # check which channels correlate badly with the other channels (i.e.,\n # are below correlation threshold) in a certain fraction of windows\n # (percent_threshold)\n thresholded_correlations = max_r < r_threshold\n thresholded_correlations = thresholded_correlations.astype(int)\n frac_bad_corr_windows = np.mean(thresholded_correlations, axis=1)\n\n # find the corresponding channel names and return\n bad_idxs = np.argwhere(frac_bad_corr_windows > percent_threshold)\n uncorrelated_channels = [channels[int(bad)] for bad in bad_idxs]\n\n bad_channels.update(correlation=np.asarray(uncorrelated_channels)) # noqa: E501\n\n return bad_channels\n", "\"\"\"\n==============\nMVPA functions\n==============\n\nUtility functions for estimation and statistical analysis of MVPA parameters.\n\nAuthors: Functions retrieved and adapted from\n https://github.com/heikele/GAT_n4-p6, also see:\n Heikel, E., Sassenhagen, J., & Fiebach, C. J. (2018).\n Time-generalized multivariate analysis of EEG responses reveals a\n cascading architecture of semantic mismatch processing.\n Brain and language, 184, 43-53.\n Changes made by José C. 
García Alanis <[email protected]>\n\nLicense: BSD (3-clause)\n\"\"\"\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\nfrom scipy.stats import wilcoxon\n\nfrom sklearn.linear_model import RidgeClassifier, LogisticRegression\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\n\nfrom mne.decoding import cross_val_multiscore, GeneralizingEstimator, \\\n get_coef, Vectorizer\nfrom mne.parallel import parallel_func\nfrom mne.stats import fdr_correction, spatio_temporal_cluster_1samp_test, \\\n ttest_1samp_no_p\nfrom mne import read_epochs\n\nfrom config import fname\n\n\n# signed rank test\ndef _my_wilcoxon(X):\n out = wilcoxon(X)\n return out[1]\n\n\n# loop function\ndef _loop(x, function):\n out = list()\n for ii in range(x.shape[1]):\n out.append(function(x[:, ii]))\n return out\n\n\n# correct p values for multiple testing\ndef parallel_stats(X, function=_my_wilcoxon, correction='FDR', n_jobs=2):\n\n # check if correction method was provided\n if correction not in [False, None, 'FDR']:\n raise ValueError('Unknown correction')\n\n # reshape to 2D\n X = np.array(X)\n dims = X.shape\n X.resize([dims[0], np.prod(dims[1:])])\n\n # prepare parallel\n n_cols = X.shape[1]\n parallel, pfunc, n_jobs = parallel_func(_loop, n_jobs)\n n_chunks = min(n_cols, n_jobs)\n chunks = np.array_split(range(n_cols), n_chunks)\n p_values = parallel(pfunc(X[:, chunk], function) for chunk in chunks)\n p_values = np.reshape(np.hstack(p_values), dims[1:])\n X.resize(dims)\n\n # apply correction\n if correction == 'FDR':\n dims = p_values.shape\n _, p_values = fdr_correction(p_values)\n p_values = np.reshape(p_values, dims)\n\n return p_values\n\n\n# one sample t-test\ndef _stat_fun(x, sigma=0, method='relative'):\n t_values = ttest_1samp_no_p(x, sigma=sigma, method=method)\n t_values[np.isnan(t_values)] = 0\n return t_values\n\n\n# threshold free cluster permutation test\ndef stats_tfce(X, n_permutations=1000, threshold=None, n_jobs=2):\n\n # calculate p-values using cluster permutation test\n _, _, p_values, _ = spatio_temporal_cluster_1samp_test(\n X,\n out_type='indices',\n stat_fun=_stat_fun,\n n_permutations=n_permutations,\n threshold=threshold,\n n_jobs=n_jobs)\n\n p_values = p_values.reshape(X.shape[1:])\n\n return p_values\n\n\n# function to import mne-epochs for participant\ndef get_epochs(subj):\n \"\"\"\n Loads the single trial data for a participant (name)\n \"\"\"\n\n input_file = fname.output(subject=subj,\n processing_step='cue_epochs',\n file_type='epo.fif')\n epoch = read_epochs(input_file)\n epoch.crop(tmin=-0.5, tmax=epoch.tmax, include_tmax=False)\n epoch.apply_baseline((-0.300, -0.050))\n\n return epoch\n\n\n# run generalisation across time and condition\ndef run_gat(subj, decoder=\"ridge\", n_jobs=2):\n \"\"\"\n Function to run Generalization Across Time (GAT).\n\n Parameters\n ----------\n subj: int\n decoder: str\n Specify type of classifier -'ridge' for Ridge Regression (default),\n 'lin-svm' for linear SVM 'svm' for nonlinear (RBF) SVM and 'log_reg'\n for Logistic Regression\n n_jobs: int\n The number of jobs to run in parallel.\n \"\"\"\n # load cue A and cue B epochs\n epochs = get_epochs(subj)['Correct A', 'Correct B']\n\n # specify whether to use a linear or nonlinear SVM if SVM is used\n lin = '' # if not svm it doesn't matter, both log_reg and ridge are linear\n if \"svm\" in decoder:\n decoder, lin = decoder.split(\"-\")\n\n # build 
classifier pipeline #\n # pick a machine learning algorithm to use (ridge/SVM/logistic regression)\n decoder_dict = {\n \"ridge\": RidgeClassifier(class_weight='balanced',\n random_state=42,\n solver=\"sag\"),\n \"svm\": SVC(class_weight='balanced',\n kernel=(\"rbf\" if \"non\" in lin else \"linear\"),\n random_state=42),\n \"log_reg\": LogisticRegression(class_weight='balanced',\n random_state=42)}\n\n # get data and targets\n data = epochs.get_data()\n labels = epochs.events[:, -1]\n\n # create classifier pipeline\n clf = make_pipeline(StandardScaler(),\n decoder_dict[decoder])\n gen_clf = GeneralizingEstimator(clf, scoring=\"roc_auc\", n_jobs=n_jobs)\n\n # compute cross validated performance scores\n scores = cross_val_multiscore(gen_clf, data,\n labels,\n cv=5,\n n_jobs=n_jobs).mean(0)\n\n # calculate prediction confidence scores\n cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n preds = np.empty((len(labels), data.shape[2], data.shape[2]))\n for train, test in cv.split(data, labels):\n gen_clf.fit(data[train], labels[train])\n d = gen_clf.decision_function(data[test])\n preds[test] = d\n\n # compute topographical patterns\n dat = Vectorizer().fit_transform(data)\n clf.fit(dat, labels)\n dat = dat - dat.mean(0, keepdims=True)\n\n # look for the type of classifier and get the weights\n if decoder == 'ridge':\n filt_ = clf.named_steps.ridgeclassifier.coef_.copy()\n elif decoder == 'svm':\n filt_ = clf.named_steps.svc.coef_.copy()\n elif decoder == 'log_reg':\n filt_ = clf.named_steps.logisticregression.coef_.copy()\n\n # Compute patterns using Haufe's trick: A = Cov_X . W . Precision_Y\n # cf.Haufe, et al., 2014, NeuroImage,\n # doi:10.1016/j.neuroimage.2013.10.067)\n inv_y = 1.\n patt_ = np.cov(dat.T).dot(filt_.T.dot(inv_y)).T\n\n # store the patterns accordingly\n if decoder == 'ridge':\n clf.named_steps.ridgeclassifier.patterns_ = patt_\n elif decoder == 'svm':\n clf.named_steps.svc.patterns_ = patt_\n elif decoder == 'log_reg':\n clf.named_steps.logisticregression.patterns_ = patt_\n\n # back transform using steps in pipeline\n patterns = get_coef(clf, 'patterns_', inverse_transform=True)\n\n # return subject scores, prediction confidence and topographical patterns\n return scores, preds, patterns\n\n\ndef get_p_scores(scores, chance=.5,\n tfce=False, permutations=1000, threshold=None, n_jobs=1):\n \"\"\"\n Calculate p_values from scores for significance masking using either\n TFCE or FDR.\n\n Parameters\n ----------\n scores: numpy array\n Calculated scores from decoder\n chance: float\n Indicate chance level\n tfce: True | False\n Specify whether to Threshold Free Cluster Enhancement (True)\n or FDR (False)\n permutations: int\n The number of permutations to compute.\n threshold: float | dict | None\n Threshold that needs to be exceeded to achieve significance\n n_jobs: int\n The number of jobs to run in parallel (default 1). 
Requires the\n joblib package.\n\n \"\"\"\n p_values = (parallel_stats(scores - chance, n_jobs=n_jobs) if tfce is False\n else stats_tfce(scores - chance,\n n_permutations=permutations,\n threshold=threshold,\n n_jobs=n_jobs))\n return p_values\n\n\ndef grouper(iterable):\n \"\"\"\n List of time points of significance, identifies neighbouring time points.\n \"\"\"\n prev = None\n group = []\n for item in iterable:\n if not prev or round(item - prev, 2) <= .01:\n group.append(item)\n else:\n yield group\n group = [item]\n prev = item\n if group:\n yield group\n\n\ndef find_clus(sig_times):\n \"\"\"\n Identify time points of significance from FDR correction, results in\n lists of ranges and individual time points of significance and\n creates a dictionary for later use.\n\n Parameters\n ----------\n sig_times: list\n List of significant time points\n \"\"\"\n group = dict(enumerate(grouper(sig_times)))\n clus = []\n for key in group.keys():\n ls = group[key]\n clus.append((([ls[0], ls[-1]] if round((ls[1] - ls[0]), 2) <= 0.01\n else [ls[1], ls[-1]]) if len(group[key]) > 1\n else group[key]))\n return clus\n\n\ndef get_stats_lines(scores, times, test_times, alphas=[.05, .01]):\n \"\"\"\n Calculate subject level decoder performances for each of the times series\n plots and perform FDR correction (p<0.05 and p<0.01).\n Creates a dictionary of relevant stats.\n\n Parameters\n ----------\n\n scores: array\n times: array\n test_times: dict\n alphas: list\n List of alphas for significance masking default masks for p<0.05\n and p<0.01\n \"\"\"\n\n # get alpha levels\n alpha1, alpha2 = alphas\n\n # get the diagonal (training_t==testing_t) for each subject,\n # FDR correction, and mask time points of significance\n diag = np.asarray([sc.diagonal() for sc in scores])\n diag_pvalues = parallel_stats(list(diag - 0.5))\n diag1, diag2 = times[diag_pvalues < alpha1], times[diag_pvalues < alpha2]\n\n # get component boundaries for perfomance analysis\n min_max = {k: [(np.abs(v[0] - times)).argmin(),\n (np.abs(v[1] - times)).argmin()]\n for (k, v) in test_times.items()}\n\n # average classifier performance over time for time window of interest\n # for each subject\n class_performance = {k: scores[:, v[0]:v[1], :].mean(1)\n for (k, v) in min_max.items()}\n\n # FDR correction and significance testing\n p_vals = {k: parallel_stats(list(v - 0.5))\n for (k, v) in class_performance.items()}\n\n # mask time points of significance that are p<0.05 (alpha1) and\n # p<0.01 (alpha2)\n masks = {k: [times[v < alpha] for alpha in alphas]\n for (k, v) in p_vals.items()}\n\n # # *** keep this just in case we neeed it later ***\n # # average difference between classifier performance over time for\n # time window of interest for each subject\n # diff = np.array([(sc[p6_min:p6_max].mean(0) -\n # sc[n4_min:n4_max].mean(0))\n # for sc in scores])\n # # FDR correction and significance masking\n # diff_pvalues = parallel_stats(list(diff))\n # diff1, diff2 = xx[diff_pvalues < alpha1], xx[diff_pvalues < alpha2]\n\n # create dict of diagonal stats\n diag_stats = {'diag': [diag, diag1, diag2]}\n\n # create dict of classifier stats\n class_stats = {k: [v, m[0], m[1]]\n for ((k, v), (a, m)) in\n zip(class_performance.items(), masks.items())}\n\n # object for return\n stats_dict = {**diag_stats, **class_stats}\n\n return stats_dict\n\n\ndef plot_image(data, times, mask=None, ax=None, vmax=None, vmin=None,\n draw_mask=None, draw_contour=None, colorbar=True,\n draw_diag=True, draw_zerolines=True,\n xlabel=\"Time (s)\", ylabel=\"Time (s)\",\n 
cbar_unit=\"%\", cmap=\"RdBu_r\",\n mask_alpha=.75, mask_cmap=\"RdBu_r\"):\n \"\"\"Return fig (or the given ax) for further styling of GAT matrix, e.g., titles\n\n Parameters\n ----------\n data: array of scores\n times: list of epoched time points\n mask: None | array\n ...\n \"\"\"\n fig = None\n if ax is None:\n fig = plt.figure()\n ax = plt.axes()\n\n if vmax is None:\n vmax = np.abs(data).max()\n if vmin is None:\n vmin = -vmax\n tmin, tmax = xlim = times[0], times[-1]\n extent = [tmin, tmax, tmin, tmax]\n im_args = dict(interpolation='nearest', origin='lower',\n extent=extent, aspect='auto', vmin=vmin, vmax=vmax)\n\n if mask is not None:\n draw_mask = True if draw_mask is None else draw_mask\n draw_contour = True if draw_contour is None else draw_contour\n if any((draw_mask, draw_contour,)):\n if mask is None:\n raise ValueError(\"No mask to show!\")\n\n if draw_mask:\n ax.imshow(data, alpha=mask_alpha, cmap=mask_cmap, **im_args)\n im = ax.imshow(np.ma.masked_where(~mask, data), cmap=cmap, **im_args)\n else:\n im = ax.imshow(data, cmap=cmap, **im_args)\n if draw_contour and np.unique(mask).size == 2:\n big_mask = np.kron(mask, np.ones((10, 10)))\n ax.contour(big_mask, colors=[\"k\"], extent=extent, linewidths=[1],\n aspect=1,\n corner_mask=False, antialiased=False, levels=[.5])\n ax.set_xlim(xlim)\n ax.set_ylim(xlim)\n\n if draw_diag:\n ax.plot((tmin, tmax), (tmin, tmax), color=\"k\", linestyle=\":\")\n if draw_zerolines:\n ax.axhline(0, color=\"k\", linestyle=\":\")\n ax.axvline(0, color=\"k\", linestyle=\":\")\n\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel, labelpad=10.0)\n\n if colorbar:\n cbar = plt.colorbar(im, ax=ax)\n cbar.ax.set_title(cbar_unit)\n ax.set_aspect(1. / ax.get_data_ratio())\n ax.set_title(\"GAT Matrix\", pad=10.0)\n # ax.title.set_position([.5, 1.025])\n\n # return the figure we created when no axes were passed in, else the axes\n return fig if fig is not None else ax\n\n\ndef get_dfs(stats_dict, df_type=False):\n \"\"\"Create DataFrames for time series plotting\"\"\"\n from config import subjects\n # get times\n times = get_epochs(subjects[0]).times\n\n if not df_type:\n # create dataframe for N400 and P600 decoders\n df = pd.DataFrame()\n sub, time, accuracy, comp = [], [], [], []\n comps = list(stats_dict.keys())\n comps = [i for i in comps if i != 'diag']\n for c in comps:\n for ii, s in enumerate(stats_dict[c][0]):\n for t, a in enumerate(s):\n sub.append(ii)\n accuracy.append(a)\n time.append(times[t])\n comp.append(c)\n df[\"Time (s)\"] = time\n df[\"Subject\"] = sub\n df[\"Accuracy (%)\"] = accuracy\n df[\"Component\"] = comp\n\n else:\n # create dataframe for diagonal or difference between components\n sub, time, ac = [], [], []\n df = pd.DataFrame()\n for ii, s in enumerate(stats_dict[df_type][0]):\n for t, a in enumerate(s):\n sub.append(ii), ac.append(a), time.append(times[t])\n df[\"Time (s)\"], df[\"Subject\"] = time, sub\n df[\"{}\".format((\"Accuracy (%)\" if df_type == \"diag\"\n else \"Difference in Accuracy (%)\"))] = ac\n\n return df\n", "\"\"\"\n=============================================\nPlot results of multivariate pattern analysis\n=============================================\n\nCreates figures to show classifier performance at multiple time points of\nthe EEG epoch.\n\nAuthors: José C. 
García Alanis <[email protected]>\n\nLicense: BSD (3-clause)\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nimport numpy as np\nimport seaborn as sns\n\nfrom config import fname, subjects\nfrom mvpa_stats import get_stats_lines, get_dfs, plot_image\n\nfrom mne import read_epochs\n\n# exclude subjects 51\nsubjects = subjects[subjects != 51]\n\n##############################################################################\n# 1) import a generic file to use as template\ninput_file = fname.output(subject=subjects[0],\n processing_step='cue_epochs',\n file_type='epo.fif')\ncue_epo = read_epochs(input_file, preload=True)\ncue_epo = cue_epo.crop(tmin=-0.5, include_tmax=False)\n\n##############################################################################\n# 2) import mvpa results\n\n# load GAT scores\nscores = np.load(fname.results + '/gat_scores_ridge.npy')\n\n# load p values\np_vals = np.load(fname.results + '/p_vals_gat_ridge.npy')\n\n##############################################################################\n# 3) create generalisation across time (GAT) matrix figure\ndata = scores.copy()\nfig, axes = plt.subplots(figsize=(6, 4.5))\nplot_image(data.mean(0),\n cue_epo.times,\n mask=p_vals < 0.01,\n ax=axes, vmax=.7, vmin=.3,\n draw_mask=True, draw_contour=True, colorbar=True,\n draw_diag=True, draw_zerolines=True, xlabel=\"Time (s)\",\n ylabel=\"Time (s)\",\n cbar_unit=\"%\", cmap=\"RdBu_r\", mask_cmap=\"RdBu_r\", mask_alpha=.95)\naxes.spines['top'].set_bounds(-0.5, 2.5)\naxes.spines['right'].set_bounds(-0.5, 2.5)\naxes.spines['left'].set_bounds(-0.5, 2.5)\naxes.spines['bottom'].set_bounds(-0.5, 2.5)\naxes.set_xticks(list(np.arange(-.5, 2.55, .5)), minor=False)\naxes.set_yticks(list(np.arange(-.5, 2.55, .5)), minor=False)\nfig.savefig(fname.figures + '/gat_matrix.pdf', dpi=300)\n\n\n##############################################################################\n# 4) Plot classifier performance for specific time slices of interest\ntest_times = dict(N170=[0.17, 0.27],\n # P3=[0.33, 0.43],\n LPC=[0.5, 0.6],\n CNV=[0.95, 1.05])\n\n# compute significance for those time slices\nstats_dict = get_stats_lines(scores, times=cue_epo.times, test_times=test_times)\n\ncolors = np.linspace(0.2, 0.8, len(test_times.values()))\ncmap = cm.get_cmap('inferno')\n\n# create figure\nfor df_type in ['diag', False]:\n\n if df_type == 'diag':\n title = 'Diagonal decoding performance'\n name = 'diagonal_performance'\n else:\n title = 'Component generalization across time'\n name = 'component_performance'\n lw_b = 0.45\n up_b = 0.70\n\n palette = [cmap(colors[i]) for i, val in enumerate(test_times.values())]\n fig, axes = plt.subplots(figsize=(9, 4.5))\n onsets = {k: v[0] for (k, v) in test_times.items()}\n axes.bar(onsets.values(), 1, width=0.1, alpha=0.15, align='edge', color=palette)\n # for i, val in enumerate({k: v[0] for (k, v) in test_times.items()}):\n # axes.bar(val[0], 1, width=val[1], alpha=0.15,\n # align='edge', color=cmap(colors[i]))\n\n if df_type == 'diag':\n sns.lineplot(data=get_dfs(stats_dict, df_type=df_type),\n color='k',\n y='Accuracy (%)',\n x='Time (s)',\n ci=95,\n ax=axes)\n\n for t in stats_dict['diag'][1]:\n axes.scatter(t, 0.45, marker='_', color='k', s=1.0)\n for t in stats_dict['diag'][2]:\n axes.scatter(t, 0.45, marker='|', color='k', s=25.0)\n\n else:\n sns.lineplot(data=get_dfs(stats_dict, df_type=df_type),\n hue='Component',\n y='Accuracy (%)',\n x='Time (s)',\n ci=95,\n palette=palette,\n ax=axes)\n\n components = stats_dict.keys()\n components = [c for c 
in components if c != 'diag']\n max_off = (len(onsets) * 0.5) / 100\n offsets = np.linspace(0.45, 0.45+max_off, len(onsets)) - np.linspace(\n 0.45, 0.45+max_off, len(onsets)).mean()\n\n for n_comp, comp in enumerate(components):\n for t in stats_dict[comp][1]:\n axes.scatter(t, 0.45+offsets[n_comp], marker='_', color=palette[n_comp], s=1.0)\n for t in stats_dict[comp][2]:\n axes.scatter(t, 0.45+offsets[n_comp], marker='|', color=palette[n_comp], s=25.0)\n\n axes.set_title(title, pad=10.0)\n axes.set_xlabel('Time (s)', labelpad=10.0)\n axes.set_ylabel('Accuracy (%)', labelpad=10.0)\n axes.spines['top'].set_visible(False)\n axes.spines['right'].set_visible(False)\n axes.spines['left'].set_bounds(lw_b, up_b)\n axes.spines['bottom'].set_bounds(-0.5, 2.5)\n axes.set_ylim(lw_b - 0.015, up_b + 0.025)\n\n axes.axhline(y=0.5, xmin=-.5, xmax=2.5,\n color='black', linestyle='dashed', linewidth=.8)\n axes.axvline(x=0.0, ymin=0, ymax=1.0,\n color='black', linestyle='dashed', linewidth=.8)\n\n fig.savefig(fname.figures + '/%s_gat_ridge.pdf' % name, dpi=300)\n" ]
[ [ "numpy.diag", "numpy.nanmedian", "numpy.abs", "numpy.asarray", "numpy.arange", "numpy.squeeze", "numpy.quantile", "numpy.argwhere", "numpy.ones", "scipy.stats.median_abs_deviation", "numpy.std", "numpy.logical_or", "numpy.mean", "numpy.transpose" ], [ "pandas.DataFrame", "matplotlib.pyplot.axes", "numpy.ma.masked_where", "numpy.hstack", "numpy.unique", "numpy.reshape", "sklearn.model_selection.StratifiedKFold", "matplotlib.pyplot.figure", "numpy.isnan", "scipy.stats.wilcoxon", "numpy.cov", "sklearn.svm.SVC", "numpy.array", "sklearn.linear_model.RidgeClassifier", "sklearn.linear_model.LogisticRegression", "numpy.abs", "numpy.ones", "matplotlib.pyplot.colorbar", "numpy.prod", "sklearn.preprocessing.StandardScaler" ], [ "numpy.load", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.cm.get_cmap" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.9", "1.5", "1.7", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dnnspark/bulletwrapper
[ "3d034ee27ccf1613e84bd0beaae345eb4cbb9f91" ]
[ "bulletwrapper/hooks/box_packing/pack_box.py" ]
[ "import numpy as np\nfrom bulletwrapper.util.object_loader import OBJFile\n\nclass Block():\n '''\n This class represent a 2D rectangle associated with a top-down view of a cuboid.\n Each cuboid gives rise to 6 sides * 4 orientation = 24 axis-aligned blocks.\n The rectangle is represented by its width and height; this data is used to \n compute the layout of blocks.\n The block also has fields of rotation matrix and z-offset, which are used to\n convert the 2D position of block to a full 3D pose of cuboid.\n '''\n\n def __init__(self, width, height, R, zoffset):\n self.dim = (width, height)\n self.R = R\n self.zoffset = zoffset\n\n\ndef cuboid_to_blocks(vertices, inplane_rot_angles=[0]):\n '''\n CRUCIAL assumption:\n vertices collectively represent an axis-aligned 3D cuboid centered at origin.\n\n Input\n =====\n vertices: np.ndarray\n (N,3) float tensor, each row is a vertex.\n\n Return\n ======\n blocks: [Block]\n A list of N*24 blocks, where N = len(inplane_rot_angles)\n\n # blocks: np.ndarray\n # (N*24,2) float tensor, where N = len(inplane_rot_angles)\n\n # rot_mat: np.ndarray\n # (N*24,3,3) float tensor that contains N 3x3 rotation matrices \n # associated with blocks.\n '''\n\n axes = np.array([\n [ 1, 0, 0],\n [-1, 0, 0],\n [ 0, 1, 0],\n [ 0,-1, 0],\n [ 0, 0, 1],\n [ 0, 0,-1],\n ])\n\n # Rs, blocks, zoffsets = [], [], []\n blocks = []\n for xaxis in axes:\n for yaxis in axes:\n if np.dot(xaxis, yaxis) != 0:\n continue\n zaxis = np.cross(xaxis, yaxis)\n\n R0 = np.array([xaxis, yaxis, zaxis]).T # object-to-world rotation\n\n for theta in inplane_rot_angles:\n _theta = np.pi * theta/180.\n c,s = np.cos(_theta), np.sin(_theta)\n R_inplane = np.array([\n [c, -s, 0],\n [s, c, 0],\n [0, 0, 1],\n ])\n R = np.dot(R_inplane, R0)\n # R = np.dot(R0, R_inplane)\n\n transformed = np.dot( R, vertices.T ).T\n x1y1z1 = np.min(transformed, axis=0)\n x2y2z2 = np.max(transformed, axis=0)\n # assert np.allclose(x1y1z1 + x2y2z2 , 0) # for perfect cuboid\n\n diagonal = x2y2z2 - x1y1z1\n W,H = diagonal[:2]\n\n zoffset = -x1y1z1[-1]\n\n blocks.append( Block(W, H, R, zoffset) )\n\n return blocks\n\ndef pack_bin(blocks, box, slack=0.):\n '''\n 2D bin-packing divide-and-conquer algorithm.\n\n Input\n =====\n blocks: [Blocks]\n (N,2) float tensor representing dimensions of 2D blocks. 
\n The first column are widths, the second heights.\n\n box: [float, float]\n width, height\n\n slack: (float, float)\n range of slack\n\n Return\n ======\n block_layout: [ (int, (float, float)) ]\n a list of (index, (start_x, start_y))\n\n '''\n\n blocks_dim = np.array([block.dim for block in blocks])\n blocks_dim_w_slack = blocks_dim + slack\n\n # randomly choose a block that fits.\n fit = blocks_dim_w_slack <= np.expand_dims(box, 0)\n fit = np.where(np.logical_and(fit[:,0], fit[:,1]))[0]\n\n if len(fit) == 0:\n # no blocks fit.\n return []\n\n pick = np.random.choice(fit)\n block_dim_w_slack = blocks_dim_w_slack[pick]\n\n W,H = box\n w,h = block_dim_w_slack\n\n # randomly choose one of the two ways to split the remaining area into two rectangular areas.\n split = np.random.choice(2)\n if split == 0:\n # split horizontally.\n two_residual_boxes = [\n [W-w, h],\n [W, H-h], \n ]\n\n else:\n # split vertically.\n two_residual_boxes = [\n [W-w, H],\n [w, H-h],\n ]\n\n # randomly choose one of the four corners to place the block.\n corner = np.random.choice(4)\n if corner == 0:\n # upper-left corner\n layout = [ (pick, (0., 0.)) ]\n\n two_offsets = [\n [w, 0.],\n [0., h]\n ]\n\n elif corner == 1:\n # upper-right corner\n layout = [ (pick, (W-w, 0.)) ]\n\n if split == 0:\n two_offsets = [\n [0., 0.],\n [0., h],\n ]\n else:\n two_offsets = [\n [0., 0.],\n [W-w, h],\n ]\n elif corner == 2:\n # lower-left corner\n layout = [ (pick, (0., H-h)) ]\n\n if split == 0:\n two_offsets = [\n [w, H-h],\n [0., 0.],\n ]\n else:\n two_offsets = [\n [w, 0.],\n [0., 0.],\n ]\n\n else: #corner == 3:\n # lower-right corner\n layout = [ (pick, (W-w, H-h)) ]\n\n if split == 0:\n two_offsets = [\n [0., H-h],\n [0., 0.],\n ]\n else:\n two_offsets = [\n [0., 0.],\n [W-w, 0.],\n ]\n\n for residual_box, offsets in zip(two_residual_boxes, two_offsets):\n sub_layout = pack_bin(blocks, residual_box, slack)\n sub_layout = [(idx, (start_x+offsets[0], start_y+offsets[1])) for idx, (start_x, start_y) in sub_layout]\n layout += sub_layout\n\n return layout\n\ndef compute_poses(blocks, block_layout, box_dim, box_center):\n\n # positions = []\n poses = []\n box_dim, box_center = np.array(box_dim), np.array(box_center)\n for idx, (x1, y1) in block_layout:\n block = blocks[idx]\n\n R = block.R\n xy_offset = np.array([x1,y1]) + .5 * np.array(block.dim) - .5 * box_dim + box_center\n z_offset = block.zoffset\n\n position = list(xy_offset) + [z_offset]\n poses.append( (R, position))\n\n return poses\n\ndef make_packed_cuboids_scene(path_to_obj, mesh_scale, box_dim, box_center, inplane_rot_angles, slack, block_filter=None):\n vertices = np.array( OBJFile(path_to_obj, None).vertices ) * mesh_scale\n blocks = cuboid_to_blocks(vertices, inplane_rot_angles=inplane_rot_angles)\n if block_filter is not None:\n blocks = [block for block in blocks if block_filter(block)]\n block_layout = pack_bin(blocks, box_dim, slack=slack)\n\n poses = compute_poses(blocks, block_layout, box_dim, box_center)\n\n return poses\n" ]
[ [ "numpy.dot", "numpy.expand_dims", "numpy.logical_and", "numpy.random.choice", "numpy.min", "numpy.cos", "numpy.sin", "numpy.max", "numpy.cross", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
[ "67b8955eb59137590dbadc6aac45529ae9459e4a", "67b8955eb59137590dbadc6aac45529ae9459e4a" ]
[ "mmdet/models/bbox_heads/convfc_bbox_head.py", "mmdet/models/anchor_heads/retina_head.py" ]
[ "import torch.nn as nn\n\nfrom .bbox_head import BBoxHead\nfrom ..registry import HEADS\nfrom ..utils import ConvModule\n\n\[email protected]_module\nclass ConvFCBBoxHead(BBoxHead):\n \"\"\"More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n \\-> reg convs -> reg fcs -> reg\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_shared_convs=0,\n num_shared_fcs=0,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n conv_out_channels=256,\n fc_out_channels=1024,\n conv_cfg=None,\n norm_cfg=None,\n with_avg_pool_shared=False,\n with_avg_pool_cls=False,\n with_avg_pool_reg=False,\n *args,\n **kwargs):\n super(ConvFCBBoxHead, self).__init__(*args, **kwargs)\n assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n if num_cls_convs > 0 or num_reg_convs > 0:\n assert num_shared_fcs == 0\n if not self.with_cls:\n assert num_cls_convs == 0 and num_cls_fcs == 0\n if not self.with_reg:\n assert num_reg_convs == 0 and num_reg_fcs == 0\n self.num_shared_convs = num_shared_convs\n self.num_shared_fcs = num_shared_fcs\n self.num_cls_convs = num_cls_convs\n self.num_cls_fcs = num_cls_fcs\n self.num_reg_convs = num_reg_convs\n self.num_reg_fcs = num_reg_fcs\n self.conv_out_channels = conv_out_channels\n self.fc_out_channels = fc_out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n # added by WSK\n self.with_avg_pool_shared = with_avg_pool_shared\n self.with_avg_pool_cls = with_avg_pool_cls\n self.with_avg_pool_reg = with_avg_pool_reg\n\n # add shared convs and fcs\n self.shared_convs, self.shared_fcs, last_layer_dim = \\\n self._add_conv_fc_branch(\n self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n True)\n self.shared_out_channels = last_layer_dim\n\n # add cls specific branch\n self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n self._add_conv_fc_branch(\n self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n # add reg specific branch\n self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n self._add_conv_fc_branch(\n self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n if self.num_shared_fcs == 0 and not self.with_avg_pool:\n if self.num_cls_fcs == 0:\n self.cls_last_dim *= (self.roi_feat_size * self.roi_feat_size)\n if self.num_shared_fcs == 0 and not self.with_avg_pool_reg:\n if self.num_reg_fcs == 0:\n self.reg_last_dim *= (self.roi_feat_size * self.roi_feat_size)\n\n self.relu = nn.ReLU(inplace=True)\n # reconstruct fc_cls and fc_reg since input channels are changed\n if self.with_cls:\n self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)\n if self.with_reg:\n out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes\n self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)\n\n def _add_conv_fc_branch(self,\n num_branch_convs,\n num_branch_fcs,\n in_channels,\n is_shared=False):\n \"\"\"Add shared or separable branch\n\n convs -> avg pool (optional) -> fcs\n \"\"\"\n last_layer_dim = in_channels\n # add branch specific conv layers\n branch_convs = nn.ModuleList()\n if num_branch_convs > 0:\n for i in range(num_branch_convs):\n conv_in_channels = (\n last_layer_dim if i == 0 else self.conv_out_channels)\n branch_convs.append(\n ConvModule(\n conv_in_channels,\n self.conv_out_channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n last_layer_dim = self.conv_out_channels\n # add branch specific fc layers\n 
branch_fcs = nn.ModuleList()\n if num_branch_fcs > 0:\n # for shared branch, only consider self.with_avg_pool\n # for separated branches, also consider self.num_shared_fcs\n if (is_shared\n or self.num_shared_fcs == 0) and not self.with_avg_pool:\n last_layer_dim *= (self.roi_feat_size * self.roi_feat_size)\n for i in range(num_branch_fcs):\n fc_in_channels = (\n last_layer_dim if i == 0 else self.fc_out_channels)\n branch_fcs.append(\n nn.Linear(fc_in_channels, self.fc_out_channels))\n last_layer_dim = self.fc_out_channels\n return branch_convs, branch_fcs, last_layer_dim\n\n def init_weights(self):\n super(ConvFCBBoxHead, self).init_weights()\n for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:\n for m in module_list.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n # shared part\n if self.num_shared_convs > 0:\n for conv in self.shared_convs:\n x = conv(x)\n\n if self.num_shared_fcs > 0:\n if self.with_avg_pool:\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n for fc in self.shared_fcs:\n x = self.relu(fc(x))\n # separate branches\n x_cls = x\n x_reg = x\n\n for conv in self.cls_convs:\n x_cls = conv(x_cls)\n if x_cls.dim() > 2:\n if self.with_avg_pool:\n x_cls = self.avg_pool(x_cls)\n x_cls = x_cls.view(x_cls.size(0), -1)\n for fc in self.cls_fcs:\n x_cls = self.relu(fc(x_cls))\n\n for conv in self.reg_convs:\n x_reg = conv(x_reg)\n if x_reg.dim() > 2:\n if self.with_avg_pool_reg:\n x_reg = self.avg_pool(x_reg)\n x_reg = x_reg.view(x_reg.size(0), -1)\n for fc in self.reg_fcs:\n x_reg = self.relu(fc(x_reg))\n\n cls_score = self.fc_cls(x_cls) if self.with_cls else None\n bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n return cls_score, bbox_pred\n\n\[email protected]_module\nclass SharedFCBBoxHead(ConvFCBBoxHead):\n\n def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):\n assert num_fcs >= 1\n super(SharedFCBBoxHead, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=num_fcs,\n num_cls_convs=0,\n num_cls_fcs=0,\n num_reg_convs=0,\n num_reg_fcs=0,\n fc_out_channels=fc_out_channels,\n *args,\n **kwargs)\n\[email protected]_module\nclass SeperableBranchBBoxHead(ConvFCBBoxHead):\n def __init__(self, num_cls_fc=2 , num_reg_convs=2, conv_out_channels=256, fc_out_channels=1024, with_avg_pool_reg=True, *args, **kwargs):\n assert num_cls_fc >= 1 and num_reg_convs >=1\n super(SeperableBranchBBoxHead, self).__init__(\n num_shared_convs=0,\n num_shared_fcs=0,\n num_cls_convs=0,\n num_cls_fcs=num_cls_fc,\n num_reg_convs=num_reg_convs,\n num_reg_fcs=0,\n conv_out_channels=conv_out_channels,\n fc_out_channels=fc_out_channels,\n with_avg_pool_reg=with_avg_pool_reg\n *args,\n **kwargs)\n", "import numpy as np\nimport torch.nn as nn\nfrom mmcv.cnn import normal_init\n\nfrom .anchor_head import AnchorHead\nfrom ..registry import HEADS\nfrom ..utils import bias_init_with_prob, ConvModule\n\n\[email protected]_module\nclass RetinaHead(AnchorHead):\n\n def __init__(self,\n num_classes,\n in_channels,\n stacked_convs=4,\n octave_base_scale=4,\n scales_per_octave=3,\n conv_cfg=None,\n norm_cfg=None,\n **kwargs):\n self.stacked_convs = stacked_convs\n self.octave_base_scale = octave_base_scale\n self.scales_per_octave = scales_per_octave\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n octave_scales = np.array(\n [2**(i / scales_per_octave) for i in range(scales_per_octave)])\n anchor_scales = octave_scales * octave_base_scale\n super(RetinaHead, self).__init__(\n 
num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)\n\n\n\n def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.retina_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n 3,\n padding=1)\n\n self.retina_reg = nn.Conv2d(self.feat_channels, self.num_anchors*4, 3, padding=1)\n\n def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = bias_init_with_prob(0.01)\n normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n normal_init(self.retina_reg, std=0.01)\n\n def forward_single(self, x):\n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n\n # detach the cls_feat and reg_feat\n # cls_feat = cls_feat.detach()\n # reg_feat = reg_feat.detach()\n\n cls_score = self.retina_cls(cls_feat)\n bbox_pred = self.retina_reg(reg_feat)\n return cls_score, bbox_pred\n" ]
[ [ "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Linear", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ], [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.ModuleList" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chrfla/sec-filings-nlp
[ "dd296b4e7734e8bc682ba115b6f5f68a4e55c992" ]
[ "09_LDA_all_years.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n####### Import packages #########\n\nfrom pprint import pprint\nimport pickle\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import LdaMulticore\nimport spacy\nimport pyLDAvis\nimport pyLDAvis.gensim\nimport logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)\nimport random\nimport warnings\nwarnings.filterwarnings(\"ignore\",category=DeprecationWarning)\nimport nltk;\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\nstop_words.extend(['may', 'could', 'business', 'result', 'affect', 'include'])\nnlp = spacy.load('en', disable=['parser', 'ner'])\nfrom _settings import main_dir, lda_data_dir\n\n\n\n\n####### Define functions #########\n\n\ndef sent_to_words(sentences):\n for sentence in sentences:\n yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations\n\n# Define functions for stopwords, bigrams, trigrams and lemmatization\ndef remove_stopwords(texts):\n return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n\ndef lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n texts_out = []\n for sent in texts:\n doc = nlp(\" \".join(sent)) \n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n return texts_out\n\n\ndef Convert(tup, di): \n for a, b in tup:\n di[a] = float(b)\n return di \n\n\n\ndef train_LDA_model (data, num_topics, CPUs):\n\n # Pre-processing\n sentences = [nltk.tokenize.sent_tokenize(doc) for doc in data]\n sentences = [val for sublist in sentences for val in sublist]\n data_words = list(sent_to_words(sentences))\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n \n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n # python3 -m spacy download en\n nlp = spacy.load('en', disable=['parser', 'ner'])\n \n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_nostops, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n \n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n \n # Create Corpus\n texts = data_lemmatized\n \n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n\n # ## Train LDA Model\n \n # Build LDA model\n lda_model = LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics = num_topics, \n random_state=50,\n chunksize=100,\n passes=10,\n per_word_topics=True,\n workers = CPUs)\n \n model_dest = lda_data_dir + 'LDA_model/all_years_2007_2017/lda_model_all_years.model'\n lda_model.save(model_dest)\n\n \n # Print the Keyword in the 10 topics\n pprint(lda_model.print_topics())\n doc_lda = lda_model[corpus]\n\n # Visualize the topics\n vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\n storage_dest_lda_html = lda_data_dir + 'LDA_model/all_years_2007_2017/all_years_2007_2017_local_lda.html'\n pyLDAvis.save_html(vis, storage_dest_lda_html)\n\n \n wordcloud_dest = lda_data_dir + 'LDA_model/all_years_2007_2017/wordclouds/'\n \n for t in range(lda_model.num_topics):\n plt.figure()\n dictionary = {} \n plt.imshow(WordCloud().fit_words(Convert(lda_model.show_topic(t, 30), dictionary)))\n plt.axis(\"off\")\n plt.title(\"Topic_\" + str(t))\n plt.show()\n plt.savefig(wordcloud_dest + \"Topic #\" + str(t)+'.png') # 
set location on server\n\n\n return lda_model\n\n\n\n####### Execute code below #########\n\ndata = pickle.load(open(main_dir + 'data/clean_text.list', \"rb\"))\nlda_model = train_LDA_model(data, num_topics=30, CPUs=6)\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
minhnhatphan/CenterTrack
[ "f459601c8843018ba791fa78931ad291e108092a" ]
[ "src/tools/convert_mot_det_to_results.py" ]
[ "import json\nimport numpy as np\nimport os\nfrom collections import defaultdict\nsplit = 'train'\n\nDET_PATH = '../../data/mot17/train'\nANN_PATH = '../../data/mot17/annotations/{}.json'.format(split)\nOUT_DIR = '../../data/mot17/results/'\nOUT_PATH = OUT_DIR + '{}_det.json'.format(split)\n\nif __name__ == '__main__':\n if not os.path.exists(OUT_DIR):\n os.mkdir(OUT_DIR)\n seqs = [s for s in os.listdir(DET_PATH)]\n data = json.load(open(ANN_PATH, 'r'))\n images = data['images']\n image_to_anns = defaultdict(list)\n for seq in sorted(seqs):\n print('seq', seq)\n seq_path = '{}/{}/'.format(DET_PATH, seq)\n if split == 'val_half':\n ann_path = seq_path + 'det/det_val_half.txt'\n train_ann_path = seq_path + 'det/det_train_half.txt'\n train_anns = np.loadtxt(train_ann_path, dtype=np.float32, delimiter=',')\n frame_base = int(train_anns[:, 0].max())\n else:\n ann_path = seq_path + 'det/det.txt'\n frame_base = 0\n # if not IS_THIRD_PARTY:\n anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')\n for i in range(len(anns)):\n frame_id = int(anns[i][0])\n file_name = '{}/img1/{:06d}.jpg'.format(seq, frame_id + frame_base)\n bbox = (anns[i][2:6]).tolist()\n score = 1 # float(anns[i][8])\n image_to_anns[file_name].append(bbox + [score])\n\n results = {}\n for image_info in images:\n image_id = image_info['id']\n file_name = image_info['file_name']\n dets = image_to_anns[file_name]\n results[image_id] = []\n for det in dets:\n bbox = [float(det[0]), float(det[1]), \\\n float(det[0] + det[2]), float(det[1] + det[3])]\n ct = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]\n results[image_id].append(\n {'bbox': bbox, 'score': float(det[4]), 'class': 1, 'ct': ct})\n out_path = OUT_PATH\n json.dump(results, open(out_path, 'w'))\n" ]
[ [ "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FeiYuejiao/NLP_Pretrain
[ "7aa4693c31a7bba9b90f401d2586ef154dd7fb81" ]
[ "model/densenet.py" ]
[ "'''DenseNet in PyTorch.'''\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_planes, growth_rate):\n super(Bottleneck, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(4*growth_rate)\n self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)\n\n def forward(self, x):\n out = self.conv1(F.relu(self.bn1(x)))\n out = self.conv2(F.relu(self.bn2(out)))\n out = torch.cat([out,x], 1)\n return out\n\n\nclass Transition(nn.Module):\n def __init__(self, in_planes, out_planes):\n super(Transition, self).__init__()\n self.bn = nn.BatchNorm2d(in_planes)\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)\n\n def forward(self, x):\n out = self.conv(F.relu(self.bn(x)))\n out = F.avg_pool2d(out, 2)\n return out\n\n\nclass DenseNet(nn.Module):\n def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):\n super(DenseNet, self).__init__()\n self.growth_rate = growth_rate\n\n num_planes = 2*growth_rate\n self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)\n\n self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])\n num_planes += nblocks[0]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans1 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])\n num_planes += nblocks[1]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans2 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])\n num_planes += nblocks[2]*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans3 = Transition(num_planes, out_planes)\n num_planes = out_planes\n\n self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])\n num_planes += nblocks[3]*growth_rate\n\n self.bn = nn.BatchNorm2d(num_planes)\n self.linear = nn.Linear(num_planes, num_classes)\n\n def _make_dense_layers(self, block, in_planes, nblock):\n layers = []\n for i in range(nblock):\n layers.append(block(in_planes, self.growth_rate))\n in_planes += self.growth_rate\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.trans1(self.dense1(out))\n out = self.trans2(self.dense2(out))\n out = self.trans3(self.dense3(out))\n out = self.dense4(out)\n out = F.relu(self.bn(out))\n #out = F.avg_pool2d(F.relu(self.bn(out)), 4)\n #out = out.view(out.size(0), -1)\n #out = self.linear(out)\n return out\n\ndef DenseNet121():\n return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)\n\ndef DenseNet169():\n return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)\n\ndef DenseNet201():\n return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)\n\ndef DenseNet161():\n return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)\n\ndef densenet_cifar():\n return DenseNet(Bottleneck, [6,12,24,16], growth_rate=12)\n\ndef test():\n net = densenet_cifar()\n x = torch.randn(1,3,32,32)\n y = net(x)\n print(y)\n\n# test()\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.randn", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SoftwareDev/mat-plot-lib
[ "abaf94859d5ef6e653a4d8a7ce2c59cea1724a57", "255a79b106c98c1904489afe6a754e4d943179d6", "a429c415bdb6e54ccfe004a48fdc034ea8e9d329", "a429c415bdb6e54ccfe004a48fdc034ea8e9d329", "255a79b106c98c1904489afe6a754e4d943179d6", "a429c415bdb6e54ccfe004a48fdc034ea8e9d329" ]
[ "lib/matplotlib/colors.py", "examples/units/bar_unit_demo.py", "lib/matplotlib/patches.py", "lib/matplotlib/bezier.py", "examples/api/demo_affine_image.py", "lib/matplotlib/lines.py" ]
[ "\"\"\"\nA module for converting numbers or color arguments to *RGB* or *RGBA*\n\n*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the\nrange 0-1.\n\nThis module includes functions and classes for color specification\nconversions, and for mapping numbers to colors in a 1-D array of colors called\na colormap. Colormapping typically involves two steps: a data array is first\nmapped onto the range 0-1 using an instance of :class:`Normalize` or of a\nsubclass; then this number in the 0-1 range is mapped to a color using an\ninstance of a subclass of :class:`Colormap`. Two are provided here:\n:class:`LinearSegmentedColormap`, which is used to generate all the built-in\ncolormap instances, but is also useful for making custom colormaps, and\n:class:`ListedColormap`, which is used for generating a custom colormap from a\nlist of color specifications.\n\nThe module also provides a single instance, *colorConverter*, of the\n:class:`ColorConverter` class providing methods for converting single color\nspecifications or sequences of them to *RGB* or *RGBA*.\n\nCommands which take color arguments can use several formats to specify\nthe colors. For the basic built-in colors, you can use a single letter\n\n - b: blue\n - g: green\n - r: red\n - c: cyan\n - m: magenta\n - y: yellow\n - k: black\n - w: white\n\nGray shades can be given as a string encoding a float in the 0-1 range, e.g.::\n\n color = '0.75'\n\nFor a greater range of colors, you have two options. You can specify the\ncolor using an html hex string, as in::\n\n color = '#eeefff'\n\nor you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B* are in\nthe range [0,1].\n\nFinally, legal html names for colors, like 'red', 'burlywood' and 'chartreuse'\nare supported.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import map, zip\n\nimport re\nimport numpy as np\nfrom numpy import ma\nimport matplotlib.cbook as cbook\n\nparts = np.__version__.split('.')\nNP_MAJOR, NP_MINOR = list(map(int, parts[:2]))\n# true if clip supports the out kwarg\nNP_CLIP_OUT = NP_MAJOR >= 1 and NP_MINOR >= 2\n\ncnames = {\n 'aliceblue': '#F0F8FF',\n 'antiquewhite': '#FAEBD7',\n 'aqua': '#00FFFF',\n 'aquamarine': '#7FFFD4',\n 'azure': '#F0FFFF',\n 'beige': '#F5F5DC',\n 'bisque': '#FFE4C4',\n 'black': '#000000',\n 'blanchedalmond': '#FFEBCD',\n 'blue': '#0000FF',\n 'blueviolet': '#8A2BE2',\n 'brown': '#A52A2A',\n 'burlywood': '#DEB887',\n 'cadetblue': '#5F9EA0',\n 'chartreuse': '#7FFF00',\n 'chocolate': '#D2691E',\n 'coral': '#FF7F50',\n 'cornflowerblue': '#6495ED',\n 'cornsilk': '#FFF8DC',\n 'crimson': '#DC143C',\n 'cyan': '#00FFFF',\n 'darkblue': '#00008B',\n 'darkcyan': '#008B8B',\n 'darkgoldenrod': '#B8860B',\n 'darkgray': '#A9A9A9',\n 'darkgreen': '#006400',\n 'darkkhaki': '#BDB76B',\n 'darkmagenta': '#8B008B',\n 'darkolivegreen': '#556B2F',\n 'darkorange': '#FF8C00',\n 'darkorchid': '#9932CC',\n 'darkred': '#8B0000',\n 'darksage': '#598556',\n 'darksalmon': '#E9967A',\n 'darkseagreen': '#8FBC8F',\n 'darkslateblue': '#483D8B',\n 'darkslategray': '#2F4F4F',\n 'darkturquoise': '#00CED1',\n 'darkviolet': '#9400D3',\n 'deeppink': '#FF1493',\n 'deepskyblue': '#00BFFF',\n 'dimgray': '#696969',\n 'dodgerblue': '#1E90FF',\n 'firebrick': '#B22222',\n 'floralwhite': '#FFFAF0',\n 'forestgreen': '#228B22',\n 'fuchsia': '#FF00FF',\n 'gainsboro': '#DCDCDC',\n 'ghostwhite': '#F8F8FF',\n 'gold': '#FFD700',\n 'goldenrod': '#DAA520',\n 'gray': '#808080',\n 'green': '#008000',\n 
'greenyellow': '#ADFF2F',\n 'honeydew': '#F0FFF0',\n 'hotpink': '#FF69B4',\n 'indianred': '#CD5C5C',\n 'indigo': '#4B0082',\n 'ivory': '#FFFFF0',\n 'khaki': '#F0E68C',\n 'lavender': '#E6E6FA',\n 'lavenderblush': '#FFF0F5',\n 'lawngreen': '#7CFC00',\n 'lemonchiffon': '#FFFACD',\n 'lightblue': '#ADD8E6',\n 'lightcoral': '#F08080',\n 'lightcyan': '#E0FFFF',\n 'lightgoldenrodyellow': '#FAFAD2',\n 'lightgreen': '#90EE90',\n 'lightgray': '#D3D3D3',\n 'lightpink': '#FFB6C1',\n 'lightsage': '#BCECAC',\n 'lightsalmon': '#FFA07A',\n 'lightseagreen': '#20B2AA',\n 'lightskyblue': '#87CEFA',\n 'lightslategray': '#778899',\n 'lightsteelblue': '#B0C4DE',\n 'lightyellow': '#FFFFE0',\n 'lime': '#00FF00',\n 'limegreen': '#32CD32',\n 'linen': '#FAF0E6',\n 'magenta': '#FF00FF',\n 'maroon': '#800000',\n 'mediumaquamarine': '#66CDAA',\n 'mediumblue': '#0000CD',\n 'mediumorchid': '#BA55D3',\n 'mediumpurple': '#9370DB',\n 'mediumseagreen': '#3CB371',\n 'mediumslateblue': '#7B68EE',\n 'mediumspringgreen': '#00FA9A',\n 'mediumturquoise': '#48D1CC',\n 'mediumvioletred': '#C71585',\n 'midnightblue': '#191970',\n 'mintcream': '#F5FFFA',\n 'mistyrose': '#FFE4E1',\n 'moccasin': '#FFE4B5',\n 'navajowhite': '#FFDEAD',\n 'navy': '#000080',\n 'oldlace': '#FDF5E6',\n 'olive': '#808000',\n 'olivedrab': '#6B8E23',\n 'orange': '#FFA500',\n 'orangered': '#FF4500',\n 'orchid': '#DA70D6',\n 'palegoldenrod': '#EEE8AA',\n 'palegreen': '#98FB98',\n 'paleturquoise': '#AFEEEE',\n 'palevioletred': '#DB7093',\n 'papayawhip': '#FFEFD5',\n 'peachpuff': '#FFDAB9',\n 'peru': '#CD853F',\n 'pink': '#FFC0CB',\n 'plum': '#DDA0DD',\n 'powderblue': '#B0E0E6',\n 'purple': '#800080',\n 'red': '#FF0000',\n 'rosybrown': '#BC8F8F',\n 'royalblue': '#4169E1',\n 'saddlebrown': '#8B4513',\n 'salmon': '#FA8072',\n 'sage': '#87AE73',\n 'sandybrown': '#FAA460',\n 'seagreen': '#2E8B57',\n 'seashell': '#FFF5EE',\n 'sienna': '#A0522D',\n 'silver': '#C0C0C0',\n 'skyblue': '#87CEEB',\n 'slateblue': '#6A5ACD',\n 'slategray': '#708090',\n 'snow': '#FFFAFA',\n 'springgreen': '#00FF7F',\n 'steelblue': '#4682B4',\n 'tan': '#D2B48C',\n 'teal': '#008080',\n 'thistle': '#D8BFD8',\n 'tomato': '#FF6347',\n 'turquoise': '#40E0D0',\n 'violet': '#EE82EE',\n 'wheat': '#F5DEB3',\n 'white': '#FFFFFF',\n 'whitesmoke': '#F5F5F5',\n 'yellow': '#FFFF00',\n 'yellowgreen': '#9ACD32'}\n\n\n# add british equivs\nfor k, v in list(six.iteritems(cnames)):\n if k.find('gray') >= 0:\n k = k.replace('gray', 'grey')\n cnames[k] = v\n\n\ndef is_color_like(c):\n 'Return *True* if *c* can be converted to *RGB*'\n try:\n colorConverter.to_rgb(c)\n return True\n except ValueError:\n return False\n\n\ndef rgb2hex(rgb):\n 'Given an rgb or rgba sequence of 0-1 floats, return the hex string'\n return '#%02x%02x%02x' % tuple([np.round(val * 255) for val in rgb[:3]])\n\nhexColorPattern = re.compile(\"\\A#[a-fA-F0-9]{6}\\Z\")\n\n\ndef hex2color(s):\n \"\"\"\n Take a hex string *s* and return the corresponding rgb 3-tuple\n Example: #efefef -> (0.93725, 0.93725, 0.93725)\n \"\"\"\n if not isinstance(s, six.string_types):\n raise TypeError('hex2color requires a string argument')\n if hexColorPattern.match(s) is None:\n raise ValueError('invalid hex color string \"%s\"' % s)\n return tuple([int(n, 16) / 255.0 for n in (s[1:3], s[3:5], s[5:7])])\n\n\nclass ColorConverter(object):\n \"\"\"\n Provides methods for converting color specifications to *RGB* or *RGBA*\n\n Caching is used for more efficient conversion upon repeated calls\n with the same argument.\n\n Ordinarily only the single instance instantiated in 
this module,\n *colorConverter*, is needed.\n \"\"\"\n colors = {\n 'b': (0.0, 0.0, 1.0),\n 'g': (0.0, 0.5, 0.0),\n 'r': (1.0, 0.0, 0.0),\n 'c': (0.0, 0.75, 0.75),\n 'm': (0.75, 0, 0.75),\n 'y': (0.75, 0.75, 0),\n 'k': (0.0, 0.0, 0.0),\n 'w': (1.0, 1.0, 1.0), }\n\n cache = {}\n\n def to_rgb(self, arg):\n \"\"\"\n Returns an *RGB* tuple of three floats from 0-1.\n\n *arg* can be an *RGB* or *RGBA* sequence or a string in any of\n several forms:\n\n 1) a letter from the set 'rgbcmykw'\n 2) a hex color string, like '#00FFFF'\n 3) a standard name, like 'aqua'\n 4) a string representation of a float, like '0.4',\n indicating gray on a 0-1 scale\n\n if *arg* is *RGBA*, the *A* will simply be discarded.\n \"\"\"\n # Gray must be a string to distinguish 3-4 grays from RGB or RGBA.\n\n try:\n return self.cache[arg]\n except KeyError:\n pass\n except TypeError: # could be unhashable rgb seq\n arg = tuple(arg)\n try:\n return self.cache[arg]\n except KeyError:\n pass\n except TypeError:\n raise ValueError(\n 'to_rgb: arg \"%s\" is unhashable even inside a tuple'\n % (str(arg),))\n\n try:\n if cbook.is_string_like(arg):\n argl = arg.lower()\n color = self.colors.get(argl, None)\n if color is None:\n str1 = cnames.get(argl, argl)\n if str1.startswith('#'):\n color = hex2color(str1)\n else:\n fl = float(argl)\n if fl < 0 or fl > 1:\n raise ValueError(\n 'gray (string) must be in range 0-1')\n color = (fl,)*3\n elif cbook.iterable(arg):\n if len(arg) > 4 or len(arg) < 3:\n raise ValueError(\n 'sequence length is %d; must be 3 or 4' % len(arg))\n color = tuple(arg[:3])\n if [x for x in color if (float(x) < 0) or (x > 1)]:\n # This will raise TypeError if x is not a number.\n raise ValueError(\n 'number in rbg sequence outside 0-1 range')\n else:\n raise ValueError(\n 'cannot convert argument to rgb sequence')\n\n self.cache[arg] = color\n\n except (KeyError, ValueError, TypeError) as exc:\n raise ValueError(\n 'to_rgb: Invalid rgb arg \"%s\"\\n%s' % (str(arg), exc))\n # Error messages could be improved by handling TypeError\n # separately; but this should be rare and not too hard\n # for the user to figure out as-is.\n return color\n\n def to_rgba(self, arg, alpha=None):\n \"\"\"\n Returns an *RGBA* tuple of four floats from 0-1.\n\n For acceptable values of *arg*, see :meth:`to_rgb`.\n In addition, if *arg* is \"none\" (case-insensitive),\n then (0,0,0,0) will be returned.\n If *arg* is an *RGBA* sequence and *alpha* is not *None*,\n *alpha* will replace the original *A*.\n \"\"\"\n try:\n if arg.lower() == 'none':\n return (0.0, 0.0, 0.0, 0.0)\n except AttributeError:\n pass\n\n try:\n if not cbook.is_string_like(arg) and cbook.iterable(arg):\n if len(arg) == 4:\n if [x for x in arg if (float(x) < 0) or (x > 1)]:\n # This will raise TypeError if x is not a number.\n raise ValueError(\n 'number in rbga sequence outside 0-1 range')\n if alpha is None:\n return tuple(arg)\n if alpha < 0.0 or alpha > 1.0:\n raise ValueError(\"alpha must be in range 0-1\")\n return arg[0], arg[1], arg[2], alpha\n r, g, b = arg[:3]\n if [x for x in (r, g, b) if (float(x) < 0) or (x > 1)]:\n raise ValueError(\n 'number in rbg sequence outside 0-1 range')\n else:\n r, g, b = self.to_rgb(arg)\n if alpha is None:\n alpha = 1.0\n return r, g, b, alpha\n except (TypeError, ValueError) as exc:\n raise ValueError(\n 'to_rgba: Invalid rgba arg \"%s\"\\n%s' % (str(arg), exc))\n\n def to_rgba_array(self, c, alpha=None):\n \"\"\"\n Returns a numpy array of *RGBA* tuples.\n\n Accepts a single mpl color spec or a sequence of specs.\n\n 
Special case to handle \"no color\": if *c* is \"none\" (case-insensitive),\n then an empty array will be returned. Same for an empty list.\n \"\"\"\n try:\n nc = len(c)\n except TypeError:\n raise ValueError(\n \"Cannot convert argument type %s to rgba array\" % type(c))\n try:\n if nc == 0 or c.lower() == 'none':\n return np.zeros((0, 4), dtype=np.float)\n except AttributeError:\n pass\n try:\n # Single value? Put it in an array with a single row.\n return np.array([self.to_rgba(c, alpha)], dtype=np.float)\n except ValueError:\n if isinstance(c, np.ndarray):\n if c.ndim != 2 and c.dtype.kind not in 'SU':\n raise ValueError(\"Color array must be two-dimensional\")\n if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):\n if (c.ravel() > 1).any() or (c.ravel() < 0).any():\n raise ValueError(\n \"number in rgba sequence is outside 0-1 range\")\n result = np.asarray(c, np.float)\n if alpha is not None:\n if alpha > 1 or alpha < 0:\n raise ValueError(\"alpha must be in 0-1 range\")\n result[:, 3] = alpha\n return result\n # This alpha operation above is new, and depends\n # on higher levels to refrain from setting alpha\n # to values other than None unless there is\n # intent to override any existing alpha values.\n\n # It must be some other sequence of color specs.\n result = np.zeros((nc, 4), dtype=np.float)\n for i, cc in enumerate(c):\n result[i] = self.to_rgba(cc, alpha)\n return result\n\n\ncolorConverter = ColorConverter()\n\n\ndef makeMappingArray(N, data, gamma=1.0):\n \"\"\"Create an *N* -element 1-d lookup table\n\n *data* represented by a list of x,y0,y1 mapping correspondences.\n Each element in this list represents how a value between 0 and 1\n (inclusive) represented by x is mapped to a corresponding value\n between 0 and 1 (inclusive). The two values of y are to allow\n for discontinuous mapping functions (say as might be found in a\n sawtooth) where y0 represents the value of y for values of x\n <= to that given, and y1 is the value to be used for x > than\n that given). The list must start with x=0, end with x=1, and\n all values of x must be in increasing order. Values between\n the given mapping points are determined by simple linear interpolation.\n\n Alternatively, data can be a function mapping values between 0 - 1\n to 0 - 1.\n\n The function returns an array \"result\" where ``result[x*(N-1)]``\n gives the closest value for values of x between 0 and 1.\n \"\"\"\n\n if six.callable(data):\n xind = np.linspace(0, 1, N) ** gamma\n lut = np.clip(np.array(data(xind), dtype=np.float), 0, 1)\n return lut\n\n try:\n adata = np.array(data)\n except:\n raise TypeError(\"data must be convertable to an array\")\n shape = adata.shape\n if len(shape) != 2 and shape[1] != 3:\n raise ValueError(\"data must be nx3 format\")\n\n x = adata[:, 0]\n y0 = adata[:, 1]\n y1 = adata[:, 2]\n\n if x[0] != 0. or x[-1] != 1.0:\n raise ValueError(\n \"data mapping points must start with x=0. 
and end with x=1\")\n if np.sometrue(np.sort(x) - x):\n raise ValueError(\n \"data mapping points must have x in increasing order\")\n # begin generation of lookup table\n x = x * (N - 1)\n lut = np.zeros((N,), np.float)\n xind = (N - 1) * np.linspace(0, 1, N) ** gamma\n ind = np.searchsorted(x, xind)[1:-1]\n\n lut[1:-1] = (((xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])) *\n (y0[ind] - y1[ind - 1]) + y1[ind - 1])\n lut[0] = y1[0]\n lut[-1] = y0[-1]\n # ensure that the lut is confined to values between 0 and 1 by clipping it\n np.clip(lut, 0.0, 1.0)\n #lut = where(lut > 1., 1., lut)\n #lut = where(lut < 0., 0., lut)\n return lut\n\n\nclass Colormap(object):\n \"\"\"\n Baseclass for all scalar to RGBA mappings.\n\n Typically Colormap instances are used to convert data values (floats) from\n the interval ``[0, 1]`` to the RGBA color that the respective Colormap\n represents. For scaling of data into the ``[0, 1]`` interval see\n :class:`matplotlib.colors.Normalize`. It is worth noting that\n :class:`matplotlib.cm.ScalarMappable` subclasses make heavy use of this\n ``data->normalize->map-to-color`` processing chain.\n\n \"\"\"\n def __init__(self, name, N=256):\n r\"\"\"\n Parameters\n ----------\n name : str\n The name of the colormap.\n N : int\n The number of rgb quantization levels.\n\n \"\"\"\n self.name = name\n self.N = int(N) # ensure that N is always int\n self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.\n self._rgba_under = None\n self._rgba_over = None\n self._i_under = self.N\n self._i_over = self.N + 1\n self._i_bad = self.N + 2\n self._isinit = False\n\n #: When this colormap exists on a scalar mappable and colorbar_extend\n #: is not False, colorbar creation will pick up ``colorbar_extend`` as\n #: the default value for the ``extend`` keyword in the\n #: :class:`matplotlib.colorbar.Colorbar` constructor.\n self.colorbar_extend = False\n\n def __call__(self, X, alpha=None, bytes=False):\n \"\"\"\n Parameters\n ----------\n X : scalar, ndarray\n The data value(s) to convert to RGBA.\n For floats, X should be in the interval ``[0.0, 1.0]`` to\n return the RGBA values ``X*100`` percent along the Colormap line.\n For integers, X should be in the interval ``[0, Colormap.N)`` to\n return RGBA values *indexed* from the Colormap with index ``X``.\n alpha : float, None\n Alpha must be a scalar between 0 and 1, or None.\n bytes : bool\n If False (default), the returned RGBA values will be floats in the\n interval ``[0, 1]`` otherwise they will be uint8s in the interval\n ``[0, 255]``.\n\n Returns\n -------\n Tuple of RGBA values if X is scalar, othewise an array of\n RGBA values with a shape of ``X.shape + (4, )``.\n\n \"\"\"\n # See class docstring for arg/kwarg documentation.\n if not self._isinit:\n self._init()\n mask_bad = None\n if not cbook.iterable(X):\n vtype = 'scalar'\n xa = np.array([X])\n else:\n vtype = 'array'\n xma = ma.array(X, copy=True) # Copy here to avoid side effects.\n mask_bad = xma.mask # Mask will be used below.\n xa = xma.filled() # Fill to avoid infs, etc.\n del xma\n\n # Calculations with native byteorder are faster, and avoid a\n # bug that otherwise can occur with putmask when the last\n # argument is a numpy scalar.\n if not xa.dtype.isnative:\n xa = xa.byteswap().newbyteorder()\n\n if xa.dtype.kind == \"f\":\n # Treat 1.0 as slightly less than 1.\n vals = np.array([1, 0], dtype=xa.dtype)\n almost_one = np.nextafter(*vals)\n cbook._putmask(xa, xa == 1.0, almost_one)\n # The following clip is fast, and prevents possible\n # conversion 
of large positive values to negative integers.\n\n xa *= self.N\n if NP_CLIP_OUT:\n np.clip(xa, -1, self.N, out=xa)\n else:\n xa = np.clip(xa, -1, self.N)\n\n # ensure that all 'under' values will still have negative\n # value after casting to int\n cbook._putmask(xa, xa < 0.0, -1)\n xa = xa.astype(int)\n # Set the over-range indices before the under-range;\n # otherwise the under-range values get converted to over-range.\n cbook._putmask(xa, xa > self.N - 1, self._i_over)\n cbook._putmask(xa, xa < 0, self._i_under)\n if mask_bad is not None:\n if mask_bad.shape == xa.shape:\n cbook._putmask(xa, mask_bad, self._i_bad)\n elif mask_bad:\n xa.fill(self._i_bad)\n if bytes:\n lut = (self._lut * 255).astype(np.uint8)\n else:\n lut = self._lut.copy() # Don't let alpha modify original _lut.\n\n if alpha is not None:\n alpha = min(alpha, 1.0) # alpha must be between 0 and 1\n alpha = max(alpha, 0.0)\n if bytes:\n alpha = int(alpha * 255)\n if (lut[-1] == 0).all():\n lut[:-1, -1] = alpha\n # All zeros is taken as a flag for the default bad\n # color, which is no color--fully transparent. We\n # don't want to override this.\n else:\n lut[:, -1] = alpha\n # If the bad value is set to have a color, then we\n # override its alpha just as for any other value.\n\n rgba = np.empty(shape=xa.shape + (4,), dtype=lut.dtype)\n lut.take(xa, axis=0, mode='clip', out=rgba)\n # twice as fast as lut[xa];\n # using the clip or wrap mode and providing an\n # output array speeds it up a little more.\n if vtype == 'scalar':\n rgba = tuple(rgba[0, :])\n return rgba\n\n def set_bad(self, color='k', alpha=None):\n '''Set color to be used for masked values.\n '''\n self._rgba_bad = colorConverter.to_rgba(color, alpha)\n if self._isinit:\n self._set_extremes()\n\n def set_under(self, color='k', alpha=None):\n '''Set color to be used for low out-of-range values.\n Requires norm.clip = False\n '''\n self._rgba_under = colorConverter.to_rgba(color, alpha)\n if self._isinit:\n self._set_extremes()\n\n def set_over(self, color='k', alpha=None):\n '''Set color to be used for high out-of-range values.\n Requires norm.clip = False\n '''\n self._rgba_over = colorConverter.to_rgba(color, alpha)\n if self._isinit:\n self._set_extremes()\n\n def _set_extremes(self):\n if self._rgba_under:\n self._lut[self._i_under] = self._rgba_under\n else:\n self._lut[self._i_under] = self._lut[0]\n if self._rgba_over:\n self._lut[self._i_over] = self._rgba_over\n else:\n self._lut[self._i_over] = self._lut[self.N - 1]\n self._lut[self._i_bad] = self._rgba_bad\n\n def _init(self):\n '''Generate the lookup table, self._lut'''\n raise NotImplementedError(\"Abstract class only\")\n\n def is_gray(self):\n if not self._isinit:\n self._init()\n return (np.alltrue(self._lut[:, 0] == self._lut[:, 1]) and\n np.alltrue(self._lut[:, 0] == self._lut[:, 2]))\n\n\nclass LinearSegmentedColormap(Colormap):\n \"\"\"Colormap objects based on lookup tables using linear segments.\n\n The lookup table is generated using linear interpolation for each\n primary color, with the 0-1 domain divided into any number of\n segments.\n \"\"\"\n def __init__(self, name, segmentdata, N=256, gamma=1.0):\n \"\"\"Create color map from linear mapping segments\n\n segmentdata argument is a dictionary with a red, green and blue\n entries. Each entry should be a list of *x*, *y0*, *y1* tuples,\n forming rows in a table. 
Entries for alpha are optional.\n\n Example: suppose you want red to increase from 0 to 1 over\n the bottom half, green to do the same over the middle half,\n and blue over the top half. Then you would use::\n\n cdict = {'red': [(0.0, 0.0, 0.0),\n (0.5, 1.0, 1.0),\n (1.0, 1.0, 1.0)],\n\n 'green': [(0.0, 0.0, 0.0),\n (0.25, 0.0, 0.0),\n (0.75, 1.0, 1.0),\n (1.0, 1.0, 1.0)],\n\n 'blue': [(0.0, 0.0, 0.0),\n (0.5, 0.0, 0.0),\n (1.0, 1.0, 1.0)]}\n\n Each row in the table for a given color is a sequence of\n *x*, *y0*, *y1* tuples. In each sequence, *x* must increase\n monotonically from 0 to 1. For any input value *z* falling\n between *x[i]* and *x[i+1]*, the output value of a given color\n will be linearly interpolated between *y1[i]* and *y0[i+1]*::\n\n row i: x y0 y1\n /\n /\n row i+1: x y0 y1\n\n Hence y0 in the first row and y1 in the last row are never used.\n\n\n .. seealso::\n\n :meth:`LinearSegmentedColormap.from_list`\n Static method; factory function for generating a\n smoothly-varying LinearSegmentedColormap.\n\n :func:`makeMappingArray`\n For information about making a mapping array.\n \"\"\"\n # True only if all colors in map are identical; needed for contouring.\n self.monochrome = False\n Colormap.__init__(self, name, N)\n self._segmentdata = segmentdata\n self._gamma = gamma\n\n def _init(self):\n self._lut = np.ones((self.N + 3, 4), np.float)\n self._lut[:-3, 0] = makeMappingArray(\n self.N, self._segmentdata['red'], self._gamma)\n self._lut[:-3, 1] = makeMappingArray(\n self.N, self._segmentdata['green'], self._gamma)\n self._lut[:-3, 2] = makeMappingArray(\n self.N, self._segmentdata['blue'], self._gamma)\n if 'alpha' in self._segmentdata:\n self._lut[:-3, 3] = makeMappingArray(\n self.N, self._segmentdata['alpha'], 1)\n self._isinit = True\n self._set_extremes()\n\n def set_gamma(self, gamma):\n \"\"\"\n Set a new gamma value and regenerate color map.\n \"\"\"\n self._gamma = gamma\n self._init()\n\n @staticmethod\n def from_list(name, colors, N=256, gamma=1.0):\n \"\"\"\n Make a linear segmented colormap with *name* from a sequence\n of *colors* which evenly transitions from colors[0] at val=0\n to colors[-1] at val=1. *N* is the number of rgb quantization\n levels.\n Alternatively, a list of (value, color) tuples can be given\n to divide the range unevenly.\n \"\"\"\n\n if not cbook.iterable(colors):\n raise ValueError('colors must be iterable')\n\n if cbook.iterable(colors[0]) and len(colors[0]) == 2 and \\\n not cbook.is_string_like(colors[0]):\n # List of value, color pairs\n vals, colors = list(zip(*colors))\n else:\n vals = np.linspace(0., 1., len(colors))\n\n cdict = dict(red=[], green=[], blue=[], alpha=[])\n for val, color in zip(vals, colors):\n r, g, b, a = colorConverter.to_rgba(color)\n cdict['red'].append((val, r, r))\n cdict['green'].append((val, g, g))\n cdict['blue'].append((val, b, b))\n cdict['alpha'].append((val, a, a))\n\n return LinearSegmentedColormap(name, cdict, N, gamma)\n\n\nclass ListedColormap(Colormap):\n \"\"\"Colormap object generated from a list of colors.\n\n This may be most useful when indexing directly into a colormap,\n but it can also be used to generate special colormaps for ordinary\n mapping.\n \"\"\"\n def __init__(self, colors, name='from_list', N=None):\n \"\"\"\n Make a colormap from a list of colors.\n\n *colors*\n a list of matplotlib color specifications,\n or an equivalent Nx3 or Nx4 floating point array\n (*N* rgb or rgba values)\n *name*\n a string to identify the colormap\n *N*\n the number of entries in the map. 
The default is *None*,\n in which case there is one colormap entry for each\n element in the list of colors. If::\n\n N < len(colors)\n\n the list will be truncated at *N*. If::\n\n N > len(colors)\n\n the list will be extended by repetition.\n \"\"\"\n self.colors = colors\n self.monochrome = False # True only if all colors in map are\n # identical; needed for contouring.\n if N is None:\n N = len(self.colors)\n else:\n if cbook.is_string_like(self.colors):\n self.colors = [self.colors] * N\n self.monochrome = True\n elif cbook.iterable(self.colors):\n self.colors = list(self.colors) # in case it was a tuple\n if len(self.colors) == 1:\n self.monochrome = True\n if len(self.colors) < N:\n self.colors = list(self.colors) * N\n del(self.colors[N:])\n else:\n try:\n gray = float(self.colors)\n except TypeError:\n pass\n else:\n self.colors = [gray] * N\n self.monochrome = True\n Colormap.__init__(self, name, N)\n\n def _init(self):\n rgba = colorConverter.to_rgba_array(self.colors)\n self._lut = np.zeros((self.N + 3, 4), np.float)\n self._lut[:-3] = rgba\n self._isinit = True\n self._set_extremes()\n\n\nclass Normalize(object):\n \"\"\"\n A class which, when called, can normalize data into\n the ``[0.0, 1.0]`` interval.\n\n \"\"\"\n def __init__(self, vmin=None, vmax=None, clip=False):\n \"\"\"\n If *vmin* or *vmax* is not given, they are taken from the input's\n minimum and maximum value respectively. If *clip* is *True* and\n the given value falls outside the range, the returned value\n will be 0 or 1, whichever is closer. Returns 0 if::\n\n vmin==vmax\n\n Works with scalars or arrays, including masked arrays. If\n *clip* is *True*, masked values are set to 1; otherwise they\n remain masked. Clipping silently defeats the purpose of setting\n the over, under, and masked colors in the colormap, so it is\n likely to lead to surprises; therefore the default is\n *clip* = *False*.\n \"\"\"\n self.vmin = vmin\n self.vmax = vmax\n self.clip = clip\n\n @staticmethod\n def process_value(value):\n \"\"\"\n Homogenize the input *value* for easy and efficient normalization.\n\n *value* can be a scalar or sequence.\n\n Returns *result*, *is_scalar*, where *result* is a\n masked array matching *value*. Float dtypes are preserved;\n integer types with two bytes or smaller are converted to\n np.float32, and larger types are converted to np.float.\n Preserving float32 when possible, and using in-place operations,\n can greatly improve speed for large arrays.\n\n Experimental; we may want to add an option to force the\n use of float32.\n \"\"\"\n if cbook.iterable(value):\n is_scalar = False\n result = ma.asarray(value)\n if result.dtype.kind == 'f':\n if isinstance(value, np.ndarray):\n result = result.copy()\n elif result.dtype.itemsize > 2:\n result = result.astype(np.float)\n else:\n result = result.astype(np.float32)\n else:\n is_scalar = True\n result = ma.array([value]).astype(np.float)\n return result, is_scalar\n\n def __call__(self, value, clip=None):\n if clip is None:\n clip = self.clip\n\n result, is_scalar = self.process_value(value)\n\n self.autoscale_None(result)\n vmin, vmax = self.vmin, self.vmax\n if vmin == vmax:\n result.fill(0) # Or should it be all masked? 
Or 0.5?\n elif vmin > vmax:\n raise ValueError(\"minvalue must be less than or equal to maxvalue\")\n else:\n vmin = float(vmin)\n vmax = float(vmax)\n if clip:\n mask = ma.getmask(result)\n result = ma.array(np.clip(result.filled(vmax), vmin, vmax),\n mask=mask)\n # ma division is very slow; we can take a shortcut\n resdat = result.data\n resdat -= vmin\n resdat /= (vmax - vmin)\n result = np.ma.array(resdat, mask=result.mask, copy=False)\n if is_scalar:\n result = result[0]\n return result\n\n def inverse(self, value):\n if not self.scaled():\n raise ValueError(\"Not invertible until scaled\")\n vmin = float(self.vmin)\n vmax = float(self.vmax)\n\n if cbook.iterable(value):\n val = ma.asarray(value)\n return vmin + val * (vmax - vmin)\n else:\n return vmin + value * (vmax - vmin)\n\n def autoscale(self, A):\n '''\n Set *vmin*, *vmax* to min, max of *A*.\n '''\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)\n\n def autoscale_None(self, A):\n ' autoscale only None-valued vmin or vmax'\n if self.vmin is None and np.size(A) > 0:\n self.vmin = ma.min(A)\n if self.vmax is None and np.size(A) > 0:\n self.vmax = ma.max(A)\n\n def scaled(self):\n 'return true if vmin and vmax set'\n return (self.vmin is not None and self.vmax is not None)\n\n\nclass LogNorm(Normalize):\n \"\"\"\n Normalize a given value to the 0-1 range on a log scale\n \"\"\"\n def __call__(self, value, clip=None):\n if clip is None:\n clip = self.clip\n\n result, is_scalar = self.process_value(value)\n\n result = ma.masked_less_equal(result, 0, copy=False)\n\n self.autoscale_None(result)\n vmin, vmax = self.vmin, self.vmax\n if vmin > vmax:\n raise ValueError(\"minvalue must be less than or equal to maxvalue\")\n elif vmin <= 0:\n raise ValueError(\"values must all be positive\")\n elif vmin == vmax:\n result.fill(0)\n else:\n if clip:\n mask = ma.getmask(result)\n result = ma.array(np.clip(result.filled(vmax), vmin, vmax),\n mask=mask)\n # in-place equivalent of above can be much faster\n resdat = result.data\n mask = result.mask\n if mask is np.ma.nomask:\n mask = (resdat <= 0)\n else:\n mask |= resdat <= 0\n cbook._putmask(resdat, mask, 1)\n np.log(resdat, resdat)\n resdat -= np.log(vmin)\n resdat /= (np.log(vmax) - np.log(vmin))\n result = np.ma.array(resdat, mask=mask, copy=False)\n if is_scalar:\n result = result[0]\n return result\n\n def inverse(self, value):\n if not self.scaled():\n raise ValueError(\"Not invertible until scaled\")\n vmin, vmax = self.vmin, self.vmax\n\n if cbook.iterable(value):\n val = ma.asarray(value)\n return vmin * ma.power((vmax / vmin), val)\n else:\n return vmin * pow((vmax / vmin), value)\n\n def autoscale(self, A):\n '''\n Set *vmin*, *vmax* to min, max of *A*.\n '''\n A = ma.masked_less_equal(A, 0, copy=False)\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)\n\n def autoscale_None(self, A):\n ' autoscale only None-valued vmin or vmax'\n if self.vmin is not None and self.vmax is not None:\n return\n A = ma.masked_less_equal(A, 0, copy=False)\n if self.vmin is None:\n self.vmin = ma.min(A)\n if self.vmax is None:\n self.vmax = ma.max(A)\n\n\nclass SymLogNorm(Normalize):\n \"\"\"\n The symmetrical logarithmic scale is logarithmic in both the\n positive and negative directions from the origin.\n\n Since the values close to zero tend toward infinity, there is a\n need to have a range around zero that is linear. 
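A minimal sketch\n
    (hypothetical values)::\n
\n
        # linear between -0.1 and 0.1, logarithmic further out\n
        norm = SymLogNorm(linthresh=0.1, vmin=-100, vmax=100)\n
\n
    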
The parameter\n *linthresh* allows the user to specify the size of this range\n (-*linthresh*, *linthresh*).\n \"\"\"\n def __init__(self, linthresh, linscale=1.0,\n vmin=None, vmax=None, clip=False):\n \"\"\"\n *linthresh*:\n The range within which the plot is linear (to\n avoid having the plot go to infinity around zero).\n\n *linscale*:\n This allows the linear range (-*linthresh* to *linthresh*)\n to be stretched relative to the logarithmic range. Its\n value is the number of decades to use for each half of the\n linear range. For example, when *linscale* == 1.0 (the\n default), the space used for the positive and negative\n halves of the linear range will be equal to one decade in\n the logarithmic range. Defaults to 1.\n \"\"\"\n Normalize.__init__(self, vmin, vmax, clip)\n self.linthresh = float(linthresh)\n self._linscale_adj = (linscale / (1.0 - np.e ** -1))\n\n def __call__(self, value, clip=None):\n if clip is None:\n clip = self.clip\n\n result, is_scalar = self.process_value(value)\n self.autoscale_None(result)\n vmin, vmax = self.vmin, self.vmax\n\n if vmin > vmax:\n raise ValueError(\"minvalue must be less than or equal to maxvalue\")\n elif vmin == vmax:\n result.fill(0)\n else:\n if clip:\n mask = ma.getmask(result)\n result = ma.array(np.clip(result.filled(vmax), vmin, vmax),\n mask=mask)\n # in-place equivalent of above can be much faster\n resdat = self._transform(result.data)\n resdat -= self._lower\n resdat /= (self._upper - self._lower)\n\n if is_scalar:\n result = result[0]\n return result\n\n def _transform(self, a):\n \"\"\"\n Inplace transformation.\n \"\"\"\n masked = np.abs(a) > self.linthresh\n sign = np.sign(a[masked])\n log = (self._linscale_adj + np.log(np.abs(a[masked]) / self.linthresh))\n log *= sign * self.linthresh\n a[masked] = log\n a[~masked] *= self._linscale_adj\n return a\n\n def _inv_transform(self, a):\n \"\"\"\n Inverse inplace Transformation.\n \"\"\"\n masked = np.abs(a) > (self.linthresh * self._linscale_adj)\n sign = np.sign(a[masked])\n exp = np.exp(sign * a[masked] / self.linthresh - self._linscale_adj)\n exp *= sign * self.linthresh\n a[masked] = exp\n a[~masked] /= self._linscale_adj\n return a\n\n def _transform_vmin_vmax(self):\n \"\"\"\n Calculates vmin and vmax in the transformed system.\n \"\"\"\n vmin, vmax = self.vmin, self.vmax\n arr = np.array([vmax, vmin]).astype(np.float)\n self._upper, self._lower = self._transform(arr)\n\n def inverse(self, value):\n if not self.scaled():\n raise ValueError(\"Not invertible until scaled\")\n val = ma.asarray(value)\n val = val * (self._upper - self._lower) + self._lower\n return self._inv_transform(val)\n\n def autoscale(self, A):\n \"\"\"\n Set *vmin*, *vmax* to min, max of *A*.\n \"\"\"\n self.vmin = ma.min(A)\n self.vmax = ma.max(A)\n self._transform_vmin_vmax()\n\n def autoscale_None(self, A):\n \"\"\" autoscale only None-valued vmin or vmax \"\"\"\n if self.vmin is not None and self.vmax is not None:\n pass\n if self.vmin is None:\n self.vmin = ma.min(A)\n if self.vmax is None:\n self.vmax = ma.max(A)\n self._transform_vmin_vmax()\n\n\nclass BoundaryNorm(Normalize):\n '''\n Generate a colormap index based on discrete intervals.\n\n Unlike :class:`Normalize` or :class:`LogNorm`,\n :class:`BoundaryNorm` maps values to integers instead of to the\n interval 0-1.\n\n Mapping to the 0-1 interval could have been done via\n piece-wise linear interpolation, but using integers seems\n simpler, and reduces the number of conversions back and forth\n between integer and floating point.\n '''\n def 
__init__(self, boundaries, ncolors, clip=False):\n
        '''\n
        *boundaries*\n
            a monotonically increasing sequence\n
        *ncolors*\n
            number of colors in the colormap to be used\n
\n
        If::\n
\n
            b[i] <= v < b[i+1]\n
\n
        then v is mapped to color j;\n
        as i varies from 0 to len(boundaries)-2,\n
        j goes from 0 to ncolors-1.\n
\n
        Out-of-range values are mapped to -1 if low and ncolors\n
        if high; these are converted to valid indices by\n
        :meth:`Colormap.__call__` .\n
        '''\n
        self.clip = clip\n
        self.vmin = boundaries[0]\n
        self.vmax = boundaries[-1]\n
        self.boundaries = np.asarray(boundaries)\n
        self.N = len(self.boundaries)\n
        self.Ncmap = ncolors\n
        if self.N - 1 == self.Ncmap:\n
            self._interp = False\n
        else:\n
            self._interp = True\n
\n
    def __call__(self, x, clip=None):\n
        if clip is None:\n
            clip = self.clip\n
        x = ma.asarray(x)\n
        mask = ma.getmaskarray(x)\n
        xx = x.filled(self.vmax + 1)\n
        if clip:\n
            xx = np.clip(xx, self.vmin, self.vmax)\n
        iret = np.zeros(x.shape, dtype=np.int16)\n
        for i, b in enumerate(self.boundaries):\n
            iret[xx >= b] = i\n
        if self._interp:\n
            scalefac = float(self.Ncmap - 1) / (self.N - 2)\n
            iret = (iret * scalefac).astype(np.int16)\n
        iret[xx < self.vmin] = -1\n
        iret[xx >= self.vmax] = self.Ncmap\n
        ret = ma.array(iret, mask=mask)\n
        if ret.shape == () and not mask:\n
            ret = int(ret)  # assume python scalar\n
        return ret\n
\n
    def inverse(self, value):\n
        raise ValueError(\"BoundaryNorm is not invertible\")\n
\n
\n
class NoNorm(Normalize):\n
    '''\n
    Dummy replacement for Normalize, for the case where we\n
    want to use indices directly in a\n
    :class:`~matplotlib.cm.ScalarMappable` .\n
    '''\n
    def __call__(self, value, clip=None):\n
        return value\n
\n
    def inverse(self, value):\n
        return value\n
\n
# compatibility with earlier class names that violated convention:\n
normalize = cbook.deprecated('1.3', alternative='Normalize',\n
                             name='normalize',\n
                             obj_type='class alias')(Normalize)\n
no_norm = cbook.deprecated('1.3', alternative='NoNorm',\n
                           name='no_norm',\n
                           obj_type='class alias')(NoNorm)\n
\n
\n
def rgb_to_hsv(arr):\n
    \"\"\"\n
    convert float rgb values (in the range [0, 1]) in a numpy array to hsv\n
    values.\n
\n
    Parameters\n
    ----------\n
    arr : (..., 3) array-like\n
       All values must be in the range [0, 1]\n
\n
    Returns\n
    -------\n
    hsv : (..., 3) ndarray\n
       Colors converted to hsv values in range [0, 1]\n
    \"\"\"\n
    # make sure it is an ndarray\n
    arr = np.asarray(arr)\n
\n
    # check length of the last dimension, should be _some_ sort of rgb\n
    if arr.shape[-1] != 3:\n
        raise ValueError(\"Last dimension of input array must be 3; \"\n
                         \"shape {shp} was found.\".format(shp=arr.shape))\n
\n
    in_ndim = arr.ndim\n
    if arr.ndim == 1:\n
        arr = np.array(arr, ndmin=2)\n
\n
    # make sure we don't have an int image\n
    if arr.dtype.kind in ('iu'):\n
        arr = arr.astype(np.float32)\n
\n
    out = np.zeros_like(arr)\n
    arr_max = arr.max(-1)\n
    ipos = arr_max > 0\n
    delta = arr.ptp(-1)\n
    s = np.zeros_like(delta)\n
    s[ipos] = delta[ipos] / arr_max[ipos]\n
    ipos = delta > 0\n
    # red is max\n
    idx = (arr[..., 0] == arr_max) & ipos\n
    out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]\n
    # green is max\n
    idx = (arr[..., 1] == arr_max) & ipos\n
    out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]\n
    # blue is max\n
    idx = (arr[..., 2] == arr_max) & ipos
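\n
    # hue = sector offset (0 for red-max, 2 for green-max, 4 for\n
    # blue-max) plus a signed fraction of the two remaining channels;\n
    # the division by 6 and modulo below wrap it into [0, 1)\n
    out[idx, 0] = 4. 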
+ (arr[idx, 0] - arr[idx, 1]) / delta[idx]\n
\n
    out[..., 0] = (out[..., 0] / 6.0) % 1.0\n
    out[..., 1] = s\n
    out[..., 2] = arr_max\n
\n
    if in_ndim == 1:\n
        out.shape = (3,)\n
\n
    return out\n
\n
\n
def hsv_to_rgb(hsv):\n
    \"\"\"\n
    convert hsv values in a numpy array to rgb values\n
    all values assumed to be in range [0, 1]\n
\n
    Parameters\n
    ----------\n
    hsv : (..., 3) array-like\n
       All values assumed to be in range [0, 1]\n
\n
    Returns\n
    -------\n
    rgb : (..., 3) ndarray\n
       Colors converted to RGB values in range [0, 1]\n
    \"\"\"\n
    # make sure it is an ndarray\n
    hsv = np.asarray(hsv)\n
\n
    # check length of the last dimension, should be _some_ sort of rgb\n
    if hsv.shape[-1] != 3:\n
        raise ValueError(\"Last dimension of input array must be 3; \"\n
                         \"shape {shp} was found.\".format(shp=hsv.shape))\n
\n
    # if we got passed a 1D array, try to treat as\n
    # a single color and reshape as needed\n
    in_ndim = hsv.ndim\n
    if in_ndim == 1:\n
        hsv = np.array(hsv, ndmin=2)\n
\n
    # make sure we don't have an int image\n
    if hsv.dtype.kind in ('iu'):\n
        hsv = hsv.astype(np.float32)\n
\n
    h = hsv[..., 0]\n
    s = hsv[..., 1]\n
    v = hsv[..., 2]\n
\n
    r = np.empty_like(h)\n
    g = np.empty_like(h)\n
    b = np.empty_like(h)\n
\n
    i = (h * 6.0).astype(np.int)\n
    f = (h * 6.0) - i\n
    p = v * (1.0 - s)\n
    q = v * (1.0 - s * f)\n
    t = v * (1.0 - s * (1.0 - f))\n
\n
    idx = i % 6 == 0\n
    r[idx] = v[idx]\n
    g[idx] = t[idx]\n
    b[idx] = p[idx]\n
\n
    idx = i == 1\n
    r[idx] = q[idx]\n
    g[idx] = v[idx]\n
    b[idx] = p[idx]\n
\n
    idx = i == 2\n
    r[idx] = p[idx]\n
    g[idx] = v[idx]\n
    b[idx] = t[idx]\n
\n
    idx = i == 3\n
    r[idx] = p[idx]\n
    g[idx] = q[idx]\n
    b[idx] = v[idx]\n
\n
    idx = i == 4\n
    r[idx] = t[idx]\n
    g[idx] = p[idx]\n
    b[idx] = v[idx]\n
\n
    idx = i == 5\n
    r[idx] = v[idx]\n
    g[idx] = p[idx]\n
    b[idx] = q[idx]\n
\n
    idx = s == 0\n
    r[idx] = v[idx]\n
    g[idx] = v[idx]\n
    b[idx] = v[idx]\n
\n
    rgb = np.empty_like(hsv)\n
    rgb[..., 0] = r\n
    rgb[..., 1] = g\n
    rgb[..., 2] = b\n
\n
    if in_ndim == 1:\n
        rgb.shape = (3, )\n
\n
    return rgb\n
\n
\n
class LightSource(object):\n
    \"\"\"\n
    Create a light source coming from the specified azimuth and elevation.\n
    Angles are in degrees, with the azimuth measured\n
    clockwise from north and elevation up from the zero plane of the surface.\n
    The :meth:`shade` method is used to produce rgb values for a shaded relief\n
    image given a data array.\n
    \"\"\"\n
    def __init__(self, azdeg=315, altdeg=45,\n
                 hsv_min_val=0, hsv_max_val=1, hsv_min_sat=1,\n
                 hsv_max_sat=0):\n
\n
        \"\"\"\n
        Specify the azimuth (measured clockwise from north) and altitude\n
        (measured up from the plane of the surface) of the light source\n
        in degrees.\n
\n
        The color of the resulting image will be darkened\n
        by moving the (s,v) values (in hsv colorspace) toward\n
        (hsv_min_sat, hsv_min_val) in the shaded regions, or\n
        lightened by sliding (s,v) toward\n
        (hsv_max_sat, hsv_max_val) in regions that are illuminated.\n
        The default extremes are chosen so that completely shaded points\n
        are nearly black (s = 1, v = 0) and completely illuminated points\n
        are nearly white (s = 0, v = 1).\n
        \"\"\"\n
        self.azdeg = azdeg\n
        self.altdeg = altdeg\n
        self.hsv_min_val = hsv_min_val\n
        self.hsv_max_val = hsv_max_val\n
        self.hsv_min_sat = hsv_min_sat\n
        self.hsv_max_sat = hsv_max_sat\n
\n
    def shade(self, data, cmap, norm=None):\n
        \"\"\"\n
        Take the input data array, convert to HSV values in the\n
        given colormap, then adjust those color values\n
        to give the impression of a shaded relief map with a\n
        specified light source.\n
        RGBA values are returned, which can then be used to\n
        plot the shaded image with imshow.\n
        \"\"\"\n
\n
        if norm is None:
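\n
            # no norm given: fall back to linear scaling over the data's\n
            # own range (assumes *data* has well-defined min/max)\n
            norm = 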
Normalize(vmin=data.min(), vmax=data.max())\n
\n
        rgb0 = cmap(norm(data))\n
        rgb1 = self.shade_rgb(rgb0, elevation=data)\n
        rgb0[:, :, 0:3] = rgb1\n
        return rgb0\n
\n
    def shade_rgb(self, rgb, elevation, fraction=1.):\n
        \"\"\"\n
        Take the input RGB array (ny*nx*3) and adjust its color values\n
        to give the impression of a shaded relief map with a\n
        specified light source using the elevation (ny*nx).\n
        A new RGB array (ny*nx*3) is returned.\n
        \"\"\"\n
        # imagine an artificial sun placed at infinity in some azimuth and\n
        # elevation position illuminating our surface. The parts of the\n
        # surface that slope toward the sun should brighten while those sides\n
        # facing away should become darker. convert alt, az to radians\n
        az = self.azdeg * np.pi / 180.0\n
        alt = self.altdeg * np.pi / 180.0\n
        # gradient in x and y directions\n
        dx, dy = np.gradient(elevation)\n
        slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))\n
        aspect = np.arctan2(dx, dy)\n
        intensity = (np.sin(alt) * np.sin(slope) + np.cos(alt) *\n
                     np.cos(slope) * np.cos(-az - aspect - 0.5 * np.pi))\n
        # rescale to interval -1,1\n
        # +1 means maximum sun exposure and -1 means complete shade.\n
        intensity = (intensity - intensity.min()) / \\\n
                    (intensity.max() - intensity.min())\n
        intensity = (2. * intensity - 1.) * fraction\n
        # convert to rgb, then rgb to hsv\n
        #rgb = cmap((data-data.min())/(data.max()-data.min()))\n
        hsv = rgb_to_hsv(rgb[:, :, 0:3])\n
        # modify hsv values to simulate illumination.\n
\n
        hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,\n
                                               intensity > 0),\n
                                ((1. - intensity) * hsv[:, :, 1] +\n
                                 intensity * self.hsv_max_sat),\n
                                hsv[:, :, 1])\n
\n
        hsv[:, :, 2] = np.where(intensity > 0,\n
                                ((1. - intensity) * hsv[:, :, 2] +\n
                                 intensity * self.hsv_max_val),\n
                                hsv[:, :, 2])\n
\n
        hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10,\n
                                               intensity < 0),\n
                                ((1. + intensity) * hsv[:, :, 1] -\n
                                 intensity * self.hsv_min_sat),\n
                                hsv[:, :, 1])\n
        hsv[:, :, 2] = np.where(intensity < 0,\n
                                ((1. + intensity) * hsv[:, :, 2] -\n
                                 intensity * self.hsv_min_val),\n
                                hsv[:, :, 2])\n
        hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])\n
        hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])\n
        # convert modified hsv back to rgb.\n
        return hsv_to_rgb(hsv)\n
\n
\n
def from_levels_and_colors(levels, colors, extend='neither'):\n
    \"\"\"\n
    A helper routine to generate a cmap and a norm instance which\n
    behave similarly to contourf's levels and colors arguments.\n
\n
    Parameters\n
    ----------\n
    levels : sequence of numbers\n
        The quantization levels used to construct the :class:`BoundaryNorm`.\n
        Values ``v`` are quantized to level ``i`` if\n
        ``lev[i] <= v < lev[i+1]``.\n
    colors : sequence of colors\n
        The fill color to use for each level. If `extend` is \"neither\" there\n
        must be ``n_level - 1`` colors. 
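(For example, four\n
        levels require three colors.) 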
For an `extend` of \"min\" or \"max\" add\n one extra color, and for an `extend` of \"both\" add two colors.\n extend : {'neither', 'min', 'max', 'both'}, optional\n The behaviour when a value falls out of range of the given levels.\n See :func:`~matplotlib.pyplot.contourf` for details.\n\n Returns\n -------\n (cmap, norm) : tuple containing a :class:`Colormap` and a \\\n :class:`Normalize` instance\n \"\"\"\n colors_i0 = 0\n colors_i1 = None\n\n if extend == 'both':\n colors_i0 = 1\n colors_i1 = -1\n extra_colors = 2\n elif extend == 'min':\n colors_i0 = 1\n extra_colors = 1\n elif extend == 'max':\n colors_i1 = -1\n extra_colors = 1\n elif extend == 'neither':\n extra_colors = 0\n else:\n raise ValueError('Unexpected value for extend: {0!r}'.format(extend))\n\n n_data_colors = len(levels) - 1\n n_expected_colors = n_data_colors + extra_colors\n if len(colors) != n_expected_colors:\n raise ValueError('With extend == {0!r} and n_levels == {1!r} expected'\n ' n_colors == {2!r}. Got {3!r}.'\n ''.format(extend, len(levels), n_expected_colors,\n len(colors)))\n\n cmap = ListedColormap(colors[colors_i0:colors_i1], N=n_data_colors)\n\n if extend in ['min', 'both']:\n cmap.set_under(colors[0])\n else:\n cmap.set_under('none')\n\n if extend in ['max', 'both']:\n cmap.set_over(colors[-1])\n else:\n cmap.set_over('none')\n\n cmap.colorbar_extend = extend\n\n norm = BoundaryNorm(levels, ncolors=n_data_colors)\n return cmap, norm\n", "#!/usr/bin/env python\nimport numpy as np\nfrom basic_units import cm, inch\nimport matplotlib.pyplot as plt\n\n\nN = 5\nmenMeans = (150*cm, 160*cm, 146*cm, 172*cm, 155*cm)\nmenStd = ( 20*cm, 30*cm, 32*cm, 10*cm, 20*cm)\n\nfig, ax = plt.subplots()\n\nind = np.arange(N) # the x locations for the groups\nwidth = 0.35 # the width of the bars\np1 = ax.bar(ind, menMeans, width, color='r', bottom=0*cm, yerr=menStd)\n\n\nwomenMeans = (145*cm, 149*cm, 172*cm, 165*cm, 200*cm)\nwomenStd = (30*cm, 25*cm, 20*cm, 31*cm, 22*cm)\np2 = ax.bar(ind+width, womenMeans, width, color='y', bottom=0*cm, yerr=womenStd)\n\nax.set_title('Scores by group and gender')\nax.set_xticks(ind+width)\nax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )\n\nax.legend( (p1[0], p2[0]), ('Men', 'Women') )\nax.yaxis.set_units(inch)\nax.autoscale_view()\n\n#plt.savefig('barchart_demo')\nplt.show()\n", "# -*- coding: utf-8 -*-\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom six.moves import map, zip\n\nimport math\n\nimport matplotlib as mpl\nimport numpy as np\nimport matplotlib.cbook as cbook\nimport matplotlib.artist as artist\nfrom matplotlib.artist import allow_rasterization\nimport matplotlib.colors as colors\nfrom matplotlib import docstring\nimport matplotlib.transforms as transforms\nfrom matplotlib.path import Path\nfrom matplotlib.cbook import mplDeprecation\n\n# these are not available for the object inspector until after the\n# class is built so we define an initial set here for the init\n# function and they will be overridden after object definition\ndocstring.interpd.update(Patch=\"\"\"\n\n ================= ==============================================\n Property Description\n ================= ==============================================\n alpha float\n animated [True | False]\n antialiased or aa [True | False]\n capstyle ['butt' | 'round' | 'projecting']\n clip_box a matplotlib.transform.Bbox instance\n clip_on [True | False]\n edgecolor or ec any matplotlib color\n facecolor or fc any matplotlib color\n figure a 
matplotlib.figure.Figure instance\n
    fill               [True | False]\n
    hatch              unknown\n
    joinstyle          ['miter' | 'round' | 'bevel']\n
    label              any string\n
    linewidth or lw    float\n
    lod                [True | False]\n
    transform          a matplotlib.transform transformation instance\n
    visible            [True | False]\n
    zorder             any number\n
    =================  ==============================================\n
\n
    \"\"\")\n
\n
\n
class Patch(artist.Artist):\n
    \"\"\"\n
    A patch is a 2D artist with a face color and an edge color.\n
\n
    If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*\n
    are *None*, they default to their rc params setting.\n
    \"\"\"\n
    zorder = 1\n
    validCap = ('butt', 'round', 'projecting')\n
    validJoin = ('miter', 'round', 'bevel')\n
\n
    def __str__(self):\n
        return str(self.__class__).split('.')[-1]\n
\n
    def __init__(self,\n
                 edgecolor=None,\n
                 facecolor=None,\n
                 color=None,\n
                 linewidth=None,\n
                 linestyle=None,\n
                 antialiased=None,\n
                 hatch=None,\n
                 fill=True,\n
                 capstyle=None,\n
                 joinstyle=None,\n
                 **kwargs):\n
        \"\"\"\n
        The following kwarg properties are supported\n
\n
        %(Patch)s\n
        \"\"\"\n
        artist.Artist.__init__(self)\n
\n
        if linewidth is None:\n
            linewidth = mpl.rcParams['patch.linewidth']\n
        if linestyle is None:\n
            linestyle = \"solid\"\n
        if capstyle is None:\n
            capstyle = 'butt'\n
        if joinstyle is None:\n
            joinstyle = 'miter'\n
        if antialiased is None:\n
            antialiased = mpl.rcParams['patch.antialiased']\n
\n
        self._fill = True  # needed for set_facecolor call\n
        if color is not None:\n
            if (edgecolor is not None or\n
                    facecolor is not None):\n
                import warnings\n
                warnings.warn(\"Setting the 'color' property will override \"\n
                              \"the edgecolor or facecolor properties.\")\n
            self.set_color(color)\n
        else:\n
            self.set_edgecolor(edgecolor)\n
            self.set_facecolor(facecolor)\n
        self.set_linewidth(linewidth)\n
        self.set_linestyle(linestyle)\n
        self.set_antialiased(antialiased)\n
        self.set_hatch(hatch)\n
        self.set_fill(fill)\n
        self.set_capstyle(capstyle)\n
        self.set_joinstyle(joinstyle)\n
        self._combined_transform = transforms.IdentityTransform()\n
\n
        if len(kwargs):\n
            self.update(kwargs)\n
\n
    def get_verts(self):\n
        \"\"\"\n
        Return a copy of the vertices used in this patch\n
\n
        If the patch contains Bezier curves, the curves will be\n
        interpolated by line segments.  To access the curves as\n
        curves, use :meth:`get_path`.\n
        \"\"\"\n
        trans = self.get_transform()\n
        path = self.get_path()\n
        polygons = path.to_polygons(trans)\n
        if len(polygons):\n
            return polygons[0]\n
        return []\n
\n
    def contains(self, mouseevent, radius=None):\n
        \"\"\"Test whether the mouse event occurred in the patch.\n
\n
        Returns T/F, {}\n
        \"\"\"\n
        # This is a general version of contains that should work on any\n
        # patch with a path. 
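(the hit test itself is\n
        # delegated to Path.contains_point below, with the patch's\n
        # linewidth as the default pick radius). 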
However, patches that have a faster\n # algebraic solution to hit-testing should override this\n # method.\n if six.callable(self._contains):\n return self._contains(self, mouseevent)\n if radius is None:\n radius = self.get_linewidth()\n inside = self.get_path().contains_point(\n (mouseevent.x, mouseevent.y), self.get_transform(), radius)\n return inside, {}\n\n def contains_point(self, point, radius=None):\n \"\"\"\n Returns *True* if the given point is inside the path\n (transformed with its transform attribute).\n \"\"\"\n if radius is None:\n radius = self.get_linewidth()\n return self.get_path().contains_point(point,\n self.get_transform(),\n radius)\n\n def update_from(self, other):\n \"\"\"\n Updates this :class:`Patch` from the properties of *other*.\n \"\"\"\n artist.Artist.update_from(self, other)\n self.set_edgecolor(other.get_edgecolor())\n self.set_facecolor(other.get_facecolor())\n self.set_fill(other.get_fill())\n self.set_hatch(other.get_hatch())\n self.set_linewidth(other.get_linewidth())\n self.set_linestyle(other.get_linestyle())\n self.set_transform(other.get_data_transform())\n self.set_figure(other.get_figure())\n self.set_alpha(other.get_alpha())\n\n def get_extents(self):\n \"\"\"\n Return a :class:`~matplotlib.transforms.Bbox` object defining\n the axis-aligned extents of the :class:`Patch`.\n \"\"\"\n return self.get_path().get_extents(self.get_transform())\n\n def get_transform(self):\n \"\"\"\n Return the :class:`~matplotlib.transforms.Transform` applied\n to the :class:`Patch`.\n \"\"\"\n return self.get_patch_transform() + artist.Artist.get_transform(self)\n\n def get_data_transform(self):\n \"\"\"\n Return the :class:`~matplotlib.transforms.Transform` instance which\n maps data coordinates to physical coordinates.\n \"\"\"\n return artist.Artist.get_transform(self)\n\n def get_patch_transform(self):\n \"\"\"\n Return the :class:`~matplotlib.transforms.Transform` instance which\n takes patch coordinates to data coordinates.\n\n For example, one may define a patch of a circle which represents a\n radius of 5 by providing coordinates for a unit circle, and a\n transform which scales the coordinates (the patch coordinate) by 5.\n \"\"\"\n return transforms.IdentityTransform()\n\n def get_antialiased(self):\n \"\"\"\n Returns True if the :class:`Patch` is to be drawn with antialiasing.\n \"\"\"\n return self._antialiased\n get_aa = get_antialiased\n\n def get_edgecolor(self):\n \"\"\"\n Return the edge color of the :class:`Patch`.\n \"\"\"\n return self._edgecolor\n get_ec = get_edgecolor\n\n def get_facecolor(self):\n \"\"\"\n Return the face color of the :class:`Patch`.\n \"\"\"\n return self._facecolor\n get_fc = get_facecolor\n\n def get_linewidth(self):\n \"\"\"\n Return the line width in points.\n \"\"\"\n return self._linewidth\n get_lw = get_linewidth\n\n def get_linestyle(self):\n \"\"\"\n Return the linestyle. 
Will be one of ['solid' | 'dashed' |\n
        'dashdot' | 'dotted']\n
        \"\"\"\n
        return self._linestyle\n
    get_ls = get_linestyle\n
\n
    def set_antialiased(self, aa):\n
        \"\"\"\n
        Set whether to use antialiased rendering\n
\n
        ACCEPTS: [True | False]  or None for default\n
        \"\"\"\n
        if aa is None:\n
            aa = mpl.rcParams['patch.antialiased']\n
        self._antialiased = aa\n
\n
    def set_aa(self, aa):\n
        \"\"\"alias for set_antialiased\"\"\"\n
        return self.set_antialiased(aa)\n
\n
    def set_edgecolor(self, color):\n
        \"\"\"\n
        Set the patch edge color\n
\n
        ACCEPTS: mpl color spec, or None for default, or 'none' for no color\n
        \"\"\"\n
        if color is None:\n
            color = mpl.rcParams['patch.edgecolor']\n
        self._original_edgecolor = color\n
        self._edgecolor = colors.colorConverter.to_rgba(color, self._alpha)\n
\n
    def set_ec(self, color):\n
        \"\"\"alias for set_edgecolor\"\"\"\n
        return self.set_edgecolor(color)\n
\n
    def set_facecolor(self, color):\n
        \"\"\"\n
        Set the patch face color\n
\n
        ACCEPTS: mpl color spec, or None for default, or 'none' for no color\n
        \"\"\"\n
        if color is None:\n
            color = mpl.rcParams['patch.facecolor']\n
        self._original_facecolor = color  # save: otherwise changing _fill\n
                                          # may lose alpha information\n
        self._facecolor = colors.colorConverter.to_rgba(color, self._alpha)\n
        if not self._fill:\n
            self._facecolor = list(self._facecolor)\n
            self._facecolor[3] = 0\n
\n
    def set_fc(self, color):\n
        \"\"\"alias for set_facecolor\"\"\"\n
        return self.set_facecolor(color)\n
\n
    def set_color(self, c):\n
        \"\"\"\n
        Set both the edgecolor and the facecolor.\n
\n
        ACCEPTS: matplotlib color spec\n
\n
        .. seealso::\n
\n
            :meth:`set_facecolor`, :meth:`set_edgecolor`\n
               For setting the edge or face color individually.\n
        \"\"\"\n
        self.set_facecolor(c)\n
        self.set_edgecolor(c)\n
\n
    def set_alpha(self, alpha):\n
        \"\"\"\n
        Set the alpha transparency of the patch.\n
\n
        ACCEPTS: float or None\n
        \"\"\"\n
        if alpha is not None:\n
            try:\n
                float(alpha)\n
            except TypeError:\n
                raise TypeError('alpha must be a float or None')\n
        artist.Artist.set_alpha(self, alpha)\n
        self.set_facecolor(self._original_facecolor)  # using self._fill and\n
                                                      # self._alpha\n
        self.set_edgecolor(self._original_edgecolor)\n
\n
    def set_linewidth(self, w):\n
        \"\"\"\n
        Set the patch linewidth in points\n
\n
        ACCEPTS: float or None for default\n
        \"\"\"\n
        if w is None:\n
            w = mpl.rcParams['patch.linewidth']\n
        self._linewidth = w\n
\n
    def set_lw(self, lw):\n
        \"\"\"alias for set_linewidth\"\"\"\n
        return self.set_linewidth(lw)\n
\n
    def set_linestyle(self, ls):\n
        \"\"\"\n
        Set the patch linestyle\n
\n
        ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']\n
        \"\"\"\n
        if ls is None:\n
            ls = \"solid\"\n
        self._linestyle = ls\n
\n
    def set_ls(self, ls):\n
        \"\"\"alias for set_linestyle\"\"\"\n
        return self.set_linestyle(ls)\n
\n
    def set_fill(self, b):\n
        \"\"\"\n
        Set whether to fill the patch\n
\n
        ACCEPTS: [True | False]\n
        \"\"\"\n
        self._fill = bool(b)\n
        self.set_facecolor(self._original_facecolor)\n
\n
    def get_fill(self):\n
        'return whether fill is set'\n
        return self._fill\n
\n
    # Make fill a property so as to preserve the long-standing\n
    # but somewhat inconsistent behavior in which fill was an\n
    # attribute.\n
    fill = property(get_fill, set_fill)\n
\n
    def set_capstyle(self, s):\n
        \"\"\"\n
        Set the patch capstyle\n
\n
        ACCEPTS: ['butt' | 'round' | 'projecting']\n
        \"\"\"\n
        s = s.lower()\n
        if s not in self.validCap:\n
            raise ValueError('set_capstyle passed \"%s\";\\n' % (s,)\n
                             + 'valid capstyles are %s' % (self.validCap,))\n
        self._capstyle = s\n
\n
    def get_capstyle(self):\n
        \"Return the current capstyle\"\n
        return self._capstyle\n
\n
    def 
set_joinstyle(self, s):\n
        \"\"\"\n
        Set the patch joinstyle\n
\n
        ACCEPTS: ['miter' | 'round' | 'bevel']\n
        \"\"\"\n
        s = s.lower()\n
        if s not in self.validJoin:\n
            raise ValueError('set_joinstyle passed \"%s\";\\n' % (s,)\n
                             + 'valid joinstyles are %s' % (self.validJoin,))\n
        self._joinstyle = s\n
\n
    def get_joinstyle(self):\n
        \"Return the current joinstyle\"\n
        return self._joinstyle\n
\n
    def set_hatch(self, hatch):\n
        \"\"\"\n
        Set the hatching pattern\n
\n
        *hatch* can be one of::\n
\n
          /   - diagonal hatching\n
          \\   - back diagonal\n
          |   - vertical\n
          -   - horizontal\n
          +   - crossed\n
          x   - crossed diagonal\n
          o   - small circle\n
          O   - large circle\n
          .   - dots\n
          *   - stars\n
\n
        Letters can be combined, in which case all the specified\n
        hatchings are done.  If the same letter repeats, it increases the\n
        density of hatching of that pattern.\n
\n
        Hatching is supported in the PostScript, PDF, SVG and Agg\n
        backends only.\n
\n
        ACCEPTS: ['/' | '\\\\\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']\n
        \"\"\"\n
        self._hatch = hatch\n
\n
    def get_hatch(self):\n
        'Return the current hatching pattern'\n
        return self._hatch\n
\n
    @allow_rasterization\n
    def draw(self, renderer):\n
        'Draw the :class:`Patch` to the given *renderer*.'\n
        if not self.get_visible():\n
            return\n
\n
        renderer.open_group('patch', self.get_gid())\n
        gc = renderer.new_gc()\n
\n
        gc.set_foreground(self._edgecolor, isRGBA=True)\n
\n
        lw = self._linewidth\n
        if self._edgecolor[3] == 0:\n
            lw = 0\n
        gc.set_linewidth(lw)\n
        gc.set_linestyle(self._linestyle)\n
        gc.set_capstyle(self._capstyle)\n
        gc.set_joinstyle(self._joinstyle)\n
\n
        gc.set_antialiased(self._antialiased)\n
        self._set_gc_clip(gc)\n
        gc.set_url(self._url)\n
        gc.set_snap(self.get_snap())\n
\n
        rgbFace = self._facecolor\n
        if rgbFace[3] == 0:\n
            rgbFace = None  # (some?) renderers expect this as no-fill signal\n
\n
        gc.set_alpha(self._alpha)\n
\n
        if self._hatch:\n
            gc.set_hatch(self._hatch)\n
\n
        if self.get_sketch_params() is not None:\n
            gc.set_sketch_params(*self.get_sketch_params())\n
\n
        path = self.get_path()\n
        transform = self.get_transform()\n
        tpath = transform.transform_path_non_affine(path)\n
        affine = transform.get_affine()\n
\n
        if self.get_path_effects():\n
            from matplotlib.patheffects import PathEffectRenderer\n
            renderer = PathEffectRenderer(self.get_path_effects(), renderer)\n
\n
        renderer.draw_path(gc, tpath, affine, rgbFace)\n
\n
        gc.restore()\n
        renderer.close_group('patch')\n
\n
    def get_path(self):\n
        \"\"\"\n
        Return the path of this patch\n
        \"\"\"\n
        raise NotImplementedError('Derived must override')\n
\n
    def get_window_extent(self, renderer=None):\n
        return self.get_path().get_extents(self.get_transform())\n
\n
\n
patchdoc = artist.kwdoc(Patch)\n
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',\n
          'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',\n
          'FancyBboxPatch', 'Patch'):\n
    docstring.interpd.update({k: patchdoc})\n
\n
# define Patch.__init__ docstring after the class has been added to interpd\n
docstring.dedent_interpd(Patch.__init__)\n
\n
\n
class Shadow(Patch):\n
    def __str__(self):\n
        return \"Shadow(%s)\" % (str(self.patch))\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, patch, ox, oy, props=None, **kwargs):\n
        \"\"\"\n
        Create a shadow of the given *patch* offset by *ox*, *oy*.\n
        *props*, if not *None*, is a patch property update dictionary.\n
        If *None*, the shadow will have the same color as the face,\n
        but darkened.\n
\n
        kwargs are\n
        %(Patch)s\n
        \"\"\"\n
        Patch.__init__(self)\n
        self.patch = patch\n
        self.props = props\n
        self._ox, self._oy = ox, oy
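\n
        # identity placeholder; the real dpi-dependent offset is filled in\n
        # by _update_transform at draw time\n
        self._shadow_transform = 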
transforms.Affine2D()\n self._update()\n\n def _update(self):\n self.update_from(self.patch)\n if self.props is not None:\n self.update(self.props)\n else:\n r, g, b, a = colors.colorConverter.to_rgba(\n self.patch.get_facecolor())\n rho = 0.3\n r = rho * r\n g = rho * g\n b = rho * b\n\n self.set_facecolor((r, g, b, 0.5))\n self.set_edgecolor((r, g, b, 0.5))\n self.set_alpha(0.5)\n\n def _update_transform(self, renderer):\n ox = renderer.points_to_pixels(self._ox)\n oy = renderer.points_to_pixels(self._oy)\n self._shadow_transform.clear().translate(ox, oy)\n\n def _get_ox(self):\n return self._ox\n\n def _set_ox(self, ox):\n self._ox = ox\n\n def _get_oy(self):\n return self._oy\n\n def _set_oy(self, oy):\n self._oy = oy\n\n def get_path(self):\n return self.patch.get_path()\n\n def get_patch_transform(self):\n return self.patch.get_patch_transform() + self._shadow_transform\n\n def draw(self, renderer):\n self._update_transform(renderer)\n Patch.draw(self, renderer)\n\n\nclass Rectangle(Patch):\n \"\"\"\n Draw a rectangle with lower left at *xy* = (*x*, *y*) with\n specified *width* and *height*.\n \"\"\"\n\n def __str__(self):\n return self.__class__.__name__ \\\n + \"(%g,%g;%gx%g)\" % (self._x, self._y, self._width, self._height)\n\n @docstring.dedent_interpd\n def __init__(self, xy, width, height, angle=0.0, **kwargs):\n \"\"\"\n\n *angle*\n rotation in degrees (anti-clockwise)\n\n *fill* is a boolean indicating whether to fill the rectangle\n\n Valid kwargs are:\n %(Patch)s\n \"\"\"\n\n Patch.__init__(self, **kwargs)\n\n self._x = xy[0]\n self._y = xy[1]\n self._width = width\n self._height = height\n self._angle = angle\n # Note: This cannot be calculated until this is added to an Axes\n self._rect_transform = transforms.IdentityTransform()\n\n def get_path(self):\n \"\"\"\n Return the vertices of the rectangle\n \"\"\"\n return Path.unit_rectangle()\n\n def _update_patch_transform(self):\n \"\"\"NOTE: This cannot be called until after this has been added\n to an Axes, otherwise unit conversion will fail. 
This\n
        makes it very important to call the accessor method and\n
        not directly access the transformation member variable.\n
        \"\"\"\n
        x = self.convert_xunits(self._x)\n
        y = self.convert_yunits(self._y)\n
        width = self.convert_xunits(self._width)\n
        height = self.convert_yunits(self._height)\n
        bbox = transforms.Bbox.from_bounds(x, y, width, height)\n
        rot_trans = transforms.Affine2D()\n
        rot_trans.rotate_deg_around(x, y, self._angle)\n
        self._rect_transform = transforms.BboxTransformTo(bbox)\n
        self._rect_transform += rot_trans\n
\n
    def get_patch_transform(self):\n
        self._update_patch_transform()\n
        return self._rect_transform\n
\n
    def contains(self, mouseevent):\n
        # special case the degenerate rectangle\n
        if self._width == 0 or self._height == 0:\n
            return False, {}\n
\n
        x, y = self.get_transform().inverted().transform_point(\n
            (mouseevent.x, mouseevent.y))\n
        return (x >= 0.0 and x <= 1.0 and y >= 0.0 and y <= 1.0), {}\n
\n
    def get_x(self):\n
        \"Return the left coord of the rectangle\"\n
        return self._x\n
\n
    def get_y(self):\n
        \"Return the bottom coord of the rectangle\"\n
        return self._y\n
\n
    def get_xy(self):\n
        \"Return the left and bottom coords of the rectangle\"\n
        return self._x, self._y\n
\n
    def get_width(self):\n
        \"Return the width of the rectangle\"\n
        return self._width\n
\n
    def get_height(self):\n
        \"Return the height of the rectangle\"\n
        return self._height\n
\n
    def set_x(self, x):\n
        \"\"\"\n
        Set the left coord of the rectangle\n
\n
        ACCEPTS: float\n
        \"\"\"\n
        self._x = x\n
\n
    def set_y(self, y):\n
        \"\"\"\n
        Set the bottom coord of the rectangle\n
\n
        ACCEPTS: float\n
        \"\"\"\n
        self._y = y\n
\n
    def set_xy(self, xy):\n
        \"\"\"\n
        Set the left and bottom coords of the rectangle\n
\n
        ACCEPTS: 2-item sequence\n
        \"\"\"\n
        self._x, self._y = xy\n
\n
    def set_width(self, w):\n
        \"\"\"\n
        Set the width of the rectangle\n
\n
        ACCEPTS: float\n
        \"\"\"\n
        self._width = w\n
\n
    def set_height(self, h):\n
        \"\"\"\n
        Set the height of the rectangle\n
\n
        ACCEPTS: float\n
        \"\"\"\n
        self._height = h\n
\n
    def set_bounds(self, *args):\n
        \"\"\"\n
        Set the bounds of the rectangle: l,b,w,h\n
\n
        ACCEPTS: (left, bottom, width, height)\n
        \"\"\"\n
        if len(args) == 1:\n
            l, b, w, h = args[0]\n
        else:\n
            l, b, w, h = args\n
        self._x = l\n
        self._y = b\n
        self._width = w\n
        self._height = h\n
\n
    def get_bbox(self):\n
        return transforms.Bbox.from_bounds(self._x, self._y,\n
                                           self._width, self._height)\n
\n
    xy = property(get_xy, set_xy)\n
\n
\n
class RegularPolygon(Patch):\n
    \"\"\"\n
    A regular polygon patch.\n
    \"\"\"\n
    def __str__(self):\n
        return \"Poly%d(%g,%g)\" % (self._numVertices, self._xy[0], self._xy[1])\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, xy, numVertices, radius=5, orientation=0,\n
                 **kwargs):\n
        \"\"\"\n
        Constructor arguments:\n
\n
        *xy*\n
          A length 2 tuple (*x*, *y*) of the center.\n
\n
        *numVertices*\n
          the number of vertices.\n
\n
        *radius*\n
          The distance from the center to each of the vertices.\n
\n
        *orientation*\n
          rotates the polygon (in radians).\n
\n
        Valid kwargs are:\n
        %(Patch)s\n
        \"\"\"\n
        self._xy = xy\n
        self._numVertices = numVertices\n
        self._orientation = orientation\n
        self._radius = radius\n
        self._path = Path.unit_regular_polygon(numVertices)\n
        self._poly_transform = transforms.Affine2D()\n
        self._update_transform()\n
\n
        Patch.__init__(self, **kwargs)\n
\n
    def _update_transform(self):\n
        self._poly_transform.clear() \\\n
            .scale(self.radius) \\\n
            .rotate(self.orientation) \\\n
            .translate(*self.xy)\n
\n
    def _get_xy(self):\n
        return self._xy\n
\n
    def _set_xy(self, xy):\n
        self._xy = xy\n
        self._update_transform()\n
    xy = property(_get_xy, _set_xy)
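\n
\n
    # the orientation and radius setters below also rerun\n
    # _update_transform, so the cached Affine2D stays in sync with the\n
    # current geometry\n
    def _get_orientation(self):\n
        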
return self._orientation\n\n def _set_orientation(self, orientation):\n self._orientation = orientation\n self._update_transform()\n orientation = property(_get_orientation, _set_orientation)\n\n def _get_radius(self):\n return self._radius\n\n def _set_radius(self, radius):\n self._radius = radius\n self._update_transform()\n radius = property(_get_radius, _set_radius)\n\n def _get_numvertices(self):\n return self._numVertices\n\n def _set_numvertices(self, numVertices):\n self._numVertices = numVertices\n\n numvertices = property(_get_numvertices, _set_numvertices)\n\n def get_path(self):\n return self._path\n\n def get_patch_transform(self):\n self._update_transform()\n return self._poly_transform\n\n\nclass PathPatch(Patch):\n \"\"\"\n A general polycurve path patch.\n \"\"\"\n def __str__(self):\n return \"Poly((%g, %g) ...)\" % tuple(self._path.vertices[0])\n\n @docstring.dedent_interpd\n def __init__(self, path, **kwargs):\n \"\"\"\n *path* is a :class:`matplotlib.path.Path` object.\n\n Valid kwargs are:\n %(Patch)s\n\n .. seealso::\n\n :class:`Patch`\n For additional kwargs\n\n \"\"\"\n Patch.__init__(self, **kwargs)\n self._path = path\n\n def get_path(self):\n return self._path\n\n\nclass Polygon(Patch):\n \"\"\"\n A general polygon patch.\n \"\"\"\n def __str__(self):\n return \"Poly((%g, %g) ...)\" % tuple(self._path.vertices[0])\n\n @docstring.dedent_interpd\n def __init__(self, xy, closed=True, **kwargs):\n \"\"\"\n *xy* is a numpy array with shape Nx2.\n\n If *closed* is *True*, the polygon will be closed so the\n starting and ending points are the same.\n\n Valid kwargs are:\n %(Patch)s\n\n .. seealso::\n\n :class:`Patch`\n For additional kwargs\n\n \"\"\"\n Patch.__init__(self, **kwargs)\n self._closed = closed\n self.set_xy(xy)\n\n def get_path(self):\n return self._path\n\n def get_closed(self):\n return self._closed\n\n def set_closed(self, closed):\n if self._closed == bool(closed):\n return\n self._closed = bool(closed)\n self.set_xy(self.get_xy())\n\n def get_xy(self):\n return self._path.vertices\n\n def set_xy(self, xy):\n xy = np.asarray(xy)\n if self._closed:\n if len(xy) and (xy[0] != xy[-1]).any():\n xy = np.concatenate([xy, [xy[0]]])\n else:\n if len(xy) > 2 and (xy[0] == xy[-1]).all():\n xy = xy[:-1]\n self._path = Path(xy, closed=self._closed)\n\n _get_xy = get_xy\n _set_xy = set_xy\n xy = property(\n get_xy, set_xy, None,\n \"\"\"Set/get the vertices of the polygon. This property is\n provided for backward compatibility with matplotlib 0.91.x\n only. New code should use\n :meth:`~matplotlib.patches.Polygon.get_xy` and\n :meth:`~matplotlib.patches.Polygon.set_xy` instead.\"\"\")\n\n\nclass Wedge(Patch):\n \"\"\"\n Wedge shaped patch.\n \"\"\"\n def __str__(self):\n return \"Wedge(%g,%g)\" % (self.theta1, self.theta2)\n\n @docstring.dedent_interpd\n def __init__(self, center, r, theta1, theta2, width=None, **kwargs):\n \"\"\"\n Draw a wedge centered at *x*, *y* center with radius *r* that\n sweeps *theta1* to *theta2* (in degrees). 
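For\n
        example, ``Wedge((0, 0), 1, 0, 90)`` would give a quarter-circle\n
        wedge of radius 1 about the origin (hypothetical values). 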
If *width* is given,\n
        then a partial wedge is drawn from inner radius *r* - *width*\n
        to outer radius *r*.\n
\n
        Valid kwargs are:\n
\n
        %(Patch)s\n
        \"\"\"\n
        Patch.__init__(self, **kwargs)\n
        self.center = center\n
        self.r, self.width = r, width\n
        self.theta1, self.theta2 = theta1, theta2\n
        self._patch_transform = transforms.IdentityTransform()\n
        self._recompute_path()\n
\n
    def _recompute_path(self):\n
        # Inner and outer rings are connected unless the annulus is complete\n
        if abs((self.theta2 - self.theta1) - 360) <= 1e-12:\n
            theta1, theta2 = 0, 360\n
            connector = Path.MOVETO\n
        else:\n
            theta1, theta2 = self.theta1, self.theta2\n
            connector = Path.LINETO\n
\n
        # Form the outer ring\n
        arc = Path.arc(theta1, theta2)\n
\n
        if self.width is not None:\n
            # Partial annulus needs to draw the outer ring\n
            # followed by a reversed and scaled inner ring\n
            v1 = arc.vertices\n
            v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r\n
            v = np.vstack([v1, v2, v1[0, :], (0, 0)])\n
            c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])\n
            c[len(arc.codes)] = connector\n
        else:\n
            # Wedge doesn't need an inner ring\n
            v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])\n
            c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])\n
\n
        # Shift and scale the wedge to the final location.\n
        v *= self.r\n
        v += np.asarray(self.center)\n
        self._path = Path(v, c)\n
\n
    def set_center(self, center):\n
        self._path = None\n
        self.center = center\n
\n
    def set_radius(self, radius):\n
        self._path = None\n
        self.r = radius\n
\n
    def set_theta1(self, theta1):\n
        self._path = None\n
        self.theta1 = theta1\n
\n
    def set_theta2(self, theta2):\n
        self._path = None\n
        self.theta2 = theta2\n
\n
    def set_width(self, width):\n
        self._path = None\n
        self.width = width\n
\n
    def get_path(self):\n
        if self._path is None:\n
            self._recompute_path()\n
        return self._path\n
\n
\n
# COVERAGE NOTE: Not used internally or from examples\n
class Arrow(Patch):\n
    \"\"\"\n
    An arrow patch.\n
    \"\"\"\n
    def __str__(self):\n
        return \"Arrow()\"\n
\n
    _path = Path([\n
        [0.0, 0.1], [0.0, -0.1],\n
        [0.8, -0.1], [0.8, -0.3],\n
        [1.0, 0.0], [0.8, 0.3],\n
        [0.8, 0.1], [0.0, 0.1]],\n
        closed=True)\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, x, y, dx, dy, width=1.0, **kwargs):\n
        \"\"\"\n
        Draws an arrow, starting at (*x*, *y*), direction and length\n
        given by (*dx*, *dy*); the width of the arrow is scaled by *width*.\n
\n
        Valid kwargs are:\n
        %(Patch)s\n
        \"\"\"\n
        Patch.__init__(self, **kwargs)\n
        L = np.sqrt(dx ** 2 + dy ** 2) or 1  # account for div by zero\n
        cx = float(dx) / L\n
        sx = float(dy) / L\n
\n
        trans1 = transforms.Affine2D().scale(L, width)\n
        trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)\n
        trans3 = transforms.Affine2D().translate(x, y)\n
        trans = trans1 + trans2 + trans3\n
        self._patch_transform = trans.frozen()\n
\n
    def get_path(self):\n
        return self._path\n
\n
    def get_patch_transform(self):\n
        return self._patch_transform\n
\n
\n
class FancyArrow(Polygon):\n
    \"\"\"\n
    Like Arrow, but lets you set head width and head height independently.\n
    \"\"\"\n
\n
    def __str__(self):\n
        return \"FancyArrow()\"\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,\n
                 head_width=None, head_length=None, shape='full', overhang=0,\n
                 head_starts_at_zero=False, **kwargs):\n
        \"\"\"\n
        Constructor arguments\n
          *width*: float (default: 0.001)\n
            width of full arrow tail\n
\n
          *length_includes_head*: [True | False] (default: False)\n
            True if head is to be counted in calculating the length.\n
\n
          *head_width*: float or None (default: 
20 * width)\n
            total width of the full arrow head\n
\n
          *head_length*: float or None (default: 1.5 * head_width)\n
            length of arrow head\n
\n
          *shape*: ['full', 'left', 'right'] (default: 'full')\n
            draw the left-half, right-half, or full arrow\n
\n
          *overhang*: float (default: 0)\n
            fraction that the arrow is swept back (0 overhang means\n
            triangular shape). Can be negative or greater than one.\n
\n
          *head_starts_at_zero*: [True | False] (default: False)\n
            if True, the head starts being drawn at coordinate 0\n
            instead of ending at coordinate 0.\n
\n
        Other valid kwargs (inherited from :class:`Patch`) are:\n
        %(Patch)s\n
\n
        \"\"\"\n
        if head_width is None:\n
            head_width = 20 * width\n
        if head_length is None:\n
            head_length = 1.5 * head_width\n
\n
        distance = np.sqrt(dx ** 2 + dy ** 2)\n
        if length_includes_head:\n
            length = distance\n
        else:\n
            length = distance + head_length\n
        if not length:\n
            verts = []  # display nothing if empty\n
        else:\n
            # start by drawing horizontal arrow, point at (0,0)\n
            hw, hl, hs, lw = head_width, head_length, overhang, width\n
            left_half_arrow = np.array([\n
                [0.0, 0.0],                   # tip\n
                [-hl, -hw / 2.0],             # leftmost\n
                [-hl * (1 - hs), -lw / 2.0],  # meets stem\n
                [-length, -lw / 2.0],         # bottom left\n
                [-length, 0],\n
            ])\n
            # if we're not including the head, shift up by head length\n
            if not length_includes_head:\n
                left_half_arrow += [head_length, 0]\n
            # if the head starts at 0, shift up by another head length\n
            if head_starts_at_zero:\n
                left_half_arrow += [head_length / 2.0, 0]\n
            # figure out the shape, and complete accordingly\n
            if shape == 'left':\n
                coords = left_half_arrow\n
            else:\n
                right_half_arrow = left_half_arrow * [1, -1]\n
                if shape == 'right':\n
                    coords = right_half_arrow\n
                elif shape == 'full':\n
                    # The half-arrows contain the midpoint of the stem,\n
                    # which we can omit from the full arrow. 
Including it\n
                    # twice caused a problem with xpdf.\n
                    coords = np.concatenate([left_half_arrow[:-1],\n
                                             right_half_arrow[-2::-1]])\n
                else:\n
                    raise ValueError(\"Got unknown shape: %s\" % shape)\n
            cx = float(dx) / distance\n
            sx = float(dy) / distance\n
            M = np.array([[cx, sx], [-sx, cx]])\n
            verts = np.dot(coords, M) + (x + dx, y + dy)\n
\n
        Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)\n
\n
\n
docstring.interpd.update({\"FancyArrow\": FancyArrow.__init__.__doc__})\n
\n
\n
class YAArrow(Patch):\n
    \"\"\"\n
    Yet another arrow class.\n
\n
    This is an arrow that is defined in display space and has a tip at\n
    *x1*, *y1* and a base at *x2*, *y2*.\n
    \"\"\"\n
    def __str__(self):\n
        return \"YAArrow()\"\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, figure, xytip, xybase,\n
                 width=4, frac=0.1, headwidth=12, **kwargs):\n
        \"\"\"\n
        Constructor arguments:\n
\n
        *xytip*\n
            (*x*, *y*) location of arrow tip\n
\n
        *xybase*\n
            (*x*, *y*) location the arrow base mid point\n
\n
        *figure*\n
            The :class:`~matplotlib.figure.Figure` instance\n
            (fig.dpi)\n
\n
        *width*\n
            The width of the arrow in points\n
\n
        *frac*\n
            The fraction of the arrow length occupied by the head\n
\n
        *headwidth*\n
            The width of the base of the arrow head in points\n
\n
        Valid kwargs are:\n
        %(Patch)s\n
\n
        \"\"\"\n
        self.xytip = xytip\n
        self.xybase = xybase\n
        self.width = width\n
        self.frac = frac\n
        self.headwidth = headwidth\n
        Patch.__init__(self, **kwargs)\n
        # Set self.figure after Patch.__init__, since it sets self.figure to\n
        # None\n
        self.figure = figure\n
\n
    def get_path(self):\n
        # Since this is dpi dependent, we need to recompute the path\n
        # every time.\n
\n
        # the base vertices\n
        x1, y1 = self.xytip\n
        x2, y2 = self.xybase\n
        k1 = self.width * self.figure.dpi / 72. / 2.\n
        k2 = self.headwidth * self.figure.dpi / 72. / 2.\n
        xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)\n
\n
        # a point on the segment a fraction *frac* of the distance from\n
        # the tip to the base\n
        theta = math.atan2(y2 - y1, x2 - x1)\n
        r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)\n
        xm = x1 + self.frac * r * math.cos(theta)\n
        ym = y1 + self.frac * r * math.sin(theta)\n
        xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)\n
        xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)\n
\n
        xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])\n
        ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])\n
\n
        return Path(list(zip(xs, ys)), closed=True)\n
\n
    def get_patch_transform(self):\n
        return transforms.IdentityTransform()\n
\n
    def getpoints(self, x1, y1, x2, y2, k):\n
        \"\"\"\n
        For the line segment defined by (*x1*, *y1*) and (*x2*, *y2*),\n
        return the two points on the line perpendicular to it at\n
        (*x2*, *y2*), each at distance *k* from (*x2*, *y2*).\n
        \"\"\"\n
        x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))\n
\n
        if y2 - y1 == 0:\n
            return x2, y2 + k, x2, y2 - k\n
        elif x2 - x1 == 0:\n
            return x2 + k, y2, x2 - k, y2\n
\n
        m = (y2 - y1) / (x2 - x1)\n
        pm = -1. / m\n
        a = 1\n
        b = -2 * y2\n
        c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)\n
\n
        y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)\n
        x3a = (y3a - y2) / pm + x2
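\n
\n
        # the second root of the quadratic is the mirror point on the\n
        # other side of the segment\n
        y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. 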
* a)\n
        x3b = (y3b - y2) / pm + x2\n
        return x3a, y3a, x3b, y3b\n
\n
\n
class CirclePolygon(RegularPolygon):\n
    \"\"\"\n
    A polygon-approximation of a circle patch.\n
    \"\"\"\n
    def __str__(self):\n
        return \"CirclePolygon(%d,%d)\" % self.center\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, xy, radius=5,\n
                 resolution=20,  # the number of vertices\n
                 **kwargs):\n
        \"\"\"\n
        Create a circle at *xy* = (*x*, *y*) with given *radius*.\n
        This circle is approximated by a regular polygon with\n
        *resolution* sides.  For a smoother circle drawn with splines,\n
        see :class:`~matplotlib.patches.Circle`.\n
\n
        Valid kwargs are:\n
        %(Patch)s\n
\n
        \"\"\"\n
        RegularPolygon.__init__(self, xy,\n
                                resolution,\n
                                radius,\n
                                orientation=0,\n
                                **kwargs)\n
\n
\n
class Ellipse(Patch):\n
    \"\"\"\n
    A scale-free ellipse.\n
    \"\"\"\n
    def __str__(self):\n
        return \"Ellipse(%s,%s;%sx%s)\" % (self.center[0], self.center[1],\n
                                         self.width, self.height)\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, xy, width, height, angle=0.0, **kwargs):\n
        \"\"\"\n
        *xy*\n
          center of ellipse\n
\n
        *width*\n
          total length (diameter) of horizontal axis\n
\n
        *height*\n
          total length (diameter) of vertical axis\n
\n
        *angle*\n
          rotation in degrees (anti-clockwise)\n
\n
        Valid kwargs are:\n
        %(Patch)s\n
        \"\"\"\n
        Patch.__init__(self, **kwargs)\n
\n
        self.center = xy\n
        self.width, self.height = width, height\n
        self.angle = angle\n
        self._path = Path.unit_circle()\n
        # Note: This cannot be calculated until this is added to an Axes\n
        self._patch_transform = transforms.IdentityTransform()\n
\n
    def _recompute_transform(self):\n
        \"\"\"NOTE: This cannot be called until after this has been added\n
        to an Axes, otherwise unit conversion will fail. This\n
        makes it very important to call the accessor method and\n
        not directly access the transformation member variable.\n
        \"\"\"\n
        center = (self.convert_xunits(self.center[0]),\n
                  self.convert_yunits(self.center[1]))\n
        width = self.convert_xunits(self.width)\n
        height = self.convert_yunits(self.height)\n
        self._patch_transform = transforms.Affine2D() \\\n
            .scale(width * 0.5, height * 0.5) \\\n
            .rotate_deg(self.angle) \\\n
            .translate(*center)\n
\n
    def get_path(self):\n
        \"\"\"\n
        Return the path of the ellipse\n
        \"\"\"\n
        return self._path\n
\n
    def get_patch_transform(self):\n
        self._recompute_transform()\n
        return self._patch_transform\n
\n
    def contains(self, ev):\n
        if ev.x is None or ev.y is None:\n
            return False, {}\n
        x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))\n
        return (x * x + y * y) <= 1.0, {}\n
\n
\n
class Circle(Ellipse):\n
    \"\"\"\n
    A circle patch.\n
    \"\"\"\n
    def __str__(self):\n
        return \"Circle((%g,%g),r=%g)\" % (self.center[0],\n
                                         self.center[1],\n
                                         self.radius)\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, xy, radius=5, **kwargs):\n
        \"\"\"\n
        Create true circle at center *xy* = (*x*, *y*) with given\n
        *radius*.  Unlike :class:`~matplotlib.patches.CirclePolygon`\n
        which is a polygonal approximation, this uses Bézier splines\n
        and is much closer to a scale-free circle.\n
\n
        Valid kwargs are:\n
        %(Patch)s\n
\n
        \"\"\"\n
        self.radius = radius\n
        Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)\n
\n
    def set_radius(self, radius):\n
        \"\"\"\n
        Set the radius of the circle\n
\n
        ACCEPTS: float\n
        \"\"\"\n
        self.width = self.height = 2 * radius\n
\n
    def get_radius(self):\n
        'return the radius of the circle'\n
        return self.width / 2.\n
\n
    radius = property(get_radius, set_radius)\n
\n
\n
class Arc(Ellipse):\n
    \"\"\"\n
    An elliptical arc. 
Because it performs various optimizations, it\n
    cannot be filled.\n
\n
    The arc must be used in an :class:`~matplotlib.axes.Axes`\n
    instance---it cannot be added directly to a\n
    :class:`~matplotlib.figure.Figure`---because it is optimized to\n
    only render the segments that are inside the axes bounding box\n
    with high resolution.\n
    \"\"\"\n
    def __str__(self):\n
        return \"Arc(%s,%s;%sx%s)\" % (self.center[0], self.center[1],\n
                                     self.width, self.height)\n
\n
    @docstring.dedent_interpd\n
    def __init__(self, xy, width, height, angle=0.0,\n
                 theta1=0.0, theta2=360.0, **kwargs):\n
        \"\"\"\n
        The following args are supported:\n
\n
        *xy*\n
          center of ellipse\n
\n
        *width*\n
          length of horizontal axis\n
\n
        *height*\n
          length of vertical axis\n
\n
        *angle*\n
          rotation in degrees (anti-clockwise)\n
\n
        *theta1*\n
          starting angle of the arc in degrees\n
\n
        *theta2*\n
          ending angle of the arc in degrees\n
\n
        If *theta1* and *theta2* are not provided, the arc will form a\n
        complete ellipse.\n
\n
        Valid kwargs are:\n
\n
        %(Patch)s\n
        \"\"\"\n
        fill = kwargs.setdefault('fill', False)\n
        if fill:\n
            raise ValueError(\"Arc objects can not be filled\")\n
\n
        Ellipse.__init__(self, xy, width, height, angle, **kwargs)\n
\n
        self.theta1 = theta1\n
        self.theta2 = theta2\n
\n
        self._path = Path.arc(self.theta1, self.theta2)\n
\n
    @allow_rasterization\n
    def draw(self, renderer):\n
        \"\"\"\n
        Ellipses are normally drawn using an approximation that uses\n
        eight cubic bezier splines.  The error of this approximation\n
        is 1.89818e-6, according to this unverified source:\n
\n
          Lancaster, Don.  Approximating a Circle or an Ellipse Using\n
          Four Bezier Cubic Splines.\n
\n
          http://www.tinaja.com/glib/ellipse4.pdf\n
\n
        There is a use case where very large ellipses must be drawn\n
        with very high accuracy, and it is too expensive to render the\n
        entire ellipse with enough segments (either splines or line\n
        segments).  Therefore, in the case where either radius of the\n
        ellipse is large enough that the error of the spline\n
        approximation will be visible (greater than one pixel offset\n
        from the ideal), a different technique is used.\n
\n
        In that case, only the visible parts of the ellipse are drawn,\n
        with each visible arc using a fixed number of spline segments\n
        (8).  The algorithm proceeds as follows:\n
\n
        1. The points where the ellipse intersects the axes bounding\n
           box are located.  (This is done by performing an inverse\n
           transformation on the axes bbox such that it is relative\n
           to the unit circle -- this makes the intersection\n
           calculation much easier than doing rotated ellipse\n
           intersection directly).\n
\n
           This uses the \"line intersecting a circle\" algorithm\n
           from:\n
\n
               Vince, John.  Geometry for Computer Graphics: Formulae,\n
               Examples & Proofs.  London: Springer-Verlag, 2005.\n
\n
        2. The angles of each of the intersection points are\n
           calculated.\n
\n
        3. 
Proceeding counterclockwise starting in the positive\n x-direction, each of the visible arc-segments between the\n pairs of vertices are drawn using the bezier arc\n approximation technique implemented in\n :meth:`matplotlib.path.Path.arc`.\n \"\"\"\n if not hasattr(self, 'axes'):\n raise RuntimeError('Arcs can only be used in Axes instances')\n\n self._recompute_transform()\n\n # Get the width and height in pixels\n width = self.convert_xunits(self.width)\n height = self.convert_yunits(self.height)\n width, height = self.get_transform().transform_point(\n (width, height))\n inv_error = (1.0 / 1.89818e-6) * 0.5\n\n if width < inv_error and height < inv_error:\n #self._path = Path.arc(self.theta1, self.theta2)\n return Patch.draw(self, renderer)\n\n def iter_circle_intersect_on_line(x0, y0, x1, y1):\n dx = x1 - x0\n dy = y1 - y0\n dr2 = dx * dx + dy * dy\n D = x0 * y1 - x1 * y0\n D2 = D * D\n discrim = dr2 - D2\n\n # Single (tangential) intersection\n if discrim == 0.0:\n x = (D * dy) / dr2\n y = (-D * dx) / dr2\n yield x, y\n elif discrim > 0.0:\n # The definition of \"sign\" here is different from\n # np.sign: we never want to get 0.0\n if dy < 0.0:\n sign_dy = -1.0\n else:\n sign_dy = 1.0\n sqrt_discrim = np.sqrt(discrim)\n for sign in (1., -1.):\n x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2\n y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2\n yield x, y\n\n def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):\n epsilon = 1e-9\n if x1 < x0:\n x0e, x1e = x1, x0\n else:\n x0e, x1e = x0, x1\n if y1 < y0:\n y0e, y1e = y1, y0\n else:\n y0e, y1e = y0, y1\n x0e -= epsilon\n y0e -= epsilon\n x1e += epsilon\n y1e += epsilon\n for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):\n if x >= x0e and x <= x1e and y >= y0e and y <= y1e:\n yield x, y\n\n # Transforms the axes box_path so that it is relative to the unit\n # circle in the same way that it is relative to the desired\n # ellipse.\n box_path = Path.unit_rectangle()\n box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \\\n self.get_transform().inverted()\n box_path = box_path.transformed(box_path_transform)\n\n PI = np.pi\n TWOPI = PI * 2.0\n RAD2DEG = 180.0 / PI\n DEG2RAD = PI / 180.0\n theta1 = self.theta1\n theta2 = self.theta2\n thetas = {}\n # For each of the point pairs, there is a line segment\n for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):\n x0, y0 = p0\n x1, y1 = p1\n for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):\n theta = np.arccos(x)\n if y < 0:\n theta = TWOPI - theta\n # Convert radians to angles\n theta *= RAD2DEG\n if theta > theta1 and theta < theta2:\n thetas[theta] = None\n\n thetas = list(six.iterkeys(thetas))\n thetas.sort()\n thetas.append(theta2)\n\n last_theta = theta1\n theta1_rad = theta1 * DEG2RAD\n inside = box_path.contains_point((np.cos(theta1_rad),\n np.sin(theta1_rad)))\n\n # save original path\n path_original = self._path\n for theta in thetas:\n if inside:\n Path.arc(last_theta, theta, 8)\n Patch.draw(self, renderer)\n inside = False\n else:\n inside = True\n last_theta = theta\n\n # restore original path\n self._path = path_original\n\n\ndef bbox_artist(artist, renderer, props=None, fill=True):\n \"\"\"\n This is a debug function to draw a rectangle around the bounding\n box returned by\n :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,\n to test whether the artist is returning the correct bbox.\n\n *props* is a dict of rectangle props with the additional property\n 'pad' that sets the padding around the bbox in 
points.\n \"\"\"\n if props is None:\n props = {}\n props = props.copy() # don't want to alter the pad externally\n pad = props.pop('pad', 4)\n pad = renderer.points_to_pixels(pad)\n bbox = artist.get_window_extent(renderer)\n l, b, w, h = bbox.bounds\n l -= pad / 2.\n b -= pad / 2.\n w += pad\n h += pad\n r = Rectangle(xy=(l, b),\n width=w,\n height=h,\n fill=fill,\n )\n r.set_transform(transforms.IdentityTransform())\n r.set_clip_on(False)\n r.update(props)\n r.draw(renderer)\n\n\ndef draw_bbox(bbox, renderer, color='k', trans=None):\n \"\"\"\n This is a debug function to draw a rectangle around the bounding\n box returned by\n :meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,\n to test whether the artist is returning the correct bbox.\n \"\"\"\n\n l, b, w, h = bbox.bounds\n r = Rectangle(xy=(l, b),\n width=w,\n height=h,\n edgecolor=color,\n fill=False,\n )\n if trans is not None:\n r.set_transform(trans)\n r.set_clip_on(False)\n r.draw(renderer)\n\n\ndef _pprint_table(_table, leadingspace=2):\n \"\"\"\n Given the list of list of strings, return a string of REST table format.\n \"\"\"\n if leadingspace:\n pad = ' ' * leadingspace\n else:\n pad = ''\n\n columns = [[] for cell in _table[0]]\n\n for row in _table:\n for column, cell in zip(columns, row):\n column.append(cell)\n\n col_len = [max([len(cell) for cell in column]) for column in columns]\n\n lines = []\n table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])\n\n lines.append('')\n lines.append(table_formatstr)\n lines.append(pad + ' '.join([cell.ljust(cl)\n for cell, cl\n in zip(_table[0], col_len)]))\n lines.append(table_formatstr)\n\n lines.extend([(pad + ' '.join([cell.ljust(cl)\n for cell, cl\n in zip(row, col_len)]))\n for row in _table[1:]])\n\n lines.append(table_formatstr)\n lines.append('')\n return \"\\n\".join(lines)\n\n\ndef _pprint_styles(_styles):\n \"\"\"\n A helper function for the _Style class. Given the dictionary of\n (stylename : styleclass), return a formatted string listing all the\n styles. Used to update the documentation.\n \"\"\"\n names, attrss, clss = [], [], []\n\n import inspect\n\n _table = [[\"Class\", \"Name\", \"Attrs\"]]\n\n for name, cls in sorted(_styles.items()):\n args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)\n if defaults:\n args = [(argname, argdefault)\n for argname, argdefault in zip(args[1:], defaults)]\n else:\n args = None\n\n if args is None:\n argstr = 'None'\n else:\n argstr = \",\".join([(\"%s=%s\" % (an, av))\n for an, av\n in args])\n\n #adding ``quotes`` since - and | have special meaning in reST\n _table.append([cls.__name__, \"``%s``\" % name, argstr])\n\n return _pprint_table(_table)\n\n\nclass _Style(object):\n \"\"\"\n A base class for the Styles. 
It is meant to be a container class,\n where actual styles are declared as subclasses of it, and it\n provides some helper functions.\n \"\"\"\n def __new__(self, stylename, **kw):\n \"\"\"\n Return the instance of the subclass with the given style name.\n \"\"\"\n\n # the \"class\" should have the _style_list attribute, which is\n # a dictionary of style name, style class pairs.\n\n _list = stylename.replace(\" \", \"\").split(\",\")\n _name = _list[0].lower()\n try:\n _cls = self._style_list[_name]\n except KeyError:\n raise ValueError(\"Unknown style : %s\" % stylename)\n\n try:\n _args_pair = [cs.split(\"=\") for cs in _list[1:]]\n _args = dict([(k, float(v)) for k, v in _args_pair])\n except ValueError:\n raise ValueError(\"Incorrect style argument : %s\" % stylename)\n _args.update(kw)\n\n return _cls(**_args)\n\n @classmethod\n def get_styles(klass):\n \"\"\"\n A class method which returns a dictionary of available styles.\n \"\"\"\n return klass._style_list\n\n @classmethod\n def pprint_styles(klass):\n \"\"\"\n A class method which returns a string of the available styles.\n \"\"\"\n return _pprint_styles(klass._style_list)\n\n @classmethod\n def register(klass, name, style):\n \"\"\"\n Register a new style.\n \"\"\"\n\n if not issubclass(style, klass._Base):\n raise ValueError(\"%s must be a subclass of %s\" % (style,\n klass._Base))\n klass._style_list[name] = style\n\n\nclass BoxStyle(_Style):\n \"\"\"\n :class:`BoxStyle` is a container class which defines several\n boxstyle classes, which are used for :class:`FancyBboxPatch`.\n\n A style object can be created as::\n\n BoxStyle.Round(pad=0.2)\n\n or::\n\n BoxStyle(\"Round\", pad=0.2)\n\n or::\n\n BoxStyle(\"Round, pad=0.2\")\n\n The following boxstyle classes are defined.\n\n %(AvailableBoxstyles)s\n\n An instance of any boxstyle class is a callable object,\n whose call signature is::\n\n __call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)\n\n and returns a :class:`Path` instance. *x0*, *y0*, *width* and\n *height* specify the location and size of the box to be\n drawn. *mutation_size* determines the overall size of the\n mutation (by which I mean the transformation of the rectangle to\n the fancy box). *aspect_ratio* determines the aspect-ratio of\n the mutation.\n\n .. plot:: mpl_examples/pylab_examples/fancybox_demo2.py\n \"\"\"\n\n _style_list = {}\n\n class _Base(object):\n \"\"\"\n :class:`BBoxTransmuterBase` and its derivatives are used to make a\n fancy box around a given rectangle. The :meth:`__call__` method\n returns the :class:`~matplotlib.path.Path` of the fancy box. This\n class is not an artist and actual drawing of the fancy box is done\n by the :class:`FancyBboxPatch` class.\n \"\"\"\n\n # The derived classes are required to be able to be initialized\n # w/o arguments, i.e., all their arguments (except self) must have\n # default values.\n\n def __init__(self):\n \"\"\"\n Initialization.\n \"\"\"\n super(BoxStyle._Base, self).__init__()\n\n def transmute(self, x0, y0, width, height, mutation_size):\n \"\"\"\n The transmute method is the very core of the\n :class:`BboxTransmuter` class and must be overridden in the\n subclasses. It receives the location and size of the\n rectangle, and the mutation_size, with which the amount of\n padding etc. will be scaled. 
It returns a\n :class:`~matplotlib.path.Path` instance.\n \"\"\"\n raise NotImplementedError('Derived must override')\n\n def __call__(self, x0, y0, width, height, mutation_size,\n aspect_ratio=1.):\n \"\"\"\n Given the location and size of the box, return the path of\n the box around it.\n\n - *x0*, *y0*, *width*, *height* : location and size of the box\n - *mutation_size* : a reference scale for the mutation.\n - *aspect_ratio* : aspect-ration for the mutation.\n \"\"\"\n # The __call__ method is a thin wrapper around the transmute method\n # and take care of the aspect.\n\n if aspect_ratio is not None:\n # Squeeze the given height by the aspect_ratio\n y0, height = y0 / aspect_ratio, height / aspect_ratio\n # call transmute method with squeezed height.\n path = self.transmute(x0, y0, width, height, mutation_size)\n vertices, codes = path.vertices, path.codes\n # Restore the height\n vertices[:, 1] = vertices[:, 1] * aspect_ratio\n return Path(vertices, codes)\n else:\n return self.transmute(x0, y0, width, height, mutation_size)\n\n def __reduce__(self):\n # because we have decided to nest thes classes, we need to\n # add some more information to allow instance pickling.\n import matplotlib.cbook as cbook\n return (cbook._NestedClassGetter(),\n (BoxStyle, self.__class__.__name__),\n self.__dict__\n )\n\n class Square(_Base):\n \"\"\"\n A simple square box.\n \"\"\"\n\n def __init__(self, pad=0.3):\n \"\"\"\n *pad*\n amount of padding\n \"\"\"\n\n self.pad = pad\n super(BoxStyle.Square, self).__init__()\n\n def transmute(self, x0, y0, width, height, mutation_size):\n pad = mutation_size * self.pad\n\n # width and height with padding added.\n width, height = width + 2*pad, height + 2*pad\n\n # boundary of the padded box\n x0, y0 = x0 - pad, y0 - pad,\n x1, y1 = x0 + width, y0 + height\n\n vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]\n codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]\n return Path(vertices, codes)\n\n _style_list[\"square\"] = Square\n\n class Circle(_Base):\n \"\"\"A simple circle box.\"\"\"\n def __init__(self, pad=0.3):\n \"\"\"\n Parameters\n ----------\n pad : float\n The amount of padding around the original box.\n \"\"\"\n self.pad = pad\n super(BoxStyle.Circle, self).__init__()\n\n def transmute(self, x0, y0, width, height, mutation_size):\n pad = mutation_size * self.pad\n width, height = width + 2 * pad, height + 2 * pad\n\n # boundary of the padded box\n x0, y0 = x0 - pad, y0 - pad,\n return Path.circle((x0 + width/2., y0 + height/2.),\n (max([width, height]) / 2.))\n\n _style_list[\"circle\"] = Circle\n\n class LArrow(_Base):\n \"\"\"\n (left) Arrow Box\n \"\"\"\n def __init__(self, pad=0.3):\n self.pad = pad\n super(BoxStyle.LArrow, self).__init__()\n\n def transmute(self, x0, y0, width, height, mutation_size):\n # padding\n pad = mutation_size * self.pad\n\n # width and height with padding added.\n width, height = width + 2. * pad, \\\n height + 2. * pad,\n\n # boundary of the padded box\n x0, y0 = x0 - pad, y0 - pad,\n x1, y1 = x0 + width, y0 + height\n\n dx = (y1 - y0) / 2.\n dxx = dx * .5\n # adjust x0. 
1.4 <- sqrt(2)\n x0 = x0 + pad / 1.4\n\n cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),\n (x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),\n (x0 + dxx, y0 - dxx), # arrow\n (x0 + dxx, y0), (x0 + dxx, y0)]\n\n com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.LINETO, Path.LINETO,\n Path.LINETO, Path.CLOSEPOLY]\n\n path = Path(cp, com)\n\n return path\n _style_list[\"larrow\"] = LArrow\n\n class RArrow(LArrow):\n \"\"\"\n (right) Arrow Box\n \"\"\"\n\n def __init__(self, pad=0.3):\n #self.pad = pad\n super(BoxStyle.RArrow, self).__init__(pad)\n\n def transmute(self, x0, y0, width, height, mutation_size):\n\n p = BoxStyle.LArrow.transmute(self, x0, y0,\n width, height, mutation_size)\n\n p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]\n\n return p\n\n _style_list[\"rarrow\"] = RArrow\n\n class Round(_Base):\n \"\"\"\n A box with round corners.\n \"\"\"\n\n def __init__(self, pad=0.3, rounding_size=None):\n \"\"\"\n *pad*\n amount of padding\n\n *rounding_size*\n rounding radius of corners. *pad* if None\n \"\"\"\n self.pad = pad\n self.rounding_size = rounding_size\n super(BoxStyle.Round, self).__init__()\n\n def transmute(self, x0, y0, width, height, mutation_size):\n\n # padding\n pad = mutation_size * self.pad\n\n # size of the roudning corner\n if self.rounding_size:\n dr = mutation_size * self.rounding_size\n else:\n dr = pad\n\n width, height = width + 2. * pad, \\\n height + 2. * pad,\n\n x0, y0 = x0 - pad, y0 - pad,\n x1, y1 = x0 + width, y0 + height\n\n # Round corners are implemented as quadratic bezier. e.g.,\n # [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.\n cp = [(x0 + dr, y0),\n (x1 - dr, y0),\n (x1, y0), (x1, y0 + dr),\n (x1, y1 - dr),\n (x1, y1), (x1 - dr, y1),\n (x0 + dr, y1),\n (x0, y1), (x0, y1 - dr),\n (x0, y0 + dr),\n (x0, y0), (x0 + dr, y0),\n (x0 + dr, y0)]\n\n com = [Path.MOVETO,\n Path.LINETO,\n Path.CURVE3, Path.CURVE3,\n Path.LINETO,\n Path.CURVE3, Path.CURVE3,\n Path.LINETO,\n Path.CURVE3, Path.CURVE3,\n Path.LINETO,\n Path.CURVE3, Path.CURVE3,\n Path.CLOSEPOLY]\n\n path = Path(cp, com)\n\n return path\n\n _style_list[\"round\"] = Round\n\n class Round4(_Base):\n \"\"\"\n Another box with round edges.\n \"\"\"\n\n def __init__(self, pad=0.3, rounding_size=None):\n \"\"\"\n *pad*\n amount of padding\n\n *rounding_size*\n rounding size of edges. *pad* if None\n \"\"\"\n\n self.pad = pad\n self.rounding_size = rounding_size\n super(BoxStyle.Round4, self).__init__()\n\n def transmute(self, x0, y0, width, height, mutation_size):\n\n # padding\n pad = mutation_size * self.pad\n\n # roudning size. Use a half of the pad if not set.\n if self.rounding_size:\n dr = mutation_size * self.rounding_size\n else:\n dr = pad / 2.\n\n width, height = width + 2. * pad - 2 * dr, \\\n height + 2. 
* pad - 2 * dr,\n\n x0, y0 = x0 - pad + dr, y0 - pad + dr,\n x1, y1 = x0 + width, y0 + height\n\n cp = [(x0, y0),\n (x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),\n (x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),\n (x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),\n (x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),\n (x0, y0)]\n\n com = [Path.MOVETO,\n Path.CURVE4, Path.CURVE4, Path.CURVE4,\n Path.CURVE4, Path.CURVE4, Path.CURVE4,\n Path.CURVE4, Path.CURVE4, Path.CURVE4,\n Path.CURVE4, Path.CURVE4, Path.CURVE4,\n Path.CLOSEPOLY]\n\n path = Path(cp, com)\n\n return path\n\n _style_list[\"round4\"] = Round4\n\n class Sawtooth(_Base):\n \"\"\"\n A sawtooth box.\n \"\"\"\n\n def __init__(self, pad=0.3, tooth_size=None):\n \"\"\"\n *pad*\n amount of padding\n\n *tooth_size*\n size of the sawtooth. pad* if None\n \"\"\"\n self.pad = pad\n self.tooth_size = tooth_size\n super(BoxStyle.Sawtooth, self).__init__()\n\n def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):\n\n # padding\n pad = mutation_size * self.pad\n\n # size of sawtooth\n if self.tooth_size is None:\n tooth_size = self.pad * .5 * mutation_size\n else:\n tooth_size = self.tooth_size * mutation_size\n\n tooth_size2 = tooth_size / 2.\n width, height = width + 2. * pad - tooth_size, \\\n height + 2. * pad - tooth_size,\n\n # the sizes of the vertical and horizontal sawtooth are\n # separately adjusted to fit the given box size.\n dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2\n dsx = (width - tooth_size) / dsx_n\n dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2\n dsy = (height - tooth_size) / dsy_n\n\n x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2\n x1, y1 = x0 + width, y0 + height\n\n bottom_saw_x = [x0] + \\\n [x0 + tooth_size2 + dsx * .5 * i\n for i\n in range(dsx_n * 2)] + \\\n [x1 - tooth_size2]\n\n bottom_saw_y = [y0] + \\\n [y0 - tooth_size2, y0,\n y0 + tooth_size2, y0] * dsx_n + \\\n [y0 - tooth_size2]\n\n right_saw_x = [x1] + \\\n [x1 + tooth_size2,\n x1,\n x1 - tooth_size2,\n x1] * dsx_n + \\\n [x1 + tooth_size2]\n\n right_saw_y = [y0] + \\\n [y0 + tooth_size2 + dsy * .5 * i\n for i\n in range(dsy_n * 2)] + \\\n [y1 - tooth_size2]\n\n top_saw_x = [x1] + \\\n [x1 - tooth_size2 - dsx * .5 * i\n for i\n in range(dsx_n * 2)] + \\\n [x0 + tooth_size2]\n\n top_saw_y = [y1] + \\\n [y1 + tooth_size2,\n y1,\n y1 - tooth_size2,\n y1] * dsx_n + \\\n [y1 + tooth_size2]\n\n left_saw_x = [x0] + \\\n [x0 - tooth_size2,\n x0,\n x0 + tooth_size2,\n x0] * dsy_n + \\\n [x0 - tooth_size2]\n\n left_saw_y = [y1] + \\\n [y1 - tooth_size2 - dsy * .5 * i\n for i\n in range(dsy_n * 2)] + \\\n [y0 + tooth_size2]\n\n saw_vertices = list(zip(bottom_saw_x, bottom_saw_y)) + \\\n list(zip(right_saw_x, right_saw_y)) + \\\n list(zip(top_saw_x, top_saw_y)) + \\\n list(zip(left_saw_x, left_saw_y)) + \\\n [(bottom_saw_x[0], bottom_saw_y[0])]\n\n return saw_vertices\n\n def transmute(self, x0, y0, width, height, mutation_size):\n\n saw_vertices = self._get_sawtooth_vertices(x0, y0, width,\n height, mutation_size)\n path = Path(saw_vertices, closed=True)\n return path\n\n _style_list[\"sawtooth\"] = Sawtooth\n\n class Roundtooth(Sawtooth):\n \"\"\"A rounded tooth box.\"\"\"\n def __init__(self, pad=0.3, tooth_size=None):\n \"\"\"\n *pad*\n amount of padding\n\n *tooth_size*\n size of the sawtooth. 
*pad* if None\n \"\"\"\n super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)\n\n def transmute(self, x0, y0, width, height, mutation_size):\n saw_vertices = self._get_sawtooth_vertices(x0, y0,\n width, height,\n mutation_size)\n # Add a trailing vertex to allow us to close the polygon correctly\n saw_vertices = np.concatenate([np.array(saw_vertices),\n [saw_vertices[0]]], axis=0)\n codes = ([Path.MOVETO] +\n [Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1) // 2) +\n [Path.CLOSEPOLY])\n return Path(saw_vertices, codes)\n\n _style_list[\"roundtooth\"] = Roundtooth\n\n if __doc__: # __doc__ could be None if -OO optimization is enabled\n __doc__ = cbook.dedent(__doc__) % \\\n {\"AvailableBoxstyles\": _pprint_styles(_style_list)}\n\ndocstring.interpd.update(\n AvailableBoxstyles=_pprint_styles(BoxStyle._style_list))\n\n\nclass FancyBboxPatch(Patch):\n \"\"\"\n Draw a fancy box around a rectangle with lower left at *xy*=(*x*,\n *y*) with specified width and height.\n\n :class:`FancyBboxPatch` class is similar to :class:`Rectangle`\n class, but it draws a fancy box around the rectangle. The\n transformation of the rectangle box to the fancy box is delegated\n to the :class:`BoxTransmuterBase` and its derived classes.\n\n \"\"\"\n\n def __str__(self):\n return self.__class__.__name__ \\\n + \"(%g,%g;%gx%g)\" % (self._x, self._y,\n self._width, self._height)\n\n @docstring.dedent_interpd\n def __init__(self, xy, width, height,\n boxstyle=\"round\",\n bbox_transmuter=None,\n mutation_scale=1.,\n mutation_aspect=None,\n **kwargs):\n \"\"\"\n *xy* = lower left corner\n\n *width*, *height*\n\n *boxstyle* determines what kind of fancy box will be drawn. It\n can be a string of the style name with comma-separated\n attributes, or an instance of :class:`BoxStyle`. The following\n box styles are available.\n\n %(AvailableBoxstyles)s\n\n *mutation_scale* : a value with which attributes of boxstyle\n (e.g., pad) will be scaled. default=1.\n\n *mutation_aspect* : The height of the rectangle will be\n squeezed by this value before the mutation and the mutated\n box will be stretched by the inverse of it. default=None.\n\n Valid kwargs are:\n %(Patch)s\n \"\"\"\n\n Patch.__init__(self, **kwargs)\n\n self._x = xy[0]\n self._y = xy[1]\n self._width = width\n self._height = height\n\n if boxstyle == \"custom\":\n if bbox_transmuter is None:\n raise ValueError(\"bbox_transmuter argument is needed with \"\n \"custom boxstyle\")\n self._bbox_transmuter = bbox_transmuter\n else:\n self.set_boxstyle(boxstyle)\n\n self._mutation_scale = mutation_scale\n self._mutation_aspect = mutation_aspect\n\n @docstring.dedent_interpd\n def set_boxstyle(self, boxstyle=None, **kw):\n \"\"\"\n Set the box style.\n\n *boxstyle* can be a string with boxstyle name with optional\n comma-separated attributes. 
Alternatively, the attrs can\n be provided as keywords::\n\n set_boxstyle(\"round,pad=0.2\")\n set_boxstyle(\"round\", pad=0.2)\n\n Old attrs are simply forgotten.\n\n Without argument (or with *boxstyle* = None), it returns\n available box styles.\n\n ACCEPTS: %(AvailableBoxstyles)s\n\n \"\"\"\n if boxstyle is None:\n return BoxStyle.pprint_styles()\n\n if isinstance(boxstyle, BoxStyle._Base):\n self._bbox_transmuter = boxstyle\n elif six.callable(boxstyle):\n self._bbox_transmuter = boxstyle\n else:\n self._bbox_transmuter = BoxStyle(boxstyle, **kw)\n\n def set_mutation_scale(self, scale):\n \"\"\"\n Set the mutation scale.\n\n ACCEPTS: float\n \"\"\"\n self._mutation_scale = scale\n\n def get_mutation_scale(self):\n \"\"\"\n Return the mutation scale.\n \"\"\"\n return self._mutation_scale\n\n def set_mutation_aspect(self, aspect):\n \"\"\"\n Set the aspect ratio of the bbox mutation.\n\n ACCEPTS: float\n \"\"\"\n self._mutation_aspect = aspect\n\n def get_mutation_aspect(self):\n \"\"\"\n Return the aspect ratio of the bbox mutation.\n \"\"\"\n return self._mutation_aspect\n\n def get_boxstyle(self):\n \"Return the boxstyle object\"\n return self._bbox_transmuter\n\n def get_path(self):\n \"\"\"\n Return the mutated path of the rectangle\n \"\"\"\n\n _path = self.get_boxstyle()(self._x, self._y,\n self._width, self._height,\n self.get_mutation_scale(),\n self.get_mutation_aspect())\n return _path\n\n # Following methods are borrowed from the Rectangle class.\n\n def get_x(self):\n \"Return the left coord of the rectangle\"\n return self._x\n\n def get_y(self):\n \"Return the bottom coord of the rectangle\"\n return self._y\n\n def get_width(self):\n \"Return the width of the rectangle\"\n return self._width\n\n def get_height(self):\n \"Return the height of the rectangle\"\n return self._height\n\n def set_x(self, x):\n \"\"\"\n Set the left coord of the rectangle\n\n ACCEPTS: float\n \"\"\"\n self._x = x\n\n def set_y(self, y):\n \"\"\"\n Set the bottom coord of the rectangle\n\n ACCEPTS: float\n \"\"\"\n self._y = y\n\n def set_width(self, w):\n \"\"\"\n Set the width of the rectangle\n\n ACCEPTS: float\n \"\"\"\n self._width = w\n\n def set_height(self, h):\n \"\"\"\n Set the height of the rectangle\n\n ACCEPTS: float\n \"\"\"\n self._height = h\n\n def set_bounds(self, *args):\n \"\"\"\n Set the bounds of the rectangle: l,b,w,h\n\n ACCEPTS: (left, bottom, width, height)\n \"\"\"\n if len(args) == 1:\n # a single (left, bottom, width, height) tuple was passed\n l, b, w, h = args[0]\n else:\n l, b, w, h = args\n self._x = l\n self._y = b\n self._width = w\n self._height = h\n\n def get_bbox(self):\n return transforms.Bbox.from_bounds(self._x, self._y,\n self._width, self._height)\n\n\nfrom matplotlib.bezier import split_bezier_intersecting_with_closedpath\nfrom matplotlib.bezier import get_intersection, inside_circle, get_parallels\nfrom matplotlib.bezier import make_wedged_bezier2\nfrom matplotlib.bezier import split_path_inout, get_cos_sin\nfrom matplotlib.bezier import make_path_regular, concatenate_paths\n\n\nclass ConnectionStyle(_Style):\n \"\"\"\n :class:`ConnectionStyle` is a container class which defines\n several connectionstyle classes, which are used to create a path\n between two points. 
These are mainly used with\n :class:`FancyArrowPatch`.\n\n A connectionstyle object can be created either as::\n\n ConnectionStyle.Arc3(rad=0.2)\n\n or::\n\n ConnectionStyle(\"Arc3\", rad=0.2)\n\n or::\n\n ConnectionStyle(\"Arc3, rad=0.2\")\n\n The following classes are defined.\n\n %(AvailableConnectorstyles)s\n\n\n An instance of any connection style class is a callable object,\n whose call signature is::\n\n __call__(self, posA, posB,\n patchA=None, patchB=None,\n shrinkA=2., shrinkB=2.)\n\n and it returns a :class:`Path` instance. *posA* and *posB* are\n tuples of x,y coordinates of the two points to be\n connected. If *patchA* (or *patchB*) is given, the returned path is\n clipped so that it starts (or ends) at the boundary of the\n patch. The path is further shrunk by *shrinkA* (or *shrinkB*)\n which is given in points.\n\n \"\"\"\n\n _style_list = {}\n\n class _Base(object):\n \"\"\"\n A base class for connectionstyle classes. The derived classes need\n to implement a *connect* method whose call signature is::\n\n connect(posA, posB)\n\n where posA and posB are tuples of x, y coordinates to be\n connected. The method needs to return a path connecting two\n points. This base class defines a __call__ method, and a few\n helper methods.\n \"\"\"\n\n class SimpleEvent:\n def __init__(self, xy):\n self.x, self.y = xy\n\n def _clip(self, path, patchA, patchB):\n \"\"\"\n Clip the path to the boundary of the patchA and patchB.\n The starting point of the path needs to be inside of the\n patchA and the end point inside the patch B. The *contains*\n method of each patch object is used to test if the point\n is inside the patch.\n \"\"\"\n\n if patchA:\n def insideA(xy_display):\n xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)\n return patchA.contains(xy_event)[0]\n\n try:\n left, right = split_path_inout(path, insideA)\n except ValueError:\n right = path\n\n path = right\n\n if patchB:\n def insideB(xy_display):\n xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)\n return patchB.contains(xy_event)[0]\n\n try:\n left, right = split_path_inout(path, insideB)\n except ValueError:\n left = path\n\n path = left\n\n return path\n\n def _shrink(self, path, shrinkA, shrinkB):\n \"\"\"\n Shrink the path by a fixed size (in points) with shrinkA and shrinkB\n \"\"\"\n if shrinkA:\n x, y = path.vertices[0]\n insideA = inside_circle(x, y, shrinkA)\n\n try:\n left, right = split_path_inout(path, insideA)\n path = right\n except ValueError:\n pass\n\n if shrinkB:\n x, y = path.vertices[-1]\n insideB = inside_circle(x, y, shrinkB)\n\n try:\n left, right = split_path_inout(path, insideB)\n path = left\n except ValueError:\n pass\n\n return path\n\n def __call__(self, posA, posB,\n shrinkA=2., shrinkB=2., patchA=None, patchB=None):\n \"\"\"\n Calls the *connect* method to create a path between *posA*\n and *posB*. The path is clipped and shrunk.\n \"\"\"\n\n path = self.connect(posA, posB)\n\n clipped_path = self._clip(path, patchA, patchB)\n shrinked_path = self._shrink(clipped_path, shrinkA, shrinkB)\n\n return shrinked_path\n\n def __reduce__(self):\n # because we have decided to nest these classes, we need to\n # add some more information to allow instance pickling.\n import matplotlib.cbook as cbook\n return (cbook._NestedClassGetter(),\n (ConnectionStyle, self.__class__.__name__),\n self.__dict__\n )\n\n class Arc3(_Base):\n \"\"\"\n Creates a simple quadratic bezier curve between two\n points. 
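For example, a minimal sketch (the style is\n typically consumed through :class:`FancyArrowPatch`, and the end\n points here are arbitrary)::\n\n from matplotlib.patches import FancyArrowPatch\n\n # rad=0.2 bows the curve away from the straight A-B line\n arrow = FancyArrowPatch((0, 0), (1, 1),\n connectionstyle=\"arc3,rad=0.2\")\n\n 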
The curve is created so that the middle control point\n (C1) is located at the same distance from the start (C0) and\n end (C2) points, and the distance of C1 to the line\n connecting C0-C2 is *rad* times the distance of C0-C2.\n \"\"\"\n\n def __init__(self, rad=0.):\n \"\"\"\n *rad*\n curvature of the curve.\n \"\"\"\n self.rad = rad\n\n def connect(self, posA, posB):\n x1, y1 = posA\n x2, y2 = posB\n x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.\n dx, dy = x2 - x1, y2 - y1\n\n f = self.rad\n\n cx, cy = x12 + f * dy, y12 - f * dx\n\n vertices = [(x1, y1),\n (cx, cy),\n (x2, y2)]\n codes = [Path.MOVETO,\n Path.CURVE3,\n Path.CURVE3]\n\n return Path(vertices, codes)\n\n _style_list[\"arc3\"] = Arc3\n\n class Angle3(_Base):\n \"\"\"\n Creates a simple quadratic bezier curve between two\n points. The middle control point is placed at the\n intersecting point of two lines which cross the start (or\n end) point and have an angle of angleA (or angleB).\n \"\"\"\n\n def __init__(self, angleA=90, angleB=0):\n \"\"\"\n *angleA*\n starting angle of the path\n\n *angleB*\n ending angle of the path\n \"\"\"\n\n self.angleA = angleA\n self.angleB = angleB\n\n def connect(self, posA, posB):\n x1, y1 = posA\n x2, y2 = posB\n\n cosA, sinA = math.cos(self.angleA / 180. * math.pi),\\\n math.sin(self.angleA / 180. * math.pi),\n cosB, sinB = math.cos(self.angleB / 180. * math.pi),\\\n math.sin(self.angleB / 180. * math.pi),\n\n cx, cy = get_intersection(x1, y1, cosA, sinA,\n x2, y2, cosB, sinB)\n\n vertices = [(x1, y1), (cx, cy), (x2, y2)]\n codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n\n return Path(vertices, codes)\n\n _style_list[\"angle3\"] = Angle3\n\n class Angle(_Base):\n \"\"\"\n Creates a piecewise continuous quadratic bezier path between\n two points. The path has one passing-through point placed at\n the intersecting point of two lines which cross the start\n (or end) point and have an angle of angleA (or angleB). The\n connecting edges are rounded with *rad*.\n \"\"\"\n\n def __init__(self, angleA=90, angleB=0, rad=0.):\n \"\"\"\n *angleA*\n starting angle of the path\n\n *angleB*\n ending angle of the path\n\n *rad*\n rounding radius of the edge\n \"\"\"\n\n self.angleA = angleA\n self.angleB = angleB\n\n self.rad = rad\n\n def connect(self, posA, posB):\n x1, y1 = posA\n x2, y2 = posB\n\n cosA, sinA = math.cos(self.angleA / 180. * math.pi),\\\n math.sin(self.angleA / 180. * math.pi),\n cosB, sinB = math.cos(self.angleB / 180. * math.pi),\\\n math.sin(self.angleB / 180. * math.pi),\n\n cx, cy = get_intersection(x1, y1, cosA, sinA,\n x2, y2, cosB, sinB)\n\n vertices = [(x1, y1)]\n codes = [Path.MOVETO]\n\n if self.rad == 0.:\n vertices.append((cx, cy))\n codes.append(Path.LINETO)\n else:\n dx1, dy1 = x1 - cx, y1 - cy\n d1 = (dx1 ** 2 + dy1 ** 2) ** .5\n f1 = self.rad / d1\n dx2, dy2 = x2 - cx, y2 - cy\n d2 = (dx2 ** 2 + dy2 ** 2) ** .5\n f2 = self.rad / d2\n vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),\n (cx, cy),\n (cx + dx2 * f2, cy + dy2 * f2)])\n codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])\n\n vertices.append((x2, y2))\n codes.append(Path.LINETO)\n\n return Path(vertices, codes)\n\n _style_list[\"angle\"] = Angle\n\n class Arc(_Base):\n \"\"\"\n Creates a piecewise continuous quadratic bezier path between\n two points. The path can have two passing-through points, a\n point placed at the distance of armA and angle of angleA from\n point A, another point with respect to point B. 
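For example (a\n sketch only; the keyword values are purely illustrative)::\n\n ConnectionStyle(\"arc\", angleA=0, angleB=90,\n armA=30, armB=30, rad=5)\n\n 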
The edges are\n rounded with *rad*.\n \"\"\"\n\n def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):\n \"\"\"\n *angleA* :\n starting angle of the path\n\n *angleB* :\n ending angle of the path\n\n *armA* :\n length of the starting arm\n\n *armB* :\n length of the ending arm\n\n *rad* :\n rounding radius of the edges\n \"\"\"\n\n self.angleA = angleA\n self.angleB = angleB\n self.armA = armA\n self.armB = armB\n\n self.rad = rad\n\n def connect(self, posA, posB):\n x1, y1 = posA\n x2, y2 = posB\n\n vertices = [(x1, y1)]\n rounded = []\n codes = [Path.MOVETO]\n\n if self.armA:\n cosA = math.cos(self.angleA / 180. * math.pi)\n sinA = math.sin(self.angleA / 180. * math.pi)\n #x_armA, y_armB\n d = self.armA - self.rad\n rounded.append((x1 + d * cosA, y1 + d * sinA))\n d = self.armA\n rounded.append((x1 + d * cosA, y1 + d * sinA))\n\n if self.armB:\n cosB = math.cos(self.angleB / 180. * math.pi)\n sinB = math.sin(self.angleB / 180. * math.pi)\n x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB\n\n if rounded:\n xp, yp = rounded[-1]\n dx, dy = x_armB - xp, y_armB - yp\n dd = (dx * dx + dy * dy) ** .5\n\n rounded.append((xp + self.rad * dx / dd,\n yp + self.rad * dy / dd))\n vertices.extend(rounded)\n codes.extend([Path.LINETO,\n Path.CURVE3,\n Path.CURVE3])\n else:\n xp, yp = vertices[-1]\n dx, dy = x_armB - xp, y_armB - yp\n dd = (dx * dx + dy * dy) ** .5\n\n d = dd - self.rad\n rounded = [(xp + d * dx / dd, yp + d * dy / dd),\n (x_armB, y_armB)]\n\n if rounded:\n xp, yp = rounded[-1]\n dx, dy = x2 - xp, y2 - yp\n dd = (dx * dx + dy * dy) ** .5\n\n rounded.append((xp + self.rad * dx / dd,\n yp + self.rad * dy / dd))\n vertices.extend(rounded)\n codes.extend([Path.LINETO,\n Path.CURVE3,\n Path.CURVE3])\n\n vertices.append((x2, y2))\n codes.append(Path.LINETO)\n\n return Path(vertices, codes)\n\n _style_list[\"arc\"] = Arc\n\n class Bar(_Base):\n \"\"\"\n A line with *angle* between A and B with *armA* and\n *armB*. One of the arm is extend so that they are connected in\n a right angle. The length of armA is determined by (*armA*\n + *fraction* x AB distance). Same for armB.\n \"\"\"\n\n def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):\n \"\"\"\n *armA* : minimum length of armA\n\n *armB* : minimum length of armB\n\n *fraction* : a fraction of the distance between two points that\n will be added to armA and armB.\n\n *angle* : angle of the connecting line (if None, parallel to A\n and B)\n \"\"\"\n self.armA = armA\n self.armB = armB\n self.fraction = fraction\n self.angle = angle\n\n def connect(self, posA, posB):\n x1, y1 = posA\n x20, y20 = x2, y2 = posB\n\n x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.\n\n theta1 = math.atan2(y2 - y1, x2 - x1)\n dx, dy = x2 - x1, y2 - y1\n dd = (dx * dx + dy * dy) ** .5\n ddx, ddy = dx / dd, dy / dd\n\n armA, armB = self.armA, self.armB\n\n if self.angle is not None:\n #angle = self.angle % 180.\n #if angle < 0. or angle > 180.:\n # angle\n #theta0 = (self.angle%180.)/180.*math.pi\n theta0 = self.angle / 180. * math.pi\n #theta0 = (((self.angle+90)%180.) 
- 90.)/180.*math.pi\n dtheta = theta1 - theta0\n dl = dd * math.sin(dtheta)\n\n dL = dd * math.cos(dtheta)\n\n #x2, y2 = x2 + dl*ddy, y2 - dl*ddx\n x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)\n\n armB = armB - dl\n\n # update\n dx, dy = x2 - x1, y2 - y1\n dd2 = (dx * dx + dy * dy) ** .5\n ddx, ddy = dx / dd2, dy / dd2\n\n else:\n dl = 0.\n\n #if armA > armB:\n # armB = armA + dl\n #else:\n # armA = armB - dl\n\n arm = max(armA, armB)\n f = self.fraction * dd + arm\n #fB = self.fraction*dd + armB\n\n cx1, cy1 = x1 + f * ddy, y1 - f * ddx\n cx2, cy2 = x2 + f * ddy, y2 - f * ddx\n\n vertices = [(x1, y1),\n (cx1, cy1),\n (cx2, cy2),\n (x20, y20)]\n codes = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO]\n\n return Path(vertices, codes)\n\n _style_list[\"bar\"] = Bar\n\n if __doc__:\n __doc__ = cbook.dedent(__doc__) % \\\n {\"AvailableConnectorstyles\": _pprint_styles(_style_list)}\n\n\ndef _point_along_a_line(x0, y0, x1, y1, d):\n \"\"\"\n Find a point along a line connecting (x0, y0) -- (x1, y1) whose\n distance from (x0, y0) is d.\n \"\"\"\n dx, dy = x0 - x1, y0 - y1\n ff = d / (dx * dx + dy * dy) ** .5\n x2, y2 = x0 - ff * dx, y0 - ff * dy\n\n return x2, y2\n\n\nclass ArrowStyle(_Style):\n \"\"\"\n :class:`ArrowStyle` is a container class which defines several\n arrowstyle classes, which are used to create an arrow path along a\n given path. These are mainly used with :class:`FancyArrowPatch`.\n\n An arrowstyle object can be created either as::\n\n ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)\n\n or::\n\n ArrowStyle(\"Fancy\", head_length=.4, head_width=.4, tail_width=.4)\n\n or::\n\n ArrowStyle(\"Fancy, head_length=.4, head_width=.4, tail_width=.4\")\n\n The following classes are defined.\n\n %(AvailableArrowstyles)s\n\n\n An instance of any arrow style class is a callable object,\n whose call signature is::\n\n __call__(self, path, mutation_size, linewidth, aspect_ratio=1.)\n\n and it returns a tuple of a :class:`Path` instance and a boolean\n value. *path* is a :class:`Path` instance along which the arrow\n will be drawn. *mutation_size* and *aspect_ratio* have the same\n meaning as in :class:`BoxStyle`. *linewidth* is the line width to be\n stroked. This is meant to be used to correct the location of the\n head so that it does not overshoot the destination point, but not all\n classes support it.\n\n .. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py\n \"\"\"\n\n _style_list = {}\n\n class _Base(object):\n \"\"\"\n Arrow Transmuter Base class\n\n ArrowTransmuterBase and its derivatives are used to make a fancy\n arrow around a given path. The __call__ method returns a path\n (which will be used to create a PathPatch instance) and a boolean\n value indicating whether the path is open and therefore not\n fillable. This class is not an artist and actual drawing of the\n fancy arrow is done by the FancyArrowPatch class.\n\n \"\"\"\n\n # The derived classes are required to be able to be initialized\n # w/o arguments, i.e., all their arguments (except self) must have\n # default values.\n\n def __init__(self):\n super(ArrowStyle._Base, self).__init__()\n\n @staticmethod\n def ensure_quadratic_bezier(path):\n \"\"\" Some ArrowStyle classes only work with a simple\n quadratic bezier curve (created with the ``arc3`` or\n ``angle3`` connection styles). 
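Such a path consists of exactly\n two segments: a MOVETO carrying the start point, followed by a\n single CURVE3 carrying the two remaining control points (this is\n precisely what the assertions below verify). 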
This static method checks whether the\n provided path is a simple quadratic bezier curve and returns\n its control points if it is.\n \"\"\"\n segments = list(path.iter_segments())\n assert len(segments) == 2\n\n assert segments[0][1] == Path.MOVETO\n assert segments[1][1] == Path.CURVE3\n\n return list(segments[0][0]) + list(segments[1][0])\n\n def transmute(self, path, mutation_size, linewidth):\n \"\"\"\n The transmute method is the very core of the ArrowStyle\n class and must be overridden in the subclasses. It receives\n the path object along which the arrow will be drawn, and\n the mutation_size, with which the size of the arrow head\n etc. will be scaled. The linewidth may be used to adjust\n the path so that it does not pass beyond the given\n points. It returns a tuple of a Path instance and a\n boolean. The boolean value indicates whether the path can\n be filled or not. The return value can also be a list of paths\n and a list of booleans of the same length.\n \"\"\"\n\n raise NotImplementedError('Derived must override')\n\n def __call__(self, path, mutation_size, linewidth,\n aspect_ratio=1.):\n \"\"\"\n The __call__ method is a thin wrapper around the transmute method\n and takes care of the aspect ratio.\n \"\"\"\n\n path = make_path_regular(path)\n\n if aspect_ratio is not None:\n # Squeeze the given height by the aspect_ratio\n\n vertices, codes = path.vertices[:], path.codes[:]\n # Squeeze the height\n vertices[:, 1] = vertices[:, 1] / aspect_ratio\n path_shrinked = Path(vertices, codes)\n # call transmute method with squeezed height.\n path_mutated, fillable = self.transmute(path_shrinked,\n mutation_size,\n linewidth)\n if cbook.iterable(fillable):\n path_list = []\n for p in path_mutated:\n v, c = p.vertices, p.codes\n # Restore the height\n v[:, 1] = v[:, 1] * aspect_ratio\n path_list.append(Path(v, c))\n return path_list, fillable\n else:\n return path_mutated, fillable\n else:\n return self.transmute(path, mutation_size, linewidth)\n\n def __reduce__(self):\n # because we have decided to nest these classes, we need to\n # add some more information to allow instance pickling.\n import matplotlib.cbook as cbook\n return (cbook._NestedClassGetter(),\n (ArrowStyle, self.__class__.__name__),\n self.__dict__\n )\n\n class _Curve(_Base):\n \"\"\"\n A simple arrow which will work with any path instance. The\n returned path is simply the concatenation of the original path and\n at most two paths representing the arrow head at the begin point\n and at the end point. The arrow heads can be either open or closed.\n \"\"\"\n\n def __init__(self, beginarrow=None, endarrow=None,\n fillbegin=False, fillend=False,\n head_length=.2, head_width=.1):\n \"\"\"\n The arrows are drawn if *beginarrow* and/or *endarrow* are\n true. *head_length* and *head_width* determine the size\n of the arrow relative to the *mutation scale*. The\n arrowhead at the begin (or end) is closed if fillbegin (or\n fillend) is True.\n \"\"\"\n self.beginarrow, self.endarrow = beginarrow, endarrow\n self.head_length, self.head_width = \\\n head_length, head_width\n self.fillbegin, self.fillend = fillbegin, fillend\n super(ArrowStyle._Curve, self).__init__()\n\n def _get_arrow_wedge(self, x0, y0, x1, y1,\n head_dist, cos_t, sin_t, linewidth\n ):\n \"\"\"\n Return the paths for arrow heads. Since arrow lines are\n drawn with capstyle=projecting, the arrow goes beyond the\n desired point. 
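(The projecting cap extends the stroke by about half a\n linewidth past the end point; the ``pad_projected`` term in the\n code below compensates for this.) 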
This method also returns the amount of the path\n to be shrinked so that it does not overshoot.\n \"\"\"\n\n # arrow from x0, y0 to x1, y1\n\n dx, dy = x0 - x1, y0 - y1\n cp_distance = math.sqrt(dx ** 2 + dy ** 2)\n\n # pad_projected : amount of pad to account the\n # overshooting of the projection of the wedge\n pad_projected = (.5 * linewidth / sin_t)\n\n # apply pad for projected edge\n ddx = pad_projected * dx / cp_distance\n ddy = pad_projected * dy / cp_distance\n\n # offset for arrow wedge\n dx = dx / cp_distance * head_dist\n dy = dy / cp_distance * head_dist\n\n dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy\n dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy\n\n vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),\n (x1 + ddx, y1 + ddy),\n (x1 + ddx + dx2, y1 + ddy + dy2)]\n codes_arrow = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO]\n\n return vertices_arrow, codes_arrow, ddx, ddy\n\n def transmute(self, path, mutation_size, linewidth):\n\n head_length, head_width = self.head_length * mutation_size, \\\n self.head_width * mutation_size\n head_dist = math.sqrt(head_length ** 2 + head_width ** 2)\n cos_t, sin_t = head_length / head_dist, head_width / head_dist\n\n # begin arrow\n x0, y0 = path.vertices[0]\n x1, y1 = path.vertices[1]\n\n if self.beginarrow:\n verticesA, codesA, ddxA, ddyA = \\\n self._get_arrow_wedge(x1, y1, x0, y0,\n head_dist, cos_t, sin_t,\n linewidth)\n else:\n verticesA, codesA = [], []\n ddxA, ddyA = 0., 0.\n\n # end arrow\n x2, y2 = path.vertices[-2]\n x3, y3 = path.vertices[-1]\n\n if self.endarrow:\n verticesB, codesB, ddxB, ddyB = \\\n self._get_arrow_wedge(x2, y2, x3, y3,\n head_dist, cos_t, sin_t,\n linewidth)\n else:\n verticesB, codesB = [], []\n ddxB, ddyB = 0., 0.\n\n # this simple code will not work if ddx, ddy is greater than\n # separation bettern vertices.\n _path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],\n path.vertices[1:-1],\n [(x3 + ddxB, y3 + ddyB)]]),\n path.codes)]\n _fillable = [False]\n\n if self.beginarrow:\n if self.fillbegin:\n p = np.concatenate([verticesA, [verticesA[0],\n verticesA[0]], ])\n c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])\n _path.append(Path(p, c))\n _fillable.append(True)\n else:\n _path.append(Path(verticesA, codesA))\n _fillable.append(False)\n\n if self.endarrow:\n if self.fillend:\n _fillable.append(True)\n p = np.concatenate([verticesB, [verticesB[0],\n verticesB[0]], ])\n c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])\n _path.append(Path(p, c))\n else:\n _fillable.append(False)\n _path.append(Path(verticesB, codesB))\n\n return _path, _fillable\n\n class Curve(_Curve):\n \"\"\"\n A simple curve without any arrow head.\n \"\"\"\n\n def __init__(self):\n super(ArrowStyle.Curve, self).__init__(\n beginarrow=False, endarrow=False)\n\n _style_list[\"-\"] = Curve\n\n class CurveA(_Curve):\n \"\"\"\n An arrow with a head at its begin point.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_width*\n width of the arrow head\n \"\"\"\n\n super(ArrowStyle.CurveA, self).__init__(\n beginarrow=True, endarrow=False,\n head_length=head_length, head_width=head_width)\n\n _style_list[\"<-\"] = CurveA\n\n class CurveB(_Curve):\n \"\"\"\n An arrow with a head at its end point.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_width*\n width of the arrow head\n \"\"\"\n\n super(ArrowStyle.CurveB, self).__init__(\n 
beginarrow=False, endarrow=True,\n head_length=head_length, head_width=head_width)\n\n _style_list[\"->\"] = CurveB\n\n class CurveAB(_Curve):\n \"\"\"\n An arrow with heads both at the begin and the end point.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_width*\n width of the arrow head\n \"\"\"\n\n super(ArrowStyle.CurveAB, self).__init__(\n beginarrow=True, endarrow=True,\n head_length=head_length, head_width=head_width)\n\n _style_list[\"<->\"] = CurveAB\n\n class CurveFilledA(_Curve):\n \"\"\"\n An arrow with filled triangle head at the begin.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_width*\n width of the arrow head\n \"\"\"\n\n super(ArrowStyle.CurveFilledA, self).__init__(\n beginarrow=True, endarrow=False,\n fillbegin=True, fillend=False,\n head_length=head_length, head_width=head_width)\n\n _style_list[\"<|-\"] = CurveFilledA\n\n class CurveFilledB(_Curve):\n \"\"\"\n An arrow with filled triangle head at the end.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_width*\n width of the arrow head\n \"\"\"\n\n super(ArrowStyle.CurveFilledB, self).__init__(\n beginarrow=False, endarrow=True,\n fillbegin=False, fillend=True,\n head_length=head_length, head_width=head_width)\n\n _style_list[\"-|>\"] = CurveFilledB\n\n class CurveFilledAB(_Curve):\n \"\"\"\n An arrow with filled triangle heads both at the begin and the end\n point.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_width*\n width of the arrow head\n \"\"\"\n\n super(ArrowStyle.CurveFilledAB, self).__init__(\n beginarrow=True, endarrow=True,\n fillbegin=True, fillend=True,\n head_length=head_length, head_width=head_width)\n\n _style_list[\"<|-|>\"] = CurveFilledAB\n\n class _Bracket(_Base):\n\n def __init__(self, bracketA=None, bracketB=None,\n widthA=1., widthB=1.,\n lengthA=0.2, lengthB=0.2,\n angleA=None, angleB=None,\n scaleA=None, scaleB=None\n ):\n self.bracketA, self.bracketB = bracketA, bracketB\n self.widthA, self.widthB = widthA, widthB\n self.lengthA, self.lengthB = lengthA, lengthB\n self.angleA, self.angleB = angleA, angleB\n self.scaleA, self.scaleB = scaleA, scaleB\n\n def _get_bracket(self, x0, y0,\n cos_t, sin_t, width, length,\n ):\n\n # arrow from x0, y0 to x1, y1\n from matplotlib.bezier import get_normal_points\n x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)\n\n dx, dy = length * cos_t, length * sin_t\n\n vertices_arrow = [(x1 + dx, y1 + dy),\n (x1, y1),\n (x2, y2),\n (x2 + dx, y2 + dy)]\n codes_arrow = [Path.MOVETO,\n Path.LINETO,\n Path.LINETO,\n Path.LINETO]\n\n return vertices_arrow, codes_arrow\n\n def transmute(self, path, mutation_size, linewidth):\n\n if self.scaleA is None:\n scaleA = mutation_size\n else:\n scaleA = self.scaleA\n\n if self.scaleB is None:\n scaleB = mutation_size\n else:\n scaleB = self.scaleB\n\n vertices_list, codes_list = [], []\n\n if self.bracketA:\n x0, y0 = path.vertices[0]\n x1, y1 = path.vertices[1]\n cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)\n verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,\n self.widthA * scaleA,\n self.lengthA * scaleA)\n vertices_list.append(verticesA)\n codes_list.append(codesA)\n\n vertices_list.append(path.vertices)\n codes_list.append(path.codes)\n\n if self.bracketB:\n x0, y0 = path.vertices[-1]\n 
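# use the last two vertices to get the direction of the path at\n # the B end, mirroring the bracketA branch above\n 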
x1, y1 = path.vertices[-2]\n cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)\n verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,\n self.widthB * scaleB,\n self.lengthB * scaleB)\n vertices_list.append(verticesB)\n codes_list.append(codesB)\n\n vertices = np.concatenate(vertices_list)\n codes = np.concatenate(codes_list)\n\n p = Path(vertices, codes)\n\n return p, False\n\n class BracketAB(_Bracket):\n \"\"\"\n An arrow with a bracket(]) at both ends.\n \"\"\"\n\n def __init__(self,\n widthA=1., lengthA=0.2, angleA=None,\n widthB=1., lengthB=0.2, angleB=None):\n \"\"\"\n *widthA*\n width of the bracket\n\n *lengthA*\n length of the bracket\n\n *angleA*\n angle between the bracket and the line\n\n *widthB*\n width of the bracket\n\n *lengthB*\n length of the bracket\n\n *angleB*\n angle between the bracket and the line\n \"\"\"\n\n super(ArrowStyle.BracketAB, self).__init__(\n True, True, widthA=widthA, lengthA=lengthA,\n angleA=angleA, widthB=widthB, lengthB=lengthB,\n angleB=angleB)\n\n _style_list[\"]-[\"] = BracketAB\n\n class BracketA(_Bracket):\n \"\"\"\n An arrow with a bracket(]) at its end.\n \"\"\"\n\n def __init__(self, widthA=1., lengthA=0.2, angleA=None):\n \"\"\"\n *widthA*\n width of the bracket\n\n *lengthA*\n length of the bracket\n\n *angleA*\n angle between the bracket and the line\n \"\"\"\n\n super(ArrowStyle.BracketA, self).__init__(True, None,\n widthA=widthA, lengthA=lengthA, angleA=angleA)\n\n _style_list[\"]-\"] = BracketA\n\n class BracketB(_Bracket):\n \"\"\"\n An arrow with a bracket([) at its end.\n \"\"\"\n\n def __init__(self, widthB=1., lengthB=0.2, angleB=None):\n \"\"\"\n *widthB*\n width of the bracket\n\n *lengthB*\n length of the bracket\n\n *angleB*\n angle between the bracket and the line\n \"\"\"\n\n super(ArrowStyle.BracketB, self).__init__(None, True,\n widthB=widthB, lengthB=lengthB, angleB=angleB)\n\n _style_list[\"-[\"] = BracketB\n\n class BarAB(_Bracket):\n \"\"\"\n An arrow with a bar(|) at both ends.\n \"\"\"\n\n def __init__(self,\n widthA=1., angleA=None,\n widthB=1., angleB=None):\n \"\"\"\n *widthA*\n width of the bracket\n\n *lengthA*\n length of the bracket\n\n *angleA*\n angle between the bracket and the line\n\n *widthB*\n width of the bracket\n\n *lengthB*\n length of the bracket\n\n *angleB*\n angle between the bracket and the line\n \"\"\"\n\n super(ArrowStyle.BarAB, self).__init__(\n True, True, widthA=widthA, lengthA=0, angleA=angleA,\n widthB=widthB, lengthB=0, angleB=angleB)\n\n _style_list[\"|-|\"] = BarAB\n\n class Simple(_Base):\n \"\"\"\n A simple arrow. 
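For example, a minimal\n sketch (assuming the style is used through :class:`FancyArrowPatch`\n with its default ``arc3`` connection style, which yields the\n required quadratic bezier path)::\n\n FancyArrowPatch((0, 0), (1, 1), arrowstyle=\"simple\",\n mutation_scale=20)\n\n 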
Only works with a quadratic bezier curve.\n \"\"\"\n\n def __init__(self, head_length=.5, head_width=.5, tail_width=.2):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_with*\n width of the arrow head\n\n *tail_width*\n width of the arrow tail\n\n \"\"\"\n\n self.head_length, self.head_width, self.tail_width = \\\n head_length, head_width, tail_width\n super(ArrowStyle.Simple, self).__init__()\n\n def transmute(self, path, mutation_size, linewidth):\n\n x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)\n\n # divide the path into a head and a tail\n head_length = self.head_length * mutation_size\n in_f = inside_circle(x2, y2, head_length)\n arrow_path = [(x0, y0), (x1, y1), (x2, y2)]\n\n from .bezier import NonIntersectingPathException\n\n try:\n arrow_out, arrow_in = \\\n split_bezier_intersecting_with_closedpath(arrow_path,\n in_f,\n tolerence=0.01)\n except NonIntersectingPathException:\n # if this happens, make a straight line of the head_length\n # long.\n x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)\n x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)\n arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]\n arrow_out = None\n\n # head\n head_width = self.head_width * mutation_size\n head_left, head_right = \\\n make_wedged_bezier2(arrow_in, head_width / 2.,\n wm=.5)\n\n # tail\n if arrow_out is not None:\n tail_width = self.tail_width * mutation_size\n tail_left, tail_right = get_parallels(arrow_out,\n tail_width / 2.)\n\n #head_right, head_left = head_r, head_l\n patch_path = [(Path.MOVETO, tail_right[0]),\n (Path.CURVE3, tail_right[1]),\n (Path.CURVE3, tail_right[2]),\n (Path.LINETO, head_right[0]),\n (Path.CURVE3, head_right[1]),\n (Path.CURVE3, head_right[2]),\n (Path.CURVE3, head_left[1]),\n (Path.CURVE3, head_left[0]),\n (Path.LINETO, tail_left[2]),\n (Path.CURVE3, tail_left[1]),\n (Path.CURVE3, tail_left[0]),\n (Path.LINETO, tail_right[0]),\n (Path.CLOSEPOLY, tail_right[0]),\n ]\n else:\n patch_path = [(Path.MOVETO, head_right[0]),\n (Path.CURVE3, head_right[1]),\n (Path.CURVE3, head_right[2]),\n (Path.CURVE3, head_left[1]),\n (Path.CURVE3, head_left[0]),\n (Path.CLOSEPOLY, head_left[0]),\n ]\n\n path = Path([p for c, p in patch_path], [c for c, p in patch_path])\n\n return path, True\n\n _style_list[\"simple\"] = Simple\n\n class Fancy(_Base):\n \"\"\"\n A fancy arrow. 
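The head and tail widths\n are scaled by *mutation_scale*; e.g. (the values here are\n illustrative only)::\n\n FancyArrowPatch((0, 0), (1, 1),\n arrowstyle=\"fancy,head_width=0.6,tail_width=0.3\",\n mutation_scale=20)\n\n 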
Only works with a quadratic bezier curve.\n \"\"\"\n\n def __init__(self, head_length=.4, head_width=.4, tail_width=.4):\n \"\"\"\n *head_length*\n length of the arrow head\n\n *head_with*\n width of the arrow head\n\n *tail_width*\n width of the arrow tail\n\n \"\"\"\n\n self.head_length, self.head_width, self.tail_width = \\\n head_length, head_width, tail_width\n super(ArrowStyle.Fancy, self).__init__()\n\n def transmute(self, path, mutation_size, linewidth):\n\n x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)\n\n # divide the path into a head and a tail\n head_length = self.head_length * mutation_size\n arrow_path = [(x0, y0), (x1, y1), (x2, y2)]\n\n from .bezier import NonIntersectingPathException\n\n # path for head\n in_f = inside_circle(x2, y2, head_length)\n try:\n path_out, path_in = \\\n split_bezier_intersecting_with_closedpath(\n arrow_path,\n in_f,\n tolerence=0.01)\n except NonIntersectingPathException:\n # if this happens, make a straight line of the head_length\n # long.\n x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)\n x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)\n arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]\n path_head = arrow_path\n else:\n path_head = path_in\n\n # path for head\n in_f = inside_circle(x2, y2, head_length * .8)\n path_out, path_in = \\\n split_bezier_intersecting_with_closedpath(\n arrow_path,\n in_f,\n tolerence=0.01)\n path_tail = path_out\n\n # head\n head_width = self.head_width * mutation_size\n head_l, head_r = make_wedged_bezier2(path_head,\n head_width / 2.,\n wm=.6)\n\n # tail\n tail_width = self.tail_width * mutation_size\n tail_left, tail_right = make_wedged_bezier2(path_tail,\n tail_width * .5,\n w1=1., wm=0.6, w2=0.3)\n\n # path for head\n in_f = inside_circle(x0, y0, tail_width * .3)\n path_in, path_out = \\\n split_bezier_intersecting_with_closedpath(\n arrow_path,\n in_f,\n tolerence=0.01)\n tail_start = path_in[-1]\n\n head_right, head_left = head_r, head_l\n patch_path = [(Path.MOVETO, tail_start),\n (Path.LINETO, tail_right[0]),\n (Path.CURVE3, tail_right[1]),\n (Path.CURVE3, tail_right[2]),\n (Path.LINETO, head_right[0]),\n (Path.CURVE3, head_right[1]),\n (Path.CURVE3, head_right[2]),\n (Path.CURVE3, head_left[1]),\n (Path.CURVE3, head_left[0]),\n (Path.LINETO, tail_left[2]),\n (Path.CURVE3, tail_left[1]),\n (Path.CURVE3, tail_left[0]),\n (Path.LINETO, tail_start),\n (Path.CLOSEPOLY, tail_start),\n ]\n path = Path([p for c, p in patch_path], [c for c, p in patch_path])\n\n return path, True\n\n _style_list[\"fancy\"] = Fancy\n\n class Wedge(_Base):\n \"\"\"\n Wedge(?) shape. Only wokrs with a quadratic bezier curve. The\n begin point has a width of the tail_width and the end point has a\n width of 0. 
At the middle, the width is shrink_factor*tail_width.\n\n \"\"\"\n\n def __init__(self, tail_width=.3, shrink_factor=0.5):\n \"\"\"\n *tail_width*\n width of the tail\n\n *shrink_factor*\n fraction of the arrow width at the middle point\n \"\"\"\n\n self.tail_width = tail_width\n self.shrink_factor = shrink_factor\n super(ArrowStyle.Wedge, self).__init__()\n\n def transmute(self, path, mutation_size, linewidth):\n\n x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)\n\n arrow_path = [(x0, y0), (x1, y1), (x2, y2)]\n b_plus, b_minus = make_wedged_bezier2(\n arrow_path,\n self.tail_width * mutation_size / 2.,\n wm=self.shrink_factor)\n\n patch_path = [(Path.MOVETO, b_plus[0]),\n (Path.CURVE3, b_plus[1]),\n (Path.CURVE3, b_plus[2]),\n (Path.LINETO, b_minus[2]),\n (Path.CURVE3, b_minus[1]),\n (Path.CURVE3, b_minus[0]),\n (Path.CLOSEPOLY, b_minus[0]),\n ]\n path = Path([p for c, p in patch_path], [c for c, p in patch_path])\n\n return path, True\n\n _style_list[\"wedge\"] = Wedge\n\n if __doc__:\n __doc__ = cbook.dedent(__doc__) % \\\n {\"AvailableArrowstyles\": _pprint_styles(_style_list)}\n\n\ndocstring.interpd.update(\n AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),\n AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),\n)\n\n\nclass FancyArrowPatch(Patch):\n \"\"\"\n A fancy arrow patch. It draws an arrow using the\n :class:`ArrowStyle`.\n \"\"\"\n\n def __str__(self):\n\n if self._posA_posB is not None:\n (x1, y1), (x2, y2) = self._posA_posB\n return self.__class__.__name__ \\\n + \"(%g,%g->%g,%g)\" % (x1, y1, x2, y2)\n else:\n return self.__class__.__name__ \\\n + \"(%s)\" % (str(self._path_original),)\n\n @docstring.dedent_interpd\n def __init__(self, posA=None, posB=None,\n path=None,\n arrowstyle=\"simple\",\n arrow_transmuter=None,\n connectionstyle=\"arc3\",\n connector=None,\n patchA=None,\n patchB=None,\n shrinkA=2.,\n shrinkB=2.,\n mutation_scale=1.,\n mutation_aspect=None,\n dpi_cor=1.,\n **kwargs):\n \"\"\"\n If *posA* and *posB* are given, a path connecting the two points\n is created according to the connectionstyle. The path will be\n clipped with *patchA* and *patchB* and further shrunk by\n *shrinkA* and *shrinkB*. An arrow is drawn along this\n resulting path using the *arrowstyle* parameter. If *path*\n is provided, an arrow is drawn along this path and *patchA*,\n *patchB*, *shrinkA*, and *shrinkB* are ignored.\n\n The *connectionstyle* describes how *posA* and *posB* are\n connected. It can be an instance of the ConnectionStyle class\n (matplotlib.patches.ConnectionStyle) or a string of the\n connectionstyle name, with optional comma-separated\n attributes. The following connection styles are available.\n\n %(AvailableConnectorstyles)s\n\n\n The *arrowstyle* describes how the fancy arrow will be\n drawn. It can be a string of the available arrowstyle names,\n with optional comma-separated attributes, or an instance of the\n ArrowStyle class. The optional attributes are meant to be\n scaled with the *mutation_scale*. The following arrow styles are\n available.\n\n %(AvailableArrowstyles)s\n\n *mutation_scale* : a value with which attributes of arrowstyle\n (e.g., head_length) will be scaled. default=1.\n\n *mutation_aspect* : The height of the rectangle will be\n squeezed by this value before the mutation and the mutated\n box will be stretched by the inverse of it. 
default=None.\n\n Valid kwargs are:\n %(Patch)s\n \"\"\"\n\n if posA is not None and posB is not None and path is None:\n self._posA_posB = [posA, posB]\n\n if connectionstyle is None:\n connectionstyle = \"arc3\"\n self.set_connectionstyle(connectionstyle)\n\n elif posA is None and posB is None and path is not None:\n self._posA_posB = None\n self._connetors = None\n else:\n raise ValueError(\"either posA and posB, or path needs to \"\n \"be provided\")\n\n self.patchA = patchA\n self.patchB = patchB\n self.shrinkA = shrinkA\n self.shrinkB = shrinkB\n\n Patch.__init__(self, **kwargs)\n\n self._path_original = path\n\n self.set_arrowstyle(arrowstyle)\n\n self._mutation_scale = mutation_scale\n self._mutation_aspect = mutation_aspect\n\n self.set_dpi_cor(dpi_cor)\n #self._draw_in_display_coordinate = True\n\n def set_dpi_cor(self, dpi_cor):\n \"\"\"\n dpi_cor is currently used for linewidth-related things and\n shrink factor. Mutation scale is not affected by this.\n \"\"\"\n\n self._dpi_cor = dpi_cor\n\n def get_dpi_cor(self):\n \"\"\"\n dpi_cor is currently used for linewidth-related things and\n shrink factor. Mutation scale is not affected by this.\n \"\"\"\n\n return self._dpi_cor\n\n def set_positions(self, posA, posB):\n \"\"\" Set the begin and end positions of the connecting\n path. Use the current value if None.\n \"\"\"\n if posA is not None:\n self._posA_posB[0] = posA\n if posB is not None:\n self._posA_posB[1] = posB\n\n def set_patchA(self, patchA):\n \"\"\" Set the begin patch.\n \"\"\"\n self.patchA = patchA\n\n def set_patchB(self, patchB):\n \"\"\" Set the end patch.\n \"\"\"\n self.patchB = patchB\n\n def set_connectionstyle(self, connectionstyle, **kw):\n \"\"\"\n Set the connection style.\n\n *connectionstyle* can be a string with connectionstyle name with\n optional comma-separated attributes. Alternatively, the attrs can be\n provided as keywords.\n\n set_connectionstyle(\"arc,angleA=0,armA=30,rad=10\")\n set_connectionstyle(\"arc\", angleA=0,armA=30,rad=10)\n\n Old attrs are simply forgotten.\n\n Without argument (or with connectionstyle=None), return\n available styles as a list of strings.\n \"\"\"\n\n if connectionstyle is None:\n return ConnectionStyle.pprint_styles()\n\n if isinstance(connectionstyle, ConnectionStyle._Base):\n self._connector = connectionstyle\n elif six.callable(connectionstyle):\n # we may need to check the calling convention of the given function\n self._connector = connectionstyle\n else:\n self._connector = ConnectionStyle(connectionstyle, **kw)\n\n def get_connectionstyle(self):\n \"\"\"\n Return the ConnectionStyle instance\n \"\"\"\n return self._connector\n\n def set_arrowstyle(self, arrowstyle=None, **kw):\n \"\"\"\n Set the arrow style.\n\n *arrowstyle* can be a string with arrowstyle name with optional\n comma-separated attributes. 
Alternatively, the attrs can\n be provided as keywords.\n\n set_arrowstyle(\"Fancy,head_length=0.2\")\n set_arrowstyle(\"fancy\", head_length=0.2)\n\n Old attrs simply are forgotten.\n\n Without argument (or with arrowstyle=None), return\n available box styles as a list of strings.\n \"\"\"\n\n if arrowstyle is None:\n return ArrowStyle.pprint_styles()\n\n if isinstance(arrowstyle, ArrowStyle._Base):\n self._arrow_transmuter = arrowstyle\n else:\n self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)\n\n def get_arrowstyle(self):\n \"\"\"\n Return the arrowstyle object\n \"\"\"\n return self._arrow_transmuter\n\n def set_mutation_scale(self, scale):\n \"\"\"\n Set the mutation scale.\n\n ACCEPTS: float\n \"\"\"\n self._mutation_scale = scale\n\n def get_mutation_scale(self):\n \"\"\"\n Return the mutation scale.\n \"\"\"\n return self._mutation_scale\n\n def set_mutation_aspect(self, aspect):\n \"\"\"\n Set the aspect ratio of the bbox mutation.\n\n ACCEPTS: float\n \"\"\"\n self._mutation_aspect = aspect\n\n def get_mutation_aspect(self):\n \"\"\"\n Return the aspect ratio of the bbox mutation.\n \"\"\"\n return self._mutation_aspect\n\n def get_path(self):\n \"\"\"\n return the path of the arrow in the data coordinate. Use\n get_path_in_displaycoord() method to retrieve the arrow path\n in the display coord.\n \"\"\"\n _path, fillable = self.get_path_in_displaycoord()\n\n if cbook.iterable(fillable):\n _path = concatenate_paths(_path)\n\n return self.get_transform().inverted().transform_path(_path)\n\n def get_path_in_displaycoord(self):\n \"\"\"\n Return the mutated path of the arrow in the display coord\n \"\"\"\n\n dpi_cor = self.get_dpi_cor()\n\n if self._posA_posB is not None:\n posA = self.get_transform().transform_point(self._posA_posB[0])\n posB = self.get_transform().transform_point(self._posA_posB[1])\n _path = self.get_connectionstyle()(posA, posB,\n patchA=self.patchA,\n patchB=self.patchB,\n shrinkA=self.shrinkA * dpi_cor,\n shrinkB=self.shrinkB * dpi_cor\n )\n else:\n _path = self.get_transform().transform_path(self._path_original)\n\n _path, fillable = self.get_arrowstyle()(_path,\n self.get_mutation_scale(),\n self.get_linewidth() * dpi_cor,\n self.get_mutation_aspect()\n )\n\n #if not fillable:\n # self._fill = False\n\n return _path, fillable\n\n def draw(self, renderer):\n if not self.get_visible():\n return\n\n renderer.open_group('patch', self.get_gid())\n gc = renderer.new_gc()\n\n gc.set_foreground(self._edgecolor, isRGBA=True)\n\n lw = self._linewidth\n if self._edgecolor[3] == 0:\n lw = 0\n gc.set_linewidth(lw)\n gc.set_linestyle(self._linestyle)\n\n gc.set_antialiased(self._antialiased)\n self._set_gc_clip(gc)\n gc.set_capstyle('round')\n gc.set_snap(self.get_snap())\n\n rgbFace = self._facecolor\n if rgbFace[3] == 0:\n rgbFace = None # (some?) renderers expect this as no-fill signal\n\n gc.set_alpha(self._alpha)\n\n if self._hatch:\n gc.set_hatch(self._hatch)\n\n if self.get_sketch_params() is not None:\n gc.set_sketch_params(*self.get_sketch_params())\n\n # FIXME : dpi_cor is for the dpi-dependecy of the\n # linewidth. 
There could be room for improvement.\n #\n #dpi_cor = renderer.points_to_pixels(1.)\n self.set_dpi_cor(renderer.points_to_pixels(1.))\n path, fillable = self.get_path_in_displaycoord()\n\n if not cbook.iterable(fillable):\n path = [path]\n fillable = [fillable]\n\n affine = transforms.IdentityTransform()\n\n if self.get_path_effects():\n from matplotlib.patheffects import PathEffectRenderer\n renderer = PathEffectRenderer(self.get_path_effects(), renderer)\n\n for p, f in zip(path, fillable):\n if f:\n renderer.draw_path(gc, p, affine, rgbFace)\n else:\n renderer.draw_path(gc, p, affine, None)\n\n gc.restore()\n renderer.close_group('patch')\n\n\nclass ConnectionPatch(FancyArrowPatch):\n \"\"\"\n A :class:`~matplotlib.patches.ConnectionPatch` class is to make\n connecting lines between two points (possibly in different axes).\n \"\"\"\n def __str__(self):\n return \"ConnectionPatch((%g,%g),(%g,%g))\" % \\\n (self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])\n\n @docstring.dedent_interpd\n def __init__(self, xyA, xyB, coordsA, coordsB=None,\n axesA=None, axesB=None,\n arrowstyle=\"-\",\n arrow_transmuter=None,\n connectionstyle=\"arc3\",\n connector=None,\n patchA=None,\n patchB=None,\n shrinkA=0.,\n shrinkB=0.,\n mutation_scale=10.,\n mutation_aspect=None,\n clip_on=False,\n dpi_cor=1.,\n **kwargs):\n \"\"\"\n Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*\n\n\n Valid keys are\n\n\n =============== ======================================================\n Key Description\n =============== ======================================================\n arrowstyle the arrow style\n connectionstyle the connection style\n relpos default is (0.5, 0.5)\n patchA default is bounding box of the text\n patchB default is None\n shrinkA default is 2 points\n shrinkB default is 2 points\n mutation_scale default is text size (in points)\n mutation_aspect default is 1.\n ? any key for :class:`matplotlib.patches.PathPatch`\n =============== ======================================================\n\n\n *coordsA* and *coordsB* are strings that indicate the\n coordinates of *xyA* and *xyB*.\n\n ================= ===================================================\n Property Description\n ================= ===================================================\n 'figure points' points from the lower left corner of the figure\n 'figure pixels' pixels from the lower left corner of the figure\n 'figure fraction' 0,0 is lower left of figure and 1,1 is upper, right\n 'axes points' points from lower left corner of axes\n 'axes pixels' pixels from lower left corner of axes\n 'axes fraction' 0,1 is lower left of axes and 1,1 is upper right\n 'data' use the coordinate system of the object being\n annotated (default)\n 'offset points' Specify an offset (in points) from the *xy* value\n\n 'polar' you can specify *theta*, *r* for the annotation,\n even in cartesian plots. 
Note that if you\n are using a polar axes, you do not need\n to specify polar for the coordinate\n system since that is the native \"data\" coordinate\n system.\n ================= ===================================================\n\n \"\"\"\n if coordsB is None:\n coordsB = coordsA\n # we'll draw ourself after the artist we annotate by default\n self.xy1 = xyA\n self.xy2 = xyB\n self.coords1 = coordsA\n self.coords2 = coordsB\n\n self.axesA = axesA\n self.axesB = axesB\n\n FancyArrowPatch.__init__(self,\n posA=(0, 0), posB=(1, 1),\n arrowstyle=arrowstyle,\n arrow_transmuter=arrow_transmuter,\n connectionstyle=connectionstyle,\n connector=connector,\n patchA=patchA,\n patchB=patchB,\n shrinkA=shrinkA,\n shrinkB=shrinkB,\n mutation_scale=mutation_scale,\n mutation_aspect=mutation_aspect,\n clip_on=clip_on,\n dpi_cor=dpi_cor,\n **kwargs)\n\n # if True, draw annotation only if self.xy is inside the axes\n self._annotation_clip = None\n\n def _get_xy(self, x, y, s, axes=None):\n \"\"\"\n caculate the pixel position of given point\n \"\"\"\n\n if axes is None:\n axes = self.axes\n\n if s == 'data':\n trans = axes.transData\n x = float(self.convert_xunits(x))\n y = float(self.convert_yunits(y))\n return trans.transform_point((x, y))\n elif s == 'offset points':\n # convert the data point\n dx, dy = self.xy\n\n # prevent recursion\n if self.xycoords == 'offset points':\n return self._get_xy(dx, dy, 'data')\n\n dx, dy = self._get_xy(dx, dy, self.xycoords)\n\n # convert the offset\n dpi = self.figure.get_dpi()\n x *= dpi / 72.\n y *= dpi / 72.\n\n # add the offset to the data point\n x += dx\n y += dy\n\n return x, y\n elif s == 'polar':\n theta, r = x, y\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n trans = axes.transData\n return trans.transform_point((x, y))\n elif s == 'figure points':\n # points from the lower left corner of the figure\n dpi = self.figure.dpi\n l, b, w, h = self.figure.bbox.bounds\n r = l + w\n t = b + h\n\n x *= dpi / 72.\n y *= dpi / 72.\n if x < 0:\n x = r + x\n if y < 0:\n y = t + y\n return x, y\n elif s == 'figure pixels':\n # pixels from the lower left corner of the figure\n l, b, w, h = self.figure.bbox.bounds\n r = l + w\n t = b + h\n if x < 0:\n x = r + x\n if y < 0:\n y = t + y\n return x, y\n elif s == 'figure fraction':\n # (0,0) is lower left, (1,1) is upper right of figure\n trans = self.figure.transFigure\n return trans.transform_point((x, y))\n elif s == 'axes points':\n # points from the lower left corner of the axes\n dpi = self.figure.dpi\n l, b, w, h = axes.bbox.bounds\n r = l + w\n t = b + h\n if x < 0:\n x = r + x * dpi / 72.\n else:\n x = l + x * dpi / 72.\n if y < 0:\n y = t + y * dpi / 72.\n else:\n y = b + y * dpi / 72.\n return x, y\n elif s == 'axes pixels':\n #pixels from the lower left corner of the axes\n\n l, b, w, h = axes.bbox.bounds\n r = l + w\n t = b + h\n if x < 0:\n x = r + x\n else:\n x = l + x\n if y < 0:\n y = t + y\n else:\n y = b + y\n return x, y\n elif s == 'axes fraction':\n #(0,0) is lower left, (1,1) is upper right of axes\n trans = axes.transAxes\n return trans.transform_point((x, y))\n\n def set_annotation_clip(self, b):\n \"\"\"\n set *annotation_clip* attribute.\n\n * True: the annotation will only be drawn when self.xy is inside the\n axes.\n * False: the annotation will always be drawn regardless of its\n position.\n * None: the self.xy will be checked only if *xycoords* is \"data\"\n \"\"\"\n self._annotation_clip = b\n\n def get_annotation_clip(self):\n \"\"\"\n Return *annotation_clip* attribute.\n See 
:meth:`set_annotation_clip` for the meaning of return values.\n \"\"\"\n return self._annotation_clip\n\n def get_path_in_displaycoord(self):\n \"\"\"\n Return the mutated path of the arrow in the display coord\n \"\"\"\n\n dpi_cor = self.get_dpi_cor()\n\n x, y = self.xy1\n posA = self._get_xy(x, y, self.coords1, self.axesA)\n\n x, y = self.xy2\n posB = self._get_xy(x, y, self.coords2, self.axesB)\n\n _path = self.get_connectionstyle()(posA, posB,\n patchA=self.patchA,\n patchB=self.patchB,\n shrinkA=self.shrinkA * dpi_cor,\n shrinkB=self.shrinkB * dpi_cor\n )\n\n _path, fillable = self.get_arrowstyle()(_path,\n self.get_mutation_scale(),\n self.get_linewidth() * dpi_cor,\n self.get_mutation_aspect()\n )\n\n return _path, fillable\n\n def _check_xy(self, renderer):\n \"\"\"\n check if the annotation need to\n be drawn.\n \"\"\"\n\n b = self.get_annotation_clip()\n\n if b or (b is None and self.coords1 == \"data\"):\n x, y = self.xy1\n xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)\n if not self.axes.contains_point(xy_pixel):\n return False\n\n if b or (b is None and self.coords2 == \"data\"):\n x, y = self.xy2\n xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)\n if self.axesB is None:\n axes = self.axes\n else:\n axes = self.axesB\n if not axes.contains_point(xy_pixel):\n return False\n\n return True\n\n def draw(self, renderer):\n \"\"\"\n Draw.\n \"\"\"\n\n if renderer is not None:\n self._renderer = renderer\n if not self.get_visible():\n return\n\n if not self._check_xy(renderer):\n return\n\n FancyArrowPatch.draw(self, renderer)\n", "\"\"\"\nA module providing some utility functions regarding bezier path manipulation.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport numpy as np\nfrom matplotlib.path import Path\n\nfrom operator import xor\nimport warnings\n\n\nclass NonIntersectingPathException(ValueError):\n pass\n\n# some functions\n\n\ndef get_intersection(cx1, cy1, cos_t1, sin_t1,\n cx2, cy2, cos_t2, sin_t2):\n \"\"\" return a intersecting point between a line through (cx1, cy1)\n and having angle t1 and a line through (cx2, cy2) and angle t2.\n \"\"\"\n\n # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.\n # line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1\n\n line1_rhs = sin_t1 * cx1 - cos_t1 * cy1\n line2_rhs = sin_t2 * cx2 - cos_t2 * cy2\n\n # rhs matrix\n a, b = sin_t1, -cos_t1\n c, d = sin_t2, -cos_t2\n\n ad_bc = a * d - b * c\n if ad_bc == 0.:\n raise ValueError(\"Given lines do not intersect\")\n\n #rhs_inverse\n a_, b_ = d, -b\n c_, d_ = -c, a\n a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]\n\n x = a_ * line1_rhs + b_ * line2_rhs\n y = c_ * line1_rhs + d_ * line2_rhs\n\n return x, y\n\n\ndef get_normal_points(cx, cy, cos_t, sin_t, length):\n \"\"\"\n For a line passing through (*cx*, *cy*) and having a angle *t*, return\n locations of the two points located along its perpendicular line at the\n distance of *length*.\n \"\"\"\n\n if length == 0.:\n return cx, cy, cx, cy\n\n cos_t1, sin_t1 = sin_t, -cos_t\n cos_t2, sin_t2 = -sin_t, cos_t\n\n x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy\n x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy\n\n return x1, y1, x2, y2\n\n\n## BEZIER routines\n\n# subdividing bezier curve\n# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html\n\n\ndef _de_casteljau1(beta, t):\n next_beta = beta[:-1] * (1 - t) + beta[1:] * t\n return next_beta\n\n\ndef split_de_casteljau(beta, t):\n \"\"\"split a 
bezier segment defined by its control points *beta*\n into two separate segments divided at *t* and return their control points.\n\n \"\"\"\n beta = np.asarray(beta)\n beta_list = [beta]\n while True:\n beta = _de_casteljau1(beta, t)\n beta_list.append(beta)\n if len(beta) == 1:\n break\n left_beta = [beta[0] for beta in beta_list]\n right_beta = [beta[-1] for beta in reversed(beta_list)]\n\n return left_beta, right_beta\n\n\n# FIXME spelling mistake in the name of the parameter ``tolerence``\ndef find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,\n inside_closedpath,\n t0=0., t1=1., tolerence=0.01):\n \"\"\" Find parameters t0 and t1 of the given bezier path which\n bound the intersecting points with a provided closed\n path (*inside_closedpath*). The search starts from *t0* and *t1* and\n uses a simple bisecting algorithm, therefore one of the end points\n must be inside the path while the other is not. The search stops\n when |t0-t1| gets smaller than the given tolerance.\n\n - bezier_point_at_t : a function which returns x, y coordinates at *t*\n\n - inside_closedpath : return True if the point is inside the path\n\n \"\"\"\n # inside_closedpath : function\n\n start = bezier_point_at_t(t0)\n end = bezier_point_at_t(t1)\n\n start_inside = inside_closedpath(start)\n end_inside = inside_closedpath(end)\n\n if not xor(start_inside, end_inside):\n raise NonIntersectingPathException(\n \"the segment does not seem to intersect with the path\")\n\n while 1:\n\n # return if the distance is smaller than the tolerance\n if (start[0] - end[0]) ** 2 + \\\n (start[1] - end[1]) ** 2 < tolerence ** 2:\n return t0, t1\n\n # calculate the middle point\n middle_t = 0.5 * (t0 + t1)\n middle = bezier_point_at_t(middle_t)\n middle_inside = inside_closedpath(middle)\n\n if xor(start_inside, middle_inside):\n t1 = middle_t\n end = middle\n end_inside = middle_inside\n else:\n t0 = middle_t\n start = middle\n start_inside = middle_inside\n\n\nclass BezierSegment(object):\n \"\"\"\n A simple class of a 2-dimensional bezier segment\n \"\"\"\n\n # Higher order bezier lines can be supported by simply adding\n # corresponding values.\n _binom_coeff = {1: np.array([1., 1.]),\n 2: np.array([1., 2., 1.]),\n 3: np.array([1., 3., 3., 1.])}\n\n def __init__(self, control_points):\n \"\"\"\n *control_points* : location of control points. It needs to have a\n shape of (n + 1) * 2, where n is the order of the bezier line.\n 1 <= n <= 3 is supported.\n \"\"\"\n _o = len(control_points)\n self._orders = np.arange(_o)\n _coeff = BezierSegment._binom_coeff[_o - 1]\n\n _control_points = np.asarray(control_points)\n xx = _control_points[:, 0]\n yy = _control_points[:, 1]\n\n self._px = xx * _coeff\n self._py = yy * _coeff\n\n def point_at_t(self, t):\n \"evaluate a point at t\"\n one_minus_t_powers = np.power(1. 
- t, self._orders)[::-1]\n t_powers = np.power(t, self._orders)\n\n tt = one_minus_t_powers * t_powers\n _x = sum(tt * self._px)\n _y = sum(tt * self._py)\n\n return _x, _y\n\n\ndef split_bezier_intersecting_with_closedpath(bezier,\n inside_closedpath,\n tolerence=0.01):\n\n \"\"\"\n bezier : control points of the bezier segment\n inside_closedpath : a function which returns True if the point is inside\n the path\n \"\"\"\n\n bz = BezierSegment(bezier)\n bezier_point_at_t = bz.point_at_t\n\n t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,\n inside_closedpath,\n tolerence=tolerence)\n\n _left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)\n return _left, _right\n\n\ndef find_r_to_boundary_of_closedpath(inside_closedpath, xy,\n cos_t, sin_t,\n rmin=0., rmax=1., tolerence=0.01):\n \"\"\"\n Find a radius r (centered at *xy*) between *rmin* and *rmax* at\n which the ray intersects with the path.\n\n inside_closedpath : function\n cx, cy : center\n cos_t, sin_t : cosine and sine for the angle\n rmin, rmax :\n \"\"\"\n\n cx, cy = xy\n\n def _f(r):\n return cos_t * r + cx, sin_t * r + cy\n\n # return the bracketing interval found by the bisection search\n return find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,\n t0=rmin, t1=rmax,\n tolerence=tolerence)\n\n## matplotlib specific\n\n\ndef split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):\n \"\"\" divide a path into two segments at the point where inside(x, y)\n becomes False.\n \"\"\"\n\n path_iter = path.iter_segments()\n\n ctl_points, command = next(path_iter)\n begin_inside = inside(ctl_points[-2:]) # True if begin point is inside\n\n bezier_path = None\n ctl_points_old = ctl_points\n\n concat = np.concatenate\n\n iold = 0\n i = 1\n\n for ctl_points, command in path_iter:\n iold = i\n # integer division: i is used as a slice index below\n i += len(ctl_points) // 2\n if inside(ctl_points[-2:]) != begin_inside:\n bezier_path = concat([ctl_points_old[-2:], ctl_points])\n break\n\n ctl_points_old = ctl_points\n\n if bezier_path is None:\n raise ValueError(\"The path does not seem to intersect with the patch\")\n\n bp = list(zip(bezier_path[::2], bezier_path[1::2]))\n left, right = split_bezier_intersecting_with_closedpath(bp,\n inside,\n tolerence)\n if len(left) == 2:\n codes_left = [Path.LINETO]\n codes_right = [Path.MOVETO, Path.LINETO]\n elif len(left) == 3:\n codes_left = [Path.CURVE3, Path.CURVE3]\n codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n elif len(left) == 4:\n codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]\n codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]\n else:\n raise ValueError()\n\n verts_left = left[1:]\n verts_right = right[:]\n\n #i += 1\n\n if path.codes is None:\n path_in = Path(concat([path.vertices[:i], verts_left]))\n path_out = Path(concat([verts_right, path.vertices[i:]]))\n\n else:\n path_in = Path(concat([path.vertices[:iold], verts_left]),\n concat([path.codes[:iold], codes_left]))\n\n path_out = Path(concat([verts_right, path.vertices[i:]]),\n concat([codes_right, path.codes[i:]]))\n\n if reorder_inout and not begin_inside:\n path_in, path_out = path_out, path_in\n\n return path_in, path_out\n\n\ndef inside_circle(cx, cy, r):\n r2 = r ** 2\n\n def _f(xy):\n x, y = xy\n return (x - cx) ** 2 + (y - cy) ** 2 < r2\n return _f\n\n\n# quadratic bezier lines\n\ndef get_cos_sin(x0, y0, x1, y1):\n dx, dy = x1 - x0, y1 - y0\n d = (dx * dx + dy * dy) ** .5\n return dx / d, dy / d\n\n\ndef check_if_parallel(dx1, dy1, dx2, dy2, tolerence=1.e-5):\n \"\"\" returns\n * 1 if two lines are parallel in the same direction\n * -1 if two lines are parallel in opposite 
direction\n * 0 otherwise\n \"\"\"\n theta1 = np.arctan2(dx1, dy1)\n theta2 = np.arctan2(dx2, dy2)\n dtheta = np.abs(theta1 - theta2)\n if dtheta < tolerence:\n return 1\n elif np.abs(dtheta - np.pi) < tolerence:\n return -1\n else:\n return False\n\n\ndef get_parallels(bezier2, width):\n \"\"\"\n Given the quadratic bezier control points *bezier2*, returns\n control points of quadratic bezier lines roughly parallel to the given\n one separated by *width*.\n \"\"\"\n\n # The parallel bezier lines are constructed in the following way.\n # c1 and c2 are control points representing the begin and end of the\n # bezier line.\n # cm is the middle point\n\n c1x, c1y = bezier2[0]\n cmx, cmy = bezier2[1]\n c2x, c2y = bezier2[2]\n\n parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,\n cmx - c2x, cmy - c2y)\n\n if parallel_test == -1:\n warnings.warn(\n \"Lines do not intersect. A straight line is used instead.\")\n #cmx, cmy = 0.5*(c1x+c2x), 0.5*(c1y+c2y)\n cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)\n cos_t2, sin_t2 = cos_t1, sin_t1\n else:\n # t1 and t2 are the angles between c1-cm and cm-c2. They are\n # also the angles of the tangential lines of the path at c1 and c2\n cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)\n cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)\n\n # find c1_left, c1_right which are located along the lines\n # through c1 and perpendicular to the tangential lines of the\n # bezier path at a distance of width. Same thing for c2_left and\n # c2_right with respect to c2.\n c1x_left, c1y_left, c1x_right, c1y_right = \\\n get_normal_points(c1x, c1y, cos_t1, sin_t1, width)\n c2x_left, c2y_left, c2x_right, c2y_right = \\\n get_normal_points(c2x, c2y, cos_t2, sin_t2, width)\n\n # find cm_left which is the intersecting point of a line through\n # c1_left with angle t1 and a line through c2_left with angle\n # t2. Same with cm_right.\n if parallel_test != 0:\n # a special case for a straight line, i.e., the angle between the\n # two lines is smaller than some (arbitrary) value.\n cmx_left, cmy_left = \\\n 0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)\n cmx_right, cmy_right = \\\n 0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)\n else:\n cmx_left, cmy_left = \\\n get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,\n c2x_left, c2y_left, cos_t2, sin_t2)\n\n cmx_right, cmy_right = \\\n get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,\n c2x_right, c2y_right, cos_t2, sin_t2)\n\n # the parallel bezier lines are created with control points of\n # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]\n path_left = [(c1x_left, c1y_left),\n (cmx_left, cmy_left),\n (c2x_left, c2y_left)]\n path_right = [(c1x_right, c1y_right),\n (cmx_right, cmy_right),\n (c2x_right, c2y_right)]\n\n return path_left, path_right\n\n\ndef find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):\n \"\"\" Find control points of the bezier line through c1, mm, c2. 
We simply assume that c1, mm, c2 have parametric values 0, 0.5, and 1,\n respectively.\n \"\"\"\n\n cmx = .5 * (4 * mmx - (c1x + c2x))\n cmy = .5 * (4 * mmy - (c1y + c2y))\n\n return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]\n\n\ndef make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):\n \"\"\"\n Similar to get_parallels, returns control points of two quadratic\n bezier lines having a width roughly parallel to the given one separated\n by *width*.\n \"\"\"\n\n # c1, cm, c2\n c1x, c1y = bezier2[0]\n cmx, cmy = bezier2[1]\n c3x, c3y = bezier2[2]\n\n # t1 and t2 are the angles between c1-cm and cm-c3.\n # They are also the angles of the tangential lines of the path at c1 and c3\n cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)\n cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)\n\n # find c1_left, c1_right which are located along the lines\n # through c1 and perpendicular to the tangential lines of the\n # bezier path at a distance of width. Same thing for c3_left and\n # c3_right with respect to c3.\n c1x_left, c1y_left, c1x_right, c1y_right = \\\n get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)\n c3x_left, c3y_left, c3x_right, c3y_right = \\\n get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)\n\n # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and\n # c12-c23\n c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5\n c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5\n c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5\n\n # tangential angle of c123 (angle between c12 and c23)\n cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)\n\n c123x_left, c123y_left, c123x_right, c123y_right = \\\n get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)\n\n path_left = find_control_points(c1x_left, c1y_left,\n c123x_left, c123y_left,\n c3x_left, c3y_left)\n path_right = find_control_points(c1x_right, c1y_right,\n c123x_right, c123y_right,\n c3x_right, c3y_right)\n\n return path_left, path_right\n\n\ndef make_path_regular(p):\n \"\"\"\n fill in the codes if None.\n \"\"\"\n c = p.codes\n if c is None:\n c = np.empty(p.vertices.shape[:1], \"i\")\n c.fill(Path.LINETO)\n c[0] = Path.MOVETO\n\n return Path(p.vertices, c)\n else:\n return p\n\n\ndef concatenate_paths(paths):\n \"\"\"\n concatenate a list of paths into a single path.\n \"\"\"\n\n vertices = []\n codes = []\n for p in paths:\n p = make_path_regular(p)\n vertices.append(p.vertices)\n codes.append(p.codes)\n\n _path = Path(np.concatenate(vertices),\n np.concatenate(codes))\n return _path\n", "#!/usr/bin/env python\n\n\n\"\"\"\nFor backends that support draw_image with an optional affine\ntransform (e.g., the agg and ps backends), the boundary of the output\nimage should match the red rectangles.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as mtransforms\n\ndef get_image():\n delta = 0.25\n x = y = np.arange(-3.0, 3.0, delta)\n X, Y = np.meshgrid(x, y)\n Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)\n Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)\n Z = Z2-Z1 # difference of Gaussians\n return Z\n\ndef imshow_affine(ax, z, *kl, **kwargs):\n im = ax.imshow(z, *kl, **kwargs)\n x1, x2, y1, y2 = im.get_extent()\n im._image_skew_coordinate = (x2, y1)\n return im\n\n\nif 1:\n\n # image rotation\n\n fig, (ax1, ax2) = plt.subplots(1,2)\n Z = get_image()\n im1 = imshow_affine(ax1, Z, interpolation='none', cmap=cm.jet,\n origin='lower',\n extent=[-2, 4, -3, 2], clip_on=True)\n\n trans_data2 = 
mtransforms.Affine2D().rotate_deg(30) + ax1.transData\n im1.set_transform(trans_data2)\n\n # display intended extent of the image\n x1, x2, y1, y2 = im1.get_extent()\n x3, y3 = x2, y1\n\n ax1.plot([x1, x2, x2, x1, x1], [y1, y1, y2, y2, y1], \"r--\", lw=3,\n transform=trans_data2)\n\n ax1.set_xlim(-3, 5)\n ax1.set_ylim(-4, 4)\n\n\n # image skew\n\n im2 = ax2.imshow(Z, interpolation='none', cmap=cm.jet,\n origin='lower',\n extent=[-2, 4, -3, 2], clip_on=True)\n im2._image_skew_coordinate = (3, -2)\n\n\n plt.show()\n #plt.savefig(\"demo_affine_image\")\n", "\"\"\"\nThis module contains all the 2D line class which can draw with a\nvariety of line styles, markers and colors.\n\"\"\"\n\n# TODO: expose cap and join style attrs\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport warnings\n\nimport numpy as np\nfrom numpy import ma\nfrom matplotlib import verbose\nfrom . import artist\nfrom .artist import Artist\nfrom .cbook import iterable, is_string_like, is_numlike, ls_mapper\nfrom .colors import colorConverter\nfrom .path import Path\nfrom .transforms import Bbox, TransformedPath, IdentityTransform\n\nfrom matplotlib import rcParams\nfrom .artist import allow_rasterization\nfrom matplotlib import docstring\nfrom matplotlib.markers import MarkerStyle\n# Imported here for backward compatibility, even though they don't\n# really belong.\nfrom matplotlib.markers import TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN\nfrom matplotlib.markers import CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN\n\n\ndef segment_hits(cx, cy, x, y, radius):\n \"\"\"\n Determine if any line segments are within radius of a\n point. Returns the list of line segments that are within that\n radius.\n \"\"\"\n # Process single points specially\n if len(x) < 2:\n res, = np.nonzero((cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2)\n return res\n\n # We need to lop the last element off a lot.\n xr, yr = x[:-1], y[:-1]\n\n # Only look at line segments whose nearest point to C on the line\n # lies within the segment.\n dx, dy = x[1:] - xr, y[1:] - yr\n Lnorm_sq = dx ** 2 + dy ** 2 # Possibly want to eliminate Lnorm==0\n u = ((cx - xr) * dx + (cy - yr) * dy) / Lnorm_sq\n candidates = (u >= 0) & (u <= 1)\n #if any(candidates): print \"candidates\",xr[candidates]\n\n # Note that there is a little area near one side of each point\n # which will be near neither segment, and another which will\n # be near both, depending on the angle of the lines. The\n # following radius test eliminates these ambiguities.\n point_hits = (cx - x) ** 2 + (cy - y) ** 2 <= radius ** 2\n #if any(point_hits): print \"points\",xr[candidates]\n candidates = candidates & ~(point_hits[:-1] | point_hits[1:])\n\n # For those candidates which remain, determine how far they lie away\n # from the line.\n px, py = xr + u * dx, yr + u * dy\n line_hits = (cx - px) ** 2 + (cy - py) ** 2 <= radius ** 2\n #if any(line_hits): print \"lines\",xr[candidates]\n line_hits = line_hits & candidates\n points, = point_hits.ravel().nonzero()\n lines, = line_hits.ravel().nonzero()\n #print points,lines\n return np.concatenate((points, lines))\n\n\nclass Line2D(Artist):\n \"\"\"\n A line - the line can have both a solid linestyle connecting all\n the vertices, and a marker at each vertex. 
Additionally, the\n drawing of the solid line is influenced by the drawstyle, e.g., one\n can create \"stepped\" lines in various styles.\n\n\n \"\"\"\n lineStyles = _lineStyles = { # hidden names deprecated\n '-': '_draw_solid',\n '--': '_draw_dashed',\n '-.': '_draw_dash_dot',\n ':': '_draw_dotted',\n 'None': '_draw_nothing',\n ' ': '_draw_nothing',\n '': '_draw_nothing',\n }\n\n _drawStyles_l = {\n 'default': '_draw_lines',\n 'steps-mid': '_draw_steps_mid',\n 'steps-pre': '_draw_steps_pre',\n 'steps-post': '_draw_steps_post',\n }\n\n _drawStyles_s = {\n 'steps': '_draw_steps_pre',\n }\n\n drawStyles = {}\n drawStyles.update(_drawStyles_l)\n drawStyles.update(_drawStyles_s)\n # Need a list ordered with long names first:\n drawStyleKeys = (list(six.iterkeys(_drawStyles_l)) +\n list(six.iterkeys(_drawStyles_s)))\n\n # Referenced here to maintain API. These are defined in\n # MarkerStyle\n markers = MarkerStyle.markers\n filled_markers = MarkerStyle.filled_markers\n fillStyles = MarkerStyle.fillstyles\n\n zorder = 2\n validCap = ('butt', 'round', 'projecting')\n validJoin = ('miter', 'round', 'bevel')\n\n def __str__(self):\n if self._label != \"\":\n return \"Line2D(%s)\" % (self._label)\n elif hasattr(self, '_x') and len(self._x) > 3:\n return \"Line2D((%g,%g),(%g,%g),...,(%g,%g))\"\\\n % (self._x[0], self._y[0], self._x[1],\n self._y[1], self._x[-1], self._y[-1])\n elif hasattr(self, '_x'):\n return \"Line2D(%s)\"\\\n % (\",\".join([\"(%g,%g)\" % (x, y) for x, y\n in zip(self._x, self._y)]))\n else:\n return \"Line2D()\"\n\n def __init__(self, xdata, ydata,\n linewidth=None, # all Nones default to rc\n linestyle=None,\n color=None,\n marker=None,\n markersize=None,\n markeredgewidth=None,\n markeredgecolor=None,\n markerfacecolor=None,\n markerfacecoloralt='none',\n fillstyle='full',\n antialiased=None,\n dash_capstyle=None,\n solid_capstyle=None,\n dash_joinstyle=None,\n solid_joinstyle=None,\n pickradius=5,\n drawstyle=None,\n markevery=None,\n **kwargs\n ):\n \"\"\"\n Create a :class:`~matplotlib.lines.Line2D` instance with *x*\n and *y* data in sequences *xdata*, *ydata*.\n\n The kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n %(Line2D)s\n\n See :meth:`set_linestyle` for a description of the line styles,\n :meth:`set_marker` for a description of the markers, and\n :meth:`set_drawstyle` for a description of the draw styles.\n\n \"\"\"\n Artist.__init__(self)\n\n #convert sequences to numpy arrays\n if not iterable(xdata):\n raise RuntimeError('xdata must be a sequence')\n if not iterable(ydata):\n raise RuntimeError('ydata must be a sequence')\n\n if linewidth is None:\n linewidth = rcParams['lines.linewidth']\n\n if linestyle is None:\n linestyle = rcParams['lines.linestyle']\n if marker is None:\n marker = rcParams['lines.marker']\n if color is None:\n color = rcParams['lines.color']\n\n if markersize is None:\n markersize = rcParams['lines.markersize']\n if antialiased is None:\n antialiased = rcParams['lines.antialiased']\n if dash_capstyle is None:\n dash_capstyle = rcParams['lines.dash_capstyle']\n if dash_joinstyle is None:\n dash_joinstyle = rcParams['lines.dash_joinstyle']\n if solid_capstyle is None:\n solid_capstyle = rcParams['lines.solid_capstyle']\n if solid_joinstyle is None:\n solid_joinstyle = rcParams['lines.solid_joinstyle']\n\n if drawstyle is None:\n drawstyle = 'default'\n\n self.set_dash_capstyle(dash_capstyle)\n self.set_dash_joinstyle(dash_joinstyle)\n self.set_solid_capstyle(solid_capstyle)\n self.set_solid_joinstyle(solid_joinstyle)\n\n 
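# The setters below validate the resolved rc defaults and store them on\n # private attributes; set_data() at the end only marks the x/y caches\n # invalid, so the actual Path is built lazily by recache() on first use.\n 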
self.set_linestyle(linestyle)\n self.set_drawstyle(drawstyle)\n self.set_linewidth(linewidth)\n self.set_color(color)\n self._marker = MarkerStyle()\n self.set_marker(marker)\n self.set_markevery(markevery)\n self.set_antialiased(antialiased)\n self.set_markersize(markersize)\n self._dashSeq = None\n\n self.set_markerfacecolor(markerfacecolor)\n self.set_markerfacecoloralt(markerfacecoloralt)\n self.set_markeredgecolor(markeredgecolor)\n self.set_markeredgewidth(markeredgewidth)\n self.set_fillstyle(fillstyle)\n\n self.verticalOffset = None\n\n # update kwargs before updating data to give the caller a\n # chance to init axes (and hence unit support)\n self.update(kwargs)\n self.pickradius = pickradius\n self.ind_offset = 0\n if is_numlike(self._picker):\n self.pickradius = self._picker\n\n self._xorig = np.asarray([])\n self._yorig = np.asarray([])\n self._invalidx = True\n self._invalidy = True\n self.set_data(xdata, ydata)\n\n def contains(self, mouseevent):\n \"\"\"\n Test whether the mouse event occurred on the line. The pick\n radius determines the precision of the location test (usually\n within five points of the value). Use\n :meth:`~matplotlib.lines.Line2D.get_pickradius` or\n :meth:`~matplotlib.lines.Line2D.set_pickradius` to view or\n modify it.\n\n Returns *True* if any values are within the radius along with\n ``{'ind': pointlist}``, where *pointlist* is the set of points\n within the radius.\n\n TODO: sort returned indices by distance\n \"\"\"\n if six.callable(self._contains):\n return self._contains(self, mouseevent)\n\n if not is_numlike(self.pickradius):\n raise ValueError(\"pick radius should be a distance\")\n\n # Make sure we have data to plot\n if self._invalidy or self._invalidx:\n self.recache()\n if len(self._xy) == 0:\n return False, {}\n\n # Convert points to pixels\n transformed_path = self._get_transformed_path()\n path, affine = transformed_path.get_transformed_path_and_affine()\n path = affine.transform_path(path)\n xy = path.vertices\n xt = xy[:, 0]\n yt = xy[:, 1]\n\n # Convert pick radius from points to pixels\n if self.figure is None:\n warnings.warn('no figure set when check if mouse is on line')\n pixels = self.pickradius\n else:\n pixels = self.figure.dpi / 72. * self.pickradius\n\n # the math involved in checking for containment (here and inside of\n # segment_hits) assumes that it is OK to overflow. 
In case the\n # application has set the error flags such that an exception is raised\n # on overflow, we temporarily set the appropriate error flags here and\n # set them back when we are finished.\n olderrflags = np.seterr(all='ignore')\n try:\n # Check for collision\n if self._linestyle in ['None', None]:\n # If no line, return the nearby point(s)\n d = (xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2\n ind, = np.nonzero(np.less_equal(d, pixels ** 2))\n else:\n # If line, return the nearby segment(s)\n ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels)\n finally:\n np.seterr(**olderrflags)\n\n ind += self.ind_offset\n\n # Debugging message\n if False and self._label != '':\n print(\"Checking line\", self._label,\n \"at\", mouseevent.x, mouseevent.y)\n print('xt', xt)\n print('yt', yt)\n #print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.\n print('ind', ind)\n\n # Return the point(s) within radius\n return len(ind) > 0, dict(ind=ind)\n\n def get_pickradius(self):\n \"\"\"return the pick radius used for containment tests\"\"\"\n return self.pickradius\n\n def set_pickradius(self, d):\n \"\"\"Sets the pick radius used for containment tests\n\n ACCEPTS: float distance in points\n \"\"\"\n self.pickradius = d\n\n def get_fillstyle(self):\n \"\"\"\n return the marker fillstyle\n \"\"\"\n return self._marker.get_fillstyle()\n\n def set_fillstyle(self, fs):\n \"\"\"\n Set the marker fill style; 'full' means fill the whole marker.\n 'none' means no filling; other options are for half-filled markers.\n\n ACCEPTS: ['full' | 'left' | 'right' | 'bottom' | 'top' | 'none']\n \"\"\"\n self._marker.set_fillstyle(fs)\n\n def set_markevery(self, every):\n \"\"\"\n Set the markevery property to subsample the plot when using\n markers. e.g., if ``markevery=5``, every 5-th marker will be\n plotted. 
*every* can be\n\n None\n Every point will be plotted\n\n an integer N\n Every N-th marker will be plotted starting with marker 0\n\n A length-2 tuple of integers\n every=(start, N) will start at point start and plot every N-th\n marker\n\n ACCEPTS: None | integer | (startind, stride)\n\n \"\"\"\n self._markevery = every\n\n def get_markevery(self):\n \"\"\"return the markevery setting\"\"\"\n return self._markevery\n\n def set_picker(self, p):\n \"\"\"Sets the event picker details for the line.\n\n ACCEPTS: float distance in points or callable pick function\n ``fn(artist, event)``\n \"\"\"\n if six.callable(p):\n self._contains = p\n else:\n self.pickradius = p\n self._picker = p\n\n def get_window_extent(self, renderer):\n bbox = Bbox([[0, 0], [0, 0]])\n trans_data_to_xy = self.get_transform().transform\n bbox.update_from_data_xy(trans_data_to_xy(self.get_xydata()),\n ignore=True)\n # correct for marker size, if any\n if self._marker:\n ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5\n bbox = bbox.padded(ms)\n return bbox\n\n def set_axes(self, ax):\n Artist.set_axes(self, ax)\n if ax.xaxis is not None:\n self._xcid = ax.xaxis.callbacks.connect('units',\n self.recache_always)\n if ax.yaxis is not None:\n self._ycid = ax.yaxis.callbacks.connect('units',\n self.recache_always)\n set_axes.__doc__ = Artist.set_axes.__doc__\n\n def set_data(self, *args):\n \"\"\"\n Set the x and y data\n\n ACCEPTS: 2D array (rows are x, y) or two 1D arrays\n \"\"\"\n if len(args) == 1:\n x, y = args[0]\n else:\n x, y = args\n\n self.set_xdata(x)\n self.set_ydata(y)\n\n def recache_always(self):\n self.recache(always=True)\n\n def recache(self, always=False):\n if always or self._invalidx:\n xconv = self.convert_xunits(self._xorig)\n if ma.isMaskedArray(self._xorig):\n x = ma.asarray(xconv, np.float_)\n else:\n x = np.asarray(xconv, np.float_)\n x = x.ravel()\n else:\n x = self._x\n if always or self._invalidy:\n yconv = self.convert_yunits(self._yorig)\n if ma.isMaskedArray(self._yorig):\n y = ma.asarray(yconv, np.float_)\n else:\n y = np.asarray(yconv, np.float_)\n y = y.ravel()\n else:\n y = self._y\n\n if len(x) == 1 and len(y) > 1:\n x = x * np.ones(y.shape, np.float_)\n if len(y) == 1 and len(x) > 1:\n y = y * np.ones(x.shape, np.float_)\n\n if len(x) != len(y):\n raise RuntimeError('xdata and ydata must be the same length')\n\n x = x.reshape((len(x), 1))\n y = y.reshape((len(y), 1))\n\n if ma.isMaskedArray(x) or ma.isMaskedArray(y):\n self._xy = ma.concatenate((x, y), 1)\n else:\n self._xy = np.concatenate((x, y), 1)\n self._x = self._xy[:, 0] # just a view\n self._y = self._xy[:, 1] # just a view\n\n self._subslice = False\n if (self.axes and len(x) > 100 and self._is_sorted(x) and\n self.axes.name == 'rectilinear' and\n self.axes.get_xscale() == 'linear' and\n self._markevery is None and\n self.get_clip_on() is True):\n self._subslice = True\n if hasattr(self, '_path'):\n interpolation_steps = self._path._interpolation_steps\n else:\n interpolation_steps = 1\n self._path = Path(self._xy, None, interpolation_steps)\n self._transformed_path = None\n self._invalidx = False\n self._invalidy = False\n\n def _transform_path(self, subslice=None):\n \"\"\"\n Puts a TransformedPath instance at self._transformed_path,\n all invalidation of the transform is then handled by the\n TransformedPath instance.\n \"\"\"\n # Masked arrays are now handled by the Path class itself\n if subslice is not None:\n _path = Path(self._xy[subslice, :])\n else:\n _path = self._path\n self._transformed_path = 
TransformedPath(_path, self.get_transform())\n\n def _get_transformed_path(self):\n \"\"\"\n Return the :class:`~matplotlib.transforms.TransformedPath` instance\n of this line.\n \"\"\"\n if self._transformed_path is None:\n self._transform_path()\n return self._transformed_path\n\n def set_transform(self, t):\n \"\"\"\n set the Transformation instance used by this artist\n\n ACCEPTS: a :class:`matplotlib.transforms.Transform` instance\n \"\"\"\n Artist.set_transform(self, t)\n self._invalidx = True\n self._invalidy = True\n\n def _is_sorted(self, x):\n \"\"\"return true if x is sorted\"\"\"\n if len(x) < 2:\n return 1\n return np.amin(x[1:] - x[0:-1]) >= 0\n\n @allow_rasterization\n def draw(self, renderer):\n \"\"\"draw the Line with `renderer` unless visibility is False\"\"\"\n if not self.get_visible():\n return\n\n if self._invalidy or self._invalidx:\n self.recache()\n self.ind_offset = 0 # Needed for contains() method.\n if self._subslice and self.axes:\n # Need to handle monotonically decreasing case also...\n x0, x1 = self.axes.get_xbound()\n i0, = self._x.searchsorted([x0], 'left')\n i1, = self._x.searchsorted([x1], 'right')\n subslice = slice(max(i0 - 1, 0), i1 + 1)\n self.ind_offset = subslice.start\n self._transform_path(subslice)\n\n transf_path = self._get_transformed_path()\n\n if self.get_path_effects():\n from matplotlib.patheffects import PathEffectRenderer\n renderer = PathEffectRenderer(self.get_path_effects(), renderer)\n\n renderer.open_group('line2d', self.get_gid())\n gc = renderer.new_gc()\n self._set_gc_clip(gc)\n\n ln_color_rgba = self._get_rgba_ln_color()\n gc.set_foreground(ln_color_rgba, isRGBA=True)\n gc.set_alpha(ln_color_rgba[3])\n\n gc.set_antialiased(self._antialiased)\n gc.set_linewidth(self._linewidth)\n\n if self.is_dashed():\n cap = self._dashcapstyle\n join = self._dashjoinstyle\n else:\n cap = self._solidcapstyle\n join = self._solidjoinstyle\n gc.set_joinstyle(join)\n gc.set_capstyle(cap)\n gc.set_snap(self.get_snap())\n if self.get_sketch_params() is not None:\n gc.set_sketch_params(*self.get_sketch_params())\n\n funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')\n if funcname != '_draw_nothing':\n tpath, affine = transf_path.get_transformed_path_and_affine()\n if len(tpath.vertices):\n self._lineFunc = getattr(self, funcname)\n funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')\n drawFunc = getattr(self, funcname)\n drawFunc(renderer, gc, tpath, affine.frozen())\n\n if self._marker:\n gc = renderer.new_gc()\n self._set_gc_clip(gc)\n rgbaFace = self._get_rgba_face()\n rgbaFaceAlt = self._get_rgba_face(alt=True)\n edgecolor = self.get_markeredgecolor()\n if is_string_like(edgecolor) and edgecolor.lower() == 'none':\n gc.set_linewidth(0)\n gc.set_foreground(rgbaFace, isRGBA=True)\n else:\n gc.set_foreground(edgecolor)\n gc.set_linewidth(self._markeredgewidth)\n\n marker = self._marker\n tpath, affine = transf_path.get_transformed_points_and_affine()\n if len(tpath.vertices):\n # subsample the markers if markevery is not None\n markevery = self.get_markevery()\n if markevery is not None:\n if iterable(markevery):\n startind, stride = markevery\n else:\n startind, stride = 0, markevery\n if tpath.codes is not None:\n codes = tpath.codes[startind::stride]\n else:\n codes = None\n vertices = tpath.vertices[startind::stride]\n subsampled = Path(vertices, codes)\n else:\n subsampled = tpath\n\n snap = marker.get_snap_threshold()\n if type(snap) == float:\n snap = renderer.points_to_pixels(self._markersize) >= snap\n 
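# (a float snap threshold was just converted to a bool above: snap to\n # the pixel grid only when the marker is at least that many pixels wide,\n # since snapping can visibly distort very small markers)\n 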
gc.set_snap(snap)\n gc.set_joinstyle(marker.get_joinstyle())\n gc.set_capstyle(marker.get_capstyle())\n marker_path = marker.get_path()\n marker_trans = marker.get_transform()\n w = renderer.points_to_pixels(self._markersize)\n if marker.get_marker() != ',':\n # Don't scale for pixels, and don't stroke them\n marker_trans = marker_trans.scale(w)\n else:\n gc.set_linewidth(0)\n if rgbaFace is not None:\n gc.set_alpha(rgbaFace[3])\n\n renderer.draw_markers(gc, marker_path, marker_trans,\n subsampled, affine.frozen(),\n rgbaFace)\n\n alt_marker_path = marker.get_alt_path()\n if alt_marker_path:\n if rgbaFaceAlt is not None:\n gc.set_alpha(rgbaFaceAlt[3])\n alt_marker_trans = marker.get_alt_transform()\n alt_marker_trans = alt_marker_trans.scale(w)\n\n renderer.draw_markers(\n gc, alt_marker_path, alt_marker_trans, subsampled,\n affine.frozen(), rgbaFaceAlt)\n\n gc.restore()\n\n gc.restore()\n renderer.close_group('line2d')\n\n def get_antialiased(self):\n return self._antialiased\n\n def get_color(self):\n return self._color\n\n def get_drawstyle(self):\n return self._drawstyle\n\n def get_linestyle(self):\n return self._linestyle\n\n def get_linewidth(self):\n return self._linewidth\n\n def get_marker(self):\n return self._marker.get_marker()\n\n def get_markeredgecolor(self):\n mec = self._markeredgecolor\n if (is_string_like(mec) and mec == 'auto'):\n if self._marker.get_marker() in ('.', ','):\n return self._color\n if self._marker.is_filled() and self.get_fillstyle() != 'none':\n return 'k' # Bad hard-wired default...\n else:\n return self._color\n else:\n return mec\n\n def get_markeredgewidth(self):\n return self._markeredgewidth\n\n def _get_markerfacecolor(self, alt=False):\n if alt:\n fc = self._markerfacecoloralt\n else:\n fc = self._markerfacecolor\n\n if (is_string_like(fc) and fc.lower() == 'auto'):\n if self.get_fillstyle() == 'none':\n return 'none'\n else:\n return self._color\n else:\n return fc\n\n def get_markerfacecolor(self):\n return self._get_markerfacecolor(alt=False)\n\n def get_markerfacecoloralt(self):\n return self._get_markerfacecolor(alt=True)\n\n def get_markersize(self):\n return self._markersize\n\n def get_data(self, orig=True):\n \"\"\"\n Return the xdata, ydata.\n\n If *orig* is *True*, return the original data.\n \"\"\"\n return self.get_xdata(orig=orig), self.get_ydata(orig=orig)\n\n def get_xdata(self, orig=True):\n \"\"\"\n Return the xdata.\n\n If *orig* is *True*, return the original data, else the\n processed data.\n \"\"\"\n if orig:\n return self._xorig\n if self._invalidx:\n self.recache()\n return self._x\n\n def get_ydata(self, orig=True):\n \"\"\"\n Return the ydata.\n\n If *orig* is *True*, return the original data, else the\n processed data.\n \"\"\"\n if orig:\n return self._yorig\n if self._invalidy:\n self.recache()\n return self._y\n\n def get_path(self):\n \"\"\"\n Return the :class:`~matplotlib.path.Path` object associated\n with this line.\n \"\"\"\n if self._invalidy or self._invalidx:\n self.recache()\n return self._path\n\n def get_xydata(self):\n \"\"\"\n Return the *xy* data as a Nx2 numpy array.\n \"\"\"\n if self._invalidy or self._invalidx:\n self.recache()\n return self._xy\n\n def set_antialiased(self, b):\n \"\"\"\n True if line should be drawin with antialiased rendering\n\n ACCEPTS: [True | False]\n \"\"\"\n self._antialiased = b\n\n def set_color(self, color):\n \"\"\"\n Set the color of the line\n\n ACCEPTS: any matplotlib color\n \"\"\"\n self._color = color\n\n def set_drawstyle(self, drawstyle):\n \"\"\"\n Set the 
drawstyle of the plot\n\n 'default' connects the points with lines. The steps variants\n produce step-plots. 'steps' is equivalent to 'steps-pre' and\n is maintained for backward-compatibility.\n\n ACCEPTS: ['default' | 'steps' | 'steps-pre' | 'steps-mid' |\n 'steps-post']\n \"\"\"\n self._drawstyle = drawstyle\n\n def set_linewidth(self, w):\n \"\"\"\n Set the line width in points\n\n ACCEPTS: float value in points\n \"\"\"\n self._linewidth = w\n\n def set_linestyle(self, linestyle):\n \"\"\"\n Set the linestyle of the line (also accepts drawstyles)\n\n\n ================ =================\n linestyle description\n ================ =================\n ``'-'`` solid\n ``'--'`` dashed\n ``'-.'`` dash_dot\n ``':'`` dotted\n ``'None'`` draw nothing\n ``' '`` draw nothing\n ``''`` draw nothing\n ================ =================\n\n 'steps' is equivalent to 'steps-pre' and is maintained for\n backward-compatibility.\n\n .. seealso::\n\n :meth:`set_drawstyle`\n To set the drawing style (stepping) of the plot.\n\n ACCEPTS: [``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |\n ``' '`` | ``''``]\n and any drawstyle in combination with a linestyle, e.g., ``'steps--'``.\n \"\"\"\n\n for ds in self.drawStyleKeys: # long names are first in the list\n if linestyle.startswith(ds):\n self.set_drawstyle(ds)\n if len(linestyle) > len(ds):\n linestyle = linestyle[len(ds):]\n else:\n linestyle = '-'\n break\n\n if linestyle not in self._lineStyles:\n if linestyle in ls_mapper:\n linestyle = ls_mapper[linestyle]\n else:\n verbose.report('Unrecognized line style %s, %s' %\n (linestyle, type(linestyle)))\n if linestyle in [' ', '']:\n linestyle = 'None'\n self._linestyle = linestyle\n\n @docstring.dedent_interpd\n def set_marker(self, marker):\n \"\"\"\n Set the line marker\n\n Parameters\n -----------\n\n marker: marker style\n See `~matplotlib.markers` for full description of possible\n argument\n\n \"\"\"\n self._marker.set_marker(marker)\n\n def set_markeredgecolor(self, ec):\n \"\"\"\n Set the marker edge color\n\n ACCEPTS: any matplotlib color\n \"\"\"\n if ec is None:\n ec = 'auto'\n self._markeredgecolor = ec\n\n def set_markeredgewidth(self, ew):\n \"\"\"\n Set the marker edge width in points\n\n ACCEPTS: float value in points\n \"\"\"\n if ew is None:\n ew = rcParams['lines.markeredgewidth']\n self._markeredgewidth = ew\n\n def set_markerfacecolor(self, fc):\n \"\"\"\n Set the marker face color.\n\n ACCEPTS: any matplotlib color\n \"\"\"\n if fc is None:\n fc = 'auto'\n\n self._markerfacecolor = fc\n\n def set_markerfacecoloralt(self, fc):\n \"\"\"\n Set the alternate marker face color.\n\n ACCEPTS: any matplotlib color\n \"\"\"\n if fc is None:\n fc = 'auto'\n\n self._markerfacecoloralt = fc\n\n def set_markersize(self, sz):\n \"\"\"\n Set the marker size in points\n\n ACCEPTS: float\n \"\"\"\n self._markersize = sz\n\n def set_xdata(self, x):\n \"\"\"\n Set the data np.array for x\n\n ACCEPTS: 1D array\n \"\"\"\n self._xorig = x\n self._invalidx = True\n\n def set_ydata(self, y):\n \"\"\"\n Set the data np.array for y\n\n ACCEPTS: 1D array\n \"\"\"\n self._yorig = y\n self._invalidy = True\n\n def set_dashes(self, seq):\n \"\"\"\n Set the dash sequence, sequence of dashes with on off ink in\n points. 
If seq is empty or if seq = (None, None), the\n linestyle will be set to solid.\n\n ACCEPTS: sequence of on/off ink in points\n \"\"\"\n if seq == (None, None) or len(seq) == 0:\n self.set_linestyle('-')\n else:\n self.set_linestyle('--')\n self._dashSeq = seq # TODO: offset ignored for now\n\n def _draw_lines(self, renderer, gc, path, trans):\n self._lineFunc(renderer, gc, path, trans)\n\n def _draw_steps_pre(self, renderer, gc, path, trans):\n vertices = self._xy\n steps = ma.zeros((2 * len(vertices) - 1, 2), np.float_)\n\n steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]\n steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]\n\n path = Path(steps)\n path = path.transformed(self.get_transform())\n self._lineFunc(renderer, gc, path, IdentityTransform())\n\n def _draw_steps_post(self, renderer, gc, path, trans):\n vertices = self._xy\n steps = ma.zeros((2 * len(vertices) - 1, 2), np.float_)\n\n steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]\n steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]\n\n path = Path(steps)\n path = path.transformed(self.get_transform())\n self._lineFunc(renderer, gc, path, IdentityTransform())\n\n def _draw_steps_mid(self, renderer, gc, path, trans):\n vertices = self._xy\n steps = ma.zeros((2 * len(vertices), 2), np.float_)\n\n steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])\n steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])\n steps[0, 0] = vertices[0, 0]\n steps[-1, 0] = vertices[-1, 0]\n steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]\n\n path = Path(steps)\n path = path.transformed(self.get_transform())\n self._lineFunc(renderer, gc, path, IdentityTransform())\n\n def _draw_solid(self, renderer, gc, path, trans):\n gc.set_linestyle('solid')\n renderer.draw_path(gc, path, trans)\n\n def _draw_dashed(self, renderer, gc, path, trans):\n gc.set_linestyle('dashed')\n if self._dashSeq is not None:\n gc.set_dashes(0, self._dashSeq)\n\n renderer.draw_path(gc, path, trans)\n\n def _draw_dash_dot(self, renderer, gc, path, trans):\n gc.set_linestyle('dashdot')\n renderer.draw_path(gc, path, trans)\n\n def _draw_dotted(self, renderer, gc, path, trans):\n gc.set_linestyle('dotted')\n renderer.draw_path(gc, path, trans)\n\n def update_from(self, other):\n \"\"\"copy properties from other to self\"\"\"\n Artist.update_from(self, other)\n self._linestyle = other._linestyle\n self._linewidth = other._linewidth\n self._color = other._color\n self._markersize = other._markersize\n self._markerfacecolor = other._markerfacecolor\n self._markerfacecoloralt = other._markerfacecoloralt\n self._markeredgecolor = other._markeredgecolor\n self._markeredgewidth = other._markeredgewidth\n self._dashSeq = other._dashSeq\n self._dashcapstyle = other._dashcapstyle\n self._dashjoinstyle = other._dashjoinstyle\n self._solidcapstyle = other._solidcapstyle\n self._solidjoinstyle = other._solidjoinstyle\n\n self._linestyle = other._linestyle\n self._marker = MarkerStyle(other._marker.get_marker(),\n other._marker.get_fillstyle())\n self._drawstyle = other._drawstyle\n\n def _get_rgb_face(self, alt=False):\n facecolor = self._get_markerfacecolor(alt=alt)\n if is_string_like(facecolor) and facecolor.lower() == 'none':\n rgbFace = None\n else:\n rgbFace = colorConverter.to_rgb(facecolor)\n return rgbFace\n\n def _get_rgba_face(self, alt=False):\n facecolor = self._get_markerfacecolor(alt=alt)\n if is_string_like(facecolor) and facecolor.lower() == 'none':\n rgbaFace = None\n else:\n rgbaFace = 
colorConverter.to_rgba(facecolor, self._alpha)\n return rgbaFace\n\n def _get_rgba_ln_color(self, alt=False):\n return colorConverter.to_rgba(self._color, self._alpha)\n\n # some aliases....\n def set_aa(self, val):\n 'alias for set_antialiased'\n self.set_antialiased(val)\n\n def set_c(self, val):\n 'alias for set_color'\n self.set_color(val)\n\n def set_ls(self, val):\n \"\"\"alias for set_linestyle\"\"\"\n self.set_linestyle(val)\n\n def set_lw(self, val):\n \"\"\"alias for set_linewidth\"\"\"\n self.set_linewidth(val)\n\n def set_mec(self, val):\n \"\"\"alias for set_markeredgecolor\"\"\"\n self.set_markeredgecolor(val)\n\n def set_mew(self, val):\n \"\"\"alias for set_markeredgewidth\"\"\"\n self.set_markeredgewidth(val)\n\n def set_mfc(self, val):\n \"\"\"alias for set_markerfacecolor\"\"\"\n self.set_markerfacecolor(val)\n\n def set_mfcalt(self, val):\n \"\"\"alias for set_markerfacecoloralt\"\"\"\n self.set_markerfacecoloralt(val)\n\n def set_ms(self, val):\n \"\"\"alias for set_markersize\"\"\"\n self.set_markersize(val)\n\n def get_aa(self):\n \"\"\"alias for get_antialiased\"\"\"\n return self.get_antialiased()\n\n def get_c(self):\n \"\"\"alias for get_color\"\"\"\n return self.get_color()\n\n def get_ls(self):\n \"\"\"alias for get_linestyle\"\"\"\n return self.get_linestyle()\n\n def get_lw(self):\n \"\"\"alias for get_linewidth\"\"\"\n return self.get_linewidth()\n\n def get_mec(self):\n \"\"\"alias for get_markeredgecolor\"\"\"\n return self.get_markeredgecolor()\n\n def get_mew(self):\n \"\"\"alias for get_markeredgewidth\"\"\"\n return self.get_markeredgewidth()\n\n def get_mfc(self):\n \"\"\"alias for get_markerfacecolor\"\"\"\n return self.get_markerfacecolor()\n\n def get_mfcalt(self, alt=False):\n \"\"\"alias for get_markerfacecoloralt\"\"\"\n return self.get_markerfacecoloralt()\n\n def get_ms(self):\n \"\"\"alias for get_markersize\"\"\"\n return self.get_markersize()\n\n def set_dash_joinstyle(self, s):\n \"\"\"\n Set the join style for dashed linestyles\n ACCEPTS: ['miter' | 'round' | 'bevel']\n \"\"\"\n s = s.lower()\n if s not in self.validJoin:\n raise ValueError('set_dash_joinstyle passed \"%s\";\\n' % (s,)\n + 'valid joinstyles are %s' % (self.validJoin,))\n self._dashjoinstyle = s\n\n def set_solid_joinstyle(self, s):\n \"\"\"\n Set the join style for solid linestyles\n ACCEPTS: ['miter' | 'round' | 'bevel']\n \"\"\"\n s = s.lower()\n if s not in self.validJoin:\n raise ValueError('set_solid_joinstyle passed \"%s\";\\n' % (s,)\n + 'valid joinstyles are %s' % (self.validJoin,))\n self._solidjoinstyle = s\n\n def get_dash_joinstyle(self):\n \"\"\"\n Get the join style for dashed linestyles\n \"\"\"\n return self._dashjoinstyle\n\n def get_solid_joinstyle(self):\n \"\"\"\n Get the join style for solid linestyles\n \"\"\"\n return self._solidjoinstyle\n\n def set_dash_capstyle(self, s):\n \"\"\"\n Set the cap style for dashed linestyles\n\n ACCEPTS: ['butt' | 'round' | 'projecting']\n \"\"\"\n s = s.lower()\n if s not in self.validCap:\n raise ValueError('set_dash_capstyle passed \"%s\";\\n' % (s,)\n + 'valid capstyles are %s' % (self.validCap,))\n\n self._dashcapstyle = s\n\n def set_solid_capstyle(self, s):\n \"\"\"\n Set the cap style for solid linestyles\n\n ACCEPTS: ['butt' | 'round' | 'projecting']\n \"\"\"\n s = s.lower()\n if s not in self.validCap:\n raise ValueError('set_solid_capstyle passed \"%s\";\\n' % (s,)\n + 'valid capstyles are %s' % (self.validCap,))\n\n self._solidcapstyle = s\n\n def get_dash_capstyle(self):\n \"\"\"\n Get the cap style for 
dashed linestyles\n \"\"\"\n return self._dashcapstyle\n\n def get_solid_capstyle(self):\n \"\"\"\n Get the cap style for solid linestyles\n \"\"\"\n return self._solidcapstyle\n\n def is_dashed(self):\n 'return True if line is dashstyle'\n return self._linestyle in ('--', '-.', ':')\n\n\nclass VertexSelector:\n \"\"\"\n Manage the callbacks to maintain a list of selected vertices for\n :class:`matplotlib.lines.Line2D`. Derived classes should override\n :meth:`~matplotlib.lines.VertexSelector.process_selected` to do\n something with the picks.\n\n Here is an example which highlights the selected verts with red\n circles::\n\n import numpy as np\n import matplotlib.pyplot as plt\n import matplotlib.lines as lines\n\n class HighlightSelected(lines.VertexSelector):\n def __init__(self, line, fmt='ro', **kwargs):\n lines.VertexSelector.__init__(self, line)\n self.markers, = self.axes.plot([], [], fmt, **kwargs)\n\n def process_selected(self, ind, xs, ys):\n self.markers.set_data(xs, ys)\n self.canvas.draw()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n x, y = np.random.rand(2, 30)\n line, = ax.plot(x, y, 'bs-', picker=5)\n\n selector = HighlightSelected(line)\n plt.show()\n\n \"\"\"\n def __init__(self, line):\n \"\"\"\n Initialize the class with a :class:`matplotlib.lines.Line2D`\n instance. The line should already be added to some\n :class:`matplotlib.axes.Axes` instance and should have the\n picker property set.\n \"\"\"\n if not hasattr(line, 'axes'):\n raise RuntimeError('You must first add the line to the Axes')\n\n if line.get_picker() is None:\n raise RuntimeError('You must first set the picker property '\n 'of the line')\n\n self.axes = line.axes\n self.line = line\n self.canvas = self.axes.figure.canvas\n self.cid = self.canvas.mpl_connect('pick_event', self.onpick)\n\n self.ind = set()\n\n def process_selected(self, ind, xs, ys):\n \"\"\"\n Default \"do nothing\" implementation of the\n :meth:`process_selected` method.\n\n *ind* are the indices of the selected vertices. *xs* and *ys*\n are the coordinates of the selected vertices.\n \"\"\"\n pass\n\n def onpick(self, event):\n \"\"\"When the line is picked, update the set of selected indicies.\"\"\"\n if event.artist is not self.line:\n return\n\n for i in event.ind:\n if i in self.ind:\n self.ind.remove(i)\n else:\n self.ind.add(i)\n\n ind = list(self.ind)\n ind.sort()\n xdata, ydata = self.line.get_data()\n self.process_selected(ind, xdata[ind], ydata[ind])\n\n\nlineStyles = Line2D._lineStyles\nlineMarkers = MarkerStyle.markers\ndrawStyles = Line2D.drawStyles\nfillStyles = MarkerStyle.fillstyles\n\ndocstring.interpd.update(Line2D=artist.kwdoc(Line2D))\n\n# You can not set the docstring of an instancemethod,\n# but you can on the underlying function. Go figure.\ndocstring.dedent_interpd(Line2D.__init__)\n" ]
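A note on the Line2D snapshot above: the one-line aliases (set_lw, set_ls, set_mfc, and friends) simply forward to the long-form setters, and the dash/solid cap- and join-style setters validate their argument against validCap/validJoin before storing it. A minimal usage sketch against the public matplotlib API (not part of the record itself):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# ax.plot returns a list of Line2D artists; unpack the single line.
line, = ax.plot([0, 1, 2], [0, 1, 0], marker="o")

line.set_lw(2.5)      # alias for set_linewidth
line.set_ls("--")     # alias for set_linestyle
line.set_mfc("red")   # alias for set_markerfacecolor

# Checked against validCap/validJoin; an unknown style raises ValueError.
line.set_dash_capstyle("round")
line.set_solid_joinstyle("bevel")

plt.show()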
[ [ "numpy.linspace", "numpy.__version__.split", "numpy.asarray", "numpy.arctan2", "numpy.round", "numpy.alltrue", "numpy.zeros_like", "numpy.searchsorted", "numpy.ma.power", "numpy.ma.array", "numpy.ma.max", "numpy.exp", "numpy.where", "numpy.ma.getmask", "numpy.nextafter", "numpy.hypot", "matplotlib.cbook.iterable", "numpy.ma.getmaskarray", "numpy.clip", "numpy.empty_like", "numpy.ma.min", "numpy.sin", "matplotlib.cbook.is_string_like", "numpy.size", "numpy.zeros", "numpy.log", "numpy.ma.asarray", "matplotlib.cbook.deprecated", "numpy.ma.masked_less_equal", "numpy.array", "numpy.abs", "numpy.gradient", "numpy.cos", "numpy.sort", "numpy.ones", "matplotlib.cbook._putmask", "numpy.sign", "numpy.empty" ], [ "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ], [ "numpy.dot", "numpy.sqrt", "matplotlib.bezier.make_wedged_bezier2", "matplotlib.path.Path.unit_regular_polygon", "numpy.asarray", "matplotlib.transforms.Affine2D", "matplotlib.bezier.make_path_regular", "matplotlib.bezier.concatenate_paths", "matplotlib.path.Path.unit_rectangle", "matplotlib.cbook._NestedClassGetter", "numpy.concatenate", "matplotlib.bezier.split_path_inout", "matplotlib.artist.Artist.update_from", "numpy.hstack", "matplotlib.transforms.Affine2D.from_values", "matplotlib.cbook.iterable", "numpy.sin", "matplotlib.path.Path.unit_circle", "matplotlib.colors.colorConverter.to_rgba", "matplotlib.bezier.get_parallels", "matplotlib.transforms.Bbox.from_bounds", "matplotlib.docstring.dedent_interpd", "matplotlib.path.Path.arc", "matplotlib.artist.kwdoc", "matplotlib.artist.get_window_extent", "matplotlib.artist.Artist.set_alpha", "matplotlib.artist.Artist.__init__", "matplotlib.bezier.get_cos_sin", "matplotlib.path.Path", "numpy.arccos", "matplotlib.cbook.dedent", "matplotlib.transforms.BboxTransformTo", "matplotlib.bezier.get_normal_points", "matplotlib.bezier.inside_circle", "matplotlib.bezier.split_bezier_intersecting_with_closedpath", "numpy.array", "matplotlib.artist.Artist.get_transform", "numpy.abs", "matplotlib.transforms.IdentityTransform", "numpy.cos", "matplotlib.bezier.get_intersection", "matplotlib.docstring.interpd.update", "numpy.vstack" ], [ "numpy.abs", "numpy.power", "numpy.asarray", "numpy.arange", "matplotlib.path.Path", "numpy.arctan2", "numpy.concatenate", "numpy.array", "numpy.empty" ], [ "numpy.arange", "matplotlib.transforms.Affine2D", "matplotlib.pyplot.subplots", "matplotlib.mlab.bivariate_normal", "numpy.meshgrid", "matplotlib.pyplot.show" ], [ "matplotlib.markers.MarkerStyle", "numpy.ma.isMaskedArray", "numpy.ma.asarray", "numpy.nonzero", "numpy.asarray", "numpy.amin", "numpy.ones", "numpy.concatenate", "numpy.seterr", "numpy.ma.concatenate", "numpy.less_equal", "matplotlib.docstring.dedent_interpd" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brobeson/vta
[ "01bc98137bab7171c9fe69770090ab01b2b10ae6" ]
[ "vta/loss/loss.py" ]
[ "\"\"\"The entry module for the vta loss command.\"\"\"\n\nimport argparse\nimport json\n\nimport matplotlib.pyplot as plt\nimport numpy\nimport scipy.interpolate\nimport sklearn.linear_model\n\nimport vta.loss.data\n\n\ndef main(arguments, configuration):\n \"\"\"Runs the vta loss command.\n\n This is the main entry point for the VTA loss command. It will draw graphs\n of data related to machine learning loss during training.\n\n :param argparse.Namespace arguments: The command line arguments, as parsed\n by the :py:mod:`argparse` module. Run `vta loss --help` for details.\n :param dict configuration: An optional run time configuration. In normal\n use, this will have been read from a YAML file.\n :return: An exit code following Unix command conventions. 0 indicates that\n command processing succeeded. Any other value indicates that an error\n occurred.\n :rtype: int\n \"\"\"\n configuration = _augment_configuration(configuration[\"loss\"])\n losses = _read_loss_data(arguments.file)\n if configuration[\"reject_invalid_data\"]:\n losses = [l for l in losses if _filter_invalid_data(l)]\n figure = plt.figure(figsize=(15, 10))\n axes = _make_axes(figure, configuration)\n _graph_loss(configuration, axes, losses)\n _graph_precision(configuration, axes, losses)\n axes.legend() # This must remain after the data is graphed.\n plt.show()\n\n\ndef make_parser(subparsers, common_options):\n \"\"\"Creates an argument parser for the VTA loss command.\n\n :param subparsers: The subparsers object returned by a call to\n :py:func:`argparse.ArgumentParser.add_subparsers`. The loss\n argument parser will be added to this.\n\n :return: Nothing\n \"\"\"\n parser = subparsers.add_parser(\n \"loss\",\n help=\"Analyze data related to machine learning training loss.\",\n prog=\"vta loss\",\n description=\"This command can be used to draw graphs of data related to\"\n \" machine learning loss.\",\n parents=[common_options],\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"file\", help=\"The JSON file that has the loss data to graph.\", nargs=\"+\"\n )\n\n\n# -----------------------------------------------------------------------------\n# implementation details\n# -----------------------------------------------------------------------------\ndef _filter_invalid_data(data):\n if vta.loss.data.has_invalid_values(data):\n print(\"warning:\", data.label, \"has invalid data\")\n return False\n return True\n\n\ndef _graph_loss(configuration, axes, losses):\n if not configuration[\"draw_loss\"]:\n return\n vta.loss.data.sort_by_loss(losses, configuration[\"sort_algorithm\"])\n for loss in losses[0 : configuration[\"maximum_graphs\"]]:\n value = loss.loss_values[-1]\n axes.plot(\n range(len(loss.loss_values)),\n loss.loss_values,\n label=f\"[{value:.3f}] {loss.label}\",\n linestyle=\"-\" if configuration[\"line_loss\"] else \"\",\n marker=\".\" if configuration[\"scatter_loss\"] else \"\",\n )\n\n\ndef _graph_precision(configuration, axes, precisions):\n if not configuration[\"draw_precision\"]:\n return\n vta.loss.data.sort_by_precision(precisions, configuration[\"sort_algorithm\"])\n for precision in precisions[0 : configuration[\"maximum_graphs\"]]:\n value = precision.precision_values[-1]\n axes.plot(\n range(len(precision.precision_values)),\n precision.precision_values,\n label=f\"[{value:.3f}] {precision.label}\",\n linestyle=\"-\" if configuration[\"line_precision\"] else \"\",\n marker=\".\" if configuration[\"scatter_precision\"] else \"\",\n )\n\n\ndef 
_augment_configuration(configuration):\n configuration[\"draw_loss\"] = (\n configuration[\"scatter_loss\"] or configuration[\"line_loss\"]\n )\n configuration[\"draw_precision\"] = (\n configuration[\"scatter_precision\"] or configuration[\"line_precision\"]\n )\n # configuration[\"sort_algorithm\"] = configuration[\"sort_algorithm\"].lower()\n return configuration\n\n\ndef _read_loss_data(file_paths):\n losses = vta.loss.data.LossList()\n for file_path in file_paths:\n with open(file_path) as loss_file:\n data = json.load(loss_file)\n losses.append(\n vta.loss.data.Loss(\n data[\"label\"] if \"label\" in data else file_path,\n numpy.array(data[\"loss\"]),\n numpy.array(data[\"precision\"]),\n )\n )\n return losses\n\n\ndef _has_invalid_values(data):\n return numpy.any(numpy.logical_not(numpy.isfinite(data)))\n\n\ndef _make_axes(figure, configuration):\n axes = figure.add_subplot(1, 1, 1)\n axes.set_xlabel(\"Training Epoch\")\n if configuration[\"draw_loss\"] and configuration[\"draw_precision\"]:\n axes.set_title(\"Loss and Precision\")\n axes.set_ylabel(\"Loss / Precision\")\n elif configuration[\"draw_loss\"]:\n axes.set_title(\"Loss\")\n axes.set_ylabel(\"Loss\")\n elif configuration[\"draw_precision\"]:\n axes.set_title(\"Precision\")\n axes.set_ylabel(\"Precision\")\n axes.autoscale(enable=True, axis=\"both\", tight=True)\n axes.grid(\n b=True, which=\"major\", axis=\"both\", color=\"#101010\", alpha=0.5, linestyle=\":\"\n )\n return axes\n\n\ndef _graph_regression(axes, data):\n \"\"\"\n .. todo:: See issue #7\n \"\"\"\n regression = sklearn.linear_model.LinearRegression()\n x_data = numpy.arange(len(data)) # pylint: disable=invalid-name\n regression.fit(x_data.reshape(-1, 1), numpy.array(data).reshape(-1, 1))\n # x_data = numpy.arange(0.0, len(data), 0.1)\n x_data = numpy.linspace(0.0, len(data), 100)\n prediction = regression.predict(x_data.reshape(-1, 1))\n axes.plot(x_data, prediction)\n\n\ndef _graph_fit(axes, data):\n \"\"\"\n .. todo:: See issue #7\n \"\"\"\n interpolation = scipy.interpolate.interp1d(range(len(data)), data)\n x_data = numpy.linspace(0, len(data), 100)\n axes.plot(x_data, interpolation(x_data))\n" ]
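One detail of the helpers above worth spelling out: _graph_regression uses the standard scikit-learn fit/predict pattern, in which LinearRegression expects 2-D (n_samples, n_features) arrays, hence the repeated reshape(-1, 1). A small standalone sketch of the same pattern, with illustrative values that are not from the repo:

import numpy
import sklearn.linear_model

data = numpy.array([0.9, 0.7, 0.55, 0.5, 0.42])  # e.g. per-epoch losses
x = numpy.arange(len(data)).reshape(-1, 1)       # column vector of epochs
regression = sklearn.linear_model.LinearRegression()
regression.fit(x, data.reshape(-1, 1))
grid = numpy.linspace(0.0, len(data), 100).reshape(-1, 1)
prediction = regression.predict(grid)            # fitted loss trend
print(prediction[0], prediction[-1])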
[ [ "numpy.array", "matplotlib.pyplot.show", "numpy.isfinite", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wrld/PRoGAN
[ "ef28d4b91b76dd7f9e7d466b007826491bce5080" ]
[ "phase_correlation/phase_corr.py" ]
[ "import sys\nimport os\nsys.path.append(os.path.abspath(\"..\"))\n\nimport cv2\nimport math\nimport torch\nimport kornia\nimport numpy as np\nimport torch.nn as nn\nfrom numpy.fft import fft2, ifft2, fftshift\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import transforms, utils\nimport matplotlib.pyplot as plt\nfrom util.utils import *\nfrom log_polar.log_polar import *\n\n\ndef phase_corr(a, b, device, logbase, trans=False):\n # a: template; b: source\n # imshow(a.squeeze(0).float())\n G_a = torch.rfft(a, 2, onesided=False)\n G_b = torch.rfft(b, 2, onesided=False)\n eps = 1e-15\n\n real_a = G_a[:, :, :, 0]\n real_b = G_b[:, :, :, 0]\n imag_a = G_a[:, :, :, 1]\n imag_b = G_b[:, :, :, 1]\n\n # compute a * b.conjugate; shape=[B,H,W,C]\n R = torch.FloatTensor(G_a.shape[0], G_a.shape[1], G_a.shape[2],\n 2).to(device)\n R[:, :, :, 0] = real_a * real_b + imag_a * imag_b\n R[:, :, :, 1] = real_a * imag_b - real_b * imag_a\n\n r0 = torch.sqrt(real_a**2 + imag_a**2 + eps) * torch.sqrt(real_b**2 +\n imag_b**2 + eps)\n R[:, :, :, 0] = R[:, :, :, 0].clone() / (r0 + eps).to(device)\n R[:, :, :, 1] = R[:, :, :, 1].clone() / (r0 + eps).to(device)\n\n r = torch.ifft(R, 2)\n r_real = r[:, :, :, 0]\n r_imag = r[:, :, :, 1]\n r = torch.sqrt(r_real**2 + r_imag**2 + eps)\n r = fftshift2d(r)\n if trans:\n r[:, 0:60, :] = 0.\n r[:, G_a.shape[1] - 60:G_a.shape[1], :] = 0.\n r[:, :, 0:60] = 0.\n r[:, :, G_a.shape[1] - 60:G_a.shape[1]] = 0.\n # imshow(r[0,:,:])\n # plt.show()\n\n angle_resize_out_tensor = torch.sum(r.clone(), 2, keepdim=False)\n scale_reszie_out_tensor = torch.sum(r.clone(), 1, keepdim=False)\n # get the argmax of the angle and the scale\n angle_out_tensor = torch.argmax(angle_resize_out_tensor.clone().detach(),\n dim=-1)\n scale_out_tensor = torch.argmax(scale_reszie_out_tensor.clone().detach(),\n dim=-1)\n if not trans:\n angle_out_tensor = angle_out_tensor * 180.00 / r.shape[1]\n for batch_num in range(angle_out_tensor.shape[0]):\n if angle_out_tensor[batch_num].item() > 90:\n angle_out_tensor[batch_num] -= 90.00\n else:\n angle_out_tensor[batch_num] += 90.00\n\n logbase = logbase.to(device)\n\n # sca_f = scale_out_tensor.clone() * 256 / r.shape[2] - 256 // 2\n scale_out_tensor = 1 / torch.pow(\n logbase, scale_out_tensor.clone()) #logbase ** sca_f\n\n return scale_out_tensor, angle_out_tensor, r, logbase\n\n\ndef highpass(shape):\n \"\"\"Return highpass filter to be multiplied with fourier transform.\"\"\"\n i1 = torch.cos(torch.linspace(-np.pi / 2.0, np.pi / 2.0, shape[0]))\n i2 = torch.cos(torch.linspace(-np.pi / 2.0, np.pi / 2.0, shape[1]))\n x = torch.einsum('i,j->ij', i1, i2)\n return (1.0 - x) * (1.0 - x)\n\n\ndef logpolar_filter(shape):\n \"\"\"\n Make a radial cosine filter for the logpolar transform.\n This filter suppresses low frequencies and completely removes\n the zero freq.\n \"\"\"\n yy = np.linspace(-np.pi / 2., np.pi / 2., shape[0])[:, np.newaxis]\n xx = np.linspace(-np.pi / 2., np.pi / 2., shape[1])[np.newaxis, :]\n # Supressing low spatial frequencies is a must when using log-polar\n # transform. 
The scale stuff is poorly reflected with low freqs.\n rads = np.sqrt(yy**2 + xx**2)\n filt = 1.0 - np.cos(rads)**2\n # vvv This doesn't really matter, very high freqs are not too usable anyway\n filt[np.abs(rads) > np.pi / 2] = 1\n filt = torch.from_numpy(filt)\n return filt\n\n\nclass LogPolar(nn.Module):\n def __init__(self, out_size, device):\n super(LogPolar, self).__init__()\n self.out_size = out_size\n self.device = device\n\n def forward(self, input):\n return polar_transformer(input, self.out_size, self.device)\n\n\nclass PhaseCorr(nn.Module):\n def __init__(self, device, logbase, trans=False):\n super(PhaseCorr, self).__init__()\n self.device = device\n self.logbase = logbase\n self.trans = trans\n\n def forward(self, template, source):\n return phase_corr(template,\n source,\n self.device,\n self.logbase,\n trans=self.trans)\n\n\n" ]
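On logpolar_filter above: per the docstrings, the radial cosine window is meant to be multiplied with an fftshift-ed magnitude spectrum so that the DC term and other low frequencies do not dominate the subsequent log-polar resampling. A quick NumPy illustration of that intent (my reading of the comments, not code from this repo):

import numpy as np

def radial_cosine_filter(shape):
    # Same construction as logpolar_filter above, minus the torch conversion.
    yy = np.linspace(-np.pi / 2.0, np.pi / 2.0, shape[0])[:, np.newaxis]
    xx = np.linspace(-np.pi / 2.0, np.pi / 2.0, shape[1])[np.newaxis, :]
    rads = np.sqrt(yy ** 2 + xx ** 2)
    filt = 1.0 - np.cos(rads) ** 2
    filt[np.abs(rads) > np.pi / 2] = 1.0
    return filt

img = np.random.default_rng(0).random((65, 65)) + 10.0   # large DC offset
spectrum = np.abs(np.fft.fftshift(np.fft.fft2(img)))
filtered = spectrum * radial_cosine_filter(spectrum.shape)
print(spectrum.max(), filtered[32, 32])  # huge DC peak vs. zeroed centre bin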
[ [ "torch.linspace", "numpy.sqrt", "numpy.linspace", "numpy.abs", "torch.sqrt", "torch.einsum", "torch.from_numpy", "numpy.cos", "torch.rfft", "torch.FloatTensor", "torch.ifft" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ptrskay3/pandas
[ "eca60680df2d4b7973d9e5b8252b7b9be3f96c3c" ]
[ "pandas/core/window/rolling.py" ]
[ "\"\"\"\nProvide a generic structure to support window functions,\nsimilar to how we have a Groupby object.\n\"\"\"\nfrom datetime import timedelta\nfrom functools import partial\nimport inspect\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Dict,\n List,\n Optional,\n Set,\n Tuple,\n Type,\n Union,\n)\n\nimport numpy as np\n\nfrom pandas._libs.tslibs import BaseOffset, to_offset\nimport pandas._libs.window.aggregations as window_aggregations\nfrom pandas._typing import ArrayLike, Axis, FrameOrSeriesUnion, Label\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly, doc\n\nfrom pandas.core.dtypes.common import (\n ensure_float64,\n is_bool,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_scalar,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCDatetimeIndex,\n ABCPeriodIndex,\n ABCSeries,\n ABCTimedeltaIndex,\n)\n\nfrom pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexes.api import Index, MultiIndex\nfrom pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba\nfrom pandas.core.window.common import (\n WindowGroupByMixin,\n _doc_template,\n _flex_binary_moment,\n _shared_docs,\n zsqrt,\n)\nfrom pandas.core.window.indexers import (\n BaseIndexer,\n FixedWindowIndexer,\n GroupbyRollingIndexer,\n VariableWindowIndexer,\n)\nfrom pandas.core.window.numba_ import generate_numba_apply_func\n\nif TYPE_CHECKING:\n from pandas import Series\n\n\ndef calculate_center_offset(window) -> int:\n \"\"\"\n Calculate an offset necessary to have the window label to be centered.\n\n Parameters\n ----------\n window: ndarray or int\n window weights or window\n\n Returns\n -------\n int\n \"\"\"\n if not is_integer(window):\n window = len(window)\n return int((window - 1) / 2.0)\n\n\ndef calculate_min_periods(\n window: int,\n min_periods: Optional[int],\n num_values: int,\n required_min_periods: int,\n floor: int,\n) -> int:\n \"\"\"\n Calculate final minimum periods value for rolling aggregations.\n\n Parameters\n ----------\n window : passed window value\n min_periods : passed min periods value\n num_values : total number of values\n required_min_periods : required min periods per aggregation function\n floor : required min periods per aggregation function\n\n Returns\n -------\n min_periods : int\n \"\"\"\n if min_periods is None:\n min_periods = window\n else:\n min_periods = max(required_min_periods, min_periods)\n if min_periods > window:\n raise ValueError(f\"min_periods {min_periods} must be <= window {window}\")\n elif min_periods > num_values:\n min_periods = num_values + 1\n elif min_periods < 0:\n raise ValueError(\"min_periods must be >= 0\")\n return max(min_periods, floor)\n\n\ndef get_weighted_roll_func(cfunc: Callable) -> Callable:\n \"\"\"\n Wrap weighted rolling cython function with min periods argument.\n\n Parameters\n ----------\n cfunc : function\n Cython weighted rolling function\n\n Returns\n -------\n function\n \"\"\"\n\n def func(arg, window, min_periods=None):\n if min_periods is None:\n min_periods = len(window)\n return cfunc(arg, window, min_periods)\n\n return func\n\n\nclass _Window(PandasObject, ShallowMixin, SelectionMixin):\n _attributes: List[str] = [\n \"window\",\n \"min_periods\",\n 
\"center\",\n \"win_type\",\n \"axis\",\n \"on\",\n \"closed\",\n ]\n exclusions: Set[str] = set()\n\n def __init__(\n self,\n obj: FrameOrSeriesUnion,\n window=None,\n min_periods: Optional[int] = None,\n center: bool = False,\n win_type: Optional[str] = None,\n axis: Axis = 0,\n on: Optional[Union[str, Index]] = None,\n closed: Optional[str] = None,\n **kwargs,\n ):\n\n self.__dict__.update(kwargs)\n self.obj = obj\n self.on = on\n self.closed = closed\n self.window = window\n self.min_periods = min_periods\n self.center = center\n self.win_type = win_type\n self.win_freq = None\n self.axis = obj._get_axis_number(axis) if axis is not None else None\n self.validate()\n\n @property\n def _constructor(self):\n return Window\n\n @property\n def is_datetimelike(self) -> Optional[bool]:\n return None\n\n @property\n def _on(self):\n return None\n\n @property\n def is_freq_type(self) -> bool:\n return self.win_type == \"freq\"\n\n def validate(self) -> None:\n if self.center is not None and not is_bool(self.center):\n raise ValueError(\"center must be a boolean\")\n if self.min_periods is not None and not is_integer(self.min_periods):\n raise ValueError(\"min_periods must be an integer\")\n if self.closed is not None and self.closed not in [\n \"right\",\n \"both\",\n \"left\",\n \"neither\",\n ]:\n raise ValueError(\"closed must be 'right', 'left', 'both' or 'neither'\")\n if not isinstance(self.obj, (ABCSeries, ABCDataFrame)):\n raise TypeError(f\"invalid type: {type(self)}\")\n if isinstance(self.window, BaseIndexer):\n self._validate_get_window_bounds_signature(self.window)\n\n @staticmethod\n def _validate_get_window_bounds_signature(window: BaseIndexer) -> None:\n \"\"\"\n Validate that the passed BaseIndexer subclass has\n a get_window_bounds with the correct signature.\n \"\"\"\n get_window_bounds_signature = inspect.signature(\n window.get_window_bounds\n ).parameters.keys()\n expected_signature = inspect.signature(\n BaseIndexer().get_window_bounds\n ).parameters.keys()\n if get_window_bounds_signature != expected_signature:\n raise ValueError(\n f\"{type(window).__name__} does not implement the correct signature for \"\n f\"get_window_bounds\"\n )\n\n def _create_blocks(self, obj: FrameOrSeriesUnion):\n \"\"\"\n Split data into blocks & return conformed data.\n \"\"\"\n # filter out the on from the object\n if self.on is not None and not isinstance(self.on, Index):\n if obj.ndim == 2:\n obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)\n blocks = obj._to_dict_of_blocks(copy=False).values()\n\n return blocks, obj\n\n def _gotitem(self, key, ndim, subset=None):\n \"\"\"\n Sub-classes to define. 
Return a sliced object.\n\n Parameters\n ----------\n key : str / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n # create a new object to prevent aliasing\n if subset is None:\n subset = self.obj\n self = self._shallow_copy(subset)\n self._reset_cache()\n if subset.ndim == 2:\n if is_scalar(key) and key in subset or is_list_like(key):\n self._selection = key\n return self\n\n def __getattr__(self, attr: str):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n\n raise AttributeError(\n f\"'{type(self).__name__}' object has no attribute '{attr}'\"\n )\n\n def _dir_additions(self):\n return self.obj._dir_additions()\n\n def _get_win_type(self, kwargs: Dict):\n \"\"\"\n Exists for compatibility, overridden by subclass Window.\n\n Parameters\n ----------\n kwargs : dict\n ignored, exists for compatibility\n\n Returns\n -------\n None\n \"\"\"\n return None\n\n def _get_window(self, other=None, win_type: Optional[str] = None) -> int:\n \"\"\"\n Return window length.\n\n Parameters\n ----------\n other :\n ignored, exists for compatibility\n win_type :\n ignored, exists for compatibility\n\n Returns\n -------\n window : int\n \"\"\"\n if isinstance(self.window, BaseIndexer):\n return self.min_periods or 0\n return self.window\n\n @property\n def _window_type(self) -> str:\n return type(self).__name__\n\n def __repr__(self) -> str:\n \"\"\"\n Provide a nice str repr of our rolling object.\n \"\"\"\n attrs_list = (\n f\"{attr_name}={getattr(self, attr_name)}\"\n for attr_name in self._attributes\n if getattr(self, attr_name, None) is not None\n )\n attrs = \",\".join(attrs_list)\n return f\"{self._window_type} [{attrs}]\"\n\n def __iter__(self):\n window = self._get_window(win_type=None)\n _, obj = self._create_blocks(self._selected_obj)\n index = self._get_window_indexer(window=window)\n\n start, end = index.get_window_bounds(\n num_values=len(obj),\n min_periods=self.min_periods,\n center=self.center,\n closed=self.closed,\n )\n # From get_window_bounds, those two should be equal in length of array\n assert len(start) == len(end)\n\n for s, e in zip(start, end):\n result = obj.iloc[slice(s, e)]\n yield result\n\n def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:\n \"\"\"Convert input to numpy arrays for Cython routines\"\"\"\n if values is None:\n values = extract_array(self._selected_obj, extract_numpy=True)\n\n # GH #12373 : rolling functions error on float32 data\n # make sure the data is coerced to float64\n if is_float_dtype(values.dtype):\n values = ensure_float64(values)\n elif is_integer_dtype(values.dtype):\n values = ensure_float64(values)\n elif needs_i8_conversion(values.dtype):\n raise NotImplementedError(\n f\"ops for {self._window_type} for this \"\n f\"dtype {values.dtype} are not implemented\"\n )\n else:\n try:\n values = ensure_float64(values)\n except (ValueError, TypeError) as err:\n raise TypeError(f\"cannot handle this type -> {values.dtype}\") from err\n\n # Convert inf to nan for C funcs\n inf = np.isinf(values)\n if inf.any():\n values = np.where(inf, np.nan, values)\n\n return values\n\n def _wrap_result(self, result, block=None, obj=None):\n \"\"\"\n Wrap a single result.\n \"\"\"\n if obj is None:\n obj = self._selected_obj\n index = obj.index\n\n if isinstance(result, np.ndarray):\n\n if result.ndim == 1:\n from pandas import Series\n\n return Series(result, index, name=obj.name)\n\n return 
type(obj)(result, index=index, columns=block.columns)\n return result\n\n def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion:\n \"\"\"\n Wrap the results.\n\n Parameters\n ----------\n results : list of ndarrays\n obj : conformed data (may be resampled)\n skipped: List[int]\n Indices of blocks that are skipped.\n \"\"\"\n from pandas import Series, concat\n\n if obj.ndim == 1:\n if not results:\n raise DataError(\"No numeric types to aggregate\")\n assert len(results) == 1\n return Series(results[0], index=obj.index, name=obj.name)\n\n exclude: List[Label] = []\n orig_blocks = list(obj._to_dict_of_blocks(copy=False).values())\n for i in skipped:\n exclude.extend(orig_blocks[i].columns)\n\n kept_blocks = [blk for i, blk in enumerate(orig_blocks) if i not in skipped]\n\n final = []\n for result, block in zip(results, kept_blocks):\n\n result = type(obj)(result, index=obj.index, columns=block.columns)\n final.append(result)\n\n exclude = exclude or []\n columns = [c for c in self._selected_obj.columns if c not in exclude]\n if not columns and not len(final) and exclude:\n raise DataError(\"No numeric types to aggregate\")\n elif not len(final):\n return obj.astype(\"float64\")\n\n df = concat(final, axis=1).reindex(columns=columns, copy=False)\n\n # if we have an 'on' column we want to put it back into\n # the results in the same location\n if self.on is not None and not self._on.equals(obj.index):\n name = self._on.name\n extra_col = Series(self._on, index=obj.index, name=name)\n if name not in df.columns and name not in df.index.names:\n new_loc = len(df.columns)\n df.insert(new_loc, name, extra_col)\n elif name in df.columns:\n # TODO: sure we want to overwrite results?\n df[name] = extra_col\n return df\n\n def _center_window(self, result, window) -> np.ndarray:\n \"\"\"\n Center the result in the window.\n \"\"\"\n if self.axis > result.ndim - 1:\n raise ValueError(\"Requested axis is larger then no. 
of argument dimensions\")\n\n offset = calculate_center_offset(window)\n if offset > 0:\n lead_indexer = [slice(None)] * result.ndim\n lead_indexer[self.axis] = slice(offset, None)\n result = np.copy(result[tuple(lead_indexer)])\n return result\n\n def _get_roll_func(self, func_name: str) -> Callable:\n \"\"\"\n Wrap rolling function to check values passed.\n\n Parameters\n ----------\n func_name : str\n Cython function used to calculate rolling statistics\n\n Returns\n -------\n func : callable\n \"\"\"\n window_func = getattr(window_aggregations, func_name, None)\n if window_func is None:\n raise ValueError(\n f\"we do not support this function in window_aggregations.{func_name}\"\n )\n return window_func\n\n def _get_cython_func_type(self, func: str) -> Callable:\n \"\"\"\n Return a variable or fixed cython function type.\n\n Variable algorithms do not use window while fixed do.\n \"\"\"\n if self.is_freq_type or isinstance(self.window, BaseIndexer):\n return self._get_roll_func(f\"{func}_variable\")\n return partial(self._get_roll_func(f\"{func}_fixed\"), win=self._get_window())\n\n def _get_window_indexer(self, window: int) -> BaseIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n \"\"\"\n if isinstance(self.window, BaseIndexer):\n return self.window\n if self.is_freq_type:\n return VariableWindowIndexer(index_array=self._on.asi8, window_size=window)\n return FixedWindowIndexer(window_size=window)\n\n def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> \"Series\":\n \"\"\"\n Series version of _apply_blockwise\n \"\"\"\n _, obj = self._create_blocks(self._selected_obj)\n values = obj.values\n\n try:\n values = self._prep_values(obj.values)\n except (TypeError, NotImplementedError) as err:\n raise DataError(\"No numeric types to aggregate\") from err\n\n result = homogeneous_func(values)\n return obj._constructor(result, index=obj.index, name=obj.name)\n\n def _apply_blockwise(\n self, homogeneous_func: Callable[..., ArrayLike]\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Apply the given function to the DataFrame broken down into homogeneous\n sub-frames.\n \"\"\"\n if self._selected_obj.ndim == 1:\n return self._apply_series(homogeneous_func)\n\n # This isn't quite blockwise, since `blocks` is actually a collection\n # of homogenenous DataFrames.\n blocks, obj = self._create_blocks(self._selected_obj)\n\n skipped: List[int] = []\n results: List[ArrayLike] = []\n for i, b in enumerate(blocks):\n try:\n values = self._prep_values(b.values)\n\n except (TypeError, NotImplementedError):\n skipped.append(i)\n continue\n\n result = homogeneous_func(values)\n results.append(result)\n\n return self._wrap_results(results, obj, skipped)\n\n def _apply(\n self,\n func: Callable,\n center: bool,\n require_min_periods: int = 0,\n floor: int = 1,\n is_weighted: bool = False,\n name: Optional[str] = None,\n use_numba_cache: bool = False,\n **kwargs,\n ):\n \"\"\"\n Rolling statistical measure using supplied function.\n\n Designed to be used with passed-in Cython array-based functions.\n\n Parameters\n ----------\n func : callable function to apply\n center : bool\n require_min_periods : int\n floor : int\n is_weighted : bool\n name : str,\n compatibility with groupby.rolling\n use_numba_cache : bool\n whether to cache a numba compiled function. 
Only available for numba\n enabled methods (so far only apply)\n **kwargs\n additional arguments for rolling function and window function\n\n Returns\n -------\n y : type of input\n \"\"\"\n win_type = self._get_win_type(kwargs)\n window = self._get_window(win_type=win_type)\n window_indexer = self._get_window_indexer(window)\n\n def homogeneous_func(values: np.ndarray):\n # calculation function\n\n if values.size == 0:\n return values.copy()\n\n offset = calculate_center_offset(window) if center else 0\n additional_nans = np.array([np.nan] * offset)\n\n if not is_weighted:\n\n def calc(x):\n x = np.concatenate((x, additional_nans))\n if not isinstance(self.window, BaseIndexer):\n min_periods = calculate_min_periods(\n window, self.min_periods, len(x), require_min_periods, floor\n )\n else:\n min_periods = calculate_min_periods(\n window_indexer.window_size,\n self.min_periods,\n len(x),\n require_min_periods,\n floor,\n )\n start, end = window_indexer.get_window_bounds(\n num_values=len(x),\n min_periods=self.min_periods,\n center=self.center,\n closed=self.closed,\n )\n return func(x, start, end, min_periods)\n\n else:\n\n def calc(x):\n x = np.concatenate((x, additional_nans))\n return func(x, window, self.min_periods)\n\n with np.errstate(all=\"ignore\"):\n if values.ndim > 1:\n result = np.apply_along_axis(calc, self.axis, values)\n else:\n result = calc(values)\n result = np.asarray(result)\n\n if use_numba_cache:\n NUMBA_FUNC_CACHE[(kwargs[\"original_func\"], \"rolling_apply\")] = func\n\n if center:\n result = self._center_window(result, window)\n\n return result\n\n return self._apply_blockwise(homogeneous_func)\n\n def aggregate(self, func, *args, **kwargs):\n result, how = self._aggregate(func, *args, **kwargs)\n if result is None:\n return self.apply(func, raw=False, args=args, kwargs=kwargs)\n return result\n\n agg = aggregate\n\n _shared_docs[\"sum\"] = dedent(\n \"\"\"\n Calculate %(name)s sum of given DataFrame or Series.\n\n Parameters\n ----------\n *args, **kwargs\n For compatibility with other %(name)s methods. 
Has no effect\n on the computed value.\n\n Returns\n -------\n Series or DataFrame\n Same type as the input, with the same index, containing the\n %(name)s sum.\n\n See Also\n --------\n pandas.Series.sum : Reducing sum for Series.\n pandas.DataFrame.sum : Reducing sum for DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.rolling(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 9.0\n 4 12.0\n dtype: float64\n\n >>> s.expanding(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 10.0\n 4 15.0\n dtype: float64\n\n >>> s.rolling(3, center=True).sum()\n 0 NaN\n 1 6.0\n 2 9.0\n 3 12.0\n 4 NaN\n dtype: float64\n\n For DataFrame, each %(name)s sum is computed column-wise.\n\n >>> df = pd.DataFrame({\"A\": s, \"B\": s ** 2})\n >>> df\n A B\n 0 1 1\n 1 2 4\n 2 3 9\n 3 4 16\n 4 5 25\n\n >>> df.rolling(3).sum()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 6.0 14.0\n 3 9.0 29.0\n 4 12.0 50.0\n \"\"\"\n )\n\n _shared_docs[\"mean\"] = dedent(\n \"\"\"\n Calculate the %(name)s mean of the values.\n\n Parameters\n ----------\n *args\n Under Review.\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.Series.mean : Equivalent method for Series.\n pandas.DataFrame.mean : Equivalent method for DataFrame.\n\n Examples\n --------\n The below examples will show rolling mean calculations with window sizes of\n two and three, respectively.\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).mean()\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n >>> s.rolling(3).mean()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\"\n )\n\n _shared_docs[\"var\"] = dedent(\n \"\"\"\n Calculate unbiased %(name)s variance.\n %(versionadded)s\n Normalized by N-1 by default. This can be changed using the `ddof`\n argument.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n *args, **kwargs\n For NumPy compatibility. No additional arguments are used.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller of the %(name)s calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.Series.var : Equivalent method for Series.\n pandas.DataFrame.var : Equivalent method for DataFrame.\n numpy.var : Equivalent method for Numpy array.\n\n Notes\n -----\n The default `ddof` of 1 used in :meth:`Series.var` is different than the\n default `ddof` of 0 in :func:`numpy.var`.\n\n A minimum of 1 period is required for the rolling calculation.\n\n Examples\n --------\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 1.000000\n 4 1.000000\n 5 1.333333\n 6 0.000000\n dtype: float64\n\n >>> s.expanding(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 0.916667\n 4 0.800000\n 5 0.700000\n 6 0.619048\n dtype: float64\n \"\"\"\n )\n\n _shared_docs[\"std\"] = dedent(\n \"\"\"\n Calculate %(name)s standard deviation.\n %(versionadded)s\n Normalized by N-1 by default. This can be changed using the `ddof`\n argument.\n\n Parameters\n ----------\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n *args, **kwargs\n For NumPy compatibility. No additional arguments are used.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller of the %(name)s calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.Series.std : Equivalent method for Series.\n pandas.DataFrame.std : Equivalent method for DataFrame.\n numpy.std : Equivalent method for Numpy array.\n\n Notes\n -----\n The default `ddof` of 1 used in Series.std is different than the default\n `ddof` of 0 in numpy.std.\n\n A minimum of one period is required for the rolling calculation.\n\n Examples\n --------\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 1.000000\n 4 1.000000\n 5 1.154701\n 6 0.000000\n dtype: float64\n\n >>> s.expanding(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 0.957427\n 4 0.894427\n 5 0.836660\n 6 0.786796\n dtype: float64\n \"\"\"\n )\n\n\nclass Window(_Window):\n \"\"\"\n Provide rolling window calculations.\n\n Parameters\n ----------\n window : int, offset, or BaseIndexer subclass\n Size of the moving window. This is the number of observations used for\n calculating the statistic. Each window will be a fixed size.\n\n If its an offset then this will be the time period of each window. Each\n window will be a variable sized based on the observations included in\n the time-period. This is only valid for datetimelike indexes.\n\n If a BaseIndexer subclass is passed, calculates the window boundaries\n based on the defined ``get_window_bounds`` method. Additional rolling\n keyword arguments, namely `min_periods`, `center`, and\n `closed` will be passed to `get_window_bounds`.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). For a window that is specified by an offset,\n `min_periods` will default to 1. Otherwise, `min_periods` will default\n to the size of the window.\n center : bool, default False\n Set the labels at the center of the window.\n win_type : str, default None\n Provide a window type. If ``None``, all points are evenly weighted.\n See the notes below for further information.\n on : str, optional\n For a DataFrame, a datetime-like column or MultiIndex level on which\n to calculate the rolling window, rather than the DataFrame's index.\n Provided integer column is ignored and excluded from result since\n an integer index is not used to calculate the rolling window.\n axis : int or str, default 0\n closed : str, default None\n Make the interval closed on the 'right', 'left', 'both' or\n 'neither' endpoints.\n For offset-based windows, it defaults to 'right'.\n For fixed windows, defaults to 'both'. Remaining cases not implemented\n for fixed windows.\n\n Returns\n -------\n a Window or Rolling sub-classed for the particular operation\n\n See Also\n --------\n expanding : Provides expanding transformations.\n ewm : Provides exponential weighted functions.\n\n Notes\n -----\n By default, the result is set to the right edge of the window. 
This can be\n changed to the center of the window by setting ``center=True``.\n\n To learn more about the offsets & frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n The recognized win_types are:\n\n * ``boxcar``\n * ``triang``\n * ``blackman``\n * ``hamming``\n * ``bartlett``\n * ``parzen``\n * ``bohman``\n * ``blackmanharris``\n * ``nuttall``\n * ``barthann``\n * ``kaiser`` (needs parameter: beta)\n * ``gaussian`` (needs parameter: std)\n * ``general_gaussian`` (needs parameters: power, width)\n * ``slepian`` (needs parameter: width)\n * ``exponential`` (needs parameter: tau), center is set to None.\n\n If ``win_type=None`` all points are evenly weighted. To learn more about\n different window types see `scipy.signal window functions\n <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.\n\n Certain window types require additional parameters to be passed. Please see\n the third example below on how to add the additional parameters.\n\n Examples\n --------\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})\n >>> df\n B\n 0 0.0\n 1 1.0\n 2 2.0\n 3 NaN\n 4 4.0\n\n Rolling sum with a window length of 2, using the 'triang'\n window type.\n\n >>> df.rolling(2, win_type='triang').sum()\n B\n 0 NaN\n 1 0.5\n 2 1.5\n 3 NaN\n 4 NaN\n\n Rolling sum with a window length of 2, using the 'gaussian'\n window type (note how we need to specify std).\n\n >>> df.rolling(2, win_type='gaussian').sum(std=3)\n B\n 0 NaN\n 1 0.986207\n 2 2.958621\n 3 NaN\n 4 NaN\n\n Rolling sum with a window length of 2, min_periods defaults\n to the window length.\n\n >>> df.rolling(2).sum()\n B\n 0 NaN\n 1 1.0\n 2 3.0\n 3 NaN\n 4 NaN\n\n Same as above, but explicitly set the min_periods\n\n >>> df.rolling(2, min_periods=1).sum()\n B\n 0 0.0\n 1 1.0\n 2 3.0\n 3 2.0\n 4 4.0\n\n Same as above, but with forward-looking windows\n\n >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)\n >>> df.rolling(window=indexer, min_periods=1).sum()\n B\n 0 1.0\n 1 3.0\n 2 2.0\n 3 4.0\n 4 4.0\n\n A ragged (meaning not-a-regular frequency), time-indexed DataFrame\n\n >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},\n ... index = [pd.Timestamp('20130101 09:00:00'),\n ... pd.Timestamp('20130101 09:00:02'),\n ... pd.Timestamp('20130101 09:00:03'),\n ... pd.Timestamp('20130101 09:00:05'),\n ... 
pd.Timestamp('20130101 09:00:06')])\n\n >>> df\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 2.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n\n Contrasting to an integer rolling window, this will roll a variable\n length window corresponding to the time period.\n The default for min_periods is 1.\n\n >>> df.rolling('2s').sum()\n B\n 2013-01-01 09:00:00 0.0\n 2013-01-01 09:00:02 1.0\n 2013-01-01 09:00:03 3.0\n 2013-01-01 09:00:05 NaN\n 2013-01-01 09:00:06 4.0\n \"\"\"\n\n def validate(self):\n super().validate()\n\n window = self.window\n if isinstance(window, BaseIndexer):\n raise NotImplementedError(\n \"BaseIndexer subclasses not implemented with win_types.\"\n )\n elif isinstance(window, (list, tuple, np.ndarray)):\n pass\n elif is_integer(window):\n if window <= 0:\n raise ValueError(\"window must be > 0 \")\n import_optional_dependency(\n \"scipy\", extra=\"Scipy is required to generate window weight.\"\n )\n import scipy.signal as sig\n\n if not isinstance(self.win_type, str):\n raise ValueError(f\"Invalid win_type {self.win_type}\")\n if getattr(sig, self.win_type, None) is None:\n raise ValueError(f\"Invalid win_type {self.win_type}\")\n else:\n raise ValueError(f\"Invalid window {window}\")\n\n def _get_win_type(self, kwargs: Dict) -> Union[str, Tuple]:\n \"\"\"\n Extract arguments for the window type, provide validation for it\n and return the validated window type.\n\n Parameters\n ----------\n kwargs : dict\n\n Returns\n -------\n win_type : str, or tuple\n \"\"\"\n # the below may pop from kwargs\n def _validate_win_type(win_type, kwargs):\n arg_map = {\n \"kaiser\": [\"beta\"],\n \"gaussian\": [\"std\"],\n \"general_gaussian\": [\"power\", \"width\"],\n \"slepian\": [\"width\"],\n \"exponential\": [\"tau\"],\n }\n\n if win_type in arg_map:\n win_args = _pop_args(win_type, arg_map[win_type], kwargs)\n if win_type == \"exponential\":\n # exponential window requires the first arg (center)\n # to be set to None (necessary for symmetric window)\n win_args.insert(0, None)\n\n return tuple([win_type] + win_args)\n\n return win_type\n\n def _pop_args(win_type, arg_names, kwargs):\n all_args = []\n for n in arg_names:\n if n not in kwargs:\n raise ValueError(f\"{win_type} window requires {n}\")\n all_args.append(kwargs.pop(n))\n return all_args\n\n return _validate_win_type(self.win_type, kwargs)\n\n def _get_window(\n self, other=None, win_type: Optional[Union[str, Tuple]] = None\n ) -> np.ndarray:\n \"\"\"\n Get the window, weights.\n\n Parameters\n ----------\n other :\n ignored, exists for compatibility\n win_type : str, or tuple\n type of window to create\n\n Returns\n -------\n window : ndarray\n the window, weights\n \"\"\"\n window = self.window\n if isinstance(window, (list, tuple, np.ndarray)):\n return com.asarray_tuplesafe(window).astype(float)\n elif is_integer(window):\n import scipy.signal as sig\n\n # GH #15662. 
`False` makes symmetric window, rather than periodic.\n return sig.get_window(win_type, window, False).astype(float)\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n pandas.DataFrame.aggregate : Similar DataFrame method.\n pandas.Series.aggregate : Similar Series method.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2, win_type=\"boxcar\").agg(\"mean\")\n A B C\n 0 NaN NaN NaN\n 1 1.5 4.5 7.5\n 2 2.5 5.5 8.5\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n versionadded=\"\",\n klass=\"Series/DataFrame\",\n axis=\"\",\n )\n def aggregate(self, func, *args, **kwargs):\n result, how = self._aggregate(func, *args, **kwargs)\n if result is None:\n\n # these must apply directly\n result = func(self)\n\n return result\n\n agg = aggregate\n\n @Substitution(name=\"window\")\n @Appender(_shared_docs[\"sum\"])\n def sum(self, *args, **kwargs):\n nv.validate_window_func(\"sum\", args, kwargs)\n window_func = self._get_roll_func(\"roll_weighted_sum\")\n window_func = get_weighted_roll_func(window_func)\n return self._apply(\n window_func, center=self.center, is_weighted=True, name=\"sum\", **kwargs\n )\n\n @Substitution(name=\"window\")\n @Appender(_shared_docs[\"mean\"])\n def mean(self, *args, **kwargs):\n nv.validate_window_func(\"mean\", args, kwargs)\n window_func = self._get_roll_func(\"roll_weighted_mean\")\n window_func = get_weighted_roll_func(window_func)\n return self._apply(\n window_func, center=self.center, is_weighted=True, name=\"mean\", **kwargs\n )\n\n @Substitution(name=\"window\", versionadded=\"\\n.. versionadded:: 1.0.0\\n\")\n @Appender(_shared_docs[\"var\"])\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n window_func = partial(self._get_roll_func(\"roll_weighted_var\"), ddof=ddof)\n window_func = get_weighted_roll_func(window_func)\n kwargs.pop(\"name\", None)\n return self._apply(\n window_func, center=self.center, is_weighted=True, name=\"var\", **kwargs\n )\n\n @Substitution(name=\"window\", versionadded=\"\\n.. versionadded:: 1.0.0\\n\")\n @Appender(_shared_docs[\"std\"])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n return zsqrt(self.var(ddof=ddof, name=\"std\", **kwargs))\n\n\nclass _Rolling(_Window):\n @property\n def _constructor(self):\n return Rolling\n\n\nclass _Rolling_and_Expanding(_Rolling):\n\n _shared_docs[\"count\"] = dedent(\n r\"\"\"\n The %(name)s count of any non-NaN observations inside the window.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.DataFrame.count : Count of the full DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([2, 3, np.nan, 10])\n >>> s.rolling(2).count()\n 0 1.0\n 1 2.0\n 2 1.0\n 3 1.0\n dtype: float64\n >>> s.rolling(3).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 2.0\n dtype: float64\n >>> s.rolling(4).count()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n dtype: float64\n \"\"\"\n )\n\n def count(self):\n # GH 32865. 
Using count with custom BaseIndexer subclass\n # implementations shouldn't end up here\n assert not isinstance(self.window, BaseIndexer)\n\n blocks, obj = self._create_blocks(self._selected_obj)\n results = []\n for b in blocks:\n result = b.notna().astype(int)\n result = self._constructor(\n result,\n window=self._get_window(),\n min_periods=self.min_periods or 0,\n center=self.center,\n axis=self.axis,\n closed=self.closed,\n ).sum()\n results.append(result)\n\n return self._wrap_results(results, obj, skipped=[])\n\n _shared_docs[\"apply\"] = dedent(\n r\"\"\"\n Apply an arbitrary function to each %(name)s window.\n\n Parameters\n ----------\n func : function\n Must produce a single value from an ndarray input if ``raw=True``\n or a single value from a Series if ``raw=False``. Can also accept a\n Numba JIT function with ``engine='numba'`` specified.\n\n .. versionchanged:: 1.0.0\n\n raw : bool, default None\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray\n objects instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n engine : str, default None\n * ``'cython'`` : Runs rolling apply through C-extensions from cython.\n * ``'numba'`` : Runs rolling apply through JIT compiled code from numba.\n Only available when ``raw`` is set to ``True``.\n * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``\n\n .. versionadded:: 1.0.0\n\n engine_kwargs : dict, default None\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be\n applied to both the ``func`` and the ``apply`` rolling aggregation.\n\n .. 
versionadded:: 1.0.0\n\n args : tuple, default None\n Positional arguments to be passed into func.\n kwargs : dict, default None\n Keyword arguments to be passed into func.\n\n Returns\n -------\n Series or DataFrame\n Return type is determined by the caller.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrame data.\n pandas.Series.apply : Similar method for Series.\n pandas.DataFrame.apply : Similar method for DataFrame.\n\n Notes\n -----\n See :ref:`stats.rolling_apply` for extended documentation and performance\n considerations for the Numba engine.\n \"\"\"\n )\n\n def apply(\n self,\n func,\n raw: bool = False,\n engine: Optional[str] = None,\n engine_kwargs: Optional[Dict] = None,\n args: Optional[Tuple] = None,\n kwargs: Optional[Dict] = None,\n ):\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n kwargs.pop(\"_level\", None)\n kwargs.pop(\"floor\", None)\n if not is_bool(raw):\n raise ValueError(\"raw parameter must be `True` or `False`\")\n\n if maybe_use_numba(engine):\n if raw is False:\n raise ValueError(\"raw must be `True` when using the numba engine\")\n cache_key = (func, \"rolling_apply\")\n if cache_key in NUMBA_FUNC_CACHE:\n # Return an already compiled version of roll_apply if available\n apply_func = NUMBA_FUNC_CACHE[cache_key]\n else:\n apply_func = generate_numba_apply_func(\n args, kwargs, func, engine_kwargs\n )\n center = self.center\n elif engine in (\"cython\", None):\n if engine_kwargs is not None:\n raise ValueError(\"cython engine does not accept engine_kwargs\")\n # Cython apply functions handle center, so don't need to use\n # _apply's center handling\n window = self._get_window()\n offset = calculate_center_offset(window) if self.center else 0\n apply_func = self._generate_cython_apply_func(\n args, kwargs, raw, offset, func\n )\n center = False\n else:\n raise ValueError(\"engine must be either 'numba' or 'cython'\")\n\n # name=func & raw=raw for WindowGroupByMixin._apply\n return self._apply(\n apply_func,\n center=center,\n floor=0,\n name=func,\n use_numba_cache=engine == \"numba\",\n raw=raw,\n original_func=func,\n args=args,\n kwargs=kwargs,\n )\n\n def _generate_cython_apply_func(self, args, kwargs, raw, offset, func):\n from pandas import Series\n\n window_func = partial(\n self._get_cython_func_type(\"roll_generic\"),\n args=args,\n kwargs=kwargs,\n raw=raw,\n offset=offset,\n func=func,\n )\n\n def apply_func(values, begin, end, min_periods, raw=raw):\n if not raw:\n values = Series(values, index=self.obj.index)\n return window_func(values, begin, end, min_periods)\n\n return apply_func\n\n def sum(self, *args, **kwargs):\n nv.validate_window_func(\"sum\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_sum\")\n kwargs.pop(\"floor\", None)\n return self._apply(\n window_func, center=self.center, floor=0, name=\"sum\", **kwargs\n )\n\n _shared_docs[\"max\"] = dedent(\n \"\"\"\n Calculate the %(name)s maximum.\n\n Parameters\n ----------\n *args, **kwargs\n Arguments and keyword arguments to be passed into func.\n \"\"\"\n )\n\n def max(self, *args, **kwargs):\n nv.validate_window_func(\"max\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_max\")\n return self._apply(window_func, center=self.center, name=\"max\", **kwargs)\n\n _shared_docs[\"min\"] = dedent(\n \"\"\"\n Calculate the %(name)s minimum.\n\n Parameters\n ----------\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n 
Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with a Series.\n pandas.DataFrame.%(name)s : Calling object with a DataFrame.\n pandas.Series.min : Similar method for Series.\n pandas.DataFrame.min : Similar method for DataFrame.\n\n Examples\n --------\n Performing a rolling minimum with a window size of 3.\n\n >>> s = pd.Series([4, 3, 5, 2, 6])\n >>> s.rolling(3).min()\n 0 NaN\n 1 NaN\n 2 3.0\n 3 2.0\n 4 2.0\n dtype: float64\n \"\"\"\n )\n\n def min(self, *args, **kwargs):\n nv.validate_window_func(\"min\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_min\")\n return self._apply(window_func, center=self.center, name=\"min\", **kwargs)\n\n def mean(self, *args, **kwargs):\n nv.validate_window_func(\"mean\", args, kwargs)\n window_func = self._get_cython_func_type(\"roll_mean\")\n return self._apply(window_func, center=self.center, name=\"mean\", **kwargs)\n\n _shared_docs[\"median\"] = dedent(\n \"\"\"\n Calculate the %(name)s median.\n\n Parameters\n ----------\n **kwargs\n For compatibility with other %(name)s methods. Has no effect\n on the computed median.\n\n Returns\n -------\n Series or DataFrame\n Returned type is the same as the original object.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.Series.median : Equivalent method for Series.\n pandas.DataFrame.median : Equivalent method for DataFrame.\n\n Examples\n --------\n Compute the rolling median of a series with a window size of 3.\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.rolling(3).median()\n 0 NaN\n 1 NaN\n 2 1.0\n 3 2.0\n 4 3.0\n dtype: float64\n \"\"\"\n )\n\n def median(self, **kwargs):\n window_func = self._get_roll_func(\"roll_median_c\")\n # GH 32865. 
Move max window size calculation to\n # the median function implementation\n return self._apply(window_func, center=self.center, name=\"median\", **kwargs)\n\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"std\", args, kwargs)\n kwargs.pop(\"require_min_periods\", None)\n window_func = self._get_cython_func_type(\"roll_var\")\n\n def zsqrt_func(values, begin, end, min_periods):\n return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof))\n\n # ddof passed again for compat with groupby.rolling\n return self._apply(\n zsqrt_func,\n center=self.center,\n require_min_periods=1,\n name=\"std\",\n ddof=ddof,\n **kwargs,\n )\n\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_window_func(\"var\", args, kwargs)\n kwargs.pop(\"require_min_periods\", None)\n window_func = partial(self._get_cython_func_type(\"roll_var\"), ddof=ddof)\n # ddof passed again for compat with groupby.rolling\n return self._apply(\n window_func,\n center=self.center,\n require_min_periods=1,\n name=\"var\",\n ddof=ddof,\n **kwargs,\n )\n\n _shared_docs[\n \"skew\"\n ] = \"\"\"\n Unbiased %(name)s skewness.\n\n Parameters\n ----------\n **kwargs\n Keyword arguments to be passed into func.\n \"\"\"\n\n def skew(self, **kwargs):\n window_func = self._get_cython_func_type(\"roll_skew\")\n kwargs.pop(\"require_min_periods\", None)\n return self._apply(\n window_func,\n center=self.center,\n require_min_periods=3,\n name=\"skew\",\n **kwargs,\n )\n\n _shared_docs[\"kurt\"] = dedent(\n \"\"\"\n Calculate unbiased %(name)s kurtosis.\n\n This function uses Fisher's definition of kurtosis without bias.\n\n Parameters\n ----------\n **kwargs\n Under Review.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.Series.kurt : Equivalent method for Series.\n pandas.DataFrame.kurt : Equivalent method for DataFrame.\n scipy.stats.skew : Third moment of a probability density.\n scipy.stats.kurtosis : Reference SciPy method.\n\n Notes\n -----\n A minimum of 4 periods is required for the %(name)s calculation.\n \"\"\"\n )\n\n def kurt(self, **kwargs):\n window_func = self._get_cython_func_type(\"roll_kurt\")\n kwargs.pop(\"require_min_periods\", None)\n return self._apply(\n window_func,\n center=self.center,\n require_min_periods=4,\n name=\"kurt\",\n **kwargs,\n )\n\n _shared_docs[\"quantile\"] = dedent(\n \"\"\"\n Calculate the %(name)s quantile.\n\n Parameters\n ----------\n quantile : float\n Quantile to compute. 0 <= quantile <= 1.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n .. versionadded:: 0.23.0\n\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n **kwargs\n For compatibility with other %(name)s methods. 
Has no effect on\n the result.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the %(name)s\n calculation.\n\n See Also\n --------\n pandas.Series.quantile : Computes value at the given quantile over all data\n in Series.\n pandas.DataFrame.quantile : Computes values at the given quantile over\n requested axis in DataFrame.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).quantile(.4, interpolation='lower')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n >>> s.rolling(2).quantile(.4, interpolation='midpoint')\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n \"\"\"\n )\n\n def quantile(self, quantile, interpolation=\"linear\", **kwargs):\n if quantile == 1.0:\n window_func = self._get_cython_func_type(\"roll_max\")\n elif quantile == 0.0:\n window_func = self._get_cython_func_type(\"roll_min\")\n else:\n window_func = partial(\n self._get_roll_func(\"roll_quantile\"),\n win=self._get_window(),\n quantile=quantile,\n interpolation=interpolation,\n )\n\n # Pass through for groupby.rolling\n kwargs[\"quantile\"] = quantile\n kwargs[\"interpolation\"] = interpolation\n return self._apply(window_func, center=self.center, name=\"quantile\", **kwargs)\n\n _shared_docs[\n \"cov\"\n ] = \"\"\"\n Calculate the %(name)s sample covariance.\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n **kwargs\n Keyword arguments to be passed into func.\n \"\"\"\n\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n\n # GH 32865. 
We leverage rolling.mean, so we pass\n # to the rolling constructors the data used when constructing self:\n # window width, frequency data, or a BaseIndexer subclass\n if isinstance(self.window, BaseIndexer):\n window = self.window\n else:\n # GH 16058: offset window\n if self.is_freq_type:\n window = self.win_freq\n else:\n window = self._get_window(other)\n\n def _get_cov(X, Y):\n # GH #12373 : rolling functions error on float32 data\n # to avoid potential overflow, cast the data to float64\n X = X.astype(\"float64\")\n Y = Y.astype(\"float64\")\n mean = lambda x: x.rolling(\n window, self.min_periods, center=self.center\n ).mean(**kwargs)\n count = (\n (X + Y)\n .rolling(window=window, min_periods=0, center=self.center)\n .count(**kwargs)\n )\n bias_adj = count / (count - ddof)\n return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj\n\n return _flex_binary_moment(\n self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)\n )\n\n _shared_docs[\"corr\"] = dedent(\n \"\"\"\n Calculate %(name)s correlation.\n\n Parameters\n ----------\n other : Series, DataFrame, or ndarray, optional\n If not supplied then will default to self.\n pairwise : bool, default None\n Calculate pairwise combinations of columns within a\n DataFrame. If `other` is not specified, defaults to `True`,\n otherwise defaults to `False`.\n Not relevant for :class:`~pandas.Series`.\n **kwargs\n Unused.\n\n Returns\n -------\n Series or DataFrame\n Returned object type is determined by the caller of the\n %(name)s calculation.\n\n See Also\n --------\n pandas.Series.%(name)s : Calling object with Series data.\n pandas.DataFrame.%(name)s : Calling object with DataFrames.\n pandas.Series.corr : Equivalent method for Series.\n pandas.DataFrame.corr : Equivalent method for DataFrame.\n cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n\n Notes\n -----\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n Examples\n --------\n The below example shows a rolling calculation with a window size of\n four matching the equivalent function call using :meth:`numpy.corrcoef`.\n\n >>> v1 = [3, 3, 3, 5, 8]\n >>> v2 = [3, 4, 4, 4, 8]\n >>> # numpy returns a 2X2 array, the correlation coefficient\n >>> # is the number at entry [0][1]\n >>> print(f\"{np.corrcoef(v1[:-1], v2[:-1])[0][1]:.6f}\")\n 0.333333\n >>> print(f\"{np.corrcoef(v1[1:], v2[1:])[0][1]:.6f}\")\n 0.916949\n >>> s1 = pd.Series(v1)\n >>> s2 = pd.Series(v2)\n >>> s1.rolling(4).corr(s2)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 0.333333\n 4 0.916949\n dtype: float64\n\n The below example shows a similar rolling calculation on a\n DataFrame using the pairwise option.\n\n >>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\\\n [46., 31.], [50., 36.]])\n >>> print(np.corrcoef(matrix[:-1,0], 
matrix[:-1,1]).round(7))\n [[1. 0.6263001]\n [0.6263001 1. ]]\n >>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))\n [[1. 0.5553681]\n [0.5553681 1. ]]\n >>> df = pd.DataFrame(matrix, columns=['X','Y'])\n >>> df\n X Y\n 0 51.0 35.0\n 1 49.0 30.0\n 2 47.0 32.0\n 3 46.0 31.0\n 4 50.0 36.0\n >>> df.rolling(4).corr(pairwise=True)\n X Y\n 0 X NaN NaN\n Y NaN NaN\n 1 X NaN NaN\n Y NaN NaN\n 2 X NaN NaN\n Y NaN NaN\n 3 X 1.000000 0.626300\n Y 0.626300 1.000000\n 4 X 1.000000 0.555368\n Y 0.555368 1.000000\n \"\"\"\n )\n\n def corr(self, other=None, pairwise=None, **kwargs):\n if other is None:\n other = self._selected_obj\n # only default unset\n pairwise = True if pairwise is None else pairwise\n other = self._shallow_copy(other)\n\n # GH 32865. We leverage rolling.cov and rolling.std here, so we pass\n # to the rolling constructors the data used when constructing self:\n # window width, frequency data, or a BaseIndexer subclass\n if isinstance(self.window, BaseIndexer):\n window = self.window\n else:\n window = self._get_window(other) if not self.is_freq_type else self.win_freq\n\n def _get_corr(a, b):\n a = a.rolling(\n window=window, min_periods=self.min_periods, center=self.center\n )\n b = b.rolling(\n window=window, min_periods=self.min_periods, center=self.center\n )\n\n return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))\n\n return _flex_binary_moment(\n self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)\n )\n\n\nclass Rolling(_Rolling_and_Expanding):\n @cache_readonly\n def is_datetimelike(self) -> bool:\n return isinstance(\n self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)\n )\n\n @cache_readonly\n def _on(self) -> Index:\n if self.on is None:\n if self.axis == 0:\n return self.obj.index\n else:\n # i.e. 
self.axis == 1\n return self.obj.columns\n elif isinstance(self.on, Index):\n return self.on\n elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:\n return Index(self.obj[self.on])\n else:\n raise ValueError(\n f\"invalid on specified as {self.on}, \"\n \"must be a column (of DataFrame), an Index or None\"\n )\n\n def validate(self):\n super().validate()\n\n # we allow rolling on a datetimelike index\n if (self.obj.empty or self.is_datetimelike) and isinstance(\n self.window, (str, BaseOffset, timedelta)\n ):\n\n self._validate_monotonic()\n freq = self._validate_freq()\n\n # we don't allow center\n if self.center:\n raise NotImplementedError(\n \"center is not implemented for \"\n \"datetimelike and offset based windows\"\n )\n\n # this will raise ValueError on non-fixed freqs\n self.win_freq = self.window\n self.window = freq.nanos\n self.win_type = \"freq\"\n\n # min_periods must be an integer\n if self.min_periods is None:\n self.min_periods = 1\n\n elif isinstance(self.window, BaseIndexer):\n # Passed BaseIndexer subclass should handle all other rolling kwargs\n return\n elif not is_integer(self.window):\n raise ValueError(\"window must be an integer\")\n elif self.window < 0:\n raise ValueError(\"window must be non-negative\")\n\n if not self.is_datetimelike and self.closed is not None:\n raise ValueError(\n \"closed only implemented for datetimelike and offset based windows\"\n )\n\n def _validate_monotonic(self):\n \"\"\"\n Validate monotonic (increasing or decreasing).\n \"\"\"\n if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing):\n formatted = self.on\n if self.on is None:\n formatted = \"index\"\n raise ValueError(f\"{formatted} must be monotonic\")\n\n def _validate_freq(self):\n \"\"\"\n Validate & return window frequency.\n \"\"\"\n try:\n return to_offset(self.window)\n except (TypeError, ValueError) as err:\n raise ValueError(\n f\"passed window {self.window} is not \"\n \"compatible with a datetimelike index\"\n ) from err\n\n _agg_see_also_doc = dedent(\n \"\"\"\n See Also\n --------\n pandas.Series.rolling : Calling object with Series data.\n pandas.DataFrame.rolling : Calling object with DataFrame data.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6], \"C\": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2).sum()\n A B C\n 0 NaN NaN NaN\n 1 3.0 9.0 15.0\n 2 5.0 11.0 17.0\n\n >>> df.rolling(2).agg({\"A\": \"sum\", \"B\": \"min\"})\n A B\n 0 NaN NaN\n 1 3.0 4.0\n 2 5.0 5.0\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n see_also=_agg_see_also_doc,\n examples=_agg_examples_doc,\n versionadded=\"\",\n klass=\"Series/Dataframe\",\n axis=\"\",\n )\n def aggregate(self, func, *args, **kwargs):\n return super().aggregate(func, *args, **kwargs)\n\n agg = aggregate\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"count\"])\n def count(self):\n\n # different impl for freq counting\n # GH 32865. 
Use a custom count function implementation\n # when using a BaseIndexer subclass as a window\n if self.is_freq_type or isinstance(self.window, BaseIndexer):\n window_func = self._get_roll_func(\"roll_count\")\n return self._apply(window_func, center=self.center, name=\"count\")\n\n return super().count()\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"apply\"])\n def apply(\n self, func, raw=False, engine=None, engine_kwargs=None, args=None, kwargs=None,\n ):\n return super().apply(\n func,\n raw=raw,\n engine=engine,\n engine_kwargs=engine_kwargs,\n args=args,\n kwargs=kwargs,\n )\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"sum\"])\n def sum(self, *args, **kwargs):\n nv.validate_rolling_func(\"sum\", args, kwargs)\n return super().sum(*args, **kwargs)\n\n @Substitution(name=\"rolling\", func_name=\"max\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"max\"])\n def max(self, *args, **kwargs):\n nv.validate_rolling_func(\"max\", args, kwargs)\n return super().max(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"min\"])\n def min(self, *args, **kwargs):\n nv.validate_rolling_func(\"min\", args, kwargs)\n return super().min(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"mean\"])\n def mean(self, *args, **kwargs):\n nv.validate_rolling_func(\"mean\", args, kwargs)\n return super().mean(*args, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"median\"])\n def median(self, **kwargs):\n return super().median(**kwargs)\n\n @Substitution(name=\"rolling\", versionadded=\"\")\n @Appender(_shared_docs[\"std\"])\n def std(self, ddof=1, *args, **kwargs):\n nv.validate_rolling_func(\"std\", args, kwargs)\n return super().std(ddof=ddof, **kwargs)\n\n @Substitution(name=\"rolling\", versionadded=\"\")\n @Appender(_shared_docs[\"var\"])\n def var(self, ddof=1, *args, **kwargs):\n nv.validate_rolling_func(\"var\", args, kwargs)\n return super().var(ddof=ddof, **kwargs)\n\n @Substitution(name=\"rolling\", func_name=\"skew\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"skew\"])\n def skew(self, **kwargs):\n return super().skew(**kwargs)\n\n _agg_doc = dedent(\n \"\"\"\n Examples\n --------\n\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f\"{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}\")\n -1.200000\n >>> print(f\"{scipy.stats.kurtosis(arr[1:], bias=False):.6f}\")\n 3.999946\n >>> s = pd.Series(arr)\n >>> s.rolling(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 3.999946\n dtype: float64\n \"\"\"\n )\n\n @Appender(_agg_doc)\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"kurt\"])\n def kurt(self, **kwargs):\n return super().kurt(**kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"quantile\"])\n def quantile(self, quantile, interpolation=\"linear\", **kwargs):\n return super().quantile(\n quantile=quantile, interpolation=interpolation, **kwargs\n )\n\n @Substitution(name=\"rolling\", func_name=\"cov\")\n @Appender(_doc_template)\n @Appender(_shared_docs[\"cov\"])\n def cov(self, other=None, pairwise=None, ddof=1, **kwargs):\n return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)\n\n @Substitution(name=\"rolling\")\n @Appender(_shared_docs[\"corr\"])\n def corr(self, other=None, pairwise=None, **kwargs):\n return super().corr(other=other, pairwise=pairwise, 
**kwargs)\n\n\nRolling.__doc__ = Window.__doc__\n\n\nclass RollingGroupby(WindowGroupByMixin, Rolling):\n \"\"\"\n Provide a rolling groupby implementation.\n \"\"\"\n\n def _apply(\n self,\n func: Callable,\n center: bool,\n require_min_periods: int = 0,\n floor: int = 1,\n is_weighted: bool = False,\n name: Optional[str] = None,\n use_numba_cache: bool = False,\n **kwargs,\n ):\n result = Rolling._apply(\n self,\n func,\n center,\n require_min_periods,\n floor,\n is_weighted,\n name,\n use_numba_cache,\n **kwargs,\n )\n # Cannot use _wrap_outputs because we calculate the result all at once\n # Compose MultiIndex result from grouping levels then rolling level\n # Aggregate the MultiIndex data as tuples then the level names\n grouped_object_index = self.obj.index\n grouped_index_name = [grouped_object_index.name]\n groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings]\n result_index_names = groupby_keys + grouped_index_name\n\n result_index_data = []\n for key, values in self._groupby.grouper.indices.items():\n for value in values:\n if not is_list_like(key):\n data = [key, grouped_object_index[value]]\n else:\n data = [*key, grouped_object_index[value]]\n result_index_data.append(tuple(data))\n\n result_index = MultiIndex.from_tuples(\n result_index_data, names=result_index_names\n )\n result.index = result_index\n return result\n\n @property\n def _constructor(self):\n return Rolling\n\n def _create_blocks(self, obj: FrameOrSeriesUnion):\n \"\"\"\n Split data into blocks & return conformed data.\n \"\"\"\n # Ensure the object we're rolling over is monotonically sorted relative\n # to the groups\n groupby_order = np.concatenate(\n list(self._groupby.grouper.indices.values())\n ).astype(np.int64)\n obj = obj.take(groupby_order)\n return super()._create_blocks(obj)\n\n def _get_cython_func_type(self, func: str) -> Callable:\n \"\"\"\n Return the cython function type.\n\n RollingGroupby needs to always use \"variable\" algorithms since processing\n the data in group order may not be monotonic with the data which\n \"fixed\" algorithms assume\n \"\"\"\n return self._get_roll_func(f\"{func}_variable\")\n\n def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer:\n \"\"\"\n Return an indexer class that will compute the window start and end bounds\n\n Parameters\n ----------\n window : int\n window size for FixedWindowIndexer\n\n Returns\n -------\n GroupbyRollingIndexer\n \"\"\"\n rolling_indexer: Type[BaseIndexer]\n indexer_kwargs: Optional[Dict] = None\n index_array = self.obj.index.asi8\n if isinstance(self.window, BaseIndexer):\n rolling_indexer = type(self.window)\n indexer_kwargs = self.window.__dict__\n assert isinstance(indexer_kwargs, dict) # for mypy\n # We'll be using the index of each group later\n indexer_kwargs.pop(\"index_array\", None)\n elif self.is_freq_type:\n rolling_indexer = VariableWindowIndexer\n else:\n rolling_indexer = FixedWindowIndexer\n index_array = None\n window_indexer = GroupbyRollingIndexer(\n index_array=index_array,\n window_size=window,\n groupby_indicies=self._groupby.indices,\n rolling_indexer=rolling_indexer,\n indexer_kwargs=indexer_kwargs,\n )\n return window_indexer\n\n def _gotitem(self, key, ndim, subset=None):\n # we are setting the index on the actual object\n # here so our index is carried thru to the selected obj\n # when we do the splitting for the groupby\n if self.on is not None:\n self.obj = self.obj.set_index(self._on)\n self.on = None\n return super()._gotitem(key, ndim, subset=subset)\n\n def 
_validate_monotonic(self):\n \"\"\"\n Validate that on is monotonic;\n we don't care for groupby.rolling\n because we have already validated at a higher\n level.\n \"\"\"\n pass\n" ]
[ [ "pandas.Series", "scipy.signal.get_window", "numpy.asarray", "pandas.core.window.indexers.FixedWindowIndexer", "numpy.concatenate", "pandas.core.window.indexers.GroupbyRollingIndexer", "pandas.compat.numpy.function.validate_rolling_func", "numpy.where", "pandas.util._decorators.Substitution", "pandas.core.dtypes.common.ensure_float64", "pandas._libs.tslibs.to_offset", "pandas.compat.numpy.function.validate_window_func", "pandas.core.common.asarray_tuplesafe", "pandas.compat._optional.import_optional_dependency", "pandas.core.util.numba_.maybe_use_numba", "numpy.apply_along_axis", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_list_like", "pandas.util._decorators.Appender", "pandas.core.base.DataError", "pandas.concat", "pandas.core.window.indexers.BaseIndexer", "pandas.core.window.numba_.generate_numba_apply_func", "pandas.core.indexes.api.Index", "numpy.errstate", "numpy.array", "pandas.core.dtypes.common.needs_i8_conversion", "pandas.core.dtypes.common.is_bool", "pandas.core.window.indexers.VariableWindowIndexer", "numpy.isinf", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_integer", "pandas.util._decorators.doc", "pandas.core.construction.extract_array", "pandas.core.indexes.api.MultiIndex.from_tuples" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.0", "1.2" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
kowshikthopalli/MULDENS
[ "e2d5f8ec51024c5bdda6d1fcde4a96a3f31e6930" ]
[ "domainbed/scripts/dson.py" ]
[ "import argparse\nimport collections\nimport json\nimport os\nimport random\nimport sys\nimport time\nimport uuid\n#sys.path.append('/home/kowshik/infoDG')\nimport numpy as np\nimport PIL\nimport torch\nimport torchvision\nimport torch.utils.data\nimport torch.nn as nn\n\n\n\n\nclass OptimizedNorm2d(nn.Module):\n def __init__(self, num_features, eps=1e-5, momentum=0.9, using_moving_average=True,\n last_gamma=False, channelwise=False, modes=['in', 'bn']):\n super(OptimizedNorm2d, self).__init__()\n self.eps = eps\n self.momentum = momentum\n self.using_moving_average = using_moving_average\n self.last_gamma = last_gamma\n self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))\n self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))\n self.channelwise = channelwise\n self.num_features = num_features\n self.modes = modes\n\n num_norms = len(modes)\n if channelwise:\n self.mean_weight = nn.Parameter(torch.ones(num_norms, num_features))\n self.var_weight = nn.Parameter(torch.ones(num_norms, num_features))\n else:\n self.mean_weight = nn.Parameter(torch.ones(num_norms))\n self.var_weight = nn.Parameter(torch.ones(num_norms))\n\n self.register_buffer('running_mean', torch.zeros(1, num_features, 1))\n self.register_buffer('running_var', torch.zeros(1, num_features, 1))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.running_mean.zero_()\n self.running_var.zero_()\n if self.last_gamma:\n self.weight.data.fill_(0)\n else:\n self.weight.data.fill_(1)\n self.bias.data.zero_()\n\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n\n def get_norm_ratios(self, mean=False):\n softmax = nn.Softmax(0)\n mean_weight, var_weight = self.mean_weight, self.var_weight\n mean_weight, var_weight = softmax(mean_weight), softmax(var_weight)\n return mean_weight, var_weight\n\n def forward(self, input):\n self._check_input_dim(input)\n N, C, H, W = input.size()\n x = input.view(N, C, -1)\n \n mean_in = x.mean(-1, keepdim=True)\n var_in = x.var(-1, keepdim=True)\n temp = var_in + mean_in ** 2\n\n if self.training:\n mean_bn = mean_in.mean(0, keepdim=True)\n var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2\n if self.using_moving_average:\n self.running_mean.mul_(self.momentum)\n self.running_mean.add_((1 - self.momentum) * mean_bn.data)\n self.running_var.mul_(self.momentum)\n self.running_var.add_((1 - self.momentum) * var_bn.data)\n else:\n self.running_mean.add_(mean_bn.data)\n self.running_var.add_(mean_bn.data ** 2 + var_bn.data)\n else:\n mean_bn = torch.autograd.Variable(self.running_mean)\n var_bn = torch.autograd.Variable(self.running_var)\n\n\n mean_weight, var_weight = self.get_norm_ratios()\n\n mean_norms = {'in': mean_in, 'bn': mean_bn}\n var_norms = {'in': var_in, 'bn': var_bn}\n\n mean = sum([mean_norms[mode]*mw for mode, mw in zip(self.modes, mean_weight)])\n var = sum([var_norms[mode]*mw for mode, mw in zip(self.modes, var_weight)])\n\n x = (x-mean) / (var+self.eps).sqrt()\n x = x.view(N, C, H, W)\n return x * self.weight + self.bias\n\n\n \nclass DomainSpecificOptimizedNorm2d(nn.Module):\n def __init__(self, num_features, num_domains, eps=1e-5, momentum=0.9, using_moving_average=True, using_bn=True,\n last_gamma=False, module='OptimizedNorm2d'):\n\n super(DomainSpecificOptimizedNorm2d, self).__init__()\n\n self.bns = nn.ModuleList(\n [globals()[module](num_features, eps, momentum) for _ in range(num_domains)]\n )\n\n def reset_running_stats(self):\n for bn in self.bns:\n 
bn.reset_running_stats()\n\n def reset_parameters(self):\n for bn in self.bns:\n bn.reset_parameters()\n\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n\n def forward(self, x, domain_label):\n self._check_input_dim(x)\n bn = self.bns[domain_label]\n return bn(x), domain_label\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n dson = DomainSpecificOptimizedNorm2d(3,3,)\n x = torch.zeros(10,3,224, 224)\n # forward returns an (output, domain_label) tuple, so unpack before inspecting\n out, _ = dson(x, 0)\n print(out.shape)" ]
[ [ "torch.autograd.Variable", "torch.nn.Softmax", "torch.ones", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
takumak/tuna
[ "a50d1d34c9917d73f02257bcffcf7cc6bf582747" ]
[ "src/interpolation.py" ]
[ "import numpy as np\n\nfrom settingobj import SettingObj\nfrom settingitems import *\nfrom commonwidgets import *\n\n\n\n__all__ = ['InterpLinear', 'InterpBSpline',\n 'InterpCubicSpline', 'InterpBarycentric',\n 'InterpKrogh', 'InterpPchip', 'InterpAkima']\n\n\n\nclass InterpBase(SettingObj):\n def func(self, x, y):\n raise NotImplementedError()\n\n\n\nclass InterpLinear(InterpBase):\n name = 'linear'\n label = 'Linear'\n\n def func(self, x, y):\n from scipy.interpolate import interp1d\n return interp1d(x, y, 'linear')\n\n def descriptionWidget(self):\n w = DescriptionWidget()\n w.addTitle(self.label)\n url = 'https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html'\n w.addLabel('''\nThis runs <code>scipy.interpolate.interp1d(x, y, \"linear\")</code>.<br>\n<a href=\"{0}\">{0}</a>\n'''.format(url).strip(), richtext=True)\n return w\n\n\n\nclass InterpBSpline(InterpBase):\n name = 'b_spline'\n label = 'B-spline'\n\n def __init__(self):\n super().__init__()\n self.addSettingItem(SettingItemStr('w', 'Weight (function of x,y)', '1/(ymax*0.003*(1.00001-y/ymax))'))\n\n def func(self, x, y):\n from sympy import sympify, lambdify\n from scipy.interpolate import splrep, splev\n w = lambdify(['x', 'y', 'ymax'], sympify(self.w.strValue()), 'numpy')(x, y, np.full(len(x), max(y)))\n try:\n iter(w)\n except:\n w = np.full(x.shape, w)\n\n c = len(x)//10\n xl = np.linspace(x[0]+(x[0]-x[1])*c, x[0], c, endpoint=False)\n xr = np.flip(np.linspace(x[-1]+(x[-1]-x[-2])*c, x[-1], c, endpoint=False), 0)\n x2 = np.concatenate((xl, x, xr))\n y2 = np.concatenate((np.full(c, y[0]), y, np.full(c, y[-1])))\n w2 = np.concatenate((np.full(c, w[0]), w, np.full(c, w[-1])))\n spl = splrep(x2, y2, w=w2)\n return lambda x: splev(x, spl)\n\n\n\nclass InterpScipy(InterpBase):\n def func(self, x, y):\n import scipy.interpolate as interp\n return getattr(interp, self.clsname)(x, y)\n\n def descriptionWidget(self):\n modname = 'scipy.interpolate.%s' % self.clsname\n w = DescriptionWidget()\n w.addTitle(self.label)\n url = 'https://docs.scipy.org/doc/scipy/reference/generated/%s.html' % modname\n w.addLabel('''\nThis uses <code>{0}</code>.<br>\n<a href=\"{1}\">{1}</a>\n'''.format(modname, url).strip(), richtext=True)\n return w\n\nclass InterpCubicSpline(InterpScipy):\n name = 'cubic_spline'\n label = 'Cubic spline'\n clsname = 'CubicSpline'\n\nclass InterpBarycentric(InterpScipy):\n name = 'barycentric'\n label = 'Barycentric'\n clsname = 'BarycentricInterpolator'\n\nclass InterpKrogh(InterpScipy):\n name = 'krogh'\n label = 'Krogh'\n clsname = 'KroghInterpolator'\n\nclass InterpPchip(InterpScipy):\n name = 'pchip'\n label = 'Pchip'\n clsname = 'PchipInterpolator'\n\nclass InterpAkima(InterpScipy):\n name = 'akima'\n label = 'Akima'\n clsname = 'Akima1DInterpolator'\n" ]
[ [ "scipy.interpolate.splrep", "numpy.linspace", "scipy.interpolate.splev", "numpy.concatenate", "numpy.full", "scipy.interpolate.interp1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
eleflea/tvm
[ "d199243d8907b2d8062dd9c20b69dcb9765a970f", "d199243d8907b2d8062dd9c20b69dcb9765a970f", "d199243d8907b2d8062dd9c20b69dcb9765a970f", "d199243d8907b2d8062dd9c20b69dcb9765a970f" ]
[ "tutorials/frontend/from_tflite.py", "tutorials/language/extern_op.py", "tests/python/unittest/test_auto_scheduler_layout_rewrite.py", "tests/python/unittest/test_link_params.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nCompile TFLite Models\n=====================\n**Author**: `Zhao Wu <https://github.com/FrozenGene>`_\n\nThis article is an introductory tutorial to deploy TFLite models with Relay.\n\nTo get started, TFLite package needs to be installed as prerequisite.\n\n.. code-block:: bash\n\n # install tflite\n pip install tflite==2.1.0 --user\n\n\nor you could generate TFLite package yourself. The steps are the following:\n\n.. code-block:: bash\n\n # Get the flatc compiler.\n # Please refer to https://github.com/google/flatbuffers for details\n # and make sure it is properly installed.\n flatc --version\n\n # Get the TFLite schema.\n wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs\n\n # Generate TFLite package.\n flatc --python schema.fbs\n\n # Add current folder (which contains generated tflite module) to PYTHONPATH.\n export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)\n\n\nNow please check if TFLite package is installed successfully, ``python -c \"import tflite\"``\n\nBelow you can find an example on how to compile TFLite model using TVM.\n\"\"\"\n######################################################################\n# Utils for downloading and extracting zip files\n# ----------------------------------------------\nimport os\n\n\ndef extract(path):\n import tarfile\n\n if path.endswith(\"tgz\") or path.endswith(\"gz\"):\n dir_path = os.path.dirname(path)\n tar = tarfile.open(path)\n tar.extractall(path=dir_path)\n tar.close()\n else:\n raise RuntimeError(\"Could not decompress the file: \" + path)\n\n\n######################################################################\n# Load pretrained TFLite model\n# ----------------------------\n# Load mobilenet V1 TFLite model provided by Google\nfrom tvm.contrib.download import download_testdata\n\nmodel_url = \"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz\"\n\n# Download model tar file and extract it to get mobilenet_v1_1.0_224.tflite\nmodel_path = download_testdata(model_url, \"mobilenet_v1_1.0_224.tgz\", module=[\"tf\", \"official\"])\nmodel_dir = os.path.dirname(model_path)\nextract(model_path)\n\n# Now we can open mobilenet_v1_1.0_224.tflite\ntflite_model_file = os.path.join(model_dir, \"mobilenet_v1_1.0_224.tflite\")\ntflite_model_buf = open(tflite_model_file, \"rb\").read()\n\n# Get TFLite model from buffer\ntry:\n import tflite\n\n tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)\nexcept AttributeError:\n import tflite.Model\n\n tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)\n\n######################################################################\n# Load a test image\n# -----------------\n# A single cat dominates the 
examples!\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nimage_url = \"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true\"\nimage_path = download_testdata(image_url, \"cat.png\", module=\"data\")\nresized_image = Image.open(image_path).resize((224, 224))\nplt.imshow(resized_image)\nplt.show()\nimage_data = np.asarray(resized_image).astype(\"float32\")\n\n# Add a dimension to the image so that we have NHWC format layout\nimage_data = np.expand_dims(image_data, axis=0)\n\n# Preprocess image as described here:\n# https://github.com/tensorflow/models/blob/edb6ed22a801665946c63d650ab9a0b23d98e1b1/research/slim/preprocessing/inception_preprocessing.py#L243\nimage_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1\nimage_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1\nimage_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1\nprint(\"input\", image_data.shape)\n\n######################################################################\n# Compile the model with relay\n# ----------------------------\n\n# TFLite input tensor name, shape and type\ninput_tensor = \"input\"\ninput_shape = (1, 224, 224, 3)\ninput_dtype = \"float32\"\n\n# Parse TFLite model and convert it to a Relay module\nfrom tvm import relay, transform\n\nmod, params = relay.frontend.from_tflite(\n tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}\n)\n\n# Build the module against to x86 CPU\ntarget = \"llvm\"\nwith transform.PassContext(opt_level=3):\n lib = relay.build(mod, target, params=params)\n\n######################################################################\n# Execute on TVM\n# --------------\nimport tvm\nfrom tvm import te\nfrom tvm.contrib import graph_runtime as runtime\n\n# Create a runtime executor module\nmodule = runtime.GraphModule(lib[\"default\"](tvm.cpu()))\n\n# Feed input data\nmodule.set_input(input_tensor, tvm.nd.array(image_data))\n\n# Run\nmodule.run()\n\n# Get output\ntvm_output = module.get_output(0).asnumpy()\n\n######################################################################\n# Display results\n# ---------------\n\n# Load label file\nlabel_file_url = \"\".join(\n [\n \"https://raw.githubusercontent.com/\",\n \"tensorflow/tensorflow/master/tensorflow/lite/java/demo/\",\n \"app/src/main/assets/\",\n \"labels_mobilenet_quant_v1_224.txt\",\n ]\n)\nlabel_file = \"labels_mobilenet_quant_v1_224.txt\"\nlabel_path = download_testdata(label_file_url, label_file, module=\"data\")\n\n# List of 1001 classes\nwith open(label_path) as f:\n labels = f.readlines()\n\n# Convert result to 1D data\npredictions = np.squeeze(tvm_output)\n\n# Get top 1 prediction\nprediction = np.argmax(predictions)\n\n# Convert id to class name and show the result\nprint(\"The image prediction result is: id \" + str(prediction) + \" name: \" + labels[prediction])\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nExternal Tensor Functions\n=========================\n**Author**: `Tianqi Chen <https://tqchen.github.io>`_\n\nWhile TVM supports transparent code generation, sometimes\nit is also helpful to incorporate manual written code into\nthe pipeline. For example, we might want to use cuDNN for\nsome of the convolution kernels and define the rest of the stages.\n\nTVM supports these black box function calls natively.\nSpecfically, TVM support all the tensor functions that are DLPack compatible.\nWhich means we can call any function with POD types(pointer, int, float)\nor pointer to DLTensor as argument.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport tvm\nfrom tvm import te\nimport numpy as np\nfrom tvm.contrib import cblas\nimport tvm.testing\n\nif not tvm.get_global_func(\"tvm.contrib.cblas.matmul\", allow_missing=True):\n raise Exception(\"Not compiled with cblas support; can't build this tutorial\")\n\n######################################################################\n# Use Extern Tensor Function\n# --------------------------\n# In the example below, we use :any:`te.extern` to add an extern\n# array function call. In the extern call, we declare the shape\n# of output tensors. In the second argument we provide the list of inputs.\n#\n# User will need to provide a function describing how to compute the result.\n# The compute function takes list of symbolic placeholder for the inputs,\n# list of symbolic placeholder for the outputs and returns the executing statement.\n#\n# In this case we simply call a registered TVM function, which invokes a CBLAS call.\n# TVM does not control internal of the extern array function and treats it as blackbox.\n# We can further mix schedulable TVM calls that add a bias term to the result.\n#\nn = 1024\nl = 128\nm = 235\nbias = te.var(\"bias\", dtype=\"float32\")\nA = te.placeholder((n, l), name=\"A\")\nB = te.placeholder((l, m), name=\"B\")\nC = te.extern(\n (n, m),\n [A, B],\n lambda ins, outs: tvm.tir.call_packed(\n \"tvm.contrib.cblas.matmul\", ins[0], ins[1], outs[0], False, False\n ),\n name=\"C\",\n)\nD = te.compute(C.shape, lambda i, j: C[i, j] + bias, name=\"D\")\ns = te.create_schedule(D.op)\n\n######################################################################\n# Verify the Result\n# -----------------\n# We can verify that the result matches what we expected.\n#\nctx = tvm.cpu(0)\nf = tvm.build(s, [A, B, D, bias], \"llvm\")\na = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)\nb = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)\nd = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), ctx)\nbb = 10.0\nf(a, b, d, bb)\ntvm.testing.assert_allclose(d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + 10, rtol=1e-5)\n\n######################################################################\n# Extern Contrib Wrappers\n# -----------------------\n# TVM also provide extern contrib wrappers to useful extern calls,\n# the following line is equivalent to the previous example.\n#\nfrom tvm.contrib import cblas\n\nC = cblas.matmul(A, B)\nD = te.compute(C.shape, lambda i, j: C[i, j] + bias, 
name=\"D\")\ns = te.create_schedule(D.op)\n\n######################################################################\n# Hook Python Function as Extern\n# ------------------------------\n# Since we can call into any PackedFunc in TVM. We can use the extern\n# function to callback into python.\n#\n# The following example registers a python function into TVM runtime system\n# and use it to complete one stage of the computation.\n# This makes TVM much more flexible. For example, we can insert front-end\n# callbacks to inspect the intermediate results or mix customized code\n# with TVM.\n#\[email protected]_func(\"tvm.contrib.my_tvm_addone\")\ndef my_tvm_addone(x, y):\n print(\"my_tvm_addone signatures: %s, %s\" % (type(x), type(y)))\n tvm.nd.array(x.asnumpy() + 1).copyto(y)\n\n\nA = te.placeholder((n,), name=\"A\")\nB = te.extern(\n A.shape,\n [A],\n lambda ins, outs: tvm.tir.call_packed(\"tvm.contrib.my_tvm_addone\", ins[0], outs[0]),\n name=\"C\",\n)\ns = te.create_schedule(B.op)\nf = tvm.build(s, [A, B], \"llvm\")\na = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)\nb = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), ctx)\nf(a, b)\ntvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1, rtol=1e-5)\n\n######################################################################\n# Summary\n# -------\n# - TVM calls extern tensor function via :any:`te.extern`\n# - Use contrib wrappers for short sugars of extern tensor calls.\n# - We can hook front-end function as extern tensor callbacks.\n#\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Test AutoScheduler Layout Rewrite\"\"\"\nimport tempfile\nimport numpy as np\n\nimport pytest\n\nimport tvm\nimport tvm.testing\nfrom tvm import topi\nfrom tvm import auto_scheduler, te\n\nfrom test_auto_scheduler_common import get_tiled_matmul, matmul_auto_scheduler_test\n\n\ndef test_apply_steps_with_layout_rewrite():\n dag, s = get_tiled_matmul()\n _, bufs = dag.apply_steps_from_state(s)\n assert bufs[1].shape[0] == 512\n assert bufs[1].shape[1] == 512\n _, bufs = dag.apply_steps_from_state(\n s, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED\n )\n assert bufs[1].shape[0] == 4\n assert bufs[1].shape[1] == 8\n assert bufs[1].shape[2] == 4\n assert bufs[1].shape[3] == 4\n assert bufs[1].shape[4] == 512\n _, bufs = dag.apply_steps_from_state(\n s, layout_rewrite=auto_scheduler.LayoutRewriteOption.INSERT_TRANSFORM_STAGE\n )\n assert bufs[1].shape[0] == 512\n assert bufs[1].shape[1] == 512\n\n\ndef test_apply_steps_with_layout_rewrite_corner_case():\n A, B, C = matmul_auto_scheduler_test(1, 1, 1)\n dag = auto_scheduler.ComputeDAG([A, B, C])\n\n s = dag.get_init_state()\n\n s.compute_root(C)\n i_j_fused = s.fuse(C, [s[C].iters[0], s[C].iters[1]])\n s.parallel(C, i_j_fused)\n\n _, bufs = dag.apply_steps_from_state(\n s, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED\n )\n\n\[email protected]_llvm\ndef test_correctness_layout_rewrite_rewrite_for_preTransformed():\n N = 16\n target = tvm.target.Target(\"llvm\")\n task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target=target)\n dag = task.compute_dag\n\n with tempfile.NamedTemporaryFile() as fp:\n log_file = fp.name\n\n search_policy = auto_scheduler.SketchPolicy(task)\n\n measure_ctx = auto_scheduler.LocalRPCMeasureContext()\n tuning_options = auto_scheduler.TuningOptions(\n num_measure_trials=100,\n runner=measure_ctx.runner,\n verbose=2,\n early_stopping=1,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n )\n task.tune(tuning_options, search_policy=search_policy)\n inp, _ = auto_scheduler.load_best_record(log_file, task.workload_key, target)\n s, bufs = dag.apply_steps_from_state(\n inp.state, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED\n )\n s_ref, bufs_ref = dag.apply_steps_from_state(inp.state)\n np_args = [np.random.randn(*topi.get_const_tuple(x.shape)).astype(x.dtype) for x in bufs]\n np_args_ref = [np.array(x) for x in np_args]\n\n weight = np_args_ref[1]\n # infer shape for the rewritten layout\n if len(weight.shape) >= 6:\n # For cpu tile structure SSRSRS\n base = len(weight.shape) - 6\n red_dim = weight.shape[2 + base] * weight.shape[4 + base]\n out_dim = weight.shape[3 + base] * weight.shape[5 + base]\n for i in range(base + 2):\n out_dim *= weight.shape[i]\n new_order = (\n [\n 2 + base,\n 4 + base,\n ]\n + list(range(base + 2))\n + [\n 3 + base,\n 5 + base,\n ]\n )\n np_args_ref[1] = np_args_ref[1].transpose(new_order)\n np_args_ref[1] = np_args_ref[1].reshape((red_dim, out_dim))\n\n func = tvm.build(s, bufs, target=target)\n func_ref = tvm.build(s_ref, bufs_ref, target=target)\n\n ctx = tvm.context(str(target))\n ctx_ref = tvm.cpu()\n\n args = [tvm.nd.array(x, ctx=ctx) for x in np_args]\n args_ref = [tvm.nd.array(x, ctx=ctx_ref) for x in np_args_ref]\n ctx.sync()\n\n func(*args)\n func_ref(*args_ref)\n ctx.sync()\n\n tvm.testing.assert_allclose(args[0].asnumpy(), 
args_ref[0].asnumpy(), atol=1e-3, rtol=1e-3)\n tvm.testing.assert_allclose(args[2].asnumpy(), args_ref[2].asnumpy(), atol=1e-3, rtol=1e-3)\n del measure_ctx\n\n\[email protected]_llvm\ndef test_correctness_layout_rewrite_insert_transform_stage():\n N = 128\n target = tvm.target.Target(\"llvm\")\n task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target=target)\n dag = task.compute_dag\n\n with tempfile.NamedTemporaryFile() as fp:\n log_file = fp.name\n\n search_policy = auto_scheduler.SketchPolicy(task)\n\n measure_ctx = auto_scheduler.LocalRPCMeasureContext()\n tuning_options = auto_scheduler.TuningOptions(\n num_measure_trials=2,\n runner=measure_ctx.runner,\n verbose=1,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n )\n task.tune(tuning_options, search_policy=search_policy)\n inp, _ = auto_scheduler.load_best_record(log_file, task.workload_key, target)\n s, bufs = dag.apply_steps_from_state(\n inp.state, layout_rewrite=auto_scheduler.LayoutRewriteOption.INSERT_TRANSFORM_STAGE\n )\n\n s_ref, bufs_ref = dag.apply_steps_from_state(inp.state)\n np_args = [np.random.randn(*topi.get_const_tuple(x.shape)).astype(x.dtype) for x in bufs]\n\n func = tvm.build(s, bufs, target=target)\n func_ref = tvm.build(s_ref, bufs_ref, target=target)\n\n ctx = tvm.context(str(target))\n ctx_ref = tvm.cpu()\n\n args = [tvm.nd.array(x, ctx=ctx) for x in np_args]\n args_ref = [tvm.nd.array(x, ctx=ctx_ref) for x in np_args]\n ctx.sync()\n\n func(*args)\n func_ref(*args_ref)\n ctx.sync()\n\n tvm.testing.assert_allclose(args[0].asnumpy(), args_ref[0].asnumpy(), atol=1e-3, rtol=1e-3)\n tvm.testing.assert_allclose(args[1].asnumpy(), args_ref[1].asnumpy(), atol=1e-3, rtol=1e-3)\n tvm.testing.assert_allclose(args[2].asnumpy(), args_ref[2].asnumpy(), atol=1e-3, rtol=1e-3)\n del measure_ctx\n\n\nif __name__ == \"__main__\":\n test_apply_steps_with_layout_rewrite()\n test_apply_steps_with_layout_rewrite_corner_case()\n test_correctness_layout_rewrite_rewrite_for_preTransformed()\n test_correctness_layout_rewrite_insert_transform_stage()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport collections\nimport ctypes\nimport json\nimport os\nimport re\nimport struct\nimport sys\nimport tempfile\n\nimport numpy as np\nimport pytest\n\nimport tvm\nimport tvm.relay\nimport tvm.testing\nfrom tvm.contrib import utils\n\n\nINPUT_SHAPE = (1, 3, 16, 16)\n\n\nKERNEL_SHAPE = (3, 3, 3, 3)\n\n\n# The data types that are linkable.\nLINKABLE_DTYPES = (\n [f\"uint{b}\" for b in (8, 16, 32, 64)]\n + [f\"int{b}\" for b in (8, 16, 32, 64)]\n + [\"float32\", \"float64\"]\n)\n\n\ndef dtype_info(dtype):\n \"\"\"Lookup numpy type info for the given string dtype (of LINKABLE_DTYPES above).\"\"\"\n if \"int\" in dtype:\n return np.iinfo(getattr(np, dtype))\n else:\n return np.finfo(getattr(np, dtype))\n\n\n# Note: for debugging, set this to an integer (i.e. 1.0). Then all \"random\" tensors will become\n# predictable\nRANDOM_TENSOR_START = None\n\n\ndef _make_random_tensor(dtype, shape):\n \"\"\"Create a random test tensor with given shape and dtype.\"\"\"\n global RAND_SEED\n if RANDOM_TENSOR_START is not None:\n to_return = np.arange(\n RANDOM_TENSOR_START, RANDOM_TENSOR_START + np.prod(shape), dtype=dtype\n ).reshape(shape)\n RAND_SEED += np.prod(shape)\n return to_return\n\n dinfo = dtype_info(dtype)\n if \"int\" in dtype:\n return np.random.randint(dinfo.min, dinfo.max, shape, dtype=dtype)\n else:\n to_return = np.random.uniform(0, dinfo.max, shape).astype(dtype)\n np.reshape(to_return, np.prod(shape))[::2] *= -1\n return to_return\n\n\ndef _lookup_sid(graph, name):\n \"\"\"Lookup the storage id of a named parameter.\n\n Arguments\n ---------\n graph : dict\n Parsed JSON graph.\n\n name : str\n Name of the tensor parameter to lookup.\n\n Returns\n -------\n int :\n The storage_id of the parameter.\n \"\"\"\n num_outputs_seen = 0\n for i, n in enumerate(graph[\"nodes\"]):\n if n[\"name\"] == name:\n print(\"sid\", name, graph[\"attrs\"][\"storage_id\"][1], num_outputs_seen)\n return graph[\"attrs\"][\"storage_id\"][1][num_outputs_seen]\n else:\n if \"attrs\" in n and \"num_outputs\" in n[\"attrs\"]:\n num_outputs_seen += int(n[\"attrs\"][\"num_outputs\"])\n else:\n num_outputs_seen += 1\n\n raise KeyError(f\"no such param: {name}\")\n\n\ndef _get_ctypes_dtype(dt):\n \"\"\"Return a ctypes c_* datatype given a string data type.\"\"\"\n if \"int\" in dt:\n return getattr(ctypes, f\"c_{dt}\")\n elif dt == \"float32\":\n return ctypes.c_float\n elif dt == \"float64\":\n return ctypes.c_double\n else:\n assert False, f\"unknown dtype: {dt}\"\n\n\ndef _verify_linked_param(dtype, lib, mod, graph, name):\n \"\"\"Directly read memory from the linked library to verify the linked parameter is correct.\"\"\"\n sid = _lookup_sid(graph, name)\n # NOTE: query_imports=True because when loading a module from disk (i.e. 
for C backend),\n # a GraphRuntimeFactory module is created instead of the module itself.\n param_ptr = mod.get_function(\"_lookup_linked_param\", True)(sid)\n gen_param = lib.params[name]\n arr_data = (_get_ctypes_dtype(dtype) * np.prod(gen_param.shape)).from_address(param_ptr.value)\n arr = np.ndarray(shape=gen_param.shape, dtype=gen_param.dtype, buffer=arr_data, order=\"C\")\n if \"int\" in gen_param.dtype:\n np.testing.assert_equal(gen_param.asnumpy(), arr)\n else:\n np.testing.assert_allclose(gen_param.asnumpy(), arr)\n return dtype == gen_param.dtype\n\n\ndef _make_mod_and_params(dtype):\n \"\"\"Create a Relay module and parameters to test the given datatype.\"\"\"\n param_decls = collections.OrderedDict()\n param_init = {}\n\n def _add_decl(name, dtype):\n param_decls[name] = f\"%{name} : Tensor[{KERNEL_SHAPE}, {dtype}]\"\n param_init[name] = _make_random_tensor(dtype, KERNEL_SHAPE)\n\n # Add several parameters so that the number of parameters\n _add_decl(f\"{dtype}_a\", dtype)\n _add_decl(f\"{dtype}_b\", dtype)\n\n mod_lines = [\n '#[version = \"0.0.5\"]',\n f\"def @main(%rand_input : Tensor[{INPUT_SHAPE}, {dtype}], { ', '.join(param_decls.values()) } ) {{\",\n # This program ensures that GraphPlanMemory alternates between the same two storage IDs for a\n # while. In doing this, it ensures that param %{dtype}_b will be placed into the graph at an\n # index unequal to its storage_id. This ensures that GraphRuntimeCodegen encodes the storage_id\n # and not the parameter index into the graph.\n (\n f' %0 = nn.conv2d(%rand_input, %{dtype}_a, data_layout=\"NCHW\", kernel_layout=\"OIHW\", '\n f'kernel_size=[3, 3], out_dtype=\"{dtype}\");'\n ),\n (\n f' %1 = nn.conv2d(%0, %{dtype}_a, data_layout=\"NCHW\", kernel_layout=\"OIHW\", '\n f'kernel_size=[3, 3], out_dtype=\"{dtype}\");'\n ),\n (\n f' %2 = nn.conv2d(%1, %{dtype}_a, data_layout=\"NCHW\", kernel_layout=\"OIHW\", '\n f'kernel_size=[3, 3], out_dtype=\"{dtype}\");'\n ),\n (\n f' %3 = nn.conv2d(%2, %{dtype}_b, data_layout=\"NCHW\", kernel_layout=\"OIHW\", '\n f'kernel_size=[3, 3], out_dtype=\"{dtype}\");'\n ),\n \" %3\",\n \"}\",\n ]\n\n mod = tvm.parser.fromtext(\"\\n\".join(mod_lines))\n return mod, param_init\n\n\[email protected]_llvm\ndef test_llvm_link_params():\n for dtype in LINKABLE_DTYPES:\n ir_mod, param_init = _make_mod_and_params(dtype)\n rand_input = _make_random_tensor(dtype, INPUT_SHAPE)\n main_func = ir_mod[\"main\"]\n target = \"llvm --runtime=c --system-lib --link-params\"\n with tvm.transform.PassContext(opt_level=3):\n lib = tvm.relay.build(ir_mod, target, params=param_init)\n\n # NOTE: Need to export_library() and load_library() to link all the Module(llvm, ...)\n # against one another.\n temp_dir = tempfile.mkdtemp()\n export_file = os.path.join(temp_dir, \"lib.so\")\n lib.lib.export_library(export_file)\n mod = tvm.runtime.load_module(export_file)\n assert set(lib.params.keys()) == {\"p0\", \"p1\"} # NOTE: op folded\n assert mod.get_function(\"TVMSystemLibEntryPoint\") != None\n\n graph = json.loads(lib.graph_json)\n for p in lib.params:\n _verify_linked_param(dtype, lib, mod, graph, p) or found_one\n\n # Wrap in function to explicitly deallocate the runtime.\n def _run_linked(lib, mod):\n graph_json, _, _ = lib\n graph_rt = tvm.contrib.graph_runtime.create(graph_json, mod, tvm.cpu(0))\n graph_rt.set_input(\"rand_input\", rand_input) # NOTE: params not required.\n graph_rt.run()\n return graph_rt.get_output(0)\n\n linked_output = _run_linked(lib, mod)\n\n with tvm.transform.PassContext(opt_level=3):\n lib = 
tvm.relay.build(ir_mod, \"llvm --system-lib\", params=param_init)\n\n def _run_unlinked(lib):\n graph_json, mod, lowered_params = lib\n graph_rt = tvm.contrib.graph_runtime.create(graph_json, mod, tvm.cpu(0))\n graph_rt.set_input(\"rand_input\", rand_input, **lowered_params)\n graph_rt.run()\n return graph_rt.get_output(0)\n\n unlinked_output = _run_unlinked(lib)\n\n if \"int\" in dtype:\n np.testing.assert_equal(unlinked_output.asnumpy(), linked_output.asnumpy())\n else:\n np.testing.assert_allclose(unlinked_output.asnumpy(), linked_output.asnumpy())\n\n\ndef _get_c_datatype(dtype):\n \"\"\"Translate LINKABLE_DTYPES element to c datatype.\"\"\"\n if \"int\" in dtype:\n return f\"{dtype}_t\"\n elif dtype == \"float32\":\n return \"float\"\n elif dtype == \"float64\":\n return \"double\"\n else:\n assert False, f\"unknown dtype {dtype}\"\n\n\ndef _format_c_value(dtype, width, x):\n if \"int\" in dtype:\n hex_formatstr = f'{{:{\"+\" if dtype.startswith(\"int\") else \"\"}#0{width}x}}'\n return hex_formatstr.format(x)\n elif \"float\" in dtype:\n to_ret = float(x).hex()\n if \"inf\" in to_ret:\n return (\"-\" if x < 0 else \"\") + \"INFINITY\"\n elif \"nan\" in to_ret:\n return \"NAN\"\n\n before, after = to_ret.split(\"p\")\n return f'{before.rstrip(\"0\")}p{after}'\n else:\n assert False, f\"don't know dtype {dtype}\"\n\n\nHEX_NUM_RE = re.compile(r\"[+\\-]?(?:(?:0x[0-9A-Fa-f.p+-]+)|(?:INFINITY)|(?:NAN))\")\n\n\ndef test_c_link_params():\n temp_dir = utils.tempdir()\n for dtype in LINKABLE_DTYPES:\n mod, param_init = _make_mod_and_params(dtype)\n rand_input = _make_random_tensor(dtype, INPUT_SHAPE)\n main_func = mod[\"main\"]\n target = \"c --link-params\"\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lib = tvm.relay.build(mod, target, params=param_init)\n assert set(lib.params.keys()) == {\"p0\", \"p1\"} # NOTE: op folded\n\n src = lib.lib.get_source()\n lib.lib.save(\"test.c\", \"c\")\n c_dtype = _get_c_datatype(dtype)\n src_lines = src.split(\"\\n\")\n param = lib.params[\"p0\"].asnumpy().reshape(np.prod(KERNEL_SHAPE))\n param_def = f\"static const {c_dtype} __tvm_param__p0[{np.prod(param.shape)}] = {{\"\n for i, line in enumerate(src_lines):\n if line == param_def:\n i += 1\n break\n else:\n assert False, f'did not find parameter definition \"{param_def}\":\\n{src}'\n\n cursor = 0\n width = dtype_info(dtype).bits // 4 + 2\n if dtype.startswith(\"int\"):\n width += 1 # Account for sign\n\n while \"};\" not in src_lines[i]:\n for match in HEX_NUM_RE.finditer(src_lines[i]):\n assert match.group() == _format_c_value(dtype, width, param[cursor]), (\n f'p0 byte {cursor}: want \"{_format_c_value(dtype, width, param[cursor])}\" got '\n f'\"{match.group(0)}\"; full p0 follows:\\n{src}'\n )\n cursor += 1\n i += 1\n\n assert cursor == np.prod(param.shape)\n temp = utils.tempdir()\n\n # Need a unique name per library to avoid dlopen caching the lib load.\n lib_path = temp_dir.relpath(f\"test-{dtype}-linked.so\")\n lib[\"remove_params\"]().export_library(lib_path)\n lib_mod = tvm.runtime.load_module(lib_path)\n\n # lib_mod = lib_factory['default']()\n graph = json.loads(lib.graph_json)\n for p in lib.params:\n _verify_linked_param(dtype, lib, lib_mod, graph, p)\n\n # Wrap in function to explicitly deallocate the runtime.\n def _run_linked(lib_mod):\n graph_rt = tvm.contrib.graph_runtime.GraphModule(lib_mod[\"default\"](tvm.cpu(0)))\n graph_rt.set_input(\"rand_input\", rand_input) # NOTE: params not required.\n graph_rt.run()\n\n return 
graph_rt.get_output(0)\n\n linked_output = _run_linked(lib_mod)\n\n linked_params = lib.params\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n lib = tvm.relay.build(mod, \"c\", params=param_init)\n _, _, params = lib\n # Need a unique name per library to avoid dlopen caching the lib load.\n lib_path = temp_dir.relpath(f\"test-{dtype}-unlinked.so\")\n lib.export_library(lib_path)\n lib_mod = tvm.runtime.load_module(lib_path)\n\n def _run_unlinked(lib_mod):\n graph_rt = tvm.contrib.graph_runtime.GraphModule(lib_mod[\"default\"](tvm.cpu(0)))\n graph_rt.set_input(\"rand_input\", rand_input, **params)\n graph_rt.run()\n return graph_rt.get_output(0)\n\n unlinked_output = _run_unlinked(lib_mod)\n\n if \"int\" in dtype:\n np.testing.assert_equal(unlinked_output.asnumpy(), linked_output.asnumpy())\n else:\n np.testing.assert_allclose(unlinked_output.asnumpy(), linked_output.asnumpy())\n\n\[email protected]_micro\ndef test_crt_link_params():\n import tvm.micro\n\n for dtype in LINKABLE_DTYPES:\n mod, param_init = _make_mod_and_params(dtype)\n rand_input = _make_random_tensor(dtype, INPUT_SHAPE)\n main_func = mod[\"main\"]\n target = \"c --system-lib --runtime=c --link-params\"\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n graph_json, lib, params = tvm.relay.build(mod, target, params=param_init)\n assert set(params.keys()) == {\"p0\", \"p1\"} # NOTE: op folded\n\n workspace = tvm.micro.Workspace()\n compiler = tvm.micro.DefaultCompiler(target=target)\n opts = tvm.micro.default_options(\n os.path.join(tvm.micro.get_standalone_crt_dir(), \"template\", \"host\")\n )\n opts[\"bin_opts\"][\"ldflags\"].append(\"-DTVM_HOST_USE_GRAPH_RUNTIME_MODULE\")\n\n micro_binary = tvm.micro.build_static_runtime(\n workspace,\n compiler,\n lib,\n compiler_options=opts,\n extra_libs=[\n tvm.micro.get_standalone_crt_lib(m)\n for m in (\"memory\", \"graph_runtime_module\", \"graph_runtime\")\n ],\n )\n\n flasher_kw = {\n \"debug\": False,\n }\n flasher = compiler.flasher(**flasher_kw)\n with tvm.micro.Session(binary=micro_binary, flasher=flasher) as sess:\n graph_rt = tvm.micro.session.create_local_graph_runtime(\n graph_json, sess.get_system_lib(), sess.context\n )\n\n # NOTE: not setting params here.\n graph_rt.set_input(\"rand_input\", rand_input)\n graph_rt.run()\n linked_output = graph_rt.get_output(0).asnumpy()\n\n with tvm.transform.PassContext(opt_level=3):\n lib = tvm.relay.build(mod, \"llvm --system-lib\", params=param_init)\n\n def _run_unlinked(lib):\n graph_json, mod, lowered_params = lib\n graph_rt = tvm.contrib.graph_runtime.create(graph_json, mod, tvm.cpu(0))\n graph_rt.set_input(\"rand_input\", rand_input, **lowered_params)\n graph_rt.run()\n return graph_rt.get_output(0).asnumpy()\n\n unlinked_output = _run_unlinked(lib)\n\n if \"int\" in dtype:\n np.testing.assert_equal(unlinked_output, linked_output)\n else:\n np.testing.assert_allclose(unlinked_output, linked_output)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([__file__] + sys.argv[1:]))\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.expand_dims", "numpy.asarray", "numpy.squeeze", "numpy.argmax", "matplotlib.pyplot.show" ], [ "numpy.random.uniform", "numpy.zeros" ], [ "numpy.array" ], [ "numpy.testing.assert_equal", "numpy.ndarray", "numpy.prod", "numpy.testing.assert_allclose", "numpy.random.uniform", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Semih-Kurt/scvi-tools
[ "1bea2af8cc99e11d55a6925f09d978de5f6994fb", "1bea2af8cc99e11d55a6925f09d978de5f6994fb", "1bea2af8cc99e11d55a6925f09d978de5f6994fb", "1bea2af8cc99e11d55a6925f09d978de5f6994fb" ]
[ "tests/external/test_cellassign.py", "scvi/model/_totalvi.py", "tests/external/test_gimvi.py", "scvi/data/_built_in_data/_seqfish.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom scvi.data import synthetic_iid\nfrom scvi.external import CellAssign\n\n\ndef test_cellassign(save_path):\n adata = synthetic_iid(\n n_labels=5,\n )\n adata.obs[\"size_factor\"] = adata.X.sum(1)\n CellAssign.setup_anndata(\n adata,\n \"size_factor\",\n batch_key=\"batch\",\n )\n marker_df = pd.DataFrame(data=np.random.randint(2, size=(100, 5)))\n marker_df.index = marker_df.index.map(str)\n\n model = CellAssign(adata, marker_df)\n model.train(max_epochs=1)\n model.predict()\n", "import logging\nimport warnings\nfrom collections.abc import Iterable as IterableClass\nfrom functools import partial\nfrom typing import Dict, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom anndata import AnnData\n\nfrom scvi import REGISTRY_KEYS\nfrom scvi._compat import Literal\nfrom scvi._utils import _doc_params\nfrom scvi.data._utils import _check_nonnegative_integers\nfrom scvi.data.anndata import AnnDataManager\nfrom scvi.data.anndata.fields import (\n CategoricalJointObsField,\n CategoricalObsField,\n LayerField,\n NumericalJointObsField,\n ProteinObsmField,\n)\nfrom scvi.dataloaders import DataSplitter\nfrom scvi.model._utils import (\n _get_batch_code_from_category,\n _init_library_size,\n cite_seq_raw_counts_properties,\n)\nfrom scvi.model.base._utils import _de_core\nfrom scvi.module import TOTALVAE\nfrom scvi.train import AdversarialTrainingPlan, TrainRunner\nfrom scvi.utils._docstrings import doc_differential_expression, setup_anndata_dsp\n\nfrom .base import ArchesMixin, BaseModelClass, RNASeqMixin, VAEMixin\n\nlogger = logging.getLogger(__name__)\nNumber = TypeVar(\"Number\", int, float)\n\n\nclass TOTALVI(RNASeqMixin, VAEMixin, ArchesMixin, BaseModelClass):\n \"\"\"\n total Variational Inference [GayosoSteier21]_.\n\n Parameters\n ----------\n adata\n AnnData object that has been registered via :meth:`~scvi.model.TOTALVI.setup_anndata`.\n n_latent\n Dimensionality of the latent space.\n gene_dispersion\n One of the following:\n\n * ``'gene'`` - genes_dispersion parameter of NB is constant per gene across cells\n * ``'gene-batch'`` - genes_dispersion can differ between different batches\n * ``'gene-label'`` - genes_dispersion can differ between different labels\n protein_dispersion\n One of the following:\n\n * ``'protein'`` - protein_dispersion parameter is constant per protein across cells\n * ``'protein-batch'`` - protein_dispersion can differ between different batches NOT TESTED\n * ``'protein-label'`` - protein_dispersion can differ between different labels NOT TESTED\n gene_likelihood\n One of:\n\n * ``'nb'`` - Negative binomial distribution\n * ``'zinb'`` - Zero-inflated negative binomial distribution\n latent_distribution\n One of:\n\n * ``'normal'`` - Normal distribution\n * ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)\n empirical_protein_background_prior\n Set the initialization of protein background prior empirically. This option fits a GMM for each of\n 100 cells per batch and averages the distributions. Note that even with this option set to `True`,\n this only initializes a parameter that is learned during inference. 
If `False`, randomly initializes.\n The default (`None`), sets this to `True` if greater than 10 proteins are used.\n override_missing_proteins\n If `True`, will not treat proteins with all 0 expression in a particular batch as missing.\n **model_kwargs\n Keyword args for :class:`~scvi.module.TOTALVAE`\n\n Examples\n --------\n >>> adata = anndata.read_h5ad(path_to_anndata)\n >>> scvi.model.TOTALVI.setup_anndata(adata, batch_key=\"batch\", protein_expression_obsm_key=\"protein_expression\")\n >>> vae = scvi.model.TOTALVI(adata)\n >>> vae.train()\n >>> adata.obsm[\"X_totalVI\"] = vae.get_latent_representation()\n\n Notes\n -----\n See further usage examples in the following tutorials:\n\n 1. :doc:`/tutorials/notebooks/totalVI`\n 2. :doc:`/tutorials/notebooks/cite_scrna_integration_w_totalVI`\n 3. :doc:`/tutorials/notebooks/scarches_scvi_tools`\n \"\"\"\n\n def __init__(\n self,\n adata: AnnData,\n n_latent: int = 20,\n gene_dispersion: Literal[\n \"gene\", \"gene-batch\", \"gene-label\", \"gene-cell\"\n ] = \"gene\",\n protein_dispersion: Literal[\n \"protein\", \"protein-batch\", \"protein-label\"\n ] = \"protein\",\n gene_likelihood: Literal[\"zinb\", \"nb\"] = \"nb\",\n latent_distribution: Literal[\"normal\", \"ln\"] = \"normal\",\n empirical_protein_background_prior: Optional[bool] = None,\n override_missing_proteins: bool = False,\n **model_kwargs,\n ):\n super(TOTALVI, self).__init__(adata)\n self.protein_state_registry = self.adata_manager.get_state_registry(\n REGISTRY_KEYS.PROTEIN_EXP_KEY\n )\n if (\n ProteinObsmField.PROTEIN_BATCH_MASK in self.protein_state_registry\n and not override_missing_proteins\n ):\n batch_mask = self.protein_state_registry.protein_batch_mask\n msg = (\n \"Some proteins have all 0 counts in some batches. \"\n + \"These proteins will be treated as missing measurements; however, \"\n + \"this can occur due to experimental design/biology. 
\"\n + \"Reinitialize the model with `override_missing_proteins=True`,\"\n + \"to override this behavior.\"\n )\n warnings.warn(msg, UserWarning)\n self._use_adversarial_classifier = True\n else:\n batch_mask = None\n self._use_adversarial_classifier = False\n\n emp_prior = (\n empirical_protein_background_prior\n if empirical_protein_background_prior is not None\n else (self.summary_stats.n_proteins > 10)\n )\n if emp_prior:\n prior_mean, prior_scale = self._get_totalvi_protein_priors(adata)\n else:\n prior_mean, prior_scale = None, None\n\n n_cats_per_cov = (\n self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[\n CategoricalJointObsField.N_CATS_PER_KEY\n ]\n if REGISTRY_KEYS.CAT_COVS_KEY in self.adata_manager.data_registry\n else None\n )\n\n n_batch = self.summary_stats.n_batch\n library_log_means, library_log_vars = _init_library_size(\n self.adata_manager, n_batch\n )\n\n self.module = TOTALVAE(\n n_input_genes=self.summary_stats.n_vars,\n n_input_proteins=self.summary_stats.n_proteins,\n n_batch=n_batch,\n n_latent=n_latent,\n n_continuous_cov=self.summary_stats.get(\"n_extra_continuous_covs\", 0),\n n_cats_per_cov=n_cats_per_cov,\n gene_dispersion=gene_dispersion,\n protein_dispersion=protein_dispersion,\n gene_likelihood=gene_likelihood,\n latent_distribution=latent_distribution,\n protein_batch_mask=batch_mask,\n protein_background_prior_mean=prior_mean,\n protein_background_prior_scale=prior_scale,\n library_log_means=library_log_means,\n library_log_vars=library_log_vars,\n **model_kwargs,\n )\n self._model_summary_string = (\n \"TotalVI Model with the following params: \\nn_latent: {}, \"\n \"gene_dispersion: {}, protein_dispersion: {}, gene_likelihood: {}, latent_distribution: {}\"\n ).format(\n n_latent,\n gene_dispersion,\n protein_dispersion,\n gene_likelihood,\n latent_distribution,\n )\n self.init_params_ = self._get_init_params(locals())\n\n def train(\n self,\n max_epochs: Optional[int] = 400,\n lr: float = 4e-3,\n use_gpu: Optional[Union[str, int, bool]] = None,\n train_size: float = 0.9,\n validation_size: Optional[float] = None,\n batch_size: int = 256,\n early_stopping: bool = True,\n check_val_every_n_epoch: Optional[int] = None,\n reduce_lr_on_plateau: bool = True,\n n_steps_kl_warmup: Union[int, None] = None,\n n_epochs_kl_warmup: Union[int, None] = None,\n adversarial_classifier: Optional[bool] = None,\n plan_kwargs: Optional[dict] = None,\n **kwargs,\n ):\n \"\"\"\n Trains the model using amortized variational inference.\n\n Parameters\n ----------\n max_epochs\n Number of passes through the dataset.\n lr\n Learning rate for optimization.\n use_gpu\n Use default GPU if available (if None or True), or index of GPU to use (if int),\n or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).\n train_size\n Size of training set in the range [0.0, 1.0].\n validation_size\n Size of the test set. If `None`, defaults to 1 - `train_size`. If\n `train_size + validation_size < 1`, the remaining cells belong to a test set.\n batch_size\n Minibatch size to use during training.\n early_stopping\n Whether to perform early stopping with respect to the validation set.\n check_val_every_n_epoch\n Check val every n train epochs. By default, val is not checked, unless `early_stopping` is `True`\n or `reduce_lr_on_plateau` is `True`. 
If either of the latter conditions are met, val is checked\n every epoch.\n reduce_lr_on_plateau\n Reduce learning rate on plateau of validation metric (default is ELBO).\n n_steps_kl_warmup\n Number of training steps (minibatches) to scale weight on KL divergences from 0 to 1.\n Only activated when `n_epochs_kl_warmup` is set to None. If `None`, defaults\n to `floor(0.75 * adata.n_obs)`.\n n_epochs_kl_warmup\n Number of epochs to scale weight on KL divergences from 0 to 1.\n Overrides `n_steps_kl_warmup` when both are not `None`.\n adversarial_classifier\n Whether to use adversarial classifier in the latent space. This helps mixing when\n there are missing proteins in any of the batches. Defaults to `True` is missing proteins\n are detected.\n plan_kwargs\n Keyword args for :class:`~scvi.train.AdversarialTrainingPlan`. Keyword arguments passed to\n `train()` will overwrite values present in `plan_kwargs`, when appropriate.\n **kwargs\n Other keyword args for :class:`~scvi.train.Trainer`.\n \"\"\"\n if adversarial_classifier is None:\n adversarial_classifier = self._use_adversarial_classifier\n n_steps_kl_warmup = (\n n_steps_kl_warmup\n if n_steps_kl_warmup is not None\n else int(0.75 * self.adata.n_obs)\n )\n if reduce_lr_on_plateau:\n check_val_every_n_epoch = 1\n\n update_dict = {\n \"lr\": lr,\n \"adversarial_classifier\": adversarial_classifier,\n \"reduce_lr_on_plateau\": reduce_lr_on_plateau,\n \"n_epochs_kl_warmup\": n_epochs_kl_warmup,\n \"n_steps_kl_warmup\": n_steps_kl_warmup,\n \"check_val_every_n_epoch\": check_val_every_n_epoch,\n }\n if plan_kwargs is not None:\n plan_kwargs.update(update_dict)\n else:\n plan_kwargs = update_dict\n\n if max_epochs is None:\n n_cells = self.adata.n_obs\n max_epochs = np.min([round((20000 / n_cells) * 400), 400])\n\n plan_kwargs = plan_kwargs if isinstance(plan_kwargs, dict) else dict()\n\n data_splitter = DataSplitter(\n self.adata_manager,\n train_size=train_size,\n validation_size=validation_size,\n batch_size=batch_size,\n use_gpu=use_gpu,\n )\n training_plan = AdversarialTrainingPlan(self.module, **plan_kwargs)\n runner = TrainRunner(\n self,\n training_plan=training_plan,\n data_splitter=data_splitter,\n max_epochs=max_epochs,\n use_gpu=use_gpu,\n early_stopping=early_stopping,\n **kwargs,\n )\n return runner()\n\n @torch.no_grad()\n def get_latent_library_size(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n give_mean: bool = True,\n batch_size: Optional[int] = None,\n ) -> np.ndarray:\n r\"\"\"\n Returns the latent library size for each cell.\n\n This is denoted as :math:`\\ell_n` in the totalVI paper.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n give_mean\n Return the mean or a sample from the posterior distribution.\n batch_size\n Minibatch size for data loading into model. 
Defaults to `scvi.settings.batch_size`.\n \"\"\"\n self._check_if_trained(warn=False)\n\n adata = self._validate_anndata(adata)\n post = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n libraries = []\n for tensors in post:\n inference_inputs = self.module._get_inference_input(tensors)\n outputs = self.module.inference(**inference_inputs)\n if give_mean:\n ql_m = outputs[\"ql_m\"]\n ql_v = outputs[\"ql_v\"]\n library = torch.exp(ql_m + 0.5 * ql_v)\n else:\n library = outputs[\"library_gene\"]\n libraries += [library.cpu()]\n return torch.cat(libraries).numpy()\n\n @torch.no_grad()\n def get_normalized_expression(\n self,\n adata=None,\n indices=None,\n n_samples_overall: Optional[int] = None,\n transform_batch: Optional[Sequence[Union[Number, str]]] = None,\n gene_list: Optional[Sequence[str]] = None,\n protein_list: Optional[Sequence[str]] = None,\n library_size: Optional[Union[float, Literal[\"latent\"]]] = 1,\n n_samples: int = 1,\n sample_protein_mixing: bool = False,\n scale_protein: bool = False,\n include_protein_background: bool = False,\n batch_size: Optional[int] = None,\n return_mean: bool = True,\n return_numpy: Optional[bool] = None,\n ) -> Tuple[Union[np.ndarray, pd.DataFrame], Union[np.ndarray, pd.DataFrame]]:\n r\"\"\"\n Returns the normalized gene expression and protein expression.\n\n This is denoted as :math:`\\rho_n` in the totalVI paper for genes, and TODO\n for proteins, :math:`(1-\\pi_{nt})\\alpha_{nt}\\beta_{nt}`.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n n_samples_overall\n Number of samples to use in total\n transform_batch\n Batch to condition on.\n If transform_batch is:\n\n - None, then real observed batch is used\n - int, then batch transform_batch is used\n - List[int], then average over batches in list\n gene_list\n Return frequencies of expression for a subset of genes.\n This can save memory when working with large datasets and few genes are\n of interest.\n protein_list\n Return protein expression for a subset of genes.\n This can save memory when working with large datasets and few genes are\n of interest.\n library_size\n Scale the expression frequencies to a common library size.\n This allows gene expression levels to be interpreted on a common scale of relevant\n magnitude.\n n_samples\n Get sample scale from multiple samples.\n sample_protein_mixing\n Sample mixing bernoulli, setting background to zero\n scale_protein\n Make protein expression sum to 1\n include_protein_background\n Include background component for protein expression\n batch_size\n Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.\n return_mean\n Whether to return the mean of the samples.\n return_numpy\n Return a `np.ndarray` instead of a `pd.DataFrame`. Includes gene\n names as columns. If either n_samples=1 or return_mean=True, defaults to False.\n Otherwise, it defaults to True.\n\n Returns\n -------\n - **gene_normalized_expression** - normalized expression for RNA\n - **protein_normalized_expression** - normalized expression for proteins\n\n If ``n_samples`` > 1 and ``return_mean`` is False, then the shape is ``(samples, cells, genes)``.\n Otherwise, shape is ``(cells, genes)``. 
Return type is ``pd.DataFrame`` unless ``return_numpy`` is True.\n \"\"\"\n adata = self._validate_anndata(adata)\n adata_manager = self.get_anndata_manager(adata)\n if indices is None:\n indices = np.arange(adata.n_obs)\n if n_samples_overall is not None:\n indices = np.random.choice(indices, n_samples_overall)\n post = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n\n if gene_list is None:\n gene_mask = slice(None)\n else:\n all_genes = adata.var_names\n gene_mask = [True if gene in gene_list else False for gene in all_genes]\n if protein_list is None:\n protein_mask = slice(None)\n else:\n all_proteins = self.protein_state_registry.column_names\n protein_mask = [True if p in protein_list else False for p in all_proteins]\n if indices is None:\n indices = np.arange(adata.n_obs)\n\n if n_samples > 1 and return_mean is False:\n if return_numpy is False:\n warnings.warn(\n \"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray\"\n )\n return_numpy = True\n\n if not isinstance(transform_batch, IterableClass):\n transform_batch = [transform_batch]\n\n transform_batch = _get_batch_code_from_category(adata_manager, transform_batch)\n\n scale_list_gene = []\n scale_list_pro = []\n\n for tensors in post:\n x = tensors[REGISTRY_KEYS.X_KEY]\n y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]\n px_scale = torch.zeros_like(x)\n py_scale = torch.zeros_like(y)\n if n_samples > 1:\n px_scale = torch.stack(n_samples * [px_scale])\n py_scale = torch.stack(n_samples * [py_scale])\n for b in transform_batch:\n generative_kwargs = dict(transform_batch=b)\n inference_kwargs = dict(n_samples=n_samples)\n _, generative_outputs = self.module.forward(\n tensors=tensors,\n inference_kwargs=inference_kwargs,\n generative_kwargs=generative_kwargs,\n compute_loss=False,\n )\n if library_size == \"latent\":\n px_scale += generative_outputs[\"px_\"][\"rate\"].cpu()\n else:\n px_scale += generative_outputs[\"px_\"][\"scale\"].cpu()\n px_scale = px_scale[..., gene_mask]\n\n py_ = generative_outputs[\"py_\"]\n # probability of background\n protein_mixing = 1 / (1 + torch.exp(-py_[\"mixing\"].cpu()))\n if sample_protein_mixing is True:\n protein_mixing = torch.distributions.Bernoulli(\n protein_mixing\n ).sample()\n protein_val = py_[\"rate_fore\"].cpu() * (1 - protein_mixing)\n if include_protein_background is True:\n protein_val += py_[\"rate_back\"].cpu() * protein_mixing\n\n if scale_protein is True:\n protein_val = torch.nn.functional.normalize(\n protein_val, p=1, dim=-1\n )\n protein_val = protein_val[..., protein_mask]\n py_scale += protein_val\n px_scale /= len(transform_batch)\n py_scale /= len(transform_batch)\n scale_list_gene.append(px_scale)\n scale_list_pro.append(py_scale)\n\n if n_samples > 1:\n # concatenate along batch dimension -> result shape = (samples, cells, features)\n scale_list_gene = torch.cat(scale_list_gene, dim=1)\n scale_list_pro = torch.cat(scale_list_pro, dim=1)\n # (cells, features, samples)\n scale_list_gene = scale_list_gene.permute(1, 2, 0)\n scale_list_pro = scale_list_pro.permute(1, 2, 0)\n else:\n scale_list_gene = torch.cat(scale_list_gene, dim=0)\n scale_list_pro = torch.cat(scale_list_pro, dim=0)\n\n if return_mean is True and n_samples > 1:\n scale_list_gene = torch.mean(scale_list_gene, dim=-1)\n scale_list_pro = torch.mean(scale_list_pro, dim=-1)\n\n scale_list_gene = scale_list_gene.cpu().numpy()\n scale_list_pro = scale_list_pro.cpu().numpy()\n if return_numpy is None or return_numpy is False:\n gene_df = 
pd.DataFrame(\n scale_list_gene,\n columns=adata.var_names[gene_mask],\n index=adata.obs_names[indices],\n )\n protein_names = self.protein_state_registry.column_names\n pro_df = pd.DataFrame(\n scale_list_pro,\n columns=protein_names[protein_mask],\n index=adata.obs_names[indices],\n )\n\n return gene_df, pro_df\n else:\n return scale_list_gene, scale_list_pro\n\n @torch.no_grad()\n def get_protein_foreground_probability(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n transform_batch: Optional[Sequence[Union[Number, str]]] = None,\n protein_list: Optional[Sequence[str]] = None,\n n_samples: int = 1,\n batch_size: Optional[int] = None,\n return_mean: bool = True,\n return_numpy: Optional[bool] = None,\n ):\n r\"\"\"\n Returns the foreground probability for proteins.\n\n This is denoted as :math:`(1 - \\pi_{nt})` in the totalVI paper.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n transform_batch\n Batch to condition on.\n If transform_batch is:\n\n - None, then real observed batch is used\n - int, then batch transform_batch is used\n - List[int], then average over batches in list\n protein_list\n Return protein expression for a subset of genes.\n This can save memory when working with large datasets and few genes are\n of interest.\n n_samples\n Number of posterior samples to use for estimation.\n batch_size\n Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.\n return_mean\n Whether to return the mean of the samples.\n return_numpy\n Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes\n gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.\n Otherwise, it defaults to `True`.\n\n Returns\n -------\n - **foreground_probability** - probability foreground for each protein\n\n If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, genes)`.\n Otherwise, shape is `(cells, genes)`. 
In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.\n \"\"\"\n adata = self._validate_anndata(adata)\n post = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n\n if protein_list is None:\n protein_mask = slice(None)\n else:\n all_proteins = self.protein_state_registry.column_names\n protein_mask = [True if p in protein_list else False for p in all_proteins]\n\n if n_samples > 1 and return_mean is False:\n if return_numpy is False:\n warnings.warn(\n \"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray\"\n )\n return_numpy = True\n if indices is None:\n indices = np.arange(adata.n_obs)\n\n py_mixings = []\n if not isinstance(transform_batch, IterableClass):\n transform_batch = [transform_batch]\n\n transform_batch = _get_batch_code_from_category(\n self.adata_manager, transform_batch\n )\n for tensors in post:\n y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]\n py_mixing = torch.zeros_like(y[..., protein_mask])\n if n_samples > 1:\n py_mixing = torch.stack(n_samples * [py_mixing])\n for b in transform_batch:\n generative_kwargs = dict(transform_batch=b)\n inference_kwargs = dict(n_samples=n_samples)\n _, generative_outputs = self.module.forward(\n tensors=tensors,\n inference_kwargs=inference_kwargs,\n generative_kwargs=generative_kwargs,\n compute_loss=False,\n )\n py_mixing += torch.sigmoid(generative_outputs[\"py_\"][\"mixing\"])[\n ..., protein_mask\n ].cpu()\n py_mixing /= len(transform_batch)\n py_mixings += [py_mixing]\n if n_samples > 1:\n # concatenate along batch dimension -> result shape = (samples, cells, features)\n py_mixings = torch.cat(py_mixings, dim=1)\n # (cells, features, samples)\n py_mixings = py_mixings.permute(1, 2, 0)\n else:\n py_mixings = torch.cat(py_mixings, dim=0)\n\n if return_mean is True and n_samples > 1:\n py_mixings = torch.mean(py_mixings, dim=-1)\n\n py_mixings = py_mixings.cpu().numpy()\n\n if return_numpy is True:\n return 1 - py_mixings\n else:\n pro_names = self.protein_state_registry.column_names\n foreground_prob = pd.DataFrame(\n 1 - py_mixings,\n columns=pro_names[protein_mask],\n index=adata.obs_names[indices],\n )\n return foreground_prob\n\n def _expression_for_de(\n self,\n adata=None,\n indices=None,\n n_samples_overall=None,\n transform_batch: Optional[Sequence[Union[Number, str]]] = None,\n scale_protein=False,\n batch_size: Optional[int] = None,\n sample_protein_mixing=False,\n include_protein_background=False,\n protein_prior_count=0.5,\n ):\n rna, protein = self.get_normalized_expression(\n adata=adata,\n indices=indices,\n n_samples_overall=n_samples_overall,\n transform_batch=transform_batch,\n return_numpy=True,\n n_samples=1,\n batch_size=batch_size,\n scale_protein=scale_protein,\n sample_protein_mixing=sample_protein_mixing,\n include_protein_background=include_protein_background,\n )\n protein += protein_prior_count\n\n joint = np.concatenate([rna, protein], axis=1)\n return joint\n\n @_doc_params(\n doc_differential_expression=doc_differential_expression,\n )\n def differential_expression(\n self,\n adata: Optional[AnnData] = None,\n groupby: Optional[str] = None,\n group1: Optional[Iterable[str]] = None,\n group2: Optional[str] = None,\n idx1: Optional[Union[Sequence[int], Sequence[bool], str]] = None,\n idx2: Optional[Union[Sequence[int], Sequence[bool], str]] = None,\n mode: Literal[\"vanilla\", \"change\"] = \"change\",\n delta: float = 0.25,\n batch_size: Optional[int] = None,\n all_stats: bool = True,\n batch_correction: 
bool = False,\n batchid1: Optional[Iterable[str]] = None,\n batchid2: Optional[Iterable[str]] = None,\n fdr_target: float = 0.05,\n silent: bool = False,\n protein_prior_count: float = 0.1,\n scale_protein: bool = False,\n sample_protein_mixing: bool = False,\n include_protein_background: bool = False,\n **kwargs,\n ) -> pd.DataFrame:\n r\"\"\"\n A unified method for differential expression analysis.\n\n Implements `\"vanilla\"` DE [Lopez18]_ and `\"change\"` mode DE [Boyeau19]_.\n\n Parameters\n ----------\n {doc_differential_expression}\n protein_prior_count\n Prior count added to protein expression before LFC computation\n scale_protein\n Force protein values to sum to one in every single cell (post-hoc normalization)\n sample_protein_mixing\n Sample the protein mixture component, i.e., use the parameter to sample a Bernoulli\n that determines if expression is from foreground/background.\n include_protein_background\n Include the protein background component as part of the protein expression\n **kwargs\n Keyword args for :meth:`scvi.model.base.DifferentialComputation.get_bayes_factors`\n\n Returns\n -------\n Differential expression DataFrame.\n \"\"\"\n adata = self._validate_anndata(adata)\n model_fn = partial(\n self._expression_for_de,\n scale_protein=scale_protein,\n sample_protein_mixing=sample_protein_mixing,\n include_protein_background=include_protein_background,\n protein_prior_count=protein_prior_count,\n batch_size=batch_size,\n )\n col_names = np.concatenate(\n [\n np.asarray(adata.var_names),\n self.protein_state_registry.column_names,\n ]\n )\n result = _de_core(\n self.get_anndata_manager(adata, required=True),\n model_fn,\n groupby,\n group1,\n group2,\n idx1,\n idx2,\n all_stats,\n cite_seq_raw_counts_properties,\n col_names,\n mode,\n batchid1,\n batchid2,\n delta,\n batch_correction,\n fdr_target,\n silent,\n **kwargs,\n )\n\n return result\n\n @torch.no_grad()\n def posterior_predictive_sample(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n n_samples: int = 1,\n batch_size: Optional[int] = None,\n gene_list: Optional[Sequence[str]] = None,\n protein_list: Optional[Sequence[str]] = None,\n ) -> np.ndarray:\n r\"\"\"\n Generate observation samples from the posterior predictive distribution.\n\n The posterior predictive distribution is written as :math:`p(\\hat{x}, \\hat{y} \\mid x, y)`.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n n_samples\n Number of required samples for each cell\n batch_size\n Minibatch size for data loading into model. 
Defaults to `scvi.settings.batch_size`.\n gene_list\n Names of genes of interest\n protein_list\n Names of proteins of interest\n\n Returns\n -------\n x_new : :class:`~numpy.ndarray`\n tensor with shape (n_cells, n_genes, n_samples)\n \"\"\"\n if self.module.gene_likelihood not in [\"nb\"]:\n raise ValueError(\"Invalid gene_likelihood\")\n\n adata = self._validate_anndata(adata)\n if gene_list is None:\n gene_mask = slice(None)\n else:\n all_genes = adata.var_names\n gene_mask = [True if gene in gene_list else False for gene in all_genes]\n if protein_list is None:\n protein_mask = slice(None)\n else:\n all_proteins = self.protein_state_registry.column_names\n protein_mask = [True if p in protein_list else False for p in all_proteins]\n\n scdl = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n\n scdl_list = []\n for tensors in scdl:\n rna_sample, protein_sample = self.module.sample(\n tensors, n_samples=n_samples\n )\n rna_sample = rna_sample[..., gene_mask]\n protein_sample = protein_sample[..., protein_mask]\n data = torch.cat([rna_sample, protein_sample], dim=-1).numpy()\n\n scdl_list += [data]\n if n_samples > 1:\n scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))\n scdl_list = np.concatenate(scdl_list, axis=0)\n\n return scdl_list\n\n @torch.no_grad()\n def _get_denoised_samples(\n self,\n adata=None,\n indices=None,\n n_samples: int = 25,\n batch_size: int = 64,\n rna_size_factor: int = 1000,\n transform_batch: Optional[int] = None,\n ) -> np.ndarray:\n \"\"\"\n Return samples from an adjusted posterior predictive.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n indices of `adata` to use\n n_samples\n How may samples per cell\n batch_size\n Minibatch size for data loading into model. 
Defaults to `scvi.settings.batch_size`.\n rna_size_factor\n size factor for RNA prior to sampling gamma distribution\n transform_batch\n int of which batch to condition on for all cells\n \"\"\"\n adata = self._validate_anndata(adata)\n scdl = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n\n scdl_list = []\n for tensors in scdl:\n x = tensors[REGISTRY_KEYS.X_KEY]\n y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]\n\n generative_kwargs = dict(transform_batch=transform_batch)\n inference_kwargs = dict(n_samples=n_samples)\n with torch.no_grad():\n inference_outputs, generative_outputs, = self.module.forward(\n tensors,\n inference_kwargs=inference_kwargs,\n generative_kwargs=generative_kwargs,\n compute_loss=False,\n )\n px_ = generative_outputs[\"px_\"]\n py_ = generative_outputs[\"py_\"]\n device = px_[\"r\"].device\n\n pi = 1 / (1 + torch.exp(-py_[\"mixing\"]))\n mixing_sample = torch.distributions.Bernoulli(pi).sample()\n protein_rate = py_[\"rate_fore\"]\n rate = torch.cat((rna_size_factor * px_[\"scale\"], protein_rate), dim=-1)\n if len(px_[\"r\"].size()) == 2:\n px_dispersion = px_[\"r\"]\n else:\n px_dispersion = torch.ones_like(x).to(device) * px_[\"r\"]\n if len(py_[\"r\"].size()) == 2:\n py_dispersion = py_[\"r\"]\n else:\n py_dispersion = torch.ones_like(y).to(device) * py_[\"r\"]\n\n dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)\n\n # This gamma is really l*w using scVI manuscript notation\n p = rate / (rate + dispersion)\n r = dispersion\n l_train = torch.distributions.Gamma(r, (1 - p) / p).sample()\n data = l_train.cpu().numpy()\n # make background 0\n data[:, :, self.adata.shape[1] :] = (\n data[:, :, self.adata.shape[1] :] * (1 - mixing_sample).cpu().numpy()\n )\n scdl_list += [data]\n\n scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))\n\n return np.concatenate(scdl_list, axis=0)\n\n @torch.no_grad()\n def get_feature_correlation_matrix(\n self,\n adata=None,\n indices=None,\n n_samples: int = 10,\n batch_size: int = 64,\n rna_size_factor: int = 1000,\n transform_batch: Optional[Sequence[Union[Number, str]]] = None,\n correlation_type: Literal[\"spearman\", \"pearson\"] = \"spearman\",\n log_transform: bool = False,\n ) -> pd.DataFrame:\n \"\"\"\n Generate gene-gene correlation matrix using scvi uncertainty and expression.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n n_samples\n Number of posterior samples to use for estimation.\n batch_size\n Minibatch size for data loading into model. 
Defaults to `scvi.settings.batch_size`.\n rna_size_factor\n size factor for RNA prior to sampling gamma distribution\n transform_batch\n Batches to condition on.\n If transform_batch is:\n\n - None, then real observed batch is used\n - int, then batch transform_batch is used\n - list of int, then values are averaged over provided batches.\n correlation_type\n One of \"pearson\", \"spearman\".\n log_transform\n Whether to log transform denoised values prior to correlation calculation.\n\n Returns\n -------\n Gene-protein-gene-protein correlation matrix\n \"\"\"\n from scipy.stats import spearmanr\n\n adata = self._validate_anndata(adata)\n\n if not isinstance(transform_batch, IterableClass):\n transform_batch = [transform_batch]\n\n transform_batch = _get_batch_code_from_category(\n self.get_anndata_manager(adata, required=True), transform_batch\n )\n\n corr_mats = []\n for b in transform_batch:\n denoised_data = self._get_denoised_samples(\n n_samples=n_samples,\n batch_size=batch_size,\n rna_size_factor=rna_size_factor,\n transform_batch=b,\n )\n flattened = np.zeros(\n (denoised_data.shape[0] * n_samples, denoised_data.shape[1])\n )\n for i in range(n_samples):\n flattened[\n denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)\n ] = denoised_data[:, :, i]\n if log_transform is True:\n flattened[:, : self.n_genes] = np.log(\n flattened[:, : self.n_genes] + 1e-8\n )\n flattened[:, self.n_genes :] = np.log1p(flattened[:, self.n_genes :])\n if correlation_type == \"pearson\":\n corr_matrix = np.corrcoef(flattened, rowvar=False)\n else:\n corr_matrix, _ = spearmanr(flattened, axis=0)\n corr_mats.append(corr_matrix)\n\n corr_matrix = np.mean(np.stack(corr_mats), axis=0)\n var_names = adata.var_names\n names = np.concatenate(\n [\n np.asarray(var_names),\n self.protein_state_registry.column_names,\n ]\n )\n return pd.DataFrame(corr_matrix, index=names, columns=names)\n\n @torch.no_grad()\n def get_likelihood_parameters(\n self,\n adata: Optional[AnnData] = None,\n indices: Optional[Sequence[int]] = None,\n n_samples: Optional[int] = 1,\n give_mean: Optional[bool] = False,\n batch_size: Optional[int] = None,\n ) -> Dict[str, np.ndarray]:\n r\"\"\"\n Estimates for the parameters of the likelihood :math:`p(x, y \\mid z)`.\n\n Parameters\n ----------\n adata\n AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the\n AnnData object used to initialize the model.\n indices\n Indices of cells in adata to use. If `None`, all cells are used.\n n_samples\n Number of posterior samples to use for estimation.\n give_mean\n Return expected value of parameters or a samples\n batch_size\n Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.\n \"\"\"\n raise NotImplementedError\n\n def _validate_anndata(\n self, adata: Optional[AnnData] = None, copy_if_view: bool = True\n ):\n adata = super()._validate_anndata(adata=adata, copy_if_view=copy_if_view)\n error_msg = \"Number of {} in anndata different from when setup_anndata was run. 
Please rerun setup_anndata.\"\n if REGISTRY_KEYS.PROTEIN_EXP_KEY in self.adata_manager.data_registry.keys():\n pro_exp = self.get_from_registry(adata, REGISTRY_KEYS.PROTEIN_EXP_KEY)\n if self.summary_stats.n_proteins != pro_exp.shape[1]:\n raise ValueError(error_msg.format(\"proteins\"))\n is_nonneg_int = _check_nonnegative_integers(pro_exp)\n if not is_nonneg_int:\n warnings.warn(\n \"Make sure the registered protein expression in anndata contains unnormalized count data.\"\n )\n else:\n raise ValueError(\"No protein data found, please setup or transfer anndata\")\n\n return adata\n\n def _get_totalvi_protein_priors(self, adata, n_cells=100):\n \"\"\"Compute an empirical prior for protein background.\"\"\"\n import warnings\n\n from sklearn.exceptions import ConvergenceWarning\n from sklearn.mixture import GaussianMixture\n\n warnings.filterwarnings(\"error\")\n logger.info(\"Computing empirical prior initialization for protein background.\")\n\n adata = self._validate_anndata(adata)\n adata_manager = self.get_anndata_manager(adata)\n pro_exp = adata_manager.get_from_registry(REGISTRY_KEYS.PROTEIN_EXP_KEY)\n pro_exp = pro_exp.to_numpy() if isinstance(pro_exp, pd.DataFrame) else pro_exp\n batch_mask = adata_manager.get_state_registry(\n REGISTRY_KEYS.PROTEIN_EXP_KEY\n ).get(ProteinObsmField.PROTEIN_BATCH_MASK)\n batch = adata_manager.get_from_registry(REGISTRY_KEYS.BATCH_KEY).ravel()\n cats = adata_manager.get_state_registry(REGISTRY_KEYS.BATCH_KEY)[\n CategoricalObsField.CATEGORICAL_MAPPING_KEY\n ]\n codes = np.arange(len(cats))\n\n batch_avg_mus, batch_avg_scales = [], []\n for b in np.unique(codes):\n # can happen during online updates\n # the values of these batches will not be used\n num_in_batch = np.sum(batch == b)\n if num_in_batch == 0:\n batch_avg_mus.append(0)\n batch_avg_scales.append(1)\n continue\n batch_pro_exp = pro_exp[batch == b]\n\n # non missing\n if batch_mask is not None:\n batch_pro_exp = batch_pro_exp[:, batch_mask[b]]\n if batch_pro_exp.shape[1] < 5:\n logger.debug(\n f\"Batch {b} has too few proteins to set prior, setting randomly.\"\n )\n batch_avg_mus.append(0.0)\n batch_avg_scales.append(0.05)\n continue\n\n # a batch is missing because it's in the reference but not query data\n # for scarches case, these values will be replaced by original state dict\n if batch_pro_exp.shape[0] == 0:\n batch_avg_mus.append(0.0)\n batch_avg_scales.append(0.05)\n continue\n\n cells = np.random.choice(np.arange(batch_pro_exp.shape[0]), size=n_cells)\n batch_pro_exp = batch_pro_exp[cells]\n gmm = GaussianMixture(n_components=2)\n mus, scales = [], []\n # fit per cell GMM\n for c in batch_pro_exp:\n try:\n gmm.fit(np.log1p(c.reshape(-1, 1)))\n # when cell is all 0\n except ConvergenceWarning:\n mus.append(0)\n scales.append(0.05)\n continue\n\n means = gmm.means_.ravel()\n sorted_fg_bg = np.argsort(means)\n mu = means[sorted_fg_bg].ravel()[0]\n covariances = gmm.covariances_[sorted_fg_bg].ravel()[0]\n scale = np.sqrt(covariances)\n mus.append(mu)\n scales.append(scale)\n\n # average distribution over cells\n batch_avg_mu = np.mean(mus)\n batch_avg_scale = np.sqrt(np.sum(np.square(scales)) / (n_cells**2))\n\n batch_avg_mus.append(batch_avg_mu)\n batch_avg_scales.append(batch_avg_scale)\n\n # repeat prior for each protein\n batch_avg_mus = np.array(batch_avg_mus, dtype=np.float32).reshape(1, -1)\n batch_avg_scales = np.array(batch_avg_scales, dtype=np.float32).reshape(1, -1)\n batch_avg_mus = np.tile(batch_avg_mus, (pro_exp.shape[1], 1))\n batch_avg_scales = np.tile(batch_avg_scales, 
(pro_exp.shape[1], 1))\n\n warnings.resetwarnings()\n\n return batch_avg_mus, batch_avg_scales\n\n @torch.no_grad()\n def get_protein_background_mean(self, adata, indices, batch_size):\n adata = self._validate_anndata(adata)\n scdl = self._make_data_loader(\n adata=adata, indices=indices, batch_size=batch_size\n )\n background_mean = []\n for tensors in scdl:\n _, inference_outputs, _ = self.module.forward(tensors)\n b_mean = inference_outputs[\"py_\"][\"rate_back\"]\n background_mean += [b_mean.cpu().numpy()]\n return np.concatenate(background_mean)\n\n @classmethod\n @setup_anndata_dsp.dedent\n def setup_anndata(\n cls,\n adata: AnnData,\n protein_expression_obsm_key: str,\n protein_names_uns_key: Optional[str] = None,\n batch_key: Optional[str] = None,\n layer: Optional[str] = None,\n categorical_covariate_keys: Optional[List[str]] = None,\n continuous_covariate_keys: Optional[List[str]] = None,\n **kwargs,\n ) -> Optional[AnnData]:\n \"\"\"\n %(summary)s.\n\n Parameters\n ----------\n %(param_adata)s\n protein_expression_obsm_key\n key in `adata.obsm` for protein expression data.\n protein_names_uns_key\n key in `adata.uns` for protein names. If None, will use the column names of `adata.obsm[protein_expression_obsm_key]`\n if it is a DataFrame, else will assign sequential names to proteins.\n %(param_batch_key)s\n %(param_layer)s\n %(param_cat_cov_keys)s\n %(param_cont_cov_keys)s\n %(param_copy)s\n\n Returns\n -------\n %(returns)s\n \"\"\"\n setup_method_args = cls._get_setup_method_args(**locals())\n batch_field = CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key)\n anndata_fields = [\n LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),\n CategoricalObsField(\n REGISTRY_KEYS.LABELS_KEY, None\n ), # Default labels field for compatibility with TOTALVAE\n batch_field,\n CategoricalJointObsField(\n REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys\n ),\n NumericalJointObsField(\n REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys\n ),\n ProteinObsmField(\n REGISTRY_KEYS.PROTEIN_EXP_KEY,\n protein_expression_obsm_key,\n use_batch_mask=True,\n batch_key=batch_field.attr_key,\n colnames_uns_key=protein_names_uns_key,\n is_count_data=True,\n ),\n ]\n adata_manager = AnnDataManager(\n fields=anndata_fields, setup_method_args=setup_method_args\n )\n adata_manager.register_fields(adata, **kwargs)\n cls.register_manager(adata_manager)\n", "import os\nimport pickle\n\nimport numpy as np\nimport pytest\nimport torch\n\nimport scvi\nfrom scvi.data import synthetic_iid\nfrom scvi.external import GIMVI\n\n\ndef test_saving_and_loading(save_path):\n def legacy_save(\n model,\n dir_path,\n prefix=None,\n overwrite=False,\n save_anndata=False,\n **anndata_write_kwargs,\n ):\n # get all the user attributes\n user_attributes = model._get_user_attributes()\n # only save the public attributes with _ at the very end\n user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == \"_\"}\n # save the model state dict and the trainer state dict only\n if not os.path.exists(dir_path) or overwrite:\n os.makedirs(dir_path, exist_ok=overwrite)\n else:\n raise ValueError(\n \"{} already exists. 
Please provide an unexisting directory for saving.\".format(\n dir_path\n )\n )\n\n file_name_prefix = prefix or \"\"\n\n if save_anndata:\n dataset_names = [\"seq\", \"spatial\"]\n for i in range(len(model.adatas)):\n dataset_name = dataset_names[i]\n save_path = os.path.join(\n dir_path, f\"{file_name_prefix}adata_{dataset_name}.h5ad\"\n )\n model.adatas[i].write(save_path)\n varnames_save_path = os.path.join(\n dir_path, f\"{file_name_prefix}var_names_{dataset_name}.csv\"\n )\n\n var_names = model.adatas[i].var_names.astype(str)\n var_names = var_names.to_numpy()\n np.savetxt(varnames_save_path, var_names, fmt=\"%s\")\n\n model_save_path = os.path.join(dir_path, f\"{file_name_prefix}model_params.pt\")\n attr_save_path = os.path.join(dir_path, f\"{file_name_prefix}attr.pkl\")\n\n torch.save(model.module.state_dict(), model_save_path)\n with open(attr_save_path, \"wb\") as f:\n pickle.dump(user_attributes, f)\n\n def test_save_and_load(save_path, legacy=False):\n prefix = \"GIMVI_\"\n adata = synthetic_iid()\n GIMVI.setup_anndata(\n adata,\n batch_key=\"batch\",\n )\n\n # GIMVI\n model = GIMVI(adata, adata)\n model.train(3, train_size=0.5)\n z1 = model.get_latent_representation([adata])\n z2 = model.get_latent_representation([adata])\n np.testing.assert_array_equal(z1, z2)\n if legacy:\n legacy_save(\n model, save_path, overwrite=True, save_anndata=True, prefix=prefix\n )\n else:\n model.save(save_path, overwrite=True, save_anndata=True, prefix=prefix)\n model = GIMVI.load(save_path, prefix=prefix)\n model.get_latent_representation()\n tmp_adata = scvi.data.synthetic_iid(n_genes=200)\n with pytest.raises(ValueError):\n GIMVI.load(\n save_path, adata_seq=tmp_adata, adata_spatial=tmp_adata, prefix=prefix\n )\n model = GIMVI.load(\n save_path, adata_seq=adata, adata_spatial=adata, prefix=prefix\n )\n z2 = model.get_latent_representation([adata])\n np.testing.assert_array_equal(z1, z2)\n model = GIMVI.load(\n save_path,\n adata_seq=adata,\n adata_spatial=adata,\n use_gpu=False,\n prefix=prefix,\n )\n z2 = model.get_latent_representation([adata])\n np.testing.assert_almost_equal(z1, z2, decimal=3)\n assert model.is_trained is True\n\n test_save_and_load(save_path, legacy=True)\n test_save_and_load(save_path)\n # Test load prioritizes newer save paradigm and thus mismatches legacy save.\n with pytest.raises(AssertionError):\n test_save_and_load(save_path, legacy=True)\n\n\ndef test_gimvi():\n adata_seq = synthetic_iid()\n adata_spatial = synthetic_iid()\n GIMVI.setup_anndata(\n adata_seq,\n batch_key=\"batch\",\n labels_key=\"labels\",\n )\n GIMVI.setup_anndata(\n adata_spatial,\n batch_key=\"batch\",\n labels_key=\"labels\",\n )\n model = GIMVI(adata_seq, adata_spatial, n_latent=10)\n assert hasattr(model.module, \"library_log_means_0\") and not hasattr(\n model.module, \"library_log_means_1\"\n )\n model.train(1, check_val_every_n_epoch=1, train_size=0.5)\n model.get_latent_representation()\n model.get_imputed_values()\n\n adata_spatial.var_names += \"asdf\"\n GIMVI.setup_anndata(\n adata_spatial,\n batch_key=\"batch\",\n labels_key=\"labels\",\n )\n with pytest.raises(ValueError):\n model = GIMVI(adata_seq, adata_spatial)\n\n\ndef test_gimvi_model_library_size():\n adata_seq = synthetic_iid()\n adata_spatial = synthetic_iid()\n GIMVI.setup_anndata(\n adata_seq,\n batch_key=\"batch\",\n labels_key=\"labels\",\n )\n GIMVI.setup_anndata(\n adata_spatial,\n batch_key=\"batch\",\n labels_key=\"labels\",\n )\n model = GIMVI(\n adata_seq, adata_spatial, model_library_size=[True, True], 
n_latent=10\n    )\n    assert hasattr(model.module, \"library_log_means_0\") and hasattr(\n        model.module, \"library_log_means_1\"\n    )\n    model.train(1, check_val_every_n_epoch=1, train_size=0.5)\n    model.get_latent_representation()\n    model.get_imputed_values()\n", "import logging\nimport os\nimport zipfile\n\nimport anndata\nimport numpy as np\nimport pandas as pd\n\nfrom scvi.data._built_in_data._download import _download\n\nlogger = logging.getLogger(__name__)\n\n\ndef _load_seqfishplus(\n    save_path: str = \"data/\",\n    tissue_region: str = \"subventricular cortex\",\n) -> anndata.AnnData:\n\n    if tissue_region == \"subventricular cortex\":\n        file_prefix = \"cortex_svz\"\n    elif tissue_region == \"olfactory bulb\":\n        file_prefix = \"ob\"\n    else:\n        raise ValueError(\n            '`tissue_type` must be \"subventricular cortex\" or \"olfactory bulb\", but got {}'.format(\n                tissue_region\n            )\n        )\n\n    save_path = os.path.abspath(save_path)\n    url = \"https://github.com/CaiGroup/seqFISH-PLUS/raw/master/sourcedata.zip\"\n    save_fn = \"seqfishplus.zip\"\n\n    _download(url, save_path, save_fn)\n    adata = _load_seqfishplus_data(\n        os.path.join(save_path, save_fn), file_prefix, save_path, gene_by_cell=False\n    )\n    adata.obs[\"batch\"] = np.zeros(adata.shape[0], dtype=np.int64)\n    adata.obs[\"labels\"] = np.zeros(adata.shape[0], dtype=np.int64)\n\n    return adata\n\n\ndef _load_seqfishplus_data(\n    path_to_file: str, file_prefix: str, save_path: str, gene_by_cell: bool = False\n) -> anndata.AnnData:\n    counts_filename = \"sourcedata/{}_counts.csv\".format(file_prefix)\n    coordinates_filename = \"sourcedata/{}_cellcentroids.csv\".format(file_prefix)\n    extract_location = os.path.join(save_path, \"seqfishplus\")\n    if not os.path.exists(extract_location):\n        os.makedirs(extract_location)\n    with zipfile.ZipFile(path_to_file) as f:\n        f.extract(counts_filename, path=extract_location)\n        f.extract(coordinates_filename, path=extract_location)\n\n    df_counts = pd.read_csv(os.path.join(extract_location, counts_filename))\n    adata = anndata.AnnData(df_counts)\n    adata.var_names = df_counts.columns\n    df_coordinates = pd.read_csv(os.path.join(extract_location, coordinates_filename))\n\n    adata.obs[\"X\"] = df_coordinates[\"X\"].values\n    adata.obs[\"Y\"] = df_coordinates[\"Y\"].values\n    adata.obs[\"cell_id\"] = df_coordinates[\"Cell ID\"].values\n    adata.obs[\"field_of_view\"] = df_coordinates[\"Field of View\"].values\n\n    return adata\n\n\ndef _load_seqfish(save_path: str = \"data/\") -> anndata.AnnData:\n    save_path = os.path.abspath(save_path)\n    url = \"https://www.cell.com/cms/attachment/2080562255/2072099886/mmc6.xlsx\"\n    save_fn = \"SeqFISH.xlsx\"\n    _download(url, save_path, save_fn)\n    adata = _load_seqfish_data(os.path.join(save_path, save_fn))\n    adata.obs[\"batch\"] = np.zeros(adata.shape[0], dtype=np.int64)\n    adata.obs[\"labels\"] = np.zeros(adata.shape[0], dtype=np.int64)\n    return adata\n\n\ndef _load_seqfish_data(path_to_file: str) -> anndata.AnnData:\n    logger.info(\"Loading seqfish dataset from {}\".format(path_to_file))\n    counts = pd.read_excel(\n        path_to_file, sheet_name=\"Hippocampus Counts\", engine=\"openpyxl\"\n    )\n    data = (\n        counts.values[:, 1:].astype(int).T\n    ) # transpose because counts is genes X cells\n    gene_names = counts.values[:, 0].astype(str)\n    adata = anndata.AnnData(pd.DataFrame(data=data, columns=gene_names))\n    logger.info(\"Finished loading seqfish dataset\")\n    return adata\n" ]
[ [ "numpy.random.randint" ], [ "torch.mean", "numpy.sqrt", "torch.distributions.Gamma", "torch.cat", "numpy.asarray", "pandas.DataFrame", "numpy.concatenate", "torch.no_grad", "numpy.mean", "scipy.stats.spearmanr", "numpy.square", "numpy.unique", "numpy.arange", "torch.distributions.Bernoulli", "numpy.stack", "numpy.log1p", "numpy.zeros", "torch.ones_like", "numpy.log", "torch.sigmoid", "numpy.random.choice", "torch.zeros_like", "torch.exp", "numpy.transpose", "torch.stack", "sklearn.mixture.GaussianMixture", "numpy.corrcoef", "numpy.argsort", "numpy.array", "numpy.sum", "torch.nn.functional.normalize", "numpy.tile" ], [ "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.savetxt" ], [ "pandas.read_excel", "numpy.zeros", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
xiaowu0162/mobilenet_compression
[ "a04fa087ac84b0918fb49ef77bf8439d02cbcf1f" ]
[ "old_code/utils.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom nni.compression.pytorch.utils.counter import count_flops_params\n\nfrom mobilenet import MobileNet\nfrom mobilenet_v2 import MobileNetV2\n\n\ndef create_model(model_type=None, n_classes=120, input_size=224, checkpoint=None, pretrained=False, width_mult=1.):\n if model_type == 'mobilenet_v1':\n model = MobileNet(n_class=n_classes, profile='normal')\n elif model_type == 'mobilenet_v2':\n model = MobileNetV2(n_class=n_classes, input_size=input_size, width_mult=width_mult)\n elif model_type == 'mobilenet_v2_torchhub':\n model = torch.hub.load('pytorch/vision:v0.8.1', 'mobilenet_v2', pretrained=pretrained)\n # model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=pretrained)\n feature_size = model.classifier[1].weight.data.size()[1]\n replace_classifier = torch.nn.Linear(feature_size, n_classes)\n model.classifier[1] = replace_classifier\n elif model_type is None:\n model = None\n else:\n raise RuntimeError('Unknown model_type.')\n\n if checkpoint is not None:\n model.load_state_dict(torch.load(checkpoint))\n \n return model\n\n\nclass TrainDataset(Dataset):\n def __init__(self, npy_dir):\n self.root_dir = npy_dir\n self.case_names = [self.root_dir + '/' + x for x in os.listdir(self.root_dir)]\n \n transform_set = [transforms.Lambda(lambda x: x),\n transforms.RandomRotation(30),\n # transforms.RandomPerspective(),\n transforms.ColorJitter(),\n transforms.RandomHorizontalFlip(p=1)]\n self.transform = transforms.RandomChoice(transform_set)\n \n # self.transform = transforms.AutoAugment(transforms.AutoAugmentPolicy.IMAGENET)\n \n def __len__(self):\n return len(self.case_names)\n\n def __getitem__(self, index):\n instance = np.load(self.case_names[index], allow_pickle=True).item()\n x = instance['input'].transpose(2, 0, 1) # (C, H, W)\n x = torch.from_numpy(x).type(torch.float)#.type(torch.uint8) # convert to Tensor to use torchvision.transforms\n x = self.transform(x)\n return x, instance['label']\n\n\nclass EvalDataset(Dataset):\n def __init__(self, npy_dir):\n self.root_dir = npy_dir\n self.case_names = [self.root_dir + '/' + x for x in os.listdir(self.root_dir)]\n\n def __len__(self):\n return len(self.case_names)\n\n def __getitem__(self, index):\n instance = np.load(self.case_names[index], allow_pickle=True).item()\n x = instance['input'].transpose(2, 0, 1)\n x = torch.from_numpy(x).type(torch.float) #.type(torch.uint8)\n return x, instance['label']\n\n\ndef count_flops(model, log=None):\n dummy_input = torch.rand([1, 3, 256, 256])\n flops, params, results = count_flops_params(model, dummy_input)\n print(f\"FLOPs: {flops}, params: {params}\")\n if log is not None:\n log.write(f\"FLOPs: {flops}, params: {params}\\n\")\n return flops, params\n" ]
[ [ "torch.load", "torch.from_numpy", "torch.nn.Linear", "torch.rand", "numpy.load", "torch.hub.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LindgeW/AT4MTL
[ "8fe0f142f830b7f2cb170ffe53ec535ca5bb3eab" ]
[ "modules/grl.py" ]
[ "import torch\n\n\n# Gradient Reversal Layer\nclass GRLayer(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, lmbd=0.01):\n ctx.lmbd = torch.tensor(lmbd)\n return x.reshape_as(x)\n\n @staticmethod\n # 输入为forward输出的梯度\n def backward(ctx, grad_output):\n grad_input = grad_output.clone()\n return ctx.lmbd * grad_input.neg(), None\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akobiisr/wetectron
[ "9973eb4f1716fbc5f46b41a7f93fb1f1c78bc8c7" ]
[ "wetectron/modeling/rpn/retinanet/inference.py" ]
[ "import torch\n\nfrom ..inference import RPNPostProcessor\nfrom ..utils import permute_and_flatten\n\nfrom wetectron.modeling.box_coder import BoxCoder\nfrom wetectron.modeling.utils import cat\nfrom wetectron.structures.bounding_box import BoxList\nfrom wetectron.structures.boxlist_ops import cat_boxlist\nfrom wetectron.structures.boxlist_ops import boxlist_nms\nfrom wetectron.structures.boxlist_ops import remove_small_boxes\n\n\nclass RetinaNetPostProcessor(RPNPostProcessor):\n \"\"\"\n Performs post-processing on the outputs of the RetinaNet boxes.\n This is only used in the testing.\n \"\"\"\n def __init__(\n self,\n pre_nms_thresh,\n pre_nms_top_n,\n nms_thresh,\n fpn_post_nms_top_n,\n min_size,\n num_classes,\n box_coder=None,\n ):\n \"\"\"\n Arguments:\n pre_nms_thresh (float)\n pre_nms_top_n (int)\n nms_thresh (float)\n fpn_post_nms_top_n (int)\n min_size (int)\n num_classes (int)\n box_coder (BoxCoder)\n \"\"\"\n super(RetinaNetPostProcessor, self).__init__(\n pre_nms_thresh, 0, nms_thresh, min_size\n )\n self.pre_nms_thresh = pre_nms_thresh\n self.pre_nms_top_n = pre_nms_top_n\n self.nms_thresh = nms_thresh\n self.fpn_post_nms_top_n = fpn_post_nms_top_n\n self.min_size = min_size\n self.num_classes = num_classes\n\n if box_coder is None:\n box_coder = BoxCoder(weights=(10., 10., 5., 5.))\n self.box_coder = box_coder\n \n def add_gt_proposals(self, proposals, targets):\n \"\"\"\n This function is not used in RetinaNet\n \"\"\"\n pass\n\n def forward_for_single_feature_map(\n self, anchors, box_cls, box_regression):\n \"\"\"\n Arguments:\n anchors: list[BoxList]\n box_cls: tensor of size N, A * C, H, W\n box_regression: tensor of size N, A * 4, H, W\n \"\"\"\n device = box_cls.device\n N, _, H, W = box_cls.shape\n A = box_regression.size(1) // 4\n C = box_cls.size(1) // A\n\n # put in the same format as anchors\n box_cls = permute_and_flatten(box_cls, N, A, C, H, W)\n box_cls = box_cls.sigmoid()\n\n box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)\n box_regression = box_regression.reshape(N, -1, 4)\n\n num_anchors = A * H * W\n\n candidate_inds = box_cls > self.pre_nms_thresh\n\n pre_nms_top_n = candidate_inds.view(N, -1).sum(1)\n pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)\n\n results = []\n for per_box_cls, per_box_regression, per_pre_nms_top_n, \\\n per_candidate_inds, per_anchors in zip(\n box_cls,\n box_regression,\n pre_nms_top_n,\n candidate_inds,\n anchors):\n\n # Sort and select TopN\n # TODO most of this can be made out of the loop for\n # all images. \n # TODO:Yang: Not easy to do. Because the numbers of detections are\n # different in each image. Therefore, this part needs to be done\n # per image. 
\n per_box_cls = per_box_cls[per_candidate_inds]\n \n per_box_cls, top_k_indices = \\\n per_box_cls.topk(per_pre_nms_top_n, sorted=False)\n\n per_candidate_nonzeros = \\\n per_candidate_inds.nonzero()[top_k_indices, :]\n\n per_box_loc = per_candidate_nonzeros[:, 0]\n per_class = per_candidate_nonzeros[:, 1]\n per_class += 1\n\n detections = self.box_coder.decode(\n per_box_regression[per_box_loc, :].view(-1, 4),\n per_anchors.bbox[per_box_loc, :].view(-1, 4)\n )\n\n boxlist = BoxList(detections, per_anchors.size, mode=\"xyxy\")\n boxlist.add_field(\"labels\", per_class)\n boxlist.add_field(\"scores\", per_box_cls)\n boxlist = boxlist.clip_to_image(remove_empty=False)\n boxlist = remove_small_boxes(boxlist, self.min_size)\n results.append(boxlist)\n\n return results\n\n # TODO very similar to filter_results from PostProcessor\n # but filter_results is per image\n # TODO Yang: solve this issue in the future. No good solution\n # right now.\n def select_over_all_levels(self, boxlists):\n num_images = len(boxlists)\n results = []\n for i in range(num_images):\n scores = boxlists[i].get_field(\"scores\")\n labels = boxlists[i].get_field(\"labels\")\n boxes = boxlists[i].bbox\n boxlist = boxlists[i]\n result = []\n # skip the background\n for j in range(1, self.num_classes):\n inds = (labels == j).nonzero().view(-1)\n\n scores_j = scores[inds]\n boxes_j = boxes[inds, :].view(-1, 4)\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\n boxlist_for_class.add_field(\"scores\", scores_j)\n boxlist_for_class = boxlist_nms(\n boxlist_for_class, self.nms_thresh,\n score_field=\"scores\"\n )\n num_labels = len(boxlist_for_class)\n boxlist_for_class.add_field(\n \"labels\", torch.full((num_labels,), j,\n dtype=torch.int64,\n device=scores.device)\n )\n result.append(boxlist_for_class)\n\n result = cat_boxlist(result)\n number_of_detections = len(result)\n\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > self.fpn_post_nms_top_n > 0:\n cls_scores = result.get_field(\"scores\")\n image_thresh, _ = torch.kthvalue(\n cls_scores.cpu(),\n number_of_detections - self.fpn_post_nms_top_n + 1\n )\n keep = cls_scores >= image_thresh.item()\n keep = torch.nonzero(keep).squeeze(1)\n result = result[keep]\n results.append(result)\n return results\n\n\ndef make_retinanet_postprocessor(config, rpn_box_coder, is_train):\n pre_nms_thresh = config.MODEL.RETINANET.INFERENCE_TH\n pre_nms_top_n = config.MODEL.RETINANET.PRE_NMS_TOP_N\n nms_thresh = config.MODEL.RETINANET.NMS_TH\n fpn_post_nms_top_n = config.TEST.DETECTIONS_PER_IMG\n min_size = 0\n\n box_selector = RetinaNetPostProcessor(\n pre_nms_thresh=pre_nms_thresh,\n pre_nms_top_n=pre_nms_top_n,\n nms_thresh=nms_thresh,\n fpn_post_nms_top_n=fpn_post_nms_top_n,\n min_size=min_size,\n num_classes=config.MODEL.RETINANET.NUM_CLASSES,\n box_coder=rpn_box_coder,\n )\n\n return box_selector\n" ]
[ [ "torch.nonzero", "torch.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tjbanks/bmtk
[ "52fee3b230ceb14a666c46f57f2031c38f1ac5b1", "52fee3b230ceb14a666c46f57f2031c38f1ac5b1", "52fee3b230ceb14a666c46f57f2031c38f1ac5b1", "52fee3b230ceb14a666c46f57f2031c38f1ac5b1" ]
[ "bmtk/tests/utils/reports/spike_trains/test_file_writers.py", "bmtk/tests/utils/reports/spike_trains/create_files.py", "bmtk/simulator/filternet/lgnmodel/transferfunction.py", "bmtk/simulator/bionet/gids.py" ]
[ "import os\nimport pytest\nimport numpy as np\nimport pandas as pd\nimport h5py\n\nfrom bmtk.utils.reports.spike_trains import SpikeTrains, sort_order, pop_na\nfrom bmtk.utils.reports.spike_trains import write_csv\nfrom bmtk.utils.reports.spike_trains import write_sonata\n\n\ndef load_spike_trains(file_path):\n cpath = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(cpath, file_path)\n if file_path.endswith('.csv'):\n return SpikeTrains.from_csv(file_path)\n\n elif file_path.endswith('.h5'):\n return SpikeTrains.from_sonata(file_path)\n\n elif file_path.endswith('.nwb'):\n return SpikeTrains.from_nwb(file_path)\n\n\[email protected]('input_path,pop_name',\n [\n ('spike_files/spikes.noheader.nopop.csv', pop_na),\n ('spike_files/spikes.one_pop.csv', 'v1'),\n ('spike_files/spikes.old.h5', pop_na),\n ('spike_files/spikes.one_pop.h5', 'v1'),\n ('spike_files/spikes.onepop.v1.0.nwb', pop_na)\n ])\ndef test_csv_writer_onepop(input_path, pop_name):\n spikes = load_spike_trains(input_path)\n output_path = 'output/tmpspikes.csv'\n write_csv(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_time)\n output_df = pd.read_csv(output_path, sep=' ')\n assert(len(output_df) == 124)\n assert(output_df['population'].unique() == [pop_name])\n assert(np.all(np.diff(output_df['timestamps']) >= 0))\n\n write_csv(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_id)\n output_df = pd.read_csv(output_path, sep=' ')\n assert(len(output_df) == 124)\n assert(np.all(np.diff(output_df['node_ids']) >= 0))\n\n\[email protected]('input_path',\n [\n ('spike_files/spikes.multipop.csv'),\n ('spike_files/spikes.multipop.h5')\n ])\ndef test_csv_writer_multipop(input_path):\n spikes = load_spike_trains(input_path)\n output_path = 'output/tmpspikes.csv'\n write_csv(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_time)\n output_df = pd.read_csv(output_path, sep=' ')\n assert(len(output_df) == 144434)\n assert(np.all(np.diff(output_df['timestamps']) >= 0))\n os.remove(output_path)\n\n write_csv(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_id)\n output_df = pd.read_csv(output_path, sep=' ')\n assert(len(output_df) == 144434)\n output_lgn = output_df[output_df['population'] == 'v1']\n assert(np.all(np.diff(output_lgn['node_ids']) >= 0))\n output_tw = output_df[output_df['population'] == 'tw']\n assert(np.all(np.diff(output_tw['node_ids']) >= 0))\n os.remove(output_path)\n\n\[email protected]('input_path,pop_name',\n [\n ('spike_files/spikes.noheader.nopop.csv', pop_na),\n ('spike_files/spikes.one_pop.csv', 'v1'),\n ('spike_files/spikes.old.h5', pop_na),\n ('spike_files/spikes.one_pop.h5', 'v1'),\n ('spike_files/spikes.onepop.v1.0.nwb', pop_na)\n ])\ndef test_sonata_writer_onepop(input_path, pop_name):\n spikes = load_spike_trains(input_path)\n output_path = 'output/tmpspikes.h5'\n write_sonata(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_time)\n spikes_h5 = h5py.File(output_path, 'r')\n spikes_grp = spikes_h5['/spikes/{}'.format(pop_name)]\n assert(spikes_grp.attrs['sorting'] == 'by_time')\n timestamps = spikes_grp['timestamps'][()]\n assert(len(timestamps) == 124)\n assert(np.all(np.diff(timestamps) >= 0))\n node_ids = spikes_grp['node_ids'][()]\n assert(len(node_ids) == 124)\n os.remove(output_path)\n\n write_sonata(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_id)\n spikes_h5 = h5py.File(output_path, 'r')\n spikes_grp = spikes_h5['/spikes/{}'.format(pop_name)]\n 
assert(spikes_grp.attrs['sorting'] == 'by_id')\n timestamps = spikes_grp['timestamps'][()]\n assert(len(timestamps) == 124)\n node_ids = spikes_grp['node_ids'][()]\n assert(np.all(np.diff(node_ids) >= 0))\n assert(len(node_ids) == 124)\n os.remove(output_path)\n\n\[email protected]('input_path',\n [\n ('spike_files/spikes.multipop.csv'),\n ('spike_files/spikes.multipop.h5')\n ])\ndef test_sonata_writer_multipop(input_path):\n spikes = load_spike_trains(input_path)\n output_path = 'output/tmpspikes.h5'\n write_sonata(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_time)\n spikes_h5 = h5py.File(output_path, 'r')\n lgn_spikes = spikes_h5['/spikes/lgn']\n lgn_timestamps = lgn_spikes['timestamps'][()]\n assert(len(lgn_timestamps) == 123356)\n assert(np.all(np.diff(lgn_timestamps) >= 0))\n assert(len(lgn_spikes['node_ids']) == 123356)\n assert(len(spikes_h5['/spikes/tw/timestamps']) == 21078)\n assert(len(spikes_h5['/spikes/tw/node_ids']) == 21078)\n os.remove(output_path)\n\n write_sonata(path=output_path, spiketrain_reader=spikes, sort_order=sort_order.by_id)\n spikes_h5 = h5py.File(output_path, 'r')\n lgn_spikes = spikes_h5['/spikes/lgn']\n lgn_node_ids = lgn_spikes['node_ids'][()]\n assert(len(lgn_node_ids) == 123356)\n assert(np.all(np.diff(lgn_node_ids) >= 0))\n assert(len(lgn_spikes['timestamps']) == 123356)\n assert(len(spikes_h5['/spikes/tw/timestamps']))\n assert(len(spikes_h5['/spikes/tw/node_ids']))\n os.remove(output_path)\n\n\ndef update(n=14):\n for i in range(n):\n print('{} of {}'.format(i+1, n))\n yield\n\n\nif __name__ == '__main__':\n prnt_stmt = update()\n test_csv_writer_onepop('spike_files/spikes.noheader.nopop.csv', pop_name=pop_na); next(prnt_stmt)\n test_csv_writer_onepop('spike_files/spikes.one_pop.csv', pop_name='v1'); next(prnt_stmt)\n test_csv_writer_onepop('spike_files/spikes.old.h5', pop_name=pop_na); next(prnt_stmt)\n test_csv_writer_onepop('spike_files/spikes.one_pop.h5', pop_name='v1'); next(prnt_stmt)\n test_csv_writer_onepop('spike_files/spikes.onepop.v1.0.nwb', pop_name=pop_na); next(prnt_stmt)\n\n test_csv_writer_multipop('spike_files/spikes.multipop.csv'); next(prnt_stmt)\n test_csv_writer_multipop('spike_files/spikes.multipop.h5'); next(prnt_stmt)\n\n test_sonata_writer_onepop('spike_files/spikes.noheader.nopop.csv', pop_name=pop_na); next(prnt_stmt)\n test_sonata_writer_onepop('spike_files/spikes.one_pop.csv', pop_name='v1'); next(prnt_stmt)\n test_sonata_writer_onepop('spike_files/spikes.old.h5', pop_name=pop_na); next(prnt_stmt)\n test_sonata_writer_onepop('spike_files/spikes.one_pop.h5', pop_name='v1'); next(prnt_stmt)\n test_sonata_writer_onepop('spike_files/spikes.onepop.v1.0.nwb', pop_name=pop_na); next(prnt_stmt)\n\n test_sonata_writer_multipop('spike_files/spikes.multipop.csv'); next(prnt_stmt)\n test_sonata_writer_multipop('spike_files/spikes.multipop.h5'); next(prnt_stmt)\n", "import os\nimport numpy as np\nimport pandas as pd\nimport h5py\n\nfrom bmtk.utils.sonata.utils import add_hdf5_magic, add_hdf5_version\n\n\ndef create_single_pop_h5():\n h5_file_old = h5py.File('spike_files/spikes.old.h5', 'r')\n node_ids = h5_file_old['/spikes/gids']\n timestamps = h5_file_old['/spikes/timestamps']\n\n with h5py.File('spike_files/spikes.one_pop.h5', 'w') as h5:\n add_hdf5_magic(h5)\n add_hdf5_version(h5)\n core_grp = h5.create_group('/spikes/v1')\n core_grp.attrs['sorting'] = 'by_time'\n ts_ds = core_grp.create_dataset('timestamps', data=timestamps, dtype=np.float64)\n ts_ds.attrs['units'] = 'milliseconds'\n nids_ds = 
core_grp.create_dataset('node_ids', data=node_ids, dtype=np.uint64)\n\n\ndef create_multipop_csv(dir_path='/local1/workspace/bmtk/docs/examples/NWB_files'):\n lgn_h5 = h5py.File(os.path.join(dir_path, 'lgn_spikes.nwb'), 'r')\n tw_h5 = h5py.File(os.path.join(dir_path, 'tw_spikes.nwb'), 'r')\n\n full_df = pd.DataFrame({\n 'timestamps': pd.Series(dtype=np.float64),\n 'population': pd.Series(dtype=np.string_),\n 'node_ids': pd.Series(dtype=np.uint64)\n })\n\n for pop_name, pop_h5, n_nodes in [('lgn', lgn_h5, 4000), ('tw', tw_h5, 2000)]:\n spike_train_grp = pop_h5['/processing/trial_0/spike_train']\n for node_id in range(n_nodes):\n tmp_df = pd.DataFrame({\n 'timestamps': spike_train_grp[str(node_id)]['data'][()],\n 'population': pop_name,\n 'node_ids': np.uint64(node_id)\n })\n\n full_df = full_df.append(tmp_df)\n\n full_df = full_df[['timestamps', 'population', 'node_ids']]\n full_df.to_csv('spike_files/spikes.multipop.csv', sep=' ', index=False)\n\n\ndef create_multipop_h5():\n spikes_df = pd.read_csv('spike_files/spikes.multipop.csv', sep=' ')\n lgn_spikes_df = spikes_df[spikes_df['population'] == 'lgn']\n tw_spikes_df = spikes_df[spikes_df['population'] == 'tw']\n with h5py.File('spike_files/spikes.multipop.h5', 'w') as h5:\n add_hdf5_magic(h5)\n add_hdf5_version(h5)\n\n lgn_grp = h5.create_group('/spikes/lgn')\n lgn_grp.attrs['sorting'] = 'by_id'\n ts_ds = lgn_grp.create_dataset('timestamps', data=lgn_spikes_df['timestamps'], dtype=np.float64)\n ts_ds.attrs['units'] = 'milliseconds'\n lgn_grp.create_dataset('node_ids', data=lgn_spikes_df['node_ids'], dtype=np.uint64)\n\n\n tw_grp = h5.create_group('/spikes/tw')\n tw_grp.attrs['sorting'] = 'by_id'\n ts_ds = tw_grp.create_dataset('timestamps', data=tw_spikes_df['timestamps'], dtype=np.float64)\n ts_ds.attrs['units'] = 'milliseconds'\n tw_grp.create_dataset('node_ids', data=tw_spikes_df['node_ids'], dtype=np.uint64)\n\n\ndef create_nwb():\n spikes_df = pd.read_csv('spike_files/spikes.one_pop.csv', sep=' ')\n with h5py.File('spike_files/spikes.onepop.v1.0.nwb', 'w') as h5:\n spikes_grp = h5.create_group('/processing/trial_0/spike_train')\n for node_id in range(14):\n timestamps = spikes_df[spikes_df['node_ids'] == node_id]['timestamps'].values\n data_ds = spikes_grp.create_dataset('{}/data'.format(node_id), data=timestamps, dtype=np.float64)\n data_ds.attrs['dimension'] = 'time'\n data_ds.attrs['unit'] = 'millisecond'\n\n\n\nif __name__ == '__main__':\n # create_multipop_csv()\n # create_multipop_h5()\n create_nwb()\n\n\n", "from sympy.utilities.lambdify import lambdify\nimport sympy.parsing.sympy_parser as symp\nimport sympy.abc\nimport numpy as np\n\n\nclass ScalarTransferFunction(object):\n def __init__(self, transfer_function_string, symbol=sympy.abc.s):\n self.symbol = symbol\n self.transfer_function_string = transfer_function_string\n self.closure = lambdify(self.symbol, symp.parse_expr(self.transfer_function_string), modules=['sympy'])\n \n def __call__(self, s):\n return self.closure(s)\n \n def to_dict(self):\n return {'class': (__name__, self.__class__.__name__),\n 'function': self.transfer_function_string}\n \n def imshow(self, xlim, ax=None, show=True, save_file_name=None, ylim=None):\n # TODO: This function should be removed (as Ram to see if/where it's used) since it will fail (no t_vals)\n import matplotlib.pyplot as plt\n if ax is None:\n _, ax = plt.subplots(1, 1)\n \n plt.plot(self.t_vals, self.kernel)\n ax.set_xlabel('Time (Seconds)')\n \n if ylim is not None:\n ax.set_ylim(ylim)\n \n if xlim is not None:\n 
ax.set_xlim((self.t_range[0], self.t_range[-1]))\n \n if save_file_name is not None:\n plt.savefig(save_file_name, transparent=True)\n \n if show:\n plt.show()\n \n return ax\n\n\nclass MultiTransferFunction(object):\n def __init__(self, symbol_tuple, transfer_function_string):\n self.symbol_tuple = symbol_tuple\n self.transfer_function_string = transfer_function_string\n self.closure = lambdify(self.symbol_tuple, symp.parse_expr(self.transfer_function_string), modules=['sympy'])\n\n def __call__(self, *s):\n if isinstance(s[0], (float,)):\n return self.closure(*s)\n else:\n return np.array(list(map(lambda x: self.closure(*x), zip(*s))))\n \n def to_dict(self):\n return {'class': (__name__, self.__class__.__name__),\n 'function': self.transfer_function_string}\n", "import numpy as np\nfrom collections import namedtuple\n\nPopulationID = namedtuple('PopulationID', 'node_id population')\n\n\nclass GidPool(object):\n def __init__(self):\n # map from pool-id --> gid\n self._accumulated_offset = 0\n self._pool_offsets = {}\n\n # map from gid --> pop, node_id\n self._offsets = np.array([0], dtype=np.uint64)\n self._offset2pool_map = {}\n\n def add_pool(self, name, n_nodes):\n offset_index = len(self._offsets)\n self._offset2pool_map[offset_index] = name\n self._offsets = np.append(self._offsets, np.array([self._accumulated_offset + n_nodes], dtype=np.uint64))\n\n self._pool_offsets[name] = self._accumulated_offset\n self._accumulated_offset += n_nodes\n\n def get_gid(self, name, node_id):\n return self._pool_offsets[name] + node_id\n\n def get_pool_id(self, gid):\n offset_indx = np.searchsorted(self._offsets, gid, 'right')\n node_id = gid - self._offsets[offset_indx-1]\n pool_name = self._offset2pool_map[offset_indx]\n return PopulationID(int(node_id), pool_name)\n" ]
[ [ "pandas.read_csv", "numpy.diff" ], [ "pandas.read_csv", "numpy.uint64", "pandas.Series" ], [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ], [ "numpy.array", "numpy.searchsorted" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joaofonseca9/mimic3-benchmarks-TBI_edit
[ "21533b011433272b06d970fa47fe1c8cbfeb5351" ]
[ "mimic3benchmark/preprocessing.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport numpy as np\nimport re\n\nfrom pandas import DataFrame, Series\n\nfrom mimic3benchmark.util import dataframe_from_csv\n\n###############################\n# Non-time series preprocessing\n###############################\n\ng_map = {'F': 1, 'M': 2, 'OTHER': 3, '': 0}\n\n\ndef transform_gender(gender_series):\n global g_map\n return {'Gender': gender_series.fillna('').apply(lambda s: g_map[s] if s in g_map else g_map['OTHER'])}\n\n\ne_map = {'ASIAN': 1,\n 'BLACK': 2,\n 'CARIBBEAN ISLAND': 2,\n 'HISPANIC': 3,\n 'SOUTH AMERICAN': 3,\n 'WHITE': 4,\n 'MIDDLE EASTERN': 4,\n 'PORTUGUESE': 4,\n 'AMERICAN INDIAN': 0,\n 'NATIVE HAWAIIAN': 0,\n 'UNABLE TO OBTAIN': 0,\n 'PATIENT DECLINED TO ANSWER': 0,\n 'UNKNOWN': 0,\n 'OTHER': 0,\n '': 0}\n\n\ndef transform_ethnicity(ethnicity_series):\n global e_map\n\n def aggregate_ethnicity(ethnicity_str):\n return ethnicity_str.replace(' OR ', '/').split(' - ')[0].split('/')[0]\n\n ethnicity_series = ethnicity_series.apply(aggregate_ethnicity)\n return {'Ethnicity': ethnicity_series.fillna('').apply(lambda s: e_map[s] if s in e_map else e_map['OTHER'])}\n\n\ndef assemble_episodic_data(stays, diagnoses):\n data = {'Icustay': stays.ICUSTAY_ID, 'Age': stays.AGE, 'Length of Stay': stays.LOS,\n 'Mortality': stays.MORTALITY}\n data.update(transform_gender(stays.GENDER))\n data.update(transform_ethnicity(stays.ETHNICITY))\n data['Height'] = np.nan\n data['Weight'] = np.nan\n data = DataFrame(data).set_index('Icustay')\n data = data[['Ethnicity', 'Gender', 'Age', 'Height', 'Weight', 'Length of Stay', 'Mortality']]\n return data.merge(extract_diagnosis_labels(diagnoses), left_index=True, right_index=True)\n\n\ndiagnosis_labels = ['4019', '4280', '41401', '42731', '25000', '5849', '2724', '51881', '53081', '5990', '2720',\n '2859', '2449', '486', '2762', '2851', '496', 'V5861', '99592', '311', '0389', '5859', '5070',\n '40390', '3051', '412', 'V4581', '2761', '41071', '2875', '4240', 'V1582', 'V4582', 'V5867',\n '4241', '40391', '78552', '5119', '42789', '32723', '49390', '9971', '2767', '2760', '2749',\n '4168', '5180', '45829', '4589', '73300', '5845', '78039', '5856', '4271', '4254', '4111',\n 'V1251', '30000', '3572', '60000', '27800', '41400', '2768', '4439', '27651', 'V4501', '27652',\n '99811', '431', '28521', '2930', '7907', 'E8798', '5789', '79902', 'V4986', 'V103', '42832',\n 'E8788', '00845', '5715', '99591', '07054', '42833', '4275', '49121', 'V1046', '2948', '70703',\n '2809', '5712', '27801', '42732', '99812', '4139', '3004', '2639', '42822', '25060', 'V1254',\n '42823', '28529', 'E8782', '30500', '78791', '78551', 'E8889', '78820', '34590', '2800', '99859',\n 'V667', 'E8497', '79092', '5723', '3485', '5601', '25040', '570', '71590', '2869', '2763', '5770',\n 'V5865', '99662', '28860', '36201', '56210']\n\n\ndef extract_diagnosis_labels(diagnoses):\n global diagnosis_labels\n diagnoses['VALUE'] = 1\n labels = diagnoses[['ICUSTAY_ID', 'ICD9_CODE', 'VALUE']].drop_duplicates()\\\n .pivot(index='ICUSTAY_ID', columns='ICD9_CODE', values='VALUE').fillna(0).astype(int)\n for l in diagnosis_labels:\n if l not in labels:\n labels[l] = 0\n labels = labels[diagnosis_labels]\n return labels.rename(dict(zip(diagnosis_labels, ['Diagnosis ' + d for d in diagnosis_labels])), axis=1)\n\n\ndef add_hcup_ccs_2015_groups(diagnoses, definitions):\n def_map = {}\n for dx in definitions:\n for code in definitions[dx]['codes']:\n def_map[code] = (dx, definitions[dx]['use_in_benchmark'])\n 
diagnoses['HCUP_CCS_2015'] = diagnoses.ICD9_CODE.apply(lambda c: def_map[c][0] if c in def_map else None)\n diagnoses['USE_IN_BENCHMARK'] = diagnoses.ICD9_CODE.apply(lambda c: int(def_map[c][1]) if c in def_map else None)\n return diagnoses\n\n\ndef make_phenotype_label_matrix(phenotypes, stays=None):\n phenotypes = phenotypes[['ICUSTAY_ID', 'HCUP_CCS_2015']].loc[phenotypes.USE_IN_BENCHMARK > 0].drop_duplicates()\n phenotypes['VALUE'] = 1\n phenotypes = phenotypes.pivot(index='ICUSTAY_ID', columns='HCUP_CCS_2015', values='VALUE')\n if stays is not None:\n phenotypes = phenotypes.reindex(stays.ICUSTAY_ID.sort_values())\n return phenotypes.fillna(0).astype(int).sort_index(axis=0).sort_index(axis=1)\n\n\n###################################\n# Time series preprocessing\n###################################\n\ndef read_itemid_to_variable_map(fn, variable_column='LEVEL2', added_fts=False):\n var_map = dataframe_from_csv(fn, index_col=None, header=0).fillna('').astype(str)\n # var_map[variable_column] = var_map[variable_column].apply(lambda s: s.lower())\n var_map.COUNT = var_map.COUNT.astype(float).astype(int)\n var_map = var_map[(var_map[variable_column] != '') & (var_map.COUNT > 0)]\n if added_fts:\n var_map = var_map[((var_map.STATUS == 'ready') | (var_map.STATUS == 'verify')) & (var_map.COUNT > 258343)]\n else:\n var_map = var_map[(var_map.STATUS == 'ready')]\n var_map.ITEMID = var_map.ITEMID.astype(float).astype(int)\n var_map = var_map[[variable_column, 'ITEMID', 'MIMIC LABEL']].set_index('ITEMID')\n return var_map.rename({variable_column: 'VARIABLE', 'MIMIC LABEL': 'MIMIC_LABEL'}, axis=1)\n\n\ndef map_itemids_to_variables(events, var_map):\n return events.merge(var_map, left_on='ITEMID', right_index=True)\n\n\ndef read_variable_ranges(fn, variable_column='LEVEL2'):\n columns = [variable_column, 'OUTLIER LOW', 'VALID LOW', 'IMPUTE', 'VALID HIGH', 'OUTLIER HIGH']\n to_rename = dict(zip(columns, [c.replace(' ', '_') for c in columns]))\n to_rename[variable_column] = 'VARIABLE'\n var_ranges = dataframe_from_csv(fn, index_col=None)\n # var_ranges = var_ranges[variable_column].apply(lambda s: s.lower())\n var_ranges = var_ranges[columns]\n var_ranges.rename(to_rename, axis=1, inplace=True)\n var_ranges = var_ranges.drop_duplicates(subset='VARIABLE', keep='first')\n var_ranges.set_index('VARIABLE', inplace=True)\n return var_ranges.loc[var_ranges.notnull().all(axis=1)]\n\n\ndef remove_outliers_for_variable(events, variable, ranges):\n if variable not in ranges.index:\n return events\n idx = (events.VARIABLE == variable)\n v = events.VALUE[idx].copy()\n v.loc[v < ranges.OUTLIER_LOW[variable]] = np.nan\n v.loc[v > ranges.OUTLIER_HIGH[variable]] = np.nan\n v.loc[v < ranges.VALID_LOW[variable]] = ranges.VALID_LOW[variable]\n v.loc[v > ranges.VALID_HIGH[variable]] = ranges.VALID_HIGH[variable]\n events.loc[idx, 'VALUE'] = v\n return events\n\n\n# SBP: some are strings of type SBP/DBP\ndef clean_sbp(df):\n v = df.VALUE.astype(str).copy()\n idx = v.apply(lambda s: '/' in s)\n v.loc[idx] = v[idx].apply(lambda s: re.match('^(\\d+)/(\\d+)$', s).group(1))\n return v.astype(float)\n\n\ndef clean_dbp(df):\n v = df.VALUE.astype(str).copy()\n idx = v.apply(lambda s: '/' in s)\n v.loc[idx] = v[idx].apply(lambda s: re.match('^(\\d+)/(\\d+)$', s).group(2))\n return v.astype(float)\n\n\n# CRR: strings with brisk, <3 normal, delayed, or >3 abnormal\ndef clean_crr(df):\n v = Series(np.zeros(df.shape[0]), index=df.index)\n v[:] = np.nan\n\n # when df.VALUE is empty, dtype can be float and comparision with string\n # 
raises an exception, to fix this we change dtype to str\n df_value_str = df.VALUE.astype(str)\n\n v.loc[(df_value_str == 'Normal <3 secs') | (df_value_str == 'Brisk')] = 0\n v.loc[(df_value_str == 'Abnormal >3 secs') | (df_value_str == 'Delayed')] = 1\n return v\n\n\n# FIO2: many 0s, some 0<x<0.2 or 1<x<20\ndef clean_fio2(df):\n v = df.VALUE.astype(float).copy()\n\n ''' The line below is the correct way of doing the cleaning, since we will not compare 'str' to 'float'.\n If we use that line it will create mismatches from the data of the paper in ~50 ICU stays.\n The next releases of the benchmark should use this line.\n '''\n # idx = df.VALUEUOM.fillna('').apply(lambda s: 'torr' not in s.lower()) & (v>1.0)\n\n ''' The line below was used to create the benchmark dataset that the paper used. Note this line will not work\n in python 3, since it may try to compare 'str' to 'float'.\n '''\n # idx = df.VALUEUOM.fillna('').apply(lambda s: 'torr' not in s.lower()) & (df.VALUE > 1.0)\n\n ''' The two following lines implement the code that was used to create the benchmark dataset that the paper used.\n This works with both python 2 and python 3.\n '''\n is_str = np.array(map(lambda x: type(x) == str, list(df.VALUE)), dtype=np.bool)\n idx = df.VALUEUOM.fillna('').apply(lambda s: 'torr' not in s.lower()) & (is_str | (~is_str & (v > 1.0)))\n\n v.loc[idx] = v[idx] / 100.\n return v\n\n\n# GLUCOSE, PH: sometimes have ERROR as value\ndef clean_lab(df):\n v = df.VALUE.copy()\n idx = v.apply(lambda s: type(s) is str and not re.match('^(\\d+(\\.\\d*)?|\\.\\d+)$', s))\n v.loc[idx] = np.nan\n return v.astype(float)\n\n\n# O2SAT: small number of 0<x<=1 that should be mapped to 0-100 scale\ndef clean_o2sat(df):\n # change \"ERROR\" to NaN\n v = df.VALUE.copy()\n idx = v.apply(lambda s: type(s) is str and not re.match('^(\\d+(\\.\\d*)?|\\.\\d+)$', s))\n v.loc[idx] = np.nan\n\n v = v.astype(float)\n idx = (v <= 1)\n v.loc[idx] = v[idx] * 100.\n return v\n\n\n# Temperature: map Farenheit to Celsius, some ambiguous 50<x<80\ndef clean_temperature(df):\n v = df.VALUE.astype(float).copy()\n idx = df.VALUEUOM.fillna('').apply(lambda s: 'F' in s.lower()) | df.MIMIC_LABEL.apply(lambda s: 'F' in s.lower()) | (v >= 79)\n v.loc[idx] = (v[idx] - 32) * 5. 
/ 9\n return v\n\n\n# Weight: some really light/heavy adults: <50 lb, >450 lb, ambiguous oz/lb\n# Children are tough for height, weight\ndef clean_weight(df):\n v = df.VALUE.astype(float).copy()\n # ounces\n idx = df.VALUEUOM.fillna('').apply(lambda s: 'oz' in s.lower()) | df.MIMIC_LABEL.apply(lambda s: 'oz' in s.lower())\n v.loc[idx] = v[idx] / 16.\n # pounds\n idx = idx | df.VALUEUOM.fillna('').apply(lambda s: 'lb' in s.lower()) | df.MIMIC_LABEL.apply(lambda s: 'lb' in s.lower())\n v.loc[idx] = v[idx] * 0.453592\n return v\n\n\n# Height: some really short/tall adults: <2 ft, >7 ft)\n# Children are tough for height, weight\ndef clean_height(df):\n v = df.VALUE.astype(float).copy()\n idx = df.VALUEUOM.fillna('').apply(lambda s: 'in' in s.lower()) | df.MIMIC_LABEL.apply(lambda s: 'in' in s.lower())\n v.loc[idx] = np.round(v[idx] * 2.54)\n return v\n\n# Potassium: one value was misimputed as '10k' instead of 10\ndef clean_potassium(df):\n v = df.VALUE.copy()\n # rebinding the loop variable would not modify the Series; replace the value in place instead\n v.loc[v == '10k'] = '10'\n return v\n\n\n# ETCO2: haven't found yet\n# Urine output: ambiguous units (raw ccs, ccs/kg/hr, 24-hr, etc.)\n# Tidal volume: tried to substitute for ETCO2 but units are ambiguous\n# Glascow coma scale eye opening\n# Glascow coma scale motor response\n# Glascow coma scale total\n# Glascow coma scale verbal response\n# Heart Rate\n# Respiratory rate\n# Mean blood pressure\nclean_fns = {\n 'Capillary refill rate': clean_crr,\n 'Diastolic blood pressure': clean_dbp,\n 'Systolic blood pressure': clean_sbp,\n 'Fraction inspired oxygen': clean_fio2,\n 'Oxygen saturation': clean_o2sat,\n 'Glucose': clean_lab,\n 'pH': clean_lab,\n 'Temperature': clean_temperature,\n 'Weight': clean_weight,\n 'Height': clean_height,\n 'Potassium': clean_potassium\n}\n\n\ndef clean_events(events):\n global clean_fns\n for var_name, clean_fn in clean_fns.items():\n idx = (events.VARIABLE == var_name)\n try:\n events.loc[idx, 'VALUE'] = clean_fn(events[idx])\n except Exception as e:\n import traceback\n print(\"Exception in clean_events:\", clean_fn.__name__, e)\n print(traceback.format_exc())\n print(\"number of rows:\", np.sum(idx))\n print(\"values:\", events[idx])\n exit()\n return events.loc[events.VALUE.notnull()]\n" ]
[ [ "numpy.round", "numpy.zeros", "numpy.sum", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
zhangqianhui/FUNIT_tensorflow
[ "16ea70e881d4aa2d38cd44c0fcb60f4ed821a24d" ]
[ "FUNIT.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tfLib.ops import *\nfrom tfLib.loss import *\nfrom Dataset import save_images\nimport os\nimport numpy as np\nimport PyLib.const as con\n\ncon.EPS = 1e-7\nclass FSUGAN(object):\n\n # build model\n def __init__(self, data_ob, opt):\n\n self.opt = opt\n # placeholder defination\n self.data_ob = data_ob\n self.x = tf.placeholder(tf.float32,[opt.batchSize, opt.image_size, opt.image_size, opt.input_nc])\n self.y_1 = tf.placeholder(tf.float32,[opt.batchSize, opt.image_size, opt.image_size, opt.input_nc])\n self.cls_x = tf.placeholder(tf.int32, [opt.batchSize])\n self.cls_y = tf.placeholder(tf.int32, [opt.batchSize])\n self.lr_decay = tf.placeholder(tf.float32, None, name='lr_decay')\n\n def build_model(self):\n\n self.content_code = self.content_encoder(self.x)\n self.encode_y1 = self.class_encoder_k(self.y_1)\n self.class_code = self.encode_y1\n self.tilde_x = self.decoder(content_code=self.content_code, class_code=self.class_code)\n self.encode_x = self.class_encoder_k(self.x)\n self.x_recon = self.decoder(content_code=self.content_code, class_code=self.encode_x)\n self.content_recon_loss = tf.reduce_mean(tf.abs(self.x - self.x_recon))\n\n self.x_feature, self.D_real_x = self.discriminator(self.x)\n self.y_feature_1, self.D_real_y = self.discriminator(self.y_1)\n self.tilde_x_feature, self.D_fake = self.discriminator(self.tilde_x)\n self.x_feature_recon, self.D_fake_recon = self.discriminator(self.x_recon)\n\n self.feature_matching = 0.5 * getfeature_matching_loss(self.y_feature_1, self.tilde_x_feature) + \\\n 0.5 * getfeature_matching_loss(self.x_feature, self.x_feature_recon)\n\n self.D_gan_loss = self.loss_hinge_dis(self.D_real_y, self.D_fake, self.cls_y, self.cls_y)\n self.G_gan_loss = 0.5 * self.loss_hinge_gen(self.D_fake, self.cls_y) \\\n + 0.5 * self.loss_hinge_gen(self.D_fake_recon, self.cls_x)\n self.grad_penalty = self.gradient_penalty_just_real(x=self.y_1, label=self.cls_y)\n\n # weight decay\n self.l2_loss_d = getWeight_Decay(scope='discriminator')\n self.l2_loss_g = getWeight_Decay(scope='content_encoder') + getWeight_Decay(scope='class_encoder_k') + getWeight_Decay(scope='decoder')\n\n self.D_loss = self.D_gan_loss + self.opt.lam_gp * self.grad_penalty + self.l2_loss_d\n self.G_loss = self.G_gan_loss + self.opt.lam_recon * self.content_recon_loss + self.opt.lam_fp * self.feature_matching + self.l2_loss_g\n\n def train(self):\n\n log_vars = []\n log_vars.append(('D_loss', self.D_loss))\n log_vars.append(('G_loss', self.G_loss))\n\n vars = tf.trainable_variables()\n\n '''\n total_para = 0\n for variable in vars:\n shape = variable.get_shape()\n print(variable.name, shape)\n variable_para = 1\n for dim in shape:\n variable_para *= dim.value\n total_para += variable_para\n print(\"The total para\", total_para)\n '''\n\n g_vars = getTrainVariable(vars, scope='encoder') + getTrainVariable(vars, scope='decoder')\n d_vars = getTrainVariable(vars, scope='discriminator')\n\n assert len(vars) == len(g_vars) + len(d_vars)\n\n saver = tf.train.Saver()\n for k, v in log_vars:\n tf.summary.scalar(k, v)\n\n opti_G = tf.train.RMSPropOptimizer(self.opt.lr_g * self.lr_decay).minimize(loss=self.G_loss,\n var_list=g_vars)\n opti_D = tf.train.RMSPropOptimizer(self.opt.lr_g * self.lr_decay).minimize(loss=self.D_loss,\n var_list=d_vars)\n init = tf.global_variables_initializer()\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as 
sess:\n\n sess.run(init)\n summary_op = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(self.opt.log_dir, sess.graph)\n\n ckpt = tf.train.get_checkpoint_state(self.opt.checkpoints_dir)\n if ckpt and ckpt.model_checkpoint_path:\n start_step = int(ckpt.model_checkpoint_path.split('model_', 2)[1].split('.', 2)[0])\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('Load Successfully!', ckpt.model_checkpoint_path)\n else:\n start_step = 0\n\n step = start_step\n lr_decay = 1\n\n print(\"Start reading dataset\")\n while step <= self.opt.niter:\n\n if step > self.opt.niter_decay and step % 2000 == 0:\n lr_decay = (self.opt.niter - step) / float(self.opt.niter - self.opt.iter_decay)\n\n source_image_x_data, target_image_y1_data, cls_x, cls_y = self.data_ob.getNextBatch()\n source_image_x = self.data_ob.getShapeForData(source_image_x_data)\n target_image_y1 = self.data_ob.getShapeForData(target_image_y1_data)\n\n f_d = {\n self.x :source_image_x, self.y_1:target_image_y1, self.cls_x: cls_x, self.cls_y: cls_y, self.lr_decay: lr_decay\n }\n\n sess.run(opti_D, feed_dict=f_d)\n sess.run(opti_G, feed_dict=f_d)\n\n summary_str = sess.run(summary_op, feed_dict=f_d)\n summary_writer.add_summary(summary_str, step)\n\n if step % self.opt.display_freq == 0:\n\n output_loss = sess.run([self.D_loss, self.D_gan_loss, self.G_loss, self.G_gan_loss,\n self.content_recon_loss, self.feature_matching, self.l2_loss_d, self.l2_loss_g], feed_dict=f_d)\n print(\"step %d, D_loss=%.4f, D_gan_loss=%.4f\"\n \" G_loss=%.4f, G_gan_loss=%.4f, content_recon=%.4f, feautre_loss=%.4f, l2_loss=%.4f, lr_decay=%.4f\" \n % (step, output_loss[0], output_loss[1], output_loss[2], output_loss[3],\n output_loss[4], output_loss[5], output_loss[6] + output_loss[7], lr_decay))\n\n if np.mod(step, self.opt.save_latest_freq) == 0:\n\n f_d = {\n self.x: source_image_x, self.y_1: target_image_y1}\n\n train_output_img = sess.run([\n self.x,\n self.y_1,\n self.tilde_x,\n self.x_recon\n ],feed_dict=f_d)\n\n output_img = np.concatenate([img for img in train_output_img[0:4]],axis=0)\n\n save_images(output_img, [output_img.shape[0]/self.opt.batchSize, self.opt.batchSize],\n '{}/{:02d}_output_img.jpg'.format(self.opt.sample_dir, step))\n\n if np.mod(step, self.opt.save_model_freq) == 0 and step != 0:\n saver.save(sess, os.path.join(self.opt.checkpoints_dir, 'model_{:06d}.ckpt'.format(step)))\n step += 1\n\n save_path = saver.save(sess, os.path.join(self.opt.checkpoints_dir, 'model_{:06d}.ckpt'.format(step)))\n summary_writer.close()\n\n print(\"Model saved in file: %s\" % save_path)\n\n def test(self):\n pass\n\n def reshape_tile(self, cls_l):\n return tf.tile(tf.reshape(tf.one_hot(cls_l, depth=self.opt.num_source_class),\n shape=[self.opt.batchSize, 1, 1, self.opt.num_source_class]), multiples=[1, 8, 8, 1])\n\n #conditional hinge loss\n def loss_hinge_dis(self, d_real_logits, d_fake_logits, cls_x, cls_y):\n cls_x = self.reshape_tile(cls_x)\n cls_y = self.reshape_tile(cls_y)\n loss = tf.reduce_mean(tf.nn.relu(tf.reduce_sum(cls_x * (1.0 - d_real_logits), axis=3)))\n loss += tf.reduce_mean(tf.nn.relu(tf.reduce_sum(cls_y * (1.0 + d_fake_logits), axis=3)))\n\n return loss\n\n def loss_hinge_gen(self, d_fake_logits, cls_x):\n cls_x = self.reshape_tile(cls_x)\n loss = - tf.reduce_mean(tf.reduce_sum(cls_x * d_fake_logits, axis=3))\n return loss\n\n def gradient_penalty_just_real(self, x, label):\n label = self.reshape_tile(label)\n _, discri_logits = self.discriminator(x)\n discri_logits = tf.squeeze(tf.reduce_sum(discri_logits * label, 
axis=3))\n gradients = tf.gradients(tf.reduce_mean(discri_logits), [x])[0]\n slopes = tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3])\n return tf.reduce_mean(slopes)\n\n def content_encoder(self, x):\n\n necf_t = self.opt.necf_t\n n_g_ref_t = self.opt.n_g_ref_t\n with tf.variable_scope(\"content_encoder\", reuse=tf.AUTO_REUSE):\n\n x = conv2d(x, output_dim=necf_t, kernel=7, stride=1, padding='SAME', scope='conv-1')\n x = instance_norm(x,scope='IN-1', affine=False)\n x = tf.nn.relu(x)\n for i in range(self.opt.n_layers_ec):\n x = conv2d(x, output_dim=pow(2,i+1)* necf_t, kernel=4, stride=2, padding='SAME', scope='conv_{}'.format(i+1))\n x = instance_norm(x,scope='ins_{}'.format(i+1), affine=False)\n x = tf.nn.relu(x)\n\n for i in range(2):\n x = Resblock(x, channels=n_g_ref_t, is_start=False, is_norm=True, is_acti=True, affline=False, scope='residual_{}'.format(i))\n\n return x\n\n def class_encoder_k(self, y):\n\n nesf_t = self.opt.nesf_t\n with tf.variable_scope(\"class_encoder_k\", reuse=tf.AUTO_REUSE):\n y = tf.nn.relu(conv2d(y, output_dim=nesf_t, kernel=7, stride=1, padding='SAME',scope='conv-1'))\n for i in range(2):\n y = conv2d(y, output_dim=nesf_t * pow(2, i+1), kernel=4, stride=2, padding='SAME',scope='conv_{}'.format(i+1))\n y = tf.nn.relu(y)\n for i in range(self.opt.n_layers_es - 2):\n y = conv2d(y, output_dim=nesf_t * pow(2, 2), kernel=4, stride=2, padding='SAME',scope='conv_{}'.format(i+3))\n y = tf.nn.relu(y)\n y = Adaptive_pool2d(y, output_size=1)\n y = conv2d(y, output_dim=nesf_t, kernel=1, stride=1, padding='SAME')\n\n return tf.squeeze(y)\n\n def decoder(self, content_code, class_code):\n\n n_g_ref_t = self.opt.n_g_ref_t\n output_nc = self.opt.output_nc\n n_layers_de = self.opt.n_layers_de\n n_residual_de = self.opt.n_residual_de\n with tf.variable_scope(\"decoder\", reuse=tf.AUTO_REUSE):\n\n #MLP\n for i in range(3):\n if i == 2:\n class_code = fully_connect(input_=class_code, output_size=n_g_ref_t*8, scope='fc_{}'.format(i+1))\n else:\n class_code = tf.nn.relu(fully_connect(input_=class_code, output_size=n_g_ref_t // 2, scope='fc_{}'.format(i+1)))\n\n de = content_code\n\n for i in range(n_residual_de):\n mean1 = class_code[:,n_g_ref_t*i:n_g_ref_t*(i+1)]\n stand_dev1 = class_code[:,n_g_ref_t*(i+1):n_g_ref_t*(i+2)]\n mean2 = class_code[:,n_g_ref_t*(i+2):n_g_ref_t*(i+3)]\n stand_dev2 = class_code[:,n_g_ref_t*(i+3):n_g_ref_t*(i+4)]\n print(class_code)\n class_code = class_code[:,n_g_ref_t*(i+3):]\n\n de = Resblock_AdaIn(content_code, beta1=mean1, gamma1=stand_dev1, beta2=mean2, gamma2=stand_dev2,\n channels=n_g_ref_t, scope='res_{}'.format(i+1))\n\n n_g_ref_t = n_g_ref_t // 2\n for i in range(n_layers_de):\n de = upscale(de, scale=2)\n de = conv2d(de, output_dim=n_g_ref_t/pow(2,i), kernel=5, stride=1, padding='SAME', scope='conv_{}'.format(i+1))\n de = instance_norm(de, scope='ins_{}'.format(i+1), affine=False)\n de = tf.nn.relu(de)\n\n y = conv2d(de, output_dim=output_nc, kernel=7, stride=1, padding='SAME', scope='conv_final')\n\n return tf.nn.tanh(y)\n\n def discriminator(self, x):\n\n ndf = self.opt.ndf\n with tf.variable_scope(\"discriminator\", reuse=tf.AUTO_REUSE):\n\n x = lrelu(conv2d(input_=x, output_dim=ndf, kernel=7, stride=1, scope='conv-64'))\n for i in range(5):\n if i == 4:\n x = Resblock_D(x, channels=min(ndf * pow(2, i+1), 1024), is_acti=False, is_start=False, is_norm=False, scope='r1_{}'.format(i + 1))\n x = Resblock_D(x, channels=min(ndf * pow(2, i+1), 1024), is_acti=False, is_start=False, is_norm=False, scope='r2_{}'.format(i+1))\n 
else:\n x = Resblock_D(x, channels=min(ndf * pow(2, i+1), 1024), is_acti=True, is_start=True,\n is_norm=False,\n scope='r1_{}'.format(i + 1))\n x = Resblock_D(x, channels=min(ndf * pow(2, i+1), 1024), is_acti=True, is_start=False, is_norm=False, scope='r2_{}'.format(i+1))\n x = avgpool2d(x, k=2)\n\n x_predict = conv2d(lrelu(x), output_dim=self.opt.num_source_class, kernel=3, stride=1, padding='SAME')\n\n return x, tf.squeeze(x_predict)\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.mod", "numpy.concatenate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
author9779/paper9779
[ "b544f60733794ff2dd344627f8ec0d87fe087266" ]
[ "defaults/wrappers.py" ]
[ "from .models import *\nfrom .datasets import *\nfrom utils._utils import *\n\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data.distributed import DistributedSampler as DS\n\nclass DefaultWrapper:\n \"\"\"Class that wraps everything.\n\n Model, optimizers, schedulers, and dataloaders are initialized in this class.\n\n Attributes:\n param_attributes:\n All the fields in the .json file are stored as attributes here.\n \"\"\"\n def __init__(self, parameters: edict):\n \"\"\"Inits the DefaultWrapper class.\n \n Args:\n parameters:\n Dictionary of paramaters read from a .json file.\n \"\"\"\n super().__init__()\n self.is_supervised = True\n parameters = edict(deepcopy(parameters))\n parameters = self.update_augmentation_strategy(parameters)\n self.param_attributes = list(parameters.keys())\n # adding effective batch size to optimizer_params\n batch_size = parameters.dataloader_params.trainloader.batch_size\n effective_batch_size = batch_size * self.visible_world\n for key in parameters.optimization_params.keys():\n parameters.optimization_params[key]['effective_batch_size'] = effective_batch_size \n autoscale_lr = parameters.optimization_params[key].optimizer.autoscale_lr\n if autoscale_lr:\n def_lr = parameters.optimization_params[key].optimizer.params.lr\n scaled_lr = def_lr * effective_batch_size / 256.\n parameters.optimization_params[key].optimizer.params.lr = scaled_lr \n for key in parameters:\n setattr(self, key, parameters[key]) \n \n def instantiate(self): \n \"\"\"Initialize model, loss, metrics, dataloaders, optimizer and scheduler.\"\"\"\n if self.is_rank0:\n print(\"Initialising Dataloaders . . .\")\n \n self.dataloaders = self.init_dataloaders()\n img_channels = self.dataloaders.trainloader.dataset.img_channels\n n_classes = self.dataloaders.trainloader.dataset.n_classes\n knn_nhood = self.dataloaders.trainloader.dataset.knn_nhood\n target_metric = self.dataloaders.trainloader.dataset.target_metric\n print_ddp(f\"The default metric has been set to : \\033[94m{target_metric}\\033[0m\")\n \n self.model_params.img_channels = img_channels\n self.model_params.knn_nhood = knn_nhood\n self.model_params.target_metric = target_metric\n \n # Checking for binary multi-label\n self.model_params.n_classes = n_classes\n is_multiclass = self.dataloaders.trainloader.dataset.is_multiclass\n if not is_multiclass and n_classes <= 2:\n print(\"\\033[93m Binary multi-label problem found: CHANING THE n_classes to 1\\033[0m\")\n self.model_params.n_classes = 1\n \n # adding WT checkpoint dir\n mname = self.training_params.model_name\n wt_name = self.model_params.backbone_type\n run_check = mname.split('-')[-1].split('_')\n if len(run_check) == 2 and run_check[0] == 'run':\n wt_name = f\"{wt_name}-run_{run_check[1]}\"\n wt_dir = os.path.join(self.training_params.save_dir, \"wt_inits\")\n self.model_params.wt_checkpoint = os.path.join(wt_dir, wt_name)\n \n # init and get model\n print_ddp(\"Initialising Model . . .\") \n self.model = self.init_model() \n \n print_ddp(\"Initialising Optimization methods . . 
\") \n # init and get optimizer\n optimizer_defs = self.init_optimizer(self.model, self.optimization_params.default) \n self.attr_from_dict(optimizer_defs)\n \n # init and get scheduler\n epochs = self.training_params.epochs\n scheduler_defs = self.init_scheduler(self.optimizer,\n self.optimization_params.default, \n len(self.dataloaders.trainloader), \n epochs) \n self.schedulers = MixedLRScheduler(**scheduler_defs)\n \n # init loss functions\n self.criterion = self.init_criteria() \n \n # init metric functions\n self.init_metrics()\n \n def init_dataloaders(self, collate_fn=None) -> edict:\n \"\"\"Define dataset params and dataloaders.\n \n Args:\n collate_fn:\n Specific collate_fn for the torch.utils.data.DataLoader.\n \n Returns:\n A dict (EasyDict) with train, validation and test loaders. nonddp_trainloader is\n for the 2nd phase of SWAP training where we don't use the distributed sampler.\n \n {'trainloader': trainloader,\n 'valloader': valloader,\n 'testloader': testloader,\n 'nonddp_trainloader':nonddp_trainloader}\n \"\"\" \n feature_bank_set, feature_bank_Loader = None, None\n DataSet = self.dataset_mapper.get(self.dataset_params.dataset, False)\n assert DataSet, \"Dataset not found - Plese select one of the following: {}\".format(list(self.dataset_mapper.keys()))\n\n trainset = DataSet(self.dataset_params, mode='train')\n valset = DataSet(self.dataset_params, mode='eval')\n testset = DataSet(self.dataset_params, mode='test')\n\n if self.training_params.knn_eval or not self.is_supervised:\n feature_bank_set = DataSet(self.dataset_params, mode='train')\n feature_bank_set.transform = valset.transform # Use validation transform when setting up prototype vectors\n feature_bank_set.resizing = valset.resizing \n \n if not self.is_supervised:\n trainset.num_augmentations = 2 \n \n #register task defs\n self.task = trainset.task\n self.is_multiclass = trainset.is_multiclass \n \n \n train_sampler = None\n feature_bank_sampler = None\n train_shuffle = self.dataloader_params['trainloader']['shuffle']\n # distributed sampler \n if ddp_is_on(): \n train_sampler = DS(trainset, num_replicas=self.visible_world, rank=self.device_id)\n if feature_bank_set is not None:\n feature_bank_sampler = DS(feature_bank_set, num_replicas=self.visible_world, shuffle=False,\n rank=self.device_id)\n self.dataloader_params['trainloader']['shuffle'] = False\n\n # define distributed samplers etc\n trainLoader = DataLoader(trainset, **self.dataloader_params['trainloader'],sampler=train_sampler)\n testLoader = DataLoader(testset, **self.dataloader_params['testloader'])\n if len(valset) > 0 :\n valLoader = DataLoader(valset, **self.dataloader_params['valloader'])\n else:\n valLoader = testLoader\n if feature_bank_set is not None:\n data_params_copy_feature_bank = deepcopy(self.dataloader_params['valloader'])\n data_params_copy_feature_bank['shuffle'] = False\n feature_bank_Loader = DataLoader(feature_bank_set,\n **data_params_copy_feature_bank ,sampler=feature_bank_sampler)\n self.dataloader_params['trainloader']['shuffle'] = train_shuffle\n\n if not len(valLoader):\n valLoader = testLoader \n if self.is_rank0:\n warnings.warn(\"Warning... 
Using test set as validation set\")\n\n return edict({'trainloader': trainLoader,\n 'valloader' : valLoader,\n 'testloader' : testLoader,\n 'fbank_loader' : feature_bank_Loader,\n })\n \n\n def init_model(self) -> Classifier:\n \"\"\"Initialize the model.\n \n DDP broadcasts model states from rank 0 process to all other processes \n in the DDP constructor, you don’t need to worry about different DDP processes \n start from different model parameter initial values. \n \"\"\"\n model = Classifier(self.model_params) \n model.to(self.device_id)\n if self.visible_world > 1 and torch.distributed.is_initialized():\n model = DDP(model, device_ids=[self.device_id])\n return model\n \n @staticmethod\n def init_optimizer(model, optimization_params:edict) -> edict: \n \"\"\"Initialize the optimizer.\n \n Args:\n optimization_params: EasyDict instance, read from the .json file.\n\n Returns:\n A dict (EasyDict) with optimizer and type keys.\n {'optimizer': optimizer (e.g. a torch.optim.Adam instance),\n 'optimizer_type': optimizer_type (e.g. a string \"Adam\")}\n \"\"\"\n optimizer_type = optimization_params.optimizer.type\n opt = optim.__dict__[optimizer_type]\n opt_params = optimization_params.optimizer.params\n optimizer = opt(DefaultWrapper.get_params_groups(model), **opt_params)\n\n # handling LARS\n lars_params = optimization_params.LARS_params\n effective_batch_size = optimization_params.effective_batch_size\n if lars_params.use and effective_batch_size >= lars_params.batch_act_thresh:\n print_ddp(\"LARS OPTIMIZER: \\033[92m ACTIVE \\033[0m\")\n optimizer = LARS(optimizer=optimizer, eps=lars_params.eps, trust_coef=lars_params.trust_coef)\n optimizer.defaults = optimizer.optim.defaults \n else:\n print_ddp(\"LARS OPTIMIZER: \\033[93m INACTIVE \\033[0m\")\n \n return edict({\"optimizer\":optimizer, \"optimizer_type\":optimizer_type})\n \n @staticmethod\n def get_params_groups(model):\n \"\"\"\n FROM: https://github.com/facebookresearch/dino/blob/main/utils.py\n It filters-out the no-grad params and it excludes weight_decay from all non-weight / non-bias tensors\n It will return 2 groups 0: regularized 1: not_regularized\n \"\"\"\n regularized = []\n not_regularized = []\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue\n # we do not regularize biases nor Norm parameters\n if name.endswith(\".bias\") or len(param.shape) == 1:\n not_regularized.append(param)\n else:\n regularized.append(param)\n return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}] \n \n @staticmethod \n def init_scheduler(optimizer, optimization_params: edict, steps_per_epoch: int=None, epochs: int=None) -> edict: \n \"\"\"Initialize the learning rate scheduler.\n\n steps_per_epoch and epochs are set by the caller, they are not intended to be None.\n \n Args:\n optimization_params: EasyDict instance, read from the .json file.\n \n Returns:\n A dict (EasyDict) with scheduler and type keys.\n {'scheduler': scheduler (e.g. a torch.optim.lr_scheduler.OneCycleLR instance),\n 'scheduler_type': scheduler_type (e.g. 
a string \"OneCycleLR\")}\n \"\"\"\n schedulers = edict({\"schedulers\":[None], \"scheduler_types\":[None], \n \"steps_per_epoch\":steps_per_epoch})\n scheduler_types = optimization_params.scheduler.type\n accepted_types = [None, \"LinearWarmup\", \"MultiStepLR\", \n \"ReduceLROnPlateau\", \"OneCycleLR\", \"CosineAnnealingLR\"] \n if not isinstance(scheduler_types, list):\n scheduler_types = [scheduler_types] \n \n for scheduler_type in scheduler_types:\n if scheduler_type not in accepted_types:\n raise ValueError(f\"{scheduler_type} is not a supported scheduler\")\n \n if scheduler_type is None:\n continue\n elif scheduler_type not in optim.lr_scheduler.__dict__:\n if scheduler_type == 'LinearWarmup':\n sch = LinearWarmup \n else:\n raise NotImplementedError\n else:\n sch = optim.lr_scheduler.__dict__[scheduler_type]\n\n if sch.__name__ == 'OneCycleLR':\n max_lr = optimization_params.optimizer.params.lr\n sch_params = {\"max_lr\":max_lr, \n \"steps_per_epoch\":steps_per_epoch, \n \"epochs\":epochs,\n \"div_factor\": max_lr/1e-8\n }\n if \"LinearWarmup\" in scheduler_types:\n sch_params[\"div_factor\"] = 1.\n sch_params.update(optimization_params.scheduler.params.OneCycleLR)\n elif sch.__name__ == 'LinearWarmup':\n max_lr = optimization_params.optimizer.params.lr\n sch_params = optimization_params.scheduler.params[scheduler_type]\n sch_params.update({\"max_lr\":max_lr, \"steps_per_epoch\":steps_per_epoch})\n elif sch.__name__ == 'CosineAnnealingLR':\n T_max = steps_per_epoch * epochs\n sch_params = optimization_params.scheduler.params[scheduler_type]\n if \"LinearWarmup\" in scheduler_types:\n T_max = T_max - warmup_iters\n sch_params.update({\"T_max\":T_max})\n else:\n sch_params = optimization_params.scheduler.params[scheduler_type]\n \n scheduler = sch(optimizer, **sch_params) \n schedulers[\"schedulers\"].append(scheduler)\n schedulers[\"scheduler_types\"].append(scheduler_type)\n \n if scheduler_type == 'LinearWarmup':\n warmup_iters = scheduler.warmup_iters\n\n return schedulers\n \n def init_criteria(self): \n \"\"\"Initialize the loss criteria. \"\"\"\n if self.task == 'classification':\n if self.is_multiclass:\n crit = nn.CrossEntropyLoss() \n else:\n crit = nn.BCEWithLogitsLoss() \n else:\n raise NotImplementedError(\"Only classification tasks are implemented for now\")\n \n return crit\n \n def init_metrics(self):\n if self.task == 'classification':\n if self.is_multiclass:\n self.metric = DefaultClassificationMetrics \n else:\n self.metric = MultiLabelClassificationMetrics\n else:\n raise NotImplementedError(\"Only classification tasks are implemented for now\") \n \n def attr_from_dict(self, param_dict: edict):\n \"\"\"Function that makes the dictionary key-values into attributes.\n \n This allows us to use the dot syntax. 
Check the .json file for the entries.\n\n Args:\n param_dict: The dict we populate the class attributes from.\n \"\"\"\n self.name = self.__class__.__name__\n for key in param_dict:\n setattr(self, key, param_dict[key]) \n \n def update_augmentation_strategy(self, parameters):\n self_dir = os.path.dirname(os.path.abspath(inspect.getfile(self.__class__)))\n new_strategy_dir = os.path.join(self_dir, \"augmentation_strategy.json\") \n if not os.path.isfile(new_strategy_dir):\n return parameters\n \n augmentation_strategy = edict(load_json(new_strategy_dir))\n general_args = augmentation_strategy.general_args\n repetition_strategy = augmentation_strategy.repetition_strategy\n transforms = augmentation_strategy.transforms\n to_change = list(transforms.keys())\n \n if not general_args.overwrite_defaults:\n return parameters\n params = deepcopy(parameters)\n \n for org_keys in parameters.dataset_params.keys():\n if org_keys in to_change:\n org_def = parameters.dataset_params[org_keys]\n updated_transforms = []\n for order, aug_type in enumerate(repetition_strategy.order):\n new_trans = transforms[org_keys][aug_type]\n n_augs = repetition_strategy.n_augmentations[order]\n if general_args.inherit:\n for key in general_args.inherit:\n new_trans[key] = org_def[key] \n for _ in range(n_augs):\n updated_transforms.append(new_trans)\n params.dataset_params[org_keys] = updated_transforms \n \n return params \n \n @property\n def parameters(self):\n return edict({key : getattr(self, key) \n for key in self.param_attributes})\n \n @property\n def dataset_mapper(self):\n return {\n \"CheXpert\" : CheXpert,\n \"DDSM\" : DDSM,\n \"ISIC2019\": ISIC2019,\n \"APTOS2019\": APTOS2019,\n \"Camelyon\": Camelyon,\n }\n \n @property\n def visible_world(self):\n return torch.cuda.device_count() \n \n @property\n def visible_ids(sefl):\n return list(range(torch.cuda.device_count()))\n \n @property\n def device_id(self): \n return torch.cuda.current_device() if self.visible_world else \"cpu\"\n \n @property\n def is_rank0(self):\n return is_rank0(self.device_id)\n" ]
[ [ "torch.nn.parallel.DistributedDataParallel", "torch.utils.data.distributed.DistributedSampler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jeford/mdprop
[ "e79b70343f559cbf586f09234122c2f43b4d867c", "e79b70343f559cbf586f09234122c2f43b4d867c", "e79b70343f559cbf586f09234122c2f43b4d867c" ]
[ "examples/kepler/kepler_exp_timestep.py", "examples/prinz/timestep_map.py", "examples/joukowsky/joukowsky.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\n\nimport mdprop\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\npot = mdprop.potential.Kepler(1.0)\nmasses = np.array([1.0])\nX, V = pot.init_cond(0.8)\nsymbols = ['X']\n\nstate = {\n 'X': X,\n 'V': V,\n 'symbols': symbols,\n 'masses': masses,\n 'potential_energy': pot.compute_energy(X)\n}\n\n# Set up parameters for dynamics\nbeta = 0.25\ndt = 0.005\nsim_time = 10.0\n\n# Construct update objects using pointers to different forces\nvel_update = mdprop.update.VelocityUpdate(pot.compute_forces)\n\n# Construct integrator\ninteg = mdprop.integrator.VelocityVerlet(vel_update)\nprint(integ)\n\n# Save energy, times to h5\nh5hook = mdprop.hook.WriteH5(\"exp.h5\", ['kinetic_energy', 'total_energy', 'simulation_time'], [(1, ), (1, ), (1, )], ['f', 'f', 'f'], cache_size=1)\n\ntraj = mdprop.trajectory.Trajectory(integ)\nts_control = mdprop.hook.TimestepController(mdprop.control.exponential, beta)\ntraj.append_hooks([mdprop.hook.Gradient(vel_update), ts_control])\ntraj.append_hook(h5hook)\ntraj.append_printkeys(['dt', 'control'])\ntraj.run(dt, sim_time, state)\n\nKE = h5hook.h5file['kinetic_energy'][...]\nTE = h5hook.h5file['total_energy'][...]\nTs = h5hook.h5file['simulation_time'][...]\ndts = Ts[1:] - Ts[:-1]\nprint(len(dts))\n\nfig, ax1 = plt.subplots()\nax1.plot(Ts, TE, color='tab:blue', ls='-', label='Total energy')\nax2 = ax1.twinx()\nax2.plot(Ts[1:], dts, color='tab:red', ls='--')\nax1.set_ylim([-0.5001, -0.497])\nax1.set_xlabel('Simulation Time')\nax1.set_ylabel(\"Total Energy\", color='tab:blue')\nax2.set_ylabel(\"Time step\", color='tab:red')\nplt.title(\"Total Energy vs. Time\")\nplt.tight_layout()\nplt.savefig(\"exp_total.eps\")\n\nfig, ax1 = plt.subplots()\nax1.plot(Ts, KE, color='tab:blue', ls='-', label='Kinetic energy')\nax2 = ax1.twinx()\nax2.plot(Ts[1:], dts, color='tab:red', ls='--')\nax1.set_xlabel('Simulation Time')\nax1.set_ylabel(\"Kinetic Energy\", color='tab:blue')\nax2.set_ylabel(\"Time step\", color='tab:red')\nplt.title(\"Kinetic Energy vs. 
Time\")\nplt.tight_layout()\nplt.savefig(\"exp_kinetic.eps\")\n", "import numpy as np\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport mdprop\n\npot = mdprop.potential.Prinz()\nX = np.array([[-1.0]])\nV = np.array([[ 0.0]])\nmasses = np.array([1.0])\nsymbols = ['X']\nbeta = 0.1\ndt = 0.01\nsim_time = 10.0\n\nstate = {\n 'X': X,\n 'V': V,\n 'masses': masses,\n 'symbols': symbols,\n 'potential_energy': pot.compute_energy(X),\n }\n\nvup = mdprop.update.VelocityUpdate(pot.compute_force)\n\ninteg = mdprop.integrator.VelocityVerlet(vup)\n\ntraj = mdprop.trajectory.Trajectory(integ)\n\ngradhook = mdprop.hook.Gradient(vup)\ntschook = mdprop.hook.TimestepController(mdprop.control.exponential, beta)\nh5hook = mdprop.hook.WriteH5(\"prinz.h5\", [\"X\", \"potential_energy\", \"total_energy\", \"simulation_time\"], [(1, 1), (1, ), (1, ), (1, )], ['f', 'f', 'f', 'f'], cache_size=1)\ntraj.append_hooks([gradhook, h5hook, tschook])\ntraj.append_printkey('dt')\ntraj.run(dt, sim_time, state)\n\nh5file = h5hook.h5file\nXs = h5file['X'][:, 0, 0]\nTs = h5file['simulation_time'][:, 0]\nPEs = h5file['potential_energy'][:, 0]\nTEs = h5file['total_energy'][:, 0]\ndts = Ts[1:] - Ts[:-1]\ndts_scaled = (dts - min(dts))/max(dts - min(dts)) * 4.0\n#dts_scaled = dts / min(dts) * 2.0\n\nplt.figure()\nplt.plot(Xs, PEs)\nplt.plot(Xs[1:], dts_scaled)\nplt.savefig(\"prinz.eps\")\n\nplt.figure()\nplt.plot(Ts, TEs)\nplt.savefig(\"energy.eps\")\n", "import numpy as np\n\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport mdprop\n\nclass Joukowsky(mdprop.potential.Potential):\n def __init__(self, alpha=1.0):\n self.alpha = alpha\n\n def compute_energy_per_particle(self, X, **state):\n r = np.linalg.norm(X, axis=1)\n return self.alpha * r + 1.0/r\n\n def compute_energy(self, X, **state):\n return np.sum(self.compute_energy_per_particle(X, **state))\n\n def compute_gradient(self, X, **state):\n r = np.linalg.norm(X, axis=1)[:, None]\n V = np.sum(self.alpha * r + 1.0 / r)\n G = self.alpha * X / r - X / (r ** 3)\n return V, G\n\n# Params\ndt = 0.05\nsim_time = 13.0\nbeta = 0.2\n\nx = np.linspace(0.1, 2.0, 100)[:, None]\npot = Joukowsky(5.0)\ny = pot.compute_energy_per_particle(x)\n_, g = pot.compute_gradient(x)\nng = mdprop.utils.numerical_gradient(x, pot.compute_energy)\nprint(max(g - ng))\n\nvup = mdprop.update.VelocityUpdate(pot.compute_force)\ninteg = mdprop.integrator.VelocityVerlet(vup)\n\nX = np.array([[-2.0]])\nV = np.array([[0.0]])\nmasses = np.array([1.0])\nsymbols = ['X']\ninit_pot = pot.compute_energy(X)\n\ninit_state = {\n 'X': X,\n 'V': V,\n 'masses': masses,\n 'symbols': symbols,\n 'potential_energy': init_pot,\n }\n\nstate = init_state.copy()\ntraj = mdprop.trajectory.Trajectory(integ)\nh5hook = mdprop.hook.WriteH5('joukowsky.h5', ['X', 'total_energy'], [(1, 1), (1, )], ['f', 'f'], cache_size=1)\ntraj.append_hook(h5hook)\ntraj.run(dt, sim_time, state)\n\nxs = h5hook.h5file['X'][:, :, 0]\nes = h5hook.h5file['total_energy'][:, 0]\nvs = pot.compute_energy_per_particle(xs)\n\nstate = init_state.copy()\ntraj = mdprop.trajectory.Trajectory(integ)\ngradhook = mdprop.hook.Gradient(vup)\nh5hook_vts = mdprop.hook.WriteH5('joukowsky_vts.h5', ['X', 'total_energy'], [(1, 1), (1, )], ['f', 'f'], cache_size=1)\ntshook = mdprop.hook.TimestepController(mdprop.control.exponential, beta)\ntraj.append_hooks([gradhook, h5hook_vts, tshook])\ntraj.run(dt, sim_time, state)\n\nxs_vts = h5hook_vts.h5file['X'][:, :, 0]\nes_vts = h5hook_vts.h5file['total_energy'][:, 
0]\nvs_vts = pot.compute_energy_per_particle(xs_vts)\n\nprint(len(xs))\nprint(len(xs_vts))\n\n# Plot the VV trajectory\nxmin = np.min(xs)\nxmax = np.max(xs)\nx = np.linspace(xmin, xmax, 100)[:, None]\ny = pot.compute_energy_per_particle(x)\n\nplt.figure()\nplt.plot(x, y)\nplt.plot(xs, vs, marker='o', ls='', label='Potential')\nplt.plot(xs, es, marker='o', color='g', label='Total')\nplt.xlabel(\"X\")\nplt.ylabel(\"Energy\")\nplt.legend()\nplt.tight_layout()\nplt.savefig(\"joukowsky.eps\")\n\n# Plot the VV VTS trajectory\nxmin_vts = np.min(xs_vts)\nxmax_vts = np.max(xs_vts)\nx_vts = np.linspace(xmin_vts, xmax_vts, 100)[:, None]\ny_vts = pot.compute_energy_per_particle(x_vts)\n\nplt.figure()\nplt.plot(x_vts, y_vts)\nplt.plot(xs_vts, vs_vts, marker='o', ls='', label='Potential')\nplt.plot(xs_vts, es_vts, marker='o', color='g', label='Total')\nplt.xlabel(\"X\")\nplt.ylabel(\"Energy\")\nplt.legend()\nplt.tight_layout()\nplt.savefig(\"joukowsky_vts.eps\")\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.array" ], [ "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.array", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "numpy.linspace", "numpy.min", "matplotlib.use", "numpy.linalg.norm", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.sum", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VictorAtPL/Speech_Commands_Recognition_Bi_LSTM_with_Tensorflow_2
[ "d8ed6052baa8289c9856ee3582979c6203f94009" ]
[ "models/baseline_bilstm_bigger_2_dense_bigger.py" ]
[ "import tensorflow as tf\n\nfrom AbstractModel import AbstractModel\nfrom common import get_input_fn_and_steps_per_epoch, load_sets_count, mel_spectrogram_unlabeled_parser\nfrom constants import TFRECORDS_SAVE_PATH\n\nfrom models.baseline import Model as Baseline\n\n\nclass Model(Baseline):\n\n def get_model(self):\n input_op = tf.keras.Input(shape=(128, 44))\n\n dropout = 0.0\n layers = tf.keras.layers\n # BATCH_NORM\n x = layers.BatchNormalization()(input_op)\n\n # LSTM\n # https://github.com/tensorflow/tensorflow/issues/30263\n x = layers.Bidirectional(layers.LSTM(256, activation='sigmoid', return_sequences=True))(x)\n x = layers.Dropout(dropout)(x)\n\n # LSTM\n # https://github.com/tensorflow/tensorflow/issues/30263\n x = layers.Bidirectional(layers.LSTM(256, activation='sigmoid', return_sequences=True))(x)\n x = layers.Dropout(dropout)(x)\n\n # LSTM\n # https://github.com/tensorflow/tensorflow/issues/30263\n x = layers.Bidirectional(layers.LSTM(256, activation='sigmoid'))(x)\n x = layers.Dropout(dropout)(x)\n\n # BATCH_NORM\n x = layers.BatchNormalization()(x)\n\n # DENSE\n x = layers.Dense(512)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Dropout(dropout)(x)\n\n # DENSE\n x = layers.Dense(256)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Dropout(dropout)(x)\n\n # DENSE\n x = layers.Dense(128)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Dropout(dropout)(x)\n\n # DENSE\n x = layers.Dense(64)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation('relu')(x)\n x = layers.Dropout(dropout)(x)\n\n output_op = layers.Dense(12)(x)\n\n return tf.keras.Model(inputs=input_op, outputs=output_op)\n" ]
[ [ "tensorflow.keras.Input", "tensorflow.keras.Model" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]