Column schema for the samples below:
repo_name    string (6–130 chars)
hexsha       list of commit hashes
file_path    list of file paths
code         list of file contents
apis         list of API calls used in each file
Sengxian/cogdl
[ "b0a855feef6a883bcc0f7df421fc6092ec18abde", "b0a855feef6a883bcc0f7df421fc6092ec18abde" ]
[ "cogdl/tasks/link_prediction.py", "examples/gnn_models/chebyshev.py" ]
[ "import copy\nimport json\nimport logging\nimport os\nimport random\n\nimport networkx as nx\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom cogdl.datasets import build_dataset\nfrom cogdl.datasets.kg_data import BidirectionalOneShotIterator, TrainDataset\nfrom cogdl.models import build_model\nfrom cogdl.models.emb import DNGR, HOPE, LINE, SDNE, DeepWalk, GraRep, NetMF, NetSMF, Node2vec, ProNE\nfrom cogdl.utils import negative_edge_sampling\nfrom sklearn.metrics import auc, f1_score, precision_recall_curve, roc_auc_score\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom . import BaseTask, register_task\n\n\ndef save_model(model, optimizer, save_variable_list, args):\n \"\"\"\n Save the parameters of the model and the optimizer,\n as well as some other variables such as step and learning_rate\n \"\"\"\n\n argparse_dict = vars(args)\n with open(os.path.join(args.save_path, \"config.json\"), \"w\") as fjson:\n json.dump(argparse_dict, fjson)\n\n torch.save(\n {**save_variable_list, \"model_state_dict\": model.state_dict(), \"optimizer_state_dict\": optimizer.state_dict()},\n os.path.join(args.save_path, \"checkpoint\"),\n )\n\n entity_embedding = model.entity_embedding.detach().cpu().numpy()\n np.save(os.path.join(args.save_path, \"entity_embedding\"), entity_embedding)\n\n relation_embedding = model.relation_embedding.detach().cpu().numpy()\n np.save(os.path.join(args.save_path, \"relation_embedding\"), relation_embedding)\n\n\ndef set_logger(args):\n \"\"\"\n Write logs to checkpoint and console\n \"\"\"\n\n if args.do_train:\n log_file = os.path.join(args.save_path or args.init_checkpoint, \"train.log\")\n else:\n log_file = os.path.join(args.save_path or args.init_checkpoint, \"test.log\")\n\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=logging.INFO,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n filename=log_file,\n filemode=\"w\",\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)-8s %(message)s\")\n console.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(console)\n\n\ndef log_metrics(mode, step, metrics):\n \"\"\"\n Print the evaluation logs\n \"\"\"\n for metric in metrics:\n logging.info(\"%s %s at step %d: %f\" % (mode, metric, step, metrics[metric]))\n\n\ndef divide_data(input_list, division_rate):\n local_division = len(input_list) * np.cumsum(np.array(division_rate))\n random.shuffle(input_list)\n return [\n input_list[int(round(local_division[i - 1])) if i > 0 else 0 : int(round(local_division[i]))]\n for i in range(len(local_division))\n ]\n\n\ndef randomly_choose_false_edges(nodes, true_edges, num):\n true_edges_set = set(true_edges)\n tmp_list = list()\n all_flag = False\n for _ in range(num):\n trial = 0\n while True:\n x = nodes[random.randint(0, len(nodes) - 1)]\n y = nodes[random.randint(0, len(nodes) - 1)]\n trial += 1\n if trial >= 1000:\n all_flag = True\n break\n if x != y and (x, y) not in true_edges_set and (y, x) not in true_edges_set:\n tmp_list.append((x, y))\n break\n if all_flag:\n break\n return tmp_list\n\n\ndef gen_node_pairs(train_data, test_data, negative_ratio=5):\n G = nx.Graph()\n G.add_edges_from(train_data)\n\n training_nodes = set(list(G.nodes()))\n test_true_data = []\n for u, v in test_data:\n if u in training_nodes and v in training_nodes:\n test_true_data.append((u, v))\n test_false_data = randomly_choose_false_edges(list(training_nodes), train_data, len(test_data) * negative_ratio)\n 
return (test_true_data, test_false_data)\n\n\ndef get_score(embs, node1, node2):\n vector1 = embs[int(node1)]\n vector2 = embs[int(node2)]\n return np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))\n\n\ndef evaluate(embs, true_edges, false_edges):\n true_list = list()\n prediction_list = list()\n for edge in true_edges:\n true_list.append(1)\n prediction_list.append(get_score(embs, edge[0], edge[1]))\n\n for edge in false_edges:\n true_list.append(0)\n prediction_list.append(get_score(embs, edge[0], edge[1]))\n\n sorted_pred = prediction_list[:]\n sorted_pred.sort()\n threshold = sorted_pred[-len(true_edges)]\n\n y_pred = np.zeros(len(prediction_list), dtype=np.int32)\n for i in range(len(prediction_list)):\n if prediction_list[i] >= threshold:\n y_pred[i] = 1\n\n y_true = np.array(true_list)\n y_scores = np.array(prediction_list)\n ps, rs, _ = precision_recall_curve(y_true, y_scores)\n return roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps)\n\n\ndef select_task(model_name=None, model=None):\n assert model_name is not None or model is not None\n if model_name is not None:\n if model_name in [\"rgcn\", \"compgcn\"]:\n return \"KGLinkPrediction\"\n elif model_name in [\"distmult\", \"transe\", \"rotate\", \"complex\"]:\n return \"TripleLinkPrediction\"\n elif model_name in [\n \"prone\",\n \"netmf\",\n \"deepwalk\",\n \"line\",\n \"hope\",\n \"node2vec\",\n \"netmf\",\n \"netsmf\",\n \"sdne\",\n \"grarep\",\n \"dngr\",\n ]:\n return \"HomoLinkPrediction\"\n else:\n return \"GNNLinkPrediction\"\n else:\n from cogdl.models.emb import complex, distmult, rotate, transe\n from cogdl.models.nn import compgcn, rgcn\n\n if type(model) in [rgcn.LinkPredictRGCN, compgcn.LinkPredictCompGCN]:\n return \"KGLinkPrediction\"\n elif type(model) in [distmult.DistMult, rotate.RotatE, transe.TransE, complex.ComplEx]:\n return \"TripleLinkPrediction\"\n elif type(model) in [HOPE, ProNE, LINE, DeepWalk, Node2vec, NetSMF, NetMF, SDNE, GraRep, DNGR]:\n return \"HomoLinkPrediction\"\n else:\n return \"GNNLinkPrediction\"\n\n\nclass HomoLinkPrediction(nn.Module):\n def __init__(self, args, dataset=None, model=None):\n super(HomoLinkPrediction, self).__init__()\n dataset = build_dataset(args) if dataset is None else dataset\n data = dataset[0]\n self.data = data\n if hasattr(dataset, \"num_features\"):\n args.num_features = dataset.num_features\n model = build_model(args) if model is None else model\n self.model = model\n self.patience = args.patience\n self.max_epoch = args.max_epoch\n\n edge_list = self.data.edge_index.numpy()\n edge_list = list(zip(edge_list[0], edge_list[1]))\n edge_set = set()\n for edge in edge_list:\n if (edge[0], edge[1]) not in edge_set and (edge[1], edge[0]) not in edge_set:\n edge_set.add(edge)\n edge_list = list(edge_set)\n self.train_data, self.test_data = divide_data(edge_list, [0.90, 0.10])\n\n self.test_data = gen_node_pairs(self.train_data, self.test_data, args.negative_ratio)\n self.device = \"cpu\" if not torch.cuda.is_available() or args.cpu else args.device_id[0]\n self.model.set_device(self.device)\n\n def train(self):\n G = nx.Graph()\n G.add_edges_from(self.train_data)\n embeddings = self.model.train(G)\n\n embs = dict()\n for vid, node in enumerate(G.nodes()):\n embs[node] = embeddings[vid]\n\n roc_auc, f1_score, pr_auc = evaluate(embs, self.test_data[0], self.test_data[1])\n print(f\"Test ROC-AUC = {roc_auc:.4f}, F1 = {f1_score:.4f}, PR-AUC = {pr_auc:.4f}\")\n return dict(ROC_AUC=roc_auc, PR_AUC=pr_auc, 
F1=f1_score)\n\n\nclass TripleLinkPrediction(nn.Module):\n \"\"\"\n Training process borrowed from `KnowledgeGraphEmbedding<https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding>`\n \"\"\"\n\n def __init__(self, args, dataset=None, model=None):\n super(TripleLinkPrediction, self).__init__()\n self.dataset = build_dataset(args) if dataset is None else dataset\n args.nentity = self.dataset.num_entities\n args.nrelation = self.dataset.num_relations\n self.model = build_model(args) if model is None else model\n self.args = args\n\n self.device = \"cpu\" if not torch.cuda.is_available() or args.cpu else args.device_id[0]\n self.model = self.model.to(self.device)\n set_logger(args)\n logging.info(\"Model: %s\" % args.model)\n logging.info(\"#entity: %d\" % args.nentity)\n logging.info(\"#relation: %d\" % args.nrelation)\n\n def train(self):\n\n train_triples = self.dataset.triples[self.dataset.train_start_idx : self.dataset.valid_start_idx]\n logging.info(\"#train: %d\" % len(train_triples))\n valid_triples = self.dataset.triples[self.dataset.valid_start_idx : self.dataset.test_start_idx]\n logging.info(\"#valid: %d\" % len(valid_triples))\n test_triples = self.dataset.triples[self.dataset.test_start_idx :]\n logging.info(\"#test: %d\" % len(test_triples))\n\n all_true_triples = train_triples + valid_triples + test_triples\n nentity, nrelation = self.args.nentity, self.args.nrelation\n\n if self.args.do_train:\n # Set training dataloader iterator\n train_dataloader_head = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, self.args.negative_sample_size, \"head-batch\"),\n batch_size=self.args.batch_size,\n shuffle=True,\n collate_fn=TrainDataset.collate_fn,\n )\n\n train_dataloader_tail = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, self.args.negative_sample_size, \"tail-batch\"),\n batch_size=self.args.batch_size,\n shuffle=True,\n collate_fn=TrainDataset.collate_fn,\n )\n\n train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)\n\n # Set training configuration\n current_learning_rate = self.args.learning_rate\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, self.model.parameters()), lr=current_learning_rate\n )\n if self.args.warm_up_steps:\n warm_up_steps = self.args.warm_up_steps\n else:\n warm_up_steps = self.args.max_epoch // 2\n\n if self.args.init_checkpoint:\n # Restore model from checkpoint directory\n logging.info(\"Loading checkpoint %s...\" % self.args.init_checkpoint)\n checkpoint = torch.load(os.path.join(self.args.init_checkpoint, \"checkpoint\"))\n init_step = checkpoint[\"step\"]\n self.model.load_state_dict(checkpoint[\"model_state_dict\"])\n if self.args.do_train:\n current_learning_rate = checkpoint[\"current_learning_rate\"]\n warm_up_steps = checkpoint[\"warm_up_steps\"]\n optimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\n else:\n logging.info(\"Ramdomly Initializing %s Model...\" % self.args.model)\n init_step = 0\n\n step = init_step\n\n logging.info(\"Start Training...\")\n logging.info(\"init_step = %d\" % init_step)\n logging.info(\"batch_size = %d\" % self.args.batch_size)\n logging.info(\"negative_adversarial_sampling = %d\" % self.args.negative_adversarial_sampling)\n logging.info(\"hidden_dim = %d\" % self.args.embedding_size)\n logging.info(\"gamma = %f\" % self.args.gamma)\n logging.info(\"negative_adversarial_sampling = %s\" % str(self.args.negative_adversarial_sampling))\n if self.args.negative_adversarial_sampling:\n logging.info(\"adversarial_temperature 
= %f\" % self.args.adversarial_temperature)\n\n # Set valid dataloader as it would be evaluated during training\n\n if self.args.do_train:\n logging.info(\"learning_rate = %d\" % current_learning_rate)\n\n training_logs = []\n\n # Training Loop\n for step in range(init_step, self.args.max_epoch):\n\n log = self.model.train_step(self.model, optimizer, train_iterator, self.args)\n\n training_logs.append(log)\n\n if step >= warm_up_steps:\n current_learning_rate = current_learning_rate / 10\n logging.info(\"Change learning_rate to %f at step %d\" % (current_learning_rate, step))\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, self.model.parameters()), lr=current_learning_rate\n )\n warm_up_steps = warm_up_steps * 3\n\n if step % self.args.save_checkpoint_steps == 0:\n save_variable_list = {\n \"step\": step,\n \"current_learning_rate\": current_learning_rate,\n \"warm_up_steps\": warm_up_steps,\n }\n save_model(self.model, optimizer, save_variable_list, self.args)\n\n if step % self.args.log_steps == 0:\n metrics = {}\n for metric in training_logs[0].keys():\n metrics[metric] = sum([log[metric] for log in training_logs]) / len(training_logs)\n log_metrics(\"Training average\", step, metrics)\n training_logs = []\n\n if self.args.do_valid and step % self.args.valid_steps == 0:\n logging.info(\"Evaluating on Valid Dataset...\")\n metrics = self.model.test_step(self.model, valid_triples, all_true_triples, self.args)\n log_metrics(\"Valid\", step, metrics)\n\n save_variable_list = {\n \"step\": step,\n \"current_learning_rate\": current_learning_rate,\n \"warm_up_steps\": warm_up_steps,\n }\n save_model(self.model, optimizer, save_variable_list, self.args)\n\n if self.args.do_valid:\n logging.info(\"Evaluating on Valid Dataset...\")\n metrics = self.model.test_step(self.model, valid_triples, all_true_triples, self.args)\n log_metrics(\"Valid\", step, metrics)\n\n logging.info(\"Evaluating on Test Dataset...\")\n return self.model.test_step(self.model, test_triples, all_true_triples, self.args)\n\n\nclass KGLinkPrediction(nn.Module):\n def __init__(self, args, dataset=None, model=None):\n super(KGLinkPrediction, self).__init__()\n\n self.device = \"cpu\" if not torch.cuda.is_available() or args.cpu else args.device_id[0]\n self.evaluate_interval = args.evaluate_interval\n dataset = build_dataset(args) if dataset is None else dataset\n self.data = dataset[0]\n self.data.apply(lambda x: x.to(self.device))\n args.num_entities = len(torch.unique(self.data.edge_index))\n args.num_rels = len(torch.unique(self.data.edge_attr))\n model = build_model(args) if model is None else model\n\n self.model = model.to(self.device)\n self.model.set_device(self.device)\n self.max_epoch = args.max_epoch\n self.patience = min(args.patience, 20)\n self.grad_norm = 1.0\n self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n def train(self):\n epoch_iter = tqdm(range(self.max_epoch))\n patience = 0\n best_mrr = 0\n best_model = None\n val_mrr = 0\n\n for epoch in epoch_iter:\n loss_n = self._train_step()\n if (epoch + 1) % self.evaluate_interval == 0:\n torch.cuda.empty_cache()\n val_mrr, _ = self._test_step(\"val\")\n if val_mrr > best_mrr:\n best_mrr = val_mrr\n best_model = copy.deepcopy(self.model)\n patience = 0\n else:\n patience += 1\n if patience == self.patience:\n self.model = best_model\n epoch_iter.close()\n break\n epoch_iter.set_description(\n f\"Epoch: {epoch:03d}, TrainLoss: {loss_n: .4f}, Val MRR: {val_mrr: .4f}, Best MRR: 
{best_mrr: .4f}\"\n )\n self.model = best_model\n test_mrr, test_hits = self._test_step(\"test\")\n print(f\"Test MRR:{test_mrr}, Hits@1/3/10: {test_hits}\")\n return dict(MRR=test_mrr, HITS1=test_hits[0], HITS3=test_hits[1], HITS10=test_hits[2])\n\n def _train_step(self, split=\"train\"):\n self.model.train()\n self.optimizer.zero_grad()\n loss_n = self.model.loss(self.data)\n loss_n.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)\n self.optimizer.step()\n return loss_n.item()\n\n def _test_step(self, split=\"val\"):\n self.model.eval()\n if split == \"train\":\n mask = self.data.train_mask\n elif split == \"val\":\n mask = self.data.val_mask\n else:\n mask = self.data.test_mask\n edge_index = self.data.edge_index[:, mask]\n edge_attr = self.data.edge_attr[mask]\n mrr, hits = self.model.predict(edge_index, edge_attr)\n return mrr, hits\n\n\nclass GNNHomoLinkPrediction(nn.Module):\n def __init__(self, args, dataset=None, model=None):\n super(GNNHomoLinkPrediction, self).__init__()\n self.device = \"cpu\" if not torch.cuda.is_available() or args.cpu else args.device_id[0]\n self.evaluate_interval = args.evaluate_interval\n dataset = build_dataset(args) if dataset is None else dataset\n self.data = dataset[0]\n\n self.num_nodes = self.data.x.size(0)\n args.num_features = dataset.num_features\n args.num_classes = args.hidden_size\n\n model = build_model(args) if model is None else model\n self.model = model.to(self.device)\n\n if hasattr(self.model, \"split_dataset\"):\n self.data = self.model.split_dataset(self.data)\n else:\n self._train_test_edge_split()\n self.data.apply(lambda x: x.to(self.device))\n\n self.max_epoch = args.max_epoch\n self.patience = args.patience\n self.grad_norm = 1.5\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n def train(self):\n best_model = None\n best_score = 0\n patience = 0\n auc_score = 0\n epoch_iter = tqdm(range(self.max_epoch))\n for epoch in epoch_iter:\n train_loss = self._train_step()\n if (epoch + 1) % self.evaluate_interval == 0:\n auc_score = self._test_step(split=\"val\")\n if auc_score > best_score:\n best_score = auc_score\n best_model = copy.deepcopy(self.model)\n patience = 0\n else:\n patience += 1\n if patience == self.patience:\n break\n epoch_iter.set_description(f\"Epoch {epoch: 3d}: TrainLoss: {train_loss: .4f}, AUC: {auc_score: .4f}\")\n self.model = best_model\n test_score = self._test_step(split=\"test\")\n val_score = self._test_step(split=\"val\")\n print(f\"Val: {val_score: .4f}, Test: {test_score: .4f}\")\n return dict(AUC=test_score)\n\n def _train_step(self):\n self.model.train()\n self.optimizer.zero_grad()\n\n train_neg_edges = negative_edge_sampling(self.data.train_edges, self.num_nodes).to(self.device)\n train_pos_edges = self.data.train_edges\n edge_index = torch.cat([train_pos_edges, train_neg_edges], dim=1)\n labels = self.get_link_labels(train_pos_edges.shape[1], train_neg_edges.shape[1], self.device)\n\n if hasattr(self.model, \"link_prediction_loss\"):\n loss = self.model.link_prediction_loss(self.data.x, edge_index, labels)\n else:\n # link prediction loss\n emb = self.model(self.data.x, edge_index)\n pred = (emb[edge_index[0]] * emb[edge_index[1]]).sum(1)\n pred = torch.sigmoid(pred)\n loss = torch.nn.BCELoss()(pred, labels)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)\n self.optimizer.step()\n return loss.item()\n\n def _test_step(self, split=\"val\"):\n self.model.eval()\n 
if split == \"val\":\n pos_edges = self.data.val_edges\n neg_edges = self.data.val_neg_edges\n elif split == \"test\":\n pos_edges = self.data.test_edges\n neg_edges = self.data.test_neg_edges\n else:\n raise ValueError\n train_edges = self.data.train_edges\n edges = torch.cat([pos_edges, neg_edges], dim=1)\n labels = self.get_link_labels(pos_edges.shape[1], neg_edges.shape[1], self.device).long()\n with torch.no_grad():\n emb = self.model(self.data.x, train_edges)\n pred = (emb[edges[0]] * emb[edges[1]]).sum(-1)\n pred = torch.sigmoid(pred)\n\n auc_score = roc_auc_score(labels.cpu().numpy(), pred.cpu().numpy())\n return auc_score\n\n def _train_test_edge_split(self):\n num_nodes = self.data.x.shape[0]\n (\n (train_edges, val_edges, test_edges),\n (val_false_edges, test_false_edges),\n ) = self.train_test_edge_split(self.data.edge_index, num_nodes)\n self.data.train_edges = train_edges\n self.data.val_edges = val_edges\n self.data.test_edges = test_edges\n self.data.val_neg_edges = val_false_edges\n self.data.test_neg_edges = test_false_edges\n\n @staticmethod\n def train_test_edge_split(edge_index, num_nodes, val_ratio=0.1, test_ratio=0.2):\n row, col = edge_index\n mask = row > col\n row, col = row[mask], col[mask]\n num_edges = row.size(0)\n\n perm = torch.randperm(num_edges)\n row, col = row[perm], col[perm]\n\n num_val = int(num_edges * val_ratio)\n num_test = int(num_edges * test_ratio)\n\n index = [[0, num_val], [num_val, num_val + num_test], [num_val + num_test, -1]]\n sampled_rows = [row[l:r] for l, r in index] # noqa E741\n sampled_cols = [col[l:r] for l, r in index] # noqa E741\n\n # sample false edges\n num_false = num_val + num_test\n row_false = np.random.randint(0, num_nodes, num_edges * 5)\n col_false = np.random.randint(0, num_nodes, num_edges * 5)\n\n indices_false = row_false * num_nodes + col_false\n indices_true = row.cpu().numpy() * num_nodes + col.cpu().numpy()\n indices_false = list(set(indices_false).difference(indices_true))\n indices_false = np.array(indices_false)\n row_false = indices_false // num_nodes\n col_false = indices_false % num_nodes\n\n mask = row_false > col_false\n row_false = row_false[mask]\n col_false = col_false[mask]\n\n edge_index_false = np.stack([row_false, col_false])\n if edge_index.shape[1] < num_false:\n ratio = edge_index_false.shape[1] / num_false\n num_val = int(ratio * num_val)\n num_test = int(ratio * num_test)\n val_false_edges = torch.from_numpy(edge_index_false[:, 0:num_val])\n test_fal_edges = torch.from_numpy(edge_index_false[:, num_val : num_test + num_val])\n\n def to_undirected(_row, _col):\n _edge_index = torch.stack([_row, _col], dim=0)\n _r_edge_index = torch.stack([_col, _row], dim=0)\n return torch.cat([_edge_index, _r_edge_index], dim=1)\n\n train_edges = to_undirected(sampled_rows[2], sampled_cols[2])\n val_edges = torch.stack([sampled_rows[0], sampled_cols[0]])\n test_edges = torch.stack([sampled_rows[1], sampled_cols[1]])\n return (train_edges, val_edges, test_edges), (val_false_edges, test_fal_edges)\n\n @staticmethod\n def get_link_labels(num_pos, num_neg, device=None):\n labels = torch.zeros(num_pos + num_neg)\n labels[:num_pos] = 1\n if device is not None:\n labels = labels.to(device)\n return labels.float()\n\n\n@register_task(\"link_prediction\")\nclass LinkPrediction(BaseTask):\n @staticmethod\n def add_args(parser):\n # fmt: off\n parser.add_argument(\"--evaluate-interval\", type=int, default=30)\n parser.add_argument(\"--max-epoch\", type=int, default=3000)\n parser.add_argument(\"--patience\", type=int, 
default=10)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n parser.add_argument(\"--weight-decay\", type=float, default=0)\n\n parser.add_argument(\"--hidden-size\", type=int, default=200) # KG\n parser.add_argument(\"--negative-ratio\", type=int, default=5)\n\n # Arguments for triple-based knowledge graph embedding\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--do_valid\", action=\"store_true\")\n parser.add_argument(\"-de\", \"--double_entity_embedding\", action=\"store_true\")\n parser.add_argument(\"-dr\", \"--double_relation_embedding\", action=\"store_true\")\n\n parser.add_argument(\"-n\", \"--negative_sample_size\", default=128, type=int)\n parser.add_argument(\"-d\", \"--embedding_size\", default=500, type=int)\n parser.add_argument(\"-init\", \"--init_checkpoint\", default=None, type=str)\n parser.add_argument(\"-g\", \"--gamma\", default=12.0, type=float)\n parser.add_argument(\"-adv\", \"--negative_adversarial_sampling\", action=\"store_true\")\n parser.add_argument(\"-a\", \"--adversarial_temperature\", default=1.0, type=float)\n parser.add_argument(\"-b\", \"--batch_size\", default=1024, type=int)\n parser.add_argument(\"--test_batch_size\", default=4, type=int, help=\"valid/test batch size\")\n parser.add_argument(\"--uni_weight\", action=\"store_true\",\n help=\"Otherwise use subsampling weighting like in word2vec\")\n\n parser.add_argument(\"-save\", \"--save_path\", default=None, type=str)\n parser.add_argument(\"--warm_up_steps\", default=None, type=int)\n\n parser.add_argument(\"--save_checkpoint_steps\", default=1000, type=int)\n parser.add_argument(\"--valid_steps\", default=10000, type=int)\n parser.add_argument(\"--log_steps\", default=100, type=int, help=\"train log every xx steps\")\n parser.add_argument(\"--test_log_steps\", default=1000, type=int, help=\"valid/test log every xx steps\")\n # fmt: on\n\n def __init__(self, args, dataset=None, model=None):\n super(LinkPrediction, self).__init__(args)\n\n task_type = select_task(args.model, model)\n if task_type == \"HomoLinkPrediction\":\n self.task = HomoLinkPrediction(args, dataset, model)\n elif task_type == \"KGLinkPrediction\":\n self.task = KGLinkPrediction(args, dataset, model)\n elif task_type == \"TripleLinkPrediction\":\n self.task = TripleLinkPrediction(args, dataset, model)\n elif task_type == \"GNNLinkPrediction\":\n self.task = GNNHomoLinkPrediction(args, dataset, model)\n\n def train(self):\n return self.task.train()\n\n def load_from_pretrained(self):\n pass\n\n def save_checkpoint(self):\n pass\n", "import torch\n\nfrom cogdl import experiment\nfrom cogdl.utils import build_args_from_dict, print_result\n\nDATASET_REGISTRY = {}\n\n\ndef default_parameter():\n cpu = not torch.cuda.is_available()\n args = {\n \"cpu\": cpu,\n \"seed\": [0, 1, 2],\n }\n return build_args_from_dict(args)\n\n\ndef register_func(name):\n def register_func_name(func):\n DATASET_REGISTRY[name] = func\n return func\n\n return register_func_name\n\n\n@register_func(\"cora\")\ndef cora_config(args):\n return args\n\n\n@register_func(\"citeseer\")\ndef citeseer_config(args):\n return args\n\n\n@register_func(\"pubmed\")\ndef pubmed_config(args):\n return args\n\n\ndef run(dataset_name):\n args = default_parameter()\n args = DATASET_REGISTRY[dataset_name](args).__dict__\n results = experiment(task=\"node_classification\", dataset=dataset_name, model=\"chebyshev\", **args)\n return results\n\n\nif __name__ == \"__main__\":\n datasets = [\"cora\", \"citeseer\", \"pubmed\"]\n for 
x in datasets:\n run(x)\n" ]
[ [ "torch.cat", "numpy.dot", "torch.stack", "torch.randperm", "torch.cuda.is_available", "sklearn.metrics.f1_score", "torch.sigmoid", "numpy.linalg.norm", "sklearn.metrics.precision_recall_curve", "numpy.random.randint", "torch.nn.BCELoss", "torch.zeros", "numpy.array", "torch.cuda.empty_cache", "numpy.stack", "sklearn.metrics.roc_auc_score", "torch.unique", "torch.no_grad", "torch.from_numpy", "sklearn.metrics.auc" ], [ "torch.cuda.is_available" ] ]
suriya-1403/Food-Detection
[ "aa52f946150bb949692a2307a55839a6252b35a9" ]
[ "Frontend/app.py" ]
[ "from flask import Flask, render_template, request\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\n\napp = Flask(__name__)\nfood_list = ['donuts', 'pizza', 'samosa']\n\nmodel = load_model('model.hdf5', compile=False)\n\n\ndef predict_label(images):\n for img in images:\n img = image.load_img(img, target_size=(299, 299))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n img /= 255.\n pred = model.predict(img)\n index = np.argmax(pred)\n food_list.sort()\n pred_value = food_list[index]\n return pred_value\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home_page():\n return render_template(\"index.html\")\n\n\[email protected](\"/submit\", methods=['GET', 'POST'])\ndef get_hours():\n if request.method == 'POST':\n img = request.files['my_image']\n img_path = \"static/\" + img.filename\n img.save(img_path)\n images = [img_path]\n p = predict_label(images)\n\n return render_template(\"index.html\", prediction=p, img_path=img_path)\n\n\nif __name__ == '__main__':\n app.run(debug=True)" ]
[ [ "tensorflow.keras.preprocessing.image.load_img", "tensorflow.keras.models.load_model", "tensorflow.keras.preprocessing.image.img_to_array", "numpy.argmax", "numpy.expand_dims" ] ]
RafeyIqbalRahman/Data-Imputation-Techniques
[ "2c6e04136f82df7673948eae9da36b70ffe672a6" ]
[ "SimpleImputer.py" ]
[ "from numpy import isnan\nfrom pandas import read_csv, DataFrame\nfrom sklearn.impute import SimpleImputer\n\n# Load the data\ndf = read_csv('https://raw.githubusercontent.com/jbrownlee/Datasets/master/horse-colic.csv',\n header=None,\n na_values='?',)\n\n# Show the first 5 rows of the data\ndf.head()\n\n# Define X (Predictor variables) and y (Target variable)\ndt = df.values\nix = [i for i in range(dt.shape[1]) if i != 27]\nX, y = dt[:, ix], dt[:, 27]\n\n# Show count of missing values of X (before imputation)\nsum(isnan(X).flatten())\n\n# Define imputer\nimp = SimpleImputer(strategy='median')\n\n# Fit and transform imputer on the dataset\nXtrans = imp.fit_transform(X)\n\n# Show count of missing values of Xtrans (after imputation)\nsum(isnan(Xtrans).flatten())\n\n# Convert NumPy array to Pandas DataFrame\nXtrans = DataFrame(data=Xtrans)\n\n# Show the first 5 rows of the data with imputed values\nXtrans.head()\n" ]
[ [ "sklearn.impute.SimpleImputer", "pandas.read_csv", "numpy.isnan", "pandas.DataFrame" ] ]
Agoniii/tensorflow
[ "4c6ad75c06935faf238b48034194712483114f5f" ]
[ "tensorflow/lite/python/tflite_convert.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python command line interface for running TOCO.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nfrom tensorflow.lite.python import lite\nfrom tensorflow.lite.python import lite_constants\nfrom tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2\nfrom tensorflow.python import keras\nfrom tensorflow.python import tf2\nfrom tensorflow.python.platform import app\n\n\ndef _parse_array(values, type_fn=str):\n if values is not None:\n return [type_fn(val) for val in values.split(\",\") if val]\n return None\n\n\ndef _parse_set(values):\n if values is not None:\n return set([item for item in values.split(\",\") if item])\n return None\n\n\ndef _parse_inference_type(value, flag):\n \"\"\"Converts the inference type to the value of the constant.\n\n Args:\n value: str representing the inference type.\n flag: str representing the flag name.\n\n Returns:\n tf.dtype.\n\n Raises:\n ValueError: Unsupported value.\n \"\"\"\n if value == \"FLOAT\":\n return lite_constants.FLOAT\n if value == \"QUANTIZED_UINT8\":\n return lite_constants.QUANTIZED_UINT8\n raise ValueError(\"Unsupported value for --{0}. 
Only FLOAT and \"\n \"QUANTIZED_UINT8 are supported.\".format(flag))\n\n\ndef _get_toco_converter(flags):\n \"\"\"Makes a TFLiteConverter object based on the flags provided.\n\n Args:\n flags: argparse.Namespace object containing TFLite flags.\n\n Returns:\n TFLiteConverter object.\n\n Raises:\n ValueError: Invalid flags.\n \"\"\"\n # Parse input and output arrays.\n input_arrays = _parse_array(flags.input_arrays)\n input_shapes = None\n if flags.input_shapes:\n input_shapes_list = [\n _parse_array(shape, type_fn=int)\n for shape in flags.input_shapes.split(\":\")\n ]\n input_shapes = dict(zip(input_arrays, input_shapes_list))\n output_arrays = _parse_array(flags.output_arrays)\n\n converter_kwargs = {\n \"input_arrays\": input_arrays,\n \"input_shapes\": input_shapes,\n \"output_arrays\": output_arrays\n }\n\n # Create TFLiteConverter.\n if flags.graph_def_file:\n converter_fn = lite.TFLiteConverter.from_frozen_graph\n converter_kwargs[\"graph_def_file\"] = flags.graph_def_file\n elif flags.saved_model_dir:\n converter_fn = lite.TFLiteConverter.from_saved_model\n converter_kwargs[\"saved_model_dir\"] = flags.saved_model_dir\n converter_kwargs[\"tag_set\"] = _parse_set(flags.saved_model_tag_set)\n converter_kwargs[\"signature_key\"] = flags.saved_model_signature_key\n elif flags.keras_model_file:\n converter_fn = lite.TFLiteConverter.from_keras_model_file\n converter_kwargs[\"model_file\"] = flags.keras_model_file\n else:\n raise ValueError(\"--graph_def_file, --saved_model_dir, or \"\n \"--keras_model_file must be specified.\")\n\n return converter_fn(**converter_kwargs)\n\n\ndef _convert_tf1_model(flags):\n \"\"\"Calls function to convert the TensorFlow 1.X model into a TFLite model.\n\n Args:\n flags: argparse.Namespace object.\n\n Raises:\n ValueError: Invalid flags.\n \"\"\"\n # Create converter.\n converter = _get_toco_converter(flags)\n if flags.inference_type:\n converter.inference_type = _parse_inference_type(flags.inference_type,\n \"inference_type\")\n if flags.inference_input_type:\n converter.inference_input_type = _parse_inference_type(\n flags.inference_input_type, \"inference_input_type\")\n if flags.output_format:\n converter.output_format = _toco_flags_pb2.FileFormat.Value(\n flags.output_format)\n\n if flags.mean_values and flags.std_dev_values:\n input_arrays = converter.get_input_arrays()\n std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)\n\n # In quantized inference, mean_value has to be integer so that the real\n # value 0.0 is exactly representable.\n if converter.inference_type == lite_constants.QUANTIZED_UINT8:\n mean_values = _parse_array(flags.mean_values, type_fn=int)\n else:\n mean_values = _parse_array(flags.mean_values, type_fn=float)\n quant_stats = list(zip(mean_values, std_dev_values))\n if ((not flags.input_arrays and len(input_arrays) > 1) or\n (len(input_arrays) != len(quant_stats))):\n raise ValueError(\"Mismatching --input_arrays, --std_dev_values, and \"\n \"--mean_values. The flags must have the same number of \"\n \"items. The current input arrays are '{0}'. 
\"\n \"--input_arrays must be present when specifying \"\n \"--std_dev_values and --mean_values with multiple input \"\n \"tensors in order to map between names and \"\n \"values.\".format(\",\".join(input_arrays)))\n converter.quantized_input_stats = dict(zip(input_arrays, quant_stats))\n if (flags.default_ranges_min is not None) and (flags.default_ranges_max is\n not None):\n converter.default_ranges_stats = (flags.default_ranges_min,\n flags.default_ranges_max)\n\n if flags.drop_control_dependency:\n converter.drop_control_dependency = flags.drop_control_dependency\n if flags.reorder_across_fake_quant:\n converter.reorder_across_fake_quant = flags.reorder_across_fake_quant\n if flags.change_concat_input_ranges:\n converter.change_concat_input_ranges = (\n flags.change_concat_input_ranges == \"TRUE\")\n\n if flags.allow_custom_ops:\n converter.allow_custom_ops = flags.allow_custom_ops\n if flags.target_ops:\n ops_set_options = lite.OpsSet.get_options()\n converter.target_ops = set()\n for option in flags.target_ops.split(\",\"):\n if option not in ops_set_options:\n raise ValueError(\"Invalid value for --target_ops. Options: \"\n \"{0}\".format(\",\".join(ops_set_options)))\n converter.target_spec.supported_ops.add(lite.OpsSet(option))\n\n if flags.post_training_quantize:\n converter.optimizations = [lite.Optimize.DEFAULT]\n if converter.inference_type == lite_constants.QUANTIZED_UINT8:\n print(\"--post_training_quantize quantizes a graph of inference_type \"\n \"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.\")\n converter.inference_type = lite_constants.FLOAT\n\n if flags.quantize_to_float16:\n converter.target_spec.supported_types = [lite.constants.FLOAT16]\n if not flags.post_training_quantize:\n print(\"--quantize_to_float16 will only take effect with the \"\n \"--post_training_quantize flag enabled.\")\n\n if flags.dump_graphviz_dir:\n converter.dump_graphviz_dir = flags.dump_graphviz_dir\n if flags.dump_graphviz_video:\n converter.dump_graphviz_vode = flags.dump_graphviz_video\n\n if flags.experimental_enable_mlir_converter:\n converter.experimental_enable_mlir_converter = True\n\n # Convert model.\n output_data = converter.convert()\n with open(flags.output_file, \"wb\") as f:\n f.write(output_data)\n\n\ndef _convert_tf2_model(flags):\n \"\"\"Calls function to convert the TensorFlow 2.0 model into a TFLite model.\n\n Args:\n flags: argparse.Namespace object.\n\n Raises:\n ValueError: Unsupported file format.\n \"\"\"\n # Load the model.\n if flags.saved_model_dir:\n converter = lite.TFLiteConverterV2.from_saved_model(flags.saved_model_dir)\n elif flags.keras_model_file:\n model = keras.models.load_model(flags.keras_model_file)\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n\n if flags.experimental_enable_mlir_converter:\n converter.experimental_enable_mlir_converter = True\n\n # Convert the model.\n tflite_model = converter.convert()\n with open(flags.output_file, \"wb\") as f:\n f.write(tflite_model)\n\n\ndef _check_tf1_flags(flags, unparsed):\n \"\"\"Checks the parsed and unparsed flags to ensure they are valid in 1.X.\n\n Raises an error if previously support unparsed flags are found. 
Raises an\n error for parsed flags that don't meet the required conditions.\n\n Args:\n flags: argparse.Namespace object containing TFLite flags.\n unparsed: List of unparsed flags.\n\n Raises:\n ValueError: Invalid flags.\n \"\"\"\n\n # Check unparsed flags for common mistakes based on previous TOCO.\n def _get_message_unparsed(flag, orig_flag, new_flag):\n if flag.startswith(orig_flag):\n return \"\\n Use {0} instead of {1}\".format(new_flag, orig_flag)\n return \"\"\n\n if unparsed:\n output = \"\"\n for flag in unparsed:\n output += _get_message_unparsed(flag, \"--input_file\", \"--graph_def_file\")\n output += _get_message_unparsed(flag, \"--savedmodel_directory\",\n \"--saved_model_dir\")\n output += _get_message_unparsed(flag, \"--std_value\", \"--std_dev_values\")\n output += _get_message_unparsed(flag, \"--batch_size\", \"--input_shapes\")\n output += _get_message_unparsed(flag, \"--dump_graphviz\",\n \"--dump_graphviz_dir\")\n if output:\n raise ValueError(output)\n\n # Check that flags are valid.\n if flags.graph_def_file and (not flags.input_arrays or\n not flags.output_arrays):\n raise ValueError(\"--input_arrays and --output_arrays are required with \"\n \"--graph_def_file\")\n\n if flags.input_shapes:\n if not flags.input_arrays:\n raise ValueError(\"--input_shapes must be used with --input_arrays\")\n if flags.input_shapes.count(\":\") != flags.input_arrays.count(\",\"):\n raise ValueError(\"--input_shapes and --input_arrays must have the same \"\n \"number of items\")\n\n if flags.std_dev_values or flags.mean_values:\n if bool(flags.std_dev_values) != bool(flags.mean_values):\n raise ValueError(\"--std_dev_values and --mean_values must be used \"\n \"together\")\n if flags.std_dev_values.count(\",\") != flags.mean_values.count(\",\"):\n raise ValueError(\"--std_dev_values, --mean_values must have the same \"\n \"number of items\")\n\n if (flags.default_ranges_min is None) != (flags.default_ranges_max is None):\n raise ValueError(\"--default_ranges_min and --default_ranges_max must be \"\n \"used together\")\n\n if flags.dump_graphviz_video and not flags.dump_graphviz_dir:\n raise ValueError(\"--dump_graphviz_video must be used with \"\n \"--dump_graphviz_dir\")\n\n\ndef _get_tf1_flags(parser):\n \"\"\"Returns ArgumentParser for tflite_convert for TensorFlow 1.X.\n\n Args:\n parser: ArgumentParser\n \"\"\"\n # Input file flags.\n input_file_group = parser.add_mutually_exclusive_group(required=True)\n input_file_group.add_argument(\n \"--graph_def_file\",\n type=str,\n help=\"Full filepath of file containing frozen TensorFlow GraphDef.\")\n input_file_group.add_argument(\n \"--saved_model_dir\",\n type=str,\n help=\"Full filepath of directory containing the SavedModel.\")\n input_file_group.add_argument(\n \"--keras_model_file\",\n type=str,\n help=\"Full filepath of HDF5 file containing tf.Keras model.\")\n\n # Model format flags.\n parser.add_argument(\n \"--output_format\",\n type=str.upper,\n choices=[\"TFLITE\", \"GRAPHVIZ_DOT\"],\n help=\"Output file format.\")\n parser.add_argument(\n \"--inference_type\",\n type=str.upper,\n choices=[\"FLOAT\", \"QUANTIZED_UINT8\"],\n help=\"Target data type of real-number arrays in the output file.\")\n parser.add_argument(\n \"--inference_input_type\",\n type=str.upper,\n choices=[\"FLOAT\", \"QUANTIZED_UINT8\"],\n help=(\"Target data type of real-number input arrays. 
Allows for a \"\n \"different type for input arrays in the case of quantization.\"))\n\n # Input and output arrays flags.\n parser.add_argument(\n \"--input_arrays\",\n type=str,\n help=\"Names of the input arrays, comma-separated.\")\n parser.add_argument(\n \"--input_shapes\",\n type=str,\n help=\"Shapes corresponding to --input_arrays, colon-separated.\")\n parser.add_argument(\n \"--output_arrays\",\n type=str,\n help=\"Names of the output arrays, comma-separated.\")\n\n # SavedModel related flags.\n parser.add_argument(\n \"--saved_model_tag_set\",\n type=str,\n help=(\"Comma-separated set of tags identifying the MetaGraphDef within \"\n \"the SavedModel to analyze. All tags must be present. In order to \"\n \"pass in an empty tag set, pass in \\\"\\\". (default \\\"serve\\\")\"))\n parser.add_argument(\n \"--saved_model_signature_key\",\n type=str,\n help=(\"Key identifying the SignatureDef containing inputs and outputs. \"\n \"(default DEFAULT_SERVING_SIGNATURE_DEF_KEY)\"))\n\n # Quantization flags.\n parser.add_argument(\n \"--std_dev_values\",\n type=str,\n help=(\"Standard deviation of training data for each input tensor, \"\n \"comma-separated floats. Used for quantized input tensors. \"\n \"(default None)\"))\n parser.add_argument(\n \"--mean_values\",\n type=str,\n help=(\"Mean of training data for each input tensor, comma-separated \"\n \"floats. Used for quantized input tensors. (default None)\"))\n parser.add_argument(\n \"--default_ranges_min\",\n type=float,\n help=(\"Default value for min bound of min/max range values used for all \"\n \"arrays without a specified range, Intended for experimenting with \"\n \"quantization via \\\"dummy quantization\\\". (default None)\"))\n parser.add_argument(\n \"--default_ranges_max\",\n type=float,\n help=(\"Default value for max bound of min/max range values used for all \"\n \"arrays without a specified range, Intended for experimenting with \"\n \"quantization via \\\"dummy quantization\\\". (default None)\"))\n # quantize_weights is DEPRECATED.\n parser.add_argument(\n \"--quantize_weights\",\n dest=\"post_training_quantize\",\n action=\"store_true\",\n help=argparse.SUPPRESS)\n parser.add_argument(\n \"--post_training_quantize\",\n dest=\"post_training_quantize\",\n action=\"store_true\",\n help=(\n \"Boolean indicating whether to quantize the weights of the \"\n \"converted float model. Model size will be reduced and there will \"\n \"be latency improvements (at the cost of accuracy). (default False)\"))\n parser.add_argument(\n \"--quantize_to_float16\",\n dest=\"quantize_to_float16\",\n action=\"store_true\",\n help=(\"Boolean indicating whether to quantize weights to fp16 instead of \"\n \"the default int8 when post-training quantization \"\n \"(--post_training_quantize) is enabled. (default False)\"))\n # Graph manipulation flags.\n parser.add_argument(\n \"--drop_control_dependency\",\n action=\"store_true\",\n help=(\"Boolean indicating whether to drop control dependencies silently. \"\n \"This is due to TensorFlow not supporting control dependencies. \"\n \"(default True)\"))\n parser.add_argument(\n \"--reorder_across_fake_quant\",\n action=\"store_true\",\n help=(\"Boolean indicating whether to reorder FakeQuant nodes in \"\n \"unexpected locations. Used when the location of the FakeQuant \"\n \"nodes is preventing graph transformations necessary to convert \"\n \"the graph. Results in a graph that differs from the quantized \"\n \"training graph, potentially causing differing arithmetic \"\n \"behavior. 
(default False)\"))\n # Usage for this flag is --change_concat_input_ranges=true or\n # --change_concat_input_ranges=false in order to make it clear what the flag\n # is set to. This keeps the usage consistent with other usages of the flag\n # where the default is different. The default value here is False.\n parser.add_argument(\n \"--change_concat_input_ranges\",\n type=str.upper,\n choices=[\"TRUE\", \"FALSE\"],\n help=(\"Boolean to change behavior of min/max ranges for inputs and \"\n \"outputs of the concat operator for quantized models. Changes the \"\n \"ranges of concat operator overlap when true. (default False)\"))\n\n # Permitted ops flags.\n parser.add_argument(\n \"--allow_custom_ops\",\n action=\"store_true\",\n help=(\"Boolean indicating whether to allow custom operations. When false \"\n \"any unknown operation is an error. When true, custom ops are \"\n \"created for any op that is unknown. The developer will need to \"\n \"provide these to the TensorFlow Lite runtime with a custom \"\n \"resolver. (default False)\"))\n parser.add_argument(\n \"--target_ops\",\n type=str,\n help=(\"Experimental flag, subject to change. Set of OpsSet options \"\n \"indicating which converter to use. Options: {0}. One or more \"\n \"option may be specified. (default set([OpsSet.TFLITE_BUILTINS]))\"\n \"\".format(\",\".join(lite.OpsSet.get_options()))))\n\n # Logging flags.\n parser.add_argument(\n \"--dump_graphviz_dir\",\n type=str,\n help=(\"Full filepath of folder to dump the graphs at various stages of \"\n \"processing GraphViz .dot files. Preferred over --output_format=\"\n \"GRAPHVIZ_DOT in order to keep the requirements of the output \"\n \"file.\"))\n parser.add_argument(\n \"--dump_graphviz_video\",\n action=\"store_true\",\n help=(\"Boolean indicating whether to dump the graph after every graph \"\n \"transformation\"))\n\n\ndef _get_tf2_flags(parser):\n \"\"\"Returns ArgumentParser for tflite_convert for TensorFlow 2.0.\n\n Args:\n parser: ArgumentParser\n \"\"\"\n # Input file flags.\n input_file_group = parser.add_mutually_exclusive_group(required=True)\n input_file_group.add_argument(\n \"--saved_model_dir\",\n type=str,\n help=\"Full path of the directory containing the SavedModel.\")\n input_file_group.add_argument(\n \"--keras_model_file\",\n type=str,\n help=\"Full filepath of HDF5 file containing tf.Keras model.\")\n\n\ndef _get_parser():\n \"\"\"Returns an ArgumentParser for tflite_convert.\"\"\"\n parser = argparse.ArgumentParser(\n description=(\"Command line tool to run TensorFlow Lite Converter.\"))\n\n # Output file flag.\n parser.add_argument(\n \"--output_file\",\n type=str,\n help=\"Full filepath of the output file.\",\n required=True)\n\n if tf2.enabled():\n _get_tf2_flags(parser)\n else:\n _get_tf1_flags(parser)\n\n # Enable MLIR-TFLite converter.\n parser.add_argument(\n \"--experimental_enable_mlir_converter\",\n action=\"store_true\",\n help=(\"Experimental flag, subject to change. 
Enables the MLIR converter \"\n \"instead of the TOCO converter.\"))\n return parser\n\n\ndef run_main(_):\n \"\"\"Main in toco_convert.py.\"\"\"\n parser = _get_parser()\n tflite_flags, unparsed = parser.parse_known_args(args=sys.argv[1:])\n\n if tf2.enabled():\n _convert_tf2_model(tflite_flags)\n else:\n try:\n _check_tf1_flags(tflite_flags, unparsed)\n except ValueError as e:\n parser.print_usage()\n file_name = os.path.basename(sys.argv[0])\n sys.stderr.write(\"{0}: error: {1}\\n\".format(file_name, str(e)))\n sys.exit(1)\n _convert_tf1_model(tflite_flags)\n\n\ndef main():\n app.run(main=run_main, argv=sys.argv[:1])\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model", "tensorflow.python.keras.models.load_model", "tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model", "tensorflow.lite.python.lite.OpsSet.get_options", "tensorflow.lite.python.lite.OpsSet", "tensorflow.python.platform.app.run", "tensorflow.python.tf2.enabled", "tensorflow.lite.toco.toco_flags_pb2.FileFormat.Value" ] ]
joncrawf/mime
[ "7be7b1351cabaacc17caddbb6f808f3d37721a81" ]
[ "metaworld/envs/mujoco/sawyer_xyz/sawyer_basketball.py" ]
[ "import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.env_util import get_asset_full_path\nfrom metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv, _assert_task_is_set, _sparse_task\n\n\nclass SawyerBasketballEnv(SawyerXYZEnv):\n\n def __init__(self):\n\n liftThresh = 0.3\n goal_low = (-0.1, 0.85, 0.15)\n goal_high = (0.1, 0.9+1e-7, 0.15)\n hand_low = (-0.5, 0.40, 0.05)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.1, 0.6, 0.03)\n obj_high = (0.1, 0.7, 0.03)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_angle': .3,\n 'obj_init_pos': np.array([0, 0.6, 0.03], dtype=np.float32),\n 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32),\n }\n self.goal = np.array([0, 0.9, 0.15])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.obj_init_angle = self.init_config['obj_init_angle']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n self.max_path_length = 150\n self.liftThresh = liftThresh\n\n self.obj_and_goal_space = Box(\n np.hstack((obj_low, goal_low)),\n np.hstack((obj_high, goal_high)),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n self.observation_space = Box(\n np.hstack((self.hand_low, obj_low, obj_low, goal_low)),\n np.hstack((self.hand_high, obj_high, obj_high, goal_high)),\n )\n\n @property\n def model_name(self):\n return get_asset_full_path('sawyer_xyz/sawyer_basketball.xml')\n\n @_assert_task_is_set\n @_sparse_task\n def step(self, action):\n self.set_xyz_action(action[:3])\n self.do_simulation([action[-1], -action[-1]])\n # The marker seems to get reset every time you do a simulation\n self._set_goal_marker(self._state_goal)\n ob = self._get_obs()\n obs_dict = self._get_obs_dict()\n reward, reachDist, pickRew, placingDist = self.compute_reward(action, obs_dict)\n self.curr_path_length +=1\n info = {'reachDist': reachDist, 'goalDist': placingDist, 'epRew' : reward, 'pickRew':pickRew, 'success': float(placingDist <= 0.08)}\n info['goal'] = self.goal\n return ob, reward, self.curr_path_length == self.max_path_length, info\n\n def _get_pos_objects(self):\n return self.data.get_geom_xpos('objGeom')\n\n def _set_goal_marker(self, goal):\n self.data.site_xpos[self.model.site_name2id('goal')] = (\n goal[:3]\n )\n\n def _set_obj_xyz(self, pos):\n qpos = self.data.qpos.flat.copy()\n qvel = self.data.qvel.flat.copy()\n qpos[9:12] = pos.copy()\n qvel[9:15] = 0\n self.set_state(qpos, qvel)\n\n def reset_model(self):\n self._reset_hand()\n\n basket_pos = self.goal.copy()\n self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos\n self._state_goal = self.data.site_xpos[self.model.site_name2id('goal')]\n\n self.objHeight = self.data.get_geom_xpos('objGeom')[2]\n self.heightTarget = self.objHeight + self.liftThresh\n\n if self.random_init:\n goal_pos = self._get_state_rand_vec()\n basket_pos = goal_pos[3:]\n while np.linalg.norm(goal_pos[:2] - basket_pos[:2]) < 0.15:\n goal_pos = self._get_state_rand_vec()\n basket_pos = goal_pos[3:]\n self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))\n self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos\n self._state_goal = basket_pos + np.array([0, -0.05, 0.1])\n\n self._set_goal_marker(self._state_goal)\n self._set_obj_xyz(self.obj_init_pos)\n self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._state_goal)) + self.heightTarget\n return self._get_obs()\n\n def 
_reset_hand(self):\n for _ in range(10):\n self.data.set_mocap_pos('mocap', self.hand_init_pos)\n self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))\n self.do_simulation([-1,1], self.frame_skip)\n rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')\n self.init_fingerCOM = (rightFinger + leftFinger)/2\n self.pickCompleted = False\n\n def compute_reward(self, actions, obs):\n obs = obs['state_observation']\n\n objPos = obs[3:6]\n\n rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')\n fingerCOM = (rightFinger + leftFinger)/2\n\n heightTarget = self.heightTarget\n goal = self._state_goal\n\n reachDist = np.linalg.norm(objPos - fingerCOM)\n placingDist = np.linalg.norm(objPos - goal)\n assert np.all(goal == self.get_site_pos('goal'))\n\n def reachReward():\n reachRew = -reachDist\n reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])\n zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])\n if reachDistxy < 0.05:\n reachRew = -reachDist\n else:\n reachRew = -reachDistxy - 2*zRew\n\n #incentive to close fingers when reachDist is small\n if reachDist < 0.05:\n reachRew = -reachDist + max(actions[-1],0)/50\n return reachRew , reachDist\n\n def pickCompletionCriteria():\n tolerance = 0.01\n if objPos[2] >= (heightTarget - tolerance):\n return True\n else:\n return False\n\n if pickCompletionCriteria():\n self.pickCompleted = True\n\n\n def objDropped():\n return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02)\n\n def orig_pickReward():\n hScale = 100\n if self.pickCompleted and not(objDropped()):\n return hScale*heightTarget\n elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :\n return hScale* min(heightTarget, objPos[2])\n else:\n return 0\n\n def placeReward():\n c1 = 1000 ; c2 = 0.01 ; c3 = 0.001\n cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped())\n if cond:\n placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3))\n placeRew = max(placeRew,0)\n return [placeRew , placingDist]\n else:\n return [0 , placingDist]\n\n reachRew, reachDist = reachReward()\n pickRew = orig_pickReward()\n placeRew , placingDist = placeReward()\n assert ((placeRew >=0) and (pickRew>=0))\n reward = reachRew + pickRew + placeRew\n return [reward, reachDist, pickRew, placingDist]\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.linalg.norm", "numpy.exp", "numpy.hstack" ] ]
felixrlopezm/Udacity-Nanodegree-program-AI-programming-with-Python
[ "a87fcf6e9c37cf60ae6ff01c9909313b5d1b987a" ]
[ "predict_functions.py" ]
[ "# python3\n#\n# PROGRAMMER: Félix Ramón López Martínez\n# DATE CREATED: 10/11/2020\n# REVISED DATE:\n# PURPOSE: This is the repository of all the functions called fron predict.py.\n#\n##\n\n# Imports python modules\nimport argparse\nfrom torchvision import models\nimport torch\nfrom torch import nn\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef get_input_args():\n \"\"\"\n Retrieves and parses the command line arguments provided by the user when\n they run the program from a terminal window. If the user fails to provide\n some or all of the arguments, then the default values are used for the\n missing arguments.\n This function returns these arguments as an ArgumentParser object.\n Returns:\n parse_args() -data structure that stores the command line arguments object\n \"\"\"\n # Create Parse\n parser = argparse.ArgumentParser(description='Retrieving inputs from user')\n\n # Create command line arguments\n parser.add_argument('image_path', type = str, default = './predict.jpg',\n help = 'image path to the image to predict (default: ./predict.jpg)')\n parser.add_argument('checkpoint_file', type = str, default = 'vgg16_model_checkpoint.pth',\n help = 'Checkpoint file (default: vgg16_model_checkpoint.pth)')\n parser.add_argument('--topk', type = int, default = 5,\n help = 'Top k most likely categories (default: 5)')\n parser.add_argument('--category_names', type = str, default = 'cat_to_name.json',\n help = 'Categories to name file (default: cat_to_name.json)')\n parser.add_argument('--arch', type = str, default = 'VGG16',\n help = 'CNN Model Architecture: vgg16, alexnet or densenet161 (default: VGG16)')\n\n return parser.parse_args()\n\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Torch tensor\n '''\n # Open a PIL image\n img = Image.open(image)\n\n # Resizing keeping the aspect ratio\n img_width, img_height = img.size\n ratio = img_width / img_height\n if img_width < img_height:\n img = img.resize((256, int(256 / ratio)))\n else:\n img = img.resize((int(256 * ratio) , 256))\n\n # Center cropping\n center_x, center_y = img.size\n left = max(0, int(center_x - 224)/2)\n upper = max(0, int(center_y - 224)/2)\n\n img = img.crop((left, upper, left + 224, upper + 224))\n\n # Turning RGB values between [0, 1]\n img = np.array(img) / 255\n\n # Normalizing acc. 
to ImageNet standards\n mean_n = np.array([0.485, 0.456, 0.406])\n std_n = np.array([0.229, 0.224, 0.225])\n img_n = ((img - mean_n) / std_n)\n\n # Putting color cannal information first\n img_n = img_n.transpose(2,0,1)\n\n # From numpy ndarray to torch tensor\n img_tch = torch.from_numpy(np.array([img_n])).float()\n\n return img_tch\n\n\ndef load_checkpoint_file(filepath, model_arch):\n ''' This function loads the checkpoint_file, loar a pre-trained CNN\n according to the input CNN architecture, creates a customized classifier,\n replace it in the pre-trained CNN model and finally loads the checkpoint\n in the model.\n It returns the rebuilt model\n '''\n # Reading checkpoint file\n checkpoint = torch.load(filepath)\n\n # Loading paramenters\n pretrained_model = checkpoint['pretrained_model']\n input_size = checkpoint['input_size']\n layer1_size = checkpoint['layer1_size']\n layer2_size = checkpoint['layer2_size']\n output_size = checkpoint['output_size']\n dropout = checkpoint['dropout']\n\n # Load pre-trained model from torchvision\n if model_arch == 'vgg16':\n model = models.vgg16(pretrained=True)\n\n elif model_arch == 'alexnet':\n model = models.alexnet(pretrained=True)\n\n elif model_arch == 'densenet161':\n model = models.densenet161(pretrained=True)\n else:\n model = models.vgg16(pretrained=True)\n print('Invalid model name input in --arch. Loaded VGG16 model instead')\n model_name = 'vgg16'\n\n print('Loaded {} pretrained model'.format(model_arch))\n\n # Freeze parameters to not backprop through them\n for param in model.parameters():\n param.requires_grad = False\n\n # Creation of the classifier to substitue that from the pre-trained model\n classifier = nn.Sequential(nn.Linear(input_size, layer1_size),\n nn.ReLU(),\n nn.Dropout(p = dropout),\n nn.Linear(layer1_size, layer2_size),\n nn.ReLU(),\n nn.Dropout(p = dropout),\n nn.Linear(layer2_size, output_size),\n nn.LogSoftmax(dim=1))\n\n model.classifier = classifier\n\n # Loading data in the model\n state_dict = checkpoint['state_dict']\n model.load_state_dict(state_dict)\n model.class_to_idx = checkpoint['class_to_idx']\n\n return model\n\ndef predict(image, model, topk):\n ''' Predict the likely probabilities and category of an image using a\n trained deep learning model.\n It returns the probability and category prediction\n '''\n # Setting model to evaluation mode\n model.eval()\n\n # Turn off gradients before prediction\n with torch.no_grad():\n output = model.forward(image)\n\n # Calculating the class probabilities for img\n ps = torch.exp(output)\n\n # Extracting topk probabilities (values, indices)\n ps_topk = torch.topk(ps, topk)[0].tolist()[0]\n index_topk = torch.topk(ps, topk)[1].tolist()[0]\n\n # Transforminng index_topk to image class_topk\n indices = []\n for i in range(len(model.class_to_idx)):\n indices.append(list(model.class_to_idx.items())[i][0])\n cat_topk = [indices[index] for index in index_topk]\n\n return ps_topk, cat_topk\n\ndef plotting(image_path, ps_topk, labels):\n ''' This function plots the image to predict and then a horizontal bar chart\n with the top k probabilites output by the prediction algorithm.\n '''\n plt.figure(figsize = [10, 8])\n\n # Show image to predict\n image = Image.open(image_path)\n ax1 = plt.subplot(2, 1, 1)\n ax1.axis('off')\n ax1.imshow(image)\n\n # Show top k predictions\n labels.reverse()\n ps_topk.reverse()\n ax2 = plt.subplot(2, 1, 2)\n ax2.set_title('Prediction')\n ax2.barh(labels, ps_topk);\n\n plt.show(block = True)\n\n return\n" ]
[ [ "torch.nn.Linear", "torch.nn.LogSoftmax", "numpy.array", "torch.nn.Dropout", "torch.no_grad", "matplotlib.pyplot.figure", "torch.nn.ReLU", "torch.topk", "torch.load", "matplotlib.pyplot.show", "torch.exp", "matplotlib.pyplot.subplot" ] ]
SeaOfOcean/FastNN
[ "73b70c633117ccff4f1a270f461bacb96e0fc4ee" ]
[ "moe/trainer.py" ]
[ "# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Training entry.\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl import flags\n\nimport tensorflow.compat.v1 as tf\nimport epl\n\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\ntry:\n from tensor2tensor import models # pylint: disable=unused-import\nexcept: # pylint: disable=bare-except\n pass\nfrom tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import optimize\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import t2t_model\nfrom tensor2tensor.utils import trainer_lib\nfrom model_config import t5_large # pylint: disable=unused-import\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"random_seed\", None, \"Random seed.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations in a TPU training loop.\")\nflags.DEFINE_bool(\"generate_data\", False, \"Generate data before training?\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory, used if --generate_data.\")\nflags.DEFINE_integer(\"inter_op_parallelism_threads\", 0,\n \"Number of inter_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\nflags.DEFINE_integer(\"intra_op_parallelism_threads\", 0,\n \"Number of intra_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\n\n# To maintain compatibility with some internal libs, we guard against these flag\n# definitions possibly erroring. Apologies for the ugliness.\ntry:\n flags.DEFINE_string(\"master\", \"\", \"Address of TensorFlow master.\")\n flags.DEFINE_string(\"output_dir\", \"\", \"Base output directory for run.\")\n flags.DEFINE_string(\"schedule\", \"train_and_eval\",\n \"Method of Experiment to run.\")\n flags.DEFINE_integer(\"eval_steps\", 100,\n \"Number of steps in evaluation. By default, eval will \"\n \"stop after eval_steps or when it runs through the eval \"\n \"dataset once in full, whichever comes first, so this \"\n \"can be a very large number.\")\nexcept: # pylint: disable=bare-except\n pass\n\n\n# Note than in open-source TensorFlow, the dash gets converted to an underscore,\n# so access is FLAGS.job_dir.\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"Number of local steps after which progress is printed \"\n \"out\")\n\nflags.DEFINE_integer(\"op_split\", 0, \"whether to shard moe layer or not.\")\nflags.DEFINE_bool(\"enable_fp16\", False, \"\")\n\n\ndef set_hparams_from_args(args):\n \"\"\"Set hparams overrides from unparsed args list.\"\"\"\n if not args:\n return\n\n hp_prefix = \"--hp_\"\n tf.logging.info(\"Found unparsed command-line arguments. 
Checking if any \"\n \"start with %s and interpreting those as hparams \"\n \"settings.\", hp_prefix)\n\n pairs = []\n i = 0\n while i < len(args):\n arg = args[i]\n if arg.startswith(hp_prefix):\n pairs.append((arg[len(hp_prefix):], args[i+1]))\n i += 2\n else:\n tf.logging.warn(\"Found unknown flag: %s\", arg)\n i += 1\n\n as_hparams = \",\".join([\"%s=%s\" % (key, val) for key, val in pairs])\n if FLAGS.hparams:\n as_hparams = \",\" + as_hparams\n FLAGS.hparams += as_hparams\n\n\ndef create_hparams():\n \"\"\"Create hparams.\"\"\"\n hparams_path = os.path.join(FLAGS.output_dir, \"hparams.json\")\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,\n hparams_path=hparams_path)\n\n\ndef create_run_config():\n \"\"\"Create a run config.\n\n Returns:\n a run config\n \"\"\"\n session_config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=FLAGS.log_device_placement,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)\n run_config = tf.estimator.RunConfig(save_checkpoints_steps=5,\n model_dir=FLAGS.output_dir,\n session_config=session_config)\n run_config.use_tpu = False\n return run_config\n\n\ndef generate_data():\n # Generate data if requested.\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n tf.gfile.MakeDirs(data_dir)\n tf.gfile.MakeDirs(tmp_dir)\n\n problem_name = FLAGS.problem\n tf.logging.info(\"Generating data for %s\" % problem_name)\n registry.problem(problem_name).generate_data(data_dir, tmp_dir)\n\n\ndef is_chief():\n schedules = [\"train\", \"train_and_eval\", \"continuous_train_and_eval\"]\n return FLAGS.worker_id == 0 and FLAGS.schedule in schedules\n\n\ndef save_metadata(hparams):\n \"\"\"Saves FLAGS and hparams to output_dir.\"\"\"\n output_dir = os.path.expanduser(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n # Save hparams as hparams.json\n hparams_fname = os.path.join(output_dir, \"hparams.json\")\n with tf.gfile.Open(hparams_fname, \"w\") as f:\n f.write(hparams.to_json(indent=0, sort_keys=True))\n tf.logging.info(\"Write hparams.json to {}\".format(output_dir))\n\n\ndef main(argv):\n config = epl.Config({\"cluster.colocate_split_and_replicate\": True})\n epl.init(config)\n FLAGS.worker_id = epl.Env.get().cluster.worker_index\n FLAGS.worker_gpu = epl.Env.get().cluster.total_gpu_num\n epl.set_default_strategy(epl.replicate(FLAGS.worker_gpu))\n\n # Create HParams.\n if argv:\n set_hparams_from_args(argv[1:])\n if FLAGS.schedule != \"run_std_server\":\n hparams = create_hparams()\n\n if FLAGS.schedule == \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_START)\n else:\n raise RuntimeError(\"Support training tasks only for now, you can define tasks in other modes.\")\n trainer_lib.set_random_seed(FLAGS.random_seed)\n\n hparams.add_hparam(\"data_dir\", FLAGS.data_dir)\n hparams.add_hparam(\"schedule\", FLAGS.schedule)\n hparams.add_hparam(\"train_steps\", FLAGS.train_steps)\n hparams.add_hparam(\"warm_start_from\", None)\n trainer_lib.add_problem_hparams(hparams, FLAGS.problem)\n\n # Dataset generation.\n if FLAGS.generate_data:\n generate_data()\n\n def model_fn_replicate(features, labels, mode):\n model_fn = t2t_model.T2TModel.make_estimator_model_fn(FLAGS.model, hparams)\n return model_fn(features, labels, mode)\n\n if is_chief():\n save_metadata(hparams)\n\n estimator = tf.estimator.Estimator(model_fn=model_fn_replicate, config=create_run_config())\n 
hooks = []\n hooks.append(tf.train.StepCounterHook(every_n_steps=FLAGS.log_step_count_steps))\n\n optimize.log_variable_sizes(verbose=True)\n\n problem = hparams.problem\n train_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.TRAIN,\n hparams)\n\n estimator.train(train_input_fn, max_steps=hparams.train_steps, hooks=hooks)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n" ]
[ [ "tensorflow.compat.v1.estimator.RunConfig", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.logging.warn", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.train.StepCounterHook", "tensorflow.compat.v1.gfile.Open", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.compat.v1.app.run" ] ]
ZwX1616/mxnet-SSD
[ "fd89424d711b1ec4f02c35987212d1038e69e905" ]
[ "deploy/python/live.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport sys\nimport cv2\nimport mxnet as mx\nimport numpy as np\nimport random\nfrom pathlib import Path\nimport time\n\nmillisecond = lambda x: int(round(x * 1000))\n\nclass Detector(object):\n \"\"\"\n SSD detector which hold a detection network and wraps detection API\n\n Parameters:\n ----------\n symbol : mx.Symbol\n detection network Symbol\n model_prefix : str\n name prefix of trained model\n epoch : int\n load epoch of trained model\n img_path : str\n image path\n data_shape : int\n input data resize shape\n mean_pixels : tuple of float\n (mean_r, mean_g, mean_b)\n threshold: float\n thresh for scores\n batch_size : int\n run detection with batch size\n ctx : mx.ctx\n device to use, if None, use mx.cpu() as default context\n \"\"\"\n def __init__(self, symbol, model_prefix, epoch, data_shape=300, mean_pixels=(123, 117, 104), threshold=0.2, batch_size=1, ctx=None):\n self.ctx = ctx\n if self.ctx is None:\n self.ctx = mx.cpu()\n self.data_shape = data_shape\n self.threshold = threshold\n self.mean_pixels = mean_pixels\n self.batch_size = batch_size\n self.dets = None\n self.load_symbol, self.args, self.auxs = mx.model.load_checkpoint(\n model_prefix, epoch)\n self.args, self.auxs = self.ch_dev(self.args, self.auxs, self.ctx)\n\n def ch_dev(self, arg_params, aux_params, ctx):\n new_args = dict()\n new_auxs = dict()\n for k, v in arg_params.items():\n new_args[k] = v.as_in_context(ctx)\n for k, v in aux_params.items():\n new_auxs[k] = v.as_in_context(ctx)\n return new_args, new_auxs\n\n def make_input(self, input_image):\n img = cv2.resize(input_image, (self.data_shape, self.data_shape))\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n img = np.swapaxes(img, 0, 2)\n img = np.swapaxes(img, 1, 2) # change to (channel, height, width)\n img = img[np.newaxis, :]\n return img\n\n def nms(self, boxes, overlap_threshold, mode='Union'):\n \"\"\"non max suppression\n\n Paremeters:\n ----------\n box: numpy array n x 5\n input bbox array, x1,y1,x2,y2,score\n overlap_threshold: float number\n threshold of overlap\n mode: float number\n how to compute overlap ratio, 'Union' or 'Min'\n Returns:\n -------\n index array of the selected bbox\n \"\"\"\n if len(boxes) == 0:\n return []\n if boxes.dtype.kind == \"i\":\n boxes = boxes.astype(\"float\")\n pick = []\n score, x1, y1, x2, y2 = [boxes[:, i+1] for i in range(5)]\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(score)\n while len(idxs) > 0:\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n inter = w * h\n if mode == 'Min':\n overlap = inter / np.minimum(area[i], area[idxs[:last]])\n else:\n overlap = inter / (area[i] + area[idxs[:last]] - inter)\n idxs = np.delete(idxs, np.concatenate(([last],np.where(overlap > overlap_threshold)[0])))\n return pick\n\n def im_detect(self, input_image):\n \"\"\"\n wrapper for detecting multiple images\n\n Returns:\n ----------\n list of detection results in format [det0, det1...], det is in\n format np.array([id, score, xmin, ymin, xmax, ymax]...)\n \"\"\"\n start = time.clock()\n im_data = self.make_input(input_image)\n print(\"make inputs costs: %dms\" % millisecond(time.clock()-start))\n\n start = time.clock()\n self.args[\"data\"] = mx.nd.array(im_data, self.ctx)\n exe = self.load_symbol.bind(self.ctx, self.args, 
args_grad=None,\n grad_req=\"null\", aux_states=self.auxs)\n print(\"bind data costs: %dms\" % millisecond(time.clock()-start))\n\n start = time.clock()\n exe.forward()\n total_dets = exe.outputs[0][0]\n # https://github.com/apache/incubator-mxnet/issues/6974\n total_dets.wait_to_read()\n print(\"network forward costs: %dms\" % millisecond(time.clock()-start))\n\n start = time.clock()\n total_dets_np = total_dets.asnumpy()\n selected_dets = total_dets_np[total_dets_np[:, 0] == 1]\n selected_dets = selected_dets[selected_dets[:, 1] >= self.threshold]\n picked_ids = self.nms(selected_dets, overlap_threshold=0.5)\n self.dets = selected_dets[picked_ids]\n print(\"results post-processing costs: %dms\" % millisecond(time.clock()-start))\n\n return self.dets\n\n def save_results(self, input_img, frame_num=0, save_path=\"./\", color='red'):\n\n if len(self.dets) == 0:\n return\n\n height, width, _ = input_img.shape\n colors = dict()\n\n for det in self.dets:\n cls_id, score, box = int(det[0]), det[1], det[2:]\n if cls_id not in colors:\n colors[cls_id] = (int(random.random()*255), int(random.random()*255), int(random.random()*255))\n left, top = int(box[0] * width), int(box[1] * height)\n right, bottom = int(box[2] * width), int(box[3] * height)\n cv2.rectangle(input_img, (left, top), (right, bottom), colors[cls_id], 1)\n cv2.putText(input_img, '%d:%.3f'%(cls_id,score), (left, top+30), cv2.FONT_HERSHEY_SIMPLEX, 1, colors[cls_id], 1)\n\n det_img_path = Path(save_path, \"frame_det_%d.png\" % (frame_num))\n if not det_img_path.parent.exists():\n det_img_path.parent.mkdir()\n cv2.imwrite(det_img_path.as_posix(), input_img)\n print(\"save results at %s\" % det_img_path)\n\ndef main(*args, **kwargs):\n\n video_path = args[0]\n # video_path = '../../data/videos/ch01_20180508113155.mp4'\n assert Path(video_path).exists(), \"%s not exists\" % video_path\n\n epoch_num = 128\n threshold = 0.65\n data_shape = 300\n ctx = mx.gpu(0)\n # model_prefix = '/app/model/deploy_ssd-densenet-tiny-ebike-detection'\n # model_prefix = '/app/model/deploy_ssd-densenet-two-bikes'\n model_prefix = '/app/model/deploy_deploy_ssd-densenet-tiny-ebike-detection-nms'\n ped_detector = Detector(symbol=None, model_prefix=model_prefix, epoch=epoch_num, threshold=threshold, data_shape=data_shape, ctx=ctx)\n\n cap = cv2.VideoCapture(video_path)\n frame_num = 0\n while(cap.isOpened()):\n ret, frame = cap.read()\n if frame is None:\n break\n frame_num += 1\n if frame_num % 30 == 0:\n # img = cv2.imread(self.img_path)\n start = time.clock()\n ped_detector.im_detect(frame)\n print(\"total time used: %.4fs\" % (time.clock()-start))\n ped_detector.save_results(frame, frame_num, 'noon-video4-test3')\n cap.release()\n\nif __name__ == '__main__':\n print(\"load video from %s\" % sys.argv[1])\n main(sys.argv[1])\n" ]
[ [ "numpy.minimum", "numpy.where", "numpy.swapaxes", "numpy.argsort", "numpy.maximum" ] ]
MhmudAlpurd/IC-pytorchlite
[ "76ba0e04a423acbfe960dcd8dd9a0bc47c3893e7" ]
[ "ASLRecognition/scripts/test.py" ]
[ "'''\nUSAGE:\npython test.py --img A_test.jpg\n'''\nimport torch\nimport joblib\nimport torch.nn as nn\nimport numpy as np\nimport cv2\nimport argparse\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\nimport time\nimport cnn_models\nfrom PIL import Image\n\n# construct the argument parser and parse the arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--img', default='../app/src/main/assets/C1.jpg', type=str,\n help='path for the image to test on')\nargs = vars(parser.parse_args())\n\naug = transforms.Compose([\n transforms.Resize((224, 224)),\n])\n\n# load label binarizer\nlb = joblib.load('lb.pkl')\nmodel = cnn_models.CustomCNN()\nmodel.load_state_dict(torch.load('asl.pth'))\nprint(model)\nprint('Model loaded')\n\nimage = Image.open(f\"{args['img']}\")\nimage = aug(image)\nimage = np.transpose(image, (2, 0, 1)).astype(np.float32)\nimage = torch.tensor(image, dtype=torch.float)\nimage = image.unsqueeze(0)\nprint(image.shape)\n\nstart = time.time()\noutputs = model(image)\n_, preds = torch.max(outputs.data, 1)\nprint('PREDS', preds)\nprint(f\"Predicted output: {lb.classes_[preds]}\")\nend = time.time()\nprint(f\"{(end-start):.3f} seconds\")\n" ]
[ [ "numpy.transpose", "torch.tensor", "torch.load", "torch.max" ] ]
yagamimisa/dfc2019
[ "b823b6b1ac9215f7477ea38b5bf39919c7e1c02c" ]
[ "track2/make_track2_npz.py" ]
[ "# convert folders of images to npz train/validation sets\r\n# for training DenseMapNet and ICNet models\r\nfrom os.path import join\r\nfrom pathlib import Path\r\nfrom sys import stderr\r\n\r\nimport numpy as np\r\nimport os\r\nfrom copy import deepcopy\r\nfrom tqdm import tqdm\r\nimport tifffile\r\nimport glob\r\n\r\nTRAIN_FRACTION = 0.95\r\nMAX_IMAGES_PER_TRAIN_FILE = 200\r\n\r\n\r\ndef sequential_to_las_labels(seq_labels):\r\n labels = deepcopy(seq_labels)\r\n labels[:] = 65\r\n labels[seq_labels == 0] = 2 # ground\r\n labels[seq_labels == 1] = 5 # trees\r\n labels[seq_labels == 2] = 6 # building roof\r\n labels[seq_labels == 3] = 9 # water\r\n labels[seq_labels == 4] = 17 # bridge / elevated road\r\n return labels\r\n\r\n\r\ndef las_to_sequential_labels(las_labels):\r\n labels = deepcopy(las_labels)\r\n labels[:] = 5 # unlabeled\r\n labels[las_labels == 2] = 0 # ground\r\n labels[las_labels == 5] = 1 # trees\r\n labels[las_labels == 6] = 2 # building roof\r\n labels[las_labels == 9] = 3 # water\r\n labels[las_labels == 17] = 4 # bridge / elevated road\r\n return labels\r\n\r\n\r\n# create npz files\r\ndef convert_files_to_npz(input_folder, out_folder, out_prefix):\r\n # get list of files\r\n files = glob.glob(input_folder + '*LEFT_RGB*.tif')\r\n num = len(files)\r\n print('Number of images = ', num)\r\n if num == 0:\r\n print(\"No matching files found\", file=stderr)\r\n return\r\n\r\n # determine size of train and test sets\r\n train_fraction = TRAIN_FRACTION\r\n num_train = int(train_fraction * num)\r\n max_per_train = MAX_IMAGES_PER_TRAIN_FILE\r\n\r\n print('Number of training images = ', num_train)\r\n print('Number of validation images = ', num - num_train)\r\n\r\n # initialize lists and counters\r\n count = 0\r\n num_files = 0\r\n disparities = []\r\n lefts = []\r\n rights = []\r\n left_categories = []\r\n left_agls = []\r\n\r\n # Shuffle the file list\r\n indices = np.arange(num)\r\n np.random.seed(0)\r\n np.random.shuffle(indices)\r\n files = [files[i] for i in indices]\r\n\r\n # loop on all files\r\n for i in tqdm(range(num)):\r\n\r\n # get file names\r\n left_name = os.path.basename(files[i])\r\n start = left_name.find('LEFT_RGB')\r\n right_name = input_folder + left_name[0:start] + 'RIGHT_RGB.tif'\r\n left_agl_name = input_folder + left_name[0:start] + 'LEFT_AGL.tif'\r\n disparity_name = input_folder + left_name[0:start] + 'LEFT_DSP.tif'\r\n left_cls_name = input_folder + left_name[0:start] + 'LEFT_CLS.tif'\r\n left_name = input_folder + left_name\r\n\r\n # read files\r\n left = np.array(tifffile.imread(left_name))\r\n right = np.array(tifffile.imread(right_name))\r\n left_cls = np.array(tifffile.imread(left_cls_name))\r\n disparity = np.array(tifffile.imread(disparity_name))\r\n left_agl = np.array(tifffile.imread(left_agl_name))\r\n\r\n # convert LAS labels to sequential labeling scheme for training \r\n left_labels = las_to_sequential_labels(left_cls)\r\n\r\n # add images to lists after confirming that all corresponding files exist\r\n lefts.append(left)\r\n rights.append(right)\r\n disparities.append(disparity)\r\n left_categories.append(left_labels)\r\n left_agls.append(left_agl)\r\n\r\n # update the image counter\r\n count = count + 1\r\n\r\n # when counter gets too high, save new files\r\n if ((count >= max_per_train) and (i < num_train)) or (i == num_train - 1):\r\n\r\n # update the file counter\r\n num_files = num_files + 1\r\n\r\n # print counts for categories\r\n print(' ')\r\n print('Counts for train file ', num_files)\r\n cats = 
np.asarray(left_categories)\r\n max_category = cats.max()\r\n # range is inclusive of the highest category index\r\n for j in range(max_category + 1):\r\n print(j, ': ', len(cats[cats == j]))\r\n print('Writing files...')\r\n print(' ')\r\n\r\n # write the next training files if needed\r\n out_path = Path(out_folder)\r\n if not out_path.exists():\r\n out_path.mkdir()\r\n\r\n disparity_name = join(out_path, out_prefix + '.train.disparity.' + '{:1d}'.format(num_files) + '.npz')\r\n left_name = join(out_path, out_prefix + '.train.left.' + '{:1d}'.format(num_files) + '.npz')\r\n right_name = join(out_path, out_prefix + '.train.right.' + '{:1d}'.format(num_files) + '.npz')\r\n left_cat_name = join(out_path, out_prefix + '.train.left_label.' + '{:1d}'.format(num_files) + '.npz')\r\n left_agl_name = join(out_path, out_prefix + '.train.left_agl.' + '{:1d}'.format(num_files) + '.npz')\r\n np.savez_compressed(disparity_name, disparities)\r\n np.savez_compressed(left_name, lefts)\r\n np.savez_compressed(right_name, rights)\r\n np.savez_compressed(left_cat_name, left_categories)\r\n np.savez_compressed(left_agl_name, left_agls)\r\n\r\n # reset counter and all lists\r\n count = 0\r\n disparities = []\r\n lefts = []\r\n rights = []\r\n left_categories = []\r\n left_agls = []\r\n\r\n # print counts for categories\r\n print(' ')\r\n print('Counts for validation file')\r\n cats = np.asarray(left_categories)\r\n max_category = cats.max()\r\n for j in range(max_category + 1):\r\n print(j, ': ', len(cats[cats == j]))\r\n print('Writing files...')\r\n print(' ')\r\n\r\n out_path = Path(out_folder)\r\n if not out_path.exists():\r\n out_path.mkdir()\r\n\r\n # write the validation set\r\n print('Writing validation files')\r\n print('Number of validation samples = ', len(disparities))\r\n disparity_name = join(out_path, out_prefix + '.test.disparity.npz')\r\n left_name = join(out_path, out_prefix + '.test.left.npz')\r\n right_name = join(out_path, out_prefix + '.test.right.npz')\r\n left_cat_name = join(out_path, out_prefix + '.test.left_label.npz')\r\n left_agl_name = join(out_path, out_prefix + '.test.left_agl.npz')\r\n np.savez_compressed(disparity_name, disparities)\r\n np.savez_compressed(left_name, lefts)\r\n np.savez_compressed(right_name, rights)\r\n np.savez_compressed(left_cat_name, left_categories)\r\n np.savez_compressed(left_agl_name, left_agls)\r\n\r\n\r\nif __name__ == '__main__':\r\n in_folder = '/data/projects/dataFusion/Track2/Train-Track2-RGB/'\r\n output_folder = '/data/projects/dataFusion/Track2/track2_npz'\r\n output_prefix = 'dfc2019.track2'\r\n convert_files_to_npz(in_folder, output_folder, output_prefix)\r\n" ]
[ [ "numpy.asarray", "numpy.random.seed", "numpy.random.shuffle", "numpy.savez_compressed", "numpy.arange" ] ]
Eugenio2192/autumnopen
[ "9001304d711dc94070992897ad1cfb4eae8c5e36" ]
[ "src/homogenization/cost_of_carbon_capture_cement.py" ]
[ "from src.tools.config_loader import Configuration\nfrom src.technoeconomical.cost_operations_functions import cost_of_carbon_capture\nfrom src.harmonization.cost_transformation_functions import convert_value, index_generator\nimport pandas as pd\nconfig = Configuration.get_instance()\npd.set_option('display.max_columns', None)\n#idx_map = index_generator()\nio = config[\"IO\"]\n\nCLINKER_EMISSION_FACTOR = 0.5 # kgCO2 / kgClinker\nCLINKER_CEMENT_RATIO = 0.87 # kgClinker / kgCement\n\nCAPTURE_EFF = config[\"InputHomogenization\"][\"CostOfCCConfig\"][\"DefaultValues\"][\"CaptureEfficiency\"] # % of CO2 emitted\n\n#COST_OF_CLINKER_REF = convert_value(62.6, idx_map, 2014, 2019, \"CEPCI\") # € / t Clinker 2014\n#COST_OF_CLINKER_MEA = convert_value(107.4, idx_map, 2014, 2019, \"CEPCI\") # -\n#COST_OF_CLINKER_OXY = convert_value(93.0, idx_map, 2014, 2019, \"CEPCI\") # -\n\nSPECIFIC_POWER_REF = 15.88 # MW / MtCLinker\nSPECIFIC_POWER_CAP = 29.5 # MW /MtClinker\n\nCF = 1\n\nELEC_EMISSION_FACTOR = 0.85 # kg / kWh\n\n\ndef calculate_clinker(annual_production, clinker_cement_ratio):\n \"\"\"\n Calculate annual clinker production based on given cement/clinker ratio\n :param annual_production: Reported annual production\n :param clinker_cement_ratio: Amount of clinker produced per cement output\n :return: Clinker per year\n \"\"\"\n return annual_production * clinker_cement_ratio\n\n\ndef calculate_clinker_emissions(clinker_production, emission_factor):\n \"\"\"\n Calculate emissions per year based on clinker input\n :param clinker_production: Yearly clinker production\n :param clinker_cement_ratio: Amount of clinker produced per cement output\n :return: Clinker per year\n \"\"\"\n return clinker_production * emission_factor\n\n\ndef calculate_primary_energy_emissions(specific_power, electric_emission_factor):\n \"\"\"\n Calculate emissions related to primary energy generation\n :param specific_power: Power needed for the production of a Mt of clinker\n :param electric_emission_factor: Emission factor of the energy production\n :return: emissions per year related to energy productoin\n \"\"\"\n energy = specific_power * CF * 8600 # KWh / y\n emissions = electric_emission_factor * energy / 1000000 # tCo2 / tclinker\n return emissions\n\n\ndef calculate_total_emisisons(clinker_emissions, power_emissions, capture_ratio=0.0):\n \"\"\"\n Calculate the added emissions of the cement production unit\n :param clinker_emissions: emissions related to the clinker production\n :param power_emissions: emissions related to the energy production\n :param capture_ratio: Emissions captured through CCS, 0 when no CCS is not used\n :return: Array of emitted and captured amounts\n \"\"\"\n emissions = (clinker_emissions + power_emissions) * (1 - capture_ratio)\n captured = (clinker_emissions + power_emissions) * capture_ratio\n return emissions, captured\n\n\ndef cost_of_carbon_capture_cement():\n \"\"\"\n Main function for the calculation of cost of carbon capture from cement production\n :return: dataframe with the cost and amounts\n \"\"\"\n E_FUEL_REF = calculate_primary_energy_emissions(SPECIFIC_POWER_REF, ELEC_EMISSION_FACTOR)\n E_FUEL_CAP = calculate_primary_energy_emissions(SPECIFIC_POWER_CAP, ELEC_EMISSION_FACTOR)\n\n df = pd.read_csv(io[\"CEMENT_INPUT_PATH\"], **config[\"InputConfig\"][\"iron\"])\n data = df[\"Production\"].values\n clinker = (calculate_clinker(val, CLINKER_CEMENT_RATIO) for val in data)\n e_clinker = (calculate_clinker_emissions(val, CLINKER_CEMENT_RATIO) for val in clinker)\n cap_emm_pairs = 
(calculate_total_emisisons(val, E_FUEL_CAP, CAPTURE_EFF) for val in e_clinker)\n captured = [x[1] for x in cap_emm_pairs]\n cost = [cost_of_carbon_capture(COST_OF_CLINKER_REF, COST_OF_CLINKER_OXY, val) for val in captured]\n\n df_out = df.copy()\n df_out[\"AmountCapturedMtY\"] = captured\n df_out[\"CostOfCarbonCaptureEURtonCO2\"] = cost\n df_out[\"Source\"] = \"Cement\"\n df_out[\"Year\"] = 2019\n\n return df_out\n\n\ndef create():\n \"\"\"\n Support function for the production of the dataset as a csv file\n \"\"\"\n df = cost_of_carbon_capture_cement()\n df.to_csv(io[\"cement_output_path\"], index=True)\n\n\nif __name__ == \"__main__\":\n create()\n" ]
[ [ "pandas.read_csv", "pandas.set_option" ] ]
xlegend1024/onnxruntime-iot-edge
[ "9fc76b7dabf70ad4144e6f1d567689a2965adb30" ]
[ "modules/InferenceModule/inference.py" ]
[ "# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE file in the project root for\n# full license information.\n\nimport numpy as np\nimport time\n\ndef run_onnx(frame, location, timestamp, sess):\n\t\"\"\"\n\tDetect objects in frame of your camera, and returns results.\n\tUses TinyYolo from the onnxmodel zoo. Feel free to use your own model or choose another from https://github.com/onnx/models.\n\t\"\"\"\n\tinput_name = sess.get_inputs()[0].name\n\n\tdef softmax(x):\n\t\treturn np.exp(x) / np.sum(np.exp(x), axis=0)\n\n\tdef sigmoid(x):\n\t\treturn 1/(1+np.exp(-x))\n\n\tstart_time = time.time()\n\tpred = sess.run(None, {input_name: frame})\n\tpred = np.array(pred[0][0])\n\tprint(\"INFERENCE TIME (PURE ONNXRUNTIME)\", (time.time()-start_time)*1000,\"ms\")\n\t\n\t# YOLO \n\tlabels_file = open(\"labels.txt\")\n\tlabels = labels_file.read().split(\",\")\n\t# YOLO \n\t\n\t# # # YOLOv3\n\t# labels_file = open(\"coco_classes.txt\")\n\t# labels = labels_file.read().split('\\n')\n\t# # # YOLOv3\n\toutputstring = \"\" #FOR SENDING RESPONSE\n\toutput = [timestamp, location] #FOR SAVING IN CSV\n\n\tpost_start_time = time.time()\n\t\n\ttiny_yolo_cell_width = 13\n\ttiny_yolo_cell_height = 13\n\tnum_boxes = 5\n\t# YOLO\n\ttiny_yolo_classes = 20\n\t# # YOLOv3\n\t# tiny_yolo_classes = 80\n\n\tCONFIDENCE_THRESHOLD = 0.50\n\n\t# Goes through each of the 'cells' in tiny_yolo. Each cell is responsible for detecting 5 objects\n\tfor bx in range (0, tiny_yolo_cell_width):\n\t\tfor by in range (0, tiny_yolo_cell_height):\n\t\t\t# Iterate through each 'object'\n\t\t\tfor bound in range (0, num_boxes):\n\t\t\t\t# extract x, y, width, height, and confidence\n\t\t\t\tchannel = bound*25\n\t\t\t\ttx = pred[channel][by][bx]\n\t\t\t\tty = pred[channel+1][by][bx]\n\t\t\t\ttw = pred[channel+2][by][bx]\n\t\t\t\tth = pred[channel+3][by][bx]\n\t\t\t\ttc = pred[channel+4][by][bx]\n\n\t\t\t\t# apply sigmoid function to get real x/y coordinates, shift by cell position (COMMENTED OUT TO SAVE TIME)\n\t\t\t\t#x = (float(bx) + sigmoid(tx))*32\n\t\t\t\t#y = (float(by) + sigmoid(ty))*32\n\t\t\t\t\n\t\t\t\t#Apply sigmoid to get confidence on a scale from 0 to 1\n\t\t\t\tconfidence = sigmoid(tc)\n\t\t\t\t#Iterate through 20 classes and apply softmax to see which one has the highest confidence, which would be the class of the object\n\t\t\t\tclass_out = pred[channel+5:channel+5+tiny_yolo_classes][bx][by]\n\t\t\t\tclass_out = softmax(np.array(class_out))\n\t\t\t\tclass_detected = np.argmax(class_out)\n\t\t\t\tdisplay_confidence = class_out[class_detected]*confidence\n\t\t\t\tif display_confidence > CONFIDENCE_THRESHOLD:\n\t\t\t\t\toutputstring += \" \"+ labels[class_detected] + \" confidence \" + str(display_confidence)\n\t\t\t\t\toutput.append([labels[class_detected], display_confidence])\n\toutputstring = location + \" Results @\"+ timestamp + \" \" + outputstring\n\tprint(\"POST PROCESSING TIME\", (time.time() - post_start_time)*1000,\"ms\")\n\tprint(\"TOTAL INFERENCE TIME\", (time.time() - start_time)*1000,\"ms\")\n\treturn output, outputstring\n" ]
[ [ "numpy.array", "numpy.argmax", "numpy.exp" ] ]
Virtsionis/torch-nilm
[ "3df0d37ebc90e0429545c83effee93d346ef5a83", "3df0d37ebc90e0429545c83effee93d346ef5a83" ]
[ "neural_networks/custom_modules.py", "lab/training_tools.py" ]
[ "import warnings\nimport torch.nn as nn\n\n\nclass LinearDropRelu(nn.Module):\n def __init__(self, in_features, out_features, dropout=0):\n super(LinearDropRelu, self).__init__()\n self.linear = nn.Sequential(\n nn.Linear(in_features, out_features),\n nn.Dropout(dropout),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n return self.linear(x)\n\n\nclass ConvDropRelu(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, dropout=0, groups=1, relu=True):\n super(ConvDropRelu, self).__init__()\n\n left, right = kernel_size // 2, kernel_size // 2\n if kernel_size % 2 == 0:\n right -= 1\n padding = (left, right, 0, 0)\n\n if relu:\n self.conv = nn.Sequential(\n nn.ZeroPad2d(padding),\n nn.Conv1d(in_channels, out_channels, kernel_size, groups=groups),\n nn.Dropout(dropout),\n nn.ReLU(inplace=True),\n )\n else:\n self.conv = nn.Sequential(\n nn.ZeroPad2d(padding),\n nn.Conv1d(in_channels, out_channels, kernel_size, groups=groups),\n nn.Dropout(dropout),\n )\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass ConvBatchRelu(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, groups=1, relu=True, batch_norm=True):\n super(ConvBatchRelu, self).__init__()\n\n left, right = kernel_size // 2, kernel_size // 2\n if kernel_size % 2 == 0:\n right -= 1\n padding = (left, right, 0, 0)\n\n modules = [nn.ZeroPad2d(padding),\n nn.Conv1d(in_channels, out_channels, kernel_size, groups=groups)]\n if batch_norm:\n modules.append(nn.BatchNorm1d(out_channels))\n if relu:\n modules.append(nn.ReLU(inplace=True))\n self.conv = nn.Sequential(*modules)\n\n def forward(self, x):\n return self.conv(x)\n\n\nclass IBNNet(nn.Module):\n def __init__(self, input_channels, output_dim=64, kernel_size=3, inst_norm=True, residual=True, max_pool=True):\n \"\"\"\n Inputs:\n input_channels - Dimensionality of the input (seq_len or window_size)\n output_dim - Dimensionality of the output\n \"\"\"\n super().__init__()\n self.residual = residual\n self.max_pool = max_pool\n\n self.ibn = nn.Sequential(\n ConvBatchRelu(kernel_size=kernel_size, in_channels=input_channels, out_channels=64,\n relu=True, batch_norm=True),\n ConvBatchRelu(kernel_size=kernel_size, in_channels=64, out_channels=64,\n relu=True, batch_norm=True),\n ConvBatchRelu(kernel_size=kernel_size, in_channels=64, out_channels=256,\n relu=False, batch_norm=True),\n )\n modules = []\n if inst_norm:\n modules.append(nn.InstanceNorm1d(output_dim))\n modules.append(nn.ReLU(inplace=True))\n self.out_layer = nn.Sequential(*modules)\n\n if self.max_pool:\n self.pool = nn.MaxPool1d(2)\n\n def forward(self, x):\n x = x\n ibn_out = self.ibn(x)\n if self.residual:\n x = x + ibn_out\n else:\n x = ibn_out\n\n out = self.out_layer(x)\n if self.max_pool:\n pool_out = self.pool(out)\n return out, pool_out\n else:\n return out, None\n\n\nclass VIBDecoder(nn.Module):\n def __init__(self, k, drop=0, output_dim=1):\n super().__init__()\n self.conv = ConvDropRelu(1, 3, kernel_size=5, dropout=drop)\n self.flatten = nn.Flatten()\n self.feedforward = nn.Sequential(\n LinearDropRelu(k * 3, 2 * k, drop),\n LinearDropRelu(2 * k, k, drop),\n nn.Linear(k, output_dim)\n )\n\n def forward(self, x):\n encoding = x.unsqueeze(1)\n decoding = self.conv(encoding).squeeze()\n decoding = self.flatten(decoding)\n return self.feedforward(decoding)\n", "import math\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch import Tensor\nfrom typing import Dict, Tuple\nimport pytorch_lightning as pl\nimport torch.nn.functional as F\nfrom 
constants.constants import *\nfrom utils.nilm_metrics import NILMmetrics\nfrom neural_networks.base_models import BaseModel\nfrom utils.helpers import denormalize, destandardize\nfrom constants.appliance_thresholds import ON_THRESHOLDS\nfrom constants.enumerates import ElectricalAppliances\nfrom lab.active_models import *\n\n# Setting the seed\n# pl.seed_everything(42)\n\n# Ensure that all operations are deterministic on GPU (if used) for reproducibility\ntorch.backends.cudnn.deterministic = True\n# torch.backends.cudnn.benchmark = False\n\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(\"Device:\", device)\n\n\n\nVAL_ACC = \"val_acc\"\nVAL_LOSS = 'val_loss'\n\n\ndef create_model(model_name, model_hparams):\n model_dict = ACTIVE_MODELS\n if model_name in model_dict:\n return model_dict[model_name](**model_hparams)\n else:\n assert False, \"Unknown model name \\\"%s\\\". Available models are: %s\" % (model_name, str(model_dict.keys()))\n\n\nclass TrainingToolsFactory:\n\n @staticmethod\n def build_and_equip_model(model_name, model_hparams, eval_params):\n model: BaseModel = create_model(model_name, model_hparams)\n return TrainingToolsFactory.equip_model(model, model_hparams, eval_params)\n\n @staticmethod\n def equip_model(model, model_hparams, eval_params):\n if model.supports_vib():\n return VIBTrainingTools(model, model_hparams, eval_params)\n elif model.supports_bayes():\n return BayesTrainingTools(model, model_hparams, eval_params)\n elif model.supports_bert():\n return BertTrainingTools(model, model_hparams, eval_params)\n else:\n return ClassicTrainingTools(model, model_hparams, eval_params)\n\n\nclass ClassicTrainingTools(pl.LightningModule):\n\n def __init__(self, model: BaseModel, model_hparams, eval_params, learning_rate=0.001):\n \"\"\"\n Inputs:\n model_name - Name of the model to run. Used for creating the model (see function below)\n model_hparams - Hyperparameters for the model, as dictionary.\n \"\"\"\n super().__init__()\n # Exports the hyperparameters to a YAML file, and creates the \"self.hparams\" namespace\n self.save_hyperparameters()\n # Create model\n self.model = model\n\n self.eval_params = eval_params\n self.model_name = self.model.architecture_name\n\n self.final_preds = np.array([])\n self.results = {}\n\n def forward(self, x):\n # Forward function that is run when visualizing the graph\n return self.model(x)\n\n def configure_optimizers(self):\n # print(f\"learning rate {self.model.lr}\")\n # print(f\"learning rate {self.lr}\")\n # print(f\"model params {[p for p in self.model.parameters()]}\")\n # print(f\"params {[p for p in self.parameters()]}\")\n return torch.optim.Adam(self.parameters())\n # return torch.optim.SGD(self.parameters(), lr=0.001)\n\n def training_step(self, batch, batch_idx):\n # x must be in shape [batch_size, 1, window_size]\n x, y = batch\n # Forward pass\n outputs = self(x)\n loss = F.mse_loss(outputs.squeeze(), y)\n\n tensorboard_logs = {'train_loss': loss}\n return {'loss': loss, 'log': tensorboard_logs}\n\n def validation_step(self, val_batch: Tensor, batch_idx: int) -> Dict:\n loss, mae = self._forward_step(val_batch)\n # self.log(\"loss\", loss, prog_bar=True)\n self.log(VAL_LOSS, mae, prog_bar=True)\n return {\"vloss\": loss, \"val_loss\": mae}\n\n @staticmethod\n def calculate_loss(logits, labels):\n return F.mse_loss(logits, labels)\n\n def _forward_step(self, batch: Tensor) -> Tuple[Tensor, Tensor]:\n inputs, labels = batch\n outputs = self.forward(inputs).squeeze()\n loss = self.calculate_loss(outputs, labels)\n mae = F.l1_loss(outputs, labels)\n return loss, mae\n\n def training_epoch_end(self, outputs):\n # outputs is a list of whatever you returned in `training_step`\n train_loss = torch.stack([x['loss'] for x in outputs]).mean()\n self.log(\"loss\", train_loss)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n # Forward pass\n outputs = self(x)\n loss = F.mse_loss(outputs.squeeze(), y.squeeze())\n preds_batch = outputs.squeeze().cpu().numpy()\n self.final_preds = np.append(self.final_preds, preds_batch)\n return {'test_loss': loss}\n\n def test_epoch_end(self, outputs):\n # outputs is a list of whatever you returned in `test_step`\n avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()\n if self.model_name == 'DAE':\n self.final_preds = np.reshape(self.final_preds, (-1))\n res = self._metrics()\n print('#### model name: {} ####'.format(res[COLUMN_MODEL]))\n print('metrics: {}'.format(res[COLUMN_METRICS]))\n self.log(\"test_test_avg_loss\", avg_loss)\n return res\n\n def _metrics(self):\n dev, mmax, groundtruth = self.eval_params[COLUMN_DEVICE], \\\n self.eval_params[COLUMN_MMAX], \\\n self.eval_params[COLUMN_GROUNDTRUTH]\n\n means = self.eval_params[COLUMN_MEANS]\n stds = self.eval_params[COLUMN_STDS]\n\n if mmax:\n preds = denormalize(self.final_preds, mmax)\n ground = denormalize(groundtruth, mmax)\n elif means and stds:\n preds = destandardize(self.final_preds, means, stds)\n ground = destandardize(groundtruth, means, stds)\n\n res = NILMmetrics(pred=preds,\n ground=ground,\n threshold=ON_THRESHOLDS.get(ElectricalAppliances(dev), 50)\n )\n\n results = {COLUMN_MODEL: self.model_name,\n COLUMN_METRICS: res,\n COLUMN_PREDICTIONS: preds,\n COLUMN_GROUNDTRUTH: ground, }\n self.set_res(results)\n self.final_preds = np.array([])\n return results\n\n def set_ground(self, ground):\n 
self.eval_params[COLUMN_GROUNDTRUTH] = ground\n\n def set_res(self, res):\n self.reset_res()\n self.results = res\n\n def reset_res(self):\n self.results = {}\n\n def get_res(self):\n return self.results\n\n\nclass VIBTrainingTools(ClassicTrainingTools):\n def __init__(self, model, model_hparams, eval_params, beta=1e-3):\n \"\"\"\n Inputs:\n model_name - Name of the model to run. Used for creating the model (see function below)\n model_hparams - Hyperparameters for the model, as dictionary.\n \"\"\"\n super().__init__(model, model_hparams, eval_params)\n if 'beta' in model_hparams.keys():\n self.beta = model_hparams['beta']\n else:\n self.beta = beta\n\n def forward(self, x):\n # Forward function that is run when visualizing the graph\n return self.model(x, self.current_epoch)\n\n def training_step(self, batch, batch_idx):\n # x must be in shape [batch_size, 1, window_size]\n x, y = batch\n # Forward pass\n (mu, std), logit = self(x)\n class_loss = F.mse_loss(logit.squeeze(), y.squeeze()).div(math.log(2))\n\n info_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))\n total_loss = class_loss + self.beta * info_loss\n\n tensorboard_logs = {'train_loss': total_loss}\n return {'loss': total_loss, 'log': tensorboard_logs}\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n # Forward pass\n (mu, std), outputs = self(x)\n\n loss = F.mse_loss(outputs.squeeze(), y.squeeze())\n preds_batch = outputs.squeeze().cpu().numpy()\n self.final_preds = np.append(self.final_preds, preds_batch)\n return {'test_loss': loss}\n\n def _forward_step(self, batch: Tensor) -> Tuple[Tensor, Tensor]:\n inputs, labels = batch\n (mu, std), outputs = self.forward(inputs)\n loss = self.calculate_loss(outputs.squeeze(), labels)\n mae = F.l1_loss(outputs, labels)\n\n return loss, mae\n\n def test_epoch_end(self, outputs):\n # outputs is a list of whatever you returned in `test_step`\n avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()\n res = self._metrics()\n print('#### model name: {} ####'.format(res['model']))\n print('metrics: {}'.format(res['metrics']))\n\n self.log(\"test_test_avg_loss\", avg_loss)\n return res\n\n\nclass BayesTrainingTools(ClassicTrainingTools):\n def __init__(self, model, model_hparams, eval_params, sample_nbr=3):\n \"\"\"\n Inputs:\n model_name - Name of the model to run. Used for creating the model (see function below)\n model_hparams - Hyperparameters for the model, as dictionary.\n \"\"\"\n super().__init__(model, model_hparams, eval_params)\n print('BAYES TRAINING')\n self.criterion = torch.nn.MSELoss() # F.mse_loss()\n self.sample_nbr = sample_nbr\n\n def training_step(self, batch, batch_idx):\n # x must be in shape [batch_size, 1, window_size]\n x, y = batch\n # Forward pass\n outputs = self(x)\n # fit_loss = F.mse_loss(outputs.squeeze(1), y)\n # complexity_loss = self.model.nn_kl_divergence()\n # loss = fit_loss + complexity_loss\n\n loss = self.model.sample_elbo(inputs=x,\n labels=y,\n criterion=self.criterion,\n sample_nbr=self.sample_nbr,\n complexity_cost_weight=1. / x.shape[0])\n\n tensorboard_logs = {'train_loss': loss}\n return {'loss': loss, 'log': tensorboard_logs}\n\n\nclass BertTrainingTools(ClassicTrainingTools):\n def __init__(self, model, model_hparams, eval_params):\n \"\"\"\n Inputs:\n model_name - Name of the model to run. 
Used for creating the model (see function below)\n model_hparams - Hyperparameters for the model, as dictionary.\n \"\"\"\n super().__init__(model, model_hparams, eval_params)\n print('BERT4NILM')\n self.kl = nn.KLDivLoss(reduction='batchmean')\n self.mse = nn.MSELoss()\n self.margin = nn.SoftMarginLoss()\n self.l1_on = nn.L1Loss(reduction='sum')\n self.temperature = 0.1\n self.dev = self.eval_params[COLUMN_DEVICE]\n self.C0 = torch.tensor(LAMBDA[self.dev])\n self.cutoff = torch.tensor(CUT_OFF[self.dev])\n self.threshold = torch.tensor(POWER_ON_THRESHOLD[self.dev])\n self.min_on = torch.tensor(MIN_ON_DUR[self.dev])\n self.min_off = torch.tensor(MIN_OFF_DUR[self.dev])\n\n def training_step(self, batch, batch_idx):\n total_loss = self._bert_loss(batch)\n tensorboard_logs = {'train_loss': total_loss}\n return {'loss': total_loss, 'log': tensorboard_logs}\n\n def test_step(self, batch, batch_idx):\n # Forward pass\n x, y = batch\n outputs = self(x)\n # _bert_loss expects the raw (inputs, targets) batch; it runs the model internally\n loss = self._bert_loss((x, y))\n preds_batch = outputs.squeeze().cpu().numpy()\n self.final_preds = np.append(self.final_preds, preds_batch)\n return {'test_loss': loss}\n\n def validation_step(self, val_batch: Tensor, batch_idx: int) -> Dict:\n loss, mae = self._forward_step(val_batch)\n self.log(VAL_LOSS, mae, prog_bar=True)\n return {\"vloss\": loss, \"val_loss\": mae}\n\n def _forward_step(self, batch: Tensor) -> Tuple[Tensor, Tensor]:\n inputs, labels = batch\n outputs = self.forward(inputs)\n loss = self.calculate_loss(outputs.squeeze(), labels)\n # loss = self._bert_loss((outputs.squeeze(), labels))\n mae = F.l1_loss(outputs, labels)\n return loss, mae\n\n def test_epoch_end(self, outputs):\n # outputs is a list of whatever you returned in `test_step`\n avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()\n res = self._metrics()\n print('#### model name: {} ####'.format(res['model']))\n print('metrics: {}'.format(res['metrics']))\n\n self.log(\"test_test_avg_loss\", avg_loss)\n return res\n\n def _bert_loss(self, batch):\n x, y = batch\n status = self._get_appliance_status(y)\n logits = self.model(x)\n labels = y / self.cutoff\n logits_energy = self.cutoff_energy(logits * self.cutoff)\n logits_status = self.compute_status(logits_energy)\n\n kl_loss = self.kl(torch.log(F.softmax(logits.squeeze() / self.temperature, dim=-1) + 1e-9),\n F.softmax(labels.squeeze() / self.temperature, dim=-1))\n mse_loss = self.mse(logits.contiguous().view(-1).double(),\n labels.contiguous().view(-1).double())\n margin_loss = self.margin((logits_status * 2 - 1).contiguous().view(-1).double(),\n (status * 2 - 1).contiguous().view(-1).double())\n # margin_loss = 0\n total_loss = kl_loss + mse_loss + margin_loss\n\n on_mask = ((status == 1) + (status != logits_status.reshape(status.shape))) >= 1\n if on_mask.sum() > 0:\n total_size = torch.tensor(on_mask.shape).prod()\n logits_on = torch.masked_select(logits.reshape(on_mask.shape), on_mask)\n labels_on = torch.masked_select(labels.reshape(on_mask.shape), on_mask)\n loss_l1_on = self.l1_on(logits_on.contiguous().view(-1),\n labels_on.contiguous().view(-1))\n total_loss += self.C0 * loss_l1_on / total_size\n return total_loss\n\n def cutoff_energy(self, data):\n columns = data.squeeze().shape[-1]\n if self.cutoff == 0:\n self.cutoff = torch.tensor(\n [3100 for i in range(columns)]).to(self.device)\n\n data[data < 5] = 0\n data = torch.min(data, self.cutoff.double())\n return data\n\n def _get_appliance_status(self, data):\n status = np.zeros(data.shape)\n if len(data.squeeze().shape) == 1:\n columns = 1\n else:\n columns = data.squeeze().shape[-1]\n\n if not self.threshold:\n self.threshold = [10 for i in range(columns)]\n if not self.min_on:\n self.min_on = [1 for i in range(columns)]\n if not self.min_off:\n self.min_off = [1 for i in range(columns)]\n\n initial_status = data >= self.threshold\n status_diff = np.diff(initial_status.cpu())\n events_idx = status_diff.nonzero()\n\n events_idx = np.array(events_idx).squeeze()\n events_idx += 1\n\n if all(initial_status[0]):\n events_idx = np.insert(events_idx, 0, 0)\n\n if all(initial_status[-1]):\n events_idx = np.insert(\n events_idx, events_idx.size, initial_status.size)\n\n events_idx = events_idx.reshape((-1, 2))\n on_events = events_idx[:, 0].copy()\n off_events = events_idx[:, 1].copy()\n assert len(on_events) == len(off_events)\n\n # min_on/min_off may have been replaced by one-element lists above;\n # reduce them to scalars before the elementwise comparisons below\n min_on = self.min_on[0] if isinstance(self.min_on, list) else self.min_on\n min_off = self.min_off[0] if isinstance(self.min_off, list) else self.min_off\n\n if len(on_events) > 0:\n off_duration = on_events[1:] - off_events[:-1]\n off_duration = np.insert(off_duration, 0, 1000)\n on_events = on_events[off_duration > min_off]\n off_events = off_events[np.roll(\n off_duration, -1) > min_off]\n\n on_duration = off_events - on_events\n on_events = on_events[on_duration >= min_on]\n off_events = off_events[on_duration >= min_on]\n assert len(on_events) == len(off_events)\n\n temp_status = data.clone()\n temp_status[:] = 0\n for on, off in zip(on_events, off_events):\n temp_status[on: off] = 1\n status = temp_status\n return status\n\n def compute_status(self, data):\n columns = data.squeeze().shape[-1]\n\n if self.threshold == 0:\n self.threshold = torch.tensor(\n [10 for i in range(columns)]).to(self.device)\n\n status = (data >= self.threshold) * 1\n return status\n" ]
[ [ "torch.nn.Linear", "torch.nn.InstanceNorm1d", "torch.nn.Dropout", "torch.nn.Conv1d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.BatchNorm1d", "torch.nn.ZeroPad2d", "torch.nn.MaxPool1d", "torch.nn.Flatten" ], [ "torch.device", "numpy.array", "torch.stack", "torch.nn.MSELoss", "numpy.zeros", "numpy.reshape", "torch.nn.functional.l1_loss", "numpy.roll", "torch.nn.L1Loss", "torch.nn.functional.mse_loss", "torch.cuda.is_available", "torch.tensor", "torch.nn.SoftMarginLoss", "torch.nn.KLDivLoss", "numpy.append", "numpy.insert" ] ]
DorAmram/pandas
[ "a2bbdb5a0abd131d0190fe58c0ba7cbf21b960c9", "4071dde86e33434e1bee8304fa62074949f813cc" ]
[ "pandas/tests/io/json/test_normalize.py", "pandas/tests/indexes/datetimes/test_datetime.py" ]
[ "import json\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, json_normalize\nimport pandas.util.testing as tm\n\nfrom pandas.io.json._normalize import nested_to_record\n\n\[email protected]\ndef deep_nested():\n # deeply nested data\n return [\n {\n \"country\": \"USA\",\n \"states\": [\n {\n \"name\": \"California\",\n \"cities\": [\n {\"name\": \"San Francisco\", \"pop\": 12345},\n {\"name\": \"Los Angeles\", \"pop\": 12346},\n ],\n },\n {\n \"name\": \"Ohio\",\n \"cities\": [\n {\"name\": \"Columbus\", \"pop\": 1234},\n {\"name\": \"Cleveland\", \"pop\": 1236},\n ],\n },\n ],\n },\n {\n \"country\": \"Germany\",\n \"states\": [\n {\"name\": \"Bayern\", \"cities\": [{\"name\": \"Munich\", \"pop\": 12347}]},\n {\n \"name\": \"Nordrhein-Westfalen\",\n \"cities\": [\n {\"name\": \"Duesseldorf\", \"pop\": 1238},\n {\"name\": \"Koeln\", \"pop\": 1239},\n ],\n },\n ],\n },\n ]\n\n\[email protected]\ndef state_data():\n return [\n {\n \"counties\": [\n {\"name\": \"Dade\", \"population\": 12345},\n {\"name\": \"Broward\", \"population\": 40000},\n {\"name\": \"Palm Beach\", \"population\": 60000},\n ],\n \"info\": {\"governor\": \"Rick Scott\"},\n \"shortname\": \"FL\",\n \"state\": \"Florida\",\n },\n {\n \"counties\": [\n {\"name\": \"Summit\", \"population\": 1234},\n {\"name\": \"Cuyahoga\", \"population\": 1337},\n ],\n \"info\": {\"governor\": \"John Kasich\"},\n \"shortname\": \"OH\",\n \"state\": \"Ohio\",\n },\n ]\n\n\[email protected]\ndef author_missing_data():\n return [\n {\"info\": None},\n {\n \"info\": {\"created_at\": \"11/08/1993\", \"last_updated\": \"26/05/2012\"},\n \"author_name\": {\"first\": \"Jane\", \"last_name\": \"Doe\"},\n },\n ]\n\n\[email protected]\ndef missing_metadata():\n return [\n {\n \"name\": \"Alice\",\n \"addresses\": [\n {\n \"number\": 9562,\n \"street\": \"Morris St.\",\n \"city\": \"Massillon\",\n \"state\": \"OH\",\n \"zip\": 44646,\n }\n ],\n },\n {\n \"addresses\": [\n {\n \"number\": 8449,\n \"street\": \"Spring St.\",\n \"city\": \"Elizabethton\",\n \"state\": \"TN\",\n \"zip\": 37643,\n }\n ]\n },\n ]\n\n\[email protected]\ndef max_level_test_input_data():\n \"\"\"\n input data to test json_normalize with max_level param\n \"\"\"\n return [\n {\n \"CreatedBy\": {\"Name\": \"User001\"},\n \"Lookup\": {\n \"TextField\": \"Some text\",\n \"UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n },\n \"Image\": {\"a\": \"b\"},\n }\n ]\n\n\nclass TestJSONNormalize:\n def test_simple_records(self):\n recs = [\n {\"a\": 1, \"b\": 2, \"c\": 3},\n {\"a\": 4, \"b\": 5, \"c\": 6},\n {\"a\": 7, \"b\": 8, \"c\": 9},\n {\"a\": 10, \"b\": 11, \"c\": 12},\n ]\n\n result = json_normalize(recs)\n expected = DataFrame(recs)\n\n tm.assert_frame_equal(result, expected)\n\n def test_simple_normalize(self, state_data):\n result = json_normalize(state_data[0], \"counties\")\n expected = DataFrame(state_data[0][\"counties\"])\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(state_data, \"counties\")\n\n expected = []\n for rec in state_data:\n expected.extend(rec[\"counties\"])\n expected = DataFrame(expected)\n\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(state_data, \"counties\", meta=\"state\")\n expected[\"state\"] = np.array([\"Florida\", \"Ohio\"]).repeat([3, 2])\n\n tm.assert_frame_equal(result, expected)\n\n def test_empty_array(self):\n result = json_normalize([])\n expected = DataFrame()\n tm.assert_frame_equal(result, expected)\n\n def test_simple_normalize_with_separator(self, 
deep_nested):\n # GH 14883\n result = json_normalize({\"A\": {\"A\": 1, \"B\": 2}})\n expected = DataFrame([[1, 2]], columns=[\"A.A\", \"A.B\"])\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n result = json_normalize({\"A\": {\"A\": 1, \"B\": 2}}, sep=\"_\")\n expected = DataFrame([[1, 2]], columns=[\"A_A\", \"A_B\"])\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n result = json_normalize({\"A\": {\"A\": 1, \"B\": 2}}, sep=\"\\u03c3\")\n expected = DataFrame([[1, 2]], columns=[\"A\\u03c3A\", \"A\\u03c3B\"])\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n result = json_normalize(\n deep_nested,\n [\"states\", \"cities\"],\n meta=[\"country\", [\"states\", \"name\"]],\n sep=\"_\",\n )\n expected = Index([\"name\", \"pop\", \"country\", \"states_name\"]).sort_values()\n assert result.columns.sort_values().equals(expected)\n\n def test_value_array_record_prefix(self):\n # GH 21536\n result = json_normalize({\"A\": [1, 2]}, \"A\", record_prefix=\"Prefix.\")\n expected = DataFrame([[1], [2]], columns=[\"Prefix.0\"])\n tm.assert_frame_equal(result, expected)\n\n def test_nested_object_record_path(self):\n # GH 22706\n data = {\n \"state\": \"Florida\",\n \"info\": {\n \"governor\": \"Rick Scott\",\n \"counties\": [\n {\"name\": \"Dade\", \"population\": 12345},\n {\"name\": \"Broward\", \"population\": 40000},\n {\"name\": \"Palm Beach\", \"population\": 60000},\n ],\n },\n }\n result = json_normalize(data, record_path=[\"info\", \"counties\"])\n expected = DataFrame(\n [[\"Dade\", 12345], [\"Broward\", 40000], [\"Palm Beach\", 60000]],\n columns=[\"name\", \"population\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_more_deeply_nested(self, deep_nested):\n\n result = json_normalize(\n deep_nested, [\"states\", \"cities\"], meta=[\"country\", [\"states\", \"name\"]]\n )\n ex_data = {\n \"country\": [\"USA\"] * 4 + [\"Germany\"] * 3,\n \"states.name\": [\n \"California\",\n \"California\",\n \"Ohio\",\n \"Ohio\",\n \"Bayern\",\n \"Nordrhein-Westfalen\",\n \"Nordrhein-Westfalen\",\n ],\n \"name\": [\n \"San Francisco\",\n \"Los Angeles\",\n \"Columbus\",\n \"Cleveland\",\n \"Munich\",\n \"Duesseldorf\",\n \"Koeln\",\n ],\n \"pop\": [12345, 12346, 1234, 1236, 12347, 1238, 1239],\n }\n\n expected = DataFrame(ex_data, columns=result.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_shallow_nested(self):\n data = [\n {\n \"state\": \"Florida\",\n \"shortname\": \"FL\",\n \"info\": {\"governor\": \"Rick Scott\"},\n \"counties\": [\n {\"name\": \"Dade\", \"population\": 12345},\n {\"name\": \"Broward\", \"population\": 40000},\n {\"name\": \"Palm Beach\", \"population\": 60000},\n ],\n },\n {\n \"state\": \"Ohio\",\n \"shortname\": \"OH\",\n \"info\": {\"governor\": \"John Kasich\"},\n \"counties\": [\n {\"name\": \"Summit\", \"population\": 1234},\n {\"name\": \"Cuyahoga\", \"population\": 1337},\n ],\n },\n ]\n\n result = json_normalize(\n data, \"counties\", [\"state\", \"shortname\", [\"info\", \"governor\"]]\n )\n ex_data = {\n \"name\": [\"Dade\", \"Broward\", \"Palm Beach\", \"Summit\", \"Cuyahoga\"],\n \"state\": [\"Florida\"] * 3 + [\"Ohio\"] * 2,\n \"shortname\": [\"FL\", \"FL\", \"FL\", \"OH\", \"OH\"],\n \"info.governor\": [\"Rick Scott\"] * 3 + [\"John Kasich\"] * 2,\n \"population\": [12345, 40000, 60000, 1234, 1337],\n }\n expected = DataFrame(ex_data, columns=result.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_meta_name_conflict(self):\n data = [\n {\n \"foo\": \"hello\",\n 
\"bar\": \"there\",\n \"data\": [\n {\"foo\": \"something\", \"bar\": \"else\"},\n {\"foo\": \"something2\", \"bar\": \"else2\"},\n ],\n }\n ]\n\n msg = r\"Conflicting metadata name (foo|bar), need distinguishing prefix\"\n with pytest.raises(ValueError, match=msg):\n json_normalize(data, \"data\", meta=[\"foo\", \"bar\"])\n\n result = json_normalize(data, \"data\", meta=[\"foo\", \"bar\"], meta_prefix=\"meta\")\n\n for val in [\"metafoo\", \"metabar\", \"foo\", \"bar\"]:\n assert val in result\n\n def test_meta_parameter_not_modified(self):\n # GH 18610\n data = [\n {\n \"foo\": \"hello\",\n \"bar\": \"there\",\n \"data\": [\n {\"foo\": \"something\", \"bar\": \"else\"},\n {\"foo\": \"something2\", \"bar\": \"else2\"},\n ],\n }\n ]\n\n COLUMNS = [\"foo\", \"bar\"]\n result = json_normalize(data, \"data\", meta=COLUMNS, meta_prefix=\"meta\")\n\n assert COLUMNS == [\"foo\", \"bar\"]\n for val in [\"metafoo\", \"metabar\", \"foo\", \"bar\"]:\n assert val in result\n\n def test_record_prefix(self, state_data):\n result = json_normalize(state_data[0], \"counties\")\n expected = DataFrame(state_data[0][\"counties\"])\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(\n state_data, \"counties\", meta=\"state\", record_prefix=\"county_\"\n )\n\n expected = []\n for rec in state_data:\n expected.extend(rec[\"counties\"])\n expected = DataFrame(expected)\n expected = expected.rename(columns=lambda x: \"county_\" + x)\n expected[\"state\"] = np.array([\"Florida\", \"Ohio\"]).repeat([3, 2])\n\n tm.assert_frame_equal(result, expected)\n\n def test_non_ascii_key(self):\n testjson = (\n b'[{\"\\xc3\\x9cnic\\xc3\\xb8de\":0,\"sub\":{\"A\":1, \"B\":2}},'\n + b'{\"\\xc3\\x9cnic\\xc3\\xb8de\":1,\"sub\":{\"A\":3, \"B\":4}}]'\n ).decode(\"utf8\")\n\n testdata = {\n b\"\\xc3\\x9cnic\\xc3\\xb8de\".decode(\"utf8\"): [0, 1],\n \"sub.A\": [1, 3],\n \"sub.B\": [2, 4],\n }\n expected = DataFrame(testdata)\n\n result = json_normalize(json.loads(testjson))\n tm.assert_frame_equal(result, expected)\n\n def test_missing_field(self, author_missing_data):\n # GH20030:\n result = json_normalize(author_missing_data)\n ex_data = [\n {\n \"info\": np.nan,\n \"info.created_at\": np.nan,\n \"info.last_updated\": np.nan,\n \"author_name.first\": np.nan,\n \"author_name.last_name\": np.nan,\n },\n {\n \"info\": None,\n \"info.created_at\": \"11/08/1993\",\n \"info.last_updated\": \"26/05/2012\",\n \"author_name.first\": \"Jane\",\n \"author_name.last_name\": \"Doe\",\n },\n ]\n expected = DataFrame(ex_data)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"max_level,expected\",\n [\n (\n 0,\n [\n {\n \"TextField\": \"Some text\",\n \"UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n \"CreatedBy\": {\"Name\": \"User001\"},\n \"Image\": {\"a\": \"b\"},\n },\n {\n \"TextField\": \"Some text\",\n \"UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n \"CreatedBy\": {\"Name\": \"User001\"},\n \"Image\": {\"a\": \"b\"},\n },\n ],\n ),\n (\n 1,\n [\n {\n \"TextField\": \"Some text\",\n \"UserField.Id\": \"ID001\",\n \"UserField.Name\": \"Name001\",\n \"CreatedBy\": {\"Name\": \"User001\"},\n \"Image\": {\"a\": \"b\"},\n },\n {\n \"TextField\": \"Some text\",\n \"UserField.Id\": \"ID001\",\n \"UserField.Name\": \"Name001\",\n \"CreatedBy\": {\"Name\": \"User001\"},\n \"Image\": {\"a\": \"b\"},\n },\n ],\n ),\n ],\n )\n def test_max_level_with_records_path(self, max_level, expected):\n # GH23843: Enhanced JSON normalize\n test_input = [\n {\n \"CreatedBy\": {\"Name\": 
\"User001\"},\n \"Lookup\": [\n {\n \"TextField\": \"Some text\",\n \"UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n },\n {\n \"TextField\": \"Some text\",\n \"UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n },\n ],\n \"Image\": {\"a\": \"b\"},\n \"tags\": [\n {\"foo\": \"something\", \"bar\": \"else\"},\n {\"foo\": \"something2\", \"bar\": \"else2\"},\n ],\n }\n ]\n\n result = json_normalize(\n test_input,\n record_path=[\"Lookup\"],\n meta=[[\"CreatedBy\"], [\"Image\"]],\n max_level=max_level,\n )\n expected_df = DataFrame(data=expected, columns=result.columns.values)\n tm.assert_equal(expected_df, result)\n\n def test_nested_flattening_consistent(self):\n # see gh-21537\n df1 = json_normalize([{\"A\": {\"B\": 1}}])\n df2 = json_normalize({\"dummy\": [{\"A\": {\"B\": 1}}]}, \"dummy\")\n\n # They should be the same.\n tm.assert_frame_equal(df1, df2)\n\n\nclass TestNestedToRecord:\n def test_flat_stays_flat(self):\n recs = [dict(flat1=1, flat2=2), dict(flat1=3, flat2=4)]\n result = nested_to_record(recs)\n expected = recs\n assert result == expected\n\n def test_one_level_deep_flattens(self):\n data = dict(flat1=1, dict1=dict(c=1, d=2))\n\n result = nested_to_record(data)\n expected = {\"dict1.c\": 1, \"dict1.d\": 2, \"flat1\": 1}\n\n assert result == expected\n\n def test_nested_flattens(self):\n data = dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))\n\n result = nested_to_record(data)\n expected = {\n \"dict1.c\": 1,\n \"dict1.d\": 2,\n \"flat1\": 1,\n \"nested.d\": 2,\n \"nested.e.c\": 1,\n \"nested.e.d\": 2,\n }\n\n assert result == expected\n\n def test_json_normalize_errors(self, missing_metadata):\n # GH14583:\n # If meta keys are not always present a new option to set\n # errors='ignore' has been implemented\n\n msg = \"Try running with errors='ignore' as key 'name' is not always present\"\n with pytest.raises(KeyError, match=msg):\n json_normalize(\n data=missing_metadata,\n record_path=\"addresses\",\n meta=\"name\",\n errors=\"raise\",\n )\n\n def test_missing_meta(self, missing_metadata):\n # GH25468\n # If metadata is nullable with errors set to ignore, the null values\n # should be numpy.nan values\n result = json_normalize(\n data=missing_metadata, record_path=\"addresses\", meta=\"name\", errors=\"ignore\"\n )\n ex_data = [\n [9562, \"Morris St.\", \"Massillon\", \"OH\", 44646, \"Alice\"],\n [8449, \"Spring St.\", \"Elizabethton\", \"TN\", 37643, np.nan],\n ]\n columns = [\"city\", \"number\", \"state\", \"street\", \"zip\", \"name\"]\n columns = [\"number\", \"street\", \"city\", \"state\", \"zip\", \"name\"]\n expected = DataFrame(ex_data, columns=columns)\n tm.assert_frame_equal(result, expected)\n\n def test_donot_drop_nonevalues(self):\n # GH21356\n data = [\n {\"info\": None, \"author_name\": {\"first\": \"Smith\", \"last_name\": \"Appleseed\"}},\n {\n \"info\": {\"created_at\": \"11/08/1993\", \"last_updated\": \"26/05/2012\"},\n \"author_name\": {\"first\": \"Jane\", \"last_name\": \"Doe\"},\n },\n ]\n result = nested_to_record(data)\n expected = [\n {\n \"info\": None,\n \"author_name.first\": \"Smith\",\n \"author_name.last_name\": \"Appleseed\",\n },\n {\n \"author_name.first\": \"Jane\",\n \"author_name.last_name\": \"Doe\",\n \"info.created_at\": \"11/08/1993\",\n \"info.last_updated\": \"26/05/2012\",\n },\n ]\n\n assert result == expected\n\n def test_nonetype_top_level_bottom_level(self):\n # GH21158: If inner level json has a key with a null value\n # make sure it does not do a new_d.pop twice and except\n data 
= {\n \"id\": None,\n \"location\": {\n \"country\": {\n \"state\": {\n \"id\": None,\n \"town.info\": {\n \"id\": None,\n \"region\": None,\n \"x\": 49.151580810546875,\n \"y\": -33.148521423339844,\n \"z\": 27.572303771972656,\n },\n }\n }\n },\n }\n result = nested_to_record(data)\n expected = {\n \"id\": None,\n \"location.country.state.id\": None,\n \"location.country.state.town.info.id\": None,\n \"location.country.state.town.info.region\": None,\n \"location.country.state.town.info.x\": 49.151580810546875,\n \"location.country.state.town.info.y\": -33.148521423339844,\n \"location.country.state.town.info.z\": 27.572303771972656,\n }\n assert result == expected\n\n def test_nonetype_multiple_levels(self):\n # GH21158: If inner level json has a key with a null value\n # make sure it does not do a new_d.pop twice and except\n data = {\n \"id\": None,\n \"location\": {\n \"id\": None,\n \"country\": {\n \"id\": None,\n \"state\": {\n \"id\": None,\n \"town.info\": {\n \"region\": None,\n \"x\": 49.151580810546875,\n \"y\": -33.148521423339844,\n \"z\": 27.572303771972656,\n },\n },\n },\n },\n }\n result = nested_to_record(data)\n expected = {\n \"id\": None,\n \"location.id\": None,\n \"location.country.id\": None,\n \"location.country.state.id\": None,\n \"location.country.state.town.info.region\": None,\n \"location.country.state.town.info.x\": 49.151580810546875,\n \"location.country.state.town.info.y\": -33.148521423339844,\n \"location.country.state.town.info.z\": 27.572303771972656,\n }\n assert result == expected\n\n @pytest.mark.parametrize(\n \"max_level, expected\",\n [\n (\n None,\n [\n {\n \"CreatedBy.Name\": \"User001\",\n \"Lookup.TextField\": \"Some text\",\n \"Lookup.UserField.Id\": \"ID001\",\n \"Lookup.UserField.Name\": \"Name001\",\n \"Image.a\": \"b\",\n }\n ],\n ),\n (\n 0,\n [\n {\n \"CreatedBy\": {\"Name\": \"User001\"},\n \"Lookup\": {\n \"TextField\": \"Some text\",\n \"UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n },\n \"Image\": {\"a\": \"b\"},\n }\n ],\n ),\n (\n 1,\n [\n {\n \"CreatedBy.Name\": \"User001\",\n \"Lookup.TextField\": \"Some text\",\n \"Lookup.UserField\": {\"Id\": \"ID001\", \"Name\": \"Name001\"},\n \"Image.a\": \"b\",\n }\n ],\n ),\n ],\n )\n def test_with_max_level(self, max_level, expected, max_level_test_input_data):\n # GH23843: Enhanced JSON normalize\n output = nested_to_record(max_level_test_input_data, max_level=max_level)\n assert output == expected\n\n def test_with_large_max_level(self):\n # GH23843: Enhanced JSON normalize\n max_level = 100\n input_data = [\n {\n \"CreatedBy\": {\n \"user\": {\n \"name\": {\"firstname\": \"Leo\", \"LastName\": \"Thomson\"},\n \"family_tree\": {\n \"father\": {\n \"name\": \"Father001\",\n \"father\": {\n \"Name\": \"Father002\",\n \"father\": {\n \"name\": \"Father003\",\n \"father\": {\"Name\": \"Father004\"},\n },\n },\n }\n },\n }\n }\n }\n ]\n expected = [\n {\n \"CreatedBy.user.name.firstname\": \"Leo\",\n \"CreatedBy.user.name.LastName\": \"Thomson\",\n \"CreatedBy.user.family_tree.father.name\": \"Father001\",\n \"CreatedBy.user.family_tree.father.father.Name\": \"Father002\",\n \"CreatedBy.user.family_tree.father.father.father.name\": \"Father003\",\n \"CreatedBy.user.family_tree.father.father.father.father.Name\": \"Father004\", # noqa: E501\n }\n ]\n output = nested_to_record(input_data, max_level=max_level)\n assert output == expected\n\n def test_deprecated_import(self):\n with tm.assert_produces_warning(FutureWarning):\n from pandas.io.json import json_normalize\n\n recs 
= [{\"a\": 1, \"b\": 2, \"c\": 3}, {\"a\": 4, \"b\": 5, \"c\": 6}]\n json_normalize(recs)\n", "from datetime import date\n\nimport dateutil\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets\nimport pandas.util.testing as tm\n\nrandn = np.random.randn\n\n\nclass TestDatetimeIndex:\n def test_roundtrip_pickle_with_tz(self):\n\n # GH 8367\n # round-trip of timezone\n index = date_range(\"20130101\", periods=3, tz=\"US/Eastern\", name=\"foo\")\n unpickled = tm.round_trip_pickle(index)\n tm.assert_index_equal(index, unpickled)\n\n def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):\n # GH7774\n index = date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n assert str(index.reindex([])[0].tz) == \"US/Eastern\"\n assert str(index.reindex(np.array([]))[0].tz) == \"US/Eastern\"\n\n def test_time_loc(self): # GH8667\n from datetime import time\n from pandas._libs.index import _SIZE_CUTOFF\n\n ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)\n key = time(15, 11, 30)\n start = key.hour * 3600 + key.minute * 60 + key.second\n step = 24 * 3600\n\n for n in ns:\n idx = pd.date_range(\"2014-11-26\", periods=n, freq=\"S\")\n ts = pd.Series(np.random.randn(n), index=idx)\n i = np.arange(start, n, step)\n\n tm.assert_numpy_array_equal(ts.index.get_loc(key), i, check_dtype=False)\n tm.assert_series_equal(ts[key], ts.iloc[i])\n\n left, right = ts.copy(), ts.copy()\n left[key] *= -10\n right.iloc[i] *= -10\n tm.assert_series_equal(left, right)\n\n def test_time_overflow_for_32bit_machines(self):\n # GH8943. On some machines NumPy defaults to np.int32 (for example,\n # 32-bit Linux machines). In the function _generate_regular_range\n # found in tseries/index.py, `periods` gets multiplied by `strides`\n # (which has value 1e9) and since the max value for np.int32 is ~2e9,\n # and since those machines won't promote np.int32 to np.int64, we get\n # overflow.\n periods = np.int_(1000)\n\n idx1 = pd.date_range(start=\"2000\", periods=periods, freq=\"S\")\n assert len(idx1) == periods\n\n idx2 = pd.date_range(end=\"2000\", periods=periods, freq=\"S\")\n assert len(idx2) == periods\n\n def test_nat(self):\n assert DatetimeIndex([np.nan])[0] is pd.NaT\n\n def test_week_of_month_frequency(self):\n # GH 5348: \"ValueError: Could not evaluate WOM-1SUN\" shouldn't raise\n d1 = date(2002, 9, 1)\n d2 = date(2013, 10, 27)\n d3 = date(2012, 9, 30)\n idx1 = DatetimeIndex([d1, d2])\n idx2 = DatetimeIndex([d3])\n result_append = idx1.append(idx2)\n expected = DatetimeIndex([d1, d2, d3])\n tm.assert_index_equal(result_append, expected)\n result_union = idx1.union(idx2)\n expected = DatetimeIndex([d1, d3, d2])\n tm.assert_index_equal(result_union, expected)\n\n # GH 5115\n result = date_range(\"2013-1-1\", periods=4, freq=\"WOM-1SAT\")\n dates = [\"2013-01-05\", \"2013-02-02\", \"2013-03-02\", \"2013-04-06\"]\n expected = DatetimeIndex(dates, freq=\"WOM-1SAT\")\n tm.assert_index_equal(result, expected)\n\n def test_hash_error(self):\n index = date_range(\"20010101\", periods=10)\n with pytest.raises(\n TypeError, match=f\"unhashable type: '{type(index).__name__}'\"\n ):\n hash(index)\n\n def test_stringified_slice_with_tz(self):\n # GH#2658\n start = \"2013-01-07\"\n idx = date_range(start=start, freq=\"1d\", periods=10, tz=\"US/Eastern\")\n df = DataFrame(np.arange(10), index=idx)\n df[\"2013-01-14 23:44:34.437768-05:00\":] # no exception here\n\n def test_append_join_nondatetimeindex(self):\n rng = 
date_range(\"1/1/2000\", periods=10)\n idx = Index([\"a\", \"b\", \"c\", \"d\"])\n\n result = rng.append(idx)\n assert isinstance(result[0], Timestamp)\n\n # it works\n rng.join(idx, how=\"outer\")\n\n def test_map(self):\n rng = date_range(\"1/1/2000\", periods=10)\n\n f = lambda x: x.strftime(\"%Y%m%d\")\n result = rng.map(f)\n exp = Index([f(x) for x in rng], dtype=\"<U8\")\n tm.assert_index_equal(result, exp)\n\n def test_map_fallthrough(self, capsys):\n # GH#22067, check we don't get warnings about silently ignored errors\n dti = date_range(\"2017-01-01\", \"2018-01-01\", freq=\"B\")\n\n dti.map(lambda x: pd.Period(year=x.year, month=x.month, freq=\"M\"))\n\n captured = capsys.readouterr()\n assert captured.err == \"\"\n\n def test_iteration_preserves_tz(self):\n # see gh-8890\n index = date_range(\"2012-01-01\", periods=3, freq=\"H\", tz=\"US/Eastern\")\n\n for i, ts in enumerate(index):\n result = ts\n expected = index[i]\n assert result == expected\n\n index = date_range(\n \"2012-01-01\", periods=3, freq=\"H\", tz=dateutil.tz.tzoffset(None, -28800)\n )\n\n for i, ts in enumerate(index):\n result = ts\n expected = index[i]\n assert result._repr_base == expected._repr_base\n assert result == expected\n\n # 9100\n index = pd.DatetimeIndex(\n [\"2014-12-01 03:32:39.987000-08:00\", \"2014-12-01 04:12:34.987000-08:00\"]\n )\n for i, ts in enumerate(index):\n result = ts\n expected = index[i]\n assert result._repr_base == expected._repr_base\n assert result == expected\n\n @pytest.mark.parametrize(\"periods\", [0, 9999, 10000, 10001])\n def test_iteration_over_chunksize(self, periods):\n # GH21012\n\n index = date_range(\"2000-01-01 00:00:00\", periods=periods, freq=\"min\")\n num = 0\n for stamp in index:\n assert index[num] == stamp\n num += 1\n assert num == len(index)\n\n def test_misc_coverage(self):\n rng = date_range(\"1/1/2000\", periods=5)\n result = rng.groupby(rng.day)\n assert isinstance(list(result.values())[0][0], Timestamp)\n\n idx = DatetimeIndex([\"2000-01-03\", \"2000-01-01\", \"2000-01-02\"])\n assert not idx.equals(list(idx))\n\n non_datetime = Index(list(\"abc\"))\n assert not idx.equals(list(non_datetime))\n\n def test_string_index_series_name_converted(self):\n # #1644\n df = DataFrame(np.random.randn(10, 4), index=date_range(\"1/1/2000\", periods=10))\n\n result = df.loc[\"1/3/2000\"]\n assert result.name == df.index[2]\n\n result = df.T[\"1/3/2000\"]\n assert result.name == df.index[2]\n\n def test_argmin_argmax(self):\n idx = DatetimeIndex([\"2000-01-04\", \"2000-01-01\", \"2000-01-02\"])\n assert idx.argmin() == 1\n assert idx.argmax() == 0\n\n def test_sort_values(self):\n idx = DatetimeIndex([\"2000-01-04\", \"2000-01-01\", \"2000-01-02\"])\n\n ordered = idx.sort_values()\n assert ordered.is_monotonic\n\n ordered = idx.sort_values(ascending=False)\n assert ordered[::-1].is_monotonic\n\n ordered, dexer = idx.sort_values(return_indexer=True)\n assert ordered.is_monotonic\n tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))\n\n ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)\n assert ordered[::-1].is_monotonic\n tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))\n\n def test_map_bug_1677(self):\n index = DatetimeIndex([\"2012-04-25 09:30:00.393000\"])\n f = index.asof\n\n result = index.map(f)\n expected = Index([f(index[0])])\n tm.assert_index_equal(result, expected)\n\n def test_groupby_function_tuple_1677(self):\n df = DataFrame(np.random.rand(100), index=date_range(\"1/1/2000\", periods=100))\n 
monthly_group = df.groupby(lambda x: (x.year, x.month))\n\n result = monthly_group.mean()\n assert isinstance(result.index[0], tuple)\n\n def test_append_numpy_bug_1681(self):\n # another datetime64 bug\n dr = date_range(\"2011/1/1\", \"2012/1/1\", freq=\"W-FRI\")\n a = DataFrame()\n c = DataFrame({\"A\": \"foo\", \"B\": dr}, index=dr)\n\n result = a.append(c)\n assert (result[\"B\"] == dr).all()\n\n def test_isin(self):\n index = tm.makeDateIndex(4)\n result = index.isin(index)\n assert result.all()\n\n result = index.isin(list(index))\n assert result.all()\n\n tm.assert_almost_equal(\n index.isin([index[2], 5]), np.array([False, False, True, False])\n )\n\n def test_does_not_convert_mixed_integer(self):\n df = tm.makeCustomDataframe(\n 10,\n 10,\n data_gen_f=lambda *args, **kwargs: randn(),\n r_idx_type=\"i\",\n c_idx_type=\"dt\",\n )\n cols = df.columns.join(df.index, how=\"outer\")\n joined = cols.join(df.columns)\n assert cols.dtype == np.dtype(\"O\")\n assert cols.dtype == joined.dtype\n tm.assert_numpy_array_equal(cols.values, joined.values)\n\n def test_join_self(self, join_type):\n index = date_range(\"1/1/2000\", periods=10)\n joined = index.join(index, how=join_type)\n assert index is joined\n\n def assert_index_parameters(self, index):\n assert index.freq == \"40960N\"\n assert index.inferred_freq == \"40960N\"\n\n def test_ns_index(self):\n nsamples = 400\n ns = int(1e9 / 24414)\n dtstart = np.datetime64(\"2012-09-20T00:00:00\")\n\n dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, \"ns\")\n freq = ns * offsets.Nano()\n index = pd.DatetimeIndex(dt, freq=freq, name=\"time\")\n self.assert_index_parameters(index)\n\n new_index = pd.date_range(start=index[0], end=index[-1], freq=index.freq)\n self.assert_index_parameters(new_index)\n\n def test_join_with_period_index(self, join_type):\n df = tm.makeCustomDataframe(\n 10,\n 10,\n data_gen_f=lambda *args: np.random.randint(2),\n c_idx_type=\"p\",\n r_idx_type=\"dt\",\n )\n s = df.iloc[:5, 0]\n\n expected = df.columns.astype(\"O\").join(s.index, how=join_type)\n result = df.columns.join(s.index, how=join_type)\n tm.assert_index_equal(expected, result)\n\n def test_factorize(self):\n idx1 = DatetimeIndex(\n [\"2014-01\", \"2014-01\", \"2014-02\", \"2014-02\", \"2014-03\", \"2014-03\"]\n )\n\n exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)\n exp_idx = DatetimeIndex([\"2014-01\", \"2014-02\", \"2014-03\"])\n\n arr, idx = idx1.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n arr, idx = idx1.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n # tz must be preserved\n idx1 = idx1.tz_localize(\"Asia/Tokyo\")\n exp_idx = exp_idx.tz_localize(\"Asia/Tokyo\")\n\n arr, idx = idx1.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n idx2 = pd.DatetimeIndex(\n [\"2014-03\", \"2014-03\", \"2014-02\", \"2014-01\", \"2014-03\", \"2014-01\"]\n )\n\n exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)\n exp_idx = DatetimeIndex([\"2014-01\", \"2014-02\", \"2014-03\"])\n arr, idx = idx2.factorize(sort=True)\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)\n exp_idx = DatetimeIndex([\"2014-03\", \"2014-02\", \"2014-01\"])\n arr, idx = idx2.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, exp_idx)\n\n # freq must be preserved\n idx3 = date_range(\"2000-01\", periods=4, 
freq=\"M\", tz=\"Asia/Tokyo\")\n exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)\n arr, idx = idx3.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(idx, idx3)\n\n def test_factorize_tz(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH#13750\n base = pd.date_range(\"2016-11-05\", freq=\"H\", periods=100, tz=tz)\n idx = base.repeat(5)\n\n exp_arr = np.arange(100, dtype=np.intp).repeat(5)\n\n for obj in [idx, pd.Series(idx)]:\n arr, res = obj.factorize()\n tm.assert_numpy_array_equal(arr, exp_arr)\n tm.assert_index_equal(res, base)\n\n def test_factorize_dst(self):\n # GH 13750\n idx = pd.date_range(\"2016-11-06\", freq=\"H\", periods=12, tz=\"US/Eastern\")\n\n for obj in [idx, pd.Series(idx)]:\n arr, res = obj.factorize()\n tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))\n tm.assert_index_equal(res, idx)\n\n idx = pd.date_range(\"2016-06-13\", freq=\"H\", periods=12, tz=\"US/Eastern\")\n\n for obj in [idx, pd.Series(idx)]:\n arr, res = obj.factorize()\n tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))\n tm.assert_index_equal(res, idx)\n\n @pytest.mark.parametrize(\n \"arr, expected\",\n [\n (pd.DatetimeIndex([\"2017\", \"2017\"]), pd.DatetimeIndex([\"2017\"])),\n (\n pd.DatetimeIndex([\"2017\", \"2017\"], tz=\"US/Eastern\"),\n pd.DatetimeIndex([\"2017\"], tz=\"US/Eastern\"),\n ),\n ],\n )\n def test_unique(self, arr, expected):\n result = arr.unique()\n tm.assert_index_equal(result, expected)\n # GH 21737\n # Ensure the underlying data is consistent\n assert result[0] == expected[0]\n\n def test_asarray_tz_naive(self):\n # This shouldn't produce a warning.\n idx = pd.date_range(\"2000\", periods=2)\n # M8[ns] by default\n with tm.assert_produces_warning(None):\n result = np.asarray(idx)\n\n expected = np.array([\"2000-01-01\", \"2000-01-02\"], dtype=\"M8[ns]\")\n tm.assert_numpy_array_equal(result, expected)\n\n # optionally, object\n with tm.assert_produces_warning(None):\n result = np.asarray(idx, dtype=object)\n\n expected = np.array([pd.Timestamp(\"2000-01-01\"), pd.Timestamp(\"2000-01-02\")])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_asarray_tz_aware(self):\n tz = \"US/Central\"\n idx = pd.date_range(\"2000\", periods=2, tz=tz)\n expected = np.array([\"2000-01-01T06\", \"2000-01-02T06\"], dtype=\"M8[ns]\")\n # We warn by default and return an ndarray[M8[ns]]\n with tm.assert_produces_warning(FutureWarning):\n result = np.asarray(idx)\n\n tm.assert_numpy_array_equal(result, expected)\n\n # Old behavior with no warning\n with tm.assert_produces_warning(None):\n result = np.asarray(idx, dtype=\"M8[ns]\")\n\n tm.assert_numpy_array_equal(result, expected)\n\n # Future behavior with no warning\n expected = np.array(\n [pd.Timestamp(\"2000-01-01\", tz=tz), pd.Timestamp(\"2000-01-02\", tz=tz)]\n )\n with tm.assert_produces_warning(None):\n result = np.asarray(idx, dtype=object)\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_to_frame_datetime_tz(self):\n # GH 25809\n idx = date_range(start=\"2019-01-01\", end=\"2019-01-30\", freq=\"D\", tz=\"UTC\")\n result = idx.to_frame()\n expected = DataFrame(idx, index=idx)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"name\", [None, \"name\"])\n def test_index_map(self, name):\n # see GH20990\n count = 6\n index = pd.date_range(\"2018-01-01\", periods=count, freq=\"M\", name=name).map(\n lambda x: (x.year, x.month)\n )\n exp_index = pd.MultiIndex.from_product(\n ((2018,), range(1, 7)), names=[name, name]\n )\n 
tm.assert_index_equal(index, exp_index)\n" ]
[ [ "numpy.array", "pandas.Index", "pandas.util.testing.assert_frame_equal", "pandas.io.json._normalize.nested_to_record", "pandas.DataFrame", "pandas.io.json.json_normalize", "pandas.util.testing.assert_produces_warning", "pandas.util.testing.assert_equal" ], [ "numpy.random.rand", "pandas.DatetimeIndex", "pandas.offsets.Nano", "pandas.Timestamp", "numpy.int_", "numpy.dtype", "pandas.util.testing.assert_numpy_array_equal", "pandas.DataFrame", "pandas.util.testing.assert_index_equal", "numpy.arange", "pandas.util.testing.assert_produces_warning", "numpy.random.randint", "pandas.util.testing.makeDateIndex", "pandas.Period", "numpy.array", "pandas.util.testing.round_trip_pickle", "numpy.random.randn", "numpy.timedelta64", "numpy.datetime64", "pandas.Index", "pandas.util.testing.assert_frame_equal", "numpy.asarray", "pandas.date_range", "pandas.util.testing.assert_series_equal", "pandas.Series" ] ]
jccmak/lightpipes
[ "1a296fe08bdd97fc9a0e11f92bab25c85f68e57d" ]
[ "sphinx-sources/Examples/Commands/PipFFT.py" ]
[ "from LightPipes import *\nimport matplotlib.pyplot as plt\n\nsize=15*mm\nwavelength=1*um\nN=150\nz=1*m\nR=3*mm\nRf=1.5*mm\nseed=7\nMaxPhase=1.5\n\nF=Begin(size,wavelength,N);\nF=CircAperture(R,0,0,F);\nF=RandomPhase(seed,MaxPhase,F);\nF=Fresnel(z,F);\nI0=Intensity(0,F);\n\nF=PipFFT(1,F);\nF=CircAperture(Rf,0,0,F);\nF=PipFFT(-1,F);\nI1=Intensity(1,F);\n\nfig=plt.figure(figsize=(10,6))\nax1 = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nax1.imshow(I0,cmap='rainbow'); ax1.axis('off'); ax1.set_title('Unfiltered intensity')\nax2.imshow(I1,cmap='rainbow'); ax2.axis('off'); ax2.set_title('Filtered intensity')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
gabrielfior/hackzurich-earthquake
[ "448f6229c8b87ce7aa9ecbcf1e5d585ef553a532" ]
[ "notebooks/execute.py" ]
[ "import pandas as pd\nfrom fastai.tabular.all import *\nfrom fastai.tabular.data import *\nfrom functools import reduce\nfrom tqdm import tqdm, trange\n\nlearn = load_learner('monster_model_10batches.pkl')\n\ndf = pd.read_csv('../public_data/train.csv')\ntest = pd.read_csv('../public_data/test.csv')\nbuild_owner = pd.read_csv('../public_data/building_ownership.csv')\nbuild_struct = pd.read_csv('../public_data/building_structure.csv')\nward_demo = pd.read_csv('../public_data/ward_demographic_data.csv')\n\n# merge everything before predicting\n# joining on building_id\ndfs = [test, build_owner,build_struct]\ncol_name='building_id'\ndf_final = reduce(lambda left,right: pd.merge(left,right,on=col_name), dfs)\n#df_final.shape, df_final.columns\ndf_end = df_final.merge(ward_demo,left_on='ward_id_x',right_on='ward_id')\n\npreds = []\nfor i in trange(len(df_end)):\n obj = df_end.iloc[i]\n row, _,_ = learn.predict(obj)\n preds.append({'building_id':df_end.iloc[i]['building_id'],\n 'damage_grade':int(row['damage_grade'].tolist()[0])})\n\ndf_export = pd.DataFrame(preds)\ndf_export.to_csv('submission_simple.csv',index=False)" ]
[ [ "pandas.DataFrame", "pandas.read_csv", "pandas.merge" ] ]
TOffergeld/pandapower
[ "630e3278ca012535f78282ae73f1b86f3fe932fc" ]
[ "pandapower/test/loadflow/test_runpp.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport copy\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport pandapower as pp\nfrom pandapower.auxiliary import _check_connectivity, _add_ppc_options, lightsim2grid_available\nfrom pandapower.networks import create_cigre_network_mv, four_loads_with_branches_out, \\\n example_simple, simple_four_bus_system, example_multivoltage\nfrom pandapower.pd2ppc import _pd2ppc\nfrom pandapower.pf.create_jacobian import _create_J_without_numba\nfrom pandapower.pf.run_newton_raphson_pf import _get_pf_variables_from_ppci\nfrom pandapower.powerflow import LoadflowNotConverged\nfrom pandapower.test.consistency_checks import runpp_with_consistency_checks\nfrom pandapower.test.loadflow.result_test_network_generator import add_test_xward, add_test_trafo3w, \\\n add_test_line, add_test_oos_bus_with_is_element, result_test_network_generator, add_test_trafo\nfrom pandapower.test.toolbox import add_grid_connection, create_test_line, assert_net_equal\nfrom pandapower.toolbox import nets_equal\n\n\ndef test_minimal_net(**kwargs):\n # tests corner-case when the grid only has 1 bus and an ext-grid\n net = pp.create_empty_network()\n b = pp.create_bus(net, 110)\n pp.create_ext_grid(net, b)\n runpp_with_consistency_checks(net, **kwargs)\n\n pp.create_load(net, b, p_mw=0.1)\n runpp_with_consistency_checks(net, **kwargs)\n\n b2 = pp.create_bus(net, 110)\n pp.create_switch(net, b, b2, 'b')\n pp.create_sgen(net, b2, p_mw=0.2)\n runpp_with_consistency_checks(net, **kwargs)\n\n\ndef test_set_user_pf_options():\n net = example_simple()\n pp.runpp(net)\n\n old_options = net._options.copy()\n test_options = {key: i for i, key in enumerate(old_options.keys())}\n\n pp.set_user_pf_options(net, hello='bye', **test_options)\n test_options.update({'hello': 'bye'})\n\n assert net.user_pf_options == test_options\n\n # remove what is in user_pf_options and add hello=world\n pp.set_user_pf_options(net, overwrite=True, hello='world')\n assert net.user_pf_options == {'hello': 'world'}\n\n # check if 'hello' is added to net._options, but other options are untouched\n pp.runpp(net)\n assert 'hello' in net._options.keys() and net._options['hello'] == 'world'\n net._options.pop('hello')\n assert net._options == old_options\n\n # check if user_pf_options can be deleted and net._options is as it was before\n pp.set_user_pf_options(net, overwrite=True, hello='world')\n pp.set_user_pf_options(net, overwrite=True)\n assert net.user_pf_options == {}\n pp.runpp(net)\n assert 'hello' not in net._options.keys()\n\n # see if user arguments overrule user_pf_options, but other user_pf_options still have the\n # priority\n pp.set_user_pf_options(net, tolerance_mva=1e-6, max_iteration=20)\n pp.runpp(net, tolerance_mva=1e-2)\n assert net.user_pf_options['tolerance_mva'] == 1e-6\n assert net._options['tolerance_mva'] == 1e-2\n assert net._options['max_iteration'] == 20\n\n\ndef test_kwargs_with_user_options():\n net = example_simple()\n pp.runpp(net)\n assert net._options[\"trafo3w_losses\"] == \"hv\"\n pp.set_user_pf_options(net, trafo3w_losses=\"lv\")\n pp.runpp(net)\n assert net._options[\"trafo3w_losses\"] == \"lv\"\n\n\[email protected](reason=\"Until now there was no way found to dynamically identify \"\n \"the arguments passed to runpp, so if the user options are \"\n \"overwritten with the default values, this is not 
recognized.\")\ndef test_overwrite_default_args_with_user_options():\n net = example_simple()\n pp.runpp(net)\n assert net._options[\"check_connectivity\"] is True\n pp.set_user_pf_options(net, check_connectivity=False)\n pp.runpp(net)\n assert net._options[\"check_connectivity\"] is False\n pp.runpp(net, check_connectivity=True)\n assert net._options[\"check_connectivity\"] is True\n\n\ndef test_runpp_init():\n net = pp.create_empty_network()\n b1, b2, l1 = add_grid_connection(net)\n b3 = pp.create_bus(net, vn_kv=0.4)\n tidx = pp.create_transformer(net, hv_bus=b2, lv_bus=b3, std_type=\"0.25 MVA 20/0.4 kV\")\n net.trafo.shift_degree.at[tidx] = 70\n pp.runpp(net)\n va = net.res_bus.va_degree.at[4]\n pp.runpp(net, calculate_voltage_angles=True, init_va_degree=\"dc\")\n assert np.allclose(va - net.trafo.shift_degree.at[tidx], net.res_bus.va_degree.at[4])\n pp.runpp(net, calculate_voltage_angles=True, init_va_degree=\"results\")\n assert np.allclose(va - net.trafo.shift_degree.at[tidx], net.res_bus.va_degree.at[4])\n\n\ndef test_runpp_init_auxiliary_buses():\n net = pp.create_empty_network()\n b1, b2, l1 = add_grid_connection(net, vn_kv=110.)\n b3 = pp.create_bus(net, vn_kv=20.)\n b4 = pp.create_bus(net, vn_kv=10.)\n tidx = pp.create_transformer3w(net, b2, b3, b4, std_type='63/25/38 MVA 110/20/10 kV')\n pp.create_load(net, b3, p_mw=5)\n pp.create_load(net, b4, p_mw=5)\n pp.create_xward(net, b4, ps_mw=1, qs_mvar=1, pz_mw=1, qz_mvar=1, r_ohm=0.1, x_ohm=0.1,\n vm_pu=1.0)\n net.trafo3w.shift_lv_degree.at[tidx] = 120\n net.trafo3w.shift_mv_degree.at[tidx] = 80\n pp.runpp(net)\n va = net.res_bus.va_degree.at[b2]\n pp.runpp(net, calculate_voltage_angles=True, init_va_degree=\"dc\")\n assert np.allclose(va - net.trafo3w.shift_mv_degree.at[tidx], net.res_bus.va_degree.at[b3],\n atol=2)\n assert np.allclose(va - net.trafo3w.shift_lv_degree.at[tidx], net.res_bus.va_degree.at[b4],\n atol=2)\n pp.runpp(net, calculate_voltage_angles=True, init_va_degree=\"results\")\n assert np.allclose(va - net.trafo3w.shift_mv_degree.at[tidx], net.res_bus.va_degree.at[b3],\n atol=2)\n assert np.allclose(va - net.trafo3w.shift_lv_degree.at[tidx], net.res_bus.va_degree.at[b4],\n atol=2)\n\n\ndef test_result_iter():\n for net in result_test_network_generator():\n try:\n runpp_with_consistency_checks(net, enforce_q_lims=True)\n except (AssertionError):\n raise UserWarning(\"Consistency Error after adding %s\" % net.last_added_case)\n except(LoadflowNotConverged):\n raise UserWarning(\"Power flow did not converge after adding %s\" % net.last_added_case)\n\n\[email protected]\ndef bus_bus_net():\n net = pp.create_empty_network()\n add_grid_connection(net)\n for _u in range(4):\n pp.create_bus(net, vn_kv=.4)\n pp.create_load(net, 5, p_mw=0.01)\n pp.create_switch(net, 3, 6, et=\"b\")\n pp.create_switch(net, 4, 5, et=\"b\")\n pp.create_switch(net, 6, 5, et=\"b\")\n pp.create_switch(net, 0, 7, et=\"b\")\n create_test_line(net, 4, 7)\n pp.create_load(net, 4, p_mw=0.01)\n return net\n\n\ndef test_bus_bus_switches(bus_bus_net):\n net = bus_bus_net\n pp.runpp(net)\n assert net.res_bus.vm_pu.at[3] == net.res_bus.vm_pu.at[4] == net.res_bus.vm_pu.at[5] == \\\n net.res_bus.vm_pu.at[6]\n assert net.res_bus.vm_pu.at[0] == net.res_bus.vm_pu.at[7]\n\n net.bus.in_service.at[5] = False\n pp.runpp(net)\n assert net.res_bus.vm_pu.at[3] == net.res_bus.vm_pu.at[6]\n assert net.res_bus.vm_pu.at[0] == net.res_bus.vm_pu.at[7]\n assert pd.isnull(net.res_bus.vm_pu.at[5])\n assert net.res_bus.vm_pu.at[6] != net.res_bus.vm_pu.at[4]\n\n\ndef 
test_bus_bus_switches_merges_two_gens(bus_bus_net):\n \"buses should not be fused if two gens are connected\"\n net = bus_bus_net\n net.bus.in_service.at[5] = False\n pp.create_gen(net, 6, 10)\n pp.create_gen(net, 4, 10)\n net.bus.in_service.at[5] = True\n pp.runpp(net)\n assert net.converged\n\n\ndef test_bus_bus_switches_throws_exception_for_two_gen_with_diff_vm(bus_bus_net):\n \"buses should not be fused if two gens are connected\"\n net = bus_bus_net\n pp.create_gen(net, 6, 10, 1.)\n pp.create_gen(net, 4, 10, 1.1)\n with pytest.raises(UserWarning):\n pp.runpp(net)\n\n\[email protected]\ndef z_switch_net():\n net = pp.create_empty_network()\n for i in range(3):\n pp.create_bus(net, vn_kv=.4)\n pp.create_load(net, i, p_mw=0.1)\n pp.create_ext_grid(net, 0, vm_pu=1.0)\n pp.create_line_from_parameters(net, 0, 1, 1, r_ohm_per_km=0.1 / np.sqrt(2),\n x_ohm_per_km=0.1 / np.sqrt(2),\n c_nf_per_km=0, max_i_ka=.2)\n pp.create_switch(net, 0, 2, et=\"b\", z_ohm=0.1)\n return net\n\n\[email protected](\"numba\", [True, False])\ndef test_z_switch(z_switch_net, numba):\n net = z_switch_net\n pp.runpp(net, numba=numba, switch_rx_ratio=1)\n assert net.res_bus.vm_pu.at[1] == net.res_bus.vm_pu.at[2]\n\n net_zero_z_switch = copy.deepcopy(net)\n net_zero_z_switch.switch.z_ohm = 0\n pp.runpp(net_zero_z_switch, numba=numba, switch_rx_ratio=1)\n assert net_zero_z_switch.res_bus.vm_pu.at[0] == net_zero_z_switch.res_bus.vm_pu.at[2]\n\n\[email protected]\ndef z_switch_net_4bus_parallel():\n net = pp.create_empty_network()\n for i in range(4):\n pp.create_bus(net, vn_kv=.4)\n pp.create_load(net, i, p_mw=0.1)\n pp.create_ext_grid(net, 0, vm_pu=1.0)\n pp.create_line_from_parameters(net, 0, 1, 1, r_ohm_per_km=0.1 / np.sqrt(2),\n x_ohm_per_km=0.1 / np.sqrt(2),\n c_nf_per_km=0, max_i_ka=.2)\n pp.create_line_from_parameters(net, 1, 3, 1, r_ohm_per_km=0.1 / np.sqrt(2),\n x_ohm_per_km=0.1 / np.sqrt(2),\n c_nf_per_km=0, max_i_ka=.2)\n pp.create_switch(net, 0, 2, et=\"b\", z_ohm=0.1)\n pp.create_switch(net, 0, 2, et=\"b\", z_ohm=0)\n return net\n\n\[email protected]\ndef z_switch_net_4bus():\n net = pp.create_empty_network()\n for i in range(4):\n pp.create_bus(net, vn_kv=.4)\n pp.create_load(net, i, p_mw=0.01)\n pp.create_ext_grid(net, 0, vm_pu=1.0)\n pp.create_line_from_parameters(net, 0, 1, 1, r_ohm_per_km=0.1 / np.sqrt(2),\n x_ohm_per_km=0.1 / np.sqrt(2),\n c_nf_per_km=0, max_i_ka=.2)\n pp.create_switch(net, 1, 2, et=\"b\", z_ohm=0.1)\n pp.create_switch(net, 2, 3, et=\"b\", z_ohm=0)\n return net\n\n\[email protected](\"numba\", [True, False])\ndef test_switch_fuse_z_ohm_0(z_switch_net_4bus_parallel, z_switch_net_4bus, numba):\n net = z_switch_net_4bus_parallel\n pp.runpp(net, numba=numba)\n assert net.res_bus.vm_pu[0] == net.res_bus.vm_pu[2]\n assert net.res_switch.i_ka[0] == 0\n\n net = z_switch_net_4bus\n pp.runpp(net, numba=numba)\n assert net.res_bus.vm_pu[2] == net.res_bus.vm_pu[3]\n assert net.res_bus.vm_pu[1] != net.res_bus.vm_pu[2]\n\n\[email protected](\"numba\", [True, False])\ndef test_switch_z_ohm_different(z_switch_net_4bus_parallel, z_switch_net_4bus, numba):\n net = z_switch_net_4bus_parallel\n net.switch.at[1, 'z_ohm'] = 0.2\n pp.runpp(net, numba=numba)\n assert net.res_bus.vm_pu[0] != net.res_bus.vm_pu[2]\n assert np.all(net.res_switch.i_ka > 0)\n\n net = z_switch_net_4bus\n net.switch.at[1, 'z_ohm'] = 0.2\n pp.runpp(net, numba=numba)\n assert net.res_bus.vm_pu[2] != net.res_bus.vm_pu[3]\n assert net.res_bus.vm_pu[1] != net.res_bus.vm_pu[2]\n\n\ndef test_two_open_switches():\n net = 
pp.create_empty_network()\n b1, b2, l1 = add_grid_connection(net)\n b3 = pp.create_bus(net, vn_kv=20.)\n l2 = create_test_line(net, b2, b3)\n create_test_line(net, b3, b1)\n pp.create_switch(net, b2, l2, et=\"l\", closed=False)\n pp.create_switch(net, b3, l2, et=\"l\", closed=False)\n pp.runpp(net)\n assert np.isnan(net.res_line.i_ka.at[l2]) or net.res_line.i_ka.at[l2] == 0\n\n\ndef test_oos_bus():\n net = pp.create_empty_network()\n add_test_oos_bus_with_is_element(net)\n assert runpp_with_consistency_checks(net)\n\n # test for pq-node result\n pp.create_shunt(net, 6, q_mvar=0.8)\n assert runpp_with_consistency_checks(net)\n\n # 1test for pv-node result\n pp.create_gen(net, 4, p_mw=0.5)\n assert runpp_with_consistency_checks(net)\n\n\ndef get_isolated(net):\n net._options = {}\n _add_ppc_options(net, calculate_voltage_angles=False,\n trafo_model=\"t\", check_connectivity=False,\n mode=\"pf\", switch_rx_ratio=2, init_vm_pu=\"flat\",\n init_va_degree=\"flat\",\n enforce_q_lims=False, recycle=None)\n\n ppc, ppci = _pd2ppc(net)\n return _check_connectivity(ppc)\n\n\ndef test_connectivity_check_island_without_pv_bus():\n # Network with islands without pv bus -> all buses in island should be set out of service\n net = create_cigre_network_mv(with_der=False)\n iso_buses, iso_p, iso_q = get_isolated(net)\n assert len(iso_buses) == 0\n assert np.isclose(iso_p, 0)\n assert np.isclose(iso_q, 0)\n\n isolated_bus1 = pp.create_bus(net, vn_kv=20., name=\"isolated Bus1\")\n isolated_bus2 = pp.create_bus(net, vn_kv=20., name=\"isolated Bus2\")\n pp.create_line(net, isolated_bus2, isolated_bus1, length_km=1,\n std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\",\n name=\"IsolatedLine\")\n iso_buses, iso_p, iso_q = get_isolated(net)\n assert len(iso_buses) == 2\n assert np.isclose(iso_p, 0)\n assert np.isclose(iso_q, 0)\n\n pp.create_load(net, isolated_bus1, p_mw=0.2, q_mvar=0.02)\n pp.create_sgen(net, isolated_bus2, p_mw=0.15, q_mvar=0.01)\n\n # with pytest.warns(UserWarning):\n iso_buses, iso_p, iso_q = get_isolated(net)\n assert len(iso_buses) == 2\n assert np.isclose(iso_p, 350)\n assert np.isclose(iso_q, 30)\n # with pytest.warns(UserWarning):\n runpp_with_consistency_checks(net, check_connectivity=True)\n\n\ndef test_connectivity_check_island_with_one_pv_bus():\n # Network with islands with one PV bus -> PV bus should be converted to the reference bus\n net = create_cigre_network_mv(with_der=False)\n iso_buses, iso_p, iso_q = get_isolated(net)\n assert len(iso_buses) == 0\n assert np.isclose(iso_p, 0)\n assert np.isclose(iso_q, 0)\n\n isolated_bus1 = pp.create_bus(net, vn_kv=20., name=\"isolated Bus1\")\n isolated_bus2 = pp.create_bus(net, vn_kv=20., name=\"isolated Bus2\")\n isolated_gen = pp.create_bus(net, vn_kv=20., name=\"isolated Gen\")\n isolated_pv_bus = pp.create_gen(net, isolated_gen, p_mw=0.35, vm_pu=1.0, name=\"isolated PV bus\")\n pp.create_line(net, isolated_bus2, isolated_bus1, length_km=1,\n std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\", name=\"IsolatedLine\")\n pp.create_line(net, isolated_gen, isolated_bus1, length_km=1,\n std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\", name=\"IsolatedLineToGen\")\n # with pytest.warns(UserWarning):\n iso_buses, iso_p, iso_q = get_isolated(net)\n\n # assert len(iso_buses) == 0\n # assert np.isclose(iso_p, 0)\n # assert np.isclose(iso_q, 0)\n #\n # pp.create_load(net, isolated_bus1, p_mw=0.200., q_mvar=0.020)\n # pp.create_sgen(net, isolated_bus2, p_mw=0.0150., q_mvar=-0.010)\n #\n # iso_buses, iso_p, iso_q = get_isolated(net)\n # assert len(iso_buses) == 
0\n # assert np.isclose(iso_p, 0)\n # assert np.isclose(iso_q, 0)\n\n # with pytest.warns(UserWarning):\n runpp_with_consistency_checks(net, check_connectivity=True)\n\n\ndef test_connectivity_check_island_with_multiple_pv_buses():\n # Network with islands an multiple PV buses in the island -> Error should be thrown since it\n # would be random to choose just some PV bus as the reference bus\n net = create_cigre_network_mv(with_der=False)\n iso_buses, iso_p, iso_q = get_isolated(net)\n assert len(iso_buses) == 0\n assert np.isclose(iso_p, 0)\n assert np.isclose(iso_q, 0)\n\n isolated_bus1 = pp.create_bus(net, vn_kv=20., name=\"isolated Bus1\")\n isolated_bus2 = pp.create_bus(net, vn_kv=20., name=\"isolated Bus2\")\n isolated_pv_bus1 = pp.create_bus(net, vn_kv=20., name=\"isolated PV bus1\")\n isolated_pv_bus2 = pp.create_bus(net, vn_kv=20., name=\"isolated PV bus2\")\n pp.create_gen(net, isolated_pv_bus1, p_mw=0.3, vm_pu=1.0, name=\"isolated PV bus1\")\n pp.create_gen(net, isolated_pv_bus2, p_mw=0.05, vm_pu=1.0, name=\"isolated PV bus2\")\n\n pp.create_line(net, isolated_pv_bus1, isolated_bus1, length_km=1,\n std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\",\n name=\"IsolatedLineToGen1\")\n pp.create_line(net, isolated_pv_bus2, isolated_bus2, length_km=1,\n std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\",\n name=\"IsolatedLineToGen2\")\n pp.create_line(net, isolated_bus2, isolated_bus1, length_km=1,\n std_type=\"N2XS(FL)2Y 1x300 RM/35 64/110 kV\",\n name=\"IsolatedLine\")\n # ToDo with pytest.warns(UserWarning):\n iso_buses, iso_p, iso_q = get_isolated(net)\n\n\ndef test_isolated_in_service_bus_at_oos_line():\n net = pp.create_empty_network()\n b1, b2, l1 = add_grid_connection(net)\n b = pp.create_bus(net, vn_kv=135)\n l = pp.create_line(net, b2, b, 0.1, std_type=\"NAYY 4x150 SE\")\n net.line.loc[l, \"in_service\"] = False\n assert runpp_with_consistency_checks(net, init=\"flat\")\n\n\ndef test_isolated_in_service_line():\n # ToDo: Fix this\n net = pp.create_empty_network()\n _, b2, l1 = add_grid_connection(net)\n b = pp.create_bus(net, vn_kv=20.)\n pp.create_line(net, b2, b, 0.1, std_type=\"NAYY 4x150 SE\")\n net.line.loc[l1, \"in_service\"] = False\n assert runpp_with_consistency_checks(net, init=\"flat\")\n\n\ndef test_makeYbus():\n # tests if makeYbus fails for nets where every bus is connected to each other\n net = pp.create_empty_network()\n b1, b2, l1 = add_grid_connection(net)\n\n # number of buses to create\n n_bus = 20\n bus_list = []\n # generate buses and connect them\n for _ in range(n_bus):\n bus_list.append(pp.create_bus(net, vn_kv=20.))\n\n # connect the first bus to slack node\n create_test_line(net, bus_list[0], b2)\n # iterate over every bus and add connection to every other bus\n for bus_1 in bus_list:\n for bus_2 in bus_list:\n # add no connection to itself\n if bus_1 == bus_2:\n continue\n create_test_line(net, bus_1, bus_2)\n\n assert runpp_with_consistency_checks(net)\n\n\ndef test_test_sn_mva():\n test_net_gen1 = result_test_network_generator(sn_mva=1)\n test_net_gen2 = result_test_network_generator(sn_mva=2)\n for net1, net2 in zip(test_net_gen1, test_net_gen2):\n pp.runpp(net1)\n pp.runpp(net2)\n try:\n assert_net_equal(net1, net2)\n except:\n raise UserWarning(\"Result difference due to sn_mva after adding %s\" %\n net1.last_added_case)\n\n\ndef test_bsfw_algorithm():\n net = example_simple()\n\n pp.runpp(net)\n vm_nr = copy.copy(net.res_bus.vm_pu)\n va_nr = copy.copy(net.res_bus.va_degree)\n\n pp.runpp(net, algorithm='bfsw')\n vm_alg = net.res_bus.vm_pu\n va_alg = 
net.res_bus.va_degree\n\n assert np.allclose(vm_nr, vm_alg)\n assert np.allclose(va_nr, va_alg)\n\n\[email protected](reason=\"unknown\")\ndef test_bsfw_algorithm_multi_net():\n net = example_simple()\n add_grid_connection(net, vn_kv=110., zone=\"second\")\n\n pp.runpp(net)\n vm_nr = copy.copy(net.res_bus.vm_pu)\n va_nr = copy.copy(net.res_bus.va_degree)\n\n pp.runpp(net, algorithm='bfsw')\n vm_alg = net.res_bus.vm_pu\n va_alg = net.res_bus.va_degree\n\n assert np.allclose(vm_nr, vm_alg)\n assert np.allclose(va_nr, va_alg)\n\n\ndef test_bsfw_algorithm_with_trafo_shift_and_voltage_angles():\n net = example_simple()\n net[\"trafo\"].loc[:, \"shift_degree\"] = 180.\n\n pp.runpp(net, calculate_voltage_angles=True)\n vm_nr = net.res_bus.vm_pu\n va_nr = net.res_bus.va_degree\n\n pp.runpp(net, algorithm='bfsw', calculate_voltage_angles=True)\n vm_alg = net.res_bus.vm_pu\n va_alg = net.res_bus.va_degree\n assert np.allclose(vm_nr, vm_alg)\n assert np.allclose(va_nr, va_alg)\n\n\ndef test_bsfw_algorithm_with_enforce_q_lims():\n net = example_simple()\n net.ext_grid[\"max_q_mvar\"] = [0.1]\n net.ext_grid[\"min_q_mvar\"] = [-0.1]\n net.gen[\"max_q_mvar\"] = [5.]\n net.gen[\"min_q_mvar\"] = [4.]\n\n pp.runpp(net, enforce_q_lims=True)\n vm_nr = net.res_bus.vm_pu\n va_nr = net.res_bus.va_degree\n\n pp.runpp(net, algorithm='bfsw', enforce_q_lims=True)\n vm_alg = net.res_bus.vm_pu\n va_alg = net.res_bus.va_degree\n assert np.allclose(vm_nr, vm_alg)\n assert np.allclose(va_nr, va_alg)\n\n\ndef test_bsfw_algorithm_with_branch_loops():\n net = example_simple()\n pp.create_line(net, 0, 6, length_km=2.5,\n std_type=\"NA2XS2Y 1x240 RM/25 12/20 kV\", name=\"Line meshed\")\n net.switch.loc[:, \"closed\"] = True\n\n pp.runpp(net)\n vm_nr = net.res_bus.vm_pu\n va_nr = net.res_bus.va_degree\n\n pp.runpp(net, algorithm='bfsw')\n vm_alg = net.res_bus.vm_pu\n va_alg = net.res_bus.va_degree\n assert np.allclose(vm_nr, vm_alg)\n assert np.allclose(va_nr, va_alg)\n\n\[email protected]\ndef test_pypower_algorithms_iter():\n alg_to_test = ['fdbx', 'fdxb', 'gs']\n for alg in alg_to_test:\n for net in result_test_network_generator(skip_test_impedance=True):\n try:\n runpp_with_consistency_checks(net, enforce_q_lims=True, algorithm=alg)\n runpp_with_consistency_checks(net, enforce_q_lims=False, algorithm=alg)\n except (AssertionError):\n raise UserWarning(\"Consistency Error after adding %s\" % net.last_added_case)\n except(LoadflowNotConverged):\n raise UserWarning(\"Power flow did not converge after adding %s\" %\n net.last_added_case)\n\n\[email protected]\ndef test_zip_loads_gridcal():\n # Tests newton power flow considering zip loads against GridCal's pf result\n\n # Results used for benchmarking are obtained using GridCal with the following code:\n # from GridCal.grid.CalculationEngine import *\n #\n # np.set_printoptions(precision=4)\n # grid = MultiCircuit()\n #\n # # Add buses\n # bus1 = Bus('Bus 1', vnom=20)\n #\n # bus1.controlled_generators.append(ControlledGenerator('Slack Generator', voltage_module=1.0))\n # grid.add_bus(bus1)\n #\n # bus2 = Bus('Bus 2', vnom=20)\n # bus2.loads.append(Load('load 2',\n # power=0.2 * complex(40, 20),\n # impedance=1 / (0.40 * (40. - 20.j)),\n # current=np.conj(0.40 * (40. 
+ 20.j)) / (20 * np.sqrt(3)),\n # ))\n # grid.add_bus(bus2)\n #\n # bus3 = Bus('Bus 3', vnom=20)\n # bus3.loads.append(Load('load 3', power=complex(25, 15)))\n # grid.add_bus(bus3)\n #\n # bus4 = Bus('Bus 4', vnom=20)\n # bus4.loads.append(Load('load 4', power=complex(40, 20)))\n # grid.add_bus(bus4)\n #\n # bus5 = Bus('Bus 5', vnom=20)\n # bus5.loads.append(Load('load 5', power=complex(50, 20)))\n # grid.add_bus(bus5)\n #\n # # add branches (Lines in this case)\n # grid.add_branch(Branch(bus1, bus2, 'line 1-2', r=0.05, x=0.11, b=0.02))\n #\n # grid.add_branch(Branch(bus1, bus3, 'line 1-3', r=0.05, x=0.11, b=0.02))\n #\n # grid.add_branch(Branch(bus1, bus5, 'line 1-5', r=0.03, x=0.08, b=0.02))\n #\n # grid.add_branch(Branch(bus2, bus3, 'line 2-3', r=0.04, x=0.09, b=0.02))\n #\n # grid.add_branch(Branch(bus2, bus5, 'line 2-5', r=0.04, x=0.09, b=0.02))\n #\n # grid.add_branch(Branch(bus3, bus4, 'line 3-4', r=0.06, x=0.13, b=0.03))\n #\n # grid.add_branch(Branch(bus4, bus5, 'line 4-5', r=0.04, x=0.09, b=0.02))\n #\n # grid.compile()\n #\n # print('Ybus:\\n', grid.circuits[0].power_flow_input.Ybus.todense())\n #\n # options = PowerFlowOptions(SolverType.NR, robust=False)\n # power_flow = PowerFlow(grid, options)\n # power_flow.run()\n #\n # print('\\n\\n', grid.name)\n # print('\\t|V|:', abs(grid.power_flow_results.voltage))\n # print('\\tVang:', np.rad2deg(np.angle(grid.power_flow_results.voltage)))\n\n vm_pu_gridcal = np.array([1., 0.9566486349, 0.9555640318, 0.9340468428, 0.9540542172])\n va_degree_gridcal = np.array([0., -2.3717973886, -2.345654238, -3.6303651197, -2.6713716569])\n\n Ybus_gridcal = np.array(\n [[10.9589041096 - 25.9973972603j, -3.4246575342 + 7.5342465753j,\n -3.4246575342 + 7.5342465753j,\n 0.0000000000 + 0.j, -4.1095890411 + 10.9589041096j],\n [-3.4246575342 + 7.5342465753j, 11.8320802147 - 26.1409476063j,\n -4.1237113402 + 9.2783505155j,\n 0.0000000000 + 0.j, -4.1237113402 + 9.2783505155j],\n [-3.4246575342 + 7.5342465753j, -4.1237113402 + 9.2783505155j,\n 10.4751981427 - 23.1190605054j,\n -2.9268292683 + 6.3414634146j, 0.0000000000 + 0.j],\n [0.0000000000 + 0.j, 0.0000000000 + 0.j, -2.9268292683 + 6.3414634146j,\n 7.0505406085 - 15.5948139301j,\n -4.1237113402 + 9.2783505155j],\n [-4.1095890411 + 10.9589041096j, -4.1237113402 + 9.2783505155j, 0.0000000000 + 0.j,\n -4.1237113402 + 9.2783505155j, 12.3570117215 - 29.4856051405j]])\n\n losses_gridcal = 4.69773448916 - 2.710430515j\n\n abs_path = os.path.join(pp.pp_dir, 'networks', 'power_system_test_case_jsons',\n 'case5_demo_gridcal.json')\n net = pp.from_json(abs_path)\n\n pp.runpp(net, voltage_depend_loads=True, recycle=None)\n\n # Test Ybus matrix\n Ybus_pp = net[\"_ppc\"]['internal']['Ybus'].todense()\n bus_ord = net[\"_pd2ppc_lookups\"][\"bus\"]\n Ybus_pp = Ybus_pp[bus_ord, :][:, bus_ord]\n\n assert np.allclose(Ybus_pp, Ybus_gridcal)\n\n # Test Results\n assert np.allclose(net.res_bus.vm_pu, vm_pu_gridcal)\n assert np.allclose(net.res_bus.va_degree, va_degree_gridcal)\n\n # Test losses\n losses_pp = net.res_bus.p_mw.sum() + 1.j * net.res_bus.q_mvar.sum()\n assert np.isclose(losses_gridcal, - losses_pp / 1.e3)\n\n # Test bfsw algorithm\n pp.runpp(net, voltage_depend_loads=True, algorithm='bfsw')\n assert np.allclose(net.res_bus.vm_pu, vm_pu_gridcal)\n assert np.allclose(net.res_bus.va_degree, va_degree_gridcal)\n\n\ndef test_zip_loads_consistency(**kwargs):\n net = four_loads_with_branches_out()\n net.load['const_i_percent'] = 40\n net.load['const_z_percent'] = 40\n assert runpp_with_consistency_checks(net, 
**kwargs)\n\n\ndef test_zip_loads_pf_algorithms():\n net = four_loads_with_branches_out()\n net.load['const_i_percent'] = 40\n net.load['const_z_percent'] = 40\n\n alg_to_test = ['bfsw']\n for alg in alg_to_test:\n pp.runpp(net, algorithm='nr')\n vm_nr = net.res_bus.vm_pu\n va_nr = net.res_bus.va_degree\n\n pp.runpp(net, algorithm=alg)\n vm_alg = net.res_bus.vm_pu\n va_alg = net.res_bus.va_degree\n\n assert np.allclose(vm_nr, vm_alg, rtol=1e-6)\n assert np.allclose(va_nr.values, va_alg.values, rtol=1e-4)\n\n\ndef test_zip_loads_with_voltage_angles():\n net = pp.create_empty_network()\n b1 = pp.create_bus(net, vn_kv=1.)\n b2 = pp.create_bus(net, vn_kv=1.)\n pp.create_ext_grid(net, b1)\n pp.create_line_from_parameters(net, b1, b2, length_km=1, r_ohm_per_km=0.3,\n x_ohm_per_km=0.3, c_nf_per_km=10, max_i_ka=1)\n pp.create_load(net, b2, p_mw=0.002, const_z_percent=0, const_i_percent=100)\n\n pp.set_user_pf_options(net, calculate_voltage_angles=True, init='dc')\n\n pp.runpp(net)\n\n res_load = net.res_load.copy()\n net.ext_grid.va_degree = 100\n\n pp.runpp(net)\n\n assert np.allclose(net.res_load.values, res_load.values)\n\n\ndef test_xward_buses():\n \"\"\"\n Issue: xward elements create dummy buses for the load flow, that are cleaned up afterwards.\n However, if the load flow does not converge, those buses end up staying in the net and don't get\n removed. This can potentially lead to thousands of dummy buses in net.\n \"\"\"\n net = pp.create_empty_network()\n bus_sl = pp.create_bus(net, 110, name='ExtGrid')\n pp.create_ext_grid(net, bus_sl, vm_pu=1)\n bus_x = pp.create_bus(net, 110, name='XWARD')\n pp.create_xward(net, bus_x, 0, 0, 0, 0, 0, 10, 1.1)\n iid = pp.create_impedance(net, bus_sl, bus_x, 0.2, 0.2, 1e3)\n\n bus_num1 = len(net.bus)\n\n pp.runpp(net)\n\n bus_num2 = len(net.bus)\n\n assert bus_num1 == bus_num2\n\n # now - make sure that the loadflow doesn't converge:\n net.impedance.at[iid, 'rft_pu'] = 1\n pp.create_load(net, bus_x, 1e6, 0)\n with pytest.raises(LoadflowNotConverged):\n # here the load flow doesn't converge and there is an extra bus in net\n pp.runpp(net)\n\n bus_num3 = len(net.bus)\n assert bus_num3 == bus_num1\n\n\ndef test_pvpq_lookup():\n net = pp.create_empty_network()\n\n b1 = pp.create_bus(net, vn_kv=0.4, index=4)\n b2 = pp.create_bus(net, vn_kv=0.4, index=2)\n b3 = pp.create_bus(net, vn_kv=0.4, index=3)\n\n pp.create_gen(net, b1, p_mw=0.01, vm_pu=0.4)\n pp.create_load(net, b2, p_mw=0.01)\n pp.create_ext_grid(net, b3)\n\n pp.create_line(net, from_bus=b1, to_bus=b2, length_km=0.5, std_type=\"NAYY 4x120 SE\")\n pp.create_line(net, from_bus=b1, to_bus=b3, length_km=0.5, std_type=\"NAYY 4x120 SE\")\n net_numba = copy.deepcopy(net)\n pp.runpp(net_numba, numba=True)\n pp.runpp(net, numba=False)\n\n assert nets_equal(net, net_numba)\n\n\ndef test_get_internal():\n net = example_simple()\n # for Newton raphson\n pp.runpp(net)\n J_intern = net._ppc[\"internal\"][\"J\"]\n\n ppc = net._ppc\n V_mag = ppc[\"bus\"][:, 7][:-2]\n V_ang = ppc[\"bus\"][:, 8][:-2]\n V = V_mag * np.exp(1j * V_ang / 180 * np.pi)\n\n # Get stored Ybus in ppc\n Ybus = ppc[\"internal\"][\"Ybus\"]\n\n _, ppci = _pd2ppc(net)\n baseMVA, bus, gen, branch, ref, pv, pq, _, _, V0, _ = _get_pf_variables_from_ppci(ppci)\n\n pvpq = np.r_[pv, pq]\n\n J = _create_J_without_numba(Ybus, V, pvpq, pq)\n\n assert sum(sum(abs(abs(J.toarray()) - abs(J_intern.toarray())))) < 0.05\n # get J for all other algorithms\n\n\ndef test_storage_pf():\n net = pp.create_empty_network()\n\n b1 = pp.create_bus(net, vn_kv=0.4)\n b2 = 
pp.create_bus(net, vn_kv=0.4)\n\n pp.create_line(net, b1, b2, length_km=5, std_type=\"NAYY 4x50 SE\")\n\n pp.create_ext_grid(net, b2)\n pp.create_load(net, b1, p_mw=0.010)\n pp.create_sgen(net, b1, p_mw=0.010)\n\n # test generator behaviour\n pp.create_storage(net, b1, p_mw=-0.010, max_e_mwh=0.010)\n pp.create_sgen(net, b1, p_mw=0.010, in_service=False)\n\n res_gen_beh = runpp_with_consistency_checks(net)\n res_ll_stor = net[\"res_line\"].loading_percent.iloc[0]\n\n net[\"storage\"].in_service.iloc[0] = False\n net[\"sgen\"].in_service.iloc[1] = True\n\n runpp_with_consistency_checks(net)\n res_ll_sgen = net[\"res_line\"].loading_percent.iloc[0]\n\n assert np.isclose(res_ll_stor, res_ll_sgen)\n\n # test load behaviour\n pp.create_load(net, b1, p_mw=0.01, in_service=False)\n net[\"storage\"].in_service.iloc[0] = True\n net[\"storage\"].p_mw.iloc[0] = 0.01\n net[\"sgen\"].in_service.iloc[1] = False\n\n res_load_beh = runpp_with_consistency_checks(net)\n res_ll_stor = net[\"res_line\"].loading_percent.iloc[0]\n\n net[\"storage\"].in_service.iloc[0] = False\n net[\"load\"].in_service.iloc[1] = True\n\n runpp_with_consistency_checks(net)\n res_ll_load = net[\"res_line\"].loading_percent.iloc[0]\n\n assert np.isclose(res_ll_stor, res_ll_load)\n\n assert res_gen_beh and res_load_beh\n\n\ndef test_add_element_and_init_results():\n net = simple_four_bus_system()\n pp.runpp(net, init=\"flat\")\n pp.create_bus(net, vn_kv=20.)\n pp.create_line(net, from_bus=2, to_bus=3, length_km=1, name=\"new line\" + str(1),\n std_type=\"NAYY 4x150 SE\")\n pp.runpp(net, init=\"results\")\n\n\ndef test_pp_initialization():\n net = pp.create_empty_network()\n\n b1 = pp.create_bus(net, vn_kv=0.4)\n b2 = pp.create_bus(net, vn_kv=0.4)\n\n pp.create_ext_grid(net, b1, vm_pu=0.7)\n pp.create_line(net, b1, b2, 0.5, std_type=\"NAYY 4x50 SE\", index=4)\n pp.create_load(net, b2, p_mw=0.01)\n\n pp.runpp(net, init_va_degree=\"flat\", init_vm_pu=1.02)\n assert net._ppc[\"iterations\"] == 5\n\n pp.runpp(net, init_va_degree=\"dc\", init_vm_pu=0.8)\n assert net._ppc[\"iterations\"] == 4\n\n pp.runpp(net, init_va_degree=\"flat\", init_vm_pu=np.array([0.75, 0.7]))\n assert net._ppc[\"iterations\"] == 3\n\n pp.runpp(net, init_va_degree=\"dc\", init_vm_pu=[0.75, 0.7])\n assert net._ppc[\"iterations\"] == 3\n\n pp.runpp(net, init_va_degree=\"flat\", init_vm_pu=\"auto\")\n assert net._ppc[\"iterations\"] == 3\n\n pp.runpp(net, init_va_degree=\"dc\")\n assert net._ppc[\"iterations\"] == 3\n\n\ndef test_equal_indices_res():\n # tests if res_bus indices of are the same as the ones in bus.\n # If this is not the case and you init from results, the PF will fail\n net = pp.create_empty_network()\n\n b1 = pp.create_bus(net, vn_kv=10., index=3)\n b2 = pp.create_bus(net, vn_kv=0.4, index=1)\n b3 = pp.create_bus(net, vn_kv=0.4, index=2)\n\n pp.create_ext_grid(net, b1)\n pp.create_transformer(net, b1, b2, std_type=\"0.63 MVA 20/0.4 kV\")\n pp.create_line(net, b2, b3, 0.5, std_type=\"NAYY 4x50 SE\", index=4)\n pp.create_load(net, b3, p_mw=0.010)\n pp.runpp(net)\n net[\"bus\"] = net[\"bus\"].sort_index()\n try:\n pp.runpp(net, init_vm_pu=\"results\", init_va_degree=\"results\")\n assert True\n except LoadflowNotConverged:\n assert False\n\n\ndef test_ext_grid_and_gen_at_one_bus(**kwargs):\n net = pp.create_empty_network()\n b1 = pp.create_bus(net, vn_kv=110)\n b2 = pp.create_bus(net, vn_kv=110)\n pp.create_ext_grid(net, b1, vm_pu=1.01)\n pp.create_line(net, b1, b2, 1., std_type=\"305-AL1/39-ST1A 110.0\")\n pp.create_load(net, bus=b2, p_mw=3.5, 
q_mvar=1)\n\n runpp_with_consistency_checks(net, **kwargs)\n q = net.res_ext_grid.q_mvar.sum()\n\n # create two gens at the slack bus\n g1 = pp.create_gen(net, b1, vm_pu=1.01, p_mw=1)\n g2 = pp.create_gen(net, b1, vm_pu=1.01, p_mw=1)\n runpp_with_consistency_checks(net, **kwargs)\n\n # all the reactive power previously provided by the ext_grid is now provided by the generators\n assert np.isclose(net.res_ext_grid.q_mvar.values, 0)\n assert np.isclose(net.res_gen.q_mvar.sum(), q)\n # since no Q-limits were set, reactive power is distributed equally to both generators\n assert np.isclose(net.res_gen.q_mvar.at[g1], net.res_gen.q_mvar.at[g2])\n\n # set reactive power limits at the generators\n net.gen[\"max_q_mvar\"] = [0.1, 0.01]\n net.gen[\"min_q_mvar\"] = [-0.1, -0.01]\n runpp_with_consistency_checks(net, **kwargs)\n # g1 now has 10 times the reactive power of g2 in accordance with the different Q ranges\n assert np.isclose(net.res_gen.q_mvar.at[g1], net.res_gen.q_mvar.at[g2] * 10)\n # all the reactive power is still provided by the generators, because Q-lims are not enforced\n assert np.allclose(net.res_ext_grid.q_mvar.values, [0])\n assert np.isclose(net.res_gen.q_mvar.sum(), q)\n\n # now enforce Q-lims\n runpp_with_consistency_checks(net, enforce_q_lims=True, **kwargs)\n # both generators are capped at their maximum reactive power (max_q_mvar)\n assert np.allclose(net.res_gen.q_mvar.values, net.gen.max_q_mvar.values)\n # the total reactive power remains unchanged, but the rest of the power is now provided by the ext_grid\n assert np.isclose(net.res_gen.q_mvar.sum() + net.res_ext_grid.q_mvar.sum(), q)\n\n # second ext_grid at the slack bus\n pp.create_ext_grid(net, b1, vm_pu=1.01)\n runpp_with_consistency_checks(net, enforce_q_lims=False, **kwargs)\n # gens still have the correct active power\n assert np.allclose(net.gen.p_mw.values, net.res_gen.p_mw.values)\n # slack active power is evenly distributed to both ext_grids\n assert np.isclose(net.res_ext_grid.p_mw.values[0], net.res_ext_grid.p_mw.values[1])\n\n # q limits at the ext_grids are not enforced\n net.ext_grid[\"max_q_mvar\"] = [0.1, 0.01]\n net.ext_grid[\"min_q_mvar\"] = [-0.1, -0.01]\n runpp_with_consistency_checks(net, enforce_q_lims=True, **kwargs)\n assert net.res_ext_grid.q_mvar.values[0] > net.ext_grid.max_q_mvar.values[0]\n assert np.allclose(net.res_gen.q_mvar.values, net.gen.max_q_mvar.values)\n\n\ndef two_ext_grids_at_one_bus():\n net = pp.create_empty_network()\n b1 = pp.create_bus(net, vn_kv=110, index=3)\n b2 = pp.create_bus(net, vn_kv=110, index=5)\n pp.create_ext_grid(net, b1, vm_pu=1.01, index=2)\n pp.create_line(net, b1, b2, 1., std_type=\"305-AL1/39-ST1A 110.0\")\n pp.create_load(net, bus=b2, p_mw=3.5, q_mvar=1)\n pp.create_gen(net, b1, vm_pu=1.01, p_mw=1)\n runpp_with_consistency_checks(net)\n assert net.converged\n\n # connect second ext_grid to b1 with different angle but out of service\n eg2 = pp.create_ext_grid(net, b1, vm_pu=1.01, va_degree=20, index=5, in_service=False)\n runpp_with_consistency_checks(net) # power flow still converges since eg2 is out of service\n assert net.converged\n\n # error is raised after eg2 is set in service\n net.ext_grid.in_service.at[eg2] = True\n with pytest.raises(UserWarning):\n pp.runpp(net)\n\n # error is also raised when eg2 is connected to first ext_grid through bus-bus switch\n b3 = pp.create_bus(net, vn_kv=110)\n pp.create_switch(net, b1, b3, et=\"b\")\n net.ext_grid.bus.at[eg2] = b3\n with pytest.raises(UserWarning):\n pp.runpp(net)\n\n # no error is raised when 
voltage angles are not calculated\n runpp_with_consistency_checks(net, calculate_voltage_angles=False)\n assert net.converged\n\n # same angle but different voltage magnitude also raises an error\n net.ext_grid.vm_pu.at[eg2] = 1.02\n net.ext_grid.va_degree.at[eg2] = 0\n with pytest.raises(UserWarning):\n pp.runpp(net)\n\n\ndef test_dc_with_ext_grid_at_one_bus():\n net = pp.create_empty_network()\n b1 = pp.create_bus(net, vn_kv=110)\n b2 = pp.create_bus(net, vn_kv=110)\n\n pp.create_ext_grid(net, b1, vm_pu=1.01)\n pp.create_ext_grid(net, b2, vm_pu=1.01)\n\n pp.create_dcline(net, from_bus=b1, to_bus=b2, p_mw=10,\n loss_percent=0, loss_mw=0, vm_from_pu=1.01, vm_to_pu=1.01)\n\n pp.create_sgen(net, b1, p_mw=10)\n pp.create_load(net, b2, p_mw=10)\n\n runpp_with_consistency_checks(net)\n assert np.allclose(net.res_ext_grid.p_mw.values, [0, 0])\n\n\ndef test_init_results_without_results():\n # should switch to \"auto\" mode and not fail\n net = example_multivoltage()\n pp.reset_results(net)\n pp.runpp(net, init=\"results\")\n assert net.converged\n pp.reset_results(net)\n pp.runpp(net, init_vm_pu=\"results\")\n assert net.converged\n pp.reset_results(net)\n pp.runpp(net, init_va_degree=\"results\")\n assert net.converged\n pp.reset_results(net)\n pp.runpp(net, init_va_degree=\"results\", init_vm_pu=\"results\")\n assert net.converged\n\n\ndef test_init_results():\n net = pp.create_empty_network()\n add_test_line(net) # line network with switch at to bus\n assert_init_results(net)\n net.switch.at[0, \"bus\"] = 0 # switch at from bus\n assert_init_results(net)\n\n add_test_trafo(net) # trafo network with switch at lv bus\n assert_init_results(net)\n net.switch.at[0, \"bus\"] = 7 # switch at hv bus\n assert_init_results(net)\n\n add_test_xward(net) # xward with internal node\n assert_init_results(net)\n add_test_trafo3w(net) # trafo3w with internal node\n assert_init_results(net)\n t3idx = net.trafo3w.index[0]\n t3_switch = pp.create_switch(net, bus=net.trafo3w.hv_bus.at[t3idx],\n element=t3idx, et=\"t3\", closed=False) # trafo3w switch at hv side\n assert_init_results(net)\n net.switch.bus.at[t3_switch] = net.trafo3w.mv_bus.at[t3idx] # trafo3w switch at mv side\n assert_init_results(net)\n net.switch.bus.at[t3_switch] = net.trafo3w.lv_bus.at[t3idx] # trafo3w switch at lv side\n assert_init_results(net)\n\n\ndef assert_init_results(net):\n pp.runpp(net, init=\"auto\")\n assert net._ppc[\"iterations\"] > 0\n pp.runpp(net, init=\"results\")\n assert net._ppc[\"iterations\"] == 0\n\n\ndef test_wye_delta():\n from pandapower.pypower.idx_brch import BR_R, BR_X, BR_B\n net = pp.create_empty_network()\n pp.create_bus(net, vn_kv=110)\n pp.create_buses(net, nr_buses=4, vn_kv=20)\n trafo = pp.create_transformer(net, hv_bus=0, lv_bus=1, std_type='25 MVA 110/20 kV')\n pp.create_line(net, 1, 2, length_km=2.0, std_type=\"NAYY 4x50 SE\")\n pp.create_line(net, 2, 3, length_km=6.0, std_type=\"NAYY 4x50 SE\")\n pp.create_line(net, 3, 4, length_km=10.0, std_type=\"NAYY 4x50 SE\")\n pp.create_ext_grid(net, 0)\n pp.create_load(net, 4, p_mw=0.1)\n pp.create_sgen(net, 2, p_mw=4.)\n pp.create_sgen(net, 3, p_mw=4.)\n\n pp.runpp(net, trafo_model=\"pi\")\n f, t = net._pd2ppc_lookups[\"branch\"][\"trafo\"]\n assert np.isclose(net.res_trafo.p_hv_mw.at[trafo], -7.560996, rtol=1e-7)\n assert np.allclose(net._ppc[\"branch\"][f:t, [BR_R, BR_X, BR_B]].flatten(),\n np.array([0.0001640 + 0.j, 0.0047972 + 0.j, -0.0105000 - 0.014j]),\n rtol=1e-7)\n\n pp.runpp(net, trafo_model=\"t\")\n assert np.allclose(net._ppc[\"branch\"][f:t, [BR_R, 
BR_X, BR_B]].flatten(),\n                       np.array([0.00016392 + 0.j, 0.00479726 + 0.j, -0.01050009 - 0.01399964j]))\n    assert np.isclose(net.res_trafo.p_hv_mw.at[trafo], -7.561001, rtol=1e-7)\n\n\ndef test_line_temperature():\n    net = four_loads_with_branches_out()\n    r_init = net.line.r_ohm_per_km.values.copy()\n\n    # r_ohm_per_km is not in line results by default\n    pp.runpp(net)\n    v_init = net.res_bus.vm_pu.values.copy()\n    va_init = net.res_bus.va_degree.values.copy()\n    assert \"r_ohm_per_km\" not in net.res_line.columns\n\n    # no temperature adjustment performed if not explicitly set in options/arguments to runpp\n    net.line[\"temperature_degree_celsius\"] = 20\n    pp.runpp(net)\n    assert \"r_ohm_per_km\" not in net.res_line.columns\n    assert np.allclose(net.res_bus.vm_pu, v_init, rtol=0, atol=1e-16)\n    assert np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-16)\n\n    # argument in runpp is considered\n    pp.runpp(net, consider_line_temperature=True)\n    assert \"r_ohm_per_km\" in net.res_line.columns\n    assert np.allclose(net.res_line.r_ohm_per_km, r_init, rtol=0, atol=1e-16)\n    assert np.allclose(net.res_bus.vm_pu, v_init, rtol=0, atol=1e-16)\n    assert np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-16)\n\n    # check results of r adjustment, check that user_pf_options works, alpha is 4e-3 by default\n    t = np.arange(0, 80, 10)\n    net.line.temperature_degree_celsius = t\n    pp.set_user_pf_options(net, consider_line_temperature=True)\n    pp.runpp(net)\n    alpha = 4e-3\n    r_temp = r_init * (1 + alpha * (t - 20))\n    assert np.allclose(net.res_line.r_ohm_per_km, r_temp, rtol=0, atol=1e-16)\n    assert not np.allclose(net.res_bus.vm_pu, v_init, rtol=0, atol=1e-4)\n    assert not np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-2)\n\n    # check results with user-defined alpha\n    alpha = np.arange(3e-3, 5e-3, 2.5e-4)\n    net.line['alpha'] = alpha\n    pp.runpp(net)\n    r_temp = r_init * (1 + alpha * (t - 20))\n    assert np.allclose(net.res_line.r_ohm_per_km, r_temp, rtol=0, atol=1e-16)\n    assert not np.allclose(net.res_bus.vm_pu, v_init, rtol=0, atol=1e-4)\n    assert not np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-2)\n\n    # r_ohm_per_km is no longer in the results if temperature is not considered\n    pp.set_user_pf_options(net, overwrite=True)\n    pp.runpp(net)\n    assert np.allclose(net.res_bus.vm_pu, v_init, rtol=0, atol=1e-16)\n    assert np.allclose(net.res_bus.va_degree, va_init, rtol=0, atol=1e-16)\n    assert \"r_ohm_per_km\" not in net.res_line.columns\n\n\ndef test_results_for_line_temperature():\n    net = pp.create_empty_network()\n    pp.create_bus(net, 0.4)\n    pp.create_buses(net, 2, 0.4)\n\n    pp.create_ext_grid(net, 0)\n    pp.create_load(net, 1, 5e-3, 10e-3)\n    pp.create_load(net, 2, 5e-3, 10e-3)\n\n    pp.create_line_from_parameters(net, 0, 1, 0.5, 0.642, 0.083, 210, 1, alpha=0.00403)\n    pp.create_line_from_parameters(net, 1, 2, 0.5, 0.642, 0.083, 210, 1, alpha=0.00403)\n\n    vm_res_20 = [1, 0.9727288676, 0.95937328755]\n    va_res_20 = [0, 2.2103403814, 3.3622612327]\n    vm_res_80 = [1, 0.96677572771, 0.95062498477]\n    va_res_80 = [0, 2.7993156134, 4.2714451629]\n\n    pp.runpp(net)\n\n    assert np.allclose(net.res_bus.vm_pu, vm_res_20, rtol=0, atol=1e-6)\n    assert np.allclose(net.res_bus.va_degree, va_res_20, rtol=0, atol=1e-6)\n\n    net.line[\"temperature_degree_celsius\"] = 80\n    pp.set_user_pf_options(net, consider_line_temperature=True)\n    pp.runpp(net)\n\n    assert np.allclose(net.res_bus.vm_pu, vm_res_80, rtol=0, atol=1e-6)\n    assert np.allclose(net.res_bus.va_degree, va_res_80, rtol=0, atol=1e-6)\n\n\n@pytest.mark.skipif(not lightsim2grid_available, reason=\"lightsim2grid is 
not installed\")\ndef test_lightsim2grid():\n # test several nets\n for net in result_test_network_generator():\n try:\n runpp_with_consistency_checks(net, lightsim2grid=True)\n except (AssertionError):\n raise UserWarning(\"Consistency Error after adding %s\" % net.last_added_case)\n except(LoadflowNotConverged):\n raise UserWarning(\"Power flow did not converge after adding %s\" % net.last_added_case)\n\n\[email protected](not lightsim2grid_available, reason=\"lightsim2grid is not installed\")\ndef test_lightsim2grid_zip():\n test_zip_loads_consistency(lightsim2grid=True)\n\n\[email protected](not lightsim2grid_available, reason=\"lightsim2grid is not installed\")\ndef test_lightsim2grid_qlims():\n test_minimal_net(lightsim2grid=True, enforce_q_lims=True)\n\n\[email protected](not lightsim2grid_available, reason=\"lightsim2grid is not installed\")\ndef test_lightsim2grid_extgrid():\n test_ext_grid_and_gen_at_one_bus(lightsim2grid=True)\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n# test_minimal_net()\n# net = pp.create_empty_network()\n# b = pp.create_bus(net, 110)\n# pp.create_ext_grid(net, b)\n# runpp_with_consistency_checks(net)\n#\n# pp.create_load(net, b, p_mw=0.1)\n# runpp_with_consistency_checks(net)\n#\n# b2 = pp.create_bus(net, 110)\n# pp.create_switch(net, b, b2, 'b')\n# pp.create_sgen(net, b2, p_mw=0.2)\n# runpp_with_consistency_checks(net)\n" ]
[ [ "pandas.isnull", "numpy.array", "numpy.isclose", "numpy.isnan", "numpy.exp", "numpy.allclose", "numpy.arange", "numpy.sqrt", "numpy.all" ] ]
awslabs/improving-forecast-accuracy-with-machine-learning
[ "020e9c1c9ca6ea6c0f5df9a502119dbd11a0c328" ]
[ "source/tests/shared/test_dataset_file.py" ]
[ "# #####################################################################################################################\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance #\n# with the License. You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #\n# the specific language governing permissions and limitations under the License. #\n# #####################################################################################################################\n\nimport datetime\nfrom hashlib import md5\n\nimport boto3\nimport numpy as np\nimport pytest\nfrom dateutil.tz import tzutc\nfrom freezegun import freeze_time\nfrom moto import mock_s3\n\nfrom shared.Dataset.dataset_file import DatasetFile\nfrom shared.Dataset.dataset_type import DatasetType\n\n\[email protected]\ndef dataset_target():\n return \"some/s3/path/train/some_filename.csv\"\n\n\[email protected]\ndef dataset_related():\n return \"some/s3/path/train/some_filename.related.csv\"\n\n\[email protected]\ndef dataset_metadata():\n return \"some/s3/path/train/some_filename.metadata.csv\"\n\n\[email protected]\ndef bucket():\n return \"somebucket\"\n\n\[email protected]\ndef dataset_file(dataset_target, bucket):\n dsf = DatasetFile(dataset_target, bucket)\n\n with mock_s3():\n cli = boto3.client(\"s3\")\n cli.create_bucket(Bucket=bucket)\n\n yield dsf\n\n\ndef test_target_timeseries_file(dataset_target, bucket):\n assert (\n DatasetFile(dataset_target, bucket).data_type == DatasetType.TARGET_TIME_SERIES\n )\n\n\ndef test_related_timeseries_file(dataset_related, bucket):\n assert (\n DatasetFile(dataset_related, bucket).data_type\n == DatasetType.RELATED_TIME_SERIES\n )\n\n\ndef test_metadata_file(dataset_metadata, bucket):\n assert DatasetFile(dataset_metadata, bucket).data_type == DatasetType.ITEM_METADATA\n\n\ndef test_dataset_name(dataset_target, bucket):\n dsf = DatasetFile(dataset_target, bucket)\n assert dsf.name == \"some_filename\"\n assert dsf.data_type == DatasetType.TARGET_TIME_SERIES\n\n\ndef test_dataset_name_metadata(dataset_metadata, bucket):\n dsf = DatasetFile(dataset_metadata, bucket)\n assert dsf.prefix == \"some_filename\"\n assert dsf.name == \"some_filename_metadata\"\n assert dsf.data_type == DatasetType.ITEM_METADATA\n\n\ndef test_dataset_name_related(dataset_related, bucket):\n dsf = DatasetFile(dataset_related, bucket)\n assert dsf.name == \"some_filename_related\"\n assert dsf.prefix == \"some_filename\"\n assert dsf.data_type == DatasetType.RELATED_TIME_SERIES\n\n\[email protected](\n \"path,bucket,key\",\n [\n (\"s3://some_bucket/some_key\", \"some_bucket\", \"some_key\"),\n (\"s3://some_bucket/\", \"some_bucket\", \"\"),\n (\"s3://some_bucket/some_key?query=string\", \"some_bucket\", \"some_key\"),\n ],\n)\ndef test_dataset_file_from_s3_path(path, bucket, key):\n dsf = DatasetFile.from_s3_path(s3_path=path)\n assert dsf.bucket == bucket\n assert dsf.key == key\n\n\[email protected](\n \"path\",\n [\n (\"s3://some_bucket/some_key\"),\n (\"s3://some_bucket/\"),\n (\"s3://some_bucket/some_key?query=string\"),\n ],\n)\ndef test_dataset_file_from_s3_path(path):\n dsf = 
DatasetFile.from_s3_path(s3_path=path)\n    assert dsf.s3_url == path.split(\"?\")[0]\n\n\n@pytest.mark.parametrize(\n    \"path\",\n    [\n        (\"s3://some_bucket/some_key\"),\n        (\"s3://some_bucket/\"),\n        (\"s3://some_bucket/some_key?query=string\"),\n    ],\n)\ndef test_dataset_file_s3_prefix(path):\n    dsf = DatasetFile.from_s3_path(s3_path=path)\n    assert dsf.s3_prefix == \"s3://some_bucket/\"\n\n\ndef test_s3_file_hash(dataset_target, bucket, dataset_file):\n    # create some random data to be checksummed\n    MB = 1024 * 1024\n    random_data = np.random.bytes(25 * MB)\n    hexdigest = md5(random_data).hexdigest()\n\n    # write the data\n    cli = boto3.client(\"s3\")\n    cli.put_object(Bucket=bucket, Key=dataset_target, Body=random_data)\n\n    assert dataset_file.etag == hexdigest\n\n\ndef test_last_updated(dataset_target, bucket, dataset_file):\n    cli = boto3.client(\"s3\")\n    now = datetime.datetime.now(tzutc()).replace(microsecond=0)\n\n    dataset_file.cli = cli\n    with freeze_time(now):\n        cli.put_object(Bucket=bucket, Key=dataset_target, Body=\"SOME_DATA\")\n        assert dataset_file.last_updated == now\n" ]
[ [ "numpy.random.bytes" ] ]
sourcery-ai-bot/detectron2
[ "fd0c5c59afbdc43f7005fb1a8c0c39ac5dc44039" ]
[ "detectron2/modeling/meta_arch/panoptic_fpn.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport logging\nfrom typing import Dict, List\nimport torch\nfrom torch import nn\n\nfrom detectron2.config import configurable\nfrom detectron2.structures import ImageList\n\nfrom ..postprocessing import detector_postprocess, sem_seg_postprocess\nfrom .build import META_ARCH_REGISTRY\nfrom .rcnn import GeneralizedRCNN\nfrom .semantic_seg import build_sem_seg_head\n\n__all__ = [\"PanopticFPN\"]\n\n\n@META_ARCH_REGISTRY.register()\nclass PanopticFPN(GeneralizedRCNN):\n \"\"\"\n Implement the paper :paper:`PanopticFPN`.\n \"\"\"\n\n @configurable\n def __init__(\n self,\n *,\n sem_seg_head: nn.Module,\n combine_overlap_thresh: float = 0.5,\n combine_stuff_area_thresh: float = 4096,\n combine_instances_score_thresh: float = 0.5,\n **kwargs,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n sem_seg_head: a module for the semantic segmentation head.\n combine_overlap_thresh: combine masks into one instances if\n they have enough overlap\n combine_stuff_area_thresh: ignore stuff areas smaller than this threshold\n combine_instances_score_thresh: ignore instances whose score is\n smaller than this threshold\n\n Other arguments are the same as :class:`GeneralizedRCNN`.\n \"\"\"\n super().__init__(**kwargs)\n self.sem_seg_head = sem_seg_head\n # options when combining instance & semantic outputs\n self.combine_overlap_thresh = combine_overlap_thresh\n self.combine_stuff_area_thresh = combine_stuff_area_thresh\n self.combine_instances_score_thresh = combine_instances_score_thresh\n\n @classmethod\n def from_config(cls, cfg):\n ret = super().from_config(cfg)\n ret.update(\n {\n \"combine_overlap_thresh\": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH,\n \"combine_stuff_area_thresh\": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT,\n \"combine_instances_score_thresh\": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa\n }\n )\n ret[\"sem_seg_head\"] = build_sem_seg_head(cfg, ret[\"backbone\"].output_shape())\n logger = logging.getLogger(__name__)\n if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED:\n logger.warning(\n \"PANOPTIC_FPN.COMBINED.ENABLED is no longer used. \"\n \" model.inference(do_postprocess=) should be used to toggle postprocessing.\"\n )\n if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0:\n w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT\n logger.warning(\n \"PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head.\"\n )\n\n def update_weight(x):\n return {k: v * w for k, v in x.items()} if isinstance(x, dict) else x * w\n\n roi_heads = ret[\"roi_heads\"]\n roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight)\n roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight)\n return ret\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper`.\n Each item in the list contains the inputs for one image.\n\n For now, each item in the list is a dict that contains:\n\n * \"image\": Tensor, image in (C, H, W) format.\n * \"instances\": Instances\n * \"sem_seg\": semantic segmentation ground truth.\n * Other information that's included in the original dicts, such as:\n \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n Returns:\n list[dict]:\n each dict has the results for one image. 
The dict contains the following keys:\n\n * \"instances\": see :meth:`GeneralizedRCNN.forward` for its format.\n * \"sem_seg\": see :meth:`SemanticSegmentor.forward` for its format.\n * \"panoptic_seg\": See the return value of\n :func:`combine_semantic_and_instance_outputs` for its format.\n \"\"\"\n if not self.training:\n return self.inference(batched_inputs)\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n\n assert \"sem_seg\" in batched_inputs[0]\n gt_sem_seg = [x[\"sem_seg\"].to(self.device) for x in batched_inputs]\n gt_sem_seg = ImageList.from_tensors(\n gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value\n ).tensor\n sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg)\n\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n detector_results, detector_losses = self.roi_heads(\n images, features, proposals, gt_instances\n )\n\n losses = sem_seg_losses\n losses.update(proposal_losses)\n losses.update(detector_losses)\n return losses\n\n def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True):\n \"\"\"\n Run inference on the given inputs.\n\n Args:\n batched_inputs (list[dict]): same as in :meth:`forward`\n do_postprocess (bool): whether to apply post-processing on the outputs.\n\n Returns:\n When do_postprocess=True, see docs in :meth:`forward`.\n Otherwise, returns a (list[Instances], list[Tensor]) that contains\n the raw detector outputs, and raw semantic segmentation outputs.\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None)\n proposals, _ = self.proposal_generator(images, features, None)\n detector_results, _ = self.roi_heads(images, features, proposals, None)\n\n if not do_postprocess:\n return detector_results, sem_seg_results\n processed_results = []\n for sem_seg_result, detector_result, input_per_image, image_size in zip(\n sem_seg_results, detector_results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width)\n detector_r = detector_postprocess(detector_result, height, width)\n\n processed_results.append({\"sem_seg\": sem_seg_r, \"instances\": detector_r})\n\n panoptic_r = combine_semantic_and_instance_outputs(\n detector_r,\n sem_seg_r.argmax(dim=0),\n self.combine_overlap_thresh,\n self.combine_stuff_area_thresh,\n self.combine_instances_score_thresh,\n )\n processed_results[-1][\"panoptic_seg\"] = panoptic_r\n return processed_results\n\n\ndef combine_semantic_and_instance_outputs(\n instance_results,\n semantic_results,\n overlap_threshold,\n stuff_area_thresh,\n instances_score_thresh,\n):\n \"\"\"\n Implement a simple combining logic following\n \"combine_semantic_and_instance_predictions.py\" in panopticapi\n to produce panoptic segmentation outputs.\n\n Args:\n instance_results: output of :func:`detector_postprocess`.\n semantic_results: an (H, W) tensor, each element is the contiguous semantic\n category id\n\n Returns:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.\n segments_info (list[dict]): Describe each segment in `panoptic_seg`.\n Each dict contains keys \"id\", \"category_id\", 
\"isthing\".\n \"\"\"\n panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32)\n\n # sort instance outputs by scores\n sorted_inds = torch.argsort(-instance_results.scores)\n\n current_segment_id = 0\n segments_info = []\n\n instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device)\n\n # Add instances one-by-one, check for overlaps with existing ones\n for inst_id in sorted_inds:\n score = instance_results.scores[inst_id].item()\n if score < instances_score_thresh:\n break\n mask = instance_masks[inst_id] # H,W\n mask_area = mask.sum().item()\n\n if mask_area == 0:\n continue\n\n intersect = (mask > 0) & (panoptic_seg > 0)\n intersect_area = intersect.sum().item()\n\n if intersect_area * 1.0 / mask_area > overlap_threshold:\n continue\n\n if intersect_area > 0:\n mask = mask & (panoptic_seg == 0)\n\n current_segment_id += 1\n panoptic_seg[mask] = current_segment_id\n segments_info.append(\n {\n \"id\": current_segment_id,\n \"isthing\": True,\n \"score\": score,\n \"category_id\": instance_results.pred_classes[inst_id].item(),\n \"instance_id\": inst_id.item(),\n }\n )\n\n # Add semantic results to remaining empty areas\n semantic_labels = torch.unique(semantic_results).cpu().tolist()\n for semantic_label in semantic_labels:\n if semantic_label == 0: # 0 is a special \"thing\" class\n continue\n mask = (semantic_results == semantic_label) & (panoptic_seg == 0)\n mask_area = mask.sum().item()\n if mask_area < stuff_area_thresh:\n continue\n\n current_segment_id += 1\n panoptic_seg[mask] = current_segment_id\n segments_info.append(\n {\n \"id\": current_segment_id,\n \"isthing\": False,\n \"category_id\": semantic_label,\n \"area\": mask_area,\n }\n )\n\n return panoptic_seg, segments_info\n" ]
[ [ "torch.zeros_like", "torch.argsort", "torch.unique" ] ]
ZhangYikaii/PRML
[ "cf4fd9539af08de739110673afbf450963b6e931" ]
[ "prml/preprocess/polynomial.py" ]
[ "import itertools\nimport functools\nimport numpy as np\n\n\nclass PolynomialFeature(object):\n \"\"\"\n polynomial features\n\n transforms input array with polynomial features\n\n Example\n =======\n x =\n [[a, b],\n [c, d]]\n\n y = PolynomialFeatures(degree=2).transform(x)\n y =\n [[1, a, b, a^2, a * b, b^2],\n [1, c, d, c^2, c * d, d^2]]\n \"\"\"\n\n def __init__(self, degree=2):\n \"\"\"\n construct polynomial features\n\n Parameters\n ----------\n degree : int\n degree of polynomial\n \"\"\"\n assert isinstance(degree, int)\n self.degree = degree\n\n def transform(self, x):\n \"\"\"\n transforms input array with polynomial features\n\n Parameters\n ----------\n x : (sample_size, n) ndarray\n input array\n\n Returns\n -------\n output : (sample_size, 1 + nC1 + ... + nCd) ndarray\n polynomial features\n \"\"\"\n if x.ndim == 1:\n x = x[:, None]\n x_t = x.transpose()\n features = [np.ones(len(x))]\n for degree in range(1, self.degree + 1):\n # https://docs.python.org/zh-cn/3/library/itertools.html\n ## combinations_with_replacement 例子:\n ### combinations_with_replacement('ABCD', 2)\n ### AA AB AC AD BB BC BD CC CD DD\n for items in itertools.combinations_with_replacement(x_t, degree):\n # reduce 高级程序设计课堂测试有实现过这个函数:\n # https://docs.python.org/zh-cn/3/library/functools.html#functools.reduce\n ## 这里就是累乘.\n features.append(functools.reduce(lambda x, y: x * y, items))\n return np.asarray(features).transpose()\n" ]
[ [ "numpy.asarray" ] ]
woctezuma/download-steam-banners
[ "b4cc1d5b96ac479b76517e11ea7cd19c43eee38e" ]
[ "find_unique_games.py" ]
[ "# Code inspired from:\n# - build_feature_index.py\n# - https://github.com/woctezuma/steam-descriptions/blob/master/find_unique_games.py\n\nimport json\nimport logging\nfrom time import time\n\nimport numpy as np\nimport steamspypi\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom build_feature_index import get_features_folder_name\nfrom build_feature_index import get_label_database_filename, convert_label_database, get_frozen_app_ids\n\n\ndef load_game_names_from_steamspy():\n data = steamspypi.load()\n\n game_names = dict()\n for app_id in data.keys():\n game_names[app_id] = data[app_id]['name']\n\n return game_names\n\n\ndef get_app_name(app_id, game_names=None):\n # Reference: https://github.com/woctezuma/steam-descriptions/blob/master/benchmark_utils.py\n if game_names is None:\n game_names = load_game_names_from_steamspy()\n\n try:\n app_name = game_names[str(app_id)]\n except KeyError:\n app_name = 'Unknown'\n\n return app_name\n\n\ndef get_store_url(app_id):\n # Reference: https://github.com/woctezuma/steam-descriptions/blob/master/benchmark_utils.py\n store_url = 'https://store.steampowered.com/app/' + str(app_id)\n return store_url\n\n\ndef get_banner_url(app_id):\n # Reference: https://github.com/woctezuma/steam-descriptions/blob/master/benchmark_utils.py\n banner_url = 'https://steamcdn-a.akamaihd.net/steam/apps/' + str(app_id) + '/header.jpg'\n return banner_url\n\n\ndef populate_database(pooling=None):\n try:\n print('\\n[pooling] {}'.format(pooling))\n label_database = np.load(get_label_database_filename(pooling))\n except FileNotFoundError or OSError:\n if pooling is None:\n raise AssertionError()\n else:\n label_database = convert_label_database(pooling)\n\n knn = NearestNeighbors(metric='cosine', algorithm='brute')\n knn.fit(label_database)\n\n # query = label_database\n # num_neighbors = 2\n\n query = None\n num_neighbors = 1\n\n start = time()\n # Caveat: the output 'dist' returned by knn.kneighbors() is the 'cosine distance', not the cosine similarity!\n # Reference: https://en.wikipedia.org/wiki/Cosine_similarity\n dist, matches = knn.kneighbors(X=query, n_neighbors=num_neighbors)\n print('Elapsed time: {:.2f} s'.format(time() - start))\n\n app_ids = get_frozen_app_ids()\n\n sim_dict = dict()\n for counter, query_app_id in enumerate(app_ids):\n last_index = num_neighbors - 1\n\n second_best_match = matches[counter][last_index]\n second_best_matched_app_id = app_ids[second_best_match]\n\n cosine_distance = dist[counter][last_index]\n second_best_similarity_score = 1.0 - cosine_distance\n\n sim_dict[query_app_id] = dict()\n sim_dict[query_app_id]['app_id'] = second_best_matched_app_id\n sim_dict[query_app_id]['similarity'] = second_best_similarity_score\n\n with open(get_unique_games_file_name(pooling=pooling), 'w') as f:\n json.dump(sim_dict, f)\n\n return sim_dict\n\n\ndef get_unique_games_file_name(pooling=None):\n unique_games_file_name = get_features_folder_name() + 'unique_games'\n\n if pooling is not None:\n unique_games_file_name += '.' 
+ pooling\n\n unique_games_file_name += '.json'\n\n return unique_games_file_name\n\n\ndef load_sim_dict(pooling=None):\n with open(get_unique_games_file_name(pooling=pooling), 'r') as f:\n sim_dict = json.load(f)\n\n return sim_dict\n\n\ndef get_small_banner_url(app_id):\n # Reference: https://github.com/woctezuma/steam-descriptions/blob/master/find_unique_games.py\n small_banner_url = 'https://steamcdn-a.akamaihd.net/steam/apps/' + str(app_id) + '/capsule_sm_120.jpg'\n return small_banner_url\n\n\ndef get_bb_code_linked_image(app_id):\n # Reference: https://github.com/woctezuma/steam-descriptions/blob/master/find_unique_games.py\n bb_code_linked_image = '[URL={}][IMG]{}[/IMG][/URL]'.format(get_store_url(app_id), get_small_banner_url(app_id))\n return bb_code_linked_image\n\n\ndef print_unique_games(sim_dict,\n similarity_threshold,\n game_names,\n only_print_banners=False,\n use_markdown=True):\n # Reference: https://github.com/woctezuma/steam-descriptions/blob/master/find_unique_games.py\n # Markdown\n # Reference: https://stackoverflow.com/a/14747656\n image_link_str = '[<img alt=\"{}\" src=\"{}\" width=\"{}\">]({})'\n image_width = 150\n\n sorted_app_ids = sorted(sim_dict.keys(), key=lambda x: sim_dict[x]['similarity'])\n\n unique_app_ids = []\n\n for counter, app_id in enumerate(sorted_app_ids):\n similarity_value = sim_dict[app_id]['similarity']\n if similarity_value <= similarity_threshold:\n unique_app_ids.append(app_id)\n\n app_name = get_app_name(app_id, game_names=game_names)\n if only_print_banners:\n if use_markdown:\n # Markdown\n print(image_link_str.format(app_name, get_banner_url(app_id), image_width, get_store_url(app_id)))\n else:\n # BBCode\n end_of_entry = ' ' # Either a line break '\\n' or a space ' '. Prefer spaces if you post to a forum.\n print(get_bb_code_linked_image(app_id), end=end_of_entry)\n else:\n print('{}) similarity = {:.2f} ; appID = {} ({})'.format(counter + 1,\n similarity_value,\n app_id,\n app_name))\n\n return unique_app_ids\n\n\ndef main(pooling=None, # Either None, or 'avg', or 'max'\n num_output=250, # Allows to automatically define a value for 'similarity_threshold' so that N games are output\n similarity_threshold=None,\n update_sim_dict=False,\n only_print_banners=False,\n use_markdown=True):\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n game_names = load_game_names_from_steamspy()\n\n if update_sim_dict:\n sim_dict = populate_database(pooling=pooling)\n else:\n sim_dict = load_sim_dict(pooling=pooling)\n\n if similarity_threshold is None:\n sorted_similarity_values = sorted(match['similarity'] for match in sim_dict.values())\n similarity_threshold = sorted_similarity_values[num_output]\n print('Similarity threshold is automatically set to {:.2f}'.format(similarity_threshold))\n\n unique_app_ids = print_unique_games(sim_dict,\n similarity_threshold,\n game_names,\n only_print_banners=only_print_banners,\n use_markdown=use_markdown)\n\n return\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.neighbors.NearestNeighbors" ] ]
yosagaf/medical-biometrics
[ "9f3151d20ee40e12bf9a9abdcdd89b9de4ef8fc0" ]
[ "assignement2/viewing3DBrainMRI.py" ]
[ "# import necessary packages\n\nimport imageio\nimport scipy.ndimage as ndi\nimport numpy as np\nimport SimpleITK as sitk\nimport matplotlib.pyplot as plt\n\n# the path of a T1-weighted brain .nii image\npath = \"data/BRAIN.nii\"\n\n\n# read the .nii image containing the volume with the SimpleITK \nsitk_f = sitk.ReadImage(path)\n\n# access to the numpy array\nslices = sitk.GetArrayFromImage(sitk_f)\nprint(\"[INFOS] 2D Array slice data type :\", type(slices)) #-> numpy array\nprint(\"[INFOS] 3D sitk object type :\", type(sitk_f)) #-> numpy array\nprint(\"[INFOS] Shape of 3D image array :\", slices.shape)\nprint(\"[INFOS] Shape of 2D slice array :\", slices[0].shape)\nprint(\"[INFOS] Number of slices :\", slices.shape[0])\n\ndef removeKeymapConflicts(new_keys_set):\n for prop in plt.rcParams:\n if prop.startswith('keymap.'):\n keys = plt.rcParams[prop]\n remove_list = set(keys) & new_keys_set\n for key in remove_list: \n keys.remove(key)\n\ndef viewer3D(volume):\n removeKeymapConflicts({'n', 'l'})\n fig, ax = plt.subplots()\n ax.volume = volume\n ax.index = volume.shape[0] // 2\n ax.imshow(volume[ax.index], cmap='gray')\n fig.canvas.mpl_connect('key_press_event', processKey)\n\ndef processKey(event):\n fig = event.canvas.figure\n ax = fig.axes[0]\n if event.key == 'n':\n lSlice(ax)\n elif event.key == 'l':\n nSlice(ax)\n fig.canvas.draw()\n\ndef lSlice(ax):\n volume = ax.volume\n ax.index = (ax.index - 1) % volume.shape[0] # wrap around using %\n ax.images[0].set_array(volume[ax.index])\n\ndef nSlice(ax):\n volume = ax.volume\n ax.index = (ax.index + 1) % volume.shape[0]\n ax.images[0].set_array(volume[ax.index])\n\n\nmask3d_array = slices > 900\nmask3d_array = ndi.binary_dilation(mask3d_array, iterations=8)\nmask3d_array = ndi.binary_closing(mask3d_array, iterations=8)\n\nweights_edge = [[[1, 1, 1],\n [0, 0, 0], \n [1, -1, -1]],\n \n [[1, 1, 1],\n [0, 0, 0], \n [-1, -1, -1]],\n \n [[1, 1, 1],\n [0, 0, 0], \n [-1, -1, -1]]]\n\nim3d_edge = ndi.convolve(slices, weights_edge)\n\n#viewer3D(mask_array)\n#viewer3D(slices)\nviewer3D(im3d_edge)\nplt.show()\n" ]
[ [ "scipy.ndimage.convolve", "scipy.ndimage.binary_closing", "matplotlib.pyplot.subplots", "scipy.ndimage.binary_dilation", "matplotlib.pyplot.show" ] ]
Open-Speech-EkStep/data-acquisition-pipeline
[ "b28df36d417010d85d3e5c5f6882eb8fe89ce5ae" ]
[ "selenium_youtube_crawler/tests/test_downloader.py" ]
[ "import os\nfrom unittest import TestCase\nfrom unittest.mock import patch, call, MagicMock\n\nimport pandas as pd\n\nfrom selenium_youtube_crawler.downloader import Downloader\n\n\nclass DownloaderTest(TestCase):\n\n @patch(\"selenium_youtube_crawler.downloader.GCSHelper\")\n def setUp(self, mock_gcs_helper):\n self.bucket_name = \"tester\"\n self.bucket_path = \"test\"\n self.language = \"ta\"\n self.thread_local = MagicMock()\n self.downloader = Downloader(self.thread_local, self.bucket_name, self.bucket_path, self.language)\n self.file_dir = self.downloader.file_dir\n self.mock_gcs_helper = mock_gcs_helper.return_value\n\n def test_download(self):\n download_url = \"http://test.mp4\"\n video_id = \"3sdf8sdf\"\n source = \"test\"\n\n with patch.object(self.downloader, 'download_and_save') as mock_download_and_save:\n with patch.object(self.downloader, 'post_download_process') as mock_post_download_process:\n self.downloader.download(download_url, video_id, source)\n\n file_name = \"file-id\" + video_id + \".mp4\"\n csv_name = \"file-id\" + video_id + \".csv\"\n\n mock_download_and_save.assert_called_once_with(download_url, file_name)\n mock_post_download_process.assert_called_once_with(file_name, csv_name, source, video_id)\n\n @patch(\"selenium_youtube_crawler.downloader.requests.Response\")\n @patch(\"selenium_youtube_crawler.downloader.requests.Session\")\n def test_download_and_save_with_file_dir_not_present(self, mock_session, mock_response):\n os.system(\"rm -rf \" + self.file_dir)\n url = \"http://test.mp4\"\n file_name = \"test.mp4\"\n\n def respond_stream(byte_count):\n for _ in range(3):\n yield b'dsf'\n\n mock_response_value = mock_response.return_value.__enter__.return_value\n mock_response_value.iter_content.side_effect = respond_stream\n mock_session.get.return_value = mock_response.return_value\n with patch.object(self.downloader, 'get_session') as mock_get_session:\n mock_get_session.return_value = mock_session\n self.downloader.download_and_save(url, file_name)\n\n self.assertTrue(os.path.exists(self.file_dir))\n self.assertTrue(os.path.exists(self.file_dir + \"/\" + file_name))\n mock_session.get.assert_called_once_with(url, stream=True)\n mock_response_value.iter_content.assert_called_once_with(1024)\n os.system(\"rm -rf \" + self.file_dir)\n\n @patch(\"selenium_youtube_crawler.downloader.requests.Response\")\n @patch(\"selenium_youtube_crawler.downloader.requests.Session\")\n def test_download_and_save_with_file_dir_present(self, mock_session, mock_response):\n os.system(\"rm -rf \" + self.file_dir)\n os.system(\"mkdir \" + self.file_dir)\n url = \"http://test.mp4\"\n file_name = \"test.mp4\"\n\n def respond_stream(byte_count):\n for _ in range(3):\n yield b'dsf'\n\n mock_response_value = mock_response.return_value.__enter__.return_value\n mock_response_value.iter_content.side_effect = respond_stream\n mock_session.get.return_value = mock_response.return_value\n with patch.object(self.downloader, 'get_session') as mock_get_session:\n mock_get_session.return_value = mock_session\n self.downloader.download_and_save(url, file_name)\n\n self.assertTrue(os.path.exists(self.file_dir))\n self.assertTrue(os.path.exists(self.file_dir + \"/\" + file_name))\n mock_session.get.assert_called_once_with(url, stream=True)\n mock_response_value.iter_content.assert_called_once_with(1024)\n os.system(\"rm -rf \" + self.file_dir)\n\n @patch(\"selenium_youtube_crawler.downloader.extract_metadata\")\n def test_post_download_process(self, mock_extract_metadata):\n file_name = 
\"test.mp4\"\n csv_name = \"test.csv\"\n source = \"test\"\n video_id = \"43dsfgs\"\n youtube_url = \"https://www.youtube.com/watch?v=\" + video_id\n duration = 60\n mock_extract_metadata.return_value = duration\n modified_file_name = str(duration) + file_name\n modified_csv_name = str(duration) + csv_name\n is_method_called_flag = False\n\n with patch.object(self.downloader, 'update_metadata_fields') as mock_update_metadata_fields:\n with patch.object(self.downloader, 'clean_up_files') as mock_clean_up_files:\n with patch.object(self.downloader, 'update_archive') as mock_update_archive:\n self.downloader.post_download_process(file_name, csv_name, source, video_id)\n\n mock_update_metadata_fields.assert_called_once_with(modified_file_name, csv_name, video_id)\n mock_clean_up_files.assert_called_once_with(file_name, csv_name)\n mock_update_archive.assert_called_once_with(source, video_id)\n is_method_called_flag = True\n\n self.assertTrue(is_method_called_flag)\n mock_extract_metadata.assert_called_once_with(self.downloader.file_dir, file_name, youtube_url, source)\n upload1_call = call(source, file_name, modified_file_name, self.file_dir)\n upload2_call = call(source, csv_name, modified_csv_name, self.file_dir)\n\n self.mock_gcs_helper.upload_file_to_bucket.assert_has_calls([upload1_call, upload2_call])\n\n @patch(\"selenium_youtube_crawler.downloader.YoutubeApiUtils\")\n def test_update_metdata_fields(self, mock_youtube_api_utils):\n os.system(\"mkdir \" + self.file_dir)\n metadata = dict({\n 'name': ['hello']\n })\n csv_name = \"test.csv\"\n df = pd.DataFrame(metadata, columns=['name'])\n df.to_csv(self.file_dir + \"/\" + csv_name, index=False)\n\n modified_file_name = str(60) + \"test.mp4\"\n video_id = \"43dsfgs\"\n expected_license_string = 'Creative Commons'\n mock_youtube_api_utils.return_value.get_license_info.return_value = expected_license_string\n\n self.downloader.update_metadata_fields(modified_file_name, csv_name, video_id)\n\n data = pd.read_csv(self.file_dir + \"/\" + csv_name)\n self.assertEqual('hello', data['name'][0])\n self.assertEqual(modified_file_name, data['raw_file_name'][0])\n self.assertEqual(modified_file_name, data['title'][0])\n self.assertEqual(self.language, data['language'][0])\n self.assertEqual(expected_license_string, data['license'][0])\n os.system(\"rm -rf \" + self.file_dir)\n\n def test_clean_up_files(self):\n file_name = \"test.mp4\"\n csv_name = \"test.csv\"\n os.system(\"mkdir \" + self.file_dir)\n os.system(\"touch {0}/{1}\".format(self.file_dir, file_name))\n os.system(\"touch {0}/{1}\".format(self.file_dir, csv_name))\n self.assertTrue(os.path.exists(self.file_dir + \"/\" + file_name))\n self.assertTrue(os.path.exists(self.file_dir + \"/\" + csv_name))\n\n self.downloader.clean_up_files(file_name, csv_name)\n\n self.assertFalse(os.path.exists(self.file_dir + \"/\" + file_name))\n self.assertFalse(os.path.exists(self.file_dir + \"/\" + csv_name))\n\n os.system(\"rm -rf \" + self.file_dir)\n\n @patch(\"selenium_youtube_crawler.downloader.populate_local_archive\")\n def test_update_archive(self, mock_populate_local_archive):\n video_id = \"24jsdf\"\n source = \"test\"\n\n self.downloader.update_archive(source, video_id)\n\n mock_populate_local_archive.assert_called_once_with(source, video_id)\n self.mock_gcs_helper.upload_archive_to_bucket.assert_called_once_with(source)\n\n def test_get_session_with_session_attribute_present(self):\n self.thread_local.session = \"hello\"\n\n session = self.downloader.get_session()\n\n self.assertEqual(\"hello\", 
session)\n\n # @patch(\"selenium_youtube_crawler.downloader.requests.Session\")\n # def test_get_session_with_session_attribute_not_present(self, mock_session):\n #\n # session = self.downloader.get_session()\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
euCanSHare/image_segmentation
[ "6c314f6d3ac5912af8729b04393a694544f6adb8" ]
[ "model_zoo/unet2D_bn_modified.py" ]
[ "import tensorflow as tf\nfrom tfwrapper import layers\n\n\n\ndef forward(images, training, nlabels):\n\n images_padded = tf.pad(images, [[0,0], [92, 92], [92, 92], [0,0]], 'CONSTANT')\n\n conv1_1 = layers.conv2D_layer_bn(images_padded, 'conv1_1', num_filters=64, training=training, padding='VALID')\n conv1_2 = layers.conv2D_layer_bn(conv1_1, 'conv1_2', num_filters=64, training=training, padding='VALID')\n\n pool1 = layers.max_pool_layer2d(conv1_2, 'pool_1')\n\n conv2_1 = layers.conv2D_layer_bn(pool1, 'conv2_1', num_filters=128, training=training, padding='VALID')\n conv2_2 = layers.conv2D_layer_bn(conv2_1, 'conv2_2', num_filters=128, training=training, padding='VALID')\n\n pool2 = layers.max_pool_layer2d(conv2_2, 'pool_2')\n\n conv3_1 = layers.conv2D_layer_bn(pool2, 'conv3_1', num_filters=256, training=training, padding='VALID')\n conv3_2 = layers.conv2D_layer_bn(conv3_1, 'conv3_2', num_filters=256, training=training, padding='VALID')\n\n pool3 = layers.max_pool_layer2d(conv3_2, 'pool_3')\n\n conv4_1 = layers.conv2D_layer_bn(pool3, 'conv4_1', num_filters=512, training=training, padding='VALID')\n conv4_2 = layers.conv2D_layer_bn(conv4_1, 'conv4_2', num_filters=512, training=training, padding='VALID')\n\n pool4 = layers.max_pool_layer2d(conv4_2, 'pool_4')\n\n conv5_1 = layers.conv2D_layer_bn(pool4, 'conv5_1', num_filters=1024, training=training, padding='VALID')\n conv5_2 = layers.conv2D_layer_bn(conv5_1, 'conv5_2', num_filters=1024, training=training, padding='VALID')\n\n upconv4 = layers.deconv2D_layer_bn(conv5_2, name='upconv4', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)\n concat4 = layers.crop_and_concat_layer([upconv4, conv4_2], 'crop_concat_4', axis=3)\n\n conv6_1 = layers.conv2D_layer_bn(concat4, 'conv6_1', num_filters=512, training=training, padding='VALID')\n conv6_2 = layers.conv2D_layer_bn(conv6_1, 'conv6_2', num_filters=512, training=training, padding='VALID')\n\n upconv3 = layers.deconv2D_layer_bn(conv6_2, name='upconv3', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)\n\n concat3 = layers.crop_and_concat_layer([upconv3, conv3_2], 'crop_concat_3', axis=3)\n\n conv7_1 = layers.conv2D_layer_bn(concat3, 'conv7_1', num_filters=256, training=training, padding='VALID')\n conv7_2 = layers.conv2D_layer_bn(conv7_1, 'conv7_2', num_filters=256, training=training, padding='VALID')\n\n upconv2 = layers.deconv2D_layer_bn(conv7_2, name='upconv2', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)\n concat2 = layers.crop_and_concat_layer([upconv2, conv2_2], 'crop_concat_2', axis=3)\n\n conv8_1 = layers.conv2D_layer_bn(concat2, 'conv8_1', num_filters=128, training=training, padding='VALID')\n conv8_2 = layers.conv2D_layer_bn(conv8_1, 'conv8_2', num_filters=128, training=training, padding='VALID')\n\n upconv1 = layers.deconv2D_layer_bn(conv8_2, name='upconv1', kernel_size=(4, 4), strides=(2, 2), num_filters=nlabels, training=training)\n concat1 = layers.crop_and_concat_layer([upconv1, conv1_2], 'crop_concat_1', axis=3)\n\n conv9_1 = layers.conv2D_layer_bn(concat1, 'conv9_1', num_filters=64, training=training, padding='VALID')\n conv9_2 = layers.conv2D_layer_bn(conv9_1, 'conv9_2', num_filters=64, training=training, padding='VALID')\n\n pred = layers.conv2D_layer_bn(conv9_2, 'pred', num_filters=nlabels, kernel_size=(1,1), activation=tf.identity, training=training, padding='VALID')\n\n return pred\n" ]
[ [ "tensorflow.pad" ] ]
mahow0/neighborhood-shelves
[ "c1814ad425101994e1e5dc824c06cd2cb4e02ea5" ]
[ "ai/src/main.py" ]
[ "import argparse\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom transformers import TrainingArguments\nfrom train import train\nfrom evaluate import evaluate\nfrom model import T5Seq2SeqModel\nfrom utils import load_train_test_split, ProductDataset\n\ndef main(model, optim_params, train_set, eval_set, num_epochs, save_dir, device = torch.device('cpu')):\n model = T5Seq2SeqModel(model_name = model_name)\n tokenizer = model.tokenizer\n\n # Initialize optimizer and scheduler\n optimizer = optim.AdamW(model.parameters(), **optim_params)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3, cooldown=1,\n verbose=False, threshold=0.001)\n\n # Train model\n torch.cuda.empty_cache()\n\n model = train(model, train_set, optimizer=optimizer, scheduler=scheduler, num_epochs=num_epochs,\n device=device)\n\n # Save model weights\n checkpoint_name = f't5_{num_epochs}epochs_{model_name}.pt'\n save_dir = save_dir + checkpoint_name\n torch.save(model.state_dict(), save_dir)\n\n # Evaluate model\n #acc, precision, recall, f1 = evaluate(model, tokenizer, eval_set, device=device)\n\n #print(f'Accuracy: {acc}')\n #print(f'Precision: {precision}')\n #print(f'Recall: {recall}')\n #print(f'F1: {f1}')\n\n #return acc, precision, recall, f1\n\n pass\n\n\nif __name__ == '__main__':\n # Set up the argument parser\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_dir',\n type = str,\n help = 'Directory where results (checkpoints + predictions) are saved',\n default = '../experiments/')\n\n parser.add_argument('--epochs',\n type = int,\n help = 'Number of epochs for training',\n default = 1)\n\n parser.add_argument('--train_batch_size',\n type = int,\n help = 'Batch size for training',\n default = 4)\n\n parser.add_argument('--eval_batch_size',\n type=int,\n help='Batch size for eval',\n default=16)\n\n parser.add_argument('--lr',\n type = float,\n help = 'Learning rate',\n default = 0.001)\n\n parser.add_argument('--cuda',\n type = bool,\n nargs = '?',\n default=True)\n\n # Parse arguments\n args = parser.parse_args()\n\n if args.cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n device = torch.device(\"cpu\")\n\n model_name = 'google/t5-v1_1-base'\n model = T5Seq2SeqModel(model_name)\n\n train_set_params = {'batch_size': args.train_batch_size, 'shuffle': True, 'num_workers': 0}\n eval_set_params = {'batch_size': args.eval_batch_size, 'num_workers': 0}\n\n #Retrieve datasets\n train_set, eval_set = load_train_test_split('../data/train.csv')\n\n train_set = DataLoader(ProductDataset(model.tokenizer, train_set), **train_set_params)\n eval_set = DataLoader(ProductDataset(model.tokenizer, eval_set), **eval_set_params)\n\n optim_params = {'lr': args.lr}\n #Initialize model\n main(model,\n optim_params=optim_params,\n train_set = train_set,\n eval_set = eval_set,\n num_epochs=args.epochs,\n save_dir = args.output_dir,\n device = device)\n\n\n\n\n" ]
[ [ "torch.device", "torch.cuda.empty_cache", "torch.cuda.is_available", "torch.optim.lr_scheduler.ReduceLROnPlateau" ] ]
raphaelsulzer/dgnn
[ "08ef076e80ea38daf000ac2be6771363d6d4ea9a" ]
[ "processing/shapenet/scan.py" ]
[ "import argparse, subprocess, os, random, sys\nimport numpy as np\nfrom tqdm import tqdm\nimport multiprocessing\n\ndef scan_one(args):\n\n # choose random scan parameters\n\n if(args.scan_conf == 0):\n points = 12000\n cameras = 15\n noise = 0.0\n outliers = 0.0\n elif(args.scan_conf == 1):\n points = 3000\n cameras = 15\n noise = 0.0025\n outliers = 0.0\n elif(args.scan_conf == 2):\n points = 12000\n cameras = 15\n noise = 0.005\n outliers = 0.33\n elif(args.scan_conf == 3): # convonet configuration, 50 cameras\n points = 3000\n cameras = 50\n noise = 0.005\n outliers = 0.0\n elif(args.scan_conf == 4): # convonet configuration, 10 cameras\n points = 3000\n cameras = 10\n noise = 0.005\n outliers = 0.0\n elif(args.scan_conf == 99):\n points = int(np.abs(np.random.randn())*6000) # we want 3*sigma to be 20000 (so factor should be 6666.66 but made it a bit lower)\n cameras = 2+int(np.abs(np.random.randn())*6) # we want 3*sigma to be 20 (so factor should be 6.66 but made it a bit lower), and at least 2 cameras\n noise = np.abs(np.random.randn())*0.01 # we want 3*sigma to be 0.03 (so factor should be 0.01)\n outliers = np.abs(np.random.randn())*0.1 # we want 3*sigma to be 0.3 (so factor should be 0.1)\n else:\n print(\"\\nERROR: not a valid config. choose [0,1,2]\")\n sys.exit(1)\n\n outfile = os.path.join(args.wdir,args.o+\".npz\")\n if(os.path.isfile(outfile) and not args.overwrite):\n print(\"exists!\")\n return\n\n # extract features from mpu\n command = [args.sure_dir + \"/scan\",\n \"-w\", str(args.wdir),\n \"-i\", str(args.i),\n \"-o\", str(args.o),\n '--normal_method', 'jet',\n \"--noise\", str(noise),\n \"--outliers\", str(outliers),\n \"--points\", str(points),\n \"--cameras\", str(cameras),\n \"--export\", \"npz\",\n \"--gclosed\", \"1\"]\n # print(\"run command: \", command)\n p = subprocess.Popen(command)\n p.wait()\n\n\n npzfile = np.load(os.path.join(args.wdir,'scan',str(args.scan_conf)+\".npz\"))\n np.savez(os.path.join(args.wdir,'scan',str(args.scan_conf)+\".npz\"),\n points=npzfile[\"points\"],\n normals=npzfile[\"normals\"],\n gt_normals=npzfile[\"gt_normals\"],\n sensor_position=npzfile[\"sensor_position\"],\n cameras=np.array(cameras,dtype=np.float64),\n noise=np.array(noise,dtype=np.float64),\n outliers=np.array(outliers,dtype=np.float64))\n\n a=5\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='reconbench evaluation')\n\n parser.add_argument('-d', '--dataset_dir', type=str, default=\"/home/rsulzer/data2/ShapeNetWatertight/\",\n help='working directory which should include the different scene folders.')\n parser.add_argument('--scan_conf', type=int, default=4,\n help='the scan conf')\n parser.add_argument('--overwrite', type=int, default=1,\n help='overwrite existing files')\n parser.add_argument('--njobs', type=int, default=0,\n help='number of workers. 
> 0 uses multiprocessing.')\n    parser.add_argument('--sure_dir', type=str, default=\"/home/rsulzer/cpp/surfaceReconstruction/build/release\",\n                        help='Indicate the sure build directory, pointing to .../build/release folder starting from user_dir')\n\n\n    parser.add_argument('--category', type=str, default=None,\n                        help='Indicate the category class')\n\n    args = parser.parse_args()\n\n    if args.category is not None:\n        categories = [args.category]\n    else:\n        categories = os.listdir(args.dataset_dir)\n        if 'x' in categories:\n            categories.remove('x')\n\n\n    # scan all training data with random configuration from 0,1,2\n    # and test data with 0,1,2\n\n    ### scanner confs\n    # 0 (easy)     --cameras 15 --points 12000 --noise 0.000 --outliers 0.0\n    # 1 (medium)   --cameras 15 --points 3000 --noise 0.0025 --outliers 0.0\n    # 2 (hard)     --cameras 15 --points 12000 --noise 0.005 --outliers 0.33\n    # 3 (convonet) --cameras 50 --points 3000 --noise 0.005 --outliers 0.0\n\n    for i,c in enumerate(categories):\n        if c.startswith('.'):\n            continue\n        print(\"\\n\\n############## Processing {} - {}/{} ############\\n\\n\".format(c,i+1,len(categories)))\n\n        ### train\n        args.cdir = os.path.join(args.dataset_dir, c)\n        files = os.listdir(os.path.join(args.cdir,\"4_watertight_scaled\"))\n\n        for f in tqdm(files,ncols=50):\n            try:\n                f=f[:-4]\n                os.makedirs(os.path.join(args.cdir,f,\"mesh\"),exist_ok=True)\n                os.rename(os.path.join(args.cdir,\"4_watertight_scaled\",f+\".off\"),os.path.join(args.cdir,f,\"mesh\",\"mesh.off\"))\n                args.wdir = os.path.join(args.cdir,f)\n                args.i = os.path.join(\"mesh\",\"mesh.off\")\n                args.o = os.path.join('scan',str(args.scan_conf))\n                os.makedirs(os.path.join(args.wdir,'scan'),exist_ok=True)\n                scan_one(args)\n            except Exception:\n                pass\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.random.randn" ] ]
Coastchb/tensorflow
[ "cabefb9f98502c739aa2761a9fc654004a993d58" ]
[ "tensorflow/python/training/tracking/python_state_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\n\nimport numpy\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training.tracking import base\nfrom tensorflow.python.training.tracking import python_state\nfrom tensorflow.python.training.tracking import util\n\n\nclass _NumpyState(base.Trackable):\n \"\"\"A checkpointable object whose NumPy array attributes are saved/restored.\n\n Example usage:\n\n ```python\n arrays = _NumpyState()\n checkpoint = tf.train.Checkpoint(numpy_arrays=arrays)\n arrays.x = numpy.zeros([3, 4])\n save_path = checkpoint.save(\"/tmp/ckpt\")\n arrays.x[1, 1] = 4.\n checkpoint.restore(save_path)\n assert (arrays.x == numpy.zeros([3, 4])).all()\n\n second_checkpoint = tf.train.Checkpoint(\n numpy_arrays=_NumpyState())\n # Attributes of NumpyState objects are created automatically by restore()\n second_checkpoint.restore(save_path)\n assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all()\n ```\n\n Note that `NumpyState` objects re-create the attributes of the previously\n saved object on `restore()`. This is in contrast to TensorFlow variables, for\n which a `Variable` object must be created and assigned to an attribute.\n\n This snippet works both when graph building and when executing eagerly. On\n save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via\n a placeholder when graph building, or as a string constant when executing\n eagerly). When restoring they skip the TensorFlow graph entirely, and so no\n restore ops need be run. This means that restoration always happens eagerly,\n rather than waiting for `checkpoint.restore(...).run_restore_ops()` like\n TensorFlow variables when graph building.\n \"\"\"\n\n def _lookup_dependency(self, name):\n \"\"\"Create placeholder NumPy arrays for to-be-restored attributes.\n\n Typically `_lookup_dependency` is used to check by name whether a dependency\n exists. 
We cheat slightly by creating a checkpointable object for `name` if\n we don't already have one, giving us attribute re-creation behavior when\n loading a checkpoint.\n\n Args:\n name: The name of the dependency being checked.\n Returns:\n An existing dependency if one exists, or a new `_NumpyWrapper` placeholder\n dependency (which will generally be restored immediately).\n \"\"\"\n value = super(_NumpyState, self)._lookup_dependency(name)\n if value is None:\n value = _NumpyWrapper(numpy.array([]))\n new_reference = base.TrackableReference(name=name, ref=value)\n self._unconditional_checkpoint_dependencies.append(new_reference)\n self._unconditional_dependency_names[name] = value\n super(_NumpyState, self).__setattr__(name, value)\n return value\n\n def __getattribute__(self, name):\n \"\"\"Un-wrap `_NumpyWrapper` objects when accessing attributes.\"\"\"\n value = super(_NumpyState, self).__getattribute__(name)\n if isinstance(value, _NumpyWrapper):\n return value.array\n return value\n\n def __setattr__(self, name, value):\n \"\"\"Automatically wrap NumPy arrays assigned to attributes.\"\"\"\n # TODO(allenl): Consider supporting lists/tuples, either ad-hoc or by making\n # ndarrays checkpointable natively and using standard checkpointable list\n # tracking.\n if isinstance(value, (numpy.ndarray, numpy.generic)):\n try:\n existing = super(_NumpyState, self).__getattribute__(name)\n existing.array = value\n return\n except AttributeError:\n value = _NumpyWrapper(value)\n self._track_trackable(value, name=name, overwrite=True)\n elif (name not in (\"_setattr_tracking\", \"_update_uid\")\n and getattr(self, \"_setattr_tracking\", True)):\n # Mixing restore()-created attributes with user-added checkpointable\n # objects is tricky, since we can't use the `_lookup_dependency` trick to\n # re-create attributes (we might accidentally steal the restoration for\n # another checkpointable object). For now `_NumpyState` objects must be\n # leaf nodes. Theoretically we could add some extra arguments to\n # `_lookup_dependency` to figure out whether we should create a NumPy\n # array for the attribute or not.\n raise NotImplementedError(\n (\"Assigned %s to the %s property of %s, which is not a NumPy array. \"\n \"Currently mixing NumPy arrays and other checkpointable objects is \"\n \"not supported. 
File a feature request if this limitation bothers \"\n \"you.\")\n % (value, name, self))\n super(_NumpyState, self).__setattr__(name, value)\n\n\nclass _NumpyWrapper(python_state.PythonState):\n \"\"\"Wraps a NumPy array for storage in an object-based checkpoint.\"\"\"\n\n def __init__(self, array):\n \"\"\"Specify a NumPy array to wrap.\n\n Args:\n array: The NumPy array to save and restore (may be overwritten).\n \"\"\"\n self.array = array\n\n def serialize(self):\n \"\"\"Callback to serialize the array.\"\"\"\n string_file = io.BytesIO()\n try:\n numpy.save(string_file, self.array, allow_pickle=False)\n serialized = string_file.getvalue()\n finally:\n string_file.close()\n return serialized\n\n def deserialize(self, string_value):\n \"\"\"Callback to deserialize the array.\"\"\"\n string_file = io.BytesIO(string_value)\n try:\n self.array = numpy.load(string_file, allow_pickle=False)\n finally:\n string_file.close()\n\n\nclass NumpyStateTests(test.TestCase):\n\n def testWrapper(self):\n directory = self.get_temp_dir()\n prefix = os.path.join(directory, \"ckpt\")\n root = util.Checkpoint(numpy=_NumpyWrapper(numpy.array([1.])))\n save_path = root.save(prefix)\n root.numpy.array *= 2.\n self.assertEqual([2.], root.numpy.array)\n root.restore(save_path)\n self.assertEqual([1.], root.numpy.array)\n\n @test_util.run_in_graph_and_eager_modes\n def testSaveRestoreNumpyState(self):\n directory = self.get_temp_dir()\n prefix = os.path.join(directory, \"ckpt\")\n save_state = _NumpyState()\n saver = util.Checkpoint(numpy=save_state)\n save_state.a = numpy.ones([2, 2])\n save_state.b = numpy.ones([2, 2])\n save_state.b = numpy.zeros([2, 2])\n save_state.c = numpy.int64(3)\n self.assertAllEqual(numpy.ones([2, 2]), save_state.a)\n self.assertAllEqual(numpy.zeros([2, 2]), save_state.b)\n self.assertEqual(3, save_state.c)\n first_save_path = saver.save(prefix)\n save_state.a[1, 1] = 2.\n save_state.c = numpy.int64(4)\n second_save_path = saver.save(prefix)\n\n load_state = _NumpyState()\n loader = util.Checkpoint(numpy=load_state)\n loader.restore(first_save_path).initialize_or_restore()\n self.assertAllEqual(numpy.ones([2, 2]), load_state.a)\n self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)\n self.assertEqual(3, load_state.c)\n load_state.a[0, 0] = 42.\n self.assertAllEqual([[42., 1.], [1., 1.]], load_state.a)\n loader.restore(first_save_path).run_restore_ops()\n self.assertAllEqual(numpy.ones([2, 2]), load_state.a)\n loader.restore(second_save_path).run_restore_ops()\n self.assertAllEqual([[1., 1.], [1., 2.]], load_state.a)\n self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)\n self.assertEqual(4, load_state.c)\n\n def testNoGraphPollution(self):\n graph = ops.Graph()\n with graph.as_default(), session.Session():\n directory = self.get_temp_dir()\n prefix = os.path.join(directory, \"ckpt\")\n save_state = _NumpyState()\n saver = util.Checkpoint(numpy=save_state)\n save_state.a = numpy.ones([2, 2])\n save_path = saver.save(prefix)\n saver.restore(save_path)\n graph.finalize()\n saver.save(prefix)\n save_state.a = numpy.zeros([2, 2])\n saver.save(prefix)\n saver.restore(save_path)\n\n @test_util.run_in_graph_and_eager_modes\n def testNoMixedNumpyStateTF(self):\n save_state = _NumpyState()\n save_state.a = numpy.ones([2, 2])\n with self.assertRaises(NotImplementedError):\n save_state.v = variables.Variable(1.)\n\n @test_util.run_in_graph_and_eager_modes\n def testDocstringExample(self):\n arrays = _NumpyState()\n checkpoint = util.Checkpoint(numpy_arrays=arrays)\n arrays.x = numpy.zeros([3, 
4])\n save_path = checkpoint.save(os.path.join(self.get_temp_dir(), \"ckpt\"))\n arrays.x[1, 1] = 4.\n checkpoint.restore(save_path)\n self.assertAllEqual(numpy.zeros([3, 4]), arrays.x)\n\n second_checkpoint = util.Checkpoint(numpy_arrays=_NumpyState())\n second_checkpoint.restore(save_path)\n self.assertAllEqual(numpy.zeros([3, 4]), second_checkpoint.numpy_arrays.x)\n\n\nif __name__ == \"__main__\":\n ops.enable_eager_execution()\n test.main()\n" ]
[ [ "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.ops.Graph", "numpy.array", "tensorflow.python.training.tracking.base.TrackableReference", "tensorflow.python.training.tracking.util.Checkpoint", "numpy.zeros", "numpy.ones", "numpy.load", "tensorflow.python.client.session.Session", "numpy.save", "tensorflow.python.framework.ops.enable_eager_execution", "numpy.int64", "tensorflow.python.platform.test.main" ] ]
agolovanov/quill
[ "18cf99cc8517f173765d4f56a90a6d53403e90f8" ]
[ "python/energy_test.py" ]
[ "#!/usr/bin/python\nimport numpy as np\nimport resread\n\ndef check(data_folder = '../results/', t=None):\n 'Verifies energy conservation in a Quill run'\n resread.data_folder = data_folder\n resread.read_parameters()\n data = resread.t_data('energy', silent = True)\n data_deleted = None\n if resread.catching:\n data_deleted = resread.t_data('energy_deleted', silent=True)\n \n index = -1\n index_del = -1\n if t==None:\n t = data[-1][0]\n else:\n index = next((i for i in range(len(data)) if data[i][0] > t), None)\n if resread.catching:\n index_del = next((i for i in range(len(data_deleted)) if data_deleted[i][0] > t), None)\n \n if index == None or index_del == None:\n raise ValueError(\"T = {0} is too big; T max = {1}\".format(t, data[-1][0]))\n print (\"T = {0}, T max = {1}, T end = {2}, data folder = {3}\".format(t, data[-1][0], resread.t_end, data_folder))\n print (\"dx = {0}, dy = {1}, dz = {2}, dt = {3}, ne = {4}, nfilm = {5}\".format(resread.dx, resread.dy, resread.dz, resread.dt, resread.ne, resread.nfilm))\n print (\"W in box (t={0}) / W_0 = {1}\".format(t, np.sum(data[index][1:]) / np.sum(data[0][1:])))\n if resread.catching:\n print (\"W total (t={0}) / W_0 = {1}\".format(t, (np.sum(data[index][1:]) + np.sum(data_deleted[index_del][1:])) / np.sum(data[0][1:])))\n\n\n" ]
[ [ "numpy.sum" ] ]
lebronjames/TensorFlow
[ "cb72f1363bfcaafa496917307e3a0824c5483ee2" ]
[ "TensorFlowTest01.py" ]
[ "import tensorflow as tf\ngreeting = tf.constant('Hello Google Tensorflow!')\nsess = tf.Session()\nresult = sess.run(greeting)\nprint(result)\nsess.close()\n" ]
[ [ "tensorflow.constant", "tensorflow.Session" ] ]
mzhang-code/serving
[ "527c6f2173eba584ebdca4f8b11ae3c0550ab1a9" ]
[ "tensorflow_serving/model_servers/test_util/tensorflow_model_server_test_base.py" ]
[ "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for tensorflow_model_server.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport atexit\nimport json\nimport os\nimport shlex\nimport socket\nimport subprocess\nimport time\n\n# This is a placeholder for a Google-internal import.\n\nimport grpc\nfrom six.moves import range\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nFLAGS = flags.FLAGS\n\nRPC_TIMEOUT = 5.0\nHTTP_REST_TIMEOUT_MS = 5000\nCHANNEL_WAIT_TIMEOUT = 5.0\nWAIT_FOR_SERVER_READY_INT_SECS = 60\nGRPC_SOCKET_PATH = '/tmp/tf-serving.sock'\n\n\ndef SetVirtualCpus(num_virtual_cpus):\n \"\"\"Create virtual CPU devices if they haven't yet been created.\"\"\"\n if num_virtual_cpus < 1:\n raise ValueError('`num_virtual_cpus` must be at least 1 not %r' %\n (num_virtual_cpus,))\n physical_devices = tf.config.experimental.list_physical_devices('CPU')\n if not physical_devices:\n raise RuntimeError('No CPUs found')\n configs = tf.config.experimental.get_virtual_device_configuration(\n physical_devices[0])\n if configs is None:\n virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration()\n for _ in range(num_virtual_cpus)]\n tf.config.experimental.set_virtual_device_configuration(\n physical_devices[0], virtual_devices)\n else:\n if len(configs) < num_virtual_cpus:\n raise RuntimeError('Already configured with %d < %d virtual CPUs' %\n (len(configs), num_virtual_cpus))\n\n\ndef PickUnusedPort():\n s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n return port\n\n\ndef WaitForServerReady(port):\n \"\"\"Waits for a server on the localhost to become ready.\"\"\"\n for _ in range(0, WAIT_FOR_SERVER_READY_INT_SECS):\n time.sleep(1)\n request = predict_pb2.PredictRequest()\n request.model_spec.name = 'intentionally_missing_model'\n\n try:\n # Send empty request to missing model\n channel = grpc.insecure_channel('localhost:{}'.format(port))\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n stub.Predict(request, RPC_TIMEOUT)\n except grpc.RpcError as error:\n # Missing model error will have details containing 'Servable'\n if 'Servable' in error.details():\n print('Server is ready')\n break\n\n\ndef CallREST(url, req, max_attempts=60):\n \"\"\"Returns HTTP response body from a REST API call.\"\"\"\n for attempt in range(max_attempts):\n try:\n print('Attempt {}: Sending request to {} with data:\\n{}'.format(\n attempt, url, req))\n json_data = json.dumps(req).encode('utf-8') if req is not None else None\n resp 
= urllib.request.urlopen(urllib.request.Request(url, data=json_data))\n resp_data = resp.read()\n print('Received response:\\n{}'.format(resp_data))\n resp.close()\n return resp_data\n except Exception as e: # pylint: disable=broad-except\n print('Failed attempt {}. Error: {}'.format(attempt, e))\n if attempt == max_attempts - 1:\n raise\n print('Retrying...')\n time.sleep(1)\n\n\ndef SortedObject(obj):\n \"\"\"Returns sorted object (with nested list/dictionaries).\"\"\"\n if isinstance(obj, dict):\n return sorted((k, SortedObject(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(SortedObject(x) for x in obj)\n if isinstance(obj, tuple):\n return list(sorted(SortedObject(x) for x in obj))\n else:\n return obj\n\n\nclass TensorflowModelServerTestBase(tf.test.TestCase):\n \"\"\"This class defines integration test cases for tensorflow_model_server.\"\"\"\n\n @staticmethod\n def __TestSrcDirPath(relative_path=''):\n return os.path.join(os.environ['TEST_SRCDIR'],\n 'tf_serving/tensorflow_serving', relative_path)\n\n @staticmethod\n def GetArgsKey(*args, **kwargs):\n return args + tuple(sorted(kwargs.items()))\n\n # Maps string key -> 2-tuple of 'host:port' string.\n model_servers_dict = {}\n\n @staticmethod\n def RunServer(model_name,\n model_path,\n model_type='tf',\n model_config_file=None,\n monitoring_config_file=None,\n batching_parameters_file=None,\n grpc_channel_arguments='',\n wait_for_server_ready=True,\n pipe=None,\n model_config_file_poll_period=None):\n \"\"\"Run tensorflow_model_server using test config.\n\n A unique instance of server is started for each set of arguments.\n If called with same arguments, handle to an existing server is\n returned.\n\n Args:\n model_name: Name of model.\n model_path: Path to model.\n model_type: Type of model TensorFlow ('tf') or TF Lite ('tflite').\n model_config_file: Path to model config file.\n monitoring_config_file: Path to the monitoring config file.\n batching_parameters_file: Path to batching parameters.\n grpc_channel_arguments: Custom gRPC args for server.\n wait_for_server_ready: Wait for gRPC port to be ready.\n pipe: subpipe.PIPE object to read stderr from server.\n model_config_file_poll_period: Period for polling the\n filesystem to discover new model configs.\n\n Returns:\n 3-tuple (<Popen object>, <grpc host:port>, <rest host:port>).\n\n Raises:\n ValueError: when both model_path and config_file is empty.\n \"\"\"\n args_key = TensorflowModelServerTestBase.GetArgsKey(**locals())\n if args_key in TensorflowModelServerTestBase.model_servers_dict:\n return TensorflowModelServerTestBase.model_servers_dict[args_key]\n port = PickUnusedPort()\n rest_api_port = PickUnusedPort()\n print(('Starting test server on port: {} for model_name: '\n '{}/model_config_file: {}'.format(port, model_name,\n model_config_file)))\n command = os.path.join(\n TensorflowModelServerTestBase.__TestSrcDirPath('model_servers'),\n 'tensorflow_model_server')\n command += ' --port=' + str(port)\n command += ' --rest_api_port=' + str(rest_api_port)\n command += ' --rest_api_timeout_in_ms=' + str(HTTP_REST_TIMEOUT_MS)\n command += ' --grpc_socket_path=' + GRPC_SOCKET_PATH\n\n if model_config_file:\n command += ' --model_config_file=' + model_config_file\n elif model_path:\n command += ' --model_name=' + model_name\n command += ' --model_base_path=' + model_path\n else:\n raise ValueError('Both model_config_file and model_path cannot be empty!')\n\n if model_type == 'tflite':\n command += ' --prefer_tflite_model=true'\n\n if 
monitoring_config_file:\n command += ' --monitoring_config_file=' + monitoring_config_file\n\n if model_config_file_poll_period is not None:\n command += ' --model_config_file_poll_wait_seconds=' + str(\n model_config_file_poll_period)\n\n if batching_parameters_file:\n command += ' --enable_batching'\n command += ' --batching_parameters_file=' + batching_parameters_file\n if grpc_channel_arguments:\n command += ' --grpc_channel_arguments=' + grpc_channel_arguments\n print(command)\n proc = subprocess.Popen(shlex.split(command), stderr=pipe)\n atexit.register(proc.kill)\n print('Server started')\n if wait_for_server_ready:\n WaitForServerReady(port)\n hostports = (\n proc,\n 'localhost:' + str(port),\n 'localhost:' + str(rest_api_port),\n )\n TensorflowModelServerTestBase.model_servers_dict[args_key] = hostports\n return hostports\n\n def VerifyPredictRequest(\n self,\n model_server_address,\n expected_output,\n expected_version,\n model_name='default',\n specify_output=True,\n batch_input=False,\n signature_name=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,\n rpc_timeout=RPC_TIMEOUT):\n \"\"\"Send PredictionService.Predict request and verify output.\"\"\"\n print('Sending Predict request...')\n # Prepare request\n request = predict_pb2.PredictRequest()\n request.model_spec.name = model_name\n request.model_spec.signature_name = signature_name\n request.inputs['x'].dtype = types_pb2.DT_FLOAT\n request.inputs['x'].float_val.append(2.0)\n dim = request.inputs['x'].tensor_shape.dim.add()\n dim.size = 1\n if batch_input:\n request.inputs['x'].tensor_shape.dim.add().size = 1\n\n if specify_output:\n request.output_filter.append('y')\n # Send request\n channel = grpc.insecure_channel(model_server_address)\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n result = stub.Predict(request, rpc_timeout) # 5 secs timeout\n # Verify response\n self.assertTrue('y' in result.outputs)\n self.assertEqual(types_pb2.DT_FLOAT, result.outputs['y'].dtype)\n self.assertEqual(1, len(result.outputs['y'].float_val))\n self.assertEqual(expected_output, result.outputs['y'].float_val[0])\n self._VerifyModelSpec(result.model_spec, request.model_spec.name,\n signature_name, expected_version)\n\n def _GetSavedModelBundlePath(self):\n \"\"\"Returns a path to a model in SavedModel format.\"\"\"\n return os.path.join(self.testdata_dir, 'saved_model_half_plus_two_cpu')\n\n def _GetModelVersion(self, model_path):\n \"\"\"Returns version of SavedModel/SessionBundle in given path.\n\n This method assumes there is exactly one directory with an 'int' valued\n directory name under `model_path`.\n\n Args:\n model_path: A string representing path to the SavedModel/SessionBundle.\n\n Returns:\n version of SavedModel/SessionBundle in given path.\n \"\"\"\n return int(os.listdir(model_path)[0])\n\n def _GetSavedModelHalfPlusTwoTf2(self):\n \"\"\"Returns a path to a TF2 half_plus_two model in SavedModel format.\"\"\"\n return os.path.join(self.testdata_dir, 'saved_model_half_plus_two_tf2_cpu')\n\n def _GetSavedModelHalfPlusThreePath(self):\n \"\"\"Returns a path to a half_plus_three model in SavedModel format.\"\"\"\n return os.path.join(self.testdata_dir, 'saved_model_half_plus_three')\n\n def _GetTfLiteModelPath(self):\n \"\"\"Returns a path to a model in TF Lite format.\"\"\"\n return os.path.join(self.testdata_dir, 'saved_model_half_plus_two_tflite')\n\n def _GetTfLiteModelWithSigDefPath(self):\n \"\"\"Returns a path to a model in TF Lite format.\"\"\"\n return os.path.join(self.testdata_dir,\n 
'saved_model_half_plus_two_tflite_with_sigdef')\n\n def _GetSessionBundlePath(self):\n \"\"\"Returns a path to a model in SessionBundle format.\"\"\"\n return os.path.join(self.session_bundle_testdata_dir, 'half_plus_two')\n\n def _GetGoodModelConfigTemplate(self):\n \"\"\"Returns a path to a working configuration file template.\"\"\"\n return os.path.join(self.testdata_dir, 'good_model_config.txt')\n\n def _GetGoodModelConfigFile(self):\n \"\"\"Returns a path to a working configuration file.\"\"\"\n return os.path.join(self.temp_dir, 'good_model_config.conf')\n\n def _GetBadModelConfigFile(self):\n \"\"\"Returns a path to a improperly formatted configuration file.\"\"\"\n return os.path.join(self.testdata_dir, 'bad_model_config.txt')\n\n def _GetBatchingParametersFile(self):\n \"\"\"Returns a path to a batching configuration file.\"\"\"\n return os.path.join(self.testdata_dir, 'batching_config.txt')\n\n def _GetModelMetadataFile(self):\n \"\"\"Returns a path to a sample model metadata file.\"\"\"\n return os.path.join(self.testdata_dir, 'half_plus_two_model_metadata.json')\n\n def _GetMonitoringConfigFile(self):\n \"\"\"Returns a path to a monitoring configuration file.\"\"\"\n return os.path.join(self.testdata_dir, 'monitoring_config.txt')\n\n def _VerifyModelSpec(self,\n actual_model_spec,\n exp_model_name,\n exp_signature_name,\n exp_version):\n \"\"\"Verifies model_spec matches expected model name, signature, version.\n\n Args:\n actual_model_spec: An instance of ModelSpec proto.\n exp_model_name: A string that represents expected model name.\n exp_signature_name: A string that represents expected signature.\n exp_version: An integer that represents expected version.\n\n Returns:\n None.\n \"\"\"\n self.assertEqual(actual_model_spec.name, exp_model_name)\n self.assertEqual(actual_model_spec.signature_name, exp_signature_name)\n self.assertEqual(actual_model_spec.version.value, exp_version)\n\n def _TestPredict(\n self,\n model_path,\n batching_parameters_file=None,\n signature_name=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):\n \"\"\"Helper method to test prediction.\n\n Args:\n model_path: Path to the model on disk.\n batching_parameters_file: Batching parameters file to use (if None\n batching is not enabled).\n signature_name: Signature name to expect in the PredictResponse.\n \"\"\"\n model_server_address = TensorflowModelServerTestBase.RunServer(\n 'default',\n model_path,\n batching_parameters_file=batching_parameters_file)[1]\n expected_version = self._GetModelVersion(model_path)\n self.VerifyPredictRequest(model_server_address, expected_output=3.0,\n expected_version=expected_version,\n signature_name=signature_name)\n self.VerifyPredictRequest(\n model_server_address, expected_output=3.0, specify_output=False,\n expected_version=expected_version, signature_name=signature_name)\n" ]
[ [ "tensorflow.config.experimental.set_virtual_device_configuration", "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.VirtualDeviceConfiguration", "tensorflow.config.experimental.get_virtual_device_configuration" ] ]
kitkat52/pietoolbelt
[ "0e0b5859662fcb43b008218746cc3e76cc66b6b8" ]
[ "pietoolbelt/metrics/torch/regression.py" ]
[ "import torch\nimport numpy as np\nfrom piepline import AbstractMetric\nfrom sklearn.preprocessing import MinMaxScaler\nfrom torch import Tensor\n\nfrom pietoolbelt.metrics.cpu.regression import rmse as rmse_cpu\nfrom pietoolbelt.metrics.cpu.regression import amad as amad_cpu\nfrom pietoolbelt.metrics.cpu.regression import relative as relative_cpu\n\n__all__ = ['rmse', 'amad', 'relative', 'AMADMetric', 'RelativeMetric', 'RMSEMetric']\n\n\ndef rmse(predict: Tensor, target: Tensor) -> float:\n return float(torch.mean(torch.sqrt(torch.mean((predict - target) ** 2, axis=0))).cpu())\n\n\ndef amad(predict: Tensor, target: Tensor) -> float:\n return float(torch.mean(torch.mean(torch.abs(predict - target), axis=0)).cpu())\n\n\ndef relative(predict: Tensor, target: Tensor) -> float:\n return float(torch.mean(torch.mean(torch.abs(predict - target) / (target + 1e-6), axis=0)).cpu())\n\n\nclass _AbstractRegressionMetric(AbstractMetric):\n def __init__(self, name: str, calc_cpu: callable, calc_torch: callable, min_max_scaler: MinMaxScaler = None):\n super().__init__(name)\n self.scaler = min_max_scaler\n\n self._calc_cpu, self._calc_torch = calc_cpu, calc_torch\n if self.scaler is None:\n self._calc = self._calc_without_scaler\n else:\n self._calc = self._calc_with_scaler\n\n def _calc_with_scaler(self, output: Tensor, target: Tensor) -> np.ndarray or float:\n output_inner = self.scaler.inverse_transform(output.detach().cpu().numpy())\n target_inner = self.scaler.inverse_transform(target.detach().cpu().numpy())\n return self._calc_cpu(output_inner, target_inner)\n\n def _calc_without_scaler(self, output: Tensor, target: Tensor) -> np.ndarray or float:\n return self._calc_torch(output, target)\n\n def calc(self, output: Tensor, target: Tensor) -> np.ndarray or float:\n return self._calc(output, target)\n\n\nclass AMADMetric(_AbstractRegressionMetric):\n def __init__(self, min_max_scaler: MinMaxScaler = None):\n super().__init__(\"AMAD\", calc_cpu=amad_cpu, calc_torch=amad, min_max_scaler=min_max_scaler)\n\n\nclass RMSEMetric(_AbstractRegressionMetric):\n def __init__(self, min_max_scaler: MinMaxScaler = None):\n super().__init__(\"RMSE\", calc_cpu=rmse_cpu, calc_torch=rmse, min_max_scaler=min_max_scaler)\n\n\nclass RelativeMetric(_AbstractRegressionMetric):\n def __init__(self, min_max_scaler: MinMaxScaler = None):\n super().__init__(\"Relative\", calc_cpu=relative_cpu, calc_torch=relative, min_max_scaler=min_max_scaler)\n" ]
[ [ "torch.abs", "torch.mean" ] ]
jgori-ouistiti/interaction-agents
[ "922d9bddb2b14784e32c4639b66cec302e80e13a" ]
[ "test/unit/space/test_gym-conversions.py" ]
[ "import numpy\nfrom coopihc.space.Space import Space\nfrom coopihc.space.utils import discrete_space, continuous_space, multidiscrete_space\nimport gym\n\n\ndef test_all_conversions():\n test_discrete()\n test_continuous()\n test_multidiscrete()\n\n\ndef test_discrete():\n s = discrete_space([1, 2, 3])\n assert s.convert_to_gym() == [gym.spaces.Discrete(3)]\n\n\ndef test_continuous():\n s = continuous_space(\n -numpy.ones((2, 2)),\n numpy.ones((2, 2)),\n )\n assert s.convert_to_gym() == [\n gym.spaces.Box(\n low=-numpy.ones((2, 2), dtype=numpy.float32),\n high=numpy.ones((2, 2), dtype=numpy.float32),\n )\n ]\n\n\ndef test_multidiscrete():\n s = multidiscrete_space([[1, 2, 3], (4, 5, 6)])\n assert s.convert_to_gym() == [gym.spaces.Discrete(3), gym.spaces.Discrete(3)]\n\n\nif __name__ == \"__main__\":\n test_all_conversions()\n" ]
[ [ "numpy.ones" ] ]
johnwlambert/dlupi-heteroscedastic-dropou
[ "057dd079fce7ec8833b818b77fd694c01a1adcbc" ]
[ "cnns/base_networks/vgg_truncated.py" ]
[ "# John Lambert, Ozan Sener\n\nimport torch.nn as nn\nimport math\n\nclass VGGTruncatedConv(nn.Module):\n def __init__(self, opt ):\n super(VGGTruncatedConv, self).__init__()\n\n self.cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']\n self.conv = self.make_conv_layers()\n self._initialize_weights()\n\n def make_conv_layers(self, batch_norm=True):\n layers = []\n in_channels = 3\n for v in self.cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def forward(self,x):\n \"\"\"\n In at (N x 3 x 224 x 224)\n\n Out at (N x 512 x 7 x 7)\n \"\"\"\n x = self.conv(x)\n return x\n\n\n\n\nclass VGGTruncatedClassifier(nn.Module):\n def __init__(self, opt):\n super(VGGTruncatedClassifier, self).__init__()\n\n IMAGE_SIZE = opt.image_size\n out_recept_fld_sz = IMAGE_SIZE / (2 ** 5 ) # 5 max pools that shrink size\n flattened_feat_sz = 512 * out_recept_fld_sz * out_recept_fld_sz\n flattened_feat_sz = int(flattened_feat_sz) # Int must be Tensor Size input\n\n self.classifier = nn.Sequential(\n nn.Linear( flattened_feat_sz , 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, opt.num_classes),\n )\n self._initialize_weights()\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\nclass VGGHallucinationClassifier(nn.Module):\n \"\"\"\n Return not just final logits but also FC1 activations\n \"\"\"\n def __init__(self, opt):\n super(VGGHallucinationClassifier, self).__init__()\n\n IMAGE_SIZE = opt.image_size\n out_recept_fld_sz = IMAGE_SIZE / (2 ** 5 ) # 5 max pools that shrink size\n flattened_feat_sz = 512 * out_recept_fld_sz * out_recept_fld_sz\n flattened_feat_sz = int(flattened_feat_sz) # Int must be Tensor Size input\n\n self.dropout = nn.Dropout()\n self.relu = nn.ReLU(True)\n\n self.fc1 = nn.Linear(flattened_feat_sz, 4096)\n self.fc2 = nn.Linear(4096, 4096)\n self.fc3 = nn.Linear(4096, opt.num_classes)\n\n self._initialize_weights()\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.relu(self.fc1(x))\n fc1_act = x.clone()\n x = self.dropout(x)\n x = self.relu(self.fc2(x))\n x = self.dropout(x)\n x = self.fc3(x)\n return fc1_act, x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
zhiqiang00/Hon-GCN
[ "916f826f9193a800ac9d4d2e66f2ee108025a23d" ]
[ "pygcn/data_processed.py" ]
[ "import random\nfrom itertools import combinations\n\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nimport torch\n\nfrom pygcn.utils import sample_neg_graph\n\n\ndef get_Graph(Path):\n edges = []\n f = open(Path)\n for line in f.readlines():\n node1, node2 = line.strip().split()[:2]\n # edges.append([node_id_mapping[node1], node_id_mapping[node2]])\n edges.append([node1, node2])\n f.close()\n G = nx.Graph(edges)\n nx.info(G)\n print(\"generate graph done!\")\n return G\n\n\ndef get_Graph2(Path, node_id_mapping):\n edges = []\n f = open(Path)\n for line in f.readlines():\n node1, node2 = line.strip().split()[:2]\n edges.append([node_id_mapping[node1], node_id_mapping[node2]])\n f.close()\n G = nx.Graph(edges)\n nx.info(G)\n print(\"generate graph done!\")\n return G\n\ndef get_degrees(graph_path, save_path, isSave):\n G = get_Graph(graph_path)\n print(\"正在生成特征(度)......\")\n degrees = nx.degree(G)\n features = pd.DataFrame(degrees)\n features.columns = ['idx', 'degree']\n if isSave:\n features.to_csv(save_path, index=False)\n return features\n\n\ndef get_mapping(Path):\n nodes = []\n with open(Path) as ef:\n for line in ef.readlines():\n nodes.extend(line.strip().split()[:2])\n nodes = sorted(list(set(nodes)))\n node_id_mapping = {old_id: new_id for new_id, old_id in enumerate(nodes)}\n return node_id_mapping\n\n\n\ndef hon_to_originan(hon_path, origin_path, save_path_origin, save_path):\n print(\"开始进行映射\")\n # 利用原始网络生成字典,即在原始网络中将每个边给一个id,然后进行映射\n with open(origin_path) as f:\n edge_dict = {}\n edge_original = []\n lines = f.readlines()\n for id, l in enumerate(lines):\n # print(l)\n node1, node2 = l.split()\n edge_original.append([node1, node2, id])\n key = node1 + '-' + node2\n edge_dict[key] = id\n\n edge_original = pd.DataFrame(edge_original, columns=['node1', 'node2', 'label'])\n edge_original.to_csv(save_path_origin, index=True)\n\n\n with open(hon_path) as f:\n edge_list = [] # 用于存放高阶网络中高阶表示的节点实际是原始网络中的那个节点。\n lines = f.readlines()\n for i, l in enumerate(lines):\n node1, node2 = l.split()\n node1 = node1.split(sep=\"|\")[0]\n node2 = node2.split(sep=\"|\")[0]\n # print(\"l, node1, node2: \\nid=\", i, l, node1, node2)\n label = edge_dict[node1 + '-' + node2]\n edge_list.append([l.split()[0], l.split()[1], label])\n edge_list = pd.DataFrame(edge_list, columns=['node1', 'node2', 'label'])\n edge_list.to_csv(save_path, index=True)\n print(\"Done!\")\n\n\ndef get_weibo_message(num_path, thresholds):\n thresholds_min, thresholds_max = thresholds\n data = np.genfromtxt(num_path, delimiter='\t', dtype=np.int64)\n data_thresholds = data[data[:, 1] > thresholds_min]\n data_thresholds = data_thresholds[data_thresholds[:, 1] < thresholds_max]\n # data = pd.DataFrame(data, columns=['id', 'num'])\n # tmp = data.groupby(['num']).agg(['count'])\n return data[:, 0], data_thresholds[:, 0]\n\n\n# 将微博数据抽取为序列\ndef get_seq(retwent_path, num_path, save_path, thresholds):\n messageid, messageid_thresholds = get_weibo_message(num_path, thresholds)\n messageid = [str(i) for i in messageid]\n messageid = set(messageid)\n messageid_thresholds = [str(i) for i in messageid_thresholds]\n messageid_thresholds = set(messageid_thresholds)\n seq_retweet = []\n f = open(retwent_path, '+r')\n line = f.readline()\n tmp = []\n i = 0\n while line:\n l = line.strip().split()\n if l and l[0] in messageid:\n if tmp and tmp[0] in messageid_thresholds:\n seq_retweet.append(tmp)\n if i % 500 == 0:\n print(i, ' '.join(tmp))\n i += 1\n tmp = []\n tmp.append(l[0])\n tmp.append(l[1])\n elif l and len(l[0]) == 10 
and l[0] != tmp[-1]:\n tmp.append(l[0])\n line = f.readline()\n f.close()\n seq_retweet = seq_retweet[1:]\n # seq_retweet = [[int(j) for j in i] for i in seq_retweet]\n f = open(save_path, '+w')\n for s in seq_retweet:\n ss = \" \".join(s) + '\\n'\n f.write(ss)\n f.close()\n print(\"weibo 序列数据抽取完成!\")\n\n\ndef get_neg_test_hon_emb(features, test_neg_n_samples, feature_type, dataname=\"traces-1000\"):\n # # 得到测试数据集中的test部分的neg的高阶网络的id\n path_hon_root = r\"../data/{}/\".format(dataname)\n path_original_root = r\"../data/{}-original/\".format(dataname)\n # f = open(path_original_root + 'edges.txt', \"r\")\n edges = np.genfromtxt(path_original_root + 'edges.txt',\n dtype=np.dtype(str))\n # lines = f.readlines()\n G = nx.Graph(edges.tolist())\n node1_id_list, node2_id_list = sample_neg_graph(G, path_hon_root, path_original_root, feature_type, n_samples=test_neg_n_samples)\n features = np.array(features)\n node1_emb_list = []\n node2_emb_list = []\n for i in node1_id_list:\n node1_id_same = np.array(i)\n # print(\"node1_id_same\", node1_id_same)\n embedding = features[node1_id_same]\n embedding = np.mean(embedding, axis=0)\n node1_emb_list.append(embedding)\n for i in node2_id_list:\n node2_id_same = np.array(i)\n embedding = features[node2_id_same]\n embedding = np.mean(embedding, axis=0)\n node2_emb_list.append(embedding)\n return torch.LongTensor(np.array(node1_emb_list)), torch.LongTensor(np.array(node2_emb_list))\n\n\ndef deepwalk_emb_pro(dataname, model_type):\n\n embpath = r\"../data/embeddingforother/{}-original-{}.embeddings\".format(dataname, model_type)\n degreepath = r\"../data/{}-original/traces.degree\".format(dataname)\n data = np.genfromtxt(embpath, skip_header=1)\n data = data[np.argsort(data[:, 0])]\n data = pd.DataFrame(data)\n degree = pd.read_csv(degreepath)\n data = data.iloc[:, 1:]\n data = pd.concat([degree['idx'], data], axis=1)\n # data.to_csv(embpath, index=False)\n return data\n\ndef get_origin(degreepath=\"../data/traces-1000-original/traces.degree\",\n savepath='../data/traces-1000-original/edges_mapping.txt',\n edgepath='../data/traces-1000-original/edges.txt'):\n degreepath = pd.read_csv(degreepath)\n idx = degreepath['idx']\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(edgepath, dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten()))).reshape(edges_unordered.shape)\n np.savetxt(savepath, edges, delimiter='\t', fmt='%d')\n\ndef edges_proc(dataname=\"\"):\n # dataname = \"traces-simulated\"\n # dataname = \"traces-1000\"\n # dataname = \"traces-10000\"\n # dataname = \"traces-100\"\n # dataname = \"click-stream\"\n print(\"正在处理{}数据\".format(dataname))\n path_original_root = r\"../data/{}-original/\".format(dataname)\n path_hon_root = r\"../data/{}/\".format(dataname)\n # 生成度\n get_degrees(isSave=True, graph_path=path_original_root + \"edges.txt\", save_path=path_original_root + \"traces.degree\")\n get_degrees(isSave=True, graph_path=path_hon_root + \"edges.txt\", save_path=path_hon_root + \"traces.degree\")\n print(\" 生成度 done!\")\n #\n # # 将高阶网络映射回原始网络\n hon_to_originan(hon_path=path_hon_root + \"edges.txt\",\n origin_path=path_original_root + \"edges.txt\",\n save_path=path_hon_root + \"edges_label.txt\",\n save_path_origin=path_original_root + \"edges_label.txt\")\n\ndef get_degrees2(G, node_id_mapping, flag, save_path):\n print(\"正在生成特征(度)......\")\n degrees = nx.degree(G)\n new_degrees = []\n max_degree = 0\n for node in degrees:\n 
new_degrees.append((list(node_id_mapping.keys())[list(node_id_mapping.values()).index(node[0])], node[1]))\n if node[1] > max_degree:\n max_degree = node[1]\n if flag == 'degree':\n dim = 1\n features = pd.DataFrame(list(new_degrees))\n elif flag == 'degree-onehot':\n dim = 100 if max_degree > 100 else max_degree\n new_degrees_onehot = []\n for node in new_degrees:\n temp = [node[0]]\n temp.extend([0] * dim)\n if node[1] > 100:\n temp[100] = 1\n else:\n temp[node[1]] = 1\n new_degrees_onehot.append(tuple(temp))\n features = pd.DataFrame(list(new_degrees_onehot))\n else:\n print('Error flag!')\n return\n# node_num = len(node_id_mapping.keys())\n# for i in range(node_num):\n# temp = [list(node_id_mapping.keys())[i]]\n# temp.extend([0] * node_num)\n# temp[i+1] = 1\n# new_degrees.append(tuple(temp))\n features.columns = ['idx'] + ['degree'] * dim\n save_file = save_path + flag + '.csv'\n features.to_csv(save_file, index=False)\n\ndef save_orginal_emb(path_original_root):\n degree = pd.read_csv(path_original_root + 'traces.degree')\n idx = degree['idx']\n idx_map = {j: i for i, j in enumerate(idx)}\n\n # 读取高阶网络的边\n edge = np.genfromtxt(path_original_root + 'edges.txt')\n edges_unordered = edge\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n np.savetxt(path_original_root + \"edges_mapping.txt\", edges, delimiter='\t', fmt='%d')\n\n\ndef add_ho_edges():\n ho_edges = set()\n f = open(\"../data/click-stream-10/edges.txt\", 'r')\n lines = f.readlines()\n for l in lines:\n node1, node2 = l.split()\n node1 = node1.split(sep=\"|\")[0]\n node2 = node2.split(sep=\"|\")[0]\n if (node1, node2) not in ho_edges:\n ho_edges.add((node1, node2))\n\n original_edges = []\n f = open(\"../data/click-stream-10-original/edges.txt\", 'r')\n lines = f.readlines()\n for l in lines:\n n1, n2 = l.strip().split()\n original_edges.append((n1, n2))\n add_edges = []\n for oe in original_edges:\n if oe not in ho_edges:\n add_edges.append([oe[0] + '|', oe[1] + '|'])\n # ho_edges = np.genfromtxt(\"../data/click-stream-10/edges.txt\")\n # ho_edges = np.concatenate((ho_edges, np.array(add_edges)), axis=0)\n f = open(\"../data/click-stream-10/test.txt\", 'a+')\n for ae in add_edges:\n l = ae[0] + ' ' + ae[1]\n f.write(l+'\\n')\n f.close()\n print('done!')\n\n\n\n\n\nif __name__ == '__main__':\n # 生成度\n # dataname = \"traces-simulated\"\n # dataname = \"traces-10000\"\n dataname = \"click-stream-10\"\n # path_original_root = r\"../data/{}-original/\".format(dataname)\n path_hon_root = r\"../data/{}/\".format(dataname)\n\n\n # # 将微博数据抽取为序列\n # retwent_path = r\"../data/weibo/Retweet_Content.txt\"\n # num_path = r\"../data/weibo/Weibo_Retweet_Num.txt\"\n # save_path = r\"../data/weibo/traces_weibo_threshold50-300.txt\"\n # get_seq(retwent_path, num_path, save_path, thresholds=(50, 300))\n\n # 得到测试数据集中的test部分的neg的高阶网络的id\n # dataname = \"traces-1000\"\n # path_hon_root = r\"../data/{}/\".format(dataname)\n # path_original_root = r\"../data/{}-original/\".format(dataname)\n # # f = open(path_original_root + 'edges.txt', \"r\")\n # edges = np.genfromtxt(path_original_root + 'edges.txt',\n # dtype=np.dtype(str))\n # # lines = f.readlines()\n # G = nx.Graph(edges.tolist())\n # node1_id_list, node2_id_list = sample_neg_graph(G, path_hon_root, path_original_root, n_samples=10000)\n # print(\" get neg embedding\")\n # 将利用deepwalk得到的embedding进一步处理\n # deepwalk_emb_pro()\n\n # # get_origin()\n # # edges_proc()\n # dataname = \"click-stream-10\" # traces-10000 
click-stream-10\n Path = '../data/{}/edges.txt'.format(dataname)\n node_id_mapping = get_mapping(Path)\n G = get_Graph2(Path, node_id_mapping)\n flag = 'degree-onehot'\n get_degrees2(G, node_id_mapping, flag, '../data/{}/'.format(dataname))\n # print(\"done!\")\n # save_orginal_emb(path_original_root)\n # del_node(\"click-stream-10\")\n # add_ho_edges()\n pass\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.savetxt", "pandas.DataFrame", "numpy.genfromtxt", "numpy.mean", "numpy.argsort", "pandas.concat", "pandas.read_csv", "numpy.dtype" ] ]
rtaiello/learning-to-learn
[ "f3c1a8d176b8ea7cc60478bfcfdd10a7a52fd296" ]
[ "preprocess.py" ]
[ "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Learning 2 Learn preprocessing modules.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport sonnet as snt\nimport tensorflow as tf\n\n\nclass Clamp(snt.AbstractModule):\n\n def __init__(self, min_value=None, max_value=None, name=\"clamp\"):\n super(Clamp, self).__init__(name=name)\n self._min = min_value\n self._max = max_value\n\n def _build(self, inputs):\n output = inputs\n if self._min is not None:\n output = tf.maximum(output, self._min)\n if self._max is not None:\n output = tf.minimum(output, self._max)\n return output\n\n\nclass LogAndSign(snt.AbstractModule):\n \"\"\"Log and sign preprocessing.\n\n As described in https://arxiv.org/pdf/1606.04474v1.pdf (Appendix A).\n \"\"\"\n\n def __init__(self, k, name=\"preprocess_log\"):\n super(LogAndSign, self).__init__(name=name)\n self._k = k\n\n def _build(self, gradients):\n \"\"\"Connects the LogAndSign module into the graph.\n\n Args:\n gradients: `Tensor` of gradients with shape `[d_1, ..., d_n]`.\n\n Returns:\n `Tensor` with shape `[d_1, ..., d_n-1, 2 * d_n]`. The first `d_n` elements\n along the nth dimension correspond to the log output and the remaining\n `d_n` elements to the sign output.\n \"\"\"\n eps = np.finfo(gradients.dtype.as_numpy_dtype).eps\n ndims = gradients.get_shape().ndims\n\n log = tf.log(tf.abs(gradients) + eps)\n clamped_log = Clamp(min_value=-1.0)(log / self._k) # pylint: disable=not-callable\n sign = Clamp(min_value=-1.0, max_value=1.0)(gradients * np.exp(self._k)) # pylint: disable=not-callable\n\n return tf.concat([clamped_log, sign], ndims - 1)\n" ]
[ [ "tensorflow.abs", "tensorflow.minimum", "tensorflow.concat", "numpy.exp", "numpy.finfo", "tensorflow.maximum" ] ]
mohamed799/Learning-to-See-in-the-Dark
[ "80baad1011c7829be1a1269d2cee7f55b99a238a" ]
[ "train_Sony.py" ]
[ "# uniform content loss + adaptive threshold + per_class_input + recursive G\n# improvement upon cqf37\nfrom __future__ import division\nimport os, time, scipy.io\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport rawpy\nimport glob\nfrom PIL import Image\n\ninput_dir = '/content/Learning-to-See-in-the-Dark/dataset/Sony/short/'\ngt_dir = '/content/Learning-to-See-in-the-Dark/dataset/Sony/long/'\ncheckpoint_dir = '/content/Learning-to-See-in-the-Dark/result_Sony/'\nresult_dir = '/content/Learning-to-See-in-the-Dark/result_Sony/'\n\n# get train IDs\ntrain_fns = glob.glob(gt_dir + '0*.ARW')\ntrain_ids = [int(os.path.basename(train_fn)[0:5]) for train_fn in train_fns]\n\nps = 512 # patch size for training\nsave_freq = 500\n\nDEBUG = 0\nif DEBUG == 1:\n save_freq = 2\n train_ids = train_ids[0:5]\n\n\ndef lrelu(x):\n return tf.maximum(x * 0.2, x)\n\n\ndef upsample_and_concat(x1, x2, output_channels, in_channels):\n pool_size = 2\n deconv_filter = tf.Variable(tf.truncated_normal([pool_size, pool_size, output_channels, in_channels], stddev=0.02))\n deconv = tf.nn.conv2d_transpose(x1, deconv_filter, tf.shape(x2), strides=[1, pool_size, pool_size, 1])\n\n deconv_output = tf.concat([deconv, x2], 3)\n deconv_output.set_shape([None, None, None, output_channels * 2])\n\n return deconv_output\n\n\ndef network(input):\n conv1 = slim.conv2d(input, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_1')\n conv1 = slim.conv2d(conv1, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv1_2')\n pool1 = slim.max_pool2d(conv1, [2, 2], padding='SAME')\n\n conv2 = slim.conv2d(pool1, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_1')\n conv2 = slim.conv2d(conv2, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv2_2')\n pool2 = slim.max_pool2d(conv2, [2, 2], padding='SAME')\n\n conv3 = slim.conv2d(pool2, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_1')\n conv3 = slim.conv2d(conv3, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv3_2')\n pool3 = slim.max_pool2d(conv3, [2, 2], padding='SAME')\n\n conv4 = slim.conv2d(pool3, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_1')\n conv4 = slim.conv2d(conv4, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv4_2')\n pool4 = slim.max_pool2d(conv4, [2, 2], padding='SAME')\n\n conv5 = slim.conv2d(pool4, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_1')\n conv5 = slim.conv2d(conv5, 512, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv5_2')\n\n up6 = upsample_and_concat(conv5, conv4, 256, 512)\n conv6 = slim.conv2d(up6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_1')\n conv6 = slim.conv2d(conv6, 256, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv6_2')\n\n up7 = upsample_and_concat(conv6, conv3, 128, 256)\n conv7 = slim.conv2d(up7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_1')\n conv7 = slim.conv2d(conv7, 128, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv7_2')\n\n up8 = upsample_and_concat(conv7, conv2, 64, 128)\n conv8 = slim.conv2d(up8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_1')\n conv8 = slim.conv2d(conv8, 64, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv8_2')\n\n up9 = upsample_and_concat(conv8, conv1, 32, 64)\n conv9 = slim.conv2d(up9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_1')\n conv9 = slim.conv2d(conv9, 32, [3, 3], rate=1, activation_fn=lrelu, scope='g_conv9_2')\n\n conv10 = slim.conv2d(conv9, 12, [1, 1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10, 2)\n 
return out\n\n\ndef pack_raw(raw):\n    # pack Bayer image to 4 channels\n    im = raw.raw_image_visible.astype(np.float32)\n    im = np.maximum(im - 512, 0) / (16383 - 512) # subtract the black level\n\n    im = np.expand_dims(im, axis=2)\n    img_shape = im.shape\n    H = img_shape[0]\n    W = img_shape[1]\n\n    out = np.concatenate((im[0:H:2, 0:W:2, :],\n                          im[0:H:2, 1:W:2, :],\n                          im[1:H:2, 1:W:2, :],\n                          im[1:H:2, 0:W:2, :]), axis=2)\n    return out\n\n\nsess = tf.Session()\nin_image = tf.placeholder(tf.float32, [None, None, None, 4])\ngt_image = tf.placeholder(tf.float32, [None, None, None, 3])\nout_image = network(in_image)\n\nG_loss = tf.reduce_mean(tf.abs(out_image - gt_image))\n\nt_vars = tf.trainable_variables()\nlr = tf.placeholder(tf.float32)\nG_opt = tf.train.AdamOptimizer(learning_rate=lr).minimize(G_loss)\n\nsaver = tf.train.Saver()\nsess.run(tf.global_variables_initializer())\nckpt = tf.train.get_checkpoint_state(checkpoint_dir)\nif ckpt:\n    print('loaded ' + ckpt.model_checkpoint_path)\n    saver.restore(sess, ckpt.model_checkpoint_path)\n\n# Raw data takes long time to load. Keep them in memory after loaded.\ngt_images = [None] * 6000\ninput_images = {}\ninput_images['300'] = [None] * len(train_ids)\ninput_images['250'] = [None] * len(train_ids)\ninput_images['100'] = [None] * len(train_ids)\n\ng_loss = np.zeros((5000, 1))\n\nallfolders = glob.glob(result_dir + '*0')\nlastepoch = 0\nfor folder in allfolders:\n    lastepoch = np.maximum(lastepoch, int(folder[-4:]))\n\nlearning_rate = 1e-4\nfor epoch in range(lastepoch, 4001):\n    if os.path.isdir(result_dir + '%04d' % epoch):\n        continue\n    cnt = 0\n    if epoch > 2000:\n        learning_rate = 1e-5\n    for ind in np.random.permutation(len(train_ids)):\n        # get the path from image id\n        train_id = train_ids[ind]\n        in_files = glob.glob(input_dir + '%05d_00*.ARW' % train_id)\n        # np.random.random_integers is deprecated; randint's upper bound is exclusive\n        in_path = in_files[np.random.randint(0, len(in_files))]\n        in_fn = os.path.basename(in_path)\n\n        gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % train_id)\n        gt_path = gt_files[0]\n        gt_fn = os.path.basename(gt_path)\n        in_exposure = float(in_fn[9:-5])\n        gt_exposure = float(gt_fn[9:-5])\n        ratio = min(gt_exposure / in_exposure, 300)\n\n        st = time.time()\n        cnt += 1\n\n        if input_images[str(ratio)[0:3]][ind] is None:\n            raw = rawpy.imread(in_path)\n            input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw), axis=0) * ratio\n\n            gt_raw = rawpy.imread(gt_path)\n            im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)\n            gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)\n\n        # crop\n        H = input_images[str(ratio)[0:3]][ind].shape[1]\n        W = input_images[str(ratio)[0:3]][ind].shape[2]\n\n        xx = np.random.randint(0, W - ps)\n        yy = np.random.randint(0, H - ps)\n        input_patch = input_images[str(ratio)[0:3]][ind][:, yy:yy + ps, xx:xx + ps, :]\n        gt_patch = gt_images[ind][:, yy * 2:yy * 2 + ps * 2, xx * 2:xx * 2 + ps * 2, :]\n\n        if np.random.randint(2, size=1)[0] == 1:  # random flip\n            input_patch = np.flip(input_patch, axis=1)\n            gt_patch = np.flip(gt_patch, axis=1)\n        if np.random.randint(2, size=1)[0] == 1:\n            input_patch = np.flip(input_patch, axis=2)\n            gt_patch = np.flip(gt_patch, axis=2)\n        if np.random.randint(2, size=1)[0] == 1:  # random transpose\n            input_patch = np.transpose(input_patch, (0, 2, 1, 3))\n            gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))\n\n        input_patch = np.minimum(input_patch, 1.0)\n\n        _, G_current, output = sess.run([G_opt, G_loss, out_image],\n                                        feed_dict={in_image: input_patch, gt_image: gt_patch, lr: learning_rate})\n        output = 
np.minimum(np.maximum(output, 0), 1)\n g_loss[ind] = G_current\n\n print(\"%d %d Loss=%.3f Time=%.3f\" % (epoch, cnt, np.mean(g_loss[np.where(g_loss)]), time.time() - st))\n\n if epoch % save_freq == 0:\n if not os.path.isdir(result_dir + '%04d' % epoch):\n os.makedirs(result_dir + '%04d' % epoch)\n\n temp = np.concatenate((gt_patch[0, :, :, :], output[0, :, :, :]), axis=1)\n Image.fromarray((temp* 255).astype('uint8')).save(\n result_dir + '%04d/%05d_00_train_%d.jpg' % (epoch, train_id, ratio))\n\n saver.save(sess, checkpoint_dir + 'model.ckpt')\n" ]
[ [ "tensorflow.contrib.slim.max_pool2d", "numpy.minimum", "tensorflow.train.get_checkpoint_state", "numpy.where", "tensorflow.depth_to_space", "tensorflow.global_variables_initializer", "numpy.concatenate", "tensorflow.trainable_variables", "tensorflow.shape", "tensorflow.concat", "tensorflow.train.Saver", "numpy.random.randint", "numpy.transpose", "numpy.expand_dims", "tensorflow.abs", "tensorflow.train.AdamOptimizer", "numpy.zeros", "tensorflow.Session", "tensorflow.truncated_normal", "numpy.float32", "tensorflow.placeholder", "tensorflow.contrib.slim.conv2d", "tensorflow.maximum", "numpy.flip", "numpy.maximum" ] ]
niranjana687/hangar-py
[ "d4af0cd85c5588b59fb869097b3245d3b85ad8c3" ]
[ "tests/test_dataloaders.py" ]
[ "from os.path import join as pjoin\nfrom os import mkdir\nimport pytest\nimport numpy as np\nfrom hangar import Repository\n\n\ntry:\n import torch\n from torch.utils.data import DataLoader\n from hangar import make_torch_dataset\n skipTorch = False\nexcept ImportError:\n skipTorch = True\n\n\[email protected](skipTorch is True,\n reason='pytorch is not installed in the test environment.')\nclass TestTorchDataLoader(object):\n\n def test_warns_experimental(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.warns(UserWarning, match='Dataloaders are experimental'):\n make_torch_dataset([first_aset, second_aset])\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_warns_arrayset_sample_size_mismatch(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout(write=True)\n second_aset = co.arraysets['second_aset']\n del second_aset['10']\n co.commit('deleting')\n co.close()\n\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.warns(UserWarning, match='Arraysets do not contain equal number of samples'):\n make_torch_dataset([first_aset, second_aset])\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_multiple_dataset_loader(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout(write=True)\n second_aset = co.arraysets['second_aset']\n del second_aset['10']\n co.commit('deleting')\n co.close()\n\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.raises(ValueError):\n # emtpy list\n make_torch_dataset([])\n with pytest.raises(TypeError):\n # if more than one dataset, those should be in a list/tuple\n make_torch_dataset(first_aset, first_aset)\n\n with pytest.warns(UserWarning, match='Arraysets do not contain equal number of samples'):\n torch_dset = make_torch_dataset([first_aset, second_aset])\n loader = DataLoader(torch_dset, batch_size=6, drop_last=True)\n total_samples = 0\n for dset1, dset2 in loader:\n total_samples += dset1.shape[0]\n assert dset1.shape == (6, 5, 7)\n assert dset2.shape == (6, 5, 7)\n assert total_samples == 18 # drop last is True\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_dataset_loader_fails_with_write_enabled_checkout(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout(write=True)\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.raises(TypeError):\n make_torch_dataset([first_aset, second_aset])\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_with_keys(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n\n # with keys\n keys = ['2', '4', '5', '6', '7', '9', '15', '18', '19']\n bad_tensor0 = aset['0']\n bad_tensor1 = aset['1']\n bad_tensor3 = aset['3']\n bad_tensor8 = aset['8']\n\n torch_dset = make_torch_dataset(aset, keys=keys)\n loader = DataLoader(torch_dset, batch_size=3)\n total_batches = 0\n for batch in loader:\n assert batch[0].size(0) == 3\n total_batches += 1\n for sample in batch:\n assert not np.allclose(sample, bad_tensor0)\n assert not np.allclose(sample, bad_tensor1)\n assert not np.allclose(sample, 
bad_tensor3)\n assert not np.allclose(sample, bad_tensor8)\n assert total_batches == 3\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_with_index_range(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n\n # with keys\n bad_tensor0 = aset['0']\n bad_tensor1 = aset['1']\n\n # with index range\n index_range = slice(2, 20)\n torch_dset = make_torch_dataset(aset, index_range=index_range)\n loader = DataLoader(torch_dset, batch_size=3)\n total_batches = 0\n for batch in loader:\n assert batch[0].size(0) == 3\n total_batches += 1\n for sample in batch:\n assert not np.allclose(sample, bad_tensor0)\n assert not np.allclose(sample, bad_tensor1)\n assert total_batches == 6\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_field_names(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.raises(ValueError): # number of dsets and field_names are different\n make_torch_dataset([first_aset, second_aset], field_names=('input',))\n with pytest.raises(TypeError): # field_names's type is wrong\n make_torch_dataset([first_aset, second_aset], field_names={'input': '', 'target': ''})\n torch_dset = make_torch_dataset([first_aset, second_aset], field_names=('input', 'target'))\n assert hasattr(torch_dset[1], 'input')\n assert hasattr(torch_dset[1], 'target')\n if torch.__version__ > '1.0.1':\n loader = DataLoader(torch_dset, batch_size=5)\n for sample in loader:\n assert hasattr(sample, 'input')\n assert hasattr(sample, 'target')\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_lots_of_data_with_multiple_backend(self, repo_with_10000_samples):\n repo = repo_with_10000_samples\n co = repo.checkout()\n aset = co.arraysets['aset']\n torch_dset = make_torch_dataset([aset])\n loader = DataLoader(torch_dset, batch_size=1000, drop_last=True)\n for data in loader:\n assert data.aset.shape == (1000, 5, 7)\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n @pytest.mark.filterwarnings(\"ignore:Arrayset.* writtenaset contains `reference-only` samples\")\n def test_local_without_data_fails_no_common_no_local(self, written_two_cmt_server_repo, managed_tmpdir):\n new_tmpdir = pjoin(managed_tmpdir, 'new')\n mkdir(new_tmpdir)\n server, _ = written_two_cmt_server_repo\n repo = Repository(path=new_tmpdir, exists=False)\n repo.clone('name', '[email protected]', server, remove_old=True)\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n with pytest.raises(ValueError):\n torch_dset = make_torch_dataset(aset)\n co.close()\n repo._env._close_environments()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n @pytest.mark.filterwarnings(\"ignore:Arrayset.* writtenaset contains `reference-only` samples\")\n def test_local_without_data_fails_no_common(self, written_two_cmt_server_repo, managed_tmpdir):\n new_tmpdir = pjoin(managed_tmpdir, 'new')\n mkdir(new_tmpdir)\n server, _ = written_two_cmt_server_repo\n repo = Repository(path=new_tmpdir, exists=False)\n repo.clone('name', '[email protected]', server, remove_old=True)\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n with pytest.raises(KeyError):\n torch_dset = make_torch_dataset(aset, keys=['1', -1])\n co.close()\n repo._env._close_environments()\n\n 
@pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n @pytest.mark.filterwarnings(\"ignore:Arrayset.* writtenaset contains `reference-only` samples\")\n def test_local_without_data_fails_data_unavailable(self, written_two_cmt_server_repo, managed_tmpdir):\n new_tmpdir = pjoin(managed_tmpdir, 'new')\n mkdir(new_tmpdir)\n server, _ = written_two_cmt_server_repo\n repo = Repository(path=new_tmpdir, exists=False)\n repo.clone('name', '[email protected]', server, remove_old=True)\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n with pytest.raises(FileNotFoundError):\n torch_dset = make_torch_dataset(aset, keys=['1', '2'])\n co.close()\n repo._env._close_environments()\n\n\ntry:\n import tensorflow as tf\n tf.compat.v1.enable_eager_execution()\n from hangar import make_tf_dataset\n skipTF = False\nexcept ImportError:\n skipTF = True\n\n\[email protected](\n skipTF is True,\n reason='tensorflow is not installed in the test environment.')\nclass TestTfDataLoader(object):\n\n def test_warns_experimental(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.warns(UserWarning, match='Dataloaders are experimental'):\n make_tf_dataset([first_aset, second_aset])\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_wans_arrayset_sample_size_mismatch(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout(write=True)\n second_aset = co.arraysets['second_aset']\n del second_aset['10']\n co.commit('deleting')\n co.close()\n\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.warns(UserWarning, match='Arraysets do not contain equal number of samples'):\n make_tf_dataset([first_aset, second_aset])\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_dataset_loader(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n\n # multiple datasets\n tf_dset = make_tf_dataset([first_aset, second_aset])\n tf_dset = tf_dset.batch(6)\n for dset1, dset2 in tf_dset.take(2):\n assert dset1.shape == tf.TensorShape((6, 5, 7))\n assert dset2.shape == tf.TensorShape((6, 5, 7))\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_with_keys(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n\n # with keys\n keys = ['2', '4', '5', '6', '7', '9', '15', '18', '19']\n bad_tensor0 = aset['0']\n bad_tensor1 = aset['1']\n bad_tensor3 = aset['3']\n bad_tensor8 = aset['8']\n\n tf_dset = make_tf_dataset(aset, keys=keys)\n tf_dset = tf_dset.batch(3)\n total_batches = 0\n for dset1 in tf_dset:\n total_batches += 1\n assert dset1[0].shape == tf.TensorShape((3, 5, 7))\n for sample in dset1[0]:\n assert not np.allclose(sample, bad_tensor0)\n assert not np.allclose(sample, bad_tensor1)\n assert not np.allclose(sample, bad_tensor3)\n assert not np.allclose(sample, bad_tensor8)\n assert total_batches == 3\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_with_index_range(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n\n # with keys\n bad_tensor0 = aset['0']\n 
bad_tensor1 = aset['1']\n\n # with index range\n index_range = slice(2, 20)\n tf_dset = make_tf_dataset(aset, index_range=index_range)\n tf_dset = tf_dset.batch(3)\n total_batches = 0\n for dset1 in tf_dset:\n total_batches += 1\n assert dset1[0].shape == tf.TensorShape((3, 5, 7))\n for sample in dset1[0]:\n assert not np.allclose(sample, bad_tensor0)\n assert not np.allclose(sample, bad_tensor1)\n assert total_batches == 6\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_dataset_loader_fails_with_write_enabled_checkout(self, repo_with_20_samples):\n repo = repo_with_20_samples\n co = repo.checkout(write=True)\n first_aset = co.arraysets['writtenaset']\n second_aset = co.arraysets['second_aset']\n with pytest.raises(TypeError):\n make_tf_dataset([first_aset, second_aset])\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_variably_shaped(self, variable_shape_written_repo):\n # Variably shaped test is required since the collation is dependent on\n # the way we return the data from generator\n repo = variable_shape_written_repo\n co = repo.checkout(write=True)\n aset = co.arraysets['writtenaset']\n for i in range(5, 10):\n aset[i] = np.random.random((2, i))\n co.commit('added data')\n co.close()\n\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n tf_dset = make_tf_dataset(aset)\n shape_obj = tf.TensorShape((2, None))\n tf_dset = tf_dset.padded_batch(5, padded_shapes=(shape_obj,))\n for val in tf_dset:\n assert val[0].shape[0] == 5\n assert val[0].shape[1] == 2\n assert 11 > val[0].shape[2] > 4\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n def test_lots_of_data_with_multiple_backend(self, repo_with_10000_samples):\n repo = repo_with_10000_samples\n co = repo.checkout()\n aset = co.arraysets['aset']\n tf_dset = make_tf_dataset([aset])\n tf_dset = tf_dset.batch(1000)\n for data in tf_dset:\n assert data[0].shape == (1000, 5, 7)\n co.close()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n @pytest.mark.filterwarnings(\"ignore:Arrayset.* writtenaset contains `reference-only` samples\")\n def test_local_without_data_fails_no_common_no_local(self, written_two_cmt_server_repo, managed_tmpdir):\n new_tmpdir = pjoin(managed_tmpdir, 'new')\n mkdir(new_tmpdir)\n server, _ = written_two_cmt_server_repo\n repo = Repository(path=new_tmpdir, exists=False)\n repo.clone('name', '[email protected]', server, remove_old=True)\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n with pytest.raises(ValueError):\n tf_dset = make_tf_dataset(aset)\n co.close()\n repo._env._close_environments()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n @pytest.mark.filterwarnings(\"ignore:Arrayset.* writtenaset contains `reference-only` samples\")\n def test_local_without_data_fails_no_common(self, written_two_cmt_server_repo, managed_tmpdir):\n new_tmpdir = pjoin(managed_tmpdir, 'new')\n mkdir(new_tmpdir)\n server, _ = written_two_cmt_server_repo\n repo = Repository(path=new_tmpdir, exists=False)\n repo.clone('name', '[email protected]', server, remove_old=True)\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n with pytest.raises(KeyError):\n tf_dset = make_tf_dataset(aset, keys=['1', -1])\n co.close()\n repo._env._close_environments()\n\n @pytest.mark.filterwarnings(\"ignore:Dataloaders are experimental\")\n @pytest.mark.filterwarnings(\"ignore:Arrayset.* writtenaset contains `reference-only` samples\")\n def 
test_local_without_data_fails_data_unavailable(self, written_two_cmt_server_repo, managed_tmpdir):\n new_tmpdir = pjoin(managed_tmpdir, 'new')\n mkdir(new_tmpdir)\n server, _ = written_two_cmt_server_repo\n repo = Repository(path=new_tmpdir, exists=False)\n repo.clone('name', '[email protected]', server, remove_old=True)\n co = repo.checkout()\n aset = co.arraysets['writtenaset']\n with pytest.raises(FileNotFoundError):\n tf_dset = make_tf_dataset(aset, keys=['1', '2'])\n co.close()\n repo._env._close_environments()" ]
[ [ "tensorflow.TensorShape", "numpy.allclose", "torch.utils.data.DataLoader", "tensorflow.compat.v1.enable_eager_execution", "numpy.random.random" ] ]
tanmayb123/DeepSPADE
[ "3c62a0b588850b142b77dca6bb3f1d93f6c1e6b1" ]
[ "train2.py" ]
[ "\"\"\"\nTrain convolutional network for sentiment analysis. Based on\n\"Convolutional Neural Networks for Sentence Classification\" by Yoon Kim\nhttp://arxiv.org/pdf/1408.5882v2.pdf\n\nFor 'CNN-non-static' gets to 82.1% after 61 epochs with following settings:\nembedding_dim = 20\nfilter_sizes = (3, 4)\nnum_filters = 3\ndropout_prob = (0.7, 0.8)\nhidden_dims = 100\n\nFor 'CNN-rand' gets to 78-79% after 7-8 epochs with following settings:\nembedding_dim = 20\nfilter_sizes = (3, 4)\nnum_filters = 150\ndropout_prob = (0.25, 0.5)\nhidden_dims = 150\n\nFor 'CNN-static' gets to 75.4% after 7 epochs with following settings:\nembedding_dim = 100\nfilter_sizes = (3, 4)\nnum_filters = 150\ndropout_prob = (0.25, 0.5)\nhidden_dims = 150\n\n* it turns out that such a small data set as \"Movie reviews with one\nsentence per review\" (Pang and Lee, 2005) requires much smaller network\nthan the one introduced in the original article:\n- embedding dimension is only 20 (instead of 300; 'CNN-static' still requires ~100)\n- 2 filter sizes (instead of 3)\n- higher dropout probabilities and\n- 3 filters per filter size is enough for 'CNN-non-static' (instead of 100)\n- embedding initialization does not require prebuilt Google Word2Vec data.\nTraining Word2Vec on the same \"Movie reviews\" data set is enough to\nachieve performance reported in the article (81.6%)\n\n** Another distinct difference is slidind MaxPooling window of length=2\ninstead of MaxPooling over whole feature map as in the article\n\"\"\"\n\nimport numpy as np\nimport data_helpers\nfrom w2v import train_word2vec\nimport tensorflow as tf\nsess = tf.Session()\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Input, Merge, Convolution1D, MaxPooling1D\nfrom keras import backend as K\nK.set_session(sess)\n\nnp.random.seed(2)\n\n# Parameters\n# ==================================================\n#\n# Model Variations. 
See Kim Yoon's Convolutional Neural Networks for\n# Sentence Classification, Section 3 for details.\n\nmodel_variation = 'CNN-non-static' # CNN-rand | CNN-non-static | CNN-static\nprint('Model variation is %s' % model_variation)\n\n# Model Hyperparameters\nsequence_length = 45\nembedding_dim = 20\nfilter_sizes = (3, 4)\nnum_filters = 128\ndropout_prob = (0.25, 0.5)\nhidden_dims = 128\n\n# Training parameters\nbatch_size = 32\nnum_epochs = 30\nval_split = 0.1\n\n# Word2Vec parameters, see train_word2vec\nmin_word_count = 1 # Minimum word count\ncontext = 10 # Context window size\n\n# Data Preparation\n# ==================================================\n#\n# Load data\nprint(\"Loading data...\")\nx, y, vocabulary, vocabulary_inv = data_helpers.load_data()\n\nif model_variation=='CNN-non-static' or model_variation=='CNN-static':\n embedding_weights = train_word2vec(x, vocabulary_inv, embedding_dim, min_word_count, context)\n if model_variation=='CNN-static':\n x = embedding_weights[0][x]\nelif model_variation=='CNN-rand':\n embedding_weights = None\nelse:\n raise ValueError('Unknown model variation')\n\n# Shuffle data\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffled = x[shuffle_indices]\ny_shuffled = y[shuffle_indices].argmax(axis=1)\n\nprint(\"Vocabulary Size: {:d}\".format(len(vocabulary)))\n\n# Building model\n# ==================================================\n#\n# graph subnet with one input and one output,\n# convolutional layers concatenated in parallel\ngraph_in = Input(shape=(sequence_length, embedding_dim))\nconvs = []\nfor fsz in filter_sizes:\n conv = Convolution1D(nb_filter=num_filters,\n filter_length=fsz,\n border_mode='valid',\n activation='relu',\n subsample_length=1)(graph_in)\n pool = MaxPooling1D(pool_length=2)(conv)\n flatten = Flatten()(pool)\n convs.append(flatten)\n\nif len(filter_sizes)>1:\n out = Merge(mode='concat')(convs)\nelse:\n out = convs[0]\n\ngraph = Model(input=graph_in, output=out)\n\n# main sequential model\nmodel = Sequential()\nif not model_variation=='CNN-static':\n # input_length must match the sequence_length expected by the graph subnet\n # (this file hard-coded 198 here, which conflicts with sequence_length = 45)\n model.add(Embedding(len(vocabulary), embedding_dim, input_length=sequence_length,\n weights=embedding_weights))\n\nmodel.add(Dropout(dropout_prob[0], input_shape=(sequence_length, embedding_dim)))\nmodel.add(graph)\nmodel.add(Dense(hidden_dims))\nmodel.add(Dropout(dropout_prob[1]))\nmodel.add(Activation('relu'))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n# Training model\n# ==================================================\nmodel.fit(x_shuffled, y_shuffled, batch_size=batch_size,\n nb_epoch=num_epochs, validation_split=val_split, verbose=1)\n\nmodel.save('save_tmp.h5')\n" ]
[ [ "numpy.random.seed", "tensorflow.Session" ] ]
fhvilshoj/TorchLRP
[ "74253a1be05f0be0b7c535736023408670443b6e" ]
[ "examples/explain_vgg.py" ]
[ "import os\nimport sys\nimport torch\nimport pickle\nfrom torch.nn import Sequential, Conv2d, Linear\n\nimport pathlib\nimport argparse\nimport torchvision\nfrom torchvision import datasets, transforms as T\nimport configparser\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Append parent directory of this file to sys.path, \n# no matter where it is run from\nbase_path = pathlib.Path(__file__).parent.parent.absolute()\nsys.path.insert(0, base_path.as_posix())\n\nimport lrp\nfrom lrp.patterns import fit_patternnet, fit_patternnet_positive # PatternNet patterns\nfrom utils import store_patterns, load_patterns\nfrom visualization import project, clip_quantile, heatmap_grid, grid\n\ntorch.manual_seed(1337)\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n\n# # # # # ImageNet Data\nconfig = configparser.ConfigParser()\nconfig.read((base_path / 'config.ini').as_posix())\nsys.path.append(config['DEFAULT']['ImageNetDir'])\nfrom torch_imagenet import ImageNetDataset\n\n# Normalization as expected by pytorch vgg models\n# https://pytorch.org/docs/stable/torchvision/models.html\n_mean = torch.tensor([0.485, 0.456, 0.406], device=device).view((1, 3, 1, 1))\n_std = torch.tensor([0.229, 0.224, 0.225], device=device).view((1, 3, 1, 1))\n\ndef unnormalize(x):\n return x * _std + _mean\n\ntransform = T.Compose([\n T.Resize(256), \n T.CenterCrop(224), \n T.ToTensor(),\n T.Normalize( mean= _mean.flatten(),\n std = _std.flatten() ),\n])\n\ndataset = ImageNetDataset(transform=transform)\ntrain_loader = torch.utils.data.DataLoader(dataset, batch_size=12, shuffle=True)\n# # # # # End ImageNet Data\n\n# # # # # VGG model\nvgg_num = int(sys.argv[1]) if len(sys.argv) > 1 else 16 # Default to vgg16\n\nvgg = getattr(torchvision.models, \"vgg%i\"%vgg_num)(pretrained=True).to(device)\n# vgg = torchvision.models.vgg16(pretrained=True).to(device)\nvgg.eval()\n\nprint(\"Loaded vgg-%i\" % vgg_num)\n\nlrp_vgg = lrp.convert_vgg(vgg).to(device)\n# # # # #\n\n# Check that the vgg and lrp_vgg models does the same thing\nfor x, y in train_loader: break\nx = x.to(device)\nx.requires_grad_(True)\n\ny_hat = vgg(x)\ny_hat_lrp = lrp_vgg.forward(x)\n\nassert torch.allclose(y_hat, y_hat_lrp, atol=1e-4, rtol=1e-4), \"\\n\\n%s\\n%s\\n%s\" % (str(y_hat.view(-1)[:10]), str(y_hat_lrp.view(-1)[:10]), str((torch.abs(y_hat - y_hat_lrp)).max()))\nprint(\"Done testing\")\n# # # # #\n\n# # # # # Patterns for PatternNet and PatternAttribution\npatterns_path = (base_path / 'examples' / 'patterns' / ('vgg%i_pattern_pos.pkl' % vgg_num)).as_posix()\nif not os.path.exists(patterns_path):\n patterns = fit_patternnet_positive(lrp_vgg, train_loader, device=device)\n store_patterns(patterns_path, patterns)\nelse:\n patterns = [torch.tensor(p).to(device) for p in load_patterns(patterns_path)]\n\nprint(\"Loaded patterns\")\n\n# # # # # Plotting \ndef compute_and_plot_explanation(rule, ax_, patterns=None, plt_fn=heatmap_grid): \n # Forward pass\n y_hat_lrp = lrp_vgg.forward(x, explain=True, rule=rule, pattern=patterns)\n\n # Choose argmax\n y_hat_lrp = y_hat_lrp[torch.arange(x.shape[0]), y_hat_lrp.max(1)[1]]\n y_hat_lrp = y_hat_lrp.sum()\n\n # Backward pass (compute explanation)\n y_hat_lrp.backward()\n attr = x.grad\n\n # Plot\n attr = plt_fn(attr)\n ax_.imshow(attr)\n ax_.set_title(rule)\n ax_.axis('off')\n\n# PatternNet is typically handled a bit different, when visualized.\ndef signal_fn(X):\n if X.shape[1] in [1, 3]: X = X.permute(0, 2, 3, 1).detach().cpu().numpy()\n X = clip_quantile(X)\n X = project(X)\n X = 
grid(X)\n return X\n\nexplanations = [\n # rule Pattern plt_fn Fig. pos\n ('alpha1beta0', None, heatmap_grid, (1, 0)), \n ('epsilon', None, heatmap_grid, (0, 1)), \n ('gamma+epsilon', None, heatmap_grid, (1, 1)), \n ('patternnet', patterns, signal_fn, (0, 2)),\n ('patternattribution', patterns, heatmap_grid, (1, 2)),\n ]\n\nfig, ax = plt.subplots(2, 3, figsize=(12, 8))\nprint(\"Plotting\")\n\n# Plot inputs\ninput_to_plot = unnormalize(x).permute(0, 2, 3, 1).contiguous().detach().cpu().numpy()\ninput_to_plot = grid(input_to_plot, 3, 1.)\nax[0, 0].imshow(input_to_plot)\nax[0, 0].set_title(\"Input\")\nax[0, 0].axis('off')\n\n# Plot explanations\nfor i, (rule, pattern, fn, (p, q) ) in enumerate(explanations): \n compute_and_plot_explanation(rule, ax[p, q], patterns=pattern, plt_fn=fn)\n\nfig.tight_layout()\nfig.savefig((base_path / 'examples' / 'plots' / (\"vgg%i_explanations.png\" % vgg_num)).as_posix(), dpi=280)\nplt.show()\n\n\n\n" ]
[ [ "torch.arange", "matplotlib.pyplot.subplots", "torch.manual_seed", "torch.abs", "torch.cuda.is_available", "matplotlib.pyplot.show", "torch.utils.data.DataLoader", "torch.tensor", "torch.allclose" ] ]
loyanie/Mask_RCNN
[ "16f56ab86b9cb9834fcdc431e49eab119304b5da" ]
[ "mrcnn/model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specified in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different levels of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propagation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different 
layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\n anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\n # Positive and Negative anchors contribute to the loss,\n # but neutral anchors (match value = 0) don't.\n indices = tf.where(K.not_equal(rpn_match, 0))\n # Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\n anchor_class = tf.gather_nd(anchor_class, indices)\n # Cross entropy loss\n loss = K.sparse_categorical_crossentropy(target=anchor_class,\n output=rpn_class_logits,\n from_logits=True)\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n config: the model config object.\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unused bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Positive anchors contribute to the loss, but negative and\n # neutral anchors (match value of 0 or -1) don't.\n rpn_match = K.squeeze(rpn_match, -1)\n indices = tf.where(K.equal(rpn_match, 1))\n\n # Pick bbox deltas that contribute to the loss\n rpn_bbox = tf.gather_nd(rpn_bbox, indices)\n\n # Trim target bounding box deltas to the same length as rpn_bbox.\n batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\n target_bbox = batch_pack_graph(target_bbox, batch_counts,\n config.IMAGES_PER_GPU)\n\n loss = smooth_l1_loss(target_bbox, rpn_bbox)\n\n loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\n return loss\n\n\ndef mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\n active_class_ids):\n \"\"\"Loss for the classifier head of Mask RCNN.\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n # During model building, Keras calls this function with\n # target_class_ids of type float32. Unclear why. Cast it\n # to int to get around it.\n target_class_ids = tf.cast(target_class_ids, 'int64')\n\n # Find predictions of classes that are not in the dataset.\n pred_class_ids = tf.argmax(pred_class_logits, axis=2)\n # TODO: Update this line to work with batch > 1. Right now it assumes all\n # images in a batch have the same active_class_ids\n pred_active = tf.gather(active_class_ids[0], pred_class_ids)\n\n # Loss\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=target_class_ids, logits=pred_class_logits)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n loss = loss * pred_active\n\n # Compute the loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\ndef mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. 
Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n target_bbox = K.reshape(target_bbox, (-1, 4))\n pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indices.\n positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_roi_ix), tf.int64)\n indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\n\n # Gather the deltas (predicted and true) that contribute to loss\n target_bbox = tf.gather(target_bbox, positive_roi_ix)\n pred_bbox = tf.gather_nd(pred_bbox, indices)\n\n # Smooth-L1 Loss\n loss = K.switch(tf.size(target_bbox) > 0,\n smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\ndef mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n # Reshape for simplicity. Merge first two dimensions into one.\n target_class_ids = K.reshape(target_class_ids, (-1,))\n mask_shape = tf.shape(target_masks)\n target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\n pred_shape = tf.shape(pred_masks)\n pred_masks = K.reshape(pred_masks,\n (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\n # Permute predicted masks to [N, num_classes, height, width]\n pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\n\n # Only positive ROIs contribute to the loss. And only\n # the class specific mask of each ROI.\n positive_ix = tf.where(target_class_ids > 0)[:, 0]\n positive_class_ids = tf.cast(\n tf.gather(target_class_ids, positive_ix), tf.int64)\n indices = tf.stack([positive_ix, positive_class_ids], axis=1)\n\n # Gather the masks (predicted and true) that contribute to loss\n y_true = tf.gather(target_masks, positive_ix)\n y_pred = tf.gather_nd(pred_masks, indices)\n\n # Compute binary cross entropy. If no positive ROIs, then return 0.\n # shape: [batch, roi, num_classes]\n loss = K.switch(tf.size(y_true) > 0,\n K.binary_crossentropy(target=y_true, output=y_pred),\n tf.constant(0.0))\n loss = K.mean(loss)\n return loss\n\n\n############################################################\n# Data Generator\n############################################################\n\ndef load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\n use_mini_mask=False):\n \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n use_mini_mask: If False, returns full-size masks that are the same height\n and width as the original image. These can be big, for example\n 1024x1024x100 (for 100 instances). 
Mini masks are smaller, typically,\n 224x224 and are generated by extracting the bounding box of the\n object and resizing it to MINI_MASK_SHAPE.\n Returns:\n image: [height, width, 3]\n shape: the original shape of the image before resizing and cropping.\n class_ids: [instance_count] Integer class IDs\n bbox: [instance_count, (y1, x1, y2, x2)]\n mask: [height, width, instance_count]. The height and width are those\n of the image unless use_mini_mask is True, in which case they are\n defined in MINI_MASK_SHAPE.\n \"\"\"\n # Load image and mask\n image = dataset.load_image(image_id)\n mask, class_ids = dataset.load_mask(image_id)\n original_shape = image.shape\n image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=config.IMAGE_MIN_DIM,\n min_scale=config.IMAGE_MIN_SCALE,\n max_dim=config.IMAGE_MAX_DIM,\n mode=config.IMAGE_RESIZE_MODE)\n mask = utils.resize_mask(mask, scale, padding, crop)\n\n # Random horizontal flips.\n # TODO: will be removed in a future update in favor of augmentation\n if augment:\n logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\n if random.randint(0, 1):\n image = np.fliplr(image)\n mask = np.fliplr(mask)\n\n # Augmentation\n # This requires the imgaug lib (https://github.com/aleju/imgaug)\n if augmentation:\n import imgaug\n\n # Augmenters that are safe to apply to masks\n # Some, such as Affine, have settings that make them unsafe, so always\n # test your augmentation on masks\n MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\n \"Fliplr\", \"Flipud\", \"CropAndPad\",\n \"Affine\", \"PiecewiseAffine\"]\n\n def hook(images, augmenter, parents, default):\n \"\"\"Determines which augmenters to apply to masks.\"\"\"\n return augmenter.__class__.__name__ in MASK_AUGMENTERS\n\n # Store shapes before augmentation to compare\n image_shape = image.shape\n mask_shape = mask.shape\n # Make augmenters deterministic to apply similarly to images and masks\n det = augmentation.to_deterministic()\n image = det.augment_image(image)\n # Change mask to np.uint8 because imgaug doesn't support np.bool\n mask = det.augment_image(mask.astype(np.uint8),\n hooks=imgaug.HooksImages(activator=hook))\n # Verify that shapes didn't change\n assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\n assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\n # Change mask back to bool (np.bool is a deprecated alias of the builtin)\n mask = mask.astype(bool)\n\n # Some masks might be all zeros if the corresponding instance was cropped\n # out by resizing or augmentation. Filter those instances out here.\n _idx = np.sum(mask, axis=(0, 1)) > 0\n mask = mask[:, :, _idx]\n class_ids = class_ids[_idx]
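\n # A minimal sketch (commented out; assumes numpy as np) of what a\n # mask-to-box computation like utils.extract_bboxes boils down to for one\n # boolean instance channel m of shape [height, width]:\n # ys, xs = np.where(m)\n # y1, x1 = ys.min(), xs.min()\n # y2, x2 = ys.max() + 1, xs.max() + 1 # end coordinates are exclusive\n # The library call below also handles all-zero masks.\n\n # Bounding boxes. 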
Note that some boxes might be all zeros\n # if the corresponding mask got cropped out.\n # bbox: [num_instances, (y1, x1, y2, x2)]\n bbox = utils.extract_bboxes(mask)\n\n # Active classes\n # Different datasets have different classes, so track the\n # classes supported in the dataset of this image.\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\n active_class_ids[source_class_ids] = 1\n\n # Resize masks to smaller size to reduce memory usage\n if use_mini_mask:\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\n\n # Image meta data\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\n window, scale, active_class_ids)\n\n return image, image_meta, class_ids, bbox, mask\n\n\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\n This is not used in normal training. It's useful for debugging or to train\n the Mask RCNN heads without using the RPN head.\n Inputs:\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\n gt_class_ids: [instance count] Integer class IDs\n gt_boxes: [instance count, (y1, x1, y2, x2)]\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\n size or mini-masks.\n Returns:\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\n bbox refinements.\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\n to bbox boundaries and resized to neural network output size.\n \"\"\"\n assert rpn_rois.shape[0] > 0\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\n gt_class_ids.dtype)\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\n gt_boxes.dtype)\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\n gt_masks.dtype)\n\n # It's common to add GT Boxes to ROIs but we don't do that here because\n # according to XinLei Chen's paper, it doesn't help.\n\n # Trim empty padding in gt_boxes and gt_masks parts\n instance_ids = np.where(gt_class_ids > 0)[0]\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\n gt_class_ids = gt_class_ids[instance_ids]\n gt_boxes = gt_boxes[instance_ids]\n gt_masks = gt_masks[:, :, instance_ids]\n\n # Compute areas of ROIs and ground truth boxes.\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * (rpn_rois[:, 3] - rpn_rois[:, 1])\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] - gt_boxes[:, 1])\n\n # Compute overlaps [rpn_rois, gt_boxes]\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\n for i in range(overlaps.shape[1]):\n gt = gt_boxes[i]\n overlaps[:, i] = utils.compute_iou(\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\n\n # Assign ROIs to GT boxes\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\n rpn_roi_iou_max = overlaps[np.arange(\n overlaps.shape[0]), rpn_roi_iou_argmax]\n # GT box assigned to each ROI\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\n\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\n fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]\n\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\n # TODO: To hard example mine or not to hard example mine, that's the question\n # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
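\n # The targets below use the standard R-CNN box parameterization. A hedged\n # sketch of what utils.box_refinement computes for a (roi, gt) pair:\n # dy = (gt_center_y - center_y) / h\n # dx = (gt_center_x - center_x) / w\n # dh = np.log(gt_h / h)\n # dw = np.log(gt_w / w)\n # i.e. a shift in units of the source box size plus a log scale change,\n # matching the delta layout noted below.\n # 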
[y, x, log(h), log(w)]\n bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\n config.NUM_CLASSES, 4), dtype=np.float32)\n pos_ids = np.where(roi_gt_class_ids > 0)[0]\n bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\n rois[pos_ids], roi_gt_boxes[pos_ids, :4])\n # Normalize bbox refinements\n bboxes /= config.BBOX_STD_DEV\n\n # Generate class-specific target masks\n masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\n dtype=np.float32)\n for i in pos_ids:\n class_id = roi_gt_class_ids[i]\n assert class_id > 0, \"class id must be greater than 0\"\n gt_id = roi_gt_assignment[i]\n class_mask = gt_masks[:, :, gt_id]\n\n if config.USE_MINI_MASK:\n # Create a mask placeholder, the size of the image\n placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\n # GT box\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\n gt_w = gt_x2 - gt_x1\n gt_h = gt_y2 - gt_y1\n # Resize mini mask to size of GT box\n placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\n # Place the mini batch in the placeholder\n class_mask = placeholder\n\n # Pick part of the mask and resize it\n y1, x1, y2, x2 = rois[i].astype(np.int32)\n m = class_mask[y1:y2, x1:x2]\n mask = utils.resize(m, config.MASK_SHAPE)\n masks[i, :, :, class_id] = mask\n\n return rois, roi_gt_class_ids, bboxes, masks\n\n\ndef build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
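\n #\n # (Worked example with invented IoU values: an anchor whose best overlap\n # is 0.85 becomes positive via rule 3 below; one at 0.10 becomes negative\n # via rule 1; one at 0.55 stays neutral unless rule 2 promotes it because\n # it is the best anchor for some GT box.)\n #\n # 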
Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]\n rpn_match[gt_iou_argmax] = 1\n # 3. Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\n \"\"\"Generates ROI proposals similar to what a region proposal network\n would generate.\n image_shape: [Height, Width, Depth]\n count: Number of ROIs to generate\n gt_class_ids: [N] Integer ground truth class IDs\n gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\n Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\n \"\"\"\n # placeholder\n rois = np.zeros((count, 4), dtype=np.int32)\n\n # Generate random ROIs around GT boxes (90% of count)\n rois_per_box = int(0.9 * count / gt_boxes.shape[0])\n for i in range(gt_boxes.shape[0]):\n gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\n h = gt_y2 - gt_y1\n w = gt_x2 - gt_x1\n # random boundaries\n r_y1 = max(gt_y1 - h, 0)\n r_y2 = min(gt_y2 + h, image_shape[0])\n r_x1 = max(gt_x1 - w, 0)\n r_x2 = min(gt_x2 + w, image_shape[1])\n\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. 
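\n # The generation below leans on a small trick: draw coordinate PAIRS and\n # sort each pair so y1 <= y2 and x1 <= x2. Hedged standalone sketch\n # (assumes numpy as np):\n # pairs = np.random.randint(0, 10, (4, 2)) # e.g. [[7, 2], [3, 3], ...]\n # lo, hi = np.split(np.sort(pairs, axis=1), 2, axis=1)\n # # every row now satisfies lo <= hi; rows with lo == hi (zero extent)\n # # are the ones removed by the threshold test below.\n # 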
If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\n x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:rois_per_box]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:rois_per_box]\n if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into y1, x1, y2, x2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n box_rois = np.hstack([y1, x1, y2, x2])\n rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\n\n # Generate random ROIs anywhere in the image (10% of count)\n remaining_count = count - (rois_per_box * gt_boxes.shape[0])\n # To avoid generating boxes with zero area, we generate double what\n # we need and filter out the extra. If we get fewer valid boxes\n # than we need, we loop and try again.\n while True:\n y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\n x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\n # Filter out zero area boxes\n threshold = 1\n y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\n threshold][:remaining_count]\n x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\n threshold][:remaining_count]\n if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\n break\n\n # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\n # into y1, x1, y2, x2 order\n x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\n y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\n global_rois = np.hstack([y1, x1, y2, x2])\n rois[-remaining_count:] = global_rois\n return rois\n\n\ndef data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n no_augmentation_sources=None):\n \"\"\"A generator that returns images and corresponding target class ids,\n bounding box deltas, and masks.\n dataset: The Dataset object to pick data from\n config: The model config object\n shuffle: If True, shuffles the samples before every epoch\n augment: (deprecated. Use augmentation instead). If true, apply random\n image augmentation. Currently, only horizontal flipping is offered.\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\n For example, passing imgaug.augmenters.Fliplr(0.5) flips images\n right/left 50% of the time.\n random_rois: If > 0 then generate proposals to be used to train the\n network classifier and mask heads. Useful if training\n the Mask RCNN part without the RPN.\n batch_size: How many images to return in each call\n detection_targets: If True, generate detection targets (class IDs, bbox\n deltas, and masks). Typically for debugging or visualizations because\n in training detection targets are generated by DetectionTargetLayer.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is a string that identifies a dataset and is\n defined in the Dataset class.\n Returns a Python generator. Upon calling next() on it, the\n generator returns two lists, inputs and outputs. The contents\n of the lists differ depending on the received arguments:\n inputs list:\n - images: [batch, H, W, C]\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
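\n # Typical consumption of this generator (hypothetical variable names,\n # kept as a comment so nothing runs here):\n # gen = data_generator(train_dataset, config, shuffle=True,\n # batch_size=config.BATCH_SIZE)\n # inputs, outputs = next(gen)\n # # inputs: [images, image_meta, rpn_match, rpn_bbox,\n # # gt_class_ids, gt_boxes, gt_masks]; outputs is [] in regular\n # # training since the losses are wired inside the model graph.\n # 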
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
raise\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN():\n \"\"\"Encapsulates the Mask RCNN model functionality.\n The actual Keras model is in the keras_model property.\n \"\"\"\n\n def __init__(self, mode, config, model_dir):\n \"\"\"\n mode: Either \"training\" or \"inference\"\n config: A Sub-class of the Config class\n model_dir: Directory to save training logs and trained weights\n \"\"\"\n assert mode in ['training', 'inference']\n self.mode = mode\n self.config = config\n self.model_dir = model_dir\n self.set_log_dir()\n self.keras_model = self.build(mode=mode, config=config)\n\n def build(self, mode, config):\n \"\"\"Build Mask R-CNN architecture.\n input_shape: The shape of the input image.\n mode: Either \"training\" or \"inference\". The inputs and\n outputs of the model differ accordingly.\n \"\"\"\n assert mode in ['training', 'inference']\n\n # Image size must be divisible by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be divisible by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling. \"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n # RPN GT\n input_rpn_match = KL.Input(\n shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(\n shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n\n # Detection GT (class IDs, bounding boxes, and masks)\n # 1. GT Class IDs (zero padded)\n input_gt_class_ids = KL.Input(\n shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\n # 2. GT Boxes in pixels (zero padded)\n # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\n input_gt_boxes = KL.Input(\n shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\n # Normalize coordinates\n gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_gt_boxes)\n # 3. 
GT Masks (zero padded)\n # [batch, height, width, MAX_GT_INSTANCES]\n if config.USE_MINI_MASK:\n input_gt_masks = KL.Input(\n shape=[config.MINI_MASK_SHAPE[0],\n config.MINI_MASK_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n else:\n input_gt_masks = KL.Input(\n shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\n name=\"input_gt_masks\", dtype=bool)\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n if callable(config.BACKBONE):\n _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\n train_bn=config.TRAIN_BN)\n else:\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
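with two pyramid levels and three outputs each, zip(*...) transposes\n # the nesting. Hedged pure-Python sketch:\n # layer_outputs = [('a1', 'b1', 'c1'), ('a2', 'b2', 'c2')]\n # list(zip(*layer_outputs)) # -> [('a1', 'a2'), ('b1', 'b2'), ('c1', 'c2')]\n # i.e. 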
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\" else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask = DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox = fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, 
mrcnn_class, mrcnn_bbox = fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
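\n # Usage sketch (hypothetical checkpoint path; shown as a comment):\n # model.load_weights('/path/to/mask_rcnn_coco.h5', by_name=True,\n # exclude=['mrcnn_class_logits', 'mrcnn_bbox_fc',\n # 'mrcnn_bbox', 'mrcnn_mask'])\n # Excluding head layers is the usual recipe when fine-tuning with a\n # NUM_CLASSES different from the checkpoint's; the exact layer names here\n # are assumptions to adapt.\n # 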
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/' 'releases/download/v0.2/' 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.SGD(\n lr=learning_rate, momentum=momentum,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
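\n # Example of the patterns this method expects (illustrative only): the\n # pre-baked 'heads' regex used by train() below is\n # r'(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)'\n # and, since re.fullmatch() is used, a pattern must cover the whole layer\n # name, e.g. 'mrcnn_mask_conv1' matches '(mrcnn\_.*)'.\n # 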
Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))\n\n def set_log_dir(self, model_path=None):\n \"\"\"Sets the model log directory and epoch counter.\n model_path: If None, or a format different from what this code uses\n then set a new log directory and start epochs from 0. Otherwise,\n extract the log directory and the epoch counter from the file\n name.\n \"\"\"\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from where we left off. Get epoch and date from the file name\n # A sample model path might look like:\n # \\path\\to\\logs\\coco20171029T2315\\mask_rcnn_coco_0001.h5 (Windows)\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]mask\\_rcnn\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n print('Re-starting from epoch %d' % self.epoch)\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")\n\n def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\n augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\n \"\"\"Train the model.\n train_dataset, val_dataset: Training and validation Dataset objects.\n learning_rate: The learning rate to train with\n epochs: Number of training epochs. Note that previous training epochs\n are considered to be done already, so this actually determines\n the epochs to train in total rather than in this particular\n call.\n layers: Allows selecting which layers to train. It can be:\n - A regular expression to match layer names to train\n - One of these predefined values:\n heads: The RPN, classifier and mask heads of the network\n all: All the layers\n 3+: Train Resnet stage 3 and up\n 4+: Train Resnet stage 4 and up\n 5+: Train Resnet stage 5 and up\n augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\n augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\n flips images right/left 50% of the time. 
You can pass complex\n augmentations as well. This augmentation applies 50% of the\n time, and when it does it flips images right/left half the time\n and adds a Gaussian blur with a random sigma in range 0 to 5.\n augmentation = imgaug.augmenters.Sometimes(0.5, [\n imgaug.augmenters.Fliplr(0.5),\n imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\n ])\n custom_callbacks: Optional. Add custom callbacks to be called\n with the keras fit_generator method. Must be a list of type keras.callbacks.\n no_augmentation_sources: Optional. List of sources to exclude for\n augmentation. A source is a string that identifies a dataset and is\n defined in the Dataset class.\n \"\"\"\n assert self.mode == \"training\", \"Create model in training mode.\"\n\n # Pre-defined layer regular expressions\n layer_regex = {\n # all layers but the backbone\n \"heads\": r\"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)\",\n # From a specific Resnet stage and up\n \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)\",\n \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)\",\n \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)\",\n # All layers\n \"all\": \".*\",\n }\n if layers in layer_regex.keys():\n layers = layer_regex[layers]\n\n # Data generators\n train_generator = data_generator(train_dataset, self.config, shuffle=True,\n augmentation=augmentation,\n batch_size=self.config.BATCH_SIZE,\n no_augmentation_sources=no_augmentation_sources)\n val_generator = data_generator(val_dataset, self.config, shuffle=True,\n batch_size=self.config.BATCH_SIZE)\n\n # Create log_dir if it does not exist\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n # Callbacks\n callbacks = [\n keras.callbacks.TensorBoard(log_dir=self.log_dir,\n histogram_freq=0, write_graph=True, write_images=False),\n keras.callbacks.ModelCheckpoint(self.checkpoint_path,\n verbose=0, save_weights_only=True),\n ]\n\n # Add custom callbacks to the list\n if custom_callbacks:\n callbacks += custom_callbacks\n\n # Train\n log(\"\\nStarting at epoch {}. LR={}\\n\".format(self.epoch, learning_rate))\n log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\n self.set_trainable(layers)\n self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\n\n # Work-around for Windows: Keras fails on Windows when using\n # multiprocessing workers. See discussion here:\n # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\n if os.name == 'nt': # compare by value; 'is' on a str literal is unreliable\n workers = 0\n else:\n workers = multiprocessing.cpu_count()\n\n self.keras_model.fit_generator(\n train_generator,\n initial_epoch=self.epoch,\n epochs=epochs,\n steps_per_epoch=self.config.STEPS_PER_EPOCH,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=self.config.VALIDATION_STEPS,\n max_queue_size=100,\n workers=workers,\n use_multiprocessing=True,\n )\n self.epoch = max(self.epoch, epochs)\n\n def mold_inputs(self, images):\n \"\"\"Takes a list of images and modifies them to the format expected\n as an input to the neural network.\n images: List of image matrices [height,width,depth]. Images can have\n different sizes.\n Returns 3 Numpy matrices:\n molded_images: [N, h, w, 3]. Images resized and normalized.\n image_metas: [N, length of meta data]. Details about each image.\n windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
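\n # Toy numbers for the window arithmetic above (invented): with a\n # 1024x1024 molded image whose real content sits in window\n # (y1, x1, y2, x2) = (128, 0, 896, 1024) pixels, the normalized window is\n # roughly (0.125, 0.0, 0.875, 1.0), shift is [0.125, 0, 0.125, 0] and\n # scale is [0.75, 1.0, 0.75, 1.0], so a detection spanning the whole\n # window maps to about (0, 0, 1, 1) before denormalization.\n # 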
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1) if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n images: List of images, potentially of different sizes.\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ = self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks = self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE, \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ = self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks = self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a sub-set of the computation graph that computes the given\n outputs.\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n image_id: An int ID of the image. 
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image coordinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images) and subtracts\n the mean pixel and converts it to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes an image normalized with mold() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros. 
This removes zero boxes.\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n\n" ]
[ [ "tensorflow.exp", "numpy.random.choice", "tensorflow.image.non_max_suppression", "numpy.copy", "tensorflow.unique", "tensorflow.reshape", "numpy.where", "tensorflow.sqrt", "numpy.sort", "tensorflow.stack", "tensorflow.control_dependencies", "numpy.broadcast_to", "tensorflow.divide", "tensorflow.cast", "tensorflow.identity", "numpy.concatenate", "numpy.divide", "numpy.max", "tensorflow.shape", "numpy.empty", "tensorflow.concat", "numpy.log", "tensorflow.argmax", "tensorflow.image.crop_and_resize", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.add_n", "tensorflow.constant", "tensorflow.squeeze", "numpy.argmax", "numpy.random.randint", "numpy.arange", "tensorflow.split", "tensorflow.pad", "numpy.expand_dims", "tensorflow.abs", "numpy.array", "tensorflow.range", "tensorflow.minimum", "numpy.reshape", "numpy.zeros", "tensorflow.where", "tensorflow.gather_nd", "tensorflow.round", "tensorflow.expand_dims", "numpy.delete", "numpy.random.shuffle", "tensorflow.map_fn", "tensorflow.sparse_tensor_to_dense", "tensorflow.log", "numpy.stack", "numpy.amax", "tensorflow.reduce_sum", "tensorflow.nn.top_k", "numpy.hstack", "tensorflow.boolean_mask", "tensorflow.logical_and", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.fliplr", "tensorflow.random_shuffle", "tensorflow.size", "tensorflow.multiply", "numpy.sum", "numpy.ones", "tensorflow.equal", "numpy.any", "tensorflow.reduce_max", "numpy.abs", "tensorflow.gather", "tensorflow.maximum", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
baharefatemi/dgl
[ "ed1948b5555106dee133cef91ed9ecfd3bd4310d" ]
[ "examples/pytorch/jtnn/jtnn/chemutils.py" ]
[ "import rdkit\nimport rdkit.Chem as Chem\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree\nfrom collections import defaultdict\nfrom rdkit.Chem.EnumerateStereoisomers import EnumerateStereoisomers, StereoEnumerationOptions\n\nMST_MAX_WEIGHT = 100 \nMAX_NCAND = 2000\n\ndef set_atommap(mol, num=0):\n for atom in mol.GetAtoms():\n atom.SetAtomMapNum(num)\n\ndef get_mol(smiles):\n mol = Chem.MolFromSmiles(smiles)\n if mol is None: \n return None\n Chem.Kekulize(mol)\n return mol\n\ndef get_smiles(mol):\n return Chem.MolToSmiles(mol, kekuleSmiles=True)\n\ndef decode_stereo(smiles2D):\n mol = Chem.MolFromSmiles(smiles2D)\n dec_isomers = list(EnumerateStereoisomers(mol))\n\n dec_isomers = [Chem.MolFromSmiles(Chem.MolToSmiles(mol, isomericSmiles=True)) for mol in dec_isomers]\n smiles3D = [Chem.MolToSmiles(mol, isomericSmiles=True) for mol in dec_isomers]\n\n chiralN = [atom.GetIdx() for atom in dec_isomers[0].GetAtoms() if int(atom.GetChiralTag()) > 0 and atom.GetSymbol() == \"N\"]\n if len(chiralN) > 0:\n for mol in dec_isomers:\n for idx in chiralN:\n mol.GetAtomWithIdx(idx).SetChiralTag(Chem.rdchem.ChiralType.CHI_UNSPECIFIED)\n smiles3D.append(Chem.MolToSmiles(mol, isomericSmiles=True))\n\n return smiles3D\n\ndef sanitize(mol):\n try:\n smiles = get_smiles(mol)\n mol = get_mol(smiles)\n except Exception as e:\n return None\n return mol\n\ndef copy_atom(atom):\n new_atom = Chem.Atom(atom.GetSymbol())\n new_atom.SetFormalCharge(atom.GetFormalCharge())\n new_atom.SetAtomMapNum(atom.GetAtomMapNum())\n return new_atom\n\ndef copy_edit_mol(mol):\n new_mol = Chem.RWMol(Chem.MolFromSmiles(''))\n for atom in mol.GetAtoms():\n new_atom = copy_atom(atom)\n new_mol.AddAtom(new_atom)\n for bond in mol.GetBonds():\n a1 = bond.GetBeginAtom().GetIdx()\n a2 = bond.GetEndAtom().GetIdx()\n bt = bond.GetBondType()\n new_mol.AddBond(a1, a2, bt)\n return new_mol\n\ndef get_clique_mol(mol, atoms):\n smiles = Chem.MolFragmentToSmiles(mol, atoms, kekuleSmiles=True)\n new_mol = Chem.MolFromSmiles(smiles, sanitize=False)\n new_mol = copy_edit_mol(new_mol).GetMol()\n new_mol = sanitize(new_mol) #We assume this is not None\n return new_mol\n\ndef tree_decomp(mol):\n n_atoms = mol.GetNumAtoms()\n if n_atoms == 1:\n return [[0]], []\n\n cliques = []\n for bond in mol.GetBonds():\n a1 = bond.GetBeginAtom().GetIdx()\n a2 = bond.GetEndAtom().GetIdx()\n if not bond.IsInRing():\n cliques.append([a1,a2])\n\n ssr = [list(x) for x in Chem.GetSymmSSSR(mol)]\n cliques.extend(ssr)\n\n nei_list = [[] for i in range(n_atoms)]\n for i in range(len(cliques)):\n for atom in cliques[i]:\n nei_list[atom].append(i)\n \n #Merge Rings with intersection > 2 atoms\n for i in range(len(cliques)):\n if len(cliques[i]) <= 2: continue\n for atom in cliques[i]:\n for j in nei_list[atom]:\n if i >= j or len(cliques[j]) <= 2: continue\n inter = set(cliques[i]) & set(cliques[j])\n if len(inter) > 2:\n cliques[i].extend(cliques[j])\n cliques[i] = list(set(cliques[i]))\n cliques[j] = []\n \n cliques = [c for c in cliques if len(c) > 0]\n nei_list = [[] for i in range(n_atoms)]\n for i in range(len(cliques)):\n for atom in cliques[i]:\n nei_list[atom].append(i)\n \n #Build edges and add singleton cliques\n edges = defaultdict(int)\n for atom in range(n_atoms):\n if len(nei_list[atom]) <= 1: \n continue\n cnei = nei_list[atom]\n bonds = [c for c in cnei if len(cliques[c]) == 2]\n rings = [c for c in cnei if len(cliques[c]) > 4]\n if len(bonds) > 2 or (len(bonds) == 2 and len(cnei) > 2): #In general, if len(cnei) 
>= 3, a singleton should be added, but 1 bond + 2 ring is currently not dealt with.\n cliques.append([atom])\n c2 = len(cliques) - 1\n for c1 in cnei:\n edges[(c1,c2)] = 1\n elif len(rings) > 2: #Multiple (n>2) complex rings\n cliques.append([atom])\n c2 = len(cliques) - 1\n for c1 in cnei:\n edges[(c1,c2)] = MST_MAX_WEIGHT - 1\n else:\n for i in range(len(cnei)):\n for j in range(i + 1, len(cnei)):\n c1,c2 = cnei[i],cnei[j]\n inter = set(cliques[c1]) & set(cliques[c2])\n if edges[(c1,c2)] < len(inter):\n edges[(c1,c2)] = len(inter) #cnei[i] < cnei[j] by construction\n\n edges = [u + (MST_MAX_WEIGHT-v,) for u,v in edges.items()]\n if len(edges) == 0:\n return cliques, edges\n\n #Compute Maximum Spanning Tree\n row,col,data = list(zip(*edges))\n n_clique = len(cliques)\n clique_graph = csr_matrix( (data,(row,col)), shape=(n_clique,n_clique) )\n junc_tree = minimum_spanning_tree(clique_graph)\n row,col = junc_tree.nonzero()\n edges = [(row[i],col[i]) for i in range(len(row))]\n return (cliques, edges)\n\ndef atom_equal(a1, a2):\n return a1.GetSymbol() == a2.GetSymbol() and a1.GetFormalCharge() == a2.GetFormalCharge()\n\n#Bond type not considered because all aromatic (so SINGLE matches DOUBLE)\ndef ring_bond_equal(b1, b2, reverse=False):\n b1 = (b1.GetBeginAtom(), b1.GetEndAtom())\n if reverse:\n b2 = (b2.GetEndAtom(), b2.GetBeginAtom())\n else:\n b2 = (b2.GetBeginAtom(), b2.GetEndAtom())\n return atom_equal(b1[0], b2[0]) and atom_equal(b1[1], b2[1])\n\ndef attach_mols_nx(ctr_mol, neighbors, prev_nodes, nei_amap):\n prev_nids = [node['nid'] for node in prev_nodes]\n for nei_node in prev_nodes + neighbors:\n nei_id, nei_mol = nei_node['nid'], nei_node['mol']\n amap = nei_amap[nei_id]\n for atom in nei_mol.GetAtoms():\n if atom.GetIdx() not in amap:\n new_atom = copy_atom(atom)\n amap[atom.GetIdx()] = ctr_mol.AddAtom(new_atom)\n\n if nei_mol.GetNumBonds() == 0:\n nei_atom = nei_mol.GetAtomWithIdx(0)\n ctr_atom = ctr_mol.GetAtomWithIdx(amap[0])\n ctr_atom.SetAtomMapNum(nei_atom.GetAtomMapNum())\n else:\n for bond in nei_mol.GetBonds():\n a1 = amap[bond.GetBeginAtom().GetIdx()]\n a2 = amap[bond.GetEndAtom().GetIdx()]\n if ctr_mol.GetBondBetweenAtoms(a1, a2) is None:\n ctr_mol.AddBond(a1, a2, bond.GetBondType())\n elif nei_id in prev_nids: #father node overrides\n ctr_mol.RemoveBond(a1, a2)\n ctr_mol.AddBond(a1, a2, bond.GetBondType())\n return ctr_mol\n\ndef local_attach_nx(ctr_mol, neighbors, prev_nodes, amap_list):\n ctr_mol = copy_edit_mol(ctr_mol)\n nei_amap = {nei['nid']: {} for nei in prev_nodes + neighbors}\n\n for nei_id,ctr_atom,nei_atom in amap_list:\n nei_amap[nei_id][nei_atom] = ctr_atom\n\n ctr_mol = attach_mols_nx(ctr_mol, neighbors, prev_nodes, nei_amap)\n return ctr_mol.GetMol()\n\n#This version records idx mapping between ctr_mol and nei_mol\ndef enum_attach_nx(ctr_mol, nei_node, amap, singletons):\n nei_mol,nei_idx = nei_node['mol'], nei_node['nid']\n att_confs = []\n black_list = [atom_idx for nei_id,atom_idx,_ in amap if nei_id in singletons]\n ctr_atoms = [atom for atom in ctr_mol.GetAtoms() if atom.GetIdx() not in black_list]\n ctr_bonds = [bond for bond in ctr_mol.GetBonds()]\n\n if nei_mol.GetNumBonds() == 0: #neighbor singleton\n nei_atom = nei_mol.GetAtomWithIdx(0)\n used_list = [atom_idx for _,atom_idx,_ in amap]\n for atom in ctr_atoms:\n if atom_equal(atom, nei_atom) and atom.GetIdx() not in used_list:\n new_amap = amap + [(nei_idx, atom.GetIdx(), 0)]\n att_confs.append( new_amap )\n \n elif nei_mol.GetNumBonds() == 1: #neighbor is a bond\n bond = 
nei_mol.GetBondWithIdx(0)\n bond_val = int(bond.GetBondTypeAsDouble())\n b1,b2 = bond.GetBeginAtom(), bond.GetEndAtom()\n\n for atom in ctr_atoms: \n #Optimize if atom is carbon (other atoms may change valence)\n if atom.GetAtomicNum() == 6 and atom.GetTotalNumHs() < bond_val:\n continue\n if atom_equal(atom, b1):\n new_amap = amap + [(nei_idx, atom.GetIdx(), b1.GetIdx())]\n att_confs.append( new_amap )\n elif atom_equal(atom, b2):\n new_amap = amap + [(nei_idx, atom.GetIdx(), b2.GetIdx())]\n att_confs.append( new_amap )\n else: \n #intersection is an atom\n for a1 in ctr_atoms:\n for a2 in nei_mol.GetAtoms():\n if atom_equal(a1, a2):\n #Optimize if atom is carbon (other atoms may change valence)\n if a1.GetAtomicNum() == 6 and a1.GetTotalNumHs() + a2.GetTotalNumHs() < 4:\n continue\n new_amap = amap + [(nei_idx, a1.GetIdx(), a2.GetIdx())]\n att_confs.append( new_amap )\n\n #intersection is an bond\n if ctr_mol.GetNumBonds() > 1:\n for b1 in ctr_bonds:\n for b2 in nei_mol.GetBonds():\n if ring_bond_equal(b1, b2):\n new_amap = amap + [(nei_idx, b1.GetBeginAtom().GetIdx(), b2.GetBeginAtom().GetIdx()), (nei_idx, b1.GetEndAtom().GetIdx(), b2.GetEndAtom().GetIdx())]\n att_confs.append( new_amap )\n\n if ring_bond_equal(b1, b2, reverse=True):\n new_amap = amap + [(nei_idx, b1.GetBeginAtom().GetIdx(), b2.GetEndAtom().GetIdx()), (nei_idx, b1.GetEndAtom().GetIdx(), b2.GetBeginAtom().GetIdx())]\n att_confs.append( new_amap )\n return att_confs\n\n#Try rings first: Speed-Up \ndef enum_assemble_nx(node, neighbors, prev_nodes=[], prev_amap=[]):\n all_attach_confs = []\n singletons = [nei_node['nid'] for nei_node in neighbors + prev_nodes if nei_node['mol'].GetNumAtoms() == 1]\n\n def search(cur_amap, depth):\n if len(all_attach_confs) > MAX_NCAND:\n return\n if depth == len(neighbors):\n all_attach_confs.append(cur_amap)\n return\n\n nei_node = neighbors[depth]\n cand_amap = enum_attach_nx(node['mol'], nei_node, cur_amap, singletons)\n cand_smiles = set()\n candidates = []\n for amap in cand_amap:\n cand_mol = local_attach_nx(node['mol'], neighbors[:depth+1], prev_nodes, amap)\n cand_mol = sanitize(cand_mol)\n if cand_mol is None:\n continue\n smiles = get_smiles(cand_mol)\n if smiles in cand_smiles:\n continue\n cand_smiles.add(smiles)\n candidates.append(amap)\n\n if len(candidates) == 0:\n return []\n\n for new_amap in candidates:\n search(new_amap, depth + 1)\n\n search(prev_amap, 0)\n cand_smiles = set()\n candidates = []\n for amap in all_attach_confs:\n cand_mol = local_attach_nx(node['mol'], neighbors, prev_nodes, amap)\n cand_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cand_mol))\n smiles = Chem.MolToSmiles(cand_mol)\n if smiles in cand_smiles:\n continue\n cand_smiles.add(smiles)\n Chem.Kekulize(cand_mol)\n candidates.append( (smiles,cand_mol,amap) )\n\n return candidates\n\n#Only used for debugging purpose\ndef dfs_assemble_nx(graph, cur_mol, global_amap, fa_amap, cur_node_id, fa_node_id):\n cur_node = graph.nodes_dict[cur_node_id]\n fa_node = graph.nodes_dict[fa_node_id] if fa_node_id is not None else None\n\n fa_nid = fa_node['nid'] if fa_node is not None else -1\n prev_nodes = [fa_node] if fa_node is not None else []\n\n children_id = [nei for nei in graph[cur_node_id] if graph.nodes_dict[nei]['nid'] != fa_nid]\n children = [graph.nodes_dict[nei] for nei in children_id]\n neighbors = [nei for nei in children if nei['mol'].GetNumAtoms() > 1]\n neighbors = sorted(neighbors, key=lambda x:x['mol'].GetNumAtoms(), reverse=True)\n singletons = [nei for nei in children if nei['mol'].GetNumAtoms() 
== 1]\n neighbors = singletons + neighbors\n\n cur_amap = [(fa_nid,a2,a1) for nid,a1,a2 in fa_amap if nid == cur_node['nid']]\n cands = enum_assemble_nx(graph.nodes_dict[cur_node_id], neighbors, prev_nodes, cur_amap)\n if len(cands) == 0:\n return\n\n cand_smiles, _, cand_amap = zip(*cands)\n label_idx = cand_smiles.index(cur_node['label'])\n label_amap = cand_amap[label_idx]\n\n for nei_id,ctr_atom,nei_atom in label_amap:\n if nei_id == fa_nid:\n continue\n global_amap[nei_id][nei_atom] = global_amap[cur_node['nid']][ctr_atom]\n \n cur_mol = attach_mols_nx(cur_mol, children, [], global_amap) #father is already attached\n for nei_node_id, nei_node in zip(children_id, children):\n if not nei_node['is_leaf']:\n dfs_assemble_nx(graph, cur_mol, global_amap, label_amap, nei_node_id, cur_node_id)\n" ]
[ [ "scipy.sparse.csr_matrix", "scipy.sparse.csgraph.minimum_spanning_tree" ] ]
helia95/SpeakerRecognition_tutorial
[ "5c00f9165fd260d50b74ab46e4d81d7cfd77ab8c" ]
[ "model/resnet.py" ]
[ "\"\"\"Imported from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\r\nand added support for the 1x32x32 mel spectrogram for the speech recognition.\r\nKaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun: Deep Residual Learning for Image Recognition\r\nhttps://arxiv.org/abs/1512.03385\r\n\"\"\"\r\n\r\nimport torch.nn as nn\r\nimport math\r\nimport torch.utils.model_zoo as model_zoo\r\n\r\n\r\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\r\n 'resnet152']\r\n\r\n\r\nmodel_urls = {\r\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\r\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\r\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\r\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\r\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\r\n}\r\n\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n \"\"\"3x3 convolution with padding\"\"\"\r\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n\r\n\r\nclass BasicBlock(nn.Module):\r\n expansion = 1\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(BasicBlock, self).__init__()\r\n self.conv1 = conv3x3(inplanes, planes, stride)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.conv2 = conv3x3(planes, planes)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass Bottleneck(nn.Module):\r\n expansion = 4\r\n\r\n def __init__(self, inplanes, planes, stride=1, downsample=None):\r\n super(Bottleneck, self).__init__()\r\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(planes)\r\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\r\n padding=1, bias=False)\r\n self.bn2 = nn.BatchNorm2d(planes)\r\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\r\n self.bn3 = nn.BatchNorm2d(planes * 4)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.downsample = downsample\r\n self.stride = stride\r\n\r\n def forward(self, x):\r\n residual = x\r\n\r\n out = self.conv1(x)\r\n out = self.bn1(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n\r\n if self.downsample is not None:\r\n residual = self.downsample(x)\r\n\r\n out += residual\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass ResNet(nn.Module):\r\n\r\n def __init__(self, block, layers, num_classes=1000, in_channels=1):\r\n self.inplanes = 16\r\n super(ResNet, self).__init__()\r\n self.conv1 = nn.Conv2d(in_channels, 16, kernel_size=7, stride=1, padding=3,\r\n bias=False) # ori : stride = 2\r\n self.bn1 = nn.BatchNorm2d(16)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n self.layer1 = self._make_layer(block, 16, layers[0])\r\n self.layer2 = self._make_layer(block, 32, layers[1], stride=2)\r\n self.layer3 = self._make_layer(block, 64, layers[2], stride=2)\r\n self.layer4 = 
self._make_layer(block, 128, layers[3], stride=2)\r\n self.avgpool = nn.AvgPool2d(1, stride=1)\r\n self.fc = nn.Linear(128 * block.expansion, num_classes)\r\n\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\r\n m.weight.data.normal_(0, math.sqrt(2. / n))\r\n elif isinstance(m, nn.BatchNorm2d):\r\n m.weight.data.fill_(1)\r\n m.bias.data.zero_()\r\n\r\n def _make_layer(self, block, planes, blocks, stride=1):\r\n downsample = None\r\n if stride != 1 or self.inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.Conv2d(self.inplanes, planes * block.expansion,\r\n kernel_size=1, stride=stride, bias=False),\r\n nn.BatchNorm2d(planes * block.expansion),\r\n )\r\n\r\n layers = []\r\n layers.append(block(self.inplanes, planes, stride, downsample))\r\n self.inplanes = planes * block.expansion\r\n for i in range(1, blocks):\r\n layers.append(block(self.inplanes, planes))\r\n\r\n return nn.Sequential(*layers)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu(x)\r\n x = self.maxpool(x)\r\n\r\n x = self.layer1(x)\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.layer4(x)\r\n\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n\r\n return x\r\n\r\n\r\ndef resnet18(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-18 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\r\n return model\r\n\r\n\r\ndef resnet34(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-34 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\r\n return model\r\n\r\n\r\ndef resnet50(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-50 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\r\n return model\r\n\r\n\r\ndef resnet101(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-101 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\r\n return model\r\n\r\n\r\ndef resnet152(pretrained=False, **kwargs):\r\n \"\"\"Constructs a ResNet-152 model.\r\n Args:\r\n pretrained (bool): If True, returns a model pre-trained on ImageNet\r\n \"\"\"\r\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\r\n if pretrained:\r\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\r\n return model" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
jackyjsy/SGGAN
[ "bf07e933f8a53eff30ecb7398324a0b549508fa3" ]
[ "model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"Residual Block.\"\"\"\n def __init__(self, dim_in, dim_out):\n super(ResidualBlock, self).__init__()\n self.main = nn.Sequential(\n nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),\n nn.InstanceNorm2d(dim_out, affine=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),\n nn.InstanceNorm2d(dim_out, affine=True))\n\n def forward(self, x):\n return x + self.main(x)\n\n\nclass Generator(nn.Module):\n \"\"\"Generator. Encoder-Decoder Architecture.\"\"\"\n def __init__(self, conv_dim=64, c_dim=5, s_dim=7, repeat_num=6):\n super(Generator, self).__init__()\n print('initializing generator')\n print(c_dim)\n print(s_dim)\n layers = []\n layers.append(nn.Conv2d(3+c_dim+s_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))\n layers.append(nn.InstanceNorm2d(conv_dim, affine=True))\n layers.append(nn.ReLU(inplace=True))\n\n # Down-Sampling\n curr_dim = conv_dim\n for i in range(2):\n layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))\n layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))\n layers.append(nn.ReLU(inplace=True))\n curr_dim = curr_dim * 2\n\n # Bottleneck\n for i in range(repeat_num):\n layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))\n\n # Up-Sampling\n for i in range(2):\n layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))\n layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))\n layers.append(nn.ReLU(inplace=True))\n curr_dim = curr_dim // 2\n\n layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))\n layers.append(nn.Tanh())\n self.main = nn.Sequential(*layers)\n\n def forward(self, x, c, s):\n # replicate spatially and concatenate domain information\n c = c.unsqueeze(2).unsqueeze(3)\n c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))\n x = torch.cat([x, c], dim=1)\n x = torch.cat([x,s], dim=1)\n return self.main(x)\n\n\nclass Discriminator(nn.Module):\n \"\"\"Discriminator. PatchGAN.\"\"\"\n def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):\n super(Discriminator, self).__init__()\n c_dim=5\n layers = []\n layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))\n layers.append(nn.LeakyReLU(0.01, inplace=True))\n\n curr_dim = conv_dim\n for i in range(1, repeat_num):\n layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))\n layers.append(nn.LeakyReLU(0.01, inplace=True))\n curr_dim = curr_dim * 2\n\n k_size = int(image_size / np.power(2, repeat_num))\n self.main = nn.Sequential(*layers)\n self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)\n self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=k_size, bias=False)\n\n def forward(self, x):\n h = self.main(x)\n out_real = self.conv1(h)\n out_aux = self.conv2(h)\n return out_real.squeeze(), out_aux.squeeze()\n\nclass Segmentor(nn.Module):\n \"\"\"Generator. 
Encoder-Decoder Architecture.\"\"\"\n def __init__(self, conv_dim=64, repeat_num=4):\n super(Segmentor, self).__init__()\n\n layers = []\n layers.append(nn.Conv2d(3, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))\n layers.append(nn.InstanceNorm2d(conv_dim, affine=True))\n layers.append(nn.ReLU(inplace=True))\n\n # Down-Sampling\n curr_dim = conv_dim\n for i in range(2):\n layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))\n layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))\n layers.append(nn.ReLU(inplace=True))\n curr_dim = curr_dim * 2\n\n # Bottleneck\n for i in range(repeat_num):\n layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))\n\n # Up-Sampling\n for i in range(2):\n layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))\n layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))\n layers.append(nn.ReLU(inplace=True))\n curr_dim = curr_dim // 2\n\n layers.append(nn.Conv2d(curr_dim, 7, kernel_size=7, stride=1, padding=3, bias=False))\n # layers.append(nn.LogSoftmax())\n # layers.append(nn.Softmax2d())\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.main(x)" ]
[ [ "torch.cat", "torch.nn.Sequential", "torch.nn.Tanh", "torch.nn.LeakyReLU", "torch.nn.ConvTranspose2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "numpy.power" ] ]
testingautomated-usi/rl-plasticity-experiments
[ "a32cebcee89f6f734477a1f1bdd8b7f8ef7aa99a" ]
[ "src/agent.py" ]
[ "import csv\nimport glob\nimport multiprocessing\nimport os\nimport warnings\nfrom queue import Queue\nfrom typing import Tuple\n\nimport gym\nimport numpy as np\nimport stable_baselines3\nimport tensorflow as tf\nimport yaml\nfrom stable_baselines3.common.utils import get_linear_fn, set_random_seed\nfrom tensorflow.python.platform import tf_logging as tf_log\n\ntf.get_logger().setLevel(tf_log.ERROR)\ngym.logger.set_level(gym.logger.ERROR)\n\nfrom stable_baselines import DQN, PPO2, SAC\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.common.callbacks import (CheckpointCallback,\n EvalCallback,\n StopTrainingOnRewardThreshold)\nfrom stable_baselines.common.noise import (AdaptiveParamNoiseSpec,\n NormalActionNoise,\n OrnsteinUhlenbeckActionNoise)\nfrom stable_baselines.common.schedules import constfn\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.logger import configure\n\nfrom abstract_agent import AbstractAgent\nfrom algo.env_predicate_pair import EnvPredicatePair\nfrom custom_callbacks import (LoggingTrainingMetricsCallback,\n SaveVecNormalizeCallback)\nfrom custom_callbacks3 import \\\n LoggingTrainingMetricsCallback as LoggingTrainingMetricsCallbackSb3\nfrom custom_callbacks3 import \\\n SaveVecNormalizeCallback as SaveVecNormalizeCallbackSb3\nfrom env_utils import (get_n_actions, get_reward_threshold, make_custom_env,\n make_env_parallel, normalize_env)\nfrom envs.env_eval_callback import EnvEvalCallback\nfrom envs.env_variables import EnvVariables\nfrom evaluation import custom_evaluate_policy\nfrom log import Log\nfrom progress_bar_manager import ProgressBarManager\nfrom training.custom_dqn import CustomDQN\nfrom training.custom_sac import CustomSAC\nfrom utilities import (HOME, PREFIX_DIR_MODELS_SAVE, LinearNormalActionNoise,\n linear_schedule)\n\nif multiprocessing.cpu_count() <= 4:\n n_cpu_tf_sess = multiprocessing.cpu_count() // 2\nelse:\n n_cpu_tf_sess = multiprocessing.cpu_count()\n\n\ndef filter_tf_version_warnings():\n # https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\" # or any {'0', '1', '2'}\n # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning\n warnings.simplefilter(action=\"ignore\", category=FutureWarning)\n warnings.simplefilter(action=\"ignore\", category=Warning)\n tf.autograph.set_verbosity(0)\n\n\ndef get_value_given_key(filename, key) -> str:\n with open(filename, newline=\"\") as csvfile:\n csv_reader = csv.DictReader(csvfile)\n for row in csv_reader:\n last_row = row\n if key in last_row:\n return last_row[key]\n return None\n\n\ndef load_hyperparams(algo_name=None, env_name=None, model_suffix=None):\n # Load hyperparameters from yaml file\n abs_hyperparams_dir = os.path.abspath(HOME + \"/hyperparams\")\n filename = (\n abs_hyperparams_dir + \"/{}.yml\".format(algo_name)\n if not model_suffix\n else abs_hyperparams_dir + \"/{}_{}.yml\".format(algo_name, model_suffix)\n )\n with open(filename, \"r\") as f:\n hyperparams_dict = yaml.safe_load(f)\n if env_name in list(hyperparams_dict.keys()):\n return hyperparams_dict[env_name]\n else:\n if model_suffix:\n raise ValueError(\"Hyperparameters not found for {}_{}-{}\".format(algo_name, model_suffix, env_name))\n else:\n raise ValueError(\"Hyperparameters not found for {}-{}\".format(algo_name, env_name))\n\n\ndef _parse_normalize(dictionary):\n normalize_kwargs = {}\n if \"normalize\" in dictionary.keys():\n 
normalize = dictionary[\"normalize\"]\n if isinstance(normalize, str):\n normalize_kwargs = eval(normalize)\n del dictionary[\"normalize\"]\n\n return normalize_kwargs\n\n\nDEFAULT_N_EVAL_EPISODES = 0\n\n\nclass Agent(AbstractAgent):\n def __init__(\n self,\n algo_name: str = \"ppo2\",\n env_name: str = \"CartPole-v1\",\n log_to_tensorboard: bool = False,\n tb_log_name: str = \"ppo2\",\n train_total_timesteps: int = None,\n n_eval_episodes: int = DEFAULT_N_EVAL_EPISODES,\n render: bool = False,\n num_envs: int = 1,\n model_to_load: str = None,\n continue_learning: bool = False,\n continue_learning_suffix: str = \"continue_learning\",\n discrete_action_space: bool = False,\n eval_callback: bool = False,\n env_variables: EnvVariables = None,\n env_eval_callback: EnvEvalCallback = None,\n show_progress_bar: bool = False,\n log_every: int = 1000,\n save_replay_buffer: bool = True,\n save_model: bool = True,\n algo_hyperparams: str = None,\n sb_version: str = \"sb2\",\n model_suffix: str = None,\n ):\n\n self.algo_name = algo_name\n self.env_name = env_name\n self.log_to_tensorboard = log_to_tensorboard\n self.tb_log_name = tb_log_name\n self.train_total_timesteps = train_total_timesteps\n self.n_eval_episodes = n_eval_episodes\n self.render = render\n self.num_envs = num_envs\n self.model_to_load = model_to_load\n self.continue_learning = continue_learning\n self.continue_learning_suffix = continue_learning_suffix\n self.discrete_action_space = discrete_action_space\n self.eval_callback = eval_callback\n self.env_kwargs = env_variables\n self.env_eval_callback = env_eval_callback\n self.show_progress_bar = show_progress_bar\n self.log_every = log_every\n self.save_replay_buffer = save_replay_buffer\n self.save_model = save_model\n self.algo_hyperparams = algo_hyperparams\n self.model_suffix = model_suffix\n self.logger = Log(\"Agent\")\n assert sb_version == \"sb2\" or sb_version == \"sb3\", \"sb_version == sb2 or sb3: {}\".format(sb_version)\n self.sb_version = sb_version\n\n filter_tf_version_warnings()\n self.logger.debug(\"Instantiating agent\")\n\n if algo_name == \"sac\":\n assert not discrete_action_space, \"discrete_action_space not supported in sac\"\n elif algo_name == \"dqn\":\n assert discrete_action_space, \"continues_action_space not supported in dqn\"\n elif algo_name == \"ppo2\":\n self.logger.warn(\"PPO with {} action space\".format(\"continuous\" if not discrete_action_space else \"discrete\"))\n\n def _preprocess_hyperparams(self, _hyperparams):\n # Convert to python object if needed\n if \"policy_kwargs\" in _hyperparams.keys() and isinstance(_hyperparams[\"policy_kwargs\"], str):\n _hyperparams[\"policy_kwargs\"] = eval(_hyperparams[\"policy_kwargs\"])\n\n n_timesteps = _hyperparams.pop(\"n_timesteps\", None)\n n_envs = _hyperparams.pop(\"n_envs\", None)\n log_every = _hyperparams.pop(\"log_every\", None)\n if not self.continue_learning:\n if not log_every:\n self.logger.debug(\"log_every not defined in yml file: using command line log_every {}\".format(self.log_every))\n log_every = self.log_every\n else:\n self.logger.debug(\"using log_every as defined in yml file: {}\".format(log_every))\n else:\n self.logger.debug(\"priority to command line log_every {}\".format(self.log_every))\n log_every = self.log_every\n\n # Parse noise string\n if self.algo_name in [\"ddpg\", \"sac\", \"td3\"] and _hyperparams.get(\"noise_type\") is not None:\n noise_type = _hyperparams[\"noise_type\"].strip()\n noise_std = _hyperparams[\"noise_std\"]\n n_actions = 
get_n_actions(env_name=self.env_name, env_variables=self.env_kwargs)\n self.logger.debug(\"n_actions: {}\".format(n_actions))\n if \"adaptive-param\" in noise_type:\n assert self.algo_name == \"ddpg\", \"Parameter is not supported by SAC\"\n _hyperparams[\"param_noise\"] = AdaptiveParamNoiseSpec(initial_stddev=noise_std, desired_action_stddev=noise_std)\n elif \"normal\" in noise_type:\n if \"lin\" in noise_type:\n _hyperparams[\"action_noise\"] = LinearNormalActionNoise(\n mean=np.zeros(n_actions),\n sigma=noise_std * np.ones(n_actions),\n final_sigma=_hyperparams.get(\"noise_std_final\", 0.0) * np.ones(n_actions),\n max_steps=n_timesteps,\n )\n else:\n _hyperparams[\"action_noise\"] = NormalActionNoise(\n mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions)\n )\n elif \"ornstein-uhlenbeck\" in noise_type:\n _hyperparams[\"action_noise\"] = OrnsteinUhlenbeckActionNoise(\n mean=np.zeros(n_actions), sigma=noise_std * np.ones(n_actions)\n )\n else:\n raise RuntimeError('Unknown noise type \"{}\"'.format(noise_type))\n self.logger.debug(\"Applying {} noise with std {}\".format(noise_type, noise_std))\n del _hyperparams[\"noise_type\"]\n del _hyperparams[\"noise_std\"]\n if \"noise_std_final\" in _hyperparams:\n del _hyperparams[\"noise_std_final\"]\n\n normalize_kwargs = _parse_normalize(dictionary=_hyperparams)\n\n if n_envs is None:\n self.logger.debug(\"n_envs not defined in yml file: using command line n_envs {}\".format(self.num_envs))\n n_envs = self.num_envs\n else:\n self.logger.debug(\"using n_envs as num of envs defined in yml file:\".format(n_envs))\n\n if not self.continue_learning:\n # priority to yml defined n_timesteps\n if n_timesteps is None:\n self.logger.debug(\n \"n_timesteps not defined in yml file: using command line n_timesteps {}\".format(self.train_total_timesteps)\n )\n n_timesteps = self.train_total_timesteps\n else:\n self.logger.debug(\"using n_timesteps as total timesteps defined in yml file: {}\".format(n_timesteps))\n n_timesteps = int(n_timesteps)\n else:\n if self.train_total_timesteps and self.train_total_timesteps != -1:\n assert self.train_total_timesteps <= int(n_timesteps), \"train_total_timesteps <= n_timesteps: {}, {}\".format(\n self.train_total_timesteps, n_timesteps\n )\n # priority to command line n_timesteps\n self.logger.debug(\"priority to command line n_timesteps {}\".format(self.train_total_timesteps))\n n_timesteps = self.train_total_timesteps\n elif self.train_total_timesteps == -1:\n assert n_timesteps, \"n_timesteps should have a value: {}\".format(n_timesteps)\n n_timesteps = int(n_timesteps)\n self.logger.info(\"training in continual learning = training from scratch. 
n_timesteps {}\".format(n_timesteps))\n else:\n assert n_timesteps, \"n_timesteps should have a value: {}\".format(n_timesteps)\n n_timesteps = int(n_timesteps // 2)\n self.logger.debug(\n \"train_total_timesteps not specified in continue_learning: \"\n \"taking half of original n_timesteps defined in yml file {}\".format(n_timesteps)\n )\n\n assert n_timesteps % log_every == 0, \"it should be possible to divide n_timesteps for log_every: {}, {}\".format(\n n_timesteps, log_every\n )\n return normalize_kwargs, n_envs, n_timesteps, log_every, _hyperparams\n\n def _preprocess_storage_dirs(self):\n if self.model_suffix:\n best_model_save_path = (\n PREFIX_DIR_MODELS_SAVE + \"/\" + self.algo_name + \"/logs_\" + self.tb_log_name + \"_\" + self.model_suffix\n )\n else:\n best_model_save_path = PREFIX_DIR_MODELS_SAVE + \"/\" + self.algo_name + \"/logs_\" + self.tb_log_name\n if self.log_to_tensorboard:\n tensorboard_log_dir = PREFIX_DIR_MODELS_SAVE + \"/\" + self.algo_name + \"/logs_\" + self.tb_log_name\n else:\n tensorboard_log_dir = None\n return best_model_save_path, tensorboard_log_dir\n\n def _set_global_seed(self, seed):\n if self.sb_version == \"sb3\":\n set_random_seed(seed)\n else:\n set_global_seeds(seed)\n\n # TODO: could be optimized when used in search (some variables can be passed instead of being created from\n # scratch)\n def train(\n self,\n seed: int,\n communication_queue: Queue = None,\n current_iteration: int = -1,\n search_suffix: str = \"1\",\n env_variables: EnvVariables = None,\n random_search: bool = False,\n ):\n\n self._set_global_seed(seed=seed)\n\n env_kwargs_to_set = env_variables if env_variables else self.env_kwargs\n self.logger.debug(\"env_variables: {}\".format(env_kwargs_to_set.get_params_string()))\n\n reward_threshold = get_reward_threshold(env_name=self.env_name)\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n\n if current_iteration != -1 and not self.continue_learning:\n best_model_save_path = best_model_save_path + \"_\" + str(current_iteration)\n\n self.logger.debug(\"best_model_save_path: {}\".format(best_model_save_path))\n\n if communication_queue or search_suffix != \"1\":\n continue_learning_suffix = self.continue_learning_suffix + \"_\" + search_suffix\n else:\n continue_learning_suffix = self.continue_learning_suffix\n\n os.environ[\"OPENAI_LOG_FORMAT\"] = \"log,csv\"\n if self.continue_learning:\n os.environ[\"OPENAI_LOGDIR\"] = best_model_save_path + \"_\" + continue_learning_suffix\n else:\n os.environ[\"OPENAI_LOGDIR\"] = best_model_save_path\n configure()\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name, model_suffix=self.model_suffix)\n\n (normalize_kwargs, n_envs, n_timesteps, log_every, hyperparams,) = self._preprocess_hyperparams(\n _hyperparams=hyperparams\n )\n\n if n_envs > 1 and self.algo_name == \"ppo2\":\n # On most env, SubprocVecEnv does not help and is quite memory hungry\n env = DummyVecEnv(\n [\n make_env_parallel(\n sb_version=self.sb_version,\n seed=seed,\n rank=i,\n env_name=self.env_name,\n continue_learning=self.continue_learning,\n log_dir=best_model_save_path,\n env_kwargs=env_kwargs_to_set,\n algo_name=self.algo_name,\n continue_learning_suffix=continue_learning_suffix,\n )\n for i in range(n_envs)\n ]\n )\n if len(normalize_kwargs) > 0:\n env = normalize_env(\n 
env=env,\n vectorize=False,\n orig_log_dir=best_model_save_path,\n continue_learning=self.continue_learning,\n sb_version=self.sb_version,\n normalize_kwargs=normalize_kwargs,\n )\n else:\n env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=env_kwargs_to_set,\n normalize_kwargs=normalize_kwargs,\n continue_learning=self.continue_learning,\n log_dir=best_model_save_path,\n env_name=self.env_name,\n algo_name=self.algo_name,\n continue_learning_suffix=continue_learning_suffix,\n )\n\n if self.n_eval_episodes > DEFAULT_N_EVAL_EPISODES:\n analysis_callback = self.build_callback(\n algo_name=self.algo_name,\n continue_learning=self.continue_learning,\n call_every=log_every,\n eval_callback=self.eval_callback,\n _reward_threshold=reward_threshold,\n eval_episodes=self.n_eval_episodes,\n _eval_env=make_custom_env(\n seed=seed,\n continue_learning=self.continue_learning,\n sb_version=self.sb_version,\n env_kwargs=env_kwargs_to_set,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n algo_name=self.algo_name,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n evaluate_during_learning=True,\n continue_learning_suffix=continue_learning_suffix,\n ),\n original_env=make_custom_env(\n seed=seed,\n continue_learning=self.continue_learning,\n sb_version=self.sb_version,\n env_kwargs=self.env_kwargs,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n algo_name=self.algo_name,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n evaluate_during_learning=True,\n ),\n env_name=self.env_name,\n _best_model_save_path=best_model_save_path,\n num_envs=n_envs,\n total_timesteps=n_timesteps,\n continue_learning_suffix=continue_learning_suffix,\n communication_queue=communication_queue,\n env_eval_callback=self.env_eval_callback,\n save_replay_buffer=self.save_replay_buffer,\n save_model=self.save_model,\n random_search=random_search,\n )\n else:\n analysis_callback = self.build_callback(\n algo_name=self.algo_name,\n continue_learning=self.continue_learning,\n call_every=log_every,\n eval_callback=self.eval_callback,\n _reward_threshold=reward_threshold,\n eval_episodes=self.n_eval_episodes,\n env_name=self.env_name,\n _best_model_save_path=best_model_save_path,\n num_envs=n_envs,\n continue_learning_suffix=continue_learning_suffix,\n save_replay_buffer=self.save_replay_buffer,\n save_model=self.save_model,\n random_search=random_search,\n )\n\n if self.continue_learning:\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n n_timesteps=n_timesteps,\n continue_learning=True,\n env_name=self.env_name,\n model_to_load=self.model_to_load,\n save_replay_buffer=self.save_replay_buffer,\n )\n else:\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n env_name=self.env_name,\n n_timesteps=n_timesteps,\n model_to_load=self.model_to_load,\n save_replay_buffer=self.save_replay_buffer,\n )\n\n try:\n callback_list = [analysis_callback]\n\n # if len(normalize_kwargs) > 0 and not self.continue_learning:\n # callback_list = [self._build_vec_normalize_callback(save_path=best_model_save_path,\n # log_every=log_every), analysis_callback]\n\n if self.show_progress_bar:\n with ProgressBarManager(total_timesteps=n_timesteps, sb_version=self.sb_version) as progress_callback:\n callback_list.append(progress_callback)\n if self.continue_learning 
and self.log_to_tensorboard:\n model.learn(\n total_timesteps=n_timesteps,\n callback=callback_list,\n tb_log_name=self.tb_log_name + \"_\" + continue_learning_suffix,\n )\n else:\n model.learn(\n total_timesteps=n_timesteps, callback=callback_list, tb_log_name=self.tb_log_name,\n )\n\n else:\n if self.continue_learning and self.log_to_tensorboard:\n model.learn(\n total_timesteps=n_timesteps,\n callback=callback_list,\n tb_log_name=self.tb_log_name + \"_\" + continue_learning_suffix,\n )\n else:\n self.logger.debug(\"Model learn start...\")\n model.learn(\n total_timesteps=n_timesteps, callback=callback_list, tb_log_name=self.tb_log_name,\n )\n self.logger.debug(\"Model learn end\")\n except KeyboardInterrupt:\n pass\n finally:\n if len(normalize_kwargs) > 0 and not self.continue_learning:\n # Important: save the running average, for testing the agent we need that normalization\n model.get_vec_normalize_env().save(os.path.join(best_model_save_path, \"vecnormalize.pkl\"))\n\n # Release resources\n env.close()\n\n def _build_vec_normalize_callback(self, save_path: str, log_every: int):\n\n if self.sb_version == \"sb2\":\n return SaveVecNormalizeCallback(log_every=log_every, save_path=save_path)\n return SaveVecNormalizeCallbackSb3(log_every=log_every, save_path=save_path)\n\n def test_with_callback(self, seed, env_variables: EnvVariables, n_eval_episodes: int = None) -> EnvPredicatePair:\n\n assert self.env_eval_callback, \"env_eval_callback should be instantiated\"\n\n self._set_global_seed(seed=seed)\n\n self.logger.debug(\"env_variables: {}\".format(env_variables.get_params_string()))\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name)\n\n normalize_kwargs = _parse_normalize(dictionary=hyperparams)\n\n eval_env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=env_variables,\n algo_name=self.algo_name,\n env_name=self.env_name,\n normalize_kwargs=normalize_kwargs,\n log_dir=best_model_save_path,\n evaluate=True,\n continue_learning_suffix=self.continue_learning_suffix,\n )\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=eval_env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n model_to_load=self.model_to_load,\n env_name=self.env_name,\n )\n\n n_eval_episodes_to_run = n_eval_episodes if n_eval_episodes else self.n_eval_episodes\n adequate_performance, info = self.env_eval_callback.evaluate_env(\n model=model, env=eval_env, n_eval_episodes=n_eval_episodes_to_run, sb_version=self.sb_version,\n )\n return EnvPredicatePair(env_variables=env_variables, predicate=adequate_performance, execution_info=info,)\n\n def test_without_callback(self, seed, n_eval_episodes: int = 0, model_path: str = None) -> Tuple[float, float]:\n assert n_eval_episodes > 0 or self.n_eval_episodes > 0, \"n_eval_episodes > 0: {}, {}\".format(\n n_eval_episodes, self.n_eval_episodes\n )\n\n self._set_global_seed(seed=seed)\n\n if n_eval_episodes == 0:\n n_eval_episodes = self.n_eval_episodes\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n if model_path:\n best_model_save_path = model_path\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified 
hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name)\n\n normalize_kwargs = _parse_normalize(dictionary=hyperparams)\n\n eval_env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=self.env_kwargs,\n algo_name=self.algo_name,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n continue_learning_suffix=self.continue_learning_suffix,\n )\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=eval_env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n model_to_load=self.model_to_load,\n env_name=self.env_name,\n )\n\n mean_reward, std_reward = custom_evaluate_policy(\n model, eval_env, n_eval_episodes=n_eval_episodes, render=self.render, deterministic=True,\n )\n\n # release resources\n eval_env.close()\n\n return mean_reward, std_reward\n\n def test(self, seed, continue_learning_suffix: str = None, env_variables: EnvVariables = None):\n\n assert self.n_eval_episodes > 0, \"n_eval_episodes > 0: {}\".format(self.n_eval_episodes)\n\n self._set_global_seed(seed=seed)\n\n env_kwargs_to_set = env_variables if env_variables else self.env_kwargs\n self.logger.debug(\"env_variables: {}\".format(env_kwargs_to_set.get_params_string()))\n\n best_model_save_path, tensorboard_log_dir = self._preprocess_storage_dirs()\n\n if self.algo_hyperparams:\n self.logger.debug(\"Overriding file specified hyperparams with {}\".format(eval(self.algo_hyperparams)))\n hyperparams = eval(self.algo_hyperparams)\n else:\n hyperparams = load_hyperparams(algo_name=self.algo_name, env_name=self.env_name)\n\n normalize_kwargs = _parse_normalize(dictionary=hyperparams)\n\n if self.continue_learning and not continue_learning_suffix:\n best_model_save_path = best_model_save_path + \"_\" + self.continue_learning_suffix + \"/\"\n elif self.continue_learning and continue_learning_suffix:\n best_model_save_path = best_model_save_path + \"_\" + continue_learning_suffix + \"/\"\n\n eval_env = make_custom_env(\n seed=seed,\n sb_version=self.sb_version,\n env_kwargs=env_kwargs_to_set,\n algo_name=self.algo_name,\n env_name=self.env_name,\n log_dir=best_model_save_path,\n normalize_kwargs=normalize_kwargs,\n evaluate=True,\n continue_learning_suffix=self.continue_learning_suffix,\n )\n model = self.create_model(\n seed=seed,\n algo_name=self.algo_name,\n env=eval_env,\n tensorboard_log_dir=tensorboard_log_dir,\n hyperparams=hyperparams,\n best_model_save_path=best_model_save_path,\n model_to_load=self.model_to_load,\n env_name=self.env_name,\n )\n\n if self.show_progress_bar:\n with ProgressBarManager(total_timesteps=self.n_eval_episodes, sb_version=self.sb_version) as progress_callback:\n mean_reward, std_reward = custom_evaluate_policy(\n model,\n eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n callback=progress_callback,\n deterministic=True,\n )\n else:\n mean_reward, std_reward = custom_evaluate_policy(\n model, eval_env, n_eval_episodes=self.n_eval_episodes, render=self.render, deterministic=True,\n )\n\n self.logger.debug(f\"mean_reward:{mean_reward:.2f} +/- {std_reward:.2f}\")\n\n # release resources\n eval_env.close()\n\n def load_model(self, directory, model_name=None):\n abs_dir_models = os.path.abspath(directory)\n if model_name:\n model_to_load = os.path.join(abs_dir_models, model_name + 
\".zip\")\n self.logger.debug(\"Searching model file {}\".format(model_to_load))\n else:\n self.logger.debug(\"Searching model file in directory {}\".format(abs_dir_models))\n model_files = glob.glob(abs_dir_models + \"/*.zip\")\n model_to_load = max(model_files, key=os.path.getmtime)\n if not os.path.exists(model_to_load):\n raise FileNotFoundError(\"File \" + model_to_load + \" not found\")\n self.logger.debug(\"Loading model file {}\".format(model_to_load))\n return model_to_load\n\n def create_model(\n self,\n seed,\n algo_name,\n env,\n tensorboard_log_dir,\n hyperparams,\n best_model_save_path=None,\n model_to_load=None,\n continue_learning=False,\n env_name=\"CartPole-v1\",\n n_timesteps=-1,\n save_replay_buffer: bool = True,\n ):\n\n old_hyperparams = dict()\n\n # Create learning rate schedules for ppo2 and sac\n if algo_name in [\"ppo2\", \"sac\", \"td3\"]:\n for key in [\"learning_rate\", \"cliprange\", \"cliprange_vf\"]:\n if key not in hyperparams:\n continue\n if isinstance(hyperparams[key], str):\n self.logger.debug(\"Key {}, value {}\".format(key, hyperparams[key]))\n old_hyperparams[key] = hyperparams[key]\n schedule, initial_value = hyperparams[key].split(\"_\")\n initial_value = float(initial_value)\n hyperparams[key] = linear_schedule(initial_value)\n elif isinstance(hyperparams[key], (float, int)):\n # Negative value: ignore (ex: for clipping)\n if hyperparams[key] < 0:\n continue\n old_hyperparams[key] = float(hyperparams[key])\n hyperparams[key] = constfn(float(hyperparams[key]))\n else:\n raise ValueError(\"Invalid value for {}: {}\".format(key, hyperparams[key]))\n\n if algo_name == \"ppo2\":\n\n if self.sb_version == \"sb3\":\n raise NotImplementedError(\"PPO still in sb2\")\n\n if best_model_save_path and continue_learning:\n model = PPO2.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n key = \"cliprange\"\n cl_cliprange_value = 0.08 # new policy can be a bit different than the old one\n if key in old_hyperparams:\n if isinstance(old_hyperparams[key], str):\n self.logger.debug(\"Setting cliprange to lin_{}\".format(cl_cliprange_value))\n model.cliprange = linear_schedule(cl_cliprange_value)\n elif isinstance(old_hyperparams[key], (float, int)):\n self.logger.debug(\"Setting cliprange to value {}\".format(cl_cliprange_value))\n model.cliprange = constfn(cl_cliprange_value)\n else:\n # default value is too high for continual learning (0.2)\n self.logger.debug(\"Setting cliprange to value {}\".format(cl_cliprange_value))\n model.cliprange = cl_cliprange_value\n\n return model\n elif best_model_save_path:\n return PPO2.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n return PPO2(env=env, verbose=1, tensorboard_log=tensorboard_log_dir, **hyperparams, n_cpu_tf_sess=n_cpu_tf_sess,)\n\n elif algo_name == \"sac\":\n if self.sb_version == \"sb3\":\n if best_model_save_path and continue_learning:\n model = stable_baselines3.SAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n model.load_replay_buffer(path=best_model_save_path + \"/replay_buffer\")\n self.logger.debug(\"Model replay buffer size: {}\".format(model.replay_buffer.size()))\n self.logger.debug(\"Setting learning_starts to 0\")\n model.learning_starts = 0\n\n value = get_value_given_key(best_model_save_path + \"/progress.csv\", 
\"ent_coef\")\n if value:\n ent_coef = float(value)\n self.logger.debug(\"Restore model old ent_coef: {}\".format(\"auto_\" + str(ent_coef)))\n model.ent_coef = \"auto_\" + str(ent_coef)\n model.target_entropy = str(ent_coef)\n\n return model\n elif best_model_save_path:\n return stable_baselines3.SAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n assert n_timesteps > 0, \"n_timesteps > 0: {}\".format(n_timesteps)\n return stable_baselines3.SAC(env=env, verbose=0, seed=seed, tensorboard_log=tensorboard_log_dir, **hyperparams)\n\n else:\n if best_model_save_path and continue_learning:\n model = CustomSAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n self.logger.debug(\"Model replay buffer size: {}\".format(len(model.replay_buffer)))\n self.logger.debug(\"Setting learning_starts to 0\")\n model.learning_starts = 0\n if not save_replay_buffer:\n self.logger.debug(\"Setting save_replay_buffer to False\")\n model.save_replay_buffer = False\n\n value = get_value_given_key(best_model_save_path + \"/progress.csv\", \"ent_coef\")\n if value:\n ent_coef = float(value)\n self.logger.debug(\"Restore model old ent_coef: {}\".format(\"auto_\" + str(ent_coef)))\n model.ent_coef = \"auto_\" + str(ent_coef)\n model.target_entropy = str(ent_coef)\n\n return model\n\n elif best_model_save_path:\n # do not load replay buffer since we are in testing mode (no continue_learning)\n return SAC.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n return CustomSAC(\n total_timesteps=n_timesteps,\n env=env,\n verbose=1,\n tensorboard_log=tensorboard_log_dir,\n **hyperparams,\n n_cpu_tf_sess=n_cpu_tf_sess,\n save_replay_buffer=save_replay_buffer,\n )\n\n elif algo_name == \"dqn\":\n\n if self.sb_version == \"sb3\":\n\n if best_model_save_path:\n if continue_learning:\n model = stable_baselines3.DQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=0,\n )\n model.load_replay_buffer(path=best_model_save_path + \"/replay_buffer\")\n model.learning_starts = 0\n model.exploration_fraction = 0.0005\n model.exploration_initial_eps = model.exploration_final_eps\n model.exploration_schedule = get_linear_fn(\n model.exploration_initial_eps, model.exploration_final_eps, model.exploration_fraction\n )\n self.logger.debug(\"Model replay buffer size: {}\".format(model.replay_buffer.size()))\n self.logger.debug(\"Setting learning_starts to {}\".format(model.learning_starts))\n self.logger.debug(\"Setting exploration_fraction to {}\".format(model.exploration_fraction))\n self.logger.debug(\"Setting exploration_initial_eps to {}\".format(model.exploration_initial_eps))\n return model\n return stable_baselines3.DQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n seed=seed,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n return stable_baselines3.DQN(env=env, verbose=0, seed=seed, tensorboard_log=tensorboard_log_dir, **hyperparams)\n else:\n if best_model_save_path:\n if continue_learning:\n model = CustomDQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n )\n self.logger.debug(\"Model replay buffer size: 
{}\".format(len(model.replay_buffer)))\n self.logger.debug(\n \"Setting exploration initial eps to exploration final eps {}\".format(model.exploration_final_eps)\n )\n self.logger.debug(\"Setting learning_starts to 0\")\n if not save_replay_buffer:\n self.logger.debug(\"Setting save_replay_buffer to False\")\n model.save_replay_buffer = False\n model.learning_starts = 0\n model.exploration_fraction = 0.005\n model.exploration_initial_eps = model.exploration_final_eps\n return model\n return DQN.load(\n self.load_model(best_model_save_path, model_to_load),\n env=env,\n tensorboard_log=tensorboard_log_dir,\n verbose=1,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n return CustomDQN(\n env=env,\n save_replay_buffer=save_replay_buffer,\n verbose=1,\n tensorboard_log=tensorboard_log_dir,\n **hyperparams,\n n_cpu_tf_sess=n_cpu_tf_sess,\n )\n raise NotImplementedError(\"algo_name {} not supported yet\".format(algo_name))\n\n def build_checkpoint_callback(self, save_freq=10000, save_path=None):\n self.logger.debug(\"Checkpoint callback called every {} timesteps\".format(save_freq))\n return CheckpointCallback(save_freq=save_freq, save_path=save_path)\n\n def build_logging_training_metrics_callback(\n self,\n algo_name=\"ppo2\",\n env_name=None,\n log_every=1000,\n save_path=None,\n num_envs=1,\n _eval_env=None,\n original_env=None,\n total_timesteps=0,\n n_eval_episodes=10,\n communication_queue=None,\n env_eval_callback=None,\n continue_learning=False,\n save_replay_buffer=True,\n save_model=True,\n random_search=False,\n ):\n self.logger.debug(\"Logging training metrics callback called every {}\".format(log_every))\n if self.sb_version == \"sb3\":\n return LoggingTrainingMetricsCallbackSb3(\n log_every=log_every,\n log_dir=save_path,\n num_envs=num_envs,\n env_name=env_name,\n total_timesteps=total_timesteps,\n eval_env=_eval_env,\n original_env=original_env,\n n_eval_episodes=n_eval_episodes,\n communication_queue=communication_queue,\n env_eval_callback=env_eval_callback,\n continue_learning=continue_learning,\n save_replay_buffer=save_replay_buffer,\n save_model=save_model,\n random_search=random_search,\n )\n return LoggingTrainingMetricsCallback(\n log_every=log_every,\n log_dir=save_path,\n num_envs=num_envs,\n env_name=env_name,\n total_timesteps=total_timesteps,\n eval_env=_eval_env,\n original_env=original_env,\n n_eval_episodes=n_eval_episodes,\n communication_queue=communication_queue,\n env_eval_callback=env_eval_callback,\n continue_learning=continue_learning,\n save_model=save_model,\n random_search=random_search,\n )\n\n def build_eval_callback(\n self, eval_freq=10000, reward_threshold=900, log_path=None, eval_episodes=10, eval_env=None,\n ):\n callback_on_best = StopTrainingOnRewardThreshold(reward_threshold=reward_threshold, verbose=1)\n eval_callback = EvalCallback(\n eval_env=eval_env,\n best_model_save_path=log_path,\n log_path=log_path,\n eval_freq=eval_freq,\n deterministic=True,\n render=False,\n n_eval_episodes=eval_episodes,\n callback_on_new_best=callback_on_best,\n verbose=1,\n )\n self.logger.debug(\n \"Eval callback called every {} timesteps: stop training when mean reward is above {} in {} episodes\".format(\n eval_freq, reward_threshold, eval_episodes\n )\n )\n return eval_callback\n\n def build_callback(\n self,\n algo_name=\"ppo2\",\n continue_learning=False,\n call_every=1000,\n eval_callback=False,\n _reward_threshold=900,\n eval_episodes=10,\n _eval_env=None,\n original_env=None,\n _best_model_save_path=None,\n num_envs=1,\n env_name=None,\n 
continue_learning_suffix=\"continue_learning\",\n communication_queue=None,\n env_eval_callback=None,\n total_timesteps=0,\n save_replay_buffer=True,\n save_model=True,\n random_search=False,\n ):\n if continue_learning:\n save_path = _best_model_save_path + \"_\" + continue_learning_suffix + \"/\"\n else:\n save_path = _best_model_save_path\n\n if eval_callback:\n return self.build_eval_callback(\n eval_env=_eval_env,\n eval_freq=call_every,\n reward_threshold=_reward_threshold,\n log_path=save_path,\n eval_episodes=eval_episodes,\n )\n else:\n if _eval_env:\n return self.build_logging_training_metrics_callback(\n algo_name=algo_name,\n env_name=env_name,\n log_every=call_every,\n save_path=save_path,\n num_envs=num_envs,\n _eval_env=_eval_env,\n original_env=original_env,\n n_eval_episodes=eval_episodes,\n communication_queue=communication_queue,\n env_eval_callback=env_eval_callback,\n total_timesteps=total_timesteps,\n continue_learning=continue_learning,\n save_replay_buffer=save_replay_buffer,\n save_model=save_model,\n random_search=random_search,\n )\n return self.build_logging_training_metrics_callback(\n algo_name=algo_name,\n env_name=env_name,\n log_every=call_every,\n save_path=save_path,\n num_envs=num_envs,\n save_replay_buffer=save_replay_buffer,\n save_model=save_model,\n )\n" ]
[ [ "tensorflow.autograph.set_verbosity", "numpy.ones", "numpy.zeros", "tensorflow.get_logger" ] ]
itrharrison/skypy-itrharrison
[ "cea1f02d1b2cd3b689266d7ae9bca1a4cfe986a2" ]
[ "skypy/galaxies/_schechter.py" ]
[ "'''Implementation of Schechter LF and SMF.'''\n\nimport numpy as np\n\nfrom .redshift import schechter_lf_redshift, schechter_smf_redshift\nfrom .stellar_mass import schechter_smf_mass\nfrom .luminosity import schechter_lf_magnitude\nfrom astropy import units\n\n__all__ = [\n 'schechter_lf',\n 'schechter_smf',\n]\n\n\[email protected]_input(sky_area=units.sr)\ndef schechter_lf(redshift, M_star, phi_star, alpha, m_lim, sky_area, cosmology, noise=True):\n r'''Sample redshifts and magnitudes from a Schechter luminosity function.\n\n Sample the redshifts and magnitudes of galaxies following a Schechter\n luminosity function with potentially redshift-dependent parameters, limited\n by an apparent magnitude `m_lim`, for a sky area `sky_area`.\n\n Parameters\n ----------\n redshift : array_like\n Input redshift grid on which the Schechter function parameters are\n evaluated. Galaxies are sampled over this redshift range.\n M_star : array_like or function\n Characteristic absolute magnitude of the Schechter function. Can be a\n single value, an array of values for each `redshift`, or a function of\n redshift.\n phi_star : array_like or function\n Normalisation of the Schechter function. Can be a single value, an\n array of values for each `redshift`, or a function of redshift.\n alpha : array_like or function\n Schechter function power law index. Can be a single value, an array of\n values for each `redshift`, or a function of redshift.\n m_lim : float\n Limiting apparent magnitude.\n sky_area : `~astropy.units.Quantity`\n Sky area over which galaxies are sampled. Must be in units of solid angle.\n cosmology : Cosmology\n Cosmology object to convert apparent to absolute magnitudes.\n noise : bool, optional\n Poisson-sample the number of galaxies. Default is `True`.\n\n Notes\n -----\n\n Effectively calls `~skypy.galaxies.redshift.schechter_lf_redshift` and\n `~skypy.galaxies.luminosity.schechter_lf_magnitude` internally and returns\n the tuple of results.\n\n Returns\n -------\n redshifts, magnitudes : tuple of array_like\n Redshifts and magnitudes of the galaxy sample described by the Schechter\n luminosity function.\n\n '''\n\n # sample galaxy redshifts\n z = schechter_lf_redshift(redshift, M_star, phi_star, alpha, m_lim, sky_area, cosmology, noise)\n\n # if a function is NOT given for M_star, phi_star, alpha, interpolate to z\n if not callable(M_star) and np.ndim(M_star) > 0:\n M_star = np.interp(z, redshift, M_star)\n if not callable(phi_star) and np.ndim(phi_star) > 0:\n phi_star = np.interp(z, redshift, phi_star)\n if not callable(alpha) and np.ndim(alpha) > 0:\n alpha = np.interp(z, redshift, alpha)\n\n # sample galaxy magnitudes for redshifts\n M = schechter_lf_magnitude(z, M_star, alpha, m_lim, cosmology)\n\n return z, M\n\n\[email protected]_input(sky_area=units.sr)\ndef schechter_smf(redshift, m_star, phi_star, alpha, m_min, m_max, sky_area, cosmology, noise=True):\n r'''Sample redshifts and stellar masses from a Schechter mass function.\n\n Sample the redshifts and stellar masses of galaxies following a Schechter\n mass function with potentially redshift-dependent parameters, limited\n by maximum and minimum masses `m_min`, `m_max`, for a sky area `sky_area`.\n\n Parameters\n ----------\n redshift : array_like\n Input redshift grid on which the Schechter function parameters are\n evaluated. Galaxies are sampled over this redshift range.\n m_star : array_like or function\n Characteristic mass of the Schechter function. 
Can be a\n single value, an array of values for each `redshift`, or a function of\n redshift.\n phi_star : array_like or function\n Normalisation of the Schechter function. Can be a single value, an\n array of values for each `redshift`, or a function of redshift.\n alpha : array_like or function\n Schechter function power law index. Can be a single value, an array of\n values for each `redshift`, or a function of redshift.\n m_min, m_max : float\n Lower and upper bounds for the stellar mass.\n sky_area : `~astropy.units.Quantity`\n Sky area over which galaxies are sampled. Must be in units of solid angle.\n cosmology : Cosmology\n Cosmology object to calculate comoving densities.\n noise : bool, optional\n Poisson-sample the number of galaxies. Default is `True`.\n\n Notes\n -----\n\n Effectively calls `~skypy.galaxies.redshift.schechter_smf_redshift` and\n `~skypy.galaxies.stellar_mass.schechter_smf_mass` internally and returns\n the tuple of results.\n\n Returns\n -------\n redshifts, stellar masses : tuple of array_like\n Redshifts and stellar masses of the galaxy sample described by the Schechter\n stellar mass function.\n\n '''\n\n # sample galaxy redshifts\n z = schechter_smf_redshift(redshift, m_star, phi_star, alpha, m_min, m_max,\n sky_area, cosmology, noise)\n\n # if a function is NOT given for m_star, phi_star, alpha, interpolate to z\n if not callable(m_star) and np.ndim(m_star) > 0:\n m_star = np.interp(z, redshift, m_star)\n if not callable(phi_star) and np.ndim(phi_star) > 0:\n phi_star = np.interp(z, redshift, phi_star)\n if not callable(alpha) and np.ndim(alpha) > 0:\n alpha = np.interp(z, redshift, alpha)\n\n # sample galaxy mass for redshifts\n m = schechter_smf_mass(z, alpha, m_star, m_min, m_max)\n\n return z, m\n" ]
[ [ "numpy.interp", "numpy.ndim" ] ]
Gorilla-Lab-SCUT/gorilla-3d
[ "399ed8616781a0fbc462f655c0e80c258c5a5207" ]
[ "gorilla3d/nn/models/pointnet/pointnet.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Iterable\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ...modules.pointnet import PointNetFeatExt\n\n\nclass PointNetCls(nn.Module):\n r\"\"\"PointNet classifier. Uses the PointNet feature extractor, and\n adds classification layers on top.\n .. note::\n If you use this code, please cite the original paper in addition to Kaolin.\n .. code-block::\n @article{qi2016pointnet,\n title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},\n author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},\n journal={arXiv preprint arXiv:1612.00593},\n year={2016}\n }\n Args:\n in_channels (int): Number of channels in the input pointcloud\n (default: 3, for X, Y, Z coordinates respectively).\n feat_size (int): Size of the global feature vector\n (default: 1024)\n num_classes (int): Number of classes (for the classification\n task) (default: 2).\n dropout (float): Dropout ratio to use (default: 0.). Note: If\n the ratio is set to 0., we altogether skip using a dropout\n layer.\n layer_dims (Iterable[int]): Sizes of fully connected layers\n to be used in the feature extractor (excluding the input and\n the output layer sizes). Note: the number of\n layers in the feature extractor is implicitly parsed from\n this variable.\n activation (function): Nonlinearity to be used as activation\n function after each batchnorm (default: F.relu)\n batchnorm (bool): Whether or not to use batchnorm layers\n (default: True)\n transposed_input (bool): Whether the input\"s second and third dimension\n is already transposed. If so, a transpose operation can be avoided,\n improving performance.\n See documentation of PointNetFeatExt for more details.\n Example:\n >>> pointnet = PointNetCls(in_channels=6, feat_size=1024, \\\n feat_layer_dims=[32, 64, 256], \\\n classifier_layer_dims=[500, 200, 100])\n >>> x = torch.rand(5, 6, 30)\n >>> y = pointnet(x)\n >>> print(y.shape)\n \"\"\"\n def __init__(self,\n in_channels: int = 3,\n feat_size: int = 1024,\n num_classes: int = 2,\n dropout: float = 0.,\n classifier_layer_dims: Iterable[int] = [512, 256],\n feat_layer_dims: Iterable[int] = [64, 128],\n activation=F.relu,\n batchnorm: bool = True,\n transposed_input: bool = False):\n\n super().__init__()\n\n if not isinstance(num_classes, int):\n raise TypeError(f\"Argument num_classes must be of type int. \"\n f\"Got {type(num_classes)} instead.\")\n if not isinstance(dropout, float):\n raise TypeError(f\"Argument dropout must be of type float. \"\n f\"Got {type(dropout)} instead.\")\n if dropout < 0 or dropout > 1:\n raise ValueError(f\"Dropout ratio must always be in the range\"\n f\"[0, 1]. 
Got {dropout} instead.\")\n if not hasattr(classifier_layer_dims, \"__iter__\"):\n raise TypeError(\"Argument classifier_layer_dims is not iterable.\")\n for idx, layer_dim in enumerate(classifier_layer_dims):\n if not isinstance(layer_dim, int):\n raise TypeError(\n f\"Expected classifier_layer_dims to contain \"\n f\"int. Found type {type(layer_dim)} at index {idx}.\")\n\n # Add feat_size to the head of classifier_layer_dims (the output of\n # the PointNet feature extractor has number of elements equal to\n # has number of channels equal to `in_channels`).\n if not isinstance(classifier_layer_dims, list):\n classifier_layer_dims = list(classifier_layer_dims)\n classifier_layer_dims.insert(0, feat_size)\n\n # Note that `global_feat` MUST be set to True, for global\n # classification tasks.\n self.feature_extractor = PointNetFeatExt(\n in_channels=in_channels,\n feat_size=feat_size,\n layer_dims=feat_layer_dims,\n global_feat=True,\n activation=activation,\n batchnorm=batchnorm,\n transposed_input=transposed_input)\n\n self.linear_layers = nn.ModuleList()\n if batchnorm:\n self.bn_layers = nn.ModuleList()\n for idx in range(len(classifier_layer_dims) - 1):\n self.linear_layers.append(\n nn.Linear(classifier_layer_dims[idx],\n classifier_layer_dims[idx + 1]))\n if batchnorm:\n self.bn_layers.append(\n nn.BatchNorm1d(classifier_layer_dims[idx + 1]))\n\n self.last_linear_layer = nn.Linear(classifier_layer_dims[-1],\n num_classes)\n\n # Store activation as a class attribute\n self.activation = activation\n\n # Dropout layer (if dropout ratio is in the interval (0, 1]).\n if dropout > 0:\n self.dropout = nn.Dropout(p=dropout)\n\n else:\n self.dropout = None\n\n # Store whether or not to use batchnorm as a class attribute\n self.batchnorm = batchnorm\n\n self.transposed_input = transposed_input\n\n def forward(self, x):\n r\"\"\"Forward pass through the PointNet classifier.\n Args:\n x (torch.Tensor): Tensor representing a pointcloud\n (shape: :math:`B \\times N \\times D`, where :math:`B`\n is the batchsize, :math:`N` is the number of points\n in the pointcloud, and :math:`D` is the dimensionality\n of each point in the pointcloud).\n If self.transposed_input is True, then the shape is\n :math:`B \\times D \\times N`.\n \"\"\"\n x = self.feature_extractor(x)\n for idx in range(len(self.linear_layers) - 1):\n if self.batchnorm:\n x = self.activation(self.bn_layers[idx](\n self.linear_layers[idx](x)))\n else:\n x = self.activation(self.linear_layers[idx](x))\n # For penultimate linear layer, apply dropout before batchnorm\n if self.dropout:\n if self.batchnorm:\n x = self.activation(self.bn_layers[-1](self.dropout(\n self.linear_layers[-1](x))))\n else:\n x = self.activation(self.dropout(self.linear_layers[-1](x)))\n else:\n if self.batchnorm:\n x = self.activation(self.bn_layers[-1](\n self.linear_layers[-1](x)))\n else:\n x = self.activation(self.linear_layers[-1](x))\n # TODO: Use dropout before batchnorm of penultimate linear layer\n x = self.last_linear_layer(x)\n # return F.log_softmax(x, dim=1)\n return x\n\n\nclass PointNetSeg(nn.Module):\n r\"\"\"PointNet segmenter. Uses the PointNet feature extractor, and\n adds per-point segmentation layers on top.\n .. note::\n If you use this code, please cite the original paper in addition to Kaolin.\n .. 
code-block::\n @article{qi2016pointnet,\n title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},\n author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},\n journal={arXiv preprint arXiv:1612.00593},\n year={2016}\n }\n Args:\n in_channels (int): Number of channels in the input pointcloud\n (default: 3, for X, Y, Z coordinates respectively).\n feat_size (int): Size of the global feature vector\n (default: 1024)\n num_classes (int): Number of classes (for the segmentation\n task) (default: 2).\n dropout (float): Dropout ratio to use (default: 0.). Note: If\n the ratio is set to 0., we altogether skip using a dropout\n layer.\n layer_dims (Iterable[int]): Sizes of fully connected layers\n to be used in the feature extractor (excluding the input and\n the output layer sizes). Note: the number of\n layers in the feature extractor is implicitly parsed from\n this variable.\n activation (function): Nonlinearity to be used as activation\n function after each batchnorm (default: F.relu)\n batchnorm (bool): Whether or not to use batchnorm layers\n (default: True)\n transposed_input (bool): Whether the input\"s second and third dimension\n is already transposed. If so, a transpose operation can be avoided,\n improving performance.\n See documentation of PointNetFeatExt for more details.\n Example:\n >>> pointnet = PointNetSeg(in_channels=6, feat_size=1024, \\\n feat_layer_dims=[32, 64, 256], \\\n classifier_layer_dims=[500, 200, 100])\n >>> x = torch.rand(5, 6, 30)\n >>> y = pointnet(x)\n >>> print(y.shape)\n \"\"\"\n def __init__(self,\n in_channels: int = 3,\n feat_size: int = 1024,\n num_classes: int = 2,\n dropout: float = 0.,\n classifier_layer_dims: Iterable[int] = [512, 256],\n feat_layer_dims: Iterable[int] = [64, 128],\n activation=F.relu,\n batchnorm: bool = True,\n transposed_input: bool = False):\n super().__init__()\n\n if not isinstance(num_classes, int):\n raise TypeError(f\"Argument num_classes must be of type int. \"\n f\"Got {type(num_classes)} instead.\")\n if not isinstance(dropout, float):\n raise TypeError(f\"Argument dropout must be of type float. \"\n f\"Got {type(dropout)} instead.\")\n if not hasattr(classifier_layer_dims, \"__iter__\"):\n raise TypeError(\"Argument classifier_layer_dims is not iterable.\")\n for idx, layer_dim in enumerate(classifier_layer_dims):\n if not isinstance(layer_dim, int):\n raise TypeError(\n f\"Expected classifier_layer_dims to contain \"\n f\"int. Found type {type(layer_dim)} at index {idx}.\")\n\n # Add feat_size to the head of classifier_layer_dims (the output of\n # the PointNet feature extractor has number of elements equal to\n # has number of channels equal to `in_channels`).\n if not isinstance(classifier_layer_dims, list):\n classifier_layer_dims = list(classifier_layer_dims)\n classifier_layer_dims.insert(0, feat_size)\n\n # Note that `global_feat` MUST be set to False, for\n # segmentation tasks.\n self.feature_extractor = PointNetFeatExt(\n in_channels=in_channels,\n feat_size=feat_size,\n layer_dims=feat_layer_dims,\n global_feat=False,\n activation=activation,\n batchnorm=batchnorm,\n transposed_input=transposed_input)\n\n # Compute the dimensionality of local features\n # Local feature size = (global feature size) + (feature size\n # from the output of the first layer of feature extractor)\n # Note: In self.feature_extractor, we manually append in_channels\n # to the head of feat_layer_dims. 
So, we use index 1 of\n # feat_layer_dims in the below line, to compute local_feat_size,\n # and not index 0.\n self.local_feat_size = feat_size + feat_layer_dims[1]\n\n self.conv_layers = nn.ModuleList()\n if batchnorm:\n self.bn_layers = nn.ModuleList()\n # First classifier layer\n self.conv_layers.append(\n nn.Conv1d(self.local_feat_size, classifier_layer_dims[0], 1))\n if batchnorm:\n self.bn_layers.append(nn.BatchNorm1d(classifier_layer_dims[0]))\n for idx in range(len(classifier_layer_dims) - 1):\n self.conv_layers.append(\n nn.Conv1d(classifier_layer_dims[idx],\n classifier_layer_dims[idx + 1], 1))\n if batchnorm:\n self.bn_layers.append(\n nn.BatchNorm1d(classifier_layer_dims[idx + 1]))\n\n self.last_conv_layer = nn.Conv1d(classifier_layer_dims[-1],\n num_classes, 1)\n\n # Store activation as a class attribute\n self.activation = activation\n\n # Store the number of classes as an attribute\n self.num_classes = num_classes\n\n # Store whether or not to use batchnorm as a class attribute\n self.batchnorm = batchnorm\n\n self.transposed_input = transposed_input\n\n def forward(self, x):\n r\"\"\"Forward pass through the PointNet segmentation model.\n Args:\n x (torch.Tensor): Tensor representing a pointcloud\n shape: :math:`B \\times N \\times D`, where :math:`B`\n is the batchsize, :math:`N` is the number of points\n in the pointcloud, and :math:`D` is the dimensionality\n of each point in the pointcloud.\n If self.transposed_input is True, then the shape is\n :math:`B \\times D \\times N`.\n \"\"\"\n batchsize = x.shape[0]\n num_points = x.shape[2] if self.transposed_input else x.shape[1]\n x = self.feature_extractor(x)\n for idx in range(len(self.conv_layers)):\n if self.batchnorm:\n x = self.activation(self.bn_layers[idx](\n self.conv_layers[idx](x)))\n else:\n x = self.activation(self.conv_layers[idx](x))\n x = self.last_conv_layer(x)\n x = x.transpose(2, 1).contiguous()\n # x = F.log_softmax(x.view(-1, self.num_classes), dim=-1)\n return x.view(batchsize, num_points, self.num_classes)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.ModuleList", "torch.nn.Conv1d", "torch.nn.BatchNorm1d" ] ]
gcslui/Money-generator
[ "1b9e40296d30851344bb2bf06ad58ecf2e37d4fc" ]
[ "src/visualize.py" ]
[ "import matplotlib.pyplot as plt\nimport mplfinance as mpf\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nfrom src.db_default import DB_ASSUMED_TZ, DB_FROZEN_VARIANTS\nfrom src.db_class import FinanceDB\n\n\ndef plot_timeseries(df, ticker, column='close', interval='minutes'):\n\n MAP_COLUMN_TO_LABEL = {\n 'open': 'Price (open)',\n 'high': 'Price (high)',\n 'low': 'Price (low)',\n 'close': 'Price (close)',\n 'volume': 'Volume (units)',\n }\n\n assert column in MAP_COLUMN_TO_LABEL.keys()\n ylabel = MAP_COLUMN_TO_LABEL[column]\n title = '%s (%s) for interval: %s' % (ticker, column, interval)\n\n plt.plot(np.arange(len(df)), df[column])\n plt.title(title); plt.ylabel(ylabel)\n plt.show()\n\n df[column].plot()\n plt.title(title); plt.ylabel(ylabel)\n plt.show()\n return\n\n\ndef plot_timeseries_fancy(df, ticker, style='yahoo', interval='minutes', vol=True, start=None, end=None):\n \"\"\" Uses mplfinance package to make OHLC candle plot with volume subplot\n\n ticker: is either a string (to a DB ticker) or a pandas DataFrame\n if string: try reading the default database for that ticker; this fetches a DataFrame\n if DataFrame: no need to load the database\n\n If vol: plot volume subplot below primary plot\n style options:\n 'binance', 'blueskies', 'brasil', 'charles', 'checkers',\n 'classic', 'default', 'mike', 'nightclouds', 'sas',\n 'starsandstripes', 'yahoo']\n\n Inputs:\n - start, end: assumed to be datetime.datetime objects\n e.g. start = datetime.today() - timedelta(days=20)\n \"\"\"\n def validate_start_end_times(df, endpt, left_endpt=True):\n if endpt is not None:\n from pytz import timezone\n # enforce timezone of the specified start/end time\n if isinstance(endpt, pd._libs.tslibs.timestamps.Timestamp):\n assert endpt.tz.zone == DB_ASSUMED_TZ\n else:\n assert (isinstance(endpt, datetime) or isinstance(endpt, str))\n endpt = pd.to_datetime(endpt).tz_localize(DB_ASSUMED_TZ)\n # slice the dataframe\n if left_endpt:\n assert endpt >= df.index.min()\n df = df.loc[df.index > endpt]\n else:\n assert endpt <= df.index.max()\n df = df.loc[df.index < endpt]\n return df\n\n # slicing df based on datetime intervals\n # TODO because mpf is computing moving averages, maybe better to pass the whole df and use xlim\n df = validate_start_end_times(df, start, left_endpt=True)\n df = validate_start_end_times(df, end, left_endpt=False)\n\n kwargs = {'type': 'candle',\n 'style': style,\n 'volume': vol,\n 'title': '%s (interval: %s)' % (ticker, interval),\n 'ylabel': 'Price ($)',\n 'ylabel_lower': 'Volume',\n 'tz_localize': True,\n 'warn_too_much_data': int(1e6)}\n if interval == 'days':\n kwargs['mav'] = (50, 200)\n if interval == 'minutes':\n kwargs['mav'] = (15, 60)\n mpf.plot(df, **kwargs)\n return\n\n\ndef postprocess_db_timedata_per_ticker(df):\n \"\"\" Intended use: immediate postprocessing on a database fetch\n E.g.\n df = finance_db.get_daily_per_ticker(ticker)\n df = postprocess_db_timedata_per_ticker(df)\n Notes:\n - Removes 'adjusted_close' and 'security_ticker'\n - The index of the returned dataframe will be <class 'pandas._libs.tslibs.timestamps.Timestamp'>\n - Note: that class inherits from datetime.datetime\n \"\"\"\n df.drop(columns=['adjusted_close', 'security_ticker'], inplace=True)\n df.set_index(pd.DatetimeIndex(df['date']), inplace=True)\n df.index = df.index.tz_localize(DB_ASSUMED_TZ)\n return df\n\n\nif __name__ == '__main__':\n\n # specify which database and instantiate the FinanceDB class\n db_variant_label = 'v1'\n db_filename = 
DB_FROZEN_VARIANTS[db_variant_label]['db_filename']\n finance_db = FinanceDB(db_filename)\n\n # choose a ticker from the database\n ticker = 'MSFT' # e.g. 'AAPL', 'MSFT', 'CADUSD=X', 'BTC-USD'\n\n # plot daily data\n df = finance_db.get_daily_per_ticker(ticker)\n df = postprocess_db_timedata_per_ticker(df)\n plot_timeseries_fancy(df, ticker, interval='days', start='2020-01-01', end='2021-11-21')\n\n # plot minutely data\n df = finance_db.get_minutely_per_ticker(ticker)\n df = postprocess_db_timedata_per_ticker(df)\n minutely_start = df.index.max() - timedelta(days=10)\n minutely_end = df.index.max() - timedelta(days=8)\n plot_timeseries_fancy(df, ticker, interval='minutes', start=minutely_start, end=minutely_end)\n" ]
[ [ "pandas.to_datetime", "pandas.DatetimeIndex", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
wjjmjh/Cogent3-GitHub-CI
[ "c79c82c4b51f56be50f1079ddcdfcffaccaf80dd", "c79c82c4b51f56be50f1079ddcdfcffaccaf80dd" ]
[ "src/cogent3/evolve/likelihood_function.py", "src/cogent3/draw/drawable.py" ]
[ "#!/usr/bin/env python\n\nimport json\nimport random\n\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nimport numpy\n\nfrom cogent3.core.alignment import ArrayAlignment\nfrom cogent3.evolve import substitution_model\nfrom cogent3.evolve.simulate import AlignmentEvolver, random_sequence\nfrom cogent3.maths.matrix_exponential_integration import expected_number_subs\nfrom cogent3.maths.matrix_logarithm import is_generator_unique\nfrom cogent3.maths.measure import (\n paralinear_continuous_time,\n paralinear_discrete_time,\n)\nfrom cogent3.recalculation.definition import ParameterController\nfrom cogent3.util import table\nfrom cogent3.util.dict_array import DictArrayTemplate\nfrom cogent3.util.misc import adjusted_gt_minprob, get_object_provenance\n\n\n__author__ = \"Peter Maxwell\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\n \"Gavin Huttley\",\n \"Andrew Butterfield\",\n \"Peter Maxwell\",\n \"Matthew Wakefield\",\n \"Rob Knight\",\n \"Brett Easton\",\n \"Ben Kaehler\",\n \"Ananias Iliadis\",\n]\n__license__ = \"BSD-3\"\n__version__ = \"2020.2.7a\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n\n# cogent3.evolve.parameter_controller.LikelihoodParameterController tells the\n# recalculation framework to use this subclass rather than the generic\n# recalculation Calculator. It adds methods which are useful for examining\n# the parameter, psub, mprob and likelihood values after the optimisation is\n# complete.\n\n\ndef _get_keyed_rule_indices(rules):\n \"\"\"returns {frozesent((par_name, edge1, edge2, ..)): index}\"\"\"\n new = {}\n for i, rule in enumerate(rules):\n edges = rule.get(\"edges\", rule.get(\"edge\", None)) or []\n edges = [edges] if type(edges) == str else edges\n par_name = rule[\"par_name\"]\n key = frozenset([par_name] + edges)\n new[key] = i\n return new\n\n\ndef update_rule_value(rich, null):\n \"\"\"applies value from null rule to rich rule\"\"\"\n val_key = \"init\" if \"init\" in rich else \"value\"\n rich[val_key] = null.get(\"init\", null.get(\"value\"))\n return rich\n\n\ndef extend_rule_value(rich, nulls):\n \"\"\"creates new rich rules from edges in null rules\"\"\"\n val_key = \"init\" if \"init\" in rich else \"value\"\n rules = []\n for null in nulls:\n edges = null.get(\"edges\", null.get(\"edge\"))\n edges = [edges] if type(edges) == str else edges\n for edge in edges:\n rule = deepcopy(rich)\n rule[\"edge\"] = edge\n rule[val_key] = null.get(\"init\", null.get(\"value\"))\n rules.append(rule)\n return rules\n\n\ndef update_scoped_rules(rich, null):\n \"\"\"returns rich rules with values derived from those in null rules\"\"\"\n new_rules = []\n rich = deepcopy(rich)\n # we build a dict keyed by frozen set consisting of the param name\n # and affected edges. 
The dict value is the list index in original.\n richd = _get_keyed_rule_indices(rich)\n nulld = _get_keyed_rule_indices(null)\n common = set(richd) & set(nulld)\n # 1-to-1 mapping, just extract the param value\n for key in common:\n rule = update_rule_value(rich[richd[key]], null[nulld[key]])\n new_rules.append(rule)\n\n # following rules differing in scope\n rich_remainder = set(richd) - set(nulld)\n null_remainder = set(nulld) - set(richd)\n for rich_key in rich_remainder:\n matches = []\n rich_rule = rich[richd[rich_key]]\n pname = rich_rule[\"par_name\"]\n enames = rich_rule.get(\"edges\", rich_rule.get(\"edge\", None))\n if type(enames) == str:\n enames = [enames]\n enames = None if enames is None else set(enames)\n for null_key in null_remainder:\n null_rule = null[nulld[null_key]]\n if pname != null_rule[\"par_name\"]:\n continue\n # parameter now fully general\n if enames is None:\n matches.append(null_rule)\n continue\n\n null_enames = null_rule.get(\"edges\", null_rule.get(\"edge\", None))\n null_enames = None if null_enames is None else set(null_enames)\n if None in (enames, null_enames) or null_enames & enames:\n matches.append(null_rule)\n\n if enames is None: # rich rule is \"free\"\n new_rules.extend(extend_rule_value(rich_rule, matches))\n continue\n\n if len(matches) > 1 and enames is not None:\n raise ValueError(f\"{rich_key} has too many mappings {matches}\")\n\n match = matches[0]\n new_rules.append(update_rule_value(rich_rule, match))\n return new_rules\n\n\ndef _get_param_mapping(rich, simple):\n \"\"\"returns {simple_param_name: rich_param_name, ...}, the mapping of simple\n to rich parameters based on matrix coordinates\n \"\"\"\n assert len(rich) >= len(simple)\n simple_to_rich = defaultdict(set)\n rich_to_simple = defaultdict(set)\n for simple_param in simple:\n simple_coords = simple[simple_param]\n for rich_param in rich:\n rich_coords = rich[rich_param]\n if rich_coords <= simple_coords:\n simple_to_rich[simple_param].add(rich_param)\n rich_to_simple[rich_param].add(simple_param)\n\n for rich_param in rich_to_simple:\n simple_counterparts = rich_to_simple[rich_param]\n if len(simple_counterparts) == 1:\n continue\n\n sized_simple = [(len(simple[param]), param) for param in simple_counterparts]\n sized_simple.sort()\n if sized_simple[0][0] == sized_simple[1][0]:\n msg = \"%s and %s tied for matrix space\" % (\n sized_simple[0][1],\n sized_simple[1][1],\n )\n raise ValueError(msg)\n\n _, chosen = sized_simple.pop(0)\n rich_to_simple[rich_param] = [chosen]\n for _, simple_param in sized_simple:\n simple_to_rich[simple_param].pop(rich_param)\n\n return simple_to_rich\n\n\nclass _ParamProjection:\n \"\"\"projects parameter names, values between nested models\"\"\"\n\n def __init__(self, simple_model, rich_model, motif_probs, same=True):\n # construct following by calling the functions we wrote\n self._rich_coords = rich_model.get_param_matrix_coords(include_ref_cell=True)\n self._simple_coords = simple_model.get_param_matrix_coords(\n include_ref_cell=True\n )\n self._param_map = _get_param_mapping(self._rich_coords, self._simple_coords)\n self._same = same\n # end of constructing attributes\n self._motif_probs = motif_probs\n self._ref_val = self._set_ref_val(same)\n self.projected_rate = {False: self._rate_not_same}.get(same, self._rate_same)\n\n def _set_ref_val(self, same):\n \"\"\"returns the motif prob corresponding to the model reference cell\"\"\"\n if same:\n return 1\n else:\n i, j = list(self._rich_coords[\"ref_cell\"])[0]\n return 
self._motif_probs[j]\n\n def _rate_not_same(self, simple_param, mle):\n \"\"\"returns {rich_param: val, ...} from simple_param: val\"\"\"\n ref_val = self._ref_val\n new_terms = {}\n for rich_param in self._param_map[simple_param]:\n if rich_param == \"ref_cell\":\n continue\n for i, j in self._rich_coords[rich_param]:\n new_terms[rich_param] = self._motif_probs[j] * mle / ref_val\n\n return new_terms\n\n def _rate_same(self, simple_param, mle):\n new_terms = {}\n for rich_param in self._param_map[simple_param]:\n if rich_param == \"ref_cell\":\n continue\n for i, j in self._rich_coords[rich_param]:\n new_terms[rich_param] = mle\n return new_terms\n\n def update_param_rules(self, rules):\n new_rules = []\n if not self._same:\n rules = rules[:] + [dict(par_name=\"ref_cell\", init=1.0, edges=None)]\n\n for rule in rules:\n # get the param name, mle, call self.projected_rate\n name = rule[\"par_name\"]\n if name in (\"mprobs\", \"length\"):\n new_rules.append(rule)\n continue\n\n if rule.get(\"is_constant\", False):\n par_val_key = \"value\"\n else:\n par_val_key = \"init\"\n\n mle = rule[par_val_key]\n\n proj_rate = self.projected_rate(name, mle)\n for new_name, new_mle in proj_rate.items():\n rule_dict = rule.copy()\n rule_dict[\"par_name\"] = new_name\n # update it with the new parname and mle and append to new rules\n rule_dict[\"init\"] = new_mle\n new_rules.append(rule_dict)\n\n return new_rules\n\n\ndef compatible_likelihood_functions(lf1, lf2):\n \"\"\"returns True if all attributes of the two likelihood functions are compatible\n for mapping parameters, else raises ValueError or an AssertionError\"\"\"\n # tree's must have the same topology AND be oriented the same way\n # plus have the same edge names\n if len(lf1.bin_names) != 1 or len(lf1.bin_names) != len(lf2.bin_names):\n raise NotImplementedError(\"Too many bins\")\n if len(lf1.locus_names) != 1 or len(lf1.locus_names) != len(lf2.locus_names):\n raise NotImplementedError(\"Too many loci\")\n if lf1.model.get_motifs() != lf2.model.get_motifs():\n raise AssertionError(\"Motifs don't match\")\n if lf1.tree.get_newick(with_node_names=True) != lf2.tree.get_newick(\n with_node_names=True\n ):\n raise AssertionError(\"Topology, Orientation or node names don't match\")\n return True\n\n\nclass LikelihoodFunction(ParameterController):\n @property\n def lnL(self):\n \"\"\"log-likelihood\"\"\"\n return self.get_log_likelihood()\n\n def get_log_likelihood(self):\n return self.get_final_result()\n\n def get_all_psubs(self):\n \"\"\"returns all psubs as a dict keyed by used dimensions\"\"\"\n try:\n defn = self.defn_for[\"dsubs\"]\n except KeyError:\n defn = self.defn_for[\"psubs\"]\n\n used_dims = defn.used_dimensions()\n vdims = defn.valid_dimensions\n indices = [vdims.index(k) for k in used_dims if k in vdims]\n result = {}\n darr_template = DictArrayTemplate(self._motifs, self._motifs)\n for scope, index in defn.index.items():\n psub = defn.values[index]\n key = tuple(numpy.take(scope, indices))\n result[key] = darr_template.wrap(psub)\n return result\n\n def get_psub_for_edge(self, name, **kw):\n \"\"\"returns the substitution probability matrix for the named edge\"\"\"\n try:\n # For PartialyDiscretePsubsDefn\n array = self.get_param_value(\"dpsubs\", edge=name, **kw)\n except KeyError:\n array = self.get_param_value(\"psubs\", edge=name, **kw)\n return DictArrayTemplate(self._motifs, self._motifs).wrap(array)\n\n def get_all_rate_matrices(self, calibrated=True):\n \"\"\"returns all rate matrices (Q) as a dict, keyed by scope\n\n 
Parameters\n ----------\n calibrated : bool\n scales the rate matrix by branch length for each edge. If a rate\n heterogeneity model, then the matrix is further scaled by rate\n for a bin\n Returns\n -------\n If a single rate matrix, the key is an empty tuple\n \"\"\"\n defn = self.defn_for[\"Q\"]\n\n rate_het = self.defn_for.get(\"rate\", False)\n if rate_het:\n bin_index = rate_het.valid_dimensions.index(\"bin\")\n bin_names = [k[bin_index] for k in rate_het.index]\n bin_names = {n: i for i, n in enumerate(bin_names)}\n bin_index = defn.valid_dimensions.index(\"bin\")\n else:\n bin_names = None\n bin_index = None\n\n used_dims = defn.used_dimensions()\n edge_index = defn.valid_dimensions.index(\"edge\")\n\n indices = {defn.valid_dimensions.index(k) for k in used_dims}\n if not calibrated:\n indices.add(edge_index)\n\n if not calibrated and rate_het:\n indices.add(bin_index)\n\n indices = list(sorted(indices))\n result = {}\n darr_template = DictArrayTemplate(self._motifs, self._motifs)\n for scope, index in defn.index.items():\n q = defn.values[index] # this gives the appropriate Q\n # from scope we extract only the relevant dimensions\n key = tuple(numpy.take(scope, indices))\n q = q.copy()\n if not calibrated:\n length = self.get_param_value(\"length\", edge=scope[edge_index])\n if rate_het:\n bdex = bin_names[scope[bin_index]]\n rate = rate_het.values[bdex]\n length *= rate\n q *= length\n result[key] = darr_template.wrap(q)\n if not indices and calibrated:\n break # single rate matrix\n\n return result\n\n def get_rate_matrix_for_edge(self, name, calibrated=True, **kw):\n \"\"\"returns the rate matrix (Q) for the named edge\n\n If calibrated=False, expm(Q) will give the same result as\n get_psub_for_edge(name)\"\"\"\n try:\n array = self.get_param_value(\"Q\", edge=name, **kw)\n array = array.copy()\n if not calibrated:\n length = self.get_param_value(\"length\", edge=name, **kw)\n array *= length\n except KeyError as err:\n if err[0] == \"Q\" and name != \"Q\":\n raise RuntimeError(\"rate matrix not known by this model\")\n else:\n raise\n return DictArrayTemplate(self._motifs, self._motifs).wrap(array)\n\n def _getLikelihoodValuesSummedAcrossAnyBins(self, locus=None):\n if self.bin_names and len(self.bin_names) > 1:\n root_lhs = [\n self.get_param_value(\"lh\", locus=locus, bin=bin)\n for bin in self.bin_names\n ]\n bprobs = self.get_param_value(\"bprobs\")\n root_lh = bprobs.dot(root_lhs)\n else:\n root_lh = self.get_param_value(\"lh\", locus=locus)\n return root_lh\n\n def get_full_length_likelihoods(self, locus=None):\n \"\"\"Array of [site, motif] likelihoods from the root of the tree\"\"\"\n root_lh = self._getLikelihoodValuesSummedAcrossAnyBins(locus=locus)\n root_lht = self.get_param_value(\"root\", locus=locus)\n return root_lht.get_full_length_likelihoods(root_lh)\n\n def get_G_statistic(self, return_table=False, locus=None):\n \"\"\"Goodness-of-fit statistic derived from the unambiguous columns\"\"\"\n root_lh = self._getLikelihoodValuesSummedAcrossAnyBins(locus=locus)\n root_lht = self.get_param_value(\"root\", locus=locus)\n return root_lht.calc_G_statistic(root_lh, return_table)\n\n def reconstruct_ancestral_seqs(self, locus=None):\n \"\"\"returns a dict of DictArray objects containing probabilities\n of each alphabet state for each node in the tree.\n\n Parameters\n ----------\n locus\n a named locus\n\n \"\"\"\n result = {}\n array_template = None\n for restricted_edge in self._tree.get_edge_vector():\n if restricted_edge.istip():\n continue\n try:\n r = []\n for 
motif in range(len(self._motifs)):\n self.set_param_rule(\n \"fixed_motif\",\n value=motif,\n edge=restricted_edge.name,\n locus=locus,\n is_constant=True,\n )\n likelihoods = self.get_full_length_likelihoods(locus=locus)\n r.append(likelihoods)\n if array_template is None:\n array_template = DictArrayTemplate(\n likelihoods.shape[0], self._motifs\n )\n finally:\n self.set_param_rule(\n \"fixed_motif\",\n value=-1,\n edge=restricted_edge.name,\n locus=locus,\n is_constant=True,\n )\n # dict of site x motif arrays\n result[restricted_edge.name] = array_template.wrap(\n numpy.transpose(numpy.asarray(r))\n )\n return result\n\n def likely_ancestral_seqs(self, locus=None):\n \"\"\"Returns the most likely reconstructed ancestral sequences as an\n alignment.\n\n Parameters\n ----------\n locus\n a named locus\n\n \"\"\"\n prob_array = self.reconstruct_ancestral_seqs(locus=locus)\n seqs = []\n for edge, probs in list(prob_array.items()):\n seq = []\n for row in probs:\n by_p = [(p, state) for state, p in list(row.items())]\n seq.append(max(by_p)[1])\n seqs += [(edge, self.model.moltype.make_seq(\"\".join(seq)))]\n return ArrayAlignment(data=seqs, moltype=self.model.moltype)\n\n def get_bin_probs(self, locus=None):\n hmm = self.get_param_value(\"bindex\", locus=locus)\n lhs = [\n self.get_param_value(\"lh\", locus=locus, bin=bin) for bin in self.bin_names\n ]\n array = hmm.get_posterior_probs(*lhs)\n return DictArrayTemplate(self.bin_names, array.shape[1]).wrap(array)\n\n def _valuesForDimension(self, dim):\n # in support of __str__\n if dim == \"edge\":\n result = [e.name for e in self._tree.get_edge_vector()]\n elif dim == \"bin\":\n result = self.bin_names[:]\n elif dim == \"locus\":\n result = self.locus_names[:]\n elif dim.startswith(\"motif\"):\n result = self._mprob_motifs\n elif dim == \"position\":\n result = self.posn_names[:]\n else:\n raise KeyError(dim)\n return result\n\n def _valuesForDimensions(self, dims):\n # in support of __str__\n result = [[]]\n for dim in dims:\n new_result = []\n for r in result:\n for cat in self._valuesForDimension(dim):\n new_result.append(r + [cat])\n result = new_result\n return result\n\n def _for_display(self):\n \"\"\"processes statistics tables for display\"\"\"\n title = self._name if self._name else \"Likelihood function statistics\"\n result = []\n result += self.get_statistics(with_motif_probs=True, with_titles=True)\n for i, table in enumerate(result):\n if (\n \"motif\" in table.title and table.shape[1] == 2 and table.shape[0] >= 60\n ): # just sort codon motif probs, then truncate\n table = table.sorted(columns=\"motif\")\n data = table.tolist()\n data = data[:5] + [[\"...\", \"...\"]] + data[-5:]\n table = table.__class__(\n header=table.header,\n rows=data,\n digits=table._digits,\n title=table.title,\n )\n result[i] = table\n return title, result\n\n def _repr_html_(self):\n \"\"\"for jupyter notebook display\"\"\"\n try:\n lnL = \"<p>log-likelihood = %.4f</p>\" % self.get_log_likelihood()\n except ValueError:\n # alignment probably not yet set\n lnL = \"\"\n\n nfp = \"<p>number of free parameters = %d</p>\" % self.get_num_free_params()\n title, results = self._for_display()\n for i, table in enumerate(results):\n table.title = table.title.capitalize()\n results[i] = table._repr_html_(include_shape=False)\n results = [\"<h4>%s</h4>\" % title, lnL, nfp] + results\n return \"\\n\".join(results)\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n title, results = self._for_display()\n\n try:\n lnL = \"log-likelihood = %.4f\" % 
self.get_log_likelihood()\n except ValueError:\n # alignment probably not yet set\n lnL = None\n\n nfp = \"number of free parameters = %d\" % self.get_num_free_params()\n for table in results:\n table.title = \"\"\n\n if lnL:\n results = [title, lnL, nfp] + results\n else:\n results = [title, nfp] + results\n\n return \"\\n\".join(map(str, results))\n\n def get_annotated_tree(self, length_as=None):\n \"\"\"returns tree with model attributes on node.params\n\n length_as : str or None\n replaces 'length' param with either 'ENS' or 'paralinear'.\n 'ENS' is the expected number of substitution, (which will be\n different to standard length if the substitution model is\n non-stationary). 'paralinear' is the measure of Lake 1994.\n\n The other measures are always available in the params dict of each\n node.\n \"\"\"\n from cogent3.evolve.ns_substitution_model import DiscreteSubstitutionModel\n\n is_discrete = isinstance(self.model, DiscreteSubstitutionModel)\n\n if is_discrete and not length_as == \"paralinear\":\n raise ValueError(f\"{length_as} invalid for discrete time process\")\n\n assert length_as in (\"ENS\", \"paralinear\", None)\n d = self.get_param_value_dict([\"edge\"])\n lengths = d.pop(\"length\", None)\n mprobs = self.get_motif_probs_by_node()\n if not is_discrete:\n ens = self.get_lengths_as_ens(motif_probs=mprobs)\n\n plin = self.get_paralinear_metric(motif_probs=mprobs)\n if length_as == \"ENS\":\n lengths = ens\n elif length_as == \"paralinear\":\n lengths = plin\n\n tree = self._tree.deepcopy()\n for edge in tree.get_edge_vector():\n if edge.name == \"root\":\n edge.params[\"mprobs\"] = mprobs[edge.name].to_dict()\n continue\n\n if not is_discrete:\n edge.params[\"ENS\"] = ens[edge.name]\n\n edge.params[\"length\"] = lengths[edge.name]\n edge.params[\"paralinear\"] = plin[edge.name]\n edge.params[\"mprobs\"] = mprobs[edge.name].to_dict()\n for par in d:\n val = d[par][edge.name]\n if par == length_as:\n val = ens[edge.name]\n edge.params[par] = val\n\n return tree\n\n def get_motif_probs(self, edge=None, bin=None, locus=None, position=None):\n \"\"\"\n Parameters\n ----------\n edge : str\n name of edge\n bin : int or str\n name of bin\n locus : str\n name of locus\n position : int or str\n name of position\n\n Returns\n -------\n If 1D, returns DictArray, else a dict of DictArray\n \"\"\"\n param_names = self.get_param_names()\n mprob_name = [n for n in param_names if \"mprob\" in n][0]\n dims = tuple(self.get_used_dimensions(mprob_name))\n mprobs = self.get_param_value_dict(dimensions=dims, params=[mprob_name])\n if len(dims) == 2:\n var = [c for c in dims if c != mprob_name][0]\n key = locals().get(var, None)\n mprobs = mprobs[mprob_name]\n if key is not None:\n mprobs = mprobs.get(str(key), mprobs.get(key))\n mprobs = {mprob_name: mprobs}\n\n # these can fall below the minimum allowed value due to\n # rounding errors, so I adjust these\n for k, value in mprobs.items():\n value.array = adjusted_gt_minprob(value.array, minprob=1e-6)\n\n if len(mprobs) == 1:\n mprobs = mprobs[mprob_name]\n\n return mprobs\n\n def get_bin_prior_probs(self, locus=None):\n bin_probs_array = self.get_param_value(\"bprobs\", locus=locus)\n return DictArrayTemplate(self.bin_names).wrap(bin_probs_array)\n\n def get_scaled_lengths(self, predicate, bin=None, locus=None):\n \"\"\"A dictionary of {scale:{edge:length}}\"\"\"\n if not hasattr(self._model, \"get_scaled_lengths_from_Q\"):\n return {}\n\n get_value_of = self.get_param_value\n value_of_kw = dict(locus=locus)\n\n if bin is None:\n bin_names = 
self.bin_names\n else:\n bin_names = [bin]\n\n if len(bin_names) == 1:\n bprobs = [1.0]\n else:\n bprobs = get_value_of(\"bprobs\", **value_of_kw)\n\n mprobs = [get_value_of(\"mprobs\", bin=b, **value_of_kw) for b in bin_names]\n\n scaled_lengths = {}\n for edge in self._tree.get_edge_vector():\n if edge.isroot():\n continue\n Qs = [\n get_value_of(\"Qd\", bin=b, edge=edge.name, **value_of_kw).Q\n for b in bin_names\n ]\n length = get_value_of(\"length\", edge=edge.name, **value_of_kw)\n scaled_lengths[edge.name] = length * self._model.get_scale_from_Qs(\n Qs, bprobs, mprobs, predicate\n )\n return scaled_lengths\n\n def get_paralinear_metric(self, motif_probs=None):\n \"\"\"returns {edge.name: paralinear, ...}\n Parameters\n ----------\n motif_probs : dict or DictArray\n an item for each edge of the tree. Computed if not provided.\n \"\"\"\n from cogent3.evolve.ns_substitution_model import DiscreteSubstitutionModel\n\n is_discrete = isinstance(self.model, DiscreteSubstitutionModel)\n\n if motif_probs is None:\n motif_probs = self.get_motif_probs_by_node()\n plin = {}\n for edge in self.tree.get_edge_vector(include_root=False):\n parent_name = edge.parent.name\n pi = motif_probs[parent_name]\n P = self.get_psub_for_edge(edge.name)\n if is_discrete:\n para = paralinear_discrete_time(P.array, pi.array)\n else:\n Q = self.get_rate_matrix_for_edge(edge.name, calibrated=False)\n para = paralinear_continuous_time(P.array, pi.array, Q.array)\n\n plin[edge.name] = para\n\n return plin\n\n def get_lengths_as_ens(self, motif_probs=None):\n \"\"\"returns {edge.name: ens, ...} where ens is the expected number of substitutions\n\n for a stationary Markov process, this is just branch length\n Parameters\n ----------\n motif_probs : dict or DictArray\n an item for each edge of the tree. Computed if not provided.\n \"\"\"\n if motif_probs is None:\n motif_probs = self.get_motif_probs_by_node()\n node_names = self.tree.get_node_names()\n node_names.remove(\"root\")\n lengths = {e: self.get_param_value(\"length\", edge=e) for e in node_names}\n if not isinstance(self.model, substitution_model.Stationary):\n ens = {}\n for e in node_names:\n Q = self.get_rate_matrix_for_edge(e)\n length = expected_number_subs(motif_probs[e], Q, lengths[e])\n ens[e] = length\n\n lengths = ens\n\n return lengths\n\n def get_param_rules(self):\n \"\"\"returns the [{rule}, ..] 
that would allow reconstruction\"\"\"\n # markov model rate terms\n rules = []\n param_names = self.get_param_names()\n for param_name in param_names:\n defn = self.defn_for[param_name]\n try:\n rules.extend(defn.get_param_rules())\n except AttributeError:\n # aggregate params, like those deriving from gamma shaped rates\n pass\n\n return rules\n\n def get_statistics(self, with_motif_probs=True, with_titles=True):\n \"\"\"returns the parameter values as tables/dict\n\n Parameters\n ----------\n with_motif_probs\n include the motif probability table\n with_titles\n include a title for each table based on it's\n dimension\n\n \"\"\"\n result = []\n group = {}\n param_names = self.get_param_names()\n\n mprob_name = [n for n in param_names if \"mprob\" in n]\n if mprob_name:\n mprob_name = mprob_name[0]\n else:\n mprob_name = \"\"\n\n if not with_motif_probs:\n param_names.remove(mprob_name)\n\n for param in param_names:\n dims = tuple(self.get_used_dimensions(param))\n if dims not in group:\n group[dims] = []\n group[dims].append(param)\n table_order = list(group.keys())\n table_order.sort()\n for table_dims in table_order:\n raw_table = self.get_param_value_dict(\n dimensions=table_dims, params=group[table_dims]\n )\n param_names = group[table_dims]\n param_names.sort()\n if table_dims == (\"edge\",):\n if \"length\" in param_names:\n param_names.remove(\"length\")\n param_names.insert(0, \"length\")\n raw_table[\"parent\"] = dict(\n [\n (e.name, e.parent.name)\n for e in self._tree.get_edge_vector()\n if not e.isroot()\n ]\n )\n param_names.insert(0, \"parent\")\n list_table = []\n heading_names = list(table_dims) + param_names\n row_order = self._valuesForDimensions(table_dims)\n for scope in row_order:\n row = {}\n row_used = False\n for param in param_names:\n d = raw_table[param]\n try:\n for part in scope:\n d = d[part]\n except KeyError:\n d = \"NA\"\n else:\n row_used = True\n row[param] = d\n if row_used:\n row.update(dict(list(zip(table_dims, scope))))\n row = [row[k] for k in heading_names]\n list_table.append(row)\n if table_dims:\n title = [\"\", \"%s params\" % \" \".join(table_dims)][with_titles]\n else:\n title = [\"\", \"global params\"][with_titles]\n row_ids = None\n stat_table = table.Table(\n heading_names,\n list_table,\n max_width=80,\n index=row_ids,\n title=title,\n **self._format,\n )\n if group[table_dims] == [mprob_name]:\n # if stat_table.shape\n # if mprobs, we use the motifs as header\n motifs = list(sorted(set(stat_table.tolist(\"motif\"))))\n if stat_table.shape[1] == 2:\n motif_prob = dict(stat_table.tolist())\n heading_names = motifs\n list_table = [motif_prob[m] for m in motifs]\n list_table = [list_table]\n elif stat_table.shape[1] == 3:\n rows = []\n other_col = [\n c\n for c in stat_table.header\n if \"motif\" not in c and \"mprobs\" not in c\n ][0]\n for val in stat_table.distinct_values(other_col):\n subtable = stat_table.filtered(\n lambda x: x == val, columns=other_col\n )\n motif_prob = dict(\n subtable.tolist(\n [c for c in stat_table.header if c != other_col]\n )\n )\n rows.append([val] + [motif_prob[m] for m in motifs])\n heading_names = [other_col] + motifs\n list_table = rows\n stat_table = table.Table(\n heading_names, list_table, max_width=80, title=title, **self._format\n )\n\n result.append(stat_table)\n return result\n\n def to_rich_dict(self):\n \"\"\"returns detailed info on object, used by to_json\"\"\"\n data = self._serialisable.copy()\n for key in (\"model\", \"tree\"):\n del data[key]\n\n tree = self.tree.to_rich_dict()\n edge_attr 
= tree[\"edge_attributes\"]\n for edge in edge_attr:\n if edge == \"root\":\n continue\n try:\n edge_attr[edge][\"length\"] = self.get_param_value(\"length\", edge=edge)\n except KeyError:\n # probably discrete-time model\n edge_attr[edge][\"length\"] = None\n\n model = self._model.to_rich_dict(for_pickle=False)\n alignment = self.get_param_value(\"alignment\").to_rich_dict()\n mprobs = self.get_motif_probs().to_dict()\n DLC = self.all_psubs_DLC()\n try:\n unique_Q = self.all_rate_matrices_unique()\n except Exception:\n # there's a mix of assertions\n # for \"storage\", make this indeterminate in those cases\n unique_Q = None\n\n data = dict(\n model=model,\n tree=tree,\n alignment=alignment,\n likelihood_construction=data,\n param_rules=self.get_param_rules(),\n lnL=self.get_log_likelihood(),\n nfp=self.get_num_free_params(),\n motif_probs=mprobs,\n DLC=DLC,\n unique_Q=unique_Q,\n type=get_object_provenance(self),\n name=self.get_name(),\n version=__version__,\n )\n return data\n\n def to_json(self):\n data = self.to_rich_dict()\n data = json.dumps(data)\n return data\n\n # For tests. Compat with old LF interface\n def set_name(self, name):\n self._name = name\n\n def get_name(self):\n return self._name or \"unnamed\"\n\n def set_tables_format(self, space=4, digits=4):\n \"\"\"sets display properties for statistics tables. This affects results\n of str(lf) too.\"\"\"\n space = [space, 4][type(space) != int]\n digits = [digits, 4][type(digits) != int]\n self._format = dict(space=space, digits=digits)\n\n def _get_motif_probs_by_node_tr(self, edges=None, bin=None, locus=None):\n \"\"\"returns motif probs by node for time-reversible models\"\"\"\n mprob_rules = [r for r in self.get_param_rules() if \"mprob\" in r[\"par_name\"]]\n if len(mprob_rules) > 1 or self.model.mprob_model == \"monomers\":\n raise NotImplementedError\n\n mprobs = self.get_motif_probs()\n if len(mprobs) != len(self.motifs):\n # a Muse and Gaut model\n expanded = numpy.zeros(len(self.motifs), dtype=float)\n for i, motif in enumerate(self.motifs):\n val = 1.0\n for b in motif:\n val *= mprobs[b]\n expanded[i] = val\n mprobs = expanded / expanded.sum()\n else:\n mprobs = [mprobs[m] for m in self.motifs]\n edges = []\n values = []\n for e in self.tree.postorder():\n edges.append(e.name)\n values.append(mprobs)\n\n return DictArrayTemplate(edges, self.motifs).wrap(values)\n\n def get_motif_probs_by_node(self, edges=None, bin=None, locus=None):\n from cogent3.evolve.substitution_model import TimeReversible\n\n if isinstance(self.model, TimeReversible):\n return self._get_motif_probs_by_node_tr(edges=edges, bin=bin, locus=locus)\n\n kw = dict(bin=bin, locus=locus)\n mprobs = self.get_param_value(\"mprobs\", **kw)\n mprobs = self._model.calc_word_probs(mprobs)\n result = self._nodeMotifProbs(self._tree, mprobs, kw)\n if edges is None:\n edges = [name for (name, m) in result]\n result = dict(result)\n values = [result[name] for name in edges]\n return DictArrayTemplate(edges, self._mprob_motifs).wrap(values)\n\n def _nodeMotifProbs(self, tree, mprobs, kw):\n result = [(tree.name, mprobs)]\n for child in tree.children:\n psub = self.get_psub_for_edge(child.name, **kw)\n child_mprobs = numpy.dot(mprobs, psub)\n result.extend(self._nodeMotifProbs(child, child_mprobs, kw))\n return result\n\n def simulate_alignment(\n self,\n sequence_length=None,\n random_series=None,\n exclude_internal=True,\n locus=None,\n seed=None,\n root_sequence=None,\n ):\n \"\"\"\n Returns an alignment of simulated sequences with key's corresponding to\n 
names from the current attached alignment.\n\n Parameters\n ----------\n sequence_length\n the legnth of the alignment to be simulated,\n default is the length of the attached alignment.\n random_series\n a random number generator.\n exclude_internal\n if True, only sequences for tips are returned.\n root_sequence\n a sequence from which all others evolve.\n\n \"\"\"\n\n if sequence_length is None:\n lht = self.get_param_value(\"lht\", locus=locus)\n sequence_length = len(lht.index)\n leaves = self.get_param_value(\"leaf_likelihoods\", locus=locus)\n orig_ambig = {}\n for (seq_name, leaf) in list(leaves.items()):\n orig_ambig[seq_name] = leaf.get_ambiguous_positions()\n else:\n orig_ambig = {}\n\n if random_series is None:\n random_series = random.Random()\n random_series.seed(seed)\n\n def psub_for(edge, bin):\n return self.get_psub_for_edge(edge, bin=bin, locus=locus)\n\n if len(self.bin_names) > 1:\n hmm = self.get_param_value(\"bdist\", locus=locus)\n site_bins = hmm.emit(sequence_length, random_series)\n else:\n site_bins = numpy.zeros([sequence_length], int)\n\n evolver = AlignmentEvolver(\n random_series,\n orig_ambig,\n exclude_internal,\n self.bin_names,\n site_bins,\n psub_for,\n self._motifs,\n )\n\n if root_sequence is not None: # we convert to a vector of motifs\n if isinstance(root_sequence, str):\n root_sequence = self._model.moltype.make_seq(root_sequence)\n motif_len = self._model.get_alphabet().get_motif_len()\n root_sequence = root_sequence.get_in_motif_size(motif_len)\n else:\n mprobs = self.get_param_value(\"mprobs\", locus=locus, edge=\"root\")\n mprobs = self._model.calc_word_probs(mprobs)\n mprobs = dict((m, p) for (m, p) in zip(self._motifs, mprobs))\n root_sequence = random_sequence(random_series, mprobs, sequence_length)\n\n simulated_sequences = evolver(self._tree, root_sequence)\n\n return ArrayAlignment(data=simulated_sequences, moltype=self._model.moltype)\n\n def all_psubs_DLC(self):\n \"\"\"Returns True if every Psub matrix is Diagonal Largest in Column\"\"\"\n all_psubs = self.get_all_psubs()\n for P in all_psubs.values():\n if (P.to_array().diagonal() < P).any():\n return False\n return True\n\n def all_rate_matrices_unique(self):\n \"\"\"Returns True if every rate matrix is unique for its Psub matrix\"\"\"\n # get all possible Q, as products of t, and any rate-het terms\n all_Q = self.get_all_rate_matrices(calibrated=False)\n for Q in all_Q.values():\n Q = Q.to_array()\n if not is_generator_unique(Q):\n return False\n return True\n\n def initialise_from_nested(self, nested_lf):\n from cogent3.evolve.substitution_model import TimeReversible\n\n assert (\n self.get_num_free_params() > nested_lf.get_num_free_params()\n ), \"wrong order for likelihood functions\"\n compatible_likelihood_functions(self, nested_lf)\n\n same = (\n isinstance(self.model, TimeReversible)\n and isinstance(nested_lf.model, TimeReversible)\n ) or (\n not isinstance(self.model, TimeReversible)\n and not isinstance(nested_lf.model, TimeReversible)\n )\n\n mprobs = nested_lf.get_motif_probs()\n edge_names = self.tree.get_node_names()\n edge_names.remove(\"root\")\n param_proj = _ParamProjection(nested_lf.model, self.model, mprobs, same=same)\n param_rules = nested_lf.get_param_rules()\n param_rules = param_proj.update_param_rules(param_rules)\n my_rules = self.get_param_rules()\n my_rules = update_scoped_rules(my_rules, param_rules)\n self.apply_param_rules(my_rules)\n return\n", "import os\nimport pathlib\n\nimport numpy\n\nfrom cogent3.util.misc import extend_docstring_from\nfrom 
cogent3.util.union_dict import UnionDict\nfrom cogent3.util.warning import deprecated\n\n\n__author__ = \"Rahul Ghangas and Gavin Huttley\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Rahul Ghangas\", \"Gavin Huttley\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.2.7a\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"[email protected]\"\n__status__ = \"Alpha\"\n\n# user specified environment variable for plotly renderer\nPLOTLY_RENDERER = os.environ.get(\"PLOTLY_RENDERER\", None)\n\n\ndef get_domain(total, element, is_y, space=0.01):\n \"\"\"returns evenly spaced domain for an element in a grid plot\n\n Parameters\n ----------\n total : int\n the total number of elements on the axis\n element : int\n the element number to compute the domain for\n is_y : bool\n if True, this is for a y-coordinate domain. This is reversed\n so the result is in cartesian, not array, coordinates\n space : float\n the separation between elements\n \"\"\"\n if total == 1:\n return [0, 1]\n\n if element > total - 1:\n raise ValueError(f\"{element} index too big for {total}\")\n\n per_element = 1 / total\n space = min(space / 2, per_element / 10)\n bounds = [per_element * i for i in range(total + 1)]\n domains = [\n (bounds[k] + space, bounds[k + 1] - space) for k in range(len(bounds) - 1)\n ]\n if is_y:\n element = total - element - 1\n\n return domains[element]\n\n\ndef _customise_sphinx_gallery_renderer():\n # this is an ugly hack to get around plotly's NOT robust handling of script path\n # for automated file naming\n import inspect\n from plotly.io._renderers import renderers\n from plotly.io import _base_renderers as base_render\n\n class SphinxGalleryRenderer(base_render.ExternalRenderer):\n def render(self, fig_dict):\n # use the environment variable\n # DOCUTILSCONFIG to get the location of the sphinx root doc dir\n # and select the stack filename whose path is a sibling directory\n # based on the maxinum number of matches to the root path\n sphinx_root = pathlib.Path(os.environ.get(\"DOCUTILSCONFIG\", \"\")).absolute()\n sphinx_root = sphinx_root.resolve()\n stack = inspect.stack()\n max_match = 0\n for level in stack:\n # parent directory\n path = pathlib.Path(level.filename).absolute().resolve()\n for i, (a, b) in enumerate(zip(path.parts, sphinx_root.parts)):\n if a != b:\n break\n\n if i > max_match:\n max_match = i\n filename = str(path)\n\n filename_root, _ = os.path.splitext(filename)\n filename_html = filename_root + \".html\"\n filename_png = filename_root + \".png\"\n figure = base_render.return_figure_from_figure_or_data(fig_dict, True)\n _ = base_render.write_html(fig_dict, file=filename_html)\n base_render.write_image(figure, filename_png)\n\n renderers[\"sphinx_gallery\"] = SphinxGalleryRenderer()\n\n\ndef _show_(cls, renderer=None, **kwargs):\n \"\"\"display figure\n\n Parameters\n ----------\n renderer : str\n names of renderers that control ability for display. If not specified,\n looks for PLOTLY_RENDERER environment variable, otherwise defaults to\n 'notebook_connected+plotly_mimetype'. This setting supports display in\n JupyterLab and Jupyter Notebook, while keeping notebook size small (relies\n on internet connection for getting the javascript). 
See\n help(plotly.io.renderer) for more options.\n kwargs\n other arguments for plotly.io.show\n \"\"\"\n from plotly.io import show\n\n if renderer is None and PLOTLY_RENDERER is None:\n renderer = \"notebook_connected+plotly_mimetype\"\n elif renderer is None:\n renderer = PLOTLY_RENDERER\n\n if renderer == \"sphinx_gallery\":\n _customise_sphinx_gallery_renderer()\n\n kwargs[\"renderer\"] = renderer\n drawable = getattr(cls, \"drawable\", None) or cls\n fig = getattr(drawable, \"figure\", None)\n if fig is None:\n raise TypeError(f\"{cls} does not have a drawable or figure attribute\")\n\n width = kwargs.get(\"width\", fig.layout.width)\n height = kwargs.get(\"height\", fig.layout.height)\n kwargs[\"width\"] = fig.layout.width = width\n kwargs[\"height\"] = fig.layout.height = height\n show(fig, **kwargs)\n\n\ndef _iplot_(cls, width=None, height=None):\n from plotly.offline import iplot as _iplot\n\n layout = {}\n if width:\n layout[\"width\"] = width\n if height:\n layout[\"height\"] = height\n if layout:\n cls.drawable.layout |= dict(layout)\n _iplot(cls.drawable.figure)\n\n\ndef bind_drawable(obj, drawable):\n \"\"\"binds drawable\"\"\"\n from types import MethodType\n\n obj.drawable = drawable\n obj.iplot = MethodType(_iplot_, obj)\n obj.show = MethodType(_show_, obj)\n return obj\n\n\nclass Drawable:\n \"\"\"container object for Plotly figures\"\"\"\n\n def __init__(\n self,\n title=None,\n traces=None,\n width=None,\n height=None,\n showlegend=True,\n visible_axes=True,\n layout=None,\n xtitle=None,\n ytitle=None,\n ):\n if traces is None:\n self._traces = []\n else:\n try:\n self._traces = [UnionDict(trace) for trace in traces]\n except ValueError as msg:\n raise TypeError(f\"expected a series of dicts, got {traces}\")\n title = title if title is None else dict(text=title)\n self._default_layout = UnionDict(\n font=dict(family=\"Balto\", size=14),\n autosize=False,\n hovermode=\"closest\",\n template=None,\n plot_bgcolor=None,\n margin=dict(l=50, r=50, t=50, b=50, pad=4),\n xaxis=dict(visible=visible_axes),\n yaxis=dict(visible=visible_axes),\n title=title,\n width=width,\n height=height,\n showlegend=showlegend,\n )\n layout = layout or {}\n self.layout = UnionDict(self._default_layout)\n self.layout |= layout\n # constructor layout value over-rides\n overrides = UnionDict(\n title=title,\n width=width,\n height=height,\n showlegend=showlegend,\n xaxis=dict(visible=visible_axes),\n yaxis=dict(visible=visible_axes),\n )\n self.layout |= overrides\n self.xtitle = xtitle\n self.ytitle = ytitle\n self.title = title\n\n def _repr_html_(self):\n self.show()\n\n @property\n def traces(self):\n return self._traces\n\n def add_trace(self, trace):\n self.traces.append(UnionDict(trace))\n\n def bound_to(self, obj):\n \"\"\"returns obj with self bound to it\"\"\"\n return bind_drawable(obj, self)\n\n @property\n def figure(self):\n if not self.traces and hasattr(self, \"_build_fig\"):\n self._build_fig()\n\n traces = self.traces if self.traces else [{}]\n\n if self.xtitle:\n xtitle = self.xtitle\n else:\n xtitle = self.layout.xaxis.get(\"title\", None)\n\n if self.ytitle:\n ytitle = self.ytitle\n else:\n ytitle = self.layout.yaxis.get(\"title\", None)\n\n self.layout.xaxis.title = xtitle\n self.layout.yaxis.title = ytitle\n return UnionDict(data=traces, layout=self.layout)\n\n def iplot(self, *args, **kwargs):\n from plotly.offline import iplot as _iplot\n\n deprecated(\"method\", \"iplot\", \"show\", \"2020.6\")\n _iplot(self.figure, *args, **kwargs)\n\n @extend_docstring_from(_show_)\n 
def show(self, renderer=None, **kwargs):\n _show_(self, renderer, **kwargs)\n\n def write(self, path, **kwargs):\n \"\"\"writes static image file, suffix dictates format\"\"\"\n from plotly.io import write_image\n\n fig = self.figure\n kwargs[\"width\"] = kwargs.get(\"width\", fig.layout.width)\n kwargs[\"height\"] = kwargs.get(\"height\", fig.layout.height)\n\n write_image(fig, path, **kwargs)\n\n def to_image(self, format=\"png\", **kwargs):\n \"\"\"creates static image, suffix dictates format\"\"\"\n from plotly.io import to_image\n\n fig = self.figure\n kwargs[\"width\"] = kwargs.get(\"width\", fig.layout.width)\n kwargs[\"height\"] = kwargs.get(\"height\", fig.layout.height)\n\n return to_image(fig, format=format, **kwargs)\n\n @property\n def fig_width(self):\n \"\"\"figure width, also settable via .layout.width\"\"\"\n return self.layout.width\n\n @fig_width.setter\n def fig_width(self, width):\n self.layout.width = width\n\n @property\n def fig_height(self):\n \"\"\"figure height, also settable via .layout.height\"\"\"\n return self.layout.height\n\n @fig_height.setter\n def fig_height(self, height):\n self.layout.height = height\n\n\n_ticks_off = (\n (\"showticklabels\", False),\n (\"mirror\", True),\n (\"showgrid\", False),\n (\"showline\", True),\n (\"ticks\", \"\"),\n)\n\n_ticks_on = (\n (\"showticklabels\", True),\n (\"mirror\", True),\n (\"showgrid\", False),\n (\"showline\", True),\n)\n\n\nclass AnnotatedDrawable(Drawable):\n \"\"\"supports drawing with left and bottom tracks of annotations\"\"\"\n\n def __init__(\n self,\n core,\n left_track=None,\n bottom_track=None,\n xtitle=None,\n ytitle=None,\n title=None,\n xrange=None,\n yrange=None,\n width=500,\n height=500,\n layout=None,\n ):\n super(AnnotatedDrawable, self).__init__(\n visible_axes=True,\n showlegend=True,\n width=width,\n height=height,\n layout=layout,\n xtitle=xtitle,\n ytitle=ytitle,\n )\n self.yrange = yrange\n self.xrange = xrange\n self._overlaying = False\n\n core.title = title or core.title\n self.core = core\n self.left_track = left_track\n self.bottom_track = bottom_track\n\n def _build_fig(self, xaxis=\"x\", yaxis=\"y\"):\n f = self.core.figure\n try:\n if self.layout.yaxis2.overlaying != \"free\":\n self._overlaying = True\n except AttributeError:\n pass\n\n traces = f.data\n self.layout |= dict(f.layout)\n for trace in traces:\n trace.xaxis = xaxis\n if self._overlaying and \"yaxis\" in trace:\n trace.yaxis = \"y3\"\n else:\n trace.yaxis = yaxis\n\n self._traces = traces\n ticks_on = dict(_ticks_on)\n f.layout.xaxis.title = self.xtitle\n f.layout.yaxis.title = self.ytitle\n f.layout.xaxis |= ticks_on\n f.layout.yaxis |= ticks_on\n return f\n\n def _build_2x2_fig(self):\n if not self.traces:\n _ = self._build_fig(xaxis=\"x2\", yaxis=\"y2\")\n\n layout = UnionDict(\n {\n \"xaxis\": {\"anchor\": \"y\", \"domain\": [0.0, 0.099]},\n \"xaxis2\": {\"anchor\": \"y2\", \"domain\": [0.109, 1.0]},\n \"xaxis3\": {\"anchor\": \"y3\", \"domain\": [0.109, 1.0]},\n \"yaxis\": {\"anchor\": \"x\", \"domain\": [0.109, 1.0]},\n \"yaxis2\": {\"anchor\": \"x2\", \"domain\": [0.109, 1.0]},\n \"yaxis3\": {\"anchor\": \"x3\", \"domain\": [0.0, 0.099]},\n }\n )\n layout |= self.layout\n fig = UnionDict(data=[], layout=layout)\n\n # common settings\n ticks_off_kwargs = dict(_ticks_off)\n ticks_on_kwargs = dict(_ticks_on)\n\n # core traces and layout\n fig.data.extend(self.traces)\n\n fig.layout.xaxis2 |= dict(range=self.xrange, **ticks_off_kwargs)\n fig.layout.yaxis2 |= dict(range=self.yrange, **ticks_off_kwargs)\n\n # 
left_track traces\n seen_types = set()\n max_x = 0\n traces = []\n for trace in self.left_track.traces:\n traces.append(trace)\n # convert to numpy array to handle None's\n x = numpy.array(trace.x, dtype=float)\n indices = numpy.logical_not(numpy.isnan(x))\n max_x = max(x[indices].max(), max_x)\n if trace.legendgroup in seen_types:\n trace.showlegend = False\n seen_types.add(trace.legendgroup)\n\n left_range = [0, int(max_x) + 1]\n\n # bottom_track traces\n max_y = 0\n for trace in self.bottom_track.traces:\n trace.xaxis = \"x3\"\n trace.yaxis = \"y3\"\n traces.append(trace)\n # convert to numpy array to handle None's\n y = numpy.array(trace.y, dtype=float)\n indices = numpy.logical_not(numpy.isnan(y))\n max_y = max(y[indices].max(), max_y)\n if trace.legendgroup in seen_types:\n trace.showlegend = False\n seen_types.add(trace.legendgroup)\n\n bottom_range = [0, int(max_y) + 1]\n\n # add all traces\n fig.data.extend(traces)\n # configure axes for titles, limits, border and ticks\n fig.layout.yaxis |= dict(\n title=dict(text=self.ytitle), range=self.yrange, **ticks_on_kwargs\n )\n\n fig.layout.xaxis3 |= dict(\n title=dict(text=self.xtitle), range=self.xrange, **ticks_on_kwargs\n )\n\n # adjust row width of left plot for number of feature tracks\n min_range = min(left_range[1], bottom_range[1])\n left_prop = left_range[1] / min_range\n\n # first the top row\n xaxis_domain = list(layout.xaxis.domain)\n xaxis_domain[1] = left_prop * xaxis_domain[1]\n fig.layout.xaxis |= dict(\n title=None, range=left_range, domain=xaxis_domain, **ticks_off_kwargs\n )\n fig.layout.xaxis |= dict(\n title={}, range=left_range, domain=xaxis_domain, **ticks_off_kwargs\n )\n\n space = 0.01\n fig.layout.xaxis2.domain = (xaxis_domain[1] + space, 1.0)\n fig.layout.xaxis3.domain = (xaxis_domain[1] + space, 1.0)\n\n # now the right column\n bottom_prop = bottom_range[1] / min_range\n yaxis_domain = list(layout.yaxis3.domain)\n yaxis_domain[1] = bottom_prop * yaxis_domain[1]\n fig.layout.yaxis3 |= dict(\n title={}, range=bottom_range, domain=yaxis_domain, **ticks_off_kwargs\n )\n\n # and bottom of the boxes above\n fig.layout.yaxis.domain = (yaxis_domain[1] + space, 1.0)\n fig.layout.yaxis2.domain = (yaxis_domain[1] + space, 1.0)\n\n return fig\n\n def _build_2x1_fig(self):\n \"\"\"2 rows, one column, dotplot and seq1 annotated\"\"\"\n if not self.traces:\n _ = self._build_fig()\n\n layout = UnionDict(\n xaxis={\"anchor\": \"y2\", \"domain\": [0.0, 1.0]},\n yaxis={\"anchor\": \"free\", \"domain\": [0.1135, 1.0], \"position\": 0.0},\n yaxis2={\"anchor\": \"x\", \"domain\": [0.0, 0.0985]},\n )\n if self._overlaying:\n self.layout.yaxis3 = self.layout.yaxis2\n self.layout.yaxis2 = {}\n self.layout.legend.x = 1.3\n layout |= dict(self.layout)\n fig = UnionDict(data=[], layout=layout)\n\n # common settings\n ticks_off_kwargs = dict(_ticks_off)\n ticks_on_kwargs = dict(_ticks_on)\n\n # core traces and layout\n fig.data.extend(self.traces)\n\n fig.layout.xaxis |= dict(\n title=dict(text=self.xtitle), range=self.xrange, **ticks_on_kwargs\n )\n fig.layout.yaxis |= dict(\n title=dict(text=self.ytitle), range=self.yrange, **ticks_on_kwargs\n )\n\n # bottom traces\n seen_types = set()\n max_y = 0\n traces = []\n for trace in self.bottom_track.traces:\n trace.yaxis = \"y2\"\n trace.xaxis = \"x\"\n traces.append(trace)\n y = numpy.array(trace.y, dtype=float)\n indices = numpy.logical_not(numpy.isnan(y))\n max_y = max(y[indices].max(), max_y)\n if trace.legendgroup in seen_types:\n trace.showlegend = False\n 
seen_types.add(trace.legendgroup)\n\n fig.data.extend(traces)\n fig.layout.yaxis2 |= dict(\n title={}, range=[0, int(max_y) + 1], **ticks_off_kwargs\n )\n return fig\n\n def _build_1x2_fig(self):\n if not self.traces:\n self._build_fig(xaxis=\"x2\")\n layout = UnionDict(\n xaxis={\"anchor\": \"y\", \"domain\": [0.0, 0.099]},\n xaxis2={\"anchor\": \"free\", \"domain\": [0.109, 1.0], \"position\": 0.0},\n yaxis={\"anchor\": \"x\", \"domain\": [0.0, 1.0]},\n )\n\n layout |= self.layout\n fig = UnionDict(data=[], layout=layout)\n\n # common settings\n ticks_off_kwargs = dict(_ticks_off)\n ticks_on_kwargs = dict(_ticks_on)\n\n # core traces and layout\n fig.data.extend(self.traces)\n\n fig.layout.xaxis2 |= dict(\n title=self.xtitle, range=self.xrange, **ticks_on_kwargs\n )\n fig.layout.yaxis |= dict(\n title=self.ytitle, range=self.yrange, **ticks_on_kwargs\n )\n\n # left track\n seen_types = set()\n max_x = 0\n traces = []\n for trace in self.left_track.traces:\n trace.yaxis = \"y\"\n traces.append(trace)\n x = numpy.array(trace.x, dtype=float)\n indices = numpy.logical_not(numpy.isnan(x))\n max_x = max(x[indices].max(), max_x)\n if trace.legendgroup in seen_types:\n trace.showlegend = False\n seen_types.add(trace.legendgroup)\n\n fig.data.extend(traces)\n fig.layout.xaxis |= dict(\n title=None, range=[0, int(max_x) + 1], **ticks_off_kwargs\n )\n return fig\n\n @property\n def figure(self):\n if self.bottom_track and self.left_track:\n func = self._build_2x2_fig\n elif self.bottom_track:\n func = self._build_2x1_fig\n elif self.left_track:\n func = self._build_1x2_fig\n else:\n func = self._build_fig\n\n result = func()\n\n return result\n\n def remove_track(self, left_track=False, bottom_track=False):\n \"\"\"\n Parameters\n ----------\n left_track : bool\n the left track is removed\n bottom_track : bool\n the bottom track is removed\n \"\"\"\n if left_track:\n self.left_track = None\n\n if bottom_track:\n self.bottom_track = None\n\n if left_track or bottom_track:\n self.core._traces = []\n self._traces = []\n\n\nclass Shape:\n _mode = \"lines\"\n\n def __init__(\n self,\n name=None,\n text=None,\n filled=True,\n legendgroup=None,\n showlegend=True,\n hoverinfo=None,\n fillcolor=None,\n ):\n self.filled = filled\n self.fillcolor = fillcolor\n self._legendgroup = legendgroup\n self._showlegend = showlegend\n self.name = name\n self.text = text\n self._hoverinfo = hoverinfo or name\n\n def shift(self, x=0, y=0):\n if not isinstance(self.x, numpy.ndarray):\n self.x += x\n self.y += y\n else:\n self.x[self.x != None] += x\n self.y[self.y != None] += y\n\n return self\n\n @property\n def height(self):\n return self.top - self.bottom\n\n @property\n def top(self):\n if not isinstance(self.y, numpy.ndarray):\n return numpy.max(self.y)\n else:\n return numpy.max(self.y[self.y != None])\n\n @property\n def bottom(self):\n if not isinstance(self.y, numpy.ndarray):\n return numpy.min(self.y)\n else:\n return numpy.min(self.y[self.y != None])\n\n @property\n def middle(self):\n return self.height / 2 + self.bottom\n\n @property\n def T(self):\n self.x, self.y = self.y, self.x\n return self\n\n def as_trace(self, name=None):\n \"\"\"returns component for plotly display\"\"\"\n name = name or self.name\n data = UnionDict(\n type=\"scatter\",\n x=self.x,\n y=self.y,\n mode=self._mode,\n fill=\"toself\",\n fillcolor=self.fillcolor,\n line=dict(color=self.fillcolor),\n text=self.text,\n name=name,\n legendgroup=self._legendgroup,\n showlegend=self._showlegend,\n hoverinfo=\"text\",\n )\n return 
data\n\n\nclass Rectangle(Shape):\n def __init__(self, coords, y=0, height=0.25, **kwargs):\n super(Rectangle, self).__init__(**kwargs)\n width = abs(coords[0][0] - coords[0][1])\n x_coord = min(coords[0][0], coords[0][1])\n xs = [x_coord, x_coord, x_coord + width, x_coord + width, x_coord]\n ys = [y, y + height, y + height, y, y]\n for i in range(1, len(coords)):\n # Add coordinates for connecting line segment\n xs += [None, coords[i - 1][1], coords[i][0], None]\n ys += [None, y + height / 2, y + height / 2, None]\n # Add coordinates for individual rectangle\n width = abs(coords[i][0] - coords[i][1])\n x_coord = min(coords[i][0], coords[i][1])\n xs += [x_coord, x_coord, x_coord + width, x_coord + width, x_coord]\n ys += [y, y + height, y + height, y, y]\n self.x = numpy.array(xs)\n self.y = numpy.array(ys)\n\n\nclass Diamond(Shape):\n def __init__(self, coords, y=0, height=0.25, **kwargs):\n super(Diamond, self).__init__(**kwargs)\n width = abs(coords[0][0] - coords[0][1])\n x_coord = min(coords[0][0], coords[0][1])\n hh = height / 2\n xs = [\n x_coord,\n x_coord + width / 2,\n x_coord + width,\n x_coord + width / 2,\n x_coord,\n ]\n ys = [y, y + hh, y, y - hh, y]\n for i in range(1, len(coords)):\n # Add coordinates for connecting line segment\n xs += [None, coords[i - 1][1], coords[i][0], None]\n ys += [None, y, y, None]\n # Add coordinates for individual diamond\n width = abs(coords[i][0] - coords[i][1])\n x_coord = min(coords[i][0], coords[i][1])\n xs += [\n x_coord,\n x_coord + width / 2,\n x_coord + width,\n x_coord + width / 2,\n x_coord,\n ]\n ys += [y, y + hh, y, y - hh, y]\n self.x = numpy.array(xs)\n self.y = numpy.array(ys)\n\n\nclass Arrow(Shape):\n def __init__(\n self, coords, y=0, height=0.25, arrow_head_w=0.1, reverse=False, **kwargs\n ):\n super(Arrow, self).__init__(**kwargs)\n xs = []\n ys = []\n for i in range(len(coords) - 1):\n # Add coordinates for individual rectangle\n width = abs(coords[i][0] - coords[i][1])\n x_coord = min(coords[i][0], coords[i][1])\n xs += [x_coord, x_coord, x_coord + width, x_coord + width, x_coord]\n ys += [y, y + height, y + height, y, y]\n # Add coordinates for connecting line segment\n xs += [None, coords[i][1], coords[i + 1][0], None]\n ys += [None, y + height / 2, y + height / 2, None]\n\n width = abs(coords[-1][0] - coords[-1][1])\n x_coord = min(coords[-1][0], coords[-1][1])\n hh = height * arrow_head_w * 2\n hw = width * arrow_head_w * 2\n\n # Coordinates for arrow head\n arrow_x = [\n x_coord,\n x_coord + width - hw,\n x_coord + width - hw,\n x_coord + width,\n x_coord + width - hw,\n x_coord + width - hw,\n x_coord,\n x_coord,\n ]\n arrow_y = [\n y,\n y,\n y - hh,\n y + height / 2,\n y + height + hh,\n y + height,\n y + height,\n y,\n ]\n if not reverse:\n xs += arrow_x\n ys += arrow_y\n else:\n arrow_x = numpy.array(arrow_x)\n arrow_y = numpy.array(arrow_y)\n xs += list(numpy.flip(arrow_x.max() - arrow_x + arrow_x.min()))\n ys += list(numpy.flip(arrow_y))\n\n self.x = numpy.array(xs)\n self.y = numpy.array(ys)\n\n\n# https://plot.ly/python/marker-style/\n# https://plot.ly/python/reference/#scatter-marker-symbol\nclass Point(Shape):\n _mode = \"markers\"\n\n def __init__(self, x, y, size=14, symbol=\"square\", **kwargs):\n super(Point, self).__init__(**kwargs)\n self.x = numpy.array([x], dtype=\"O\")\n self.y = numpy.array([y], dtype=\"O\")\n self._size = size\n self._symbol = symbol\n\n\nclass _MakeShape:\n \"\"\"container class that builds annotation shapes\"\"\"\n\n _colors = dict(\n cds=\"rgba(0,0,150,0.5)\",\n 
exon=\"rgba(0,0,100,0.5)\",\n gene=\"rgba(0,0,150,0.5)\",\n transcript=\"rgba(0,0,200,0.5)\",\n snp=\"rgba(200,0,0,0.5)\",\n snv=\"rgba(200,0,0,0.5)\",\n )\n _shapes = dict(\n cds=Arrow,\n exon=Arrow,\n transcript=Arrow,\n gene=Arrow,\n repeat=Rectangle,\n snp=Point,\n snv=Point,\n variation=Diamond,\n )\n\n def __call__(self, type_=None, name=None, coords=None, **kwargs):\n from cogent3.core.annotation import _Annotatable\n\n if isinstance(type_, _Annotatable):\n if not type_.map.useful:\n return None\n\n name = type_.name\n coords = type_.map.get_coordinates()\n reverse = type_.map.get_covering_span().reverse\n type_ = type_.type\n else:\n if coords[0][0] > coords[-1][1]:\n reverse = True\n else:\n reverse = False\n if coords is None:\n raise Exception(\"No coordinates defined\")\n kwargs.update(dict(reverse=reverse))\n\n klass = self._shapes.get(type_.lower(), Rectangle)\n color = self._colors.get(type_.lower(), None)\n if klass != Arrow:\n kwargs.pop(\"reverse\", None)\n\n if klass != Point:\n result = klass(\n name=type_,\n text=name,\n legendgroup=type_,\n coords=coords,\n fillcolor=color,\n **kwargs,\n )\n else:\n result = Point(\n name=type_,\n text=name,\n legendgroup=type_,\n x=min(coords[0][0], coords[-1][1]),\n y=1,\n size=14,\n symbol=\"square\",\n fillcolor=color,\n **kwargs,\n )\n return result\n\n\nmake_shape = _MakeShape()\n" ]
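The grid-layout arithmetic in get_domain above is easy to verify standalone. In the sketch below the function body is copied verbatim from the drawing module so the snippet runs on its own; the three-row loop and the expected values are worked out by hand from that body (per_element = 1/3, space capped at per_element / 10):

def get_domain(total, element, is_y, space=0.01):
    # copied from the drawing module above
    if total == 1:
        return [0, 1]
    if element > total - 1:
        raise ValueError(f"{element} index too big for {total}")
    per_element = 1 / total
    space = min(space / 2, per_element / 10)
    bounds = [per_element * i for i in range(total + 1)]
    domains = [
        (bounds[k] + space, bounds[k + 1] - space) for k in range(len(bounds) - 1)
    ]
    if is_y:
        element = total - element - 1
    return domains[element]

for row in range(3):
    print(row, get_domain(3, row, is_y=True))
# row 0 -> approx (0.672, 0.995): y domains are returned in cartesian order,
# so element 0 is the top band of the grid, matching the docstring
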
[ [ "numpy.dot", "numpy.asarray", "numpy.zeros", "numpy.take" ], [ "numpy.max", "numpy.array", "numpy.isnan", "numpy.min", "numpy.flip" ] ]
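The annotated-tree code above distinguishes the fitted branch length from "ENS", the expected number of substitutions, which its docstring notes differs from the standard length only for non-stationary models. As a minimal sketch of that idea — not cogent3's expected_number_subs, and using scipy, which the dumped module does not itself import — ENS can be approximated by integrating the instantaneous substitution rate of the process along the branch:

import numpy
from scipy.linalg import expm

def expected_number_subs_sketch(p0, Q, t, steps=1000):
    # approximate ENS by trapezoidal integration of the total leave rate,
    # with the state distribution evolving as p(s) = p0 @ expm(Q s)
    times = numpy.linspace(0.0, t, steps)
    rates = [-(p0 @ expm(Q * s) * numpy.diag(Q)).sum() for s in times]
    return numpy.trapz(rates, times)

# toy case: calibrated, stationary two-state process, so ENS ~= t
Q = numpy.array([[-1.0, 1.0], [1.0, -1.0]])
pi = numpy.array([0.5, 0.5])
print(expected_number_subs_sketch(pi, Q, 0.3))  # ~0.3

For a calibrated stationary process the integrand is identically 1 and ENS collapses to the branch length t, which is exactly why the docstring singles out non-stationary models.
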
josephtessmer/EMsoft
[ "97daa26978c42d5f569f4588a9991393c157d509" ]
[ "Source/pyEMsoft/EMsoft/pyEMsoftTools.py" ]
[ "# some simple tools to have when working with pyEMsoft module\nfrom EMsoft import pyEMsoft\nimport numpy as np\nimport h5py as h5\nimport os\nimport matplotlib.pyplot as plt\n\n\nclass Tools(object):\n \"\"\"\n Module Tools\n\n\n Some tools to help with pyEMsoft module\n\n \"\"\"\n @staticmethod\n def get_character_array(ASCII_array):\n \"\"\"\n CArray = get_character_array(ASCII_array)\n\n\n Convert numpy array (ASCII) data into character array\n\n Parameters\n ----------\n ASCII_array : float array\n\n Returns\n -------\n CArray : character array\n\n \"\"\"\n if len(ASCII_array.shape) == 3:\n ASCII_Array = ASCII_array.reshape(\n ASCII_array.shape[0], ASCII_array.shape[1]*ASCII_array.shape[2])\n else:\n ASCII_Array = ASCII_array\n CArray = np.chararray(\n (1, ASCII_Array.shape[1]), itemsize=ASCII_Array.shape[0], unicode=True)\n for j in range(ASCII_Array.shape[1]):\n tempCharacter = ''\n for i in range(ASCII_Array.shape[0]):\n tempCharacter += chr(ASCII_Array[i, j])\n CArray[0, j] = tempCharacter\n if len(ASCII_array.shape) == 3:\n CArray = CArray.reshape(ASCII_array.shape[1], ASCII_array.shape[2])\n return CArray\n\n @staticmethod\n def get_point_group(SGNUM):\n \"\"\"\n pgnum = get_point_group(SGNUM)\n\n\n Determine the point group a space group number\n\n Parameters\n ----------\n SGNUM : int\n\n Returns\n -------\n pgnum : int\n\n \"\"\"\n SGPG = pyEMsoft.typedefs.sgpg\n pgnum = 0\n for i in range(31, -1, -1):\n if SGPG[i] <= SGNUM:\n pgnum = i+1\n break\n return pgnum\n\n @staticmethod\n def get_space_string(chr):\n \"\"\"\n transspace_str=get_space_string(chr)\n\n\n Get the corresponding name of the space in strings\n\n Parameters\n ----------\n chr : str\n\n Returns\n -------\n transspace_str : str\n\n \"\"\"\n if chr == 'd':\n transspace_str = 'direct space'\n elif chr == 'r':\n transspace_str = 'reciprocal space'\n elif chr == 'c':\n transspace_str = 'standard cartesian reference frame'\n else:\n print('Undefined space')\n return transspace_str\n\n @staticmethod\n def get_crystal_system_name(crystal_system_number):\n \"\"\"\n crystal_system_name = get_crystal_system_name(crystal_system_number)\n\n\n Get the corresponding name of the crystal system\n\n Parameters\n ----------\n crystal_system_number : int\n\n Returns\n -------\n crystal_system_name : str\n\n \"\"\"\n\n if crystal_system_number == 1:\n crystal_system_name = 'Cubic'\n elif crystal_system_number == 2:\n crystal_system_name = 'Tetragonal'\n elif crystal_system_number == 3:\n crystal_system_name = 'Orthorhombic'\n elif crystal_system_number == 4:\n crystal_system_name = 'Hexagonal'\n elif crystal_system_number == 5:\n crystal_system_name = 'Trigonal'\n elif crystal_system_number == 6:\n crystal_system_name = 'Monoclinic'\n elif crystal_system_number == 7:\n crystal_system_name = 'Triclinic'\n elif crystal_system_number == 8:\n crystal_system_name = '2-D Quasi-Crystal'\n elif crystal_system_number == 9:\n crystal_system_name = '3-D Quasi-Crystal'\n else:\n print('Undefined crystal system')\n return crystal_system_name\n\n\nclass ExtractData:\n \"\"\"\n Module ExtractData\n\n Extract hdf5 data from crystal structure file or master EBSD file\n\n \"\"\"\n\n def __init__(self, master):\n if len(list(master.keys())) == 1:\n self.crystaldata = master['CrystalData']\n\n else:\n self.crystaldata = master['CrystalData']\n self.emdata = master['EMData']\n self.emheader = master['EMheader']\n self.nmlfiles = master['NMLfiles']\n self.nmlparameters = master['NMLparameters']\n self.crystaldata = master['CrystalData']\n\n def 
crystal_data(self):\n \"\"\"\n Crystal, AtomDict, Info = crystal_data()\n\n\n Extract everything about the crystal structure information\n\n\n Returns\n -------\n Crystal : dict\n AtomDict : dict\n Info : dict\n\n \"\"\"\n\n Elements = Tools.get_character_array(pyEMsoft.constants.atom_sym)\n AtomData = self.crystaldata['AtomData'][:]\n Atomtypes = self.crystaldata['Atomtypes'][:]\n CreationDate = self.crystaldata['CreationDate'][:]\n Creator = self.crystaldata['Creator'][:]\n CrystalSystem = self.crystaldata['CrystalSystem'][:]\n LatticeParameters = self.crystaldata['LatticeParameters'][:]\n Natomtypes = self.crystaldata['Natomtypes'][:]\n ProgramName = self.crystaldata['ProgramName'][:]\n Source = self.crystaldata['Source'][:]\n SpaceGroupSetting = self.crystaldata['SpaceGroupSetting'][:]\n SpaceGroupNumber = self.crystaldata['SpaceGroupNumber'][:]\n\n # the Crystal dictionary contains crystallographic information of the structure\n # get point group number\n pgnum = Tools.get_point_group(SpaceGroupNumber.tolist())\n # get space group symbol\n space_group_names = Tools.get_character_array(\n pyEMsoft.typedefs.sym_sgname)\n # a dictionary for lattice parameters and angles\n LatticeParameters = {'a': LatticeParameters.tolist()[0], 'b': LatticeParameters.tolist()[1], 'c': LatticeParameters.tolist()[2],\n 'alpha': LatticeParameters.tolist()[3], 'beta': LatticeParameters.tolist()[4], 'gamma': LatticeParameters.tolist()[5]}\n # nexted dictioanry for crystal information\n Crystal = {'Crystal System': Tools.get_crystal_system_name(CrystalSystem[0]), 'Lattice Parameters': LatticeParameters,\n 'Point Group': pgnum, 'Space Group Number': SpaceGroupNumber.tolist(), 'Space Group Name': space_group_names[0, SpaceGroupNumber[0]-1],\n 'Space Group Setting': SpaceGroupSetting.tolist()}\n # Atom information\n atomkeys = ['Element', 'x', 'y', 'z',\n 'Site occupation parameter', 'Debye-Waller factor']\n AtomDict = {}\n for i in range(Natomtypes[0]):\n Element_symbol = Elements[0, Atomtypes[i]-1]\n Atom = AtomData[0:5, i].tolist()\n Atom.insert(0, Element_symbol)\n AtomDict.update(\n {str('Atom %d' % (i+1)): dict(zip(atomkeys, Atom))})\n\n # other info\n Info = {'Creator': Creator[0].decode('utf-8'), 'Creation Date': CreationDate[0].decode('utf-8'),\n 'Program Name': ProgramName[0].decode('utf-8'), 'Source': Source[0].decode('utf-8')}\n return Crystal, AtomDict, Info\n\n def ebsd_master(self):\n \"\"\"\n master_info, master_pattern = ebsd_master()\n\n\n Extract 2D master patterns (stereographic projection and Lambert projection) as well as related information\n\n\n Returns\n -------\n master_info : dict\n master_pattern : float array\n\n \"\"\"\n ebsd_master = self.emdata['EBSDmaster']\n\n BetheParameters = ebsd_master['BetheParameters'][:]\n EkeVs = ebsd_master['EkeVs'][:]\n Z2percent = ebsd_master['Z2percent'][:]\n lastEnergy = ebsd_master['lastEnergy'][:]\n mLPNH = ebsd_master['mLPNH'][:]\n mLPSH = ebsd_master['mLPSH'][:]\n masterSPNH = ebsd_master['masterSPNH'][:]\n masterSPSH = ebsd_master['masterSPSH'][:]\n numEbins = ebsd_master['numEbins'][:]\n master_info = {'Bethe Parameters': BetheParameters.tolist(), 'Energy Bins (kVs)': EkeVs.tolist(),\n 'Z2percent': Z2percent.tolist(), 'lastEnergy': lastEnergy.tolist()}\n accum_e = np.sum(\n self.emdata['MCOpenCL/accum_e'], axis=(0, 1)).astype('float32')\n\n LPNH = mLPNH[0]\n LPSH = mLPSH[0]\n for i in range(1, mLPNH.shape[0]):\n LPNH += mLPNH[i]\n LPSH += mLPNH[i]\n\n accum_e = accum_e / np.sum(accum_e)\n LPNH = np.average(LPNH, axis=0, weights=accum_e)\n LPSH 
= np.average(LPSH, axis=0, weights=accum_e)\n SPNH = np.average(masterSPNH, axis=0, weights=accum_e)\n SPSH = np.average(masterSPSH, axis=0, weights=accum_e)\n\n master_pattern = {'SPNH': SPNH,\n 'SPSH': SPSH, 'LPNH': LPNH, 'LPSH': LPSH}\n return master_info, master_pattern\n\n\ndef loadOptimizationData(emdatapath, inputtype, EBSD, Opt):\n \"\"\" \n EBSD_Opt, quaternion =loadOptimizationData(emdatapath, inputtype, EBSD)\n\n Load global optimization output data with default output file name\n\n Parameters\n ----------\n emdatapath : str\n inputtype : str\n EBSD : class\n Opt : class\n\n Returns\n -------\n EBSD_Opt : class\n quaternion : float array\n Ftensor : float array\n \"\"\"\n # read the output data file\n Data = h5.File(os.path.join(emdatapath, EBSD.datafile))\n eulerangles = np.reshape(Data['/EMData/EBSD/eulerangles'][:], (3)) \n PC = np.reshape((Data['/EMData/EBSD/PC'][:]), (3))\n Ftensor=np.array([1,0,0,0,1,0,0,0,1],dtype=float)\n # read the optimized pc and Euler angles\n if EBSD.applydeformation=='n':\n Ftensor = np.reshape(Data['/EMData/EBSD/Ftensor'][:], (9))\n if Opt.hybrid=='y':\n eulerangles = np.reshape(Data['/EMData/EBSD/eulerangles_NMS'][:], (3)) \n PC = np.reshape((Data['/EMData/EBSD/PC_NMS'][:]), (3))\n Ftensor = np.reshape(Data['/EMData/EBSD/Ftensor_NMS'][:], (9))\n\n # optimization code output all orientation in TSL format\n # convert the Bruker and Oxford back by subtracting 90 degrees from phi1\n if inputtype == 'BrukerHDF':\n string = 'bruker'\n eulerangles[0] = eulerangles[0]-np.pi/2\n quaternion = pyEMsoft.rotations.eu2qu(eulerangles)\n elif inputtype == 'TSLHDF':\n string = 'tsl'\n quaternion = pyEMsoft.rotations.eu2qu(eulerangles)\n elif inputtype == 'OxfordHDF':\n string = 'hkl'\n eulerangles[0] = eulerangles[0]-np.pi/2\n quaternion = pyEMsoft.rotations.eu2qu(eulerangles)\n else:\n print('Undefined input type')\n # update the pattern center\n EBSD_Opt = PCtoEMsoftPC(string, PC, EBSD)\n return EBSD_Opt, quaternion, Ftensor\n\n\ndef EMEBSDGlobalOptimizationUpdate(string, Opt, EBSD, Pattern):\n \"\"\" \n loadOptimizationData(string, Opt, EBSD, Pattern)\n\n Update the template file for global optimization \n\n Parameters\n ----------\n string : str\n Opt : class\n EBSD : class\n Pattern : class\n\n \"\"\"\n # copy the template file\n os.system('EMEBSDGlobalOpt -t')\n # open input file\n fin = open(\"EMEBSDGlobalOpt.template\", \"rt\")\n # output file to write the result to\n fout = open(\"EMEBSDGlobalOpt.nml\", \"wt\")\n # HDFstrings that contains the patterns\n if Pattern.inputtype == 'EMEBSD':\n HDFstring = \"HDFstrings = 'EMData' 'EBSD' 'EBSDPatterns' '' '' '' '' '' '' ''\"\n elif Pattern.inputtype == 'BrukerHDF':\n HDFstring = \"HDFstrings = '\"+string + \\\n \"' 'EBSD' 'Data' 'RawPatterns' '' '' '' '' '' ''\"\n elif Pattern.inputtype == 'TSLHDF':\n HDFstring = \"HDFstrings = '\"+string+\"' 'EBSD' 'Data' 'Pattern' '' '' '' '' '' ''\"\n else:\n print('Undefined inputtype')\n\n # the default values in the template file\n old = (\"applyDeformation = 'n'\",\"NP=60\", \"itermax=100\", \"refresh=0\", \"bound = 0.001,2,2\",\n \"hybrid='n'\",\"masterfile = 'Ni-master-20kV.h5'\", \"datafile='data.h5'\",\n \"targetfile = 'Ni-scan-LargeArea.h5'\",\n \"numsx = 480\", \"numsy = 480\", \"ipf_wd = 101\", \"ipf_ht = 151\",\n \"inputtype = 'EMEBSD'\",\n \"HDFstrings = 'EMData' 'EBSD' 'EBSDPatterns' '' '' '' '' '' '' ''\",\n \"patx = 0\", \"paty = 0\", \"nthreads = 1\", \"energymax = 20\",\n \"energymin = 10\", \"delta=50\", \"scalingmode = 'not'\", \"gammavalue = 0\", 
\"binning = 1\",\n \"makedictionary = 'n'\", \"maskpattern = 'n'\", \"nregions = 10\")\n\n # replace the default values in the template file\n new = (\"applyDeformation = '\"+EBSD.applyDeformation+\"'\", 'NP =' + str(Opt.NP), 'itermax =' + str(Opt.itermax),\n 'refresh= ' + str(Opt.refresh), 'bound = '+str(Opt.bound)[1:-1], \"hybrid='\"+Opt.hybrid+\"'\",\n \"masterfile ='\"+EBSD.masterfile+\"'\", \"datafile= '\"+EBSD.datafile+\"'\",\n \"targetfile = '\"+Pattern.targetfile+\"'\",\n 'numsx = ' + str(EBSD.numsx), 'numsy = ' + str(EBSD.numsy),\n 'ipf_wd = ' + str(Pattern.ipf_wd), 'ipf_ht = ' +\n str(Pattern.ipf_ht),\n 'inputtype = '+\"'\"+Pattern.inputtype+\"'\", HDFstring,\n 'patx = '+str(Pattern.patx), 'paty = ' +\n str(Pattern.paty), 'nthreads = '+str(EBSD.nthreads),\n 'energymax = ' +\n str(EBSD.energymax), 'energymin = ' + str(EBSD.energymin),\n 'delta='+str(EBSD.delta), 'scalingmode ='+\"'\" +\n EBSD.scalingmode+\"'\", 'gammavalue =' +\n str(EBSD.gammavalue), \"binning = \"+str(EBSD.binning),\n \"makedictionary ='\"+EBSD.makedictionary+\"'\",\n \"maskpattern ='\"+EBSD.maskpattern+\"'\",\n 'nregions = '+str(EBSD.nregions))\n # replace the values\n for line in fin:\n for check, rep in zip(old, new):\n line = line.replace(check, rep)\n fout.write(line)\n # close input and output files\n fin.close()\n fout.close()\n\n\ndef loadPattern(inputtype, data, path):\n \"\"\" \n enl, patterndata, angles = loadPattern(inputtype, data, path)\n\n Load metadata from pattern file\n\n Parameters\n ----------\n inputtype : str\n data : class\n path : str\n\n Returns\n -------\n enl : class\n patterndata : class\n angles : float array \n\n \"\"\"\n if inputtype == 'TSLHDF':\n enl, patterndata, angles = loadTSLHDF(data, path)\n elif inputtype == 'BrukerHDF':\n enl, patterndata, angles = loadBrukerHDF(data, path)\n else:\n print(\"Undefined Data Type\")\n pattern.targetfile=path\n return enl, patterndata, angles\n\n\ndef loadSEM(inputtype, data, path):\n \"\"\" \n PatternQuality, SEM = loadSEM(inputtype, data, path)\n\n Load pattern quality map and SEM array from pattern file\n\n Parameters\n ----------\n inputtype : str\n data : class\n path : str\n\n Returns\n -------\n PatternQuality : float array \n SEM : nt array\n \"\"\"\n if inputtype == 'TSLHDF':\n PatternQuality, SEM = loadTSLHDFSEM(data, path)\n elif inputtype == 'BrukerHDF':\n PatternQuality, SEM = loadBrukerHDFSEM(data, path)\n else:\n print(\"Undefined Data Type\")\n return PatternQuality, SEM\n\n\ndef loadBrukerHDF(str, path):\n \"\"\" \n enl, patterndata, angles = loadBrukerHDF(str, path)\n\n Load metadata from BrukerHDF pattern file\n\n Parameters\n ----------\n str : str\n path : str\n\n Returns\n -------\n enl : class\n patterndata : class\n angles : float array \n\n \"\"\"\n Data = h5.File(path, 'r')\n enl = pyEMsoft.Namelisttypedefs.EBSDNameListType\n patterndata = pyEMsoft.Namelisttypedefs.EBSDDIpreviewNameListType\n # Euler angle convention\n enl.eulerconvention = 'hkl'\n # Detector pixel size\n enl.delta = 50 # um\n\n enl.thetac = Data['/'+str+'/EBSD/Header/CameraTilt'][()]\n enl.numsy = Data['/'+str+'/EBSD/Header/PatternHeight'][()]\n enl.numsx = Data['/'+str+'/EBSD/Header/PatternWidth'][()]\n\n PC_X = np.mean(Data['/'+str+'/EBSD/Data/PCX'][:])\n PC_Y = np.mean(Data['/'+str+'/EBSD/Data/PCY'][:])\n DD = np.mean(Data['/'+str+'/EBSD/Data/DD'][:])\n\n enl = PCtoEMsoftPC('bruker', [PC_X, PC_Y, DD], enl)\n\n patterndata.ipf_wd = Data['/'+str+'/EBSD/Header/NCOLS'][()]\n patterndata.ipf_ht = Data['/'+str+'/EBSD/Header/NROWS'][()]\n 
patterndata.inputtype = 'BrukerHDF'\n\n phi1 = (Data['/'+str+'/EBSD/Data/phi1'])\n phi2 = (Data['/' +\n str+'/EBSD/Data/phi2'])\n Phi = (Data['/' +\n str+'/EBSD/Data/PHI'])\n\n # total number of patterns\n numangles = patterndata.ipf_wd*patterndata.ipf_ht\n\n # convert the Euler angles to quaternions\n angles = np.zeros((4, numangles))\n for i in range(numangles):\n angles[:, i] = pyEMsoft.rotations.eu2qu(\n np.deg2rad([phi1[i], Phi[i], phi2[i]]))\n return enl, patterndata, angles\n\n\ndef loadBrukerHDFSEM(str, path):\n \"\"\" \n PatternQuality, SEM= loadBrukerHDFSEM(str, path)\n\n Load pattern quality map and SEM array from BrukerHDF file\n\n Parameters\n ----------\n str : str\n path : str\n\n Returns\n -------\n PatternQuality : float array \n SEM : nt array\n \"\"\"\n Data = h5.File(path, 'r')\n wd = Data['/'+str+'/EBSD/Header/NCOLS'][()]\n ht = Data['/'+str+'/EBSD/Header/NROWS'][()]\n PatternQuality = np.zeros((ht, wd))\n SEM = np.zeros((2, wd*ht), dtype=int)\n PatternQuality = np.reshape(\n Data['/'+str+'/EBSD/Data/RadonQuality'][:], (ht, wd))\n SEM[0, :] = Data['/'+str+'/SEM/SEM IX'][:]\n SEM[1, :] = Data['/'+str+'/SEM/SEM IY'][:]\n return PatternQuality, SEM\n\n\ndef loadTSLHDF(str, path):\n \"\"\" \n enl, patterndata, angles = loadBrukerHDF(str, path)\n\n Load metadata from TSLHDF pattern file\n\n Parameters\n ----------\n str : str\n path : str\n\n Returns\n -------\n enl : class\n patterndata : class\n angles : float array \n\n \"\"\"\n Data = h5.File(path, 'r')\n enl = pyEMsoft.Namelisttypedefs.EBSDNameListType\n patterndata = pyEMsoft.Namelisttypedefs.EBSDDIpreviewNameListType\n # Euler angle convention\n enl.eulerconvention = 'tsl'\n # Detector pixel size\n enl.delta = 59.2 # um\n\n enl.thetac = Data['/' +\n str+'/EBSD/Header/Camera Elevation Angle'][0]\n enl.numsy = Data['/' +\n str+'/EBSD/Header/Pattern Height'][0]\n enl.numsx = Data['/' +\n str+'/EBSD/Header/Pattern Width'][0]\n PC_X = Data['/' +\n str+'/EBSD/Header/Pattern Center Calibration/x-star'][0]\n\n PC_Y = Data['/' +\n str+'/EBSD/Header/Pattern Center Calibration/y-star'][0]\n\n DD = Data['/' +\n str+'/EBSD/Header/Pattern Center Calibration/z-star'][0]\n\n enl = PCtoEMsoftPC('tsl', [PC_X, PC_Y, DD], enl)\n\n patterndata.ipf_wd = Data['/' +\n str+'/EBSD/Header/nColumns'][0]\n patterndata.ipf_ht = Data['/' +\n str+'/EBSD/Header/nRows'][0]\n patterndata.inputtype = 'TSLHDF'\n phi1 = (Data['/' +\n str+'/EBSD/Data/Phi1'])\n phi2 = (Data['/' +\n str+'/EBSD/Data/Phi2'])\n Phi = (Data['/' +\n str+'/EBSD/Data/Phi'])\n\n # total number of patterns\n numangles = patterndata.ipf_wd*patterndata.ipf_ht\n\n # convert the Euler angles to quaternions\n angles = np.zeros((4, numangles))\n for i in range(numangles):\n angles[:, i] = pyEMsoft.rotations.eu2qu(\n np.asarray([phi1[i], Phi[i], phi2[i]]))\n return enl, patterndata, angles\n\n\ndef loadTSLHDFSEM(str, path):\n \"\"\" \n PatternQuality, SEM= loadTSLHDFSEM(str, path)\n\n Load pattern quality map and SEM array from TSLHDF file\n\n Parameters\n ----------\n str : str\n path : str\n\n Returns\n -------\n PatternQuality : float array \n SEM : nt array\n \"\"\"\n Data = h5.File(path, 'r')\n wd = Data['/'+str+'/EBSD/Header/nColumns'][0]\n ht = Data['/'+str+'/EBSD/Header/nRows'][0]\n PatternQuality = np.zeros((ht, wd))\n SEM = np.zeros((2, wd*ht), dtype=int)\n PatternQuality = np.reshape(\n Data['/'+str+'/EBSD/Data/IQ'][:], (ht, wd))\n\n SEM[0, :] = np.divide(Data['/'+str+'/EBSD/Data/X Position']\n [:], Data['/'+str+'/EBSD/Header/Step X'][:])\n SEM[1, :] = 
np.divide(Data['/'+str+'/EBSD/Data/Y Position']\n [:], Data['/'+str+'/EBSD/Header/Step Y'][:])\n\n return PatternQuality, SEM\n\n\ndef getSingleEBSDPattern(str, EBSD, Pattern, path):\n \"\"\" \n TargetPattern = getSingleEBSDPattern(str, EBSD, Pattern, path)\n\n Get a single EBSD pattern from a pattern file\n\n Parameters\n ----------\n str : str\n EBSD : class\n Pattern : class\n path : str\n\n Returns\n -------\n TargetPattern : array \n \"\"\"\n ID = Pattern.paty*Pattern.ipf_wd+Pattern.patx\n if str == 'EMData':\n Data = h5.File(os.path.join(path, 'EBSDout.h5'), 'r')\n\n if EBSD.makedictionary == 'n':\n TargetPattern = (Data['/EMData/EBSD/EBSDPatterns'][0, :, :])\n else:\n TargetPattern = np.reshape(\n Data['/EMData/EBSD/EBSDPatterns'][0, :], (EBSD.numsy, EBSD.numsx))\n else:\n Data = h5.File(path, 'r')\n if Pattern.inputtype == 'TSLHDF':\n TargetPattern = Data['/' + str+'/EBSD/Data/Pattern'][ID, :, :]\n elif Pattern.inputtype == 'BrukerHDF':\n TargetPattern = Data['/' + str +\n '/EBSD/Data/RawPatterns'][ID, :, :]\n return TargetPattern\n\n\ndef EMEBSDnamelistUpdate(EBSD):\n \"\"\" \n EMEBSDnamelistUpdate(EBSD)\n\n Update EMEBSD nml file with EBSD namelist class\n\n Parameters\n ----------\n EBSD : class\n \"\"\"\n os.system('EMEBSD -t')\n # open input file\n fin = open(\"EMEBSD.template\", \"rt\")\n # output file to write the result to\n fout = open(\"EMEBSD.nml\", \"wt\")\n # replace the default values in the template file\n old = (\"L = 15000.0\", \"thetac = 10.0\", \"delta = 50.0\", \"numsx = 0\",\n \"numsy = 0\", \"xpc = 0.0\", \"ypc = 0.0\", \"energymin = 5.0\",\n \"energymax = 20.0\", \"eulerconvention = 'tsl'\",\n \"master.h5\", \"scalingmode = 'not'\", \"gammavalue = 1.0\",\n \"includebackground = 'y'\", \"makedictionary = 'n'\", \"maskpattern = 'n'\", \"nregions = 10\")\n new = ('L =' + str(EBSD.L), 'thetac =' + str(EBSD.thetac),\n 'delta = ' + str(EBSD.delta), 'numsx = ' + str(\n EBSD.numsx), 'numsy = ' + str(EBSD.numsy), 'xpc = ' + str(EBSD.xpc),\n 'ypc = ' + str(EBSD.ypc), 'energymin = ' + str(EBSD.energymin), 'energymax = ' + str(\n EBSD.energymax), 'eulerconvention ='+\"'\"+EBSD.eulerconvention+\"'\",\n EBSD.masterfile, 'scalingmode ='+\"'\"+EBSD.scalingmode +\n \"'\", 'gammavalue =' +\n str(EBSD.gammavalue),\n \"includebackground ='\"+EBSD.includebackground+\"'\",\n \"makedictionary ='\"+EBSD.makedictionary +\n \"'\", \"maskpattern ='\"+EBSD.maskpattern+\"'\",\n 'nregions = '+str(EBSD.nregions))\n for line in fin:\n for check, rep in zip(old, new):\n line = line.replace(check, rep)\n fout.write(line)\n # close input and output files\n fin.close()\n fout.close()\n\n\ndef createAngleFile(emdatapath, angle_type, Pattern, AnglesMatrix):\n \"\"\" \n createAngleFile(emdatapath, angle_type, Pattern, AnglesMatrix)\n\n Create Euler angle file for EMEBSD program\n\n Parameters\n ----------\n emdatapath : str\n angle_type : str\n Pattern : class\n AnglesMatrix : float array\n\n \"\"\"\n if AnglesMatrix.size > 4:\n quaternion = AnglesMatrix[:, Pattern.paty*Pattern.ipf_wd+Pattern.patx]\n else:\n quaternion = AnglesMatrix\n\n if angle_type == 'eu':\n Angles = np.rad2deg(pyEMsoft.rotations.qu2eu(quaternion))\n head = \"eu\"\n elif angle_type == 'qu':\n Angles = quaternion\n head = \"qu\"\n else:\n print(\"Undefined angle representation\")\n\n f = open(os.path.join(emdatapath, 'testeuler' + \".\" + 'txt'), \"w+\")\n\n if Angles.ndim == 1:\n n_angles = 1\n text_to_write = [head+'\\n', str(n_angles)+'\\n']\n text_to_write.append(str(Angles)[1:-1]+'\\n')\n else:\n n_angles = 
Angles.shape[1]\n text_to_write = [head+'\\n', str(n_angles)+'\\n']\n for angle in range(n_angles):\n text_to_write.append(str(Angles[:, angle])[1:-1]+'\\n')\n\n f.writelines(text_to_write)\n\n\ndef PCtoEMsoftPC(str, PC, enl):\n \"\"\" \n enl = PCtoEMsoftPC(str, PC, enl)\n\n Convert pattern center to EMsoft convention\n\n Parameters\n ----------\n str : str\n PC : class\n enl : class\n\n Returns\n -------\n enl : class\n \"\"\"\n if str == 'tsl':\n enl.xpc = -enl.numsx*(PC[0]-0.5)\n enl.ypc = enl.numsx*PC[1]-enl.numsy*0.5\n enl.L = enl.numsx*enl.delta*PC[2]\n elif str == 'hkl':\n enl.xpc = -enl.numsx*(PC[0]-0.5)\n enl.ypc = enl.numsy*(PC[1]-0.5)\n enl.L = enl.numsx*enl.delta*PC[2]\n elif str == 'bruker':\n enl.xpc = -enl.numsx*(PC[0]-0.5)\n enl.ypc = enl.numsy*(0.5-PC[1])\n enl.L = enl.numsy*enl.delta*PC[2]\n else:\n print('undefined pattern center convention')\n return enl\n\n\ndef EMsoftPCtoPC(str, enl):\n \"\"\" \n enl = EMsoftPCtoPC(str, PC, enl)\n\n Convert EMsoft pattern center to vendor's convention\n\n Parameters\n ----------\n str : str\n PC : class\n enl : class\n\n Returns\n -------\n enl : class\n \"\"\"\n PC = np.zeros(3)\n if str == 'tsl':\n PC[0] = -enl.xpc/enl.numsx+0.5\n PC[1] = (enl.ypc+enl.numsy*0.5)/enl.numsx\n PC[2] = enl.L / (enl.numsx*enl.delta)\n elif str == 'hkl':\n PC[0] = -enl.xpc/enl.numsx+0.5\n PC[1] = enl.ypc/enl.numsy+0.5\n PC[2] = enl.L / (enl.numsx*enl.delta)\n elif str == 'bruker':\n PC[0] = -enl.xpc/enl.numsx+0.5\n PC[1] = 0.5-enl.ypc/enl.numsy\n PC[2] = enl.L / (enl.numsy*enl.delta)\n else:\n print('undefined pattern center convention')\n return PC\n\n\ndef getGrayscale(binned):\n \"\"\" \n bpatint = getGrayscale(binned)\n\n Get grayscale pattern [0,255]\n\n Parameters\n ----------\n binned : array\n\n Returns\n -------\n bpatint : int array\n \"\"\"\n mi = binned.min()\n ma = binned.max()\n bpatint = np.asarray(((binned - mi) / (ma-mi))*255.0, dtype=int)\n return bpatint\n\n\ndef circular_mask(ht, wd):\n \"\"\" \n mask = circular_mask(ht, wd)\n\n Create a circular mask for a pattern\n\n Parameters\n ----------\n binned : array\n\n Returns\n -------\n mask : int array\n \"\"\"\n center = [int(wd/2), int(ht/2)]\n radius = min(center[0], center[1], wd-center[0], ht-center[1])\n Y, X = np.ogrid[:ht, :wd]\n dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)\n mask = dist_from_center <= radius\n return mask\n" ]
[ [ "numpy.divide", "numpy.deg2rad", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.asarray", "numpy.sum", "numpy.mean", "numpy.chararray", "numpy.sqrt", "numpy.average" ] ]
columbustech/label-debugger
[ "e509c64e3184c05ef936c0a7a881ee0124067776" ]
[ "web.py/v6/fpfn.py" ]
[ "'''\nCreated on Mar 5, 2019\n\n@author: hzhang0418\n'''\nimport numpy as np\nfrom operator import itemgetter\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom v6.detector import Detector\n\nclass FPFN(Detector):\n \n def __init__(self, features, labels, params):\n super(FPFN, self).__init__(features, labels, params)\n \n self.num_cores = params.get('num_cores', 1)\n self.nfolds = params.get('num_folds', 5)\n self.incon_indices = {} # map index to prediction probability \n self.num_iter = 0\n \n self.clf = RandomForestClassifier(n_estimators=20, random_state=0, n_jobs=self.num_cores)\n \n \n def detect_and_rank(self):\n self.get_inconsistency_indices()\n ranked = self.rank_inconsistency_indices()\n #print(\"Number of suspicious examples after ranking:\", len(ranked))\n self.num_iter += 1\n return [ t[0] for t in ranked ]\n \n \n def use_feedback(self, index2correct_label):\n for k,v in index2correct_label.items():\n self.labels[k] = v\n \n self.incon_indices.clear()\n \n \n def get_inconsistency_indices(self):\n # cross validation\n mismatching = self.cross_validation(self.clf, self.features, self.labels, self.nfolds)\n print(\"Number of suspicious examples after CV:\", len(mismatching))\n \n # samples with matching labels as train set\n # samples with mismatching labels as test set\n test_index = list(mismatching.keys())\n train_index = [ i for i in range(len(self.features)) if i not in mismatching ]\n \n train_set_features, test_set_features = self.features[train_index], self.features[test_index]\n train_set_labels, test_set_labels = self.labels[train_index], self.labels[test_index]\n \n # predict again\n proba = self.train_and_test(self.clf, train_set_features, train_set_labels, test_set_features)\n \n # find samples with mismatching labels in test set\n tmp = self.find_mismatching(proba, test_set_labels)\n \n for k, v in tmp.items():\n self.incon_indices[ test_index[k] ] = v\n \n \n def rank_inconsistency_indices(self):\n # Note that we use negative probability\n incons_prob = [ (k, -np.max(v)) for k,v in self.incon_indices.items() ]\n # sort in ascending order of negative probability\n # if same probability, sort in ascending order of index\n incons_prob = sorted(incons_prob, key=itemgetter(1,0))\n return incons_prob\n \n \n def cross_validation(self, classifier, features, labels, nfolds):\n \n kf = KFold(nfolds, shuffle=True, random_state = 0)\n \n mismatching = {}\n \n for train_index, test_index in kf.split(features):\n train_set_features, test_set_features = features[train_index], features[test_index]\n train_set_labels, test_set_labels = labels[train_index], labels[test_index]\n \n proba = self.train_and_test(self.clf, train_set_features, train_set_labels, test_set_features)\n \n tmp = self.find_mismatching(proba, test_set_labels)\n \n for k, v in tmp.items():\n mismatching[ test_index[k] ] = v\n \n return mismatching\n \n \n def train_and_test(self, classifier, train_set_features, train_set_labels, test_set_features):\n # train\n classifier.fit(train_set_features, train_set_labels)\n # predict\n return classifier.predict_proba(test_set_features)\n \n \n def find_mismatching(self, proba, labels):\n # find predicted labels\n predicted = np.argmax(proba, axis=1)\n # find those indices whose predicted labels differ from given labels\n diff = np.where(predicted!=labels)[0]\n \n index2proba = {} # index to probability from classifier\n for index in diff:\n index2proba[index] = proba[index]\n \n return index2proba\n \n" ]
[ [ "numpy.max", "sklearn.ensemble.RandomForestClassifier", "numpy.where", "numpy.argmax", "sklearn.model_selection.KFold" ] ]
PhilVest/scanning-xray-diffraction
[ "ea469d1806df78237d43fbf427cc44f017148970" ]
[ "s3dxrd/utils/scanning_transform.py" ]
[ "\nfrom __future__ import print_function\n\n\"\"\" Modified from ImageD11 transform.py by Axel Henningsson 2021\n This is only good for the scanning_3DXRD scenario when the t_y==0 and \n t_z==0 for all reflections, the idea is to allow for per measurement\n grain cms positions to be passed via t_x,t_y,t_z params.\"\"\"\n\nimport numpy as np\nfrom ImageD11 import gv_general\nfrom numpy import radians, degrees\n\ndef cross_product_2x2(a, b):\n \"\"\" returns axb for two len(3) vectors a,b\"\"\"\n assert len(a) == len(b) == 3\n return np.array([a[1] * b[2] - a[2] * b[1],\n a[2] * b[0] - a[0] * b[2],\n a[0] * b[1] - a[1] * b[0]])\n\ndef detector_rotation_matrix(tilt_x, tilt_y, tilt_z):\n \"\"\"\n Return the tilt matrix to apply to peaks\n tilts in radians\n typically applied to peaks rotating around beam center\n \"\"\"\n r1 = np.array([[np.cos(tilt_z), -np.sin(tilt_z), 0], # note this is r.h.\n [np.sin(tilt_z), np.cos(tilt_z), 0],\n [0, 0, 1]], np.float)\n r2 = np.array([[np.cos(tilt_y), 0, np.sin(tilt_y)],\n [0, 1, 0],\n [-np.sin(tilt_y), 0, np.cos(tilt_y)]], np.float)\n r3 = np.array([[1, 0, 0],\n [0, np.cos(tilt_x), -np.sin(tilt_x)],\n [0, np.sin(tilt_x), np.cos(tilt_x)]], np.float)\n r2r1 = np.dot(np.dot(r3, r2), r1)\n return r2r1\n\ndef compute_xyz_lab(peaks,\n y_center=0., y_size=0., tilt_y=0.,\n z_center=0., z_size=0., tilt_z=0.,\n tilt_x=0.,\n distance=0.,\n # detector_orientation=((1,0),(0,1)),\n o11=1.0, o12=0.0, o21=0.0, o22=-1.0,\n **kwds):\n \"\"\"\n Peaks is a 2 d array of x,y\n yc is the centre in y\n ys is the y pixel size\n ty is the tilt around y\n zc is the centre in z\n zs is the z pixel size\n tz is the tilt around z\n dist is the sample - detector distance\n detector_orientation is a matrix to apply to peaks arg to get\n ImageD11 convention\n (( 0, 1),( 1, 0)) for ( y, x)\n ((-1, 0),( 0, 1)) for (-x, y)\n (( 0,-1),(-1, 0)) for (-y,-x)\n etc...\n \"\"\"\n assert len(peaks) == 2, \"peaks must be a 2D array\"\n # Matrix for the tilt rotations\n r2r1 = detector_rotation_matrix(tilt_x, tilt_y, tilt_z)\n # Peak positions in 3D space\n # - apply detector orientation\n peaks_on_detector = np.array(peaks)\n peaks_on_detector[0, :] = (peaks_on_detector[0, :] - z_center) * z_size\n peaks_on_detector[1, :] = (peaks_on_detector[1, :] - y_center) * y_size\n #\n detector_orientation = [[o11, o12], [o21, o22]]\n # logging.debug(\"detector_orientation = \"+str(detector_orientation))\n flipped = np.dot(np.array(detector_orientation, np.float),\n peaks_on_detector)\n #\n vec = np.array([np.zeros(flipped.shape[1]), # place detector at zero,\n # sample at -dist\n flipped[1, :], # x in search, frelon +z\n flipped[0, :]], np.float) # y in search, frelon -y\n # Position of diffraction spots in 3d space after detector tilts about\n # the beam centre on the detector\n rotvec = np.dot(r2r1, vec)\n # Now add the distance (along x)\n rotvec[0, :] = rotvec[0, :] + distance\n return rotvec\n\n\ndef compute_tth_eta(peaks,\n y_center=0., y_size=0., tilt_y=0.,\n z_center=0., z_size=0., tilt_z=0.,\n tilt_x=0.,\n distance=0.,\n # detector_orientation=((1,0),(0,1)),\n o11=1.0, o12=0.0, o21=0.0, o22=-1.0,\n t_x=0.0, t_y=0.0, t_z=0.0,\n omega=None, # == phi at chi=90\n wedge=0.0, # Wedge == theta on 4circ\n chi=0.0, # == chi - 90\n **kwds): # spare args are ignored\n \"\"\"\n 0/10 for style\n \"\"\"\n\n peaks_xyz = compute_xyz_lab(\n peaks,\n y_center=y_center, y_size=y_size, tilt_y=tilt_y,\n z_center=z_center, z_size=z_size, tilt_z=tilt_z,\n tilt_x=tilt_x,\n distance=distance,\n # 
detector_orientation=((1,0),(0,1)),\n o11=o11, o12=o12, o21=o21, o22=o22)\n\n tth, eta = compute_tth_eta_from_xyz(\n peaks_xyz,\n t_x=t_x, t_y=t_y, t_z=t_z,\n omega=omega,\n wedge=wedge,\n chi=chi)\n\n return tth, eta\n\n\ndef compute_tth_eta_from_xyz(peaks_xyz, omega,\n t_x=0.0, t_y=0.0, t_z=0.0,\n # == phi at chi=90\n wedge=0.0, # Wedge == theta on 4circ\n chi=0.0, # == chi - 90\n **kwds): # last line is for laziness -\n \"\"\"\n Peaks is a 3 d array of x,y,z peak co-ordinates\n crystal_translation is the position of the grain giving rise\n to a diffraction spot\n in x,y,z ImageD11 co-ordinates\n x,y with respect to axis of rotation and or beam centre ??\n z with respect to beam height, z centre\n omega data needed if crystal translations used\n \"\"\"\n assert len(peaks_xyz) == 3\n # Scattering vectors\n if omega is None:\n s1 = peaks_xyz\n else:\n # scattering_vectors\n if len(omega) != len(peaks_xyz[0]):\n raise Exception(\n \"omega and peaks arrays must have same number of peaks\")\n s1 = peaks_xyz - compute_grain_origins(omega, wedge, chi,\n t_x, t_y, t_z)\n # CHANGED to HFP convention 4-9-2007\n eta = np.degrees(np.arctan2(-s1[1, :], s1[2, :]))\n s1_perp_x = np.sqrt(s1[1, :] * s1[1, :] + s1[2, :] * s1[2, :])\n tth = np.degrees(np.arctan2(s1_perp_x, s1[0, :]))\n return tth, eta\n\n\ndef compute_xyz_from_tth_eta(tth, eta, omega,\n t_x=0.0, t_y=0.0, t_z=0.0,\n # == phi at chi=90\n wedge=0.0, # Wedge == theta on 4circ\n chi=0.0, # == chi - 90\n **kwds): # last line is for laziness -\n \"\"\"\n Given the tth, eta and omega, compute the xyz on the detector\n\n crystal_translation is the position of the grain giving rise\n to a diffraction spot\n in x,y,z ImageD11 co-ordinates\n x,y with respect to axis of rotation and or beam centre ??\n z with respect to beam height, z centre\n\n omega data needed if crystal translations used\n \"\"\"\n # xyz = unit vectors along the scattered vectors\n xyz = np.zeros((3, tth.shape[0]), np.float)\n rtth = np.radians(tth)\n reta = np.radians(eta)\n xyz[0, :] = np.cos(rtth)\n # eta = np.degrees(np.arctan2(-s1[1, :], s1[2, :]))\n xyz[1, :] = -np.sin(rtth) * np.sin(reta)\n xyz[2, :] = np.sin(rtth) * np.cos(reta)\n\n # Find vectors in the fast, slow directions in the detector plane\n pks = np.array([(1, 0),\n (0, 1),\n (0, 0) ], np.float).T\n dxyzl = compute_xyz_lab(pks, **kwds)\n # == [xpos, ypos, zpos] shape (3,n)\n #\n # This was based on the recipe from Thomas in Acta Cryst ...\n # ... 
Modern Equations of ...\n\n ds = dxyzl[:,0] - dxyzl[:,2] # 1,0 in plane is (1,0)-(0,0)\n df = dxyzl[:,1] - dxyzl[:,2] # 0,1 in plane\n dO = dxyzl[:,2] # origin pixel\n\n # Cross products to get the detector normal\n # Thomas uses an inverse matrix, but then divides out the determinant anyway\n det_norm = np.cross( ds, df )\n\n # Scattered rays on detector normal\n norm = np.dot( det_norm, xyz )\n # Check for divide by zero\n msk = (norm == 0)\n needmask = False\n if msk.sum()>0:\n norm += msk\n needmask = True\n\n # Intersect ray on detector plane\n sc = np.dot( np.cross( df, dO ), xyz ) / norm\n fc = np.dot( np.cross( dO, ds ), xyz ) / norm\n\n if (t_x != 0) or (t_y != 0) or (t_z != 0):\n go = compute_grain_origins(omega,\n wedge=wedge, chi=chi,\n t_x=t_x, t_y=t_y, t_z=t_z)\n # project these onto the detector face to give shifts\n sct = ( xyz * np.cross( df, go.T ).T ).sum(axis=0) / norm\n fct = ( xyz * np.cross( go.T, ds ).T ).sum(axis=0) / norm\n sc -= sct\n fc -= fct\n\n if needmask:\n fc = np.where( msk, 0, fc )\n sc = np.where( msk, 0, sc )\n\n return fc, sc\n\n\ndef compute_grain_origins(omega, wedge=0.0, chi=0.0,\n t_x=0.0, t_y=0.0, t_z=0.0):\n \"\"\"\n # print \"Using translations t_x %f t_y %f t_z %f\"%(t_x,t_y,t_z)\n # Compute positions of grains\n # expecting tx, ty, tz for each diffraction spot\n #\n # g = R . W . k\n # g - is g-vector w.r.t crystal\n # k is scattering vector in lab\n # so we want displacement in lab from displacement in sample\n # shift = W-1 R-1 crystal_translation\n #\n # R = ( cos(omega) , sin(omega), 0 )\n # (-sin(omega) , cos(omega), 0 )\n # ( 0 , 0 , 1 )\n #\n # W = ( cos(wedge) , 0 , sin(wedge) )\n # ( 0 , 1 , 0 )\n # (-sin(wedge) , 0 , cos(wedge) )\n #\n # C = ( 1 , 0 , 0 ) ??? Use eta0 instead\n # ( 0 , cos(chi) , sin(chi) ) ??? Use eta0 instead\n # ( 0 , -sin(chi) , cos(chi) ) ??? Use eta0 instead\n \"\"\"\n w = np.radians(wedge)\n WI = np.array([[np.cos(w), 0, -np.sin(w)],\n [0, 1, 0],\n [np.sin(w), 0, np.cos(w)]], np.float)\n c = np.radians(chi)\n CI = np.array([[1, 0, 0],\n [0, np.cos(c), -np.sin(c)],\n [0, np.sin(c), np.cos(c)]], np.float)\n t = np.zeros((3, omega.shape[0]), np.float) # crystal translations\n # Rotations in reverse order compared to making g-vector\n # also reverse directions. this is trans at all zero to\n # current setting. gv is scattering vector to all zero\n om_r = np.radians(omega)\n # This is the real rotation (right handed, g back to k)\n\n # Modified for translating sample, input t_x,t_y,t_z are\n # now precomputed scattering positions.\n #---------------------------------------------------\n t[0, :] = t_x\n t[1, :] = t_y\n t[2, :] = t_z\n \n\n if chi != 0.0:\n c = np.cos(np.radians(chi))\n s = np.sin(np.radians(chi))\n u = np.zeros(t.shape, np.float)\n u[0, :] = t[0, :]\n u[1, :] = c * t[1, :] + -s * t[2, :]\n u[2, :] = s * t[1, :] + c * t[2, :]\n t = u\n if wedge != 0.0:\n c = np.cos(np.radians(wedge))\n s = np.sin(np.radians(wedge))\n u = np.zeros(t.shape, np.float)\n u[0, :] = c * t[0, :] + -s * t[2, :]\n u[1, :] = t[1, :]\n u[2, :] = s * t[0, :] + c * t[2, :]\n t = u\n return t\n\ndef compute_k_vectors(tth, eta, wvln):\n \"\"\"\n generate k vectors - scattering vectors in laboratory frame\n\n NOTE: These are the G_l vectors. not k vectors -... 
- Axel Henningsson 2021\n These do not have length 2pi/wvlv but some other scaling.\n \"\"\"\n tth = np.radians(tth)\n eta = np.radians(eta)\n c = np.cos(tth / 2) # cos theta\n s = np.sin(tth / 2) # sin theta\n ds = 2 * s / wvln\n k = np.zeros((3, tth.shape[0]), np.float)\n # x - along incident beam\n k[0, :] = -ds * s # this is negative x\n # y - towards door\n k[1, :] = -ds * c * np.sin(eta) # CHANGED eta to HFP convention 4-9-2007\n # z - towards roof\n k[2, :] = ds * c * np.cos(eta)\n\n return k\n\n\ndef compute_g_vectors(tth,\n eta,\n omega,\n wvln,\n wedge=0.0,\n chi=0.0):\n \"\"\"\n Generates spot positions in reciprocal space from\n twotheta, wavelength, omega and eta\n Assumes single axis vertical\n ... unless a wedge angle is specified\n \"\"\"\n k = compute_k_vectors(tth, eta, wvln)\n# print k[:,0]\n return compute_g_from_k(k, omega, wedge, chi)\n\n\ndef compute_g_from_k(k, omega, wedge=0, chi=0):\n \"\"\"\n Compute g-vectors with cached k-vectors\n \"\"\"\n om = np.radians(omega)\n # G-vectors - rotate k onto the crystal axes\n g = np.zeros((3, k.shape[1]), np.float)\n t = np.zeros((3, k.shape[1]), np.float)\n #\n # g = R . W . k where:\n # R = ( cos(omega) , sin(omega), 0 )\n # (-sin(omega) , cos(omega), 0 )\n # ( 0 , 0 , 1 )\n #\n # W = ( cos(wedge) , 0 , sin(wedge) )\n # ( 0 , 1 , 0 )\n # (-sin(wedge) , 0 , cos(wedge) )\n #\n # C = ( 1 , 0 , 0 )\n # ( 0 , cos(chi) , sin(chi) )\n # ( 0 , -sin(chi) , cos(chi) )\n #\n if wedge != 0.0:\n c = np.cos(np.radians(wedge))\n s = np.sin(np.radians(wedge))\n t[0, :] = c * k[0, :] + s * k[2, :]\n t[1, :] = k[1, :]\n t[2, :] = -s * k[0, :] + c * k[2, :]\n k = t.copy()\n if chi != 0.0:\n c = np.cos(np.radians(chi))\n s = np.sin(np.radians(chi))\n t[0, :] = k[0, :]\n t[1, :] = c * k[1, :] + s * k[2, :]\n t[2, :] = -s * k[1, :] + c * k[2, :]\n k = t.copy()\n # This is the reverse rotation (left handed, k back to g)\n g[0, :] = np.cos(om) * k[0, :] + np.sin(om) * k[1, :]\n g[1, :] = -np.sin(om) * k[0, :] + np.cos(om) * k[1, :]\n g[2, :] = k[2, :]\n return g\n\n\ndef uncompute_g_vectors(g, wavelength, wedge=0.0, chi=0.0):\n \"\"\"\n Given g-vectors compute tth,eta,omega\n assert uncompute_g_vectors(compute_g_vector(tth,eta,omega))==tth,eta,omega\n \"\"\"\n if wedge == chi == 0:\n post = None\n else:\n post = gv_general.wedgechi( wedge=wedge, chi=chi )\n omega1, omega2, valid = gv_general.g_to_k(\n g, wavelength,axis=[0,0,-1], pre=None, post=post )\n # we know g, omega. Compute k as ... ?\n if post is None:\n pre = None\n else:\n pre = gv_general.chiwedge( wedge=wedge, chi=chi ).T\n k_one = gv_general.k_to_g( g, omega1, axis=[0,0,1],\n pre = pre, post=None)\n k_two = gv_general.k_to_g( g, omega2, axis=[0,0,1],\n pre = pre, post=None)\n #\n # k[1,:] = -ds*c*sin(eta)\n # ------ ------------- .... tan(eta) = -k1/k2\n # k[2,:] = ds*c*cos(eta)\n #\n eta_one = np.arctan2(-k_one[1, :], k_one[2, :])\n eta_two = np.arctan2(-k_two[1, :], k_two[2, :])\n #\n #\n ds = np.sqrt(np.sum(g * g, 0))\n s = ds * wavelength / 2.0 # sin theta\n tth = np.degrees(np.arcsin(s) * 2.) * valid\n eta1 = np.degrees(eta_one) * valid\n eta2 = np.degrees(eta_two) * valid\n omega1 = omega1 * valid\n omega2 = omega2 * valid\n return tth, [eta1, eta2], [omega1, omega2]" ]
[ [ "numpy.array", "numpy.dot", "numpy.sin", "numpy.zeros", "numpy.arcsin", "numpy.sum", "numpy.degrees", "numpy.radians", "numpy.where", "numpy.arctan2", "numpy.sqrt", "numpy.cos", "numpy.cross" ] ]
jmachalica/PygameProjects
[ "f1cdcad32b9e4c3eb584c7447f88a0436a30d134" ]
[ "Maze/maze.py" ]
[ "\r\n\r\n# '''\r\n# Assumptions:\r\n\r\n# maze returned will be a grid - array consisting of 1 and 0\r\n# 0 - cell is empty\r\n# 1 - cell is wall\r\n\r\n# EXAMPLE:\r\n# 1 1 1 1 1 1 1\r\n# 0 0 0 0 1 0 0\r\n# 1 1 1 0 1 0 1\r\n# 1 0 0 0 1 0 1\r\n# 1 0 1 1 1 0 1\r\n# 1 0 0 0 0 0 1\r\n# 1 1 1 1 1 1 1\r\n\r\n\r\n# self.maze = [ 1,1,1,1,1,1,1,1,1,1,\r\n# 1,0,0,0,0,0,0,0,0,1,\r\n# 1,0,0,0,0,0,0,0,0,1,\r\n# 1,0,1,1,1,1,1,1,0,1,\r\n# 1,0,1,0,0,0,0,0,0,1,\r\n# 1,0,1,0,1,1,1,1,0,1,\r\n# 1,0,0,0,0,0,0,0,0,1,\r\n# 1,1,1,1,1,1,1,1,1,1,]\r\n\r\n# alternative: array containing specific codes indicating on which side, cell has a wall, one for each cell\r\n# 0 is it has none\r\n# cells on the side dont inform about external wall of the maze\r\n\r\n\r\n# '''\r\n\r\n# import numpy as np\r\n# import random\r\n# width = 10\r\n# height = 10\r\n# # directions\r\n# N, S, E, W = 1, 2, 3, 4\r\n# DIRECTIONS = [N, S, E, W]\r\n# DX = {N: 0, S: 0, E: 1, W: -1}\r\n# DY = {N: 1, S: -1, E: 0, W: 0}\r\n\r\n# OPOSSITE_DIRECTION = {E: W, W: E, N: S, S: N}\r\n\r\n\r\n# def valid_indexes(x, y, grid):\r\n# x_max, y_max = grid.shape\r\n\r\n# return x >= 0 and x < x_max and y >= 0 and y < y_max\r\n\r\n\r\n# def was_visited(x, y, grid, value=0):\r\n# return grid[x][y] != value\r\n\r\n\r\n# def find_path(x, y, grid):\r\n# # possible directions for each cell\r\n# directions = random.sample(DIRECTIONS, len(DIRECTIONS))\r\n\r\n# for direction in directions:\r\n# print(direction)\r\n# new_x, new_y = x+DX[direction], y+DY[direction]\r\n\r\n# # check if is valid direction\r\n# if valid_indexes(new_x, new_y, grid) and not was_visited(new_x, new_y, grid):\r\n# grid[x][y] = direction\r\n# grid[new_x][new_y] = OPOSSITE_DIRECTION[direction]\r\n# find_path(new_x, new_y, grid)\r\n\r\n\r\n# grid = np.zeros((width, height))\r\n# find_path(0, 0, grid)\r\n# print(grid)\r\n\r\nimport random\r\nimport numpy as np\r\n\r\n\r\nclass Maze:\r\n\r\n \"\"\" Class representing maze \r\n Maze is padded with cells representing walls.\r\n There is no start and no finish point since it was developed for path finding with dynamic end points, but they can be set easily with changing desired wall to be 0\r\n \r\n \r\n Attributes:\r\n\r\n grid (np.ndarray) - represents maze, by convention 1 represents wall and 0 represents an empty cell\r\n width (int): number of desired empty cells on x axis \r\n height (int) number of desired empty cells on y axis\r\n grid_width (int): width of created ndarray\r\n grid_height (int): height of created ndarray\r\n\r\n \r\n \"\"\"\r\n def __init__(self, width, height):\r\n \r\n # Parameters:\r\n\r\n # width(int): number of desired empty cells on x axis\r\n # height(int) number of desired empty cells on y axis\r\n # NOTE: grid dimension will be 2*width+1 and 2*height+1\r\n #\r\n self.width = width\r\n self.height = height\r\n self.grid_width = 2*width +1\r\n self.grid_height = 2*height +1\r\n self.grid = np.ones((self.grid_height, self.grid_width)) # full walls\r\n self.directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]\r\n self.create(1, 1)\r\n\r\n def create(self, x, y):\r\n \"\"\"Creates maze with backtracking algorithm\r\n \r\n Parameters: \r\n x (int): starting position in x axis\r\n y (int): starting position in y axis\r\n\"\"\"\r\n self.set_as_path(x, y)\r\n\r\n random_directions = random.sample(\r\n self.directions, len(self.directions))\r\n\r\n for direction in random_directions:\r\n new_x = x+direction[0]*2\r\n new_y = y+direction[1]*2\r\n\r\n if self.valid_direction(new_x, new_y):\r\n beetwen_x = x+direction[0]\r\n beetwen_y = 
y+direction[1]\r\n\r\n                self.set_as_path(beetwen_x, beetwen_y)\r\n                self.create(new_x, new_y)\r\n\r\n    def valid_direction(self, x, y):\r\n        \"\"\"Checks if passed coordinates are within the grid and the cell is still an unvisited wall\r\n        \r\n        Parameters:\r\n        x (int): coordinate on x axis\r\n        y (int): coordinate on y axis\r\n        \r\n        \"\"\"\r\n        if x > 0 and x < self.grid_width and y > 0 and y < self.grid_height:\r\n            return self.grid[x][y]\r\n\r\n        return False\r\n\r\n    def set_as_path(self, x, y):\r\n        \"\"\"Sets cell on x,y coordinates as empty\r\n\r\n        Parameters:\r\n        x (int): coordinate on x axis\r\n        y (int): coordinate on y axis\r\n        \r\n        \"\"\"\r\n        self.grid[x][y] = 0\r\n\r\n    def __str__(self) -> str:\r\n        \"\"\"Represents Maze as string\"\"\"\r\n\r\n        output_string = \"\"\r\n        style = {0: ' ', 1: '██'}\r\n\r\n        for x in range(self.grid_width):\r\n            output_string+=f'{x} '\r\n        \r\n        output_string+='\\n'\r\n        \r\n        for y in range(self.grid_height):\r\n            for x in range(self.grid_width):\r\n                output_string += style[self.grid[y][x]]\r\n            output_string += f\"{y} \\n\"\r\n\r\n        return output_string\r\n" ]
[ [ "numpy.ones" ] ]
hoya012/carrier-of-tricks-for-classification-pytorch
[ "d788d7a4e5007da9c410bdd3ef7ce3766d2ba0cd" ]
[ "main.py" ]
[ "import os, sys\nimport torch\nimport torch.nn as nn\nimport torchvision\n\nPATH = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, PATH + '/../..')\n\nfrom option import get_args\nfrom learning.trainer import Trainer\nfrom learning.evaluator import Evaluator\nfrom utils import get_model, make_optimizer, make_scheduler, make_dataloader, plot_learning_curves\n\ndef main():\n args = get_args()\n torch.manual_seed(args.seed)\n\n shape = (224,224,3) \n\n \"\"\" define dataloader \"\"\"\n train_loader, valid_loader, test_loader = make_dataloader(args)\n\n \"\"\" define model architecture \"\"\"\n model = get_model(args, shape, args.num_classes)\n\n if torch.cuda.device_count() >= 1:\n print('Model pushed to {} GPU(s), type {}.'.format(torch.cuda.device_count(), torch.cuda.get_device_name(0)))\n model = model.cuda() \n else:\n raise ValueError('CPU training is not supported')\n\n \"\"\" define loss criterion \"\"\"\n criterion = nn.CrossEntropyLoss().cuda()\n\n \"\"\" define optimizer \"\"\"\n optimizer = make_optimizer(args, model)\n\n \"\"\" define learning rate scheduler \"\"\"\n scheduler = make_scheduler(args, optimizer)\n \n \"\"\" define trainer, evaluator, result_dictionary \"\"\"\n result_dict = {'args':vars(args), 'epoch':[], 'train_loss' : [], 'train_acc' : [], 'val_loss' : [], 'val_acc' : [], 'test_acc':[]}\n trainer = Trainer(model, criterion, optimizer, scheduler)\n evaluator = Evaluator(model, criterion)\n\n if args.evaluate:\n \"\"\" load model checkpoint \"\"\"\n model.load()\n result_dict = evaluator.test(test_loader, args, result_dict)\n else:\n evaluator.save(result_dict)\n\n best_val_acc = 0.0\n \"\"\" define training loop \"\"\"\n for epoch in range(args.epochs):\n result_dict['epoch'] = epoch\n result_dict = trainer.train(train_loader, epoch, args, result_dict)\n result_dict = evaluator.evaluate(valid_loader, epoch, args, result_dict)\n\n if result_dict['val_acc'][-1] > best_val_acc:\n print(\"{} epoch, best epoch was updated! {}%\".format(epoch, result_dict['val_acc'][-1]))\n best_val_acc = result_dict['val_acc'][-1]\n model.save(checkpoint_name='best_model')\n\n evaluator.save(result_dict)\n plot_learning_curves(result_dict, epoch, args)\n\n result_dict = evaluator.test(test_loader, args, result_dict)\n evaluator.save(result_dict)\n\n \"\"\" save model checkpoint \"\"\"\n model.save(checkpoint_name='last_model')\n\n \"\"\" calculate test accuracy using best model \"\"\"\n model.load(checkpoint_name='best_model')\n result_dict = evaluator.test(test_loader, args, result_dict)\n evaluator.save(result_dict)\n\n print(result_dict)\nif __name__ == '__main__':\n main()" ]
[ [ "torch.manual_seed", "torch.cuda.get_device_name", "torch.nn.CrossEntropyLoss", "torch.cuda.device_count" ] ]
fbarth/agents
[ "b8cc1651671148efb2b6ae082774d28b1d1800cb" ]
[ "code/games/fourinrow_popout/FuziyPlayer.py" ]
[ "from Player import Player\nimport numpy as np\n\nclass FuziyPlayer(Player):\n def name(self):\n return \"Fuziy Player\"\n\n def max_value(self, board, action, alpha, beta, player_code, p):\n if p == 0:\n result = self.evaluate(player_code, board), action\n return result\n sucessors = self.sucessores(player_code, board)\n for s in sucessors:\n mv, ac = self.min_value(s['board'], s['action'], alpha, beta, player_code, p-1)\n if (mv > alpha):\n alpha = mv\n action = ac\n if (alpha >= beta):\n return alpha, action\n return alpha, action\n \n def min_value(self, board, action, alpha, beta, player_code, p):\n if p == 0:\n result = self.evaluate(player_code, board), action\n return result\n sucessors = self.sucessores(player_code, board)\n for s in sucessors:\n mv, ac = self.max_value(s['board'], s['action'], alpha, beta, player_code, p-1)\n if (mv < beta):\n beta = mv\n action = ac\n if (beta <= alpha):\n return beta, action \n return beta, action\n\n def move(self, player_code, board):\n _, action = self.max_value(board, None, -999999, 999999, player_code, 5)\n \n if (self.emergency(board, player_code)):\n sucessores = self.sucessores(self.enemy(player_code), board)\n for s in sucessores:\n result = self.evaluate(self.enemy(player_code), s['board'])\n if (result > 70000):\n print(\"EMERGENCY\")\n return None, s['action']\n\n near_lost, defence_position = self.next_move(self.enemy(player_code), board)\n if near_lost:\n print(\"BLOQUEIO APENAS\")\n return None, defence_position\n \n near_win, win_position = self.next_move(player_code, board)\n if near_win:\n print(\"VITORIA APENAS\")\n return None, win_position\n\n if action is None:\n for i in range(6):\n for j in range(7):\n if board[i,j] == 0:\n return None, j\n\n return None, action\n\n def sucessores(self, player_code, board):\n sucessors = []\n for i in range(0,7):\n b = self.movement(player_code, board, i)\n if(b is not None):\n sucessors.append({'board':b, 'action':i})\n return sucessors\n\n def enemy(self, player):\n if player == 1:\n return 2\n else:\n return 1\n\n def evaluate(self, player, board): \n lines = self.count_row_line(player, board)\n cols = self.count_row_column(player, board)\n diags = self.count_row_diag(player, board)\n diags2 = self.count_row_diag(player, board[::-1])\n possible_path = lines['2'] + cols['2'] + diags['2'] + diags2['2']\n near_to_win = lines['3'] + cols['3'] + diags['3'] + diags2['3']\n almost_win = lines['4'] + cols['4'] + diags['4'] + diags2['4']\n win = 100000*almost_win + 1000*near_to_win + possible_path\n\n enemy = self.enemy(player)\n enemy_lines = self.count_row_line(enemy, board)\n enemy_cols = self.count_row_column(enemy, board)\n enemy_digs = self.count_row_diag(enemy, board)\n enemy_digs2 = self.count_row_diag(enemy, board[::-1])\n possible_path_lost = enemy_lines['2'] + enemy_cols['2'] + enemy_digs['2'] + enemy_digs2['2']\n near_to_lost = enemy_lines['3'] + enemy_cols['3'] + enemy_digs['3'] + enemy_digs2['3']\n almost_lost = enemy_lines['4'] + enemy_cols['4'] + enemy_digs['4'] + enemy_digs2['4']\n lost = 100000*almost_lost + 1000*near_to_lost + possible_path_lost\n \n return (win - lost)\n\n def count_row_line(self, player, board):\n retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}\n for i in range(6):\n counter = 0\n for j in range(6):\n if ((board[i, j] == player) and (board[i, j] == board[i, j + 1])):\n counter = counter + 1\n else:\n counter = 0\n if (counter==1):\n retorno['2'] = retorno['2'] + 1\n if (counter==2):\n retorno['3'] = retorno['3'] + 1\n if (counter==3):\n 
retorno['4'] = retorno['4'] + 1\n return retorno\n \n def count_row_column(self, player, board):\n retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}\n for i in range(7):\n counter = 0\n for j in range(5):\n if ((board[j, i] == player) and (board[j,i] == board[j+1,i])):\n counter = counter + 1\n else:\n counter = 0\n if (counter==1):\n retorno['2'] = retorno['2'] + 1\n if (counter==2):\n retorno['3'] = retorno['3'] + 1\n if (counter==3):\n retorno['4'] = retorno['4'] + 1\n return retorno\n \n def count_row_diag(self, player, board):\n retorno = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}\n for k in range(-2,4):\n counter = 0\n x = np.diag(board, k=k)\n for i in range(0,len(x)-1):\n if ((x[i] == player) and (x[i] == x[i+1])):\n counter = counter + 1\n else:\n counter = 0\n if (counter==1):\n retorno['2'] = retorno['2'] + 1\n if (counter==2):\n retorno['3'] = retorno['3'] + 1\n if (counter==3):\n retorno['4'] = retorno['4'] + 1\n return retorno\n\n def count_last_line(self, player, board):\n counter = 0\n for i in range(6):\n if (board[5, i] == player):\n counter = counter + 1\n return counter\n\n def emergency(self, board, player_code):\n enemy = self.enemy(player_code)\n enemy_lines = self.count_row_line(enemy, board)\n enemy_cols = self.count_row_column(enemy, board)\n enemy_digs = self.count_row_diag(enemy, board)\n enemy_digs2 = self.count_row_diag(enemy, board[::-1])\n if (enemy_cols['3'] > 0 or enemy_lines['3'] > 0 or enemy_digs['3'] > 0 or enemy_digs2['3']> 0):\n return True\n return False\n \n def next_move(self, player, board):\n next_position = 0\n\n #horizontal\n for i in range(6):\n stay = 0\n for j in range(6):\n if i == 5:\n if j == 3:\n if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i, j] == player)):\n stay += 1\n next_position = j-1\n return True, next_position\n if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i, j-1] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-2\n return True, next_position\n if ((board[i, j+3] == player) and (board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i, j] == player)):\n stay += 1\n next_position = j+1\n return True, next_position\n if ((board[i, j+3] == player) and (board[i, j+2] == 0) and (board[i, j+1] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j+2\n return True, next_position\n \n if j == 4: \n if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i, j] == player)):\n stay += 1\n next_position = j-1\n return True, next_position\n if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i, j-1] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-2\n return True, next_position\n if ((board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i, j] == player) and (board[i, j-1] == player)):\n stay += 1\n next_position = j+1\n return True, next_position\n\n if j >= 5:\n if ((board[i, j-1] == 0) and (board[i, j-2] == player) and (board[i, j-3] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-1\n return True, next_position\n if ((board[i, j-1] == player) and (board[i, j-2] == 0) and (board[i, j-3] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-2\n return True, next_position\n else:\n if j == 3:\n if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j] == player)):\n stay += 1\n next_position = j-1\n return 
True, next_position\n if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-1] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-2\n return True, next_position\n if ((board[i, j+3] == player) and (board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i+1, j+1] != 0) and (board[i, j] == player)):\n stay += 1\n next_position = j+1\n return True, next_position\n if ((board[i, j+3] == player) and (board[i, j+2] == 0) and (board[i+1, j+2] != 0) and (board[i, j+1] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j+2\n return True, next_position\n \n if j == 4: \n if ((board[i, j-3] == player) and (board[i, j-2] == player) and (board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j] == player)):\n stay += 1\n next_position = j-1\n return True, next_position\n if ((board[i, j-3] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-1] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-2\n return True, next_position\n if ((board[i, j+2] == player) and (board[i, j+1] == 0) and (board[i+1, j+1] != 0) and (board[i, j] == player) and (board[i, j-1] == player)):\n stay += 1\n next_position = j+1\n return True, next_position\n\n if j >= 5:\n if ((board[i, j-1] == 0) and (board[i+1, j-1] != 0) and (board[i, j-2] == player) and (board[i, j-3] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-1\n return True, next_position\n if ((board[i, j-1] == player) and (board[i, j-2] == 0) and (board[i+1, j-2] != 0) and (board[i, j-3] == player) and (board[i, j] == player)):\n stay += 1\n next_position = j-2\n return True, next_position\n \n #vertical\n for i in range(7):\n end = 0\n for j in range(5):\n if ((board[j, i] == player) and (board[j+1,i] == player)):\n end += 1\n else:\n end = 0\n if (end >= 2):\n if j >= 2:\n if board[j-2,i] == 0:\n next_position = i\n return True, next_position\n \n return False, next_position\n \n def movement(self, player, board, column):\n result_board = np.matrix(board)\n for i in range(5,-2,-1):\n if (board[i,column] == 0):\n break\n if(i < 0):\n return None\n result_board[i, column] = player\n return result_board\n\n" ]
[ [ "numpy.matrix", "numpy.diag" ] ]
oAzv/GCFM
[ "5dc584f0722b90b99614616c9b210d9e086f8ff3" ]
[ "scripts/WK_NetArch/alexnet_features.py" ]
[ "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom torch import nn\n\nclass EncoderCNN(nn.Module):\n '''\n Alexnet pre-training model call, network structure adaptation modification.\n '''\n def __init__(self, model):\n '''Initialize the model, deconstruct it.\n\n Args:\n\n model: Alexnet pre-training model.\n '''\n super(EncoderCNN, self).__init__()\n\n self.feature = nn.Sequential(*list(model.children())[:-1])\n self.classifier = nn.Sequential(*list(model.classifier.children())[:-1])\n\n self.Linear_layer = nn.Sequential(\n nn.Linear(4096, 2048),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(2048, 1024),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(1024, 800)\n # nn.ReLU(True),\n # nn.Dropout(),\n # nn.Linear(512, 2)\n )\n\n def forward(self, x):\n '''Forward calculation to extract the required features.\n\n Args:\n x: The gray image data.\n\n Returns:\n The SIR feature.\n '''\n x = self.feature(x)\n x = x.view(x.size(0), -1)\n\n x = self.classifier(x)\n x = x.view(x.size(0), -1)\n\n x = self.Linear_layer(x) # (1, 10)\n # x = x.view(x.size(0), -1)\n\n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.ReLU" ] ]
NikitaKramarev/GW-stripping
[ "9147ba6ce871b4a263b9579cf4be8b709cb808ec" ]
[ "test/test_gw.py" ]
[ "import gw_stripping\nimport pytest\nimport numpy\nimport pandas\nimport astropy.constants\nimport os.path\n\[email protected]('q, res', [\n # Next tests from Eggleton Paper\n (9.9901e-4, 0.0484),\n (9.901e-3, 0.1020),\n (0.0909, 0.2068),\n (0.2857, 0.3031),\n (0.5000, 0.3789),\n (0.7143, 0.4599),\n (0.9091, 0.5782),\n (0.9901, 0.7203),\n ])\ndef test_roche_nonrelativistic(q, res):\n f, _ = gw_stripping.roche_nonrelativistic(q)\n numpy.testing.assert_allclose(f, res, atol=0.0001)\n\n\ndef get_relativistic_asymtotic_data():\n M = [1, 1.5, 2.5, 3, 4]\n a = [50, 60, 70, 80, 90]\n q = [0.0909, 0.2857, 0.5000, 0.7143, 0.9091]\n return zip(q, M, a)\n\n\ndef get_relativistic_asymtotic_data_2():\n au = astropy.constants.au.to_value(\"km\")\n return map(lambda x: (x[0], x[1], x[2] * au), get_relativistic_asymtotic_data())\n\[email protected]('q, M, a', get_relativistic_asymtotic_data())\ndef test_roche_relativistic_asymtotic(q, M, a):\n res, _ = gw_stripping.roche_nonrelativistic(q)\n f, _, _ = gw_stripping.roche_relativistic(q, M, a)\n numpy.testing.assert_allclose(f, res, atol=0.10)\n\[email protected]('q, M, a', get_relativistic_asymtotic_data_2())\ndef test_roche_relativistic_asymtotic_2(q, M, a):\n res, _ = gw_stripping.roche_nonrelativistic(q)\n f, _, _ = gw_stripping.roche_relativistic(q, M, a)\n numpy.testing.assert_allclose(f, res, atol=0.00001)\n\n\ndef get_mass_data(path, mode):\n df = pandas.read_table(path, sep=\" \")\n eta = df[\"eta\"].tolist()\n r = df[\"radius\"].tolist()\n m = df[\"mass\"].tolist()\n\n if (mode == \"r2\"):\n return map(lambda eta, r, m: (r, \"r2\", eta, m), eta, r, m)\n\n if (mode == \"m2\"):\n return map(lambda eta, r, m: (m, \"m2\", eta, r), eta, r, m)\n\n raise Exception(\"unknown mode: \", mode)\n\n\ndef local_file(path):\n return os.path.join(os.path.dirname(__file__), path)\n\n\[email protected]('inp, m2_or_r2, eta, res', get_mass_data(local_file(\"mass.txt\"), \"r2\"))\ndef test_mass_r2(inp, m2_or_r2, eta, res):\n f, _, _ = gw_stripping.radius_to_mass(inp, eta)\n numpy.testing.assert_allclose(f, res, atol=0.07)\n\n\[email protected]('inp, m2_or_r2, eta, res', get_mass_data(local_file(\"mass.txt\"), \"m2\"))\ndef test_mass_m2(inp, m2_or_r2, eta, res):\n f, _, _ = gw_stripping.mass_to_radius(inp, eta)\n numpy.testing.assert_allclose(f, res, rtol=0.1)\n" ]
[ [ "numpy.testing.assert_allclose", "pandas.read_table" ] ]
andylucny/slnava
[ "02141541440c0e948abb9f287f130238adaa9e28" ]
[ "navigator.py" ]
[ "import numpy as np\nimport cv2\nfrom agentspace import Agent, Space\n\nclass NavigatorAgent(Agent):\n\n def __init__(self,gpsName,goalName,headingName,forwardName,turnName):\n self.gpsName = gpsName\n self.goalName = goalName\n self.headingName = headingName\n self.forwardName = forwardName\n self.turnName = turnName\n super().__init__()\n\n def init(self):\n self.state = 0\n self.last_position = None\n self.attach_trigger(self.gpsName)\n\n def senseSelectAct(self):\n \n position = Space.read(self.gpsName,None)\n if position is None:\n return\n \n if self.last_position is not None:\n heading = (position[0] - self.last_position[0], position[1] - self.last_position[1])\n Space.write(self.headingName,heading,validity=1.5)\n else:\n heading = None\n \n self.last_position = position\n \n if heading is None:\n return\n\n goal = Space.read(self.goalName,None)\n if goal is None:\n return\n \n if self.state == 0:\n self.state = 1\n print('goal',goal,'accepted')\n\n vis = Space.read('visual',None)\n if vis is not None:\n limit = 5\n angle = -vis\n print('vis',angle)\n if angle < -limit:\n Space.write(self.turnName,-1,validity=0.3)\n Space.write(self.forwardName,1,validity=0.3)\n elif angle > limit:\n Space.write(self.turnName,1,validity=0.3)\n Space.write(self.forwardName,1,validity=0.3)\n else:\n Space.write(self.turnName,0,validity=0.3)\n Space.write(self.forwardName,1,validity=0.3)\n \n return\n \n eps = 0.000005\n if (position[0] - goal[0])**2 + (position[1] - goal[1])**2 < eps**2:\n Space.write(self.forwardName,0,validity=1.5)\n Space.write(self.turnName,0,validity=1.5)\n if self.state == 1:\n self.state = 0\n print('goal',goal,'achieved')\n Space.write(self.goalName,None)\n else:\n goal_heading = (goal[0] - position[0], goal[1] - position[1])\n angle = -np.arctan2(heading[0]*goal_heading[1]-heading[1]*goal_heading[0],heading[0]*goal_heading[0]+goal_heading[1]*heading[1])\n angle *= 180 / np.pi\n #print('angle',angle)\n limit = 5\n if angle < -limit:\n Space.write(self.forwardName,1,validity=1.5)\n Space.write(self.turnName,-1,validity=0.8)\n elif angle > limit:\n Space.write(self.forwardName,1,validity=1.5)\n Space.write(self.turnName,1,validity=0.8)\n else:\n Space.write(self.forwardName,1,validity=1.5)\n Space.write(self.turnName,0,validity=1.5)\n" ]
[ [ "numpy.arctan2" ] ]
ekantola/character-segmenter
[ "44cc16d4a9d9b260a1a52424b00ac40b638e5387" ]
[ "sample_image.py" ]
[ "import cv2.cv2 as cv2 # and not just `import cv2`, to make VSCode happier\nimport numpy as np\n\nfrom PIL import Image\n\n\nPIL_GRAYSCALE = \"L\"\n\n\ndef read_grayscale(filename: str) -> np.ndarray:\n return cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2GRAY)\n\n\ndef pad_and_resize(image: Image.Image, desired_size: int = 48) -> Image.Image:\n old_size = image.size\n ratio = float(desired_size) / max(old_size)\n new_size = tuple([int(x * ratio) for x in old_size])\n resized_image = image.resize(new_size, Image.ANTIALIAS)\n new_image = Image.new(PIL_GRAYSCALE, (desired_size, desired_size), 255)\n x_offset = (desired_size - new_size[0]) // 2\n y_offset = (desired_size - new_size[1]) // 2\n new_image.paste(resized_image, (x_offset, y_offset))\n\n return new_image\n\n\nif __name__ == '__main__':\n import sys\n np.set_printoptions(edgeitems=30, linewidth=200, precision=1)\n print(read_grayscale(sys.argv[1]) / 255.0)\n" ]
[ [ "numpy.set_printoptions" ] ]
unax127/HyperGAN
[ "aa847e0c12f854ca95ac2d86e3dfa7cb7309410e" ]
[ "hypergan/tk_viewer.py" ]
[ "\"\"\"\nOpens a window that displays an image.\nUsage:\n\n from viewer import GlobalViewer\n GlobalViewer.update(image)\n\n\"\"\"\nimport numpy as np\nimport os\nimport contextlib\n\n\nclass TkViewer:\n def __init__(self, title=\"HyperGAN\", viewer_size=1, enabled=True):\n self.screen = None\n self.title = title\n self.viewer_size = viewer_size\n self.enabled = enabled\n self.enable_menu = True\n\n def update(self, gan, image):\n if not self.enabled: return\n\n original_image = image\n if len(np.shape(image)) == 2:\n s = np.shape(image)\n image = np.reshape(image, [s[0], s[1], 1])\n image = np.tile(image, [1,1,3])\n image = np.transpose(image, [1, 0,2])\n\n if not self.screen:\n\n with contextlib.redirect_stdout(None):\n import pygame\n\n import tkinter as tk\n import tkinter.ttk\n class ResizableFrame(tk.Frame):\n def __init__(self,parent,tkviewer=None,**kwargs):\n tk.Frame.__init__(self,parent,**kwargs)\n self.bind(\"<Configure>\", self.on_resize)\n self.height = kwargs['height']\n self.width = kwargs['width']\n self.tkviewer = tkviewer\n self.aspect_ratio = float(self.width)/float(self.height)\n\n def on_resize(self,event):\n wscale = float(event.width)/self.width\n hscale = float(event.height)/self.height\n self.width = event.width\n self.height = event.height\n self.config(width=self.width, height=self.height)\n self.tkviewer.size = [self.width, self.height]\n self.tkviewer.screen = self.tkviewer.pg.display.set_mode(self.tkviewer.size,self.tkviewer.pg.RESIZABLE) \n self.enforce_aspect_ratio(event)\n surface = self.tkviewer.pg.Surface([image.shape[0],image.shape[1]])\n self.tkviewer.pg.surfarray.blit_array(surface, image[:,:,:3])\n self.tkviewer.screen.blit(self.tkviewer.pg.transform.scale(surface,self.tkviewer.size),(0,0))\n self.tkviewer.pg.display.flip()\n\n\n def enforce_aspect_ratio(self, event):\n desired_width = event.width\n desired_height = int(event.width / self.aspect_ratio)\n\n if desired_height > event.height:\n desired_height = event.height\n desired_width = int(event.height * self.aspect_ratio)\n\n self.config(width=desired_width, height=desired_height)\n self.tkviewer.size = [desired_width, desired_height]\n self.tkviewer.screen = self.tkviewer.pg.display.set_mode(self.tkviewer.size,self.tkviewer.pg.RESIZABLE) \n\n\n self.size = [int(image.shape[0] * self.viewer_size), int(image.shape[1] * self.viewer_size)]\n\n self.pg = pygame\n self.tk = tk\n root = tk.Tk(className=self.title)\n embed = ResizableFrame(root, width=self.size[0], height=self.size[1], tkviewer=self)\n root.rowconfigure(0,weight=1)\n root.rowconfigure(1,weight=1)\n root.columnconfigure(0,weight=1)\n root.columnconfigure(1,weight=1)\n embed.pack(expand=tk.YES, fill=tk.BOTH)\n\n def _save_model(*args):\n gan.save(gan.save_file)\n\n def _exit(*args):\n gan.exit()\n\n def _refresh_sample(*args):\n gan.cli.sample()\n\n def _select_sampler(gan, name, value, submenu):\n def _select_sampler_proc():\n gan.cli.sampler = gan.cli.sampler_for(name)(gan)\n gan.cli.sample()\n _refresh_sampler_submenu(submenu)\n return _select_sampler_proc\n\n def _refresh_sampler_submenu(submenu):\n if submenu.count > 0:\n submenu.delete(0, submenu.count)\n\n for (k, v) in gan.get_registered_samplers().items():\n showall = tk.BooleanVar()\n showall.set(gan.selected_sampler == k)\n if v.compatible_with(gan):\n state = tk.NORMAL\n else:\n state = tk.DISABLED\n\n print(\"Selected\", gan.selected_sampler, k, gan.selected_sampler == k)\n submenu.add_checkbutton(label=k, onvalue=True, offvalue=False, variable=showall, 
command=_select_sampler(gan, k, showall, submenu), state=state)\n num_samplers = len(gan.get_registered_samplers())\n\n submenu.count = num_samplers\n\n\n def _create_status_bar(root):\n statusbar = tk.Frame(root, height=24)\n statusbar.pack(side=tk.BOTTOM, fill=tk.X)\n\n label_training = tk.Label(statusbar, text=\"Training\", font=12)\n label_training.grid(row=0,column=0) \n sep = tkinter.ttk.Separator(statusbar, orient=tk.VERTICAL).grid(column=1, row=0, sticky='ns')\n label = tk.Label(statusbar, text=\"Starting\", font=12)\n label.grid(row=0, column=2) \n def __update_step():\n if hasattr(gan, 'step_count'):\n label['text']=(\"Step \" + str(gan.step_count))\n root.after(1000, __update_step)\n\n\n __update_step()\n return statusbar\n\n menubar = tk.Menu(root)\n filemenu = tk.Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Save\", command=_save_model, underline=0, accelerator=\"Ctrl+s\")\n\n filemenu.add_separator()\n\n samplemenu = tk.Menu(menubar, tearoff=0)\n samplemenu.add_command(label=\"Refresh\", command=_refresh_sample, underline=0, accelerator=\"Ctrl+r\")\n\n filemenu.add_command(label=\"Save and Exit\", command=_exit, underline=10, accelerator=\"Ctrl+q\")\n menubar.add_cascade(label=\"File\", menu=filemenu, underline=0)\n menubar.add_cascade(label=\"Sample\", menu=samplemenu, underline=0)\n samplermenu = tk.Menu(samplemenu)\n samplemenu.add_cascade(label=\"Sampler\", menu=samplermenu, underline=0)\n samplermenu.count = 0\n _refresh_sampler_submenu(samplermenu)\n\n root.bind_all(\"<Control-q>\", _exit)\n root.bind_all(\"<Control-r>\", _refresh_sample)\n root.bind_all(\"<Control-s>\", _save_model)\n\n\n if self.enable_menu:\n root.config(menu=menubar)\n _create_status_bar(root)\n\n # Tell pygame's SDL window which window ID to use\n os.environ['SDL_WINDOWID'] = str(embed.winfo_id())\n # Show the window so it's assigned an ID.\n root.update()\n self.root = root\n\n # Usual pygame initialization\n if self.viewer_size <= 0:\n self.viewer_size = 0.1\n self.aspect_w = image.shape[1] / image.shape[0]\n self.aspect_h = image.shape[0] / image.shape[1]\n self.temp_size = self.size\n self.screen = self.pg.display.set_mode(self.size,self.pg.RESIZABLE)\n self.pg.display.set_caption(self.title)\n\n root.title(self.title)\n root.wm_title(self.title)\n embed.winfo_toplevel().title(self.title)\n\n padw = 0\n padh = 0\n if original_image.shape[0] > original_image.shape[1]:\n padh = (original_image.shape[0] - original_image.shape[1])//2\n if original_image.shape[1] > original_image.shape[0]:\n padw = (original_image.shape[1] - original_image.shape[0])//2\n pad_image = np.pad(original_image, [(padw, padw), (padh,padh), (0,0)], 'constant')\n w = pad_image.shape[0]\n h = pad_image.shape[1]\n xdata = b'P6 ' + str(w).encode() + b' ' + str(h).encode() + b' 255 ' + pad_image.tobytes()\n tk_image = self.tk.PhotoImage(data=xdata, format=\"PPM\", width=w, height=h)\n self.root.tk.call('wm', 'iconphoto', self.root._w, tk_image.subsample(max(1, w//256), max(1, h//256)))\n\n surface = self.pg.Surface([image.shape[0],image.shape[1]])\n self.pg.surfarray.blit_array(surface, image[:,:,:3])\n self.screen.blit(self.pg.transform.scale(surface,self.size),(0,0))\n self.pg.display.flip()\n\n def tick(self):\n \"\"\"\n Called repeatedly regardless of gan state.\n \"\"\"\n if hasattr(self, 'root'):\n self.root.update()\n\n \n" ]
[ [ "numpy.pad", "numpy.reshape", "numpy.tile", "numpy.shape", "numpy.transpose" ] ]
JhonesBR/google_by_img
[ "cfb8565648cff475a58c91fc8d90c19087a71244" ]
[ "googleByImg.py" ]
[ "'''##########################################################################################################'''\n'''System Variables'''\n\nMAX_WORDS = 14\nPYTESSERACT_PATH = r'C:\\Program Files\\Tesseract-OCR\\tesseract'\n\n'''\n If you want to change the language of recognition change it at\n line 108 \n por for Portuguese Language\n eng for English Language\n'''\n\nOPERATION = 1\n'''\n 1 for scan and search at Google\n 2 for scan and print\n'''\n\nBOOK = 1\n'''\n Change to 1 to scan book/photo images\n'''\n\n\n'''##########################################################################################################'''\n\n\n# Default python Library imports\nimport os\nimport webbrowser\n\n# Pillow library\ntry:\n from PIL import Image, ImageOps\nexcept ImportError:\n os.system('python -m pip install pillow')\n from PIL import Image, ImageOps\n\n# Pyautogui library\ntry:\n import pyautogui\nexcept ImportError:\n os.system('python -m pip install pyautogui')\n import pyautogui\n\n# Numpy library\ntry:\n import numpy as np\nexcept ImportError:\n os.system('python -m pip install numpy')\n import numpy as np\n\n# Keyboard library\ntry:\n from pynput.keyboard import Key, KeyCode, Listener\nexcept ImportError:\n os.system('python -m pip install pynput')\n from pynput.keyboard import Key, KeyCode, Listener\n\n# Cv2 library\ntry:\n import cv2\nexcept ImportError:\n os.system('python -m pip install opencv-python')\n\n# PyTesseract library\ntry:\n import pytesseract\nexcept ImportError:\n os.system('python -m pip install pytesseract')\n\n\npytesseract.pytesseract.tesseract_cmd = PYTESSERACT_PATH\n\nstartCord, endCord = (), ()\n\n\n# Function to get the first coordinates (Top Left)\ndef getStartZone():\n global startCord\n global endCord\n startCord = pyautogui.position()\n print(\"Top-left:\", \"X=\"+str(startCord[0])+\", Y=\"+str(startCord[1]))\n\n\n# Function to get the second coordinates (Botton Right) and execute \ndef getEndZone():\n global startCord\n global endCord\n endCord = pyautogui.position()\n print(\"Botton-right:\", \"X=\"+str(endCord[0])+\", Y=\"+str(endCord[1]))\n \n\n# Verify Possibility, Take Screenshot and Execute\ndef VPTSaE():\n global startCord\n global endCord\n global MAX_WORDS\n global OPERATION\n global pressed_vks\n global BOOK\n\n start, end = startCord, endCord\n\n # Verify existency of both coordinates\n if start != () and end != ():\n # Get Screenshot of area\n width = end[0] - start[0]\n height = end[1] - start[1]\n img = pyautogui.screenshot(region=(start[0], start[1], width, height))\n \n # Improve the image for better readability\n img = improveImage(img, BOOK)\n\n # Analyze text of image using tesseract\n text = pytesseract.image_to_string(img, lang=\"por\")\n\n if OPERATION == 2:\n # Just print the result\n print(text)\n \n # Cut the text based on MAX_WORDS defined on top of the program\n text = getXWords(text, MAX_WORDS)\n\n if OPERATION == 1:\n # Search at google for results\n url = \"https://www.google.com.tr/search?q={}\".format(text)\n webbrowser.open(url) \n # Shows confirmation message\n print('Searching for \"'+ text +'\"\\n')\n\n # Redefine coordinates to empty\n startCord, endCord = (), ()\n else:\n # \"Error\" message\n print(\"Coordinates non defined\")\n \n # Remove pressed keys to fix buffer issue\n toRemove = []\n for vk in pressed_vks:\n toRemove.append(vk)\n for vk in toRemove:\n pressed_vks.remove(vk)\n\n\n# Function to cut text based on MAX_WORD\ndef getXWords(text, MAX_WORDS):\n s = text.split()\n if len(s) <= MAX_WORDS:\n return \" 
\".join(s)\n else:\n return \" \".join(s[:MAX_WORDS])\n\n\n# Function to improve the readability of the image for tesseract\ndef improveImage(img, BOOK):\n contrast = ImageOps.autocontrast(img)\n opencvImage = cv2.cvtColor(np.array(contrast), cv2.COLOR_RGB2BGR)\n gray = cv2.cvtColor(opencvImage, cv2.COLOR_BGR2GRAY)\n if BOOK == 1:\n adaptive_threshold = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 85, 11)\n adaptive_threshold = adjust_gamma(adaptive_threshold, 0.5)\n im_pil = Image.fromarray(adaptive_threshold)\n else:\n im_pil = Image.fromarray(gray)\n\n return im_pil\n\n\n# Shortcuts linked to specific functions\nshortcutList = {\n frozenset([Key.shift, KeyCode(vk=65)]): getStartZone, # shift + a (Get start coordinates)\n frozenset([Key.shift, KeyCode(vk=66)]): getEndZone, # shift + b (Get end coordinates)\n frozenset([Key.shift, KeyCode(vk=67)]): VPTSaE, # shift + c (Try to take screenshot and execute)\n}\n\ndef adjust_gamma(image, gamma=1.0):\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n return cv2.LUT(image, table)\n\n# List of keys pressed to be compared\npressed_vks = set()\n\n# Get VK for the key pressed\ndef get_vk(key):\n return key.vk if hasattr(key, 'vk') else key.value.vk\n\n# Check if the keys pressed match to the shortcuts defined\ndef checkShortcutPress(combination):\n return all([get_vk(key) in pressed_vks for key in combination])\n\n# Executed on any key press\ndef on_press(key):\n vk = get_vk(key)\n pressed_vks.add(vk)\n for combination in shortcutList:\n if checkShortcutPress(combination):\n shortcutList[combination]()\n\n# Executed on any key release\ndef on_release(key):\n vk = get_vk(key)\n if vk in pressed_vks:\n pressed_vks.remove(vk)\n\n\n# Shows message and start the listener for keys\nprint(\"Program in operation\\n\")\nwith Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()" ]
[ [ "numpy.array", "numpy.arange" ] ]
lyubomirr/AI-Algorithms
[ "53cb37572d084d4204f0b8632ebb1cea78b1dc8c" ]
[ "KMeans/clusterize.py" ]
[ "import random\nimport math\nimport matplotlib.pyplot as plt\nfrom itertools import accumulate\n\ndef k_means(k, dataset, iterations):\n best_centroids, best_clusters = k_means_core(k, dataset)\n best_wssd = calculate_total_wssd(best_centroids, best_clusters)\n\n for i in range(iterations - 1):\n centroids, clusters = k_means_core(k, dataset)\n wssd = calculate_total_wssd(centroids, clusters)\n\n if(wssd < best_wssd):\n best_centroids, best_clusters, best_wssd = centroids, clusters, wssd\n\n print_data(best_centroids, best_clusters)\n\ndef k_means_core(k, dataset):\n centroids = k_means_plus_plus_centroids(k, dataset)\n current_centroid_assignments = []\n has_changed = True\n\n while has_changed == True:\n current_centroid_assignments, has_changed = \\\n calculate_closest_centroid(centroids, dataset, current_centroid_assignments)\n \n cluster_dict = parse_assignments_to_dict(dataset, current_centroid_assignments, k)\n if(has_changed == True):\n # Don't need to calculate new centroids as nothing changed in clusters.\n centroids = calculate_centroids_new_position(k, dataset, cluster_dict)\n \n return centroids, cluster_dict\n\ndef k_means_plus_plus_centroids(k, dataset):\n centroids = []\n centroids.append(random.choice(dataset))\n\n for i in range(k-1):\n acc_probabilities = get_accumulated_probabilities(centroids, dataset)\n new_centroid_idx = get_choice_from_probabilities(acc_probabilities)\n centroids.append(dataset[new_centroid_idx])\n \n return centroids\n \ndef get_accumulated_probabilities(centroids, dataset):\n distances = [min([calculate_squared_eucledian_distance(point, centroid) for centroid in centroids]) \n for point in dataset]\n sum_of_distances = sum(distances)\n probabilities = [dist/sum_of_distances for dist in distances]\n return list(accumulate(probabilities))\n\ndef get_choice_from_probabilities(accumulated_probabilities):\n choice = random.uniform(0, 1)\n for idx, prob in enumerate(accumulated_probabilities):\n if choice <= prob:\n return idx \n\ndef calculate_closest_centroid(centroids, dataset, prev_centroid_assignments):\n has_changed = False\n centroid_assignments = []\n\n for i, point in enumerate(dataset):\n min_distance = None\n min_centroid_idx = -1\n\n for j, centroid in enumerate(centroids):\n distance = calculate_squared_eucledian_distance(point, centroid)\n if(min_distance == None or distance < min_distance):\n min_distance = distance\n min_centroid_idx = j\n \n centroid_assignments.append(min_centroid_idx)\n if (len(prev_centroid_assignments) == 0 # Initial calculation\n or prev_centroid_assignments[i] != min_centroid_idx):\n has_changed = True\n\n return centroid_assignments, has_changed\n\n\ndef calculate_squared_eucledian_distance(point_a, point_b):\n result = 0\n dims = len(point_a)\n\n for i in range(dims):\n result += (point_a[i] - point_b[i]) ** 2\n\n return result\n\ndef parse_assignments_to_dict(dataset, centroid_assignments, centroid_count):\n cluster_dict = {}\n for i in range(centroid_count):\n cluster_dict[i] = []\n\n for i, centroid_number in enumerate(centroid_assignments): \n cluster_dict[centroid_number].append(dataset[i])\n\n return cluster_dict\n\ndef calculate_centroids_new_position(centroid_count, dataset, cluster_dict):\n new_centroids = []\n for i in range(centroid_count):\n points = cluster_dict[i]\n sum_x = 0\n sum_y = 0\n\n for point in points:\n sum_x += point[0]\n sum_y += point[1]\n\n avg_x = sum_x / len(points)\n avg_y = sum_y / len(points)\n\n new_centroids.append([avg_x, avg_y])\n \n return new_centroids\n\ndef 
calculate_total_wssd(centroids, cluster_dict):\n total_wssd = 0\n for centroid_number in cluster_dict:\n for point in cluster_dict[centroid_number]:\n total_wssd += calculate_squared_eucledian_distance(point, centroids[centroid_number])\n\n return total_wssd\n\ndef print_data(centroids, cluster_dict):\n # Plot the centroids\n for centroid_coords in centroids:\n plt.plot(centroid_coords[0], centroid_coords[1], marker=\"X\", markersize=8, c=\"red\")\n\n for i in range(len(centroids)):\n color = [random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)]\n xs = [point[0] for idx, point in enumerate(cluster_dict[i])]\n ys = [point[1] for idx, point in enumerate(cluster_dict[i])]\n\n plt.scatter(xs, ys, c=[color])\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.scatter" ] ]
uehir0/Gasyori100knock
[ "a38d3c516f5f965822610edcf113f59412905c03" ]
[ "Question_21_30/codes/question26.py" ]
[ "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimg = cv2.imread(\"imori.jpg\").astype(np.float)\nH,W,C=img.shape\n\n# Nearest Neighbor\na = 1.5\naH = int(a * H)\naW = int(a * W)\n\ny = np.arange(aH).repeat(aW).reshape(aW,aH)\ny = (y / a)\nx = np.tile(np.arange(aW),(aH,1))\nx = (x / a)\n\nfy = np.floor(y).astype(np.int)\nfx = np.floor(x).astype(np.int)\n\nfy = np.minimum(fy, W-2)\nfx = np.minimum(fx, W-2)\n\ndy = y - fy\ndx = x - fx\n\ndy = np.repeat(np.expand_dims(dy, axis=-1), 3, axis=-1)\ndx = np.repeat(np.expand_dims(dx, axis=-1), 3, axis=-1)\n\n\nout = (1-dx) * (1-dy) * img[fy, fx] + dx * (1 - dy) * img[fy, fx+1] + (1 - dx) * dy * img[fy+1, fx] + dx * dy * img[fy+1, fx+1]\n\n#ここでやっていること\n# xが 1 1 1 1 1 2 2 2 2 2\n# yが 1 2 3 4 5 1 2 3 4 5 見たいな数列が与えられてる\n\n\nout[out>255]=255\nout = out.astype(np.uint8)\n\n# Save result\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.imwrite(\"question26.jpg\", out)\n" ]
[ [ "numpy.floor", "numpy.arange", "numpy.expand_dims", "numpy.minimum" ] ]
hiromu/fairseq
[ "b8651bc984413e7e45f44294dffcc85692ba89c1" ]
[ "fairseq/data/token_block_dataset.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\nfrom fairseq.data import FairseqDataset, plasma_utils\nfrom fairseq.data.indexed_dataset import best_fitting_uint_dtype\n\nclass TokenBlockDataset(FairseqDataset):\n \"\"\"Break a Dataset of tokens into blocks.\n\n Args:\n dataset (~torch.utils.data.Dataset): dataset to break into blocks\n sizes (List[int]): sentence lengths (required for 'complete' and 'eos')\n block_size (int): maximum block size (ignored in 'eos' break mode)\n break_mode (str, optional): Mode used for breaking tokens. Values can\n be one of:\n - 'none': break tokens into equally sized blocks (up to block_size)\n - 'complete': break tokens into blocks (up to block_size) such that\n blocks contains complete sentences, although block_size may be\n exceeded if some sentences exceed block_size\n - 'complete_doc': similar to 'complete' mode, but do not\n cross document boundaries\n - 'eos': each block contains one sentence (block_size is ignored)\n include_targets (bool, optional): return next tokens as targets\n (default: False).\n document_sep_len (int, optional): document separator size (required for\n 'complete_doc' break mode). Typically 1 if the sentences have eos\n and 0 otherwise.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n sizes,\n block_size,\n pad,\n eos,\n break_mode=None,\n include_targets=False,\n document_sep_len=1,\n ):\n try:\n from fairseq.data.token_block_utils_fast import (\n _get_slice_indices_fast,\n _get_block_to_dataset_index_fast,\n )\n except ImportError:\n raise ImportError(\n \"Please build Cython components with: `pip install --editable .` \"\n \"or `python setup.py build_ext --inplace`\"\n )\n\n super().__init__()\n self.dataset = dataset\n self.pad = pad\n self.eos = eos\n self.include_targets = include_targets\n\n assert len(dataset) == len(sizes)\n assert len(dataset) > 0\n\n if isinstance(sizes, list):\n sizes = np.array(sizes, dtype=np.int64)\n else:\n if torch.is_tensor(sizes):\n sizes = sizes.numpy()\n sizes = sizes.astype(np.int64)\n\n break_mode = break_mode if break_mode is not None else \"none\"\n\n # For \"eos\" break-mode, block_size is not required parameters.\n if break_mode == \"eos\" and block_size is None:\n block_size = 0\n\n slice_indices = _get_slice_indices_fast(\n sizes, str(break_mode), block_size, document_sep_len\n )\n self._sizes = slice_indices[:, 1] - slice_indices[:, 0]\n\n # build index mapping block indices to the underlying dataset indices\n if break_mode == \"eos\":\n # much faster version for eos break mode\n block_to_dataset_index = np.stack(\n [\n np.arange(len(sizes)), # starting index in dataset\n np.zeros(\n len(sizes), dtype=np.long\n ), # starting offset within starting index\n np.arange(len(sizes)), # ending index in dataset\n ],\n 1,\n )\n else:\n block_to_dataset_index = _get_block_to_dataset_index_fast(\n sizes,\n slice_indices,\n )\n size_dtype = np.uint16 if block_size < 65535 else np.uint32\n slice_indices_dtype = best_fitting_uint_dtype(slice_indices[-1].max())\n\n self._slice_indices = plasma_utils.PlasmaArray(slice_indices.astype(slice_indices_dtype))\n self._sizes = plasma_utils.PlasmaArray(self._sizes.astype(size_dtype))\n self._block_to_dataset_index = plasma_utils.PlasmaArray(block_to_dataset_index.astype(slice_indices_dtype))\n\n @property\n def slice_indices(self):\n return self._slice_indices.array\n\n @property\n def 
sizes(self):\n return self._sizes.array\n\n @property\n def block_to_dataset_index(self):\n return self._block_to_dataset_index.array\n\n def attr(self, attr: str, index: int):\n start_ds_idx, _, _ = self.block_to_dataset_index[index]\n return self.dataset.attr(attr, start_ds_idx)\n\n def __getitem__(self, index):\n start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index]\n\n buffer = torch.cat(\n [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)]\n )\n\n slice_s, slice_e = self.slice_indices[index]\n length = slice_e - slice_s\n s, e = start_offset, start_offset + length\n item = buffer[s:e]\n\n if self.include_targets:\n # *target* is the original sentence (=item)\n # *source* is shifted right by 1 (maybe left-padded with eos)\n # *past_target* is shifted right by 2 (left-padded as needed)\n if s == 0:\n source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]])\n past_target = torch.cat(\n [item.new([self.pad, self.eos]), buffer[0 : e - 2]]\n )\n else:\n source = buffer[s - 1 : e - 1]\n if s == 1:\n past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]])\n else:\n past_target = buffer[s - 2 : e - 2]\n\n return source, item, past_target\n\n return item\n\n def __len__(self):\n return len(self.slice_indices)\n\n @property\n def supports_prefetch(self):\n return getattr(self.dataset, \"supports_prefetch\", False)\n\n def prefetch(self, indices):\n self.dataset.prefetch(\n {\n ds_idx\n for index in indices\n for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]]\n for ds_idx in range(start_ds_idx, end_ds_idx + 1)\n }\n )\n" ]
[ [ "torch.is_tensor", "numpy.array" ] ]
suri5471/skillmodels
[ "8ceeeae7892cbec859c5725e4e169f2b6d025be4" ]
[ "skillmodels/visualize_factor_distributions.py" ]
[ "import sys\nimport warnings\nfrom traceback import format_exception\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport seaborn as sns\n\nfrom skillmodels.process_model import process_model\n\n\ndef plot_factor_distributions(\n model_dict,\n states,\n period,\n combine_plots_in_grid=True,\n add_3d_plots=False,\n n_points=50,\n lower_kde_kws=None,\n diag_kde_kws=None,\n surface_kws=None,\n):\n \"\"\"Visualize pairwise_factor_distributions in certain period.\n\n Args:\n model_dict (dict): The model specification. See: :ref:'model_specs'\n states (list, pandas.DataFrame): list of tidy DataFrames with filtered\n or simulated states or only one DataFrame with filtered or\n simulated states.They are used to estimate the state ranges in\n each period (if state_ranges are not given explicitly) and to\n estimate the distribution of the latent factors.\n period (int): The selected period of the filtered states that are plotted.\n combine_plots_in_grid (boolen): Return a figure containing subplots for each\n pair of factors or a dictionary of individual plots. Default True.\n add_3d_plots (boolen): Draw and return 3D plots or not. Default False.\n add_contour_plots (boolen): Draw and return contour plots or not. Default True.\n n_points (int): Number of grid points per axis and plot. Default 50.\n lower_kde_kws (dict): Keyword arguments for seaborn.kdeplot, used to generate\n the plots in the lower triangle of the grid, i.e. the two dimensional\n kdeplot for each factor pair.\n diag_kde_kws (dict): Keyword arguments for seaborn.kdeplot, used to generate\n the plots on the diagonal of the grid, i.e. the one dimensional\n kdeplot for each factor.\n surface_kws (dict): Keyword arguments for Axes.plot_surface, used to generate\n the plots in the upper triangle of the grid, i.e. 
the surface plot of the\n kernel density estimates for each factor pair.\n\n Returns:\n matplotlib.Figure: The grid plot or dict of individual plots\n\n \"\"\"\n if add_3d_plots and not isinstance(states, pd.DataFrame):\n raise ValueError(\"3d plots are only supported if states is a DataFrame\")\n\n lower_kde_kws = {} if lower_kde_kws is None else lower_kde_kws\n diag_kde_kws = {} if diag_kde_kws is None else diag_kde_kws\n surface_kws = {} if surface_kws is None else surface_kws\n\n model = process_model(model_dict)\n factors = model[\"labels\"][\"latent_factors\"]\n\n data, hue = _process_data(states, period, factors)\n\n grid = _get_axes_grid(\n factors=factors,\n combine_into_grid=combine_plots_in_grid,\n add_3d_plots=add_3d_plots,\n )\n for row, fac1 in enumerate(factors):\n for col, fac2 in enumerate(factors):\n ax = grid[row][col]\n\n if col < row:\n kwargs = {\n \"gridsize\": n_points,\n **lower_kde_kws,\n \"y\": fac1,\n \"x\": fac2,\n \"data\": data,\n \"hue\": hue,\n \"ax\": ax,\n }\n try:\n _ = sns.kdeplot(**kwargs)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n msg = _get_error_message(data, [fac1, fac2], \"bivariate kdeplot\")\n warnings.warn(msg)\n\n elif col == row:\n kwargs = {\n \"gridsize\": n_points,\n **diag_kde_kws,\n \"y\": None,\n \"x\": fac1,\n \"data\": data,\n \"hue\": hue,\n \"ax\": ax,\n }\n try:\n _ = sns.kdeplot(**kwargs)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n msg = _get_error_message(data, fac1, \"univariate kdeplot\")\n warnings.warn(msg)\n\n elif add_3d_plots:\n try:\n _ = _3d_kdeplot(\n x=fac1,\n y=fac2,\n data=data,\n n_points=n_points,\n ax=ax,\n surface_kws=surface_kws,\n )\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n msg = _get_error_message(data, [fac1, fac2], \"surface plot\")\n warnings.warn(msg)\n\n sns.despine()\n\n if combine_plots_in_grid:\n out = grid[0][0].get_figure()\n else:\n out = {}\n for row, fac1 in enumerate(factors):\n for col, fac2 in enumerate(factors):\n _, visible = _get_ax_properties(row, col, add_3d_plots)\n if visible:\n out[(fac1, fac2)] = grid[row][col].get_figure()\n\n return out\n\n\ndef _get_error_message(data, factors, plot_type):\n summary = data[factors].describe().round(3).to_string()\n tb = get_traceback()\n msg = (\n f\"\\n\\n\\nAn error occurred while trying to generate a {plot_type} for the\\n\"\n f\"factors\\n\\n\\n {factors}\\n\\n\\nHere is some information on the factors:\"\n f\"\\n\\n\\n{summary}\\n\\n\\n The error was:\\n{tb}\"\n )\n return msg\n\n\ndef _3d_kdeplot(x, y, data, n_points, ax, surface_kws):\n xx, yy, f = _calculate_kde_for_3d(data_cleaned=data, a=x, b=y, n_points=n_points)\n kwargs = {\n \"rstride\": 1,\n \"cstride\": 1,\n \"linewidth\": 0,\n \"cmap\": \"coolwarm\",\n \"edgecolor\": \"none\",\n **surface_kws,\n }\n _ = ax.plot_surface(xx, yy, f, **kwargs)\n\n ax.w_xaxis.pane.fill = False\n ax.w_yaxis.pane.fill = False\n ax.w_zaxis.pane.fill = False\n\n\ndef _process_data(states, period, factors):\n if isinstance(states, pd.DataFrame):\n data = states.query(f\"period == {period}\")[factors]\n hue = None\n else:\n if not isinstance(states, dict):\n states = {i: df for i, df in enumerate(states)}\n to_concat = []\n for name, df in states.items():\n df = df.query(f\"period == {period}\")[factors].copy()\n df[\"scenario\"] = name\n to_concat.append(df)\n data = pd.concat(to_concat)\n hue = \"scenario\"\n data = data.reset_index()\n return data, hue\n\n\ndef _get_axes_grid(factors, combine_into_grid, add_3d_plots):\n dim = 
len(factors)\n axes = []\n if combine_into_grid:\n fig = plt.figure(figsize=(dim * 5, dim * 5))\n gs = fig.add_gridspec(dim, dim)\n for row in range(len(factors)):\n grid_row = []\n for col in range(len(factors)):\n proj, visible = _get_ax_properties(row, col, add_3d_plots)\n ax = fig.add_subplot(gs[row, col], projection=proj)\n ax.set_visible(visible)\n grid_row.append(ax)\n axes.append(grid_row)\n else:\n for row in range(len(factors)):\n grid_row = []\n for col in range(len(factors)):\n proj, visible = _get_ax_properties(row, col, add_3d_plots)\n fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={\"projection\": proj})\n grid_row.append(ax)\n ax.set_visible(visible)\n axes.append(grid_row)\n\n return axes\n\n\ndef _get_ax_properties(row, col, add_3d):\n projection = \"3d\" if add_3d and col > row else None\n visible = col <= row or add_3d\n return projection, visible\n\n\ndef _calculate_kde_for_3d(data_cleaned, a, b, n_points):\n x = data_cleaned[a]\n y = data_cleaned[b]\n variables = [a, b]\n lb1, lb2 = data_cleaned[variables].min()\n ub1, ub2 = data_cleaned[variables].max()\n\n cp = complex(n_points)\n xx, yy = np.mgrid[lb1:ub1:cp, lb2:ub2:cp]\n positions = np.vstack([xx.ravel(), yy.ravel()])\n values = np.vstack([x, y])\n kernel = scipy.stats.gaussian_kde(values)\n f = np.reshape(kernel(positions).T, xx.shape)\n return xx, yy, f\n\n\ndef get_traceback():\n tb = format_exception(*sys.exc_info())\n if isinstance(tb, list):\n tb = \"\".join(tb)\n return tb\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "scipy.stats.gaussian_kde", "pandas.concat", "numpy.vstack" ] ]
baagaard-usgs/eew-analyze
[ "5f9ec7d6eecd693fc0a3147d2695c957da64d4b2" ]
[ "eewperformance/analysisdb.py" ]
[ "# ======================================================================\n#\n# Brad T. Aagaard\n# U.S. Geological Survey\n#\n# ======================================================================\n#\n\nimport sqlite3\nimport sys\nimport logging\nimport datetime\nimport dateutil.parser\nimport pytz\n\nimport numpy\n\nTABLES = [\n (\"eew_alerts\", [\n \"server TEXT NOT NULL\",\n \"event_id INTEGER NOT NULL\",\n \"category TEXT NOT NULL\",\n \"message_type TEXT NOT NULL\",\n \"timestamp TEXT NOT NULL\",\n \"version INTEGER NOT NULL\",\n \"magnitude REAL NOT NULL\",\n \"magnitude_type TEXT DEFAULT Mw\",\n \"latitude REAL NOT NULL\",\n \"longitude REAL NOT NULL\",\n \"depth_km REAL NOT NULL\",\n \"origin_time TEXT NOT NULL\",\n \"num_stations INTEGER DEFAULT 0\",\n \"UNIQUE(server, event_id, version, category) ON CONFLICT FAIL\",\n ]),\n (\"comcat_events\", [\n \"event_id TEXT NOT NULL PRIMARY KEY\",\n \"latitude REAL NOT NULL\",\n \"longitude REAL NOT NULL\",\n \"depth_km REAL NOT NULL\",\n \"origin_time TEXT NOT NULL\",\n \"magnitude REAL NOT NULL\",\n \"magnitude_type TEXT DEFAULT Mw\",\n \"description TEXT\",\n \"UNIQUE(event_id) ON CONFLICT FAIL\",\n ]),\n (\"comcat_shakemaps\", [\n \"event_id TEXT NOT NULL PRIMARY KEY\",\n \"mmi_bias REAL\",\n \"mmi_max REAL\",\n \"pga_bias REAL\",\n \"pga_max REAL\",\n \"pgv_bias REAL\",\n \"pgv_max REAL\",\n \"psa03_bias REAL\",\n \"psa03_max REAL\",\n \"psa10_bias REAL\",\n \"psa10_max REAL\",\n \"psa30_bias REAL\",\n \"psa30_max REAL\",\n \"gmpe TEXT\",\n \"pgm2mi TEXT\",\n \"software_version TEXT\",\n \"UNIQUE(event_id) ON CONFLICT FAIL\",\n ]),\n (\"performance\", [\n \"comcat_id TEXT NOT NULL\",\n \"eew_server TEXT NOT NULL\",\n \"dm_id INTEGER NOT NULL\",\n \"dm_timestamp TEXT NOT NULL\",\n \"gmpe TEXT NOT NULL\",\n \"fragility TEXT NOT NULL\",\n \"magnitude_threshold REAL NOT NULL\",\n \"mmi_threshold REAL NOT NULL\",\n \"alert_latency_sec REAL NOT NULL\",\n \"area_damage REAL NOT NULL\",\n \"area_alert REAL NOT NULL\",\n \"area_alert_perfect REAL NOT NULL\",\n \"area_costsavings_eew REAL NOT NULL\",\n \"area_costsavings_perfecteew REAL NOT NULL\",\n \"population_damage REAL NOT NULL\",\n \"population_alert REAL NOT NULL\",\n \"population_alert_perfect REAL NOT NULL\",\n \"population_costsavings_eew REAL NOT NULL\",\n \"population_costsavings_perfecteew REAL NOT NULL\",\n \"UNIQUE(comcat_id, eew_server, dm_id, gmpe, fragility, alert_latency_sec, magnitude_threshold, mmi_threshold) ON CONFLICT FAIL\",\n ]),\n]\n\n\nclass Operation(object):\n \"\"\"Database operation object for minimizing locking behavior in database queries.\n\n Based on Tranaction object in beets library\n (https://github.com/beetbox/beets/blob/master/beets/dbcore/db.py)\n\n \"\"\"\n\n def __init__(self, filename):\n self.connection = sqlite3.connect(filename, timeout=20.0)\n self.connection.row_factory = sqlite3.Row\n self.cursor = self.connection.cursor()\n return \n \n def __enter__(self):\n return self\n \n def __exit__(self, exc_type, exc_value, traceback):\n if self.connection:\n self.connection.commit()\n self.connection.close()\n self.connection = None\n self.cursor = None\n return\n\n\nclass AnalysisData(object):\n \"\"\"SQLite database with DM alerts and ComCat events.\n \"\"\"\n\n def __init__(self, filename):\n \"\"\"Constructor with filename.\n\n :type filename: str\n :param filename: Filename of SQLite database\n\n \"\"\"\n self.filename = filename\n \n return\n\n def operation(self):\n return Operation(self.filename)\n \n \n def init(self, key):\n \"\"\"Create 
database.\n \"\"\"\n with self.operation() as op:\n if key == \"all\":\n for name, columns in TABLES[::-1]:\n op.cursor.execute(\"DROP TABLE IF EXISTS {}\".format(name))\n for name,columns in TABLES:\n op.cursor.execute(\"CREATE TABLE {name} ({fields})\".format(name=name, fields=\", \".join(columns)))\n else:\n for name, columns in TABLES:\n if name == key:\n op.cursor.execute(\"DROP TABLE IF EXISTS {}\".format(name))\n op.cursor.execute(\"CREATE TABLE {name} ({fields})\".format(name=name, fields=\", \".join(columns)))\n return\n\n def add_alerts(self, alerts, replace=False):\n \"\"\"Add alert info to database.\n \"\"\"\n COLUMNS = (\n \"server\",\n \"event_id\",\n \"category\",\n \"message_type\",\n \"timestamp\",\n \"version\",\n \"magnitude\",\n \"magnitude_type\",\n \"latitude\",\n \"longitude\",\n \"depth_km\",\n \"origin_time\",\n \"num_stations\",\n )\n insertCols = \", \".join(COLUMNS)\n valueCols = \", \".join([\":{}\".format(col) for col in COLUMNS])\n cmd = \"INSERT\"\n if replace:\n cmd += \" OR REPLACE\"\n with self.operation() as op:\n try:\n #self.cursor.executemany(\"INSERT INTO eew_alerts({}) VALUES({})\".format(insertCols, valueCols), alerts)\n for alert in alerts:\n op.cursor.execute(\"{} INTO eew_alerts({}) VALUES({})\".format(cmd, insertCols, valueCols), alert)\n except sqlite3.IntegrityError as ex:\n logging.getLogger(__name__).debug(str(ex))\n #logging.getLogger(__name__).debug(str(alerts))\n logging.getLogger(__name__).debug(str(alert))\n return\n \n def add_event(self, event, replace=False):\n \"\"\"Add ComCat event to database.\n\n :type event: Detail event\n :param event: ComCat event to add to database.\n \"\"\"\n COLUMNS = (\n \"event_id\",\n \"latitude\",\n \"longitude\",\n \"depth_km\",\n \"origin_time\",\n \"magnitude\",\n \"magnitude_type\",\n \"description\",\n )\n\n originTime = event.time if event.time.tzinfo else event.time.replace(tzinfo=pytz.UTC)\n insertCols = \", \".join(COLUMNS)\n eventValues = (event.id, event.latitude, event.longitude, event.depth, originTime, event.magnitude, event[\"magType\"], event.location)\n valueCols = \",\".join(\"?\"*len(eventValues))\n cmd = \"INSERT\"\n if replace:\n cmd += \" OR REPLACE\"\n with self.operation() as op:\n try:\n op.cursor.execute(\"{0} INTO comcat_events({1}) VALUES({2})\".format(cmd, insertCols, valueCols), eventValues)\n except sqlite3.IntegrityError as ex:\n logging.getLogger(__name__).debug(str(ex))\n logging.getLogger(__name__).debug(str(event))\n return\n\n def add_shakemap_info(self, info, replace=False):\n \"\"\"Add ComCat ShakeMap info to database.\n\n :type info: dict\n :param info: ComCat ShakeMap info (from info.json or info.xml).\n \"\"\"\n COLUMNS = (\n \"event_id\",\n \"mmi_bias\",\n \"mmi_max\",\n \"pga_bias\",\n \"pga_max\",\n \"pgv_bias\",\n \"pgv_max\",\n \"psa03_bias\",\n \"psa03_max\",\n \"psa10_bias\",\n \"psa10_max\",\n \"psa30_bias\",\n \"psa30_max\",\n \"gmpe\",\n \"pgm2mi\",\n \"software_version\",\n )\n\n insertCols = \", \".join(COLUMNS)\n\n gmmod = info[\"processing\"][\"ground_motion_modules\"]\n infoDict = {\n \"event_id\": info[\"event_id\"],\n \"mmi_bias\": 0.0,\n \"mmi_max\": 0.0,\n \"pga_bias\": 0.0,\n \"pga_max\": 0.0,\n \"pgv_bias\": 0.0,\n \"pgv_max\": 0.0,\n \"psa03_bias\": 0.0,\n \"psa03_max\": 0.0,\n \"psa10_bias\": 0.0,\n \"psa10_max\": 0.0,\n \"psa30_bias\": 0.0,\n \"psa30_max\": 0.0,\n \"gmpe\": gmmod[\"gmpe\"][\"module\"],\n \"pgm2mi\": gmmod[\"pgm2mi\"][\"module\"] if \"pgm2mi\" in gmmod.keys() else gmmod[\"gmice\"][\"module\"],\n \"software_version\": 
info[\"processing\"][\"shakemap_versions\"][\"shakemap_revision\"],\n }\n # Update infoDict with available values.\n gm = info[\"output\"][\"ground_motions\"]\n for key,value in gm.items():\n if key.startswith(\"SA\"):\n dkey = key.replace(\"SA(\",\"psa\").replace(\")\",\"\").replace(\".\",\"\")\n infoDict[dkey+\"_bias\"] = value[\"bias\"] or 0.0\n infoDict[dkey+\"_max\"] = value[\"max\"] or 0.0 \n elif key != \"intensity\":\n infoDict[key.lower()+\"_bias\"] = value[\"bias\"] or 0.0\n infoDict[key.lower()+\"_max\"] = value[\"max\"] or 0.0\n else:\n infoDict[\"mmi_bias\"] = value[\"bias\"] or 0.0\n infoDict[\"mmi_max\"] = value[\"max\"] or 0.0\n infoValues = tuple([infoDict[col] for col in COLUMNS])\n valueCols = \",\".join(\"?\"*len(infoValues))\n cmd = \"INSERT\"\n if replace:\n cmd += \" OR REPLACE\"\n with self.operation() as op:\n try:\n op.cursor.execute(\"{0} INTO comcat_shakemaps({1}) VALUES({2})\".format(cmd, insertCols, valueCols), infoValues)\n except sqlite3.IntegrityError as ex:\n logging.getLogger(__name__).debug(str(ex))\n logging.getLogger(__name__).debug(str(info))\n return\n\n def add_performance(self, stats, replace=False):\n \"\"\"Add performance stats to database.\n\n :type stats: dict\n :param stats: Performance stats to add to database.\n \"\"\"\n COLUMNS = (\n \"comcat_id\",\n \"eew_server\",\n \"dm_id\",\n \"dm_timestamp\",\n \"gmpe\",\n \"fragility\",\n \"magnitude_threshold\",\n \"mmi_threshold\",\n \"alert_latency_sec\",\n \"area_damage\",\n \"area_alert\",\n \"area_alert_perfect\",\n \"area_costsavings_eew\",\n \"area_costsavings_perfecteew\",\n \"population_damage\",\n \"population_alert\",\n \"population_alert_perfect\",\n \"population_costsavings_eew\",\n \"population_costsavings_perfecteew\",\n )\n NTRIES = 50\n \n insertCols = \", \".join(COLUMNS)\n perfValues = [stats[col] for col in COLUMNS]\n perfCols = \",\".join(\"?\"*len(perfValues))\n cmd = \"INSERT\"\n if replace:\n cmd += \" OR REPLACE\"\n\n with self.operation() as op:\n try:\n op.cursor.execute(\"{0} INTO performance({1}) VALUES({2})\".format(cmd, insertCols, perfCols), perfValues)\n \n except sqlite3.IntegrityError as ex:\n logging.getLogger(__name__).debug(str(ex))\n logging.getLogger(__name__).debug(str(stats))\n\n return\n\n def find_match(self, comcatId, server):\n \"\"\"Find initial alert matching ComCat event.\n \"\"\"\n from . import greatcircle\n \n MAX_DISTANCE_DEG = 3.0\n MAX_DISTANCE_KM = 150.0\n MAX_TIME_SECS = 15.0\n VS = 3.0e+3\n\n with self.operation() as op:\n op.cursor.execute(\"SELECT * FROM comcat_events WHERE event_id=?\", (comcatId,))\n event = op.cursor.fetchone()\n\n lat = event[\"latitude\"]\n lon = event[\"longitude\"]\n ot = dateutil.parser.parse(event[\"origin_time\"])\n dt = datetime.timedelta(seconds=MAX_TIME_SECS)\n conditions = [\n \"category=?\",\n \"message_type=?\",\n \"latitude BETWEEN ? AND ?\",\n \"longitude BETWEEN ? AND ?\",\n \"origin_time BETWEEN ? 
AND ?\",\n ]\n values = (\n \"live\",\n \"new\",\n lat-MAX_DISTANCE_DEG, lat+MAX_DISTANCE_DEG,\n lon-MAX_DISTANCE_DEG, lon+MAX_DISTANCE_DEG,\n ot-dt, ot+dt,\n )\n op.cursor.execute(\"SELECT * FROM eew_alerts WHERE \" + \" AND \".join(conditions), values)\n alerts = op.cursor.fetchall()\n if 0 == len(alerts):\n return None\n \n # Get closest alert, ignoring deleted alerts\n minDist = 1.0e+30\n alertMatch = None\n for alert in alerts:\n # :TODO: Ignore deleted alerts\n # Ignore alerts from non-target servers\n if alert[\"server\"] !=\"unknown\" and alert[\"server\"] !=\"eew2\" and alert[\"server\"] != server:\n continue\n # Limit to distance range \n dist = greatcircle.distance(lon, lat, alert[\"longitude\"], alert[\"latitude\"])\n if dist*1e-3 > MAX_DISTANCE_KM:\n continue\n distOT = abs((dateutil.parser.parse(alert[\"origin_time\"])-ot).total_seconds())*VS\n if dist + distOT < minDist:\n alertMatch = alert\n minDist = dist\n return alertMatch\n\n def alerts(self, comcatId, server):\n \"\"\"Get ShakeAlert alerts for event matching ComCat id.\n\n :type comcatId: str\n :param comcatId: ComCat event id.\n \"\"\"\n alert = self.find_match(comcatId, server)\n if alert is None:\n return []\n \n # Get subsequent alerts matching id and instance within 10 min\n timestamp = dateutil.parser.parse(alert[\"timestamp\"])\n conditions = [\n \"category=?\",\n \"(message_type=? OR message_type=?)\",\n \"event_id=?\",\n \"server=?\",\n \"timestamp BETWEEN ? AND ?\",\n ]\n values = (\n \"live\",\n \"new\",\n \"update\",\n alert[\"event_id\"],\n alert[\"server\"],\n timestamp, timestamp+datetime.timedelta(minutes=10.0),\n )\n with self.operation() as op:\n op.cursor.execute(\"SELECT * FROM eew_alerts WHERE \" + \" AND \".join(conditions), values)\n alerts = op.cursor.fetchall()\n return alerts\n\n def performance_stats(self, comcatId, server, gmpe, fragility, alertLatencySec, magnitudeThreshold=None, mmiThreshold=None):\n \"\"\"\n \"\"\"\n conditions = [\n \"comcat_id=?\",\n \"eew_server=?\",\n \"gmpe=?\",\n \"fragility=?\",\n ]\n values = (\n comcatId,\n server,\n gmpe,\n fragility,\n )\n with self.operation() as op:\n op.cursor.execute(\"SELECT * FROM performance WHERE \" + \" AND \".join(conditions) + \" ORDER BY magnitude_threshold,mmi_threshold\", values)\n # :TODO: :KLUDGE: to get structured array\n # Can we use PRAGMA TABLE_INFO to get column name and type (to map to numpy.dtype)?\n dtype = [\n (\"comcat_id\", \"|S32\"),\n (\"eew_server\", \"|S32\"),\n (\"dm_id\", \"int32\"),\n (\"dm_timestamp\", \"|S32\"),\n (\"gmpe\", \"|S32\"),\n (\"fragility\", \"|S32\"),\n (\"magnitude_threshold\", \"float32\"),\n (\"mmi_threshold\", \"float32\"),\n (\"alert_latency_sec\", \"float32\"),\n (\"area_damage\", \"float32\"),\n (\"area_alert\", \"float32\"),\n (\"area_alert_perfect\", \"float32\"),\n (\"area_costsavings_eew\", \"float32\"),\n (\"area_costsavings_perfecteew\", \"float32\"),\n (\"population_damage\", \"float32\"),\n (\"population_alert\", \"float32\"),\n (\"population_alert_perfect\", \"float32\"),\n (\"population_costsavings_eew\", \"float32\"),\n (\"population_costsavings_perfecteew\", \"float32\"),\n ]\n results = op.cursor.fetchall()\n \n nrows = len(results)\n stats = numpy.zeros(nrows, dtype=dtype)\n for iresult, result in enumerate(results):\n stats[iresult] = tuple([result[key] for key in result.keys()])\n\n mask = numpy.ma.masked_values(stats[\"alert_latency_sec\"], alertLatencySec).mask\n stats = stats[mask]\n if magnitudeThreshold:\n mask = numpy.ma.masked_values(stats[\"magnitude_threshold\"], 
magnitudeThreshold).mask\n stats = stats[mask]\n if mmiThreshold:\n mask = numpy.ma.masked_values(stats[\"mmi_threshold\"], mmiThreshold).mask\n stats = stats[mask]\n return stats\n \n def most_recent_alert(self, server):\n \"\"\"Get most recent alert in database.\n\n :type server: str\n :param server: Name of EEW server associated with alerts.\n\n :returns: Most recent alert for server in database.\n \"\"\"\n with self.operation() as op:\n op.cursor.execute(\"SELECT * from eew_alerts ORDER BY date(timestamp) DESC LIMIT 1\")\n alert = op.cursor.fetchone()\n return alert\n \n def comcat_event(self, comcatId):\n \"\"\"Get ComCat event information.\n\n :type comcatId: str\n :param comcatId: ComCat event id\n \"\"\"\n with self.operation() as op:\n op.cursor.execute(\"SELECT * FROM comcat_events WHERE event_id=?\", (comcatId,))\n event = op.cursor.fetchone()\n return event\n\n def comcat_shakemap(self, comcatId):\n \"\"\"Get ComCat ShakeMap information.\n\n :type comcatId: str\n :param comcatId: ComCat event id\n \"\"\"\n with self.operation() as op:\n op.cursor.execute(\"SELECT * FROM comcat_shakemaps WHERE event_id=?\", (comcatId,))\n shakemap = op.cursor.fetchone()\n return shakemap\n \n def tables_info(self):\n \"\"\"Returns string with database summary.\n \"\"\"\n sout = \"\"\n with self.operation() as op:\n for name,columns in TABLES:\n op.cursor.execute(\"PRAGMA TABLE_INFO({})\".format(name))\n info = op.cursor.fetchall()\n op.cursor.execute(\"SELECT COUNT(*) FROM {}\".format(name))\n nrows = op.cursor.fetchall()[0][0]\n\n sout += \"Table {}\\n\".format(name)\n sout += \" Columns\\n\"\n for column in info:\n sout += \" {name:16} {type:16}\\n\".format(name=column[1], type=column[2])\n sout += \" Number of rows: {}\\n\".format(nrows)\n return sout\n\n def summary(self):\n \"\"\"Returns string with database summary.\n \"\"\"\n sout = \"\"\n\n with self.operation() as op:\n \n # Comcat events\n sout += \"\\nComCat Events\\n\"\n op.cursor.execute(\"SELECT * FROM comcat_events ORDER BY origin_time\")\n rows = op.cursor.fetchall()\n for row in rows:\n ot = dateutil.parser.parse(row[\"origin_time\"])\n sout += \"{row[event_id]} {row[longitude]:9.4f} {row[latitude]:8.4f} {row[depth_km]:4.1f} {ot:%Y-%m-%dT%H:%M} {row[magnitude_type]:3s}{row[magnitude]:.2f} {row[description]}\\n\".format(row=row, ot=ot)\n \n # Comcat Shakemap\n sout += \"\\nShakeMap Info\\n\"\n op.cursor.execute(\"SELECT * FROM comcat_shakemaps ORDER BY event_id\")\n rows = op.cursor.fetchall()\n for row in rows:\n event = self.comcat_event(row[\"event_id\"])\n sout += \"{row[event_id]} {event[magnitude_type]:3s}{event[magnitude]:.2f} {row[mmi_max]:3.1f} {row[pga_max]:6.2f}%g {row[pgv_max]:5.1f}cm/s {row[mmi_bias]:5.2f} {row[pga_bias]:5.2f} {row[pgv_bias]:5.2f} {row[gmpe]} {row[pgm2mi]} v{row[software_version]} {event[description]}\\n\".format(row=row, event=event)\n \n # Alerts\n \n # Performance\n sout += \"\\nPerformance Data\\n\"\n op.cursor.execute(\"SELECT * FROM performance ORDER BY comcat_id,fragility,gmpe,magnitude_threshold,mmi_threshold,alert_latency_sec\")\n rows = op.cursor.fetchall()\n for row in rows:\n event = self.comcat_event(row[\"comcat_id\"])\n sout += \"{row[comcat_id]} {event[magnitude_type]:3s}{event[magnitude]:.2f} {row[gmpe]} {row[fragility]} {row[magnitude_threshold]:3.1f} {row[mmi_threshold]:3.1f} {row[alert_latency_sec]:3.1f} {row[area_costsavings_eew]:6.2f} {row[area_costsavings_perfecteew]:6.2f} {row[population_costsavings_eew]:6.2f} {row[population_costsavings_perfecteew]:6.2f} 
{event[description]}\n\".format(row=row, event=event)\n return sout\n\n def show_matches(self, server):\n \"\"\"Show matches.\n \"\"\"\n with self.operation() as op:\n op.cursor.execute(\"SELECT * from comcat_events ORDER BY event_id\")\n events = op.cursor.fetchall()\n \n for event in events:\n alert = self.find_match(event[\"event_id\"], server)\n if alert:\n print(\"COMCAT {event[event_id]} M{event[magnitude]:.2f} {event[longitude]:.3f} {event[latitude]:.3f} {event[origin_time]} ALERT {alert[event_id]} {alert[longitude]:.3f} {alert[latitude]:.3f} {alert[origin_time]} {alert[server]}\".format(event=event, alert=alert))\n else:\n print(\"COMCAT {event[event_id]} M{event[magnitude]:.2f} {event[longitude]:.3f} {event[latitude]:.3f} {event[origin_time]} ALERT None\".format(event=event))\n return\n\n# End of file\n" ]
[ [ "numpy.ma.masked_values", "numpy.zeros" ] ]
ppnaumann/CSCF
[ "ea8af1f2fdec3a90a041324a32893d5dadc7e14b" ]
[ "src/cscf/decoder.py" ]
[ "import numpy as np\nimport decimal\n\n\nclass Decoder(object):\n \"\"\"\n docstring\n \"\"\"\n\n def __init__(self, problem):\n self.problem = problem\n self.invalid_genotype_value = 1.0\n\n def decode_without_repair(self, x):\n _x = x.copy()\n # two parts, first sequence, second values\n assert _x.ndim == 1\n\n split_point = len(x) // 2\n\n sequence_part = _x[:split_point]\n value_part = _x[split_point:]\n\n assert len(sequence_part) == len(value_part)\n sequence_phenotype, fixed_sequence_genotype = self.get_sequence_pheno(\n sequence_part\n )\n assert sequence_phenotype.dtype == np.float64, sequence_phenotype.dtype\n value_phenotype = self.get_values_pheno(value_part)\n assert value_phenotype.dtype == np.float64, value_phenotype.dtype\n\n # now twice as long\n phenotype = np.concatenate([sequence_phenotype, value_phenotype])\n assert phenotype.dtype == np.float64, phenotype.dtype\n return phenotype, np.concatenate([fixed_sequence_genotype, x[split_point:]])\n\n def get_decoded_and_fix(self, x):\n phenotype, fixed_genotype = self.decode_without_repair(x)\n assert phenotype.dtype == np.float64, phenotype.dtype\n assert fixed_genotype.dtype == x.dtype\n assert fixed_genotype.shape == x.shape\n return phenotype, fixed_genotype\n\n def decode(self, x):\n phenotype, fixed_genotype = self.decode_without_repair(x)\n assert phenotype.dtype == np.float64, phenotype.dtype\n assert phenotype.shape == x.shape\n return phenotype\n\n def get_sequence_pheno(self, sequence_genotype):\n \"\"\"\n Returns the sorted representation of the genotype\n\n Parameters\n ----------\n sequence_genotype : array\n Genotype of the sequence\n\n Returns\n -------\n array\n Indices of sorted genotype, i.e. phenotype\n \"\"\"\n inactive = self.evaluate_is_inactive(sequence_genotype)\n phenotype_candidate = np.argsort(sequence_genotype)\n phenotype = np.full(len(phenotype_candidate), -1)\n phenotype[: sum(~inactive)] = phenotype_candidate[: sum(~inactive)]\n assert set(np.arange(len(sequence_genotype))[~inactive]).union([-1]) == set(\n phenotype\n ).union([-1]), phenotype\n\n genotype_candidate = sequence_genotype.copy()\n genotype_candidate = genotype_candidate[phenotype_candidate]\n return phenotype.astype(np.float64), genotype_candidate.astype(np.float64)\n\n def evaluate_is_inactive(self, sequence_genotype):\n \"\"\"Simple heuristic that defines that an action is inactive\n if its value is above 0.5 in the genotype form\n\n Args:\n sequence_genotype ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n return sequence_genotype > 0.5\n\n def get_values_pheno(self, values_genotype):\n value_phenotype = np.zeros(len(values_genotype), dtype=np.float64)\n\n for action_dict_id, geno_val in enumerate(values_genotype):\n phenotype_value = self.get_interpolated_phenotype(geno_val, action_dict_id)\n value_phenotype[action_dict_id] = phenotype_value\n return value_phenotype\n\n def get_interpolated_phenotype(self, genotype_value, action_id):\n \"\"\"\n Interpolates the genotype interval of [0,1] to the respective phenotype range\n\n Parameters\n ----------\n genotype_value : float\n Genotype representation of the current value\n action_idx : int\n Index of the respective action\n\n Returns\n -------\n int or float\n Genotype representation\n \"\"\"\n assert 0.0 <= action_id <= self.problem.n_actions, action_id\n\n GENOTYPE_INTERVAL = [0.0, 1.0]\n\n xl = self.problem.xxl[int(action_id)]\n xu = self.problem.xxu[int(action_id)]\n\n phenotype_interval = [xl, xu]\n assert phenotype_interval[0] < 
phenotype_interval[1], phenotype_interval\n # * If xl == xu, then the interpolated value will always be the same\n # * (independent of the input value)\n phenotype_value = np.interp(\n genotype_value, GENOTYPE_INTERVAL, phenotype_interval\n )\n if action_id in self.problem.cat_actions_idx:\n # Phenotype value is only idx at this point\n # So we retrieve it from the provided mapping\n phenotype_value = self.problem.bounds_and_values[int(action_id)][\n int(phenotype_value)\n ]\n return float(phenotype_value)\n else:\n return float(phenotype_value)\n" ]
[ [ "numpy.concatenate", "numpy.interp", "numpy.argsort" ] ]
GiuppoUni/gym-pybullet-drones
[ "9339b803f471c7510cc6d9b14828982bb426466b" ]
[ "assignments/aer1216_fall2020_hw1_ctrl.py" ]
[ "\"\"\"Control implementation for assignment 1.\n\nThe script is used the simulation in file `aer1216_fall2020_hw1_sim.py`.\n\nExample\n-------\nTo run the simulation, type in a terminal:\n\n $ python aer1216_fall2020_hw1_sim.py\n\nNotes\n-----\nTune the PD coefficients in `HW1Control.__init__()`.\n\n\"\"\"\nimport numpy as np\nfrom gym_pybullet_drones.envs.BaseAviary import BaseAviary\n\nclass HW1Control():\n \"\"\"Control class for assignment 1.\"\"\"\n\n ################################################################################\n\n def __init__(self, env: BaseAviary):\n \"\"\" Initialization of class HW1Control.\n\n Parameters\n ----------\n env : BaseAviary\n The PyBullet-based simulation environment.\n\n \"\"\"\n self.g = env.G\n \"\"\"float: Gravity acceleration, in meters per second squared.\"\"\"\n self.mass = env.M\n \"\"\"float: The mass of quad from environment.\"\"\"\n self.timestep = env.TIMESTEP\n \"\"\"float: Simulation and control timestep.\"\"\"\n self.kf_coeff = env.KF\n \"\"\"float: RPMs to force coefficient.\"\"\"\n self.km_coeff = env.KM\n \"\"\"float: RPMs to torque coefficient.\"\"\"\n\n ############################################################\n ############################################################\n #### HOMEWORK CODE (START) #################################\n ############################################################\n ############################################################\n self.p_coeff_position = 1.5\n \"\"\"float: Proportional coefficient for position control.\"\"\"\n self.d_coeff_position = 0.0\n \"\"\"float: Derivative coefficient for position control.\"\"\"\n ############################################################\n ############################################################\n #### HOMEWORK CODE (END) ###################################\n ############################################################\n ############################################################\n\n self.reset()\n\n ################################################################################\n\n def reset(self):\n \"\"\" Resets the controller counters and variables (integral errors and\n previous control step errors).\n \"\"\"\n self.control_counter = 0\n\n ################################################################################\n\n def compute_control(self,\n current_position,\n current_velocity,\n target_position,\n target_velocity=np.zeros(3),\n target_acceleration=np.zeros(3),\n ):\n \"\"\"Compute the propellers' RPMs for the target state, given the\n current state.\n\n Parameters\n ----------\n current_position : ndarray\n (3,)-shaped array of floats containing global x, y, z, in meters.\n current_velocity : ndarray\n (3,)-shaped array of floats containing global vx, vy, vz, in m/s.\n target_position : ndarray\n (3,)-shaped array of float containing global x, y, z, in meters.\n target_velocity : ndarray, optional\n (3,)-shaped array of floats containing global, in m/s.\n target_acceleration : ndarray, optional\n (3,)-shaped array of floats containing global, in m/s^2.\n\n Returns\n -------\n ndarray\n (4,)-shaped array of ints containing the desired RPMs of each propeller.\n \"\"\"\n self.control_counter += 1\n \n ############################################################\n ############################################################\n #### HOMEWORK CODE (START) #################################\n ############################################################\n ############################################################\n\n ##### Calculate 
position and velocity errors ###############\n current_pos_error = target_position[2] - current_position[2]\n current_vel_error = target_velocity[2] - current_velocity[2]\n\n #### Calculate input with a PD controller ##################\n # u = desired_acceleration + Kv * velocity_error + Kp * position_error\n u = target_acceleration[2] \\\n + self.d_coeff_position * current_vel_error \\\n + self.p_coeff_position * current_pos_error\n\n ##### Calculate propeller turn rates given the PD input ####\n # turn_rate = sqrt( (m*u + m*g) / (4*Kf) )\n propellers_rpm = np.sqrt((u * self.mass + self.mass*self.g) / (4 * self.kf_coeff))\n\n # For up-down motion, assign the same turn rates to all motors\n propellers_0_and_3_rpm, propellers_1_and_2_rpm = propellers_rpm, propellers_rpm\n ############################################################\n ############################################################\n #### HOMEWORK CODE (END) ###################################\n ############################################################\n ############################################################\n\n #### Print relevant output #################################\n if self.control_counter%(1/self.timestep) == 0:\n print(\"current_position\", current_position)\n print(\"current_velocity\", current_velocity)\n print(\"target_position\", target_position)\n print(\"target_velocity\", target_velocity)\n print(\"target_acceleration\", target_acceleration)\n\n return np.array([propellers_0_and_3_rpm, propellers_1_and_2_rpm,\n propellers_1_and_2_rpm, propellers_0_and_3_rpm])\n" ]
[ [ "numpy.array", "numpy.sqrt", "numpy.zeros" ] ]
matln/Attentive-Filtering-Network
[ "cef007e68f1016b6f6daf2510feabe7565d4756b" ]
[ "src/data_reader/v7_dataset.py" ]
[ "import numpy as np\nimport torch\nfrom torch.utils import data\nimport adv_kaldi_io as ako\nimport kaldi_io as ko\n\n\"\"\"\nFor CNN+GRU where it loads one utterance at a time \n\"\"\"\n\nclass SpoofDataset(data.Dataset):\n \"\"\"PyTorch dataset that reads kaldi feature\n \"\"\"\n def __init__(self, scp_file, utt2label_file):\n 'Initialization'\n self.scp_file = scp_file\n self.utt2label = ako.read_key_label(utt2label_file)\n self.key_list = ako.read_all_key(scp_file)\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.key_list)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n utt_id = self.key_list[index]\n # Load data and get label\n tensor = ako.read_mat_key(self.scp_file, utt_id)\n X1 = np.expand_dims(tensor, axis=0)\n X2 = X1[:,128:,:]\n y = self.utt2label[utt_id]\n\n return X1, X2, y\n" ]
[ [ "numpy.expand_dims" ] ]
katrinleinweber/riemann_book
[ "0bd2320765a459249d938c6913cc39339cddb3fb" ]
[ "exact_solvers/euler_stripes.py" ]
[ "def plot_exact_riemann_solution_stripes(rho_l=3.,u_l=0.,p_l=3.,\n rho_r=1.,u_r=0.,p_r=1.,gamma=1.4,t=0.4): \n import matplotlib.pyplot as plt\n import numpy as np\n from exact_solvers import Euler \n from utils import riemann_tools\n q_l = Euler.primitive_to_conservative(rho_l,u_l,p_l)\n q_r = Euler.primitive_to_conservative(rho_r,u_r,p_r)\n \n x = np.linspace(-1.,1.,1000)\n states, speeds, reval, wave_types = Euler.exact_riemann_solution(q_l, q_r, gamma=gamma)\n if t == 0:\n q = np.zeros((3,len(x)))\n q[0,:] = q_l[0]*(x<=0) + q_r[0]*(x>0)\n q[1,:] = q_l[1]*(x<=0) + q_r[1]*(x>0)\n q[1,:] = q_l[2]*(x<=0) + q_r[2]*(x>0)\n else:\n q = reval(x/t)\n primitive = Euler.conservative_to_primitive(q[0],q[1],q[2])\n \n # compute particle trajectories:\n def reval_rho_u(x): \n eps = 1.e-16\n q = reval(x)\n rho = q[0]\n u = q[1]/(q[0]+eps)\n rho_u = np.vstack((rho,u))\n return rho_u\n \n # Specify density of trajectories to left and right:\n num_left = 10\n num_right = 10\n rho_left = q_l[0] / 10.\n rho_right = q_r[0] / 10.\n x_traj, t_traj, xmax = riemann_tools.compute_riemann_trajectories(states, speeds, reval_rho_u, wave_types,\n i_vel=1, xmax=1, rho_left=rho_left, rho_right=rho_right)\n \n fig = plt.figure(figsize=(9,8))\n names = ['Density','Velocity','Pressure']\n axes = [0]*3\n for i in range(3):\n axes[i] = fig.add_subplot(3,1,i+1)\n q = primitive[i]\n plt.plot(x,q,'-k',linewidth=3)\n plt.title(names[i])\n qmax = max(q)\n qmin = min(q)\n qdiff = qmax - qmin\n if qdiff == 0: qdiff = qmin*0.5\n axes[i].set_ylim((qmin-0.1*qdiff,qmax+0.1*qdiff))\n axes[i].set_xlim(-xmax,xmax)\n \n if i==0:\n # plot stripes only on density plot\n n = np.array([j for j,v in enumerate(t > t_traj) if v])\n if len(n)==0:\n n = 0\n else:\n n = min(n.max(), len(t_traj)-1)\n\n for i in range(1, x_traj.shape[1]-1):\n j1 = np.array([j for j,v in enumerate(x_traj[n,i] > x) if v])\n if len(j1)==0:\n j1 = 0\n else:\n j1 = min(j1.max(), len(x)-1)\n j2 = np.array([j for j,v in enumerate(x_traj[n,i+1] > x) if v])\n if len(j2)==0:\n j2 = 0\n else:\n j2 = min(j2.max(), len(x)-1)\n\n # set advected color for density plot:\n if x_traj[0,i]<0: \n # shades of red for fluid starting from x<0\n if np.mod(i,2)==0:\n c = [1,0,0]\n else:\n c = [1,0.8,0.8]\n else:\n # shades of blue for fluid starting from x<0\n if np.mod(i,2)==0:\n c = [0,0,1]\n else:\n c = [0.8,0.8,1]\n plt.fill_between(x[j1:j2],q[j1:j2],0,color=c)\n plt.tight_layout()\n plt.show()\n " ]
[ [ "numpy.mod", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "numpy.linspace", "numpy.vstack" ] ]
lamarqued/tech-radar
[ "05563bdd47cca971caa5abab12d23939d6db04db" ]
[ "docs/make_data.py" ]
[ "import json\nimport getpass\nimport pandas as pd\n\nUSER = getpass.getuser()\nPATH = f\"C:/Users/{USER}/Ekimetrics/Ekimetrics. - Eki.Innovation/Opensource Tech Radar.xlsx\"\n\ndata = pd.read_excel(PATH).iloc[:,:6]\n\ndata[\"quadrant\"] = data[\"quadrant\"].str.lower().str.replace(\" \",\"\").replace({\"datascience\":2,\"dataengineering\":3,\"applicationdevelopment\":1,\"ops\":0})\ndata[\"ring\"] = data[\"ring\"].replace({\"ADOPT\":0,\"TRIAL\":1,\"ASSESS\":2,\"HOLD\":3})\ndata[\"active\"].fillna(True,inplace = True)\ndata[\"moved\"].fillna(0,inplace = True)\ndata.drop(\"url\",axis = 1,inplace = True)\ndata = data.drop_duplicates(subset = [\"label\"])\n\njson_data = data.to_json(orient = \"records\")\njs_data = f\"\"\"\nvar radarData = {json_data};\n\"\"\"\n\nwith open(\"technos.js\",\"w\") as file:\n\tfile.write(js_data)\n" ]
[ [ "pandas.read_excel" ] ]
isomerase/MyPyGLM
[ "d8c884f74f7b9e7953f6602a6cd01d27275f76d6" ]
[ "calc_STA.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nSpike Triggered Average calculator\n\nInput:\nstimulus (t)\nspike spikeTimes (t)\n if no spiketimes, generate randoms\n\nGiven stim and spiketimes, grabs the spike windows, and calcs the spike triggered average.\n\noutput:\nspike triggered average\n\n\nCreated on Wed Feb 11 21:20:00 2015\n@author: Richard Decal, [email protected]\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef gen_rand_spiketimes(number_of_spikes, STIM_LEN):\n \"\"\"given stimulus length and stim count, generate 10 random spikes\n \n TODO: make this a poisson process\n TODO: don't have random spiketimes: spike when convolution tells you to!\n \"\"\"\n rand_spike_times = np.zeros(STIM_LEN)\n timebins = []\n for i in range(number_of_spikes):\n timebin = np.random.randint(low=0, high=STIM_LEN-1)\n rand_spike_times[timebin] = 1\n timebins.append(timebin)\n return rand_spike_times, timebins\n\ndef window_grabber(stimulus, spikeTimes, WINDOW_LEN):\n \"\"\"\"when a spike happens, grab the window preceding the spike. return an nparray containing each window\n \n TODO: instead of discarding spikes at beginning, make vector with leading zeros??\n \"\"\"\n spike_trigger_windows = []\n for time in spikeTimes:\n if time > WINDOW_LEN: #discard spikes that are too close to the beginning.\n spike_trigger_windows.append(stimulus[time-WINDOW_LEN:time])\n spike_trigger_windows = np.array(spike_trigger_windows)\n return spike_trigger_windows\n\ndef spike_trigger_averager(spike_trigger_windows):\n \"\"\"given an array of many grabbed windows, average all the windows.\n return a Spike-triggered average window/vector\n \"\"\" \n spike_trigger_average = np.average(spike_trigger_windows, axis=0)\n return spike_trigger_average\n\ndef figplotter(WINDOW_LEN, spike_trigger_average):\n plt.plot(range(0,WINDOW_LEN), spike_trigger_average)\n plt.show()\n\ndef main(stimulus = np.genfromtxt(\"gauss_stimulus_3000dim.txt\"), WINDOW_LEN = 50):\n \"\"\"\"\n default imports vector of len(3000) of pts drawn from a gaussian dist w/ mean=0,stdev=1.0\n \n TODO: allow input of spikes and spikeTimes, generate if none are available\n \"\"\"\n STIM_LEN = len(stimulus)\n spike_timeseries, spikeTimes = gen_rand_spiketimes(1000, STIM_LEN) #TODO: replace with calculated spiketimes \n spike_trigger_windows = window_grabber(stimulus, spikeTimes, WINDOW_LEN)\n spike_trigger_average = spike_trigger_averager(spike_trigger_windows)\n return spike_trigger_average, WINDOW_LEN\n \n\nif __name__ == \"__main__\":\n spike_trigger_average, WINDOW_LEN = main()\n figplotter(WINDOW_LEN, spike_trigger_average)" ]
[ [ "numpy.array", "numpy.zeros", "numpy.genfromtxt", "numpy.random.randint", "numpy.average", "matplotlib.pyplot.show" ] ]
sdym-test/pytorch
[ "fc3c7fb7566639d0a36af88bbac0c7920f73ee3b", "fc3c7fb7566639d0a36af88bbac0c7920f73ee3b" ]
[ "torch/ao/quantization/fx/quantization_patterns.py", "test/test_reductions.py" ]
[ "import torch\nfrom torch.fx import GraphModule\nfrom torch.fx.graph import (\n Node,\n Graph,\n)\nfrom ..observer import (\n default_affine_fixed_qparams_observer,\n default_symmetric_fixed_qparams_observer,\n)\n\nfrom ..quantization_mappings import (\n get_static_quant_module_class,\n get_dynamic_quant_module_class,\n get_quantized_operator,\n)\nfrom ..utils import (\n get_swapped_custom_module_class,\n activation_is_statically_quantized,\n activation_is_int8_quantized,\n weight_is_statically_quantized,\n get_qconfig_dtypes,\n activation_dtype,\n get_qparam_dict,\n check_node,\n)\n\nfrom torch.ao.quantization.quantize import (\n is_activation_post_process,\n)\n\nfrom .pattern_utils import (\n register_quant_pattern,\n get_default_output_activation_post_process_map,\n Pattern,\n)\nfrom ..utils import _parent_name\nfrom .utils import (\n all_node_args_have_no_tensors,\n quantize_node,\n get_per_tensor_qparams,\n get_linear_prepack_op_for_dtype,\n create_qparam_nodes,\n get_qconv_prepack_op,\n get_qconv_op,\n create_node_from_old_node_preserve_meta,\n)\n\nfrom ..qconfig import QConfigAny\n\nfrom abc import ABC\nimport operator\nimport warnings\n\nfrom typing import Any, Callable, Dict, Union, Optional, Tuple, List\n\n# -------------------------\n# Pattern Registrations\n# -------------------------\n\n# 1. Post Training Static Quantization and Quantization Aware Training Patterns\n\n# Base Pattern Handler\nclass QuantizeHandler(ABC):\n \"\"\" Base handler class for the quantizer patterns\n \"\"\"\n def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):\n \"\"\" Records pattern information in __init__, which will be used\n in convert\n \"\"\"\n # this is an indicator of whether all the inputs are Node or not\n # since some op might be quantized differently depending on whether\n # all inputs are tensors or not, e.g. add/mul\n self.num_tensor_args = len(node.args)\n self.all_node_args_are_tensors = True\n # the last node of the matched pattern\n self.last_node = node\n\n def _maybe_get_last_node_only_observer(\n self,\n modules: Dict[str, torch.nn.Module]\n ) -> Optional[torch.nn.Module]:\n \"\"\"\n If the last node of the pattern is observed, return the observer\n instance. 
Otherwise, return None.\n \"\"\"\n for maybe_obs_node, _ in self.last_node.users.items():\n if maybe_obs_node.op == 'call_module':\n maybe_obs = modules[str(maybe_obs_node.target)]\n if is_activation_post_process(maybe_obs):\n return maybe_obs\n return None\n\n def input_output_observed(self) -> bool:\n \"\"\"\n Returns True if the pattern matched to this qhandler could\n be observed, and False if it should not be observed.\n \"\"\"\n return True\n\n def is_general_tensor_value_op(self) -> bool:\n \"\"\"\n Returns True if the operator works for both floating point and\n quantized input, and does some computation based on the input Tensor,\n so we need to insert observer/fake_quant for the output of the\n operator since the distribution of values is different for input and output\n Tensors (for HistogramObserver)\n while they share the same quantization parameters\n Example: avgpool2d\n \"\"\"\n return False\n\n def is_general_tensor_shape_op(self) -> bool:\n \"\"\" Similar to is_general_tensor_value_op, this is a check\n for ops that work for both floating point and quantized input,\n that only re-arrange the Tensor values or query some metadata about the Tensor\n We don't insert observer/fake_quant for the output of these operators\n Example: reshape, transpose, maxpool2d\n \"\"\"\n return False\n\n def should_insert_observer_for_output(\n self,\n qconfig: Any,\n model_is_training: bool,\n ) -> bool:\n \"\"\"\n Returns true if an observer should be inserted for the output of\n the pattern matched to this QuantizeHandler instance during the\n prepare step.\n \"\"\"\n # TODO(future PR): potentially clean up and deduplicate these\n # mappings.\n return self.all_node_args_are_tensors and self.input_output_observed()\n\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n \"\"\"\n Returns true if after convert, the output of the matched pattern is\n quantized iff the first input is also quantized.\n \"\"\"\n return False\n\n def get_activation_ctr(\n self,\n qconfig: Any,\n pattern: Pattern,\n is_training: bool,\n ) -> Optional[Callable]:\n \"\"\"\n Returns the constructor for the activation observer which should be\n used for the pattern matched to this handler. 
Some handlers override\n this to a different value than what is specified in the qconfig.\n \"\"\"\n return qconfig.activation\n\n def is_output_quantized(self, qconfig):\n \"\"\" Returns true if the output node of convert is quantized\n when is_reference is False, we would return float node when a certain dtype\n combination is not supported (since fbgemm/qnnpack only support certain dtype\n combinations), so the output may be float, but when is_reference is True,\n we support all dtype combinations so the output will always be quantized.\n\n TODO: This is fragile, whether output is quantized should not depend on `is_reference` since\n we want to make sure whether a Tensor is quantized\n should be the same in prepare and convert and is_reference\n is only available in convert currently\n\n \"\"\"\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n \"\"\" Convert the given node to a quantized node and insert\n it to the quantized graph\n \"\"\"\n return NotImplemented\n\n\n# Binary op configs\n\n# Supported combinations are:\n# quant_type | activation (compute_type) | weight\n# static quint8 qint8\n\n# tuple (activation_dtype, weight_dtype, compute_dtype)\n# these are supported types for common binary ops like add/mul etc.\nall_dtypes = [\n (torch.qint8, torch.qint8, None),\n (torch.quint8, torch.qint8, None),\n (torch.float16, torch.float16, None),\n]\nfp16_dtypes = [\n (torch.float16, torch.float16, None)\n]\nint8_dtypes = [\n (torch.qint8, torch.qint8, None),\n (torch.quint8, torch.qint8, None),\n]\nbinary_op_supported_dtypes : Dict[Union[Callable, str], List[Tuple[torch.dtype, torch.dtype, None]]] = {\n operator.add: all_dtypes,\n torch.add: all_dtypes,\n operator.mul: all_dtypes,\n torch.mul: all_dtypes,\n torch.bmm: fp16_dtypes,\n torch.sub: fp16_dtypes,\n operator.sub: fp16_dtypes,\n torch.div: fp16_dtypes,\n operator.truediv: fp16_dtypes,\n torch.matmul: int8_dtypes,\n}\n\ndefault_op_supported_dtypes = {\n torch.nn.ConvTranspose1d: int8_dtypes,\n torch.nn.ConvTranspose2d: int8_dtypes,\n torch.nn.ELU: int8_dtypes,\n torch.nn.LeakyReLU: int8_dtypes,\n torch.nn.Hardswish: int8_dtypes,\n torch.nn.InstanceNorm1d: int8_dtypes,\n torch.nn.InstanceNorm2d: int8_dtypes,\n torch.nn.InstanceNorm3d: int8_dtypes,\n torch.nn.LayerNorm: all_dtypes,\n torch.nn.SiLU: fp16_dtypes,\n torch.nn.Mish: fp16_dtypes,\n torch.nn.GELU: int8_dtypes,\n torch.nn.Dropout: int8_dtypes,\n torch.nn.Softmax: int8_dtypes,\n torch.nn.functional.elu: int8_dtypes,\n torch.nn.functional.hardswish: int8_dtypes,\n torch.nn.functional.instance_norm: int8_dtypes,\n torch.nn.functional.layer_norm: all_dtypes,\n torch.nn.functional.leaky_relu: int8_dtypes,\n torch.nn.functional.silu: fp16_dtypes,\n torch.nn.functional.mish: fp16_dtypes,\n torch.nn.functional.gelu: int8_dtypes,\n torch.nn.functional.softmax: int8_dtypes,\n torch.nn.functional.dropout: int8_dtypes,\n torch.sum: fp16_dtypes,\n}\n\nQAT_CONV_MODULE_CLASSES = \\\n (torch.nn.qat.Conv2d,\n torch.nn.qat.Conv3d,\n torch.nn.intrinsic.qat.ConvBn2d,\n torch.nn.intrinsic.qat.ConvBnReLU2d,\n torch.nn.intrinsic.qat.ConvReLU2d,\n torch.nn.intrinsic.qat.ConvBn3d,\n torch.nn.intrinsic.qat.ConvBnReLU3d,\n torch.nn.intrinsic.qat.ConvReLU3d)\n\n\n##########################\n# Helper Functions\n##########################\n\ndef 
_load_weight_qparams(\n self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n key = prefix + \"_weight_qparams\"\n if key in state_dict:\n self._weight_qparams = state_dict[key]\n state_dict.pop(key)\n\ndef _save_weight_qparams(self, destination, prefix, keep_vars):\n for attr_name in dir(self):\n if \"_weight_qparams\" == attr_name and \\\n isinstance(getattr(self, attr_name), dict):\n weight_qparams = getattr(self, attr_name)\n destination[prefix + attr_name] = weight_qparams\n\n\ndef _to_reference(float_module, weight_qparams):\n \"\"\" Make a weighted float module (e.g. conv and linear )a reference module by\n attaching _weight_qparams that records the qparams for weight\n and change the name for the module so that it's recognized\n when people print the model\n \"\"\"\n float_module._weight_qparams = weight_qparams\n float_module._register_state_dict_hook(_save_weight_qparams)\n float_module._register_load_state_dict_pre_hook(_load_weight_qparams, with_module=True)\n\n float_module_name = float_module._get_name()\n\n def _get_name():\n return float_module_name + \"(Reference)\"\n\n float_module._get_name = _get_name\n\n@register_quant_pattern(operator.add)\n@register_quant_pattern(operator.sub)\n@register_quant_pattern(operator.mul)\n@register_quant_pattern(operator.truediv)\n@register_quant_pattern(torch.add)\n@register_quant_pattern(torch.sub)\n@register_quant_pattern(torch.mul)\n@register_quant_pattern(torch.div)\n@register_quant_pattern(torch.bmm)\n@register_quant_pattern((torch.nn.ReLU, operator.add))\n@register_quant_pattern((torch.nn.ReLU, operator.mul))\n@register_quant_pattern((torch.nn.ReLU, torch.add))\n@register_quant_pattern((torch.nn.ReLU, torch.mul))\n@register_quant_pattern((torch.nn.functional.relu, operator.add))\n@register_quant_pattern((torch.nn.functional.relu, operator.mul))\n@register_quant_pattern((torch.nn.functional.relu, torch.add))\n@register_quant_pattern((torch.nn.functional.relu, torch.mul))\n@register_quant_pattern((torch.relu, operator.add))\n@register_quant_pattern((torch.relu, operator.mul))\n@register_quant_pattern(torch.matmul)\nclass BinaryOpQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (\n node.op == 'call_function' and\n node.target in (torch.nn.functional.relu, torch.relu)\n ) or (\n node.op == 'call_module' and\n isinstance(modules[str(node.target)], torch.nn.ReLU)\n ):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.binary_op_node = node\n self.binary_op = node.target\n\n # determine how many of the first two args are Tensors (versus scalars)\n # this distinguishes things like \"x + y\" from \"x + 2\" or \"2 + x\"\n self.num_tensor_args = 0\n cache_for_no_tensor_check: Dict[Node, bool] = dict()\n for arg_idx in range(len(self.binary_op_node.args)):\n arg = self.binary_op_node.args[arg_idx]\n if isinstance(arg, Node) and (not all_node_args_have_no_tensors(arg, modules, cache_for_no_tensor_check)):\n self.num_tensor_args += 1\n self.all_node_args_are_tensors = \\\n (self.num_tensor_args == len(self.binary_op_node.args))\n\n qbin_op_mapping: Dict[Union[Callable, str], Callable] = {\n operator.add: torch.ops.quantized.add,\n torch.add: torch.ops.quantized.add,\n operator.mul: torch.ops.quantized.mul,\n torch.mul: torch.ops.quantized.mul,\n torch.matmul: torch.ops.quantized.matmul,\n }\n qbin_relu_op_mapping: Dict[Union[Callable, str], Callable] 
= {\n operator.add: torch.ops.quantized.add_relu,\n torch.add: torch.ops.quantized.add_relu,\n operator.mul: torch.ops.quantized.mul_relu,\n torch.mul: torch.ops.quantized.mul_relu,\n }\n # corresponding quantized op\n self.quantized_binary_op: Optional[Callable] = None\n if self.binary_op in qbin_op_mapping:\n self.quantized_binary_op = qbin_relu_op_mapping[self.binary_op] \\\n if self.relu_node is not None \\\n else qbin_op_mapping[self.binary_op]\n\n def should_insert_observer_for_output(\n self,\n qconfig: Any,\n model_is_training: bool,\n ) -> bool:\n \"\"\"\n Returns true if an observer should be inserted for the output of\n the pattern matched to this QuantizeHandler instance during the\n prepare step.\n \"\"\"\n dtypes = get_qconfig_dtypes(qconfig)\n if not (self.binary_op in binary_op_supported_dtypes and dtypes in binary_op_supported_dtypes[self.binary_op]):\n return False\n if self.num_tensor_args == 1:\n return True\n elif self.all_node_args_are_tensors and self.input_output_observed():\n return True\n else:\n return False\n\n def is_general_tensor_value_op(self) -> bool:\n return self.num_tensor_args == 1\n\n def input_output_observed(self):\n # for x + y where x and y are scalars, we do not observe anything\n return self.num_tensor_args > 0\n\n def is_output_quantized(self, qconfig):\n dtypes = get_qconfig_dtypes(qconfig)\n return self.binary_op in binary_op_supported_dtypes and \\\n dtypes in binary_op_supported_dtypes[self.binary_op]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n\n if self.num_tensor_args == 0:\n # example: x + y, when x and y are scalars\n return quantized_graph.node_copy(\n node, load_arg(quantized=None))\n\n dtypes = get_qconfig_dtypes(qconfig)\n\n if is_reference:\n act_dtype = activation_dtype(qconfig)\n dtypes = get_qconfig_dtypes(qconfig)\n if act_dtype == torch.float or \\\n not (self.binary_op in binary_op_supported_dtypes and dtypes in binary_op_supported_dtypes[self.binary_op]):\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n else:\n if self.num_tensor_args == 2:\n # make sure both inputs are quantized to act_dtype\n load_arg(quantized={0: act_dtype, 1: act_dtype})(self.binary_op_node.args)\n args = load_arg(quantized=torch.float)(self.binary_op_node.args)\n kwargs = load_arg(quantized=torch.float)(self.binary_op_node.kwargs)\n op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))\n\n def modified_load_arg(n: Node):\n if n.name == self.binary_op_node.name:\n return op_out\n else:\n return load_arg(quantized=torch.float)(n)\n\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.relu_node, modified_load_arg)\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n return quantize_node(\n op_out, activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n elif not is_reference and self.binary_op in binary_op_supported_dtypes and \\\n dtypes in binary_op_supported_dtypes[self.binary_op]:\n if dtypes in [(torch.quint8, torch.qint8, None)]:\n assert self.quantized_binary_op is not None\n if self.num_tensor_args == 1:\n # add/mul scalar\n first_arg = self.binary_op_node.args[0]\n cache_for_no_tensor_check: Dict[Node, bool] = dict()\n 
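# Decide which input position holds the Tensor operand, so that the\n # quantized op knows which argument stays quantized. For example, for\n # torch.add(x, 2) the Tensor is arg 0, while for torch.add(2, x) it is arg 1.\n 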
if isinstance(first_arg, Node) and (\n not all_node_args_have_no_tensors(\n first_arg, modules, cache_for_no_tensor_check)):\n quantized_index = 0\n else:\n quantized_index = 1\n\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_function', self.quantized_binary_op,\n load_arg(quantized=[quantized_index])(self.binary_op_node.args),\n self.binary_op_node.kwargs\n ),\n self.binary_op_node)\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[operator]\n scale = float(scale)\n zero_point = int(zero_point)\n scale_arg, zero_point_arg = \\\n create_qparam_nodes(\n node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n kwargs = {**self.binary_op_node.kwargs}\n add_args = (*load_arg(quantized=activation_dtype(qconfig))(self.binary_op_node.args), scale_arg, zero_point_arg)\n op = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_function', self.quantized_binary_op, add_args, kwargs),\n self.binary_op_node)\n return op\n else:\n assert dtypes == (torch.float16, torch.float16, None)\n # TODO (refactor) this is duplicated, maybe have a helper function\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16,), {}\n )\n else:\n # leave the op unquantized if the dtype,reference combination is not supported\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by {} for is_reference={}. 
\"\n \"Supported non-reference dtype combinations are: {} \"\n \"\".format(dtypes,\n self.binary_op,\n is_reference,\n binary_op_supported_dtypes[self.binary_op]\n )\n )\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.binary_op_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n\n\n@register_quant_pattern(torch.cat)\nclass CatQuantizeHandler(QuantizeHandler):\n def is_general_tensor_value_op(self) -> bool:\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if not self.all_node_args_are_tensors:\n return NotImplemented\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the first argument is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = list(load_arg(quantized=torch.float)(node.args))\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out,\n activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n\n# handle conv, maybe followed by relu\n# NB: matching order is reversed, that is we match from the bottom of this list to the beginning\n@register_quant_pattern(torch.nn.Conv1d)\n@register_quant_pattern(torch.nn.Conv2d)\n@register_quant_pattern(torch.nn.Conv3d)\n@register_quant_pattern(torch.nn.functional.conv1d)\n@register_quant_pattern(torch.nn.functional.conv2d)\n@register_quant_pattern(torch.nn.functional.conv3d)\n# TODO: add qat.Conv1d\n@register_quant_pattern(torch.nn.qat.Conv2d)\n@register_quant_pattern(torch.nn.qat.Conv3d)\n@register_quant_pattern(torch.nn.intrinsic.ConvReLU1d)\n@register_quant_pattern(torch.nn.intrinsic.ConvReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.ConvReLU3d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn1d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn2d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBn3d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU1d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvBnReLU3d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.qat.ConvReLU3d)\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv1d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv2d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.conv3d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv1d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.conv2d))\n@register_quant_pattern((torch.nn.ReLU, 
torch.nn.functional.conv3d))\n# just for error checks\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv1d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv2d))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Conv3d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.Conv2d))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.Conv3d))\n# TODO: rename Relu -> ReLU to be more consistent with other classes\nclass ConvReluQuantizeHandler(QuantizeHandler):\n def __init__(self, node: Node, modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.conv_node = node\n if node.op == \"call_module\":\n self.conv = modules[str(self.conv_node.target)]\n elif node.op == \"call_function\":\n self.conv = node.target # type: ignore[assignment]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n # Supported combinations are:\n # quant_type | activation (compute_type) | weight\n # static quint8 qint8\n\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.quint8, torch.qint8, None),\n ]\n\n # TODO: is_reference option for conv module\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if not is_reference and dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Conv \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n if self.relu_node:\n conv_out = quantized_graph.node_copy(self.conv_node, load_arg(quantized=torch.float))\n relu_args = [conv_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n\n activation_int8_quantized = activation_is_int8_quantized(qconfig)\n\n if self.conv_node.op == 'call_module':\n # note that relu should already be fused into conv module in the fusion step\n assert self.relu_node is None, 'conv module and relu fusion is not executed, ' \\\n 'please make sure to run fusion before prepare'\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert output_activation_post_process is not None\n\n # We'll always produce reference pattern for torch.nn.Conv*d,\n # will remove the else branch after we migrated all use cases\n if is_reference or \\\n type(self.conv) in [torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d] and \\\n dtypes in [(torch.quint8, torch.qint8, None)]:\n # produce dequant - float_op - quant pattern\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.conv_node.args[0])\n args = load_arg(quantized=torch.float)(self.conv_node.args)\n # Get the float conv and attach quantization scheme and 
quantization\n # parameters of weight to the module\n # and qparam is a dictionary of\n # {\"qscheme\": ..., \"scale\": ..., \"zero_point\": ...} for per tensor quantization or\n # {\"qscheme\": ..., \"scale\": ..., \"zero_point\": ..., \"axis\": ...} for per channel quantization\n float_conv = self.conv\n fused_conv = None\n if isinstance(\n float_conv,\n QAT_CONV_MODULE_CLASSES):\n # case 1. converting qat conv module to\n # a float conv module, we need to attach\n # weight fake_quant to the conv module,\n # weight fake_quant is assumed to be run during\n # QAT so we don't need to run it again here\n float_conv = self.conv.to_float() # type: ignore[operator]\n # change qat conv to conv\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, float_conv)\n if isinstance(float_conv, torch.nn.intrinsic._FusedModule):\n fused_conv = float_conv\n float_conv = float_conv[0]\n weight_post_process = self.conv.weight_fake_quant\n else:\n # case 2. converting a conv module/fused conv module\n # to float conv module, we need to attach\n # weight observer to the conv module and run it\n # with conv weight\n if isinstance(float_conv, torch.nn.intrinsic._FusedModule):\n fused_conv = float_conv\n float_conv = float_conv[0] # type: ignore[index]\n assert qconfig is not None\n weight_post_process = qconfig.weight()\n # run weight observer\n weight_post_process(float_conv.weight) # type: ignore[operator]\n weight_qparams = get_qparam_dict(weight_post_process)\n # hardcoded for now, TODO: expose the api to user,\n # we can have a map from module to reference module\n # and allow user to register new ones\n qconv_cls = get_static_quant_module_class(\n type(float_conv), is_reference=True)\n ref_conv = qconv_cls.from_float(float_conv, weight_qparams) # type: ignore[attr-defined]\n # if the parent is a fused conv (Sequential), we can replace the first\n # item with ref conv, otherwise we can update\n # the conv instance in the module tree\n if fused_conv is not None:\n fused_conv[0] = ref_conv\n else:\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, ref_conv)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_module', self.conv_node.target, args, {}),\n self.conv_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n else:\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n # 1. attach activation post process to module\n self.conv.activation_post_process = output_activation_post_process\n # 2. 
select quantized class\n qconv_cls = get_static_quant_module_class(\n type(self.conv), additional_static_quant_mapping, is_reference=is_reference)\n quantized = qconv_cls.from_float(self.conv)\n parent_name, name = _parent_name(self.conv_node.target)\n setattr(modules[parent_name], name, quantized)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n self.conv_node.target,\n (load_arg(quantized=torch.quint8)(self.conv_node.args[0]),),\n {},\n ),\n self.conv_node)\n else: # call_function\n assert self.conv_node.op == \"call_function\"\n if is_reference:\n # make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively\n load_arg(quantized={0: torch.quint8, 1: torch.qint8})(self.conv_node.args)\n args = load_arg(quantized=torch.float)(self.conv_node.args)\n kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", self.conv, args, kwargs),\n self.conv_node)\n if self.relu_node:\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n\n if activation_int8_quantized:\n root_module = modules['']\n act_post_process_name = self.relu_node.name if self.relu_node else self.conv_node.name\n act_post_process_node = self.relu_node if self.relu_node else self.conv_node\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n return quantize_node(\n op_out,\n activation_post_process,\n act_post_process_node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n else:\n # output for dynamically quantized conv op is not quantized\n return op_out\n else:\n assert len(self.conv_node.args) >= 7, \\\n \"only conv2d calls with all arguments specified is supported right now in is_reference=False option\"\n # make sure the input and weight are quantized to torch.quint8, torch.qint8, respectively\n args = load_arg(quantized={0: torch.quint8, 1: torch.qint8})(self.conv_node.args)\n # pack weight\n weight = load_arg(quantized=torch.qint8)(self.conv_node.args[1])\n other_args = load_arg(quantized=torch.float)(self.conv_node.args[2:])\n bias, stride, padding, dilation, groups = other_args\n if self.conv == torch.nn.functional.conv1d:\n # F.conv1d can take `int` as well as `list[int]` for stride,\n # padding, dilation, but the prepack op cannot. 
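(e.g. stride=1 must become stride=[1]). 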
Convert\n # these to lists if needed.\n stride = [stride] if isinstance(stride, int) else stride\n padding = [padding] if isinstance(padding, int) else padding\n dilation = [dilation] if isinstance(dilation, int) else dilation\n prepack_args = (weight, bias, stride, padding, dilation, groups)\n prepack_op = get_qconv_prepack_op(self.conv)\n packed_weight = quantized_graph.create_node(\n \"call_function\", prepack_op, prepack_args, {})\n assert activation_int8_quantized, \\\n \"currently only static quantization is supported for conv\"\n # construct conv input\n if activation_int8_quantized:\n qconv_op = get_qconv_op(self.conv, self.relu_node is not None)\n conv_input = load_arg(quantized=torch.quint8)(self.conv_node.args[0])\n\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n\n scale, zero_point, _ = get_per_tensor_qparams(activation_post_process)\n scale_node, zero_point_node = \\\n create_qparam_nodes(\n self.conv_node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n qconv_args = (conv_input, packed_weight, scale_node, zero_point_node)\n kwargs = load_arg(quantized=torch.float)(self.conv_node.kwargs)\n op = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_function', qconv_op, qconv_args, kwargs),\n self.conv_node)\n # Store the name of the fused op to get the path of node after fusion as well.\n # TODO: may need to change the key to Node regenerate the map in each transformation,\n # since we might not be able to rely on the name\n node_name_to_scope[op.name] = node_name_to_scope[self.conv_node.name]\n return op\n else:\n # conv2d_dynamic branch\n raise Exception(\"Only static quant is supported for conv\")\n\n@register_quant_pattern(torch.nn.Linear)\n@register_quant_pattern(torch.nn.functional.linear)\n@register_quant_pattern(torch.nn.qat.Linear)\n@register_quant_pattern(torch.nn.intrinsic.LinearReLU)\n@register_quant_pattern(torch.nn.intrinsic.qat.LinearReLU)\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.functional.linear))\n@register_quant_pattern((torch.nn.ReLU, torch.nn.functional.linear))\n# for error checks\n@register_quant_pattern((torch.nn.ReLU, torch.nn.Linear))\n@register_quant_pattern((torch.nn.functional.relu, torch.nn.Linear))\nclass LinearReLUQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.relu_node = None\n if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \\\n (node.op == 'call_module' and isinstance(modules[str(node.target)], torch.nn.ReLU)):\n self.relu_node = node\n node = node.args[0] # type: ignore[assignment]\n self.linear_node = node\n if node.op == 'call_module':\n self.linear = modules[str(self.linear_node.target)]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n # Supported combinations are:\n # quant_type | activation (compute_type) | weight\n # static quint8 qint8\n # dynamic float32 (quint8) qint8\n # weight_only float32 float16\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.quint8, torch.qint8, None),\n (torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None),\n # static float16 quantization\n (torch.float16, torch.float16, None),\n ]\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if not is_reference and dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Linear \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.linear_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n activation_int8_quantized = activation_is_int8_quantized(qconfig)\n activation_statically_quantized = activation_is_statically_quantized(qconfig)\n weight_dtype = dtypes[1]\n if self.linear_node.op == 'call_module':\n\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n\n # note that relu should already be fused into linear module in the fusion step\n assert self.relu_node is None, 'linear module and relu fusion is not executed, ' \\\n 'please make sure to run fusion before prepare'\n # we'll always produce reference pattern for the following modules\n # will remove the else branch after we migrated all use cases\n module_allowlist = [\n torch.nn.Linear,\n torch.nn.qat.Linear,\n torch.nn.intrinsic.modules.fused.LinearReLU,\n torch.nn.intrinsic.qat.modules.linear_relu.LinearReLU\n ]\n if is_reference or type(self.linear) in module_allowlist and dtypes in [(torch.quint8, torch.qint8, None)]:\n # produce dequant - float_op - quant pattern\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.linear_node.args[0])\n args = load_arg(quantized=torch.float)(self.linear_node.args)\n\n # Get the float linear and attach qscheme and qparams to the module\n float_linear = self.linear\n fused_linear = None\n if isinstance(float_linear, (torch.nn.qat.Linear, torch.nn.intrinsic.qat.LinearReLU)):\n float_linear = float_linear.to_float()\n # change qat linear to linear\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, float_linear)\n # Attach weight fake quant to the linear module\n if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):\n fused_linear = float_linear\n float_linear = float_linear[0]\n weight_post_process = self.linear.weight_fake_quant\n else:\n if isinstance(float_linear, torch.nn.intrinsic.LinearReLU):\n fused_linear = float_linear\n float_linear = self.linear[0] # type: ignore[index]\n # Attach the weight observer to the module\n weight_post_process = qconfig.weight() # type: ignore[union-attr]\n\n # Run weight observer\n # TODO: This is currently a hack for QAT to get the right shapes for scale and zero point.\n # In the future, we should require the user to calibrate the model after calling prepare\n weight_post_process(float_linear.weight) # type: ignore[operator]\n\n weight_qparams = get_qparam_dict(weight_post_process)\n # TODO: include the configuration in backend_config_dict\n # we can have a map from module to reference module\n # and allow user to 
register new ones\n qlinear_cls = get_static_quant_module_class(\n type(float_linear), is_reference=True)\n ref_linear = qlinear_cls.from_float(float_linear, weight_qparams)\n\n # if the parent is a fused linear (Sequential), we can replace the first\n # item to ref linear, otherwise we can update\n # the linear instance in the module tree\n if fused_linear is not None:\n fused_linear[0] = ref_linear\n else:\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, ref_linear)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n ('call_module', self.linear_node.target, args, {}),\n self.linear_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n # non-reference option\n else:\n # 1. attach output activation post process to linear module\n if output_activation_post_process:\n self.linear.activation_post_process = output_activation_post_process\n\n # 2. select corresponding quantized linear class for the float linear class\n if activation_int8_quantized:\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n qlinear = get_static_quant_module_class(\n type(self.linear), additional_static_quant_mapping)\n else:\n assert dtypes in [\n (torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None),\n ], f\"dtype {dtypes} not supported yet\"\n additional_dynamic_quant_mapping = convert_custom_config_dict.get(\"dynamic\", {})\n qlinear = get_dynamic_quant_module_class(type(self.linear), additional_dynamic_quant_mapping)\n\n quantized = qlinear.from_float(self.linear)\n parent_name, name = _parent_name(self.linear_node.target)\n setattr(modules[parent_name], name, quantized)\n # activation needs to be quantized for static quantization\n dtype = torch.float\n if activation_int8_quantized:\n dtype = activation_dtype(qconfig)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n self.linear_node.target,\n (load_arg(quantized=dtype)(self.linear_node.args[0]),), {},\n ),\n self.linear_node)\n else: # call_function\n assert self.linear_node.op == 'call_function'\n if is_reference:\n quantized_input_dtypes = [torch.float, torch.float]\n if activation_int8_quantized:\n quantized_input_dtypes[0] = torch.quint8\n if weight_is_statically_quantized(qconfig):\n quantized_input_dtypes[1] = torch.qint8\n args = load_arg(quantized=quantized_input_dtypes)(self.linear_node.args)\n args = load_arg(quantized=torch.float)(self.linear_node.args)\n kwargs = load_arg(quantized=torch.float)(self.linear_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.linear, args, kwargs),\n self.linear_node)\n if self.relu_node:\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n\n if activation_statically_quantized:\n # quantize output for statically quantized linear op\n root_module = modules['']\n act_post_process_name = self.relu_node.name if self.relu_node else self.linear_node.name\n act_post_process_node = self.relu_node if self.relu_node else self.linear_node\n 
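# Re-quantize the float linear (or linear + relu) output using the output\n # observer's qparams, completing the dequant -> float op -> quant\n # reference pattern described above.\n 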
activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n return quantize_node(\n op_out,\n activation_post_process,\n act_post_process_node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n else:\n # output for dynamically quantized linear op is not quantized\n return op_out\n else: # non-reference option\n # prepacking weights for static int8 quant and dynamic quant\n if dtypes != (torch.float16, torch.float16, None):\n # linear args\n # (x, weight, bias, ...)\n # TODO: the name should be weight is int8 quantized\n weight_quantized = weight_is_statically_quantized(qconfig)\n dtype = weight_dtype if weight_quantized else torch.float\n linear_weight = load_arg(quantized=dtype)(self.linear_node.args[1])\n\n # get other arguments\n kwargs = {**load_arg(quantized=torch.float)(self.linear_node.kwargs)}\n # all args after bias, including bias\n other_args = load_arg(quantized=torch.float)(self.linear_node.args[2:])\n # bias might be either positional, or a keyword argument\n if len(self.linear_node.args) > 2:\n bias = load_arg(quantized=torch.float)(self.linear_node.args[2])\n other_args = other_args[1:] # remove the bias argument\n else:\n bias = kwargs.pop('bias', None)\n\n prepack_args = (linear_weight, bias)\n prepack_op = get_linear_prepack_op_for_dtype(weight_dtype)\n packed_weight = quantized_graph.create_node(\n 'call_function', prepack_op, prepack_args, {})\n # construct linear input\n if activation_int8_quantized:\n qlinear_op = torch.ops.quantized.linear_relu if self.relu_node else torch.ops.quantized.linear\n linear_input = load_arg(quantized=torch.quint8)(self.linear_node.args[0])\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n scale, zero_point, _ = get_per_tensor_qparams(activation_post_process)\n scale_node, zero_point_node = \\\n create_qparam_nodes(\n self.linear_node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n\n qlinear_args = (linear_input, packed_weight, scale_node, zero_point_node)\n op = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", qlinear_op, qlinear_args, kwargs),\n self.linear_node)\n # Store the name of the fused op to get the path of node after fusion as well.\n # TODO: may need to change the key to Node regenerate the map in each transformation,\n # since we might not be able to rely on the name\n node_name_to_scope[op.name] = node_name_to_scope[self.linear_node.name]\n return op\n elif dtypes in [(torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None)]:\n # choose linear dynamic or linear dynamic fp16 op based on weight dtype\n if weight_dtype == torch.qint8:\n if self.relu_node:\n qlinear_op = torch.ops.quantized.linear_relu_dynamic\n else:\n qlinear_op = torch.ops.quantized.linear_dynamic\n else:\n if self.relu_node:\n qlinear_op = torch.ops.quantized.linear_relu_dynamic_fp16\n else:\n qlinear_op = torch.ops.quantized.linear_dynamic_fp16\n\n linear_input = load_arg(quantized=torch.float)(self.linear_node.args[0])\n qlinear_args = (linear_input, packed_weight) # type: ignore[assignment]\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", qlinear_op, qlinear_args, kwargs),\n self.linear_node)\n # Store the name of the dynamic op to get the path of node after replacement as well.\n # TODO: may need to change the key to Node regenerate the map in each 
transformation,\n # since we might not be able to rely on the name\n node_name_to_scope[op_out.name] = node_name_to_scope[self.linear_node.name]\n return op_out\n else:\n assert dtypes == (torch.float16, torch.float16, None)\n # TODO (refactor) this is duplicated, maybe have a helper function\n if self.relu_node:\n op_out = quantized_graph.node_copy(self.linear_node, load_arg(quantized=torch.float))\n relu_args = [op_out]\n relu_args.extend(load_arg(quantized=torch.float)(self.relu_node.args[1:]))\n relu_kwargs = load_arg(quantized=torch.float)(self.relu_node.kwargs)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", torch.nn.functional.relu, tuple(relu_args), relu_kwargs),\n self.relu_node)\n else:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16), {})\n\n@register_quant_pattern(torch.nn.BatchNorm2d)\n@register_quant_pattern(torch.nn.BatchNorm3d)\n@register_quant_pattern(torch.nn.intrinsic.BNReLU2d)\n@register_quant_pattern(torch.nn.intrinsic.BNReLU3d)\nclass BatchNormQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n assert node.op == 'call_module'\n self.bn_node = node\n self.bn = modules[str(self.bn_node.target)]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n # 1. 
attach activation post process to module\n output_activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert output_activation_post_process is not None\n if is_reference:\n # produce dequant - float_op - quant pattern\n dtype = activation_dtype(qconfig)\n activation = load_arg(quantized=dtype)(self.bn_node.args[0])\n args = load_arg(quantized=torch.float)(self.bn_node.args)\n op_out = create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_module\", self.bn_node.target, args, {}),\n self.bn_node)\n if output_activation_post_process:\n op_out = quantize_node(\n op_out,\n output_activation_post_process,\n node,\n modules,\n quantized_graph,\n node_name_to_scope,\n is_input=False)\n return op_out\n else:\n self.bn.activation_post_process = output_activation_post_process\n qbn_cls = get_static_quant_module_class(type(self.bn), additional_static_quant_mapping)\n quantized = qbn_cls.from_float(self.bn)\n parent_name, name = _parent_name(self.bn_node.target)\n setattr(modules[parent_name], name, quantized)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n self.bn_node.target,\n load_arg(quantized=[0])(self.bn_node.args),\n load_arg(quantized=torch.float)(self.bn_node.kwargs),\n ),\n self.bn_node)\n\n@register_quant_pattern(torch.nn.qat.Embedding)\n@register_quant_pattern(torch.nn.qat.EmbeddingBag)\n@register_quant_pattern(torch.nn.Embedding)\n@register_quant_pattern(torch.nn.EmbeddingBag)\nclass EmbeddingQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n\n def input_output_observed(self) -> bool:\n return False\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n # Supported combinations are:\n # quant_type | activation | weight | activation_compute_type\n # weight_only | float32 | quint8 | None\n # weight_only | float32 | quint4x2 | None\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.float32, torch.quint8, None),\n (torch.float32, torch.quint4x2, None),\n ]\n assert node.op == 'call_module'\n emb_node = node\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by Embedding/EmbeddingBag, \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n emb = modules[str(emb_node.target)]\n qemb = get_static_quant_module_class(type(emb))\n quantized = qemb.from_float(emb)\n parent_name, name = _parent_name(emb_node.target)\n setattr(modules[parent_name], name, quantized)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n emb_node.target,\n load_arg(quantized=torch.float)(emb_node.args),\n load_arg(quantized=torch.float)(emb_node.kwargs),\n ),\n emb_node)\n\n# TODO (maybe): merge with embedding quantize handler\n@register_quant_pattern(torch.nn.GRUCell)\n@register_quant_pattern(torch.nn.LSTMCell)\n@register_quant_pattern(torch.nn.RNNCell)\n@register_quant_pattern(torch.nn.LSTM)\nclass RNNDynamicQuantizeHandler(QuantizeHandler):\n def __init__(\n self,\n node: 
Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n\n def input_output_observed(self) -> bool:\n return False\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n # Supported combinations are:\n # quant_type | activation | weight | activation_compute_type\n # dynamic | float32 | qint8 | quint8\n # dynamic | float32 | float16 | None\n # tuple (activation_dtype, weight_dtype, compute_dtype)\n supported_dtypes = [\n (torch.float32, torch.qint8, torch.quint8),\n (torch.float32, torch.float16, None),\n ]\n assert node.op == 'call_module'\n dtypes = get_qconfig_dtypes(qconfig)\n # leave the op unquantized if the dtype combination is not supported\n if dtypes not in supported_dtypes:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by dynamic RNN modules, \"\n \"supported dtype combinations are: {}\".format(dtypes, supported_dtypes))\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n module = modules[str(node.target)]\n qmodule_cls = get_dynamic_quant_module_class(type(module))\n qmodule = qmodule_cls.from_float(module)\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, qmodule)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n node.target,\n load_arg(quantized=torch.float)(node.args),\n load_arg(quantized=torch.float)(node.kwargs),\n ),\n node)\n\nARGS_TO_SKIP = {\n torch._ops.ops.quantized.hardswish: ['inplace'],\n torch._ops.ops.quantized.elu: ['inplace'],\n torch._ops.ops.quantized.dropout: ['inplace'],\n torch._ops.ops.quantized.instance_norm:\n ['running_mean', 'running_var', 'use_input_stats', 'momentum'],\n}\n@register_quant_pattern(torch.nn.ConvTranspose1d)\n@register_quant_pattern(torch.nn.ConvTranspose2d)\n@register_quant_pattern(torch.nn.ELU)\n@register_quant_pattern(torch.nn.LeakyReLU)\n@register_quant_pattern(torch.nn.Hardswish)\n@register_quant_pattern(torch.nn.InstanceNorm1d)\n@register_quant_pattern(torch.nn.InstanceNorm2d)\n@register_quant_pattern(torch.nn.InstanceNorm3d)\n@register_quant_pattern(torch.nn.LayerNorm)\n@register_quant_pattern(torch.nn.SiLU)\n@register_quant_pattern(torch.nn.Mish)\n@register_quant_pattern(torch.nn.Dropout)\n# we currently only support reference patterns for these ops so they have been removed\n# until they receive a proper fp16 kernel. To use the reference pattern, use a custom qconfig\n# @register_quant_pattern(torch.nn.GELU)\n# @register_quant_pattern(torch.nn.Softmax)\n@register_quant_pattern(torch.nn.functional.elu)\n@register_quant_pattern(torch.nn.functional.hardswish)\n@register_quant_pattern(torch.nn.functional.instance_norm)\n@register_quant_pattern(torch.nn.functional.layer_norm)\n@register_quant_pattern(torch.nn.functional.leaky_relu)\n@register_quant_pattern(torch.nn.functional.silu)\n@register_quant_pattern(torch.nn.functional.mish)\n@register_quant_pattern(torch.nn.functional.dropout)\n# we currently only support reference patterns for these ops so they have been removed\n# until they receive a proper fp16 kernel. 
To use the reference pattern, use a custom qconfig\n# @register_quant_pattern(torch.nn.functional.gelu)\n# @register_quant_pattern(torch.nn.functional.softmax)\n@register_quant_pattern(torch.sum)\nclass DefaultNodeQuantizeHandler(QuantizeHandler):\n \"\"\" Common quantized op, first input and first output will be quantized\n \"\"\"\n def __init__(\n self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n if node.op == \"call_function\" or node.op == \"call_method\":\n self.op = node.target\n elif node.op == \"call_module\":\n self.op = type(modules[str(node.target)])\n\n def is_output_quantized(self, qconfig):\n dtypes = get_qconfig_dtypes(qconfig)\n return self.op in default_op_supported_dtypes and \\\n dtypes in default_op_supported_dtypes[self.op]\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n if not self.all_node_args_are_tensors:\n return NotImplemented\n assert node.op in ['call_module', 'call_function'], 'Only call_module and ' + \\\n 'call_function are handled in DefaultNode'\n if convert_custom_config_dict is None:\n convert_custom_config_dict = {}\n additional_static_quant_mapping = convert_custom_config_dict.get(\"static\", {})\n\n dtypes = get_qconfig_dtypes(qconfig)\n if not is_reference and dtypes not in default_op_supported_dtypes[self.op]:\n warnings.warn(\n \"dtype combination: {} is not \"\n \"supported by {} \"\n \"supported dtype combinations are: {}\".format(dtypes, self.op, default_op_supported_dtypes[self.op]))\n return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n # TODO: make helper functions for (torch.quint8, torch.qint8, None)\n if not is_reference:\n if dtypes in [(torch.quint8, torch.qint8, None)]:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n if node.op == 'call_module':\n module = modules[str(node.target)]\n module.activation_post_process = activation_post_process\n quantized_module_cls = get_static_quant_module_class(\n type(module), additional_static_quant_mapping)\n quantized_module = quantized_module_cls.from_float(module)\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, quantized_module)\n return create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\n 'call_module',\n node.target,\n load_arg(quantized=[0])(node.args),\n load_arg(quantized=torch.float)(node.kwargs),\n ),\n node)\n else:\n assert node.op == \"call_function\"\n # call_function\n scale, zero_point = activation_post_process.calculate_qparams() # type: ignore[operator]\n scale = float(scale)\n zero_point = int(zero_point)\n scale_arg, zero_point_arg = \\\n create_qparam_nodes(\n node.name, scale, zero_point, modules,\n quantized_graph, node_name_to_scope)\n\n assert not isinstance(node.target, str), \"Expecting node.target for \"\n \"call_function to be a function instead of a string\"\n quantized_op = get_quantized_operator(node.target)\n args = load_arg(quantized=[0])(node.args)\n kwargs = {**load_arg(quantized=torch.float)(node.kwargs), \"output_scale\": scale_arg,\n \"output_zero_point\": zero_point_arg}\n if quantized_op in ARGS_TO_SKIP:\n args_to_skip = ARGS_TO_SKIP[quantized_op]\n for arg in args_to_skip:\n if arg in kwargs:\n kwargs.pop(arg)\n return 
create_node_from_old_node_preserve_meta(\n quantized_graph,\n (\"call_function\", quantized_op, args, kwargs), # type: ignore[arg-type]\n node)\n else:\n assert dtypes in [(torch.float16, torch.float16, None)]\n # Generally fp16 kernels don't exist for fp16 ops\n warnings.warn(\n \"Only reference patterns are currently supported for {dtype} dtype with {op} op\"\n \"\".format(dtype=dtypes, op=self.op))\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantized_graph.create_node(\n \"call_method\", \"to\", (op_out, torch.float16), {})\n else:\n assert is_reference\n # We can produce reference for a dtypes including\n # (torch.quint8, torch.qint8, torch.qint32, torch.float16)\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the input is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = load_arg(quantized=torch.float)(node.args)\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out, activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n\n@register_quant_pattern(torch.nn.Hardsigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.nn.functional.hardsigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern('hardsigmoid', default_affine_fixed_qparams_observer)\n@register_quant_pattern('hardsigmoid_', default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.nn.Sigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.sigmoid, default_affine_fixed_qparams_observer)\n@register_quant_pattern('sigmoid', default_affine_fixed_qparams_observer)\n@register_quant_pattern('sigmoid_', default_affine_fixed_qparams_observer)\n@register_quant_pattern(torch.nn.Tanh, default_symmetric_fixed_qparams_observer)\n@register_quant_pattern(torch.tanh, default_symmetric_fixed_qparams_observer)\n@register_quant_pattern('tanh', default_symmetric_fixed_qparams_observer)\n@register_quant_pattern('tanh_', default_symmetric_fixed_qparams_observer)\nclass FixedQParamsOpQuantizeHandler(QuantizeHandler):\n def __init__(self,\n node: Node,\n modules: Dict[str, torch.nn.Module]):\n super().__init__(node, modules)\n self.node = node\n\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n # FixQParamOps are the same as CopyNode in int8 quantization\n return activation_dtype(qconfig) in [torch.quint8, torch.qint8]\n\n # some qhandlers override the activations constructor\n def get_activation_ctr(self, qconfig, pattern, is_training) -> Optional[Callable]:\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.quint8:\n return get_default_output_activation_post_process_map(is_training).get(\n pattern, qconfig.activation)\n else:\n return qconfig.activation\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = 
quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the input is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = load_arg(quantized=torch.float)(node.args)\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out, activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n\n@register_quant_pattern(torch.nn.AdaptiveAvgPool1d)\n@register_quant_pattern(torch.nn.AdaptiveAvgPool2d)\n@register_quant_pattern(torch.nn.AdaptiveAvgPool3d)\n@register_quant_pattern(torch.nn.AvgPool1d)\n@register_quant_pattern(torch.nn.AvgPool2d)\n@register_quant_pattern(torch.nn.AvgPool3d)\n@register_quant_pattern(torch.nn.Hardtanh)\n@register_quant_pattern(torch.nn.MaxPool1d)\n@register_quant_pattern(torch.nn.MaxPool2d)\n@register_quant_pattern(torch.nn.MaxPool3d)\n@register_quant_pattern(torch.nn.ReLU)\n@register_quant_pattern(torch.nn.ReLU6)\n@register_quant_pattern(torch.adaptive_avg_pool1d)\n@register_quant_pattern(torch.nn.functional.adaptive_avg_pool2d)\n@register_quant_pattern(torch.nn.functional.adaptive_avg_pool3d)\n@register_quant_pattern(torch.nn.functional.hardtanh)\n@register_quant_pattern(torch.nn.functional.hardtanh_)\n@register_quant_pattern(torch.nn.functional.interpolate)\n@register_quant_pattern(torch.nn.functional.max_pool1d)\n@register_quant_pattern(torch.nn.functional.max_pool2d)\n@register_quant_pattern(torch.nn.functional.max_pool3d)\n@register_quant_pattern(torch.nn.functional.relu)\n@register_quant_pattern(torch.nn.functional.relu6)\n@register_quant_pattern(torch.avg_pool1d)\n@register_quant_pattern(torch._C._nn.avg_pool2d)\n@register_quant_pattern(torch._C._nn.avg_pool3d)\n@register_quant_pattern(torch.clamp)\n@register_quant_pattern(torch.flatten)\n@register_quant_pattern(torch.mean)\n@register_quant_pattern(operator.floordiv)\n@register_quant_pattern('clamp')\n@register_quant_pattern('mean')\n@register_quant_pattern('relu')\n@register_quant_pattern('relu_')\nclass CopyNodeQuantizeHandler(QuantizeHandler):\n \"\"\" Operators that works on both float and quantized input\n if input is quantized, the output Tensor shares\n the same quantization parameter with input.\n These ops will do computation on the input Tensor, e.g. average pool, so we will\n insert extra observer/fake_quant for the output of these operators.\n TODO: maybe rename this to TensorValueOpQuantizeHandler\n \"\"\"\n def should_mark_output_quantized_from_input_quantized_status(\n self,\n qconfig: QConfigAny\n ) -> bool:\n return True\n\n def is_general_tensor_value_op(self) -> bool:\n return True\n\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n\n is_call_function, is_call_method, is_call_module = check_node(node, modules)\n if is_reference or (is_call_function or is_call_method or is_call_module):\n # when activation dtype is torch.float, the node does not require\n # observation\n # e.g. 
dynamic quantization or weight_only quantization\n act_dtype = activation_dtype(qconfig)\n if act_dtype == torch.float:\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return op_out\n else:\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n # make sure the input is quantized to act_dtype\n load_arg(quantized={0: act_dtype})(node.args)\n args = list(load_arg(quantized=torch.float)(node.args))\n kwargs = load_arg(quantized=torch.float)(node.kwargs)\n op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n return quantize_node(\n op_out,\n activation_post_process,\n node, modules, quantized_graph, node_name_to_scope, is_input=False)\n else:\n return quantized_graph.node_copy(node, load_arg(quantized=None))\n\nclass CustomModuleQuantizeHandler(QuantizeHandler):\n def convert(self,\n node: Node,\n qconfig: QConfigAny,\n modules: Dict[str, torch.nn.Module],\n quantized_graph: Graph,\n node_name_to_scope: Dict[str, Tuple[str, type]],\n load_arg: Callable,\n is_reference: bool = False,\n convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n \"\"\" Convert a float custom module to quantized custom module\n \"\"\"\n assert node.op == 'call_module'\n assert convert_custom_config_dict is not None\n custom_module_class_mapping = convert_custom_config_dict.get(\"observed_to_quantized_custom_module_class\", None)\n assert custom_module_class_mapping is not None\n observed_custom_module = modules[str(node.target)]\n if activation_is_statically_quantized(qconfig):\n activation_post_process = \\\n self._maybe_get_last_node_only_observer(modules)\n assert activation_post_process is not None\n observed_custom_module.activation_post_process = activation_post_process\n quantized_custom_module_class = get_swapped_custom_module_class(\n observed_custom_module, custom_module_class_mapping, qconfig)\n quantized_custom_module = \\\n quantized_custom_module_class.from_observed(observed_custom_module)\n parent_name, name = _parent_name(node.target)\n setattr(modules[parent_name], name, quantized_custom_module)\n # hardcoded the quantized input to be None (take whatever is in the environment),\n # we can extend this\n # if there is a need, e.g. 
get the indexes of quantized inputs from some\n        # module attribute like module._QUANTIZED_INPUT_INDEXES\n        return quantized_graph.node_copy(node, load_arg(quantized=None))\n\n@register_quant_pattern(torch.nn.Identity)\n@register_quant_pattern(torch.transpose)\n@register_quant_pattern(torch.repeat_interleave)\n@register_quant_pattern(torch.squeeze)\n@register_quant_pattern(torch.stack)\n@register_quant_pattern(torch.unsqueeze)\n@register_quant_pattern('contiguous')\n@register_quant_pattern('detach')\n@register_quant_pattern('detach_')\n@register_quant_pattern('permute')\n@register_quant_pattern('repeat')\n@register_quant_pattern('repeat_interleave')\n@register_quant_pattern('reshape')\n@register_quant_pattern('resize_')\n@register_quant_pattern('shape')\n@register_quant_pattern('size')\n@register_quant_pattern('squeeze')\n@register_quant_pattern('squeeze_')\n@register_quant_pattern('transpose')\n@register_quant_pattern('unsqueeze')\n@register_quant_pattern('unsqueeze_')\n@register_quant_pattern('view')\nclass GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):\n    \"\"\" Operators that work on both float and quantized input.\n    If the input is quantized, the output Tensor shares\n    the same quantization parameters with the input.\n    These ops only do rearrangement of Tensor values, for\n    example reshape, or just query information about the Tensor,\n    e.g. size; we do not insert extra observer/fake_quant\n    for the output of the operator.\n    \"\"\"\n    def is_general_tensor_shape_op(self) -> bool:\n        return True\n\n    def should_mark_output_quantized_from_input_quantized_status(\n        self,\n        qconfig: QConfigAny\n    ) -> bool:\n        return True\n\n    def convert(self,\n                node: Node,\n                qconfig: QConfigAny,\n                modules: Dict[str, torch.nn.Module],\n                quantized_graph: Graph,\n                node_name_to_scope: Dict[str, Tuple[str, type]],\n                load_arg: Callable,\n                is_reference: bool = False,\n                convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n        # when activation dtype is torch.float, the node does not require\n        # observation\n        # e.g. 
dynamic quantization or weight_only quantization\n        act_dtype = activation_dtype(qconfig)\n        if act_dtype == torch.float:\n            op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n            return op_out\n        else:\n            activation_post_process = \\\n                self._maybe_get_last_node_only_observer(modules)\n            if activation_post_process is not None:\n                args = list(load_arg(quantized=torch.float)(node.args))\n                kwargs = load_arg(quantized=torch.float)(node.kwargs)\n                op_out = quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n                return quantize_node(\n                    op_out,\n                    activation_post_process,\n                    node, modules, quantized_graph, node_name_to_scope, is_input=False)\n            else:\n                return quantized_graph.node_copy(node, load_arg(quantized=torch.float))\n\nclass StandaloneModuleQuantizeHandler(QuantizeHandler):\n    \"\"\" Converts an observed standalone module to a quantized standalone module\n    by calling convert_fx on the observed standalone module.\n    \"\"\"\n    def convert(self,\n                node: Node,\n                qconfig: QConfigAny,\n                modules: Dict[str, torch.nn.Module],\n                quantized_graph: Graph,\n                node_name_to_scope: Dict[str, Tuple[str, type]],\n                load_arg: Callable,\n                is_reference: bool = False,\n                convert_custom_config_dict: Dict[str, Any] = None) -> Node:\n        assert node.op == 'call_module'\n        convert = torch.ao.quantization.quantize_fx._convert_standalone_module_fx  # type: ignore[attr-defined]\n        # We know that the observed standalone module is a GraphModule since\n        # it is produced by us\n        observed_standalone_module : GraphModule = modules[str(node.target)]  # type: ignore[assignment]\n        input_quantized_idxs = observed_standalone_module._standalone_module_input_quantized_idxs.tolist()  # type: ignore[operator]\n        quantized_standalone_module = convert(observed_standalone_module, is_reference=is_reference)\n        parent_name, name = _parent_name(node.target)\n        # update the modules dict\n        setattr(modules[parent_name], name, quantized_standalone_module)\n        modules[str(node.target)] = quantized_standalone_module\n        return quantized_graph.node_copy(node, load_arg(quantized=input_quantized_idxs))\n
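\n# ---------------------------------------------------------------------------\n# Editor's illustrative sketch (not part of the upstream file): one way the\n# handlers above are exercised end-to-end through the public FX graph mode\n# quantization API. `Demo` is a hypothetical module; `prepare_fx`/`convert_fx`\n# are the standard entry points, though their signatures vary across releases.\n# ---------------------------------------------------------------------------\nif __name__ == '__main__':\n    from torch.ao.quantization import get_default_qconfig\n    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx\n\n    class Demo(torch.nn.Module):\n        def forward(self, x):\n            x = torch.nn.functional.relu(x)  # matches CopyNodeQuantizeHandler\n            return x.reshape(-1)             # matches GeneralTensorShapeOpQuantizeHandler\n\n    model = Demo().eval()\n    qconfig_dict = {'': get_default_qconfig('fbgemm')}\n    prepared = prepare_fx(model, qconfig_dict)  # inserts observers\n    prepared(torch.randn(2, 8))                 # calibration pass\n    quantized = convert_fx(prepared)            # dispatches to the convert() methods above\n    print(quantized.graph)\n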
torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)\n x[torch.randn(*shape) > 0.5] = 0\n if with_extremal and dtype.is_floating_point:\n # Use extremal values\n x[torch.randn(*shape) > 0.5] = float('nan')\n x[torch.randn(*shape) > 0.5] = float('inf')\n x[torch.randn(*shape) > 0.5] = float('-inf')\n elif with_extremal and dtype.is_complex:\n x[torch.randn(*shape) > 0.5] = complex('nan')\n x[torch.randn(*shape) > 0.5] = complex('inf')\n x[torch.randn(*shape) > 0.5] = complex('-inf')\n elif dtype == torch.bool:\n x = torch.zeros(shape, dtype=dtype, device=device)\n x[torch.randn(*shape) > 0.5] = True\n else:\n x = torch.randint(15, 100, shape, dtype=dtype, device=device)\n\n return x\n\n# TODO: replace with make_tensor\ndef _rand_shape(dim, min_size, max_size):\n shape = []\n for i in range(dim):\n shape.append(random.randint(min_size, max_size))\n return tuple(shape)\n\ndef _reduced_shape(shape, dim=None, keepdim=False):\n \"\"\"Computes the expected reduced shape given dim and keepdim\n\n Args:\n shape: The shape to reduce\n dim : The dimensions to reduce\n keepdim: If true, reduced dimensions have size 1 in the reduced shape,\n otherwise they are removed from the reduced shape.\n\n Returns:\n The reduced shape\n \"\"\"\n if dim is None:\n return [1] * len(shape) if keepdim else []\n\n # Wrap negative dims\n dim = dim if isinstance(dim, Sequence) else [dim]\n dim = set(i if i >= 0 else len(shape) + i for i in dim)\n\n result = []\n for i, size in enumerate(shape):\n if i not in dim:\n result.append(size)\n elif keepdim:\n result.append(1)\n\n return result\n\nclass TestReductions(TestCase):\n\n ###########################################################################\n # ReductionOpInfo unit tests\n ###########################################################################\n\n def _test_dim_keepdim(self, op: ReductionOpInfo, device, *, ndim, **dim_keepdim):\n \"\"\"Tests output shape for input with ndim and dim and keepdim kwargs\"\"\"\n shape = torch.randint(2, 5, (ndim,)).tolist()\n t = make_tensor(shape, device, torch.float)\n args, kwargs = next(op.generate_args_kwargs(t, **dim_keepdim))\n result = op(t, *args, **dim_keepdim, **kwargs)\n expected_shape = _reduced_shape(shape, **dim_keepdim)\n self.assertEqual(result.shape, expected_shape, f\"\"\"\n expected output shape to be {expected_shape} but got {list(result.shape)}\n for input shape {shape} and {dim_keepdim}\n \"\"\")\n\n # TODO(@heitorschueroff) combine cases with and without keepdim once\n # there's support for a @parametrize decorator.\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_default(self, device, op: ReductionOpInfo):\n \"\"\"Tests that the default dim reduces all dimensions.\"\"\"\n for ndim in range(3):\n self._test_dim_keepdim(op, device, ndim=ndim)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_default_keepdim(self, device, op: ReductionOpInfo):\n \"\"\"Tests that the default dim, when keepdim=True, reduces all dimensions to size 1.\"\"\"\n for ndim in range(3):\n self._test_dim_keepdim(op, device, ndim=ndim, keepdim=True)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_none(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=None reduces all dimensions.\"\"\"\n for ndim in range(3):\n self._test_dim_keepdim(op, device, ndim=ndim, dim=None)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_none_keepdim(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=None, when keepdim=True, reduces all dimensions to size 1.\"\"\"\n for ndim 
in range(3):\n self._test_dim_keepdim(op, device, ndim=ndim, dim=None, keepdim=True)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_single(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=i reduces dimension i.\"\"\"\n self._test_dim_keepdim(op, device, ndim=0, dim=0)\n self._test_dim_keepdim(op, device, ndim=1, dim=0)\n self._test_dim_keepdim(op, device, ndim=2, dim=-1)\n self._test_dim_keepdim(op, device, ndim=3, dim=1)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_single_keepdim(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=i, when keepdim=True, reduces dimension i to size 1.\"\"\"\n self._test_dim_keepdim(op, device, ndim=0, dim=0, keepdim=True)\n self._test_dim_keepdim(op, device, ndim=1, dim=0, keepdim=True)\n self._test_dim_keepdim(op, device, ndim=2, dim=-1, keepdim=True)\n self._test_dim_keepdim(op, device, ndim=3, dim=1, keepdim=True)\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_empty(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=[] is a no-op\"\"\"\n self._test_dim_keepdim(op, device, ndim=0, dim=[])\n self._test_dim_keepdim(op, device, ndim=2, dim=[])\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_empty_keepdim(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=[], when keepdim=True, is a no-op\"\"\"\n self._test_dim_keepdim(op, device, ndim=0, dim=[], keepdim=True)\n self._test_dim_keepdim(op, device, ndim=2, dim=[], keepdim=True)\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_multi(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=[i, j, ...] reduces dimensions i, j, ....\"\"\"\n self._test_dim_keepdim(op, device, ndim=1, dim=[0])\n self._test_dim_keepdim(op, device, ndim=3, dim=[0, 2])\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_multi_keepdim(self, device, op: ReductionOpInfo):\n \"\"\"Tests that dim=[i, j, ...], when keepdim=True, reduces dimensions i, j, .... 
to size 1.\"\"\"\n self._test_dim_keepdim(op, device, ndim=1, dim=[0], keepdim=True)\n self._test_dim_keepdim(op, device, ndim=3, dim=[0, 2], keepdim=True)\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_multi_unsorted(self, device, op: ReductionOpInfo):\n \"\"\"Tests that operator correctly handles unsorted dim list.\"\"\"\n self._test_dim_keepdim(op, device, ndim=4, dim=[3, 0, 2])\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_multi_unsorted_keepdim(self, device, op: ReductionOpInfo):\n \"\"\"Tests that operator correctly handles unsorted dim list when keepdim=True.\"\"\"\n self._test_dim_keepdim(op, device, ndim=4, dim=[3, 0, 2], keepdim=True)\n\n @ops(filter(lambda op: op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_multi_duplicate(self, device, op: ReductionOpInfo):\n \"\"\"Tests that an error is raised if dim has duplicate entries.\"\"\"\n with self.assertRaises(RuntimeError):\n self._test_dim_keepdim(op, device, ndim=3, dim=[0, 1, 1, 2])\n\n @ops(filter(lambda op: not op.supports_multiple_dims, reduction_ops), dtypes=OpDTypes.none)\n def test_dim_multi_unsupported(self, device, op: ReductionOpInfo):\n \"\"\"Tests that ops claiming to not support multi dim actually don't.\"\"\"\n with self.assertRaises(TypeError):\n self._test_dim_keepdim(op, device, ndim=3, dim=[0, 2])\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_offbounds(self, device, op: ReductionOpInfo):\n \"\"\"Tests that passing an off-bounds dim throws\"\"\"\n with self.assertRaises(IndexError):\n self._test_dim_keepdim(op, device, ndim=2, dim=2)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_dim_ndim_limit(self, device, op: ReductionOpInfo):\n \"\"\"Tests that an exception is raised when reducing a tensor with more\n than 64 dims along some specific dimensions. 
dim=None is ok\"\"\"\n t = make_tensor([1] * 65, device, torch.float)\n with self.assertRaisesRegex(RuntimeError, \"only tensors with up to 64 dims are supported\"):\n op(t, dim=0)\n\n @ops(filter(lambda op: op.identity is not None, reduction_ops), dtypes=OpDTypes.supported)\n def test_identity(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests that the identity value is an identity for the operator\"\"\"\n t = make_tensor((10,), device, dtype)\n t[1::2] = op.identity\n args, kwargs = next(op.generate_args_kwargs(t))\n result = op(t[::2], *args, **kwargs)\n result_with_identity = op(t, *args, **kwargs)\n self.assertEqual(result, result_with_identity, \"\"\"\n Adding identity value to the input tensor should not change the result.\n \"\"\")\n\n # TODO(@heitorschueroff) Update these to use the nan_policy kwarg once\n # it is added to reduction operators.\n\n @ops(filter(lambda op: op.nan_policy == 'propagate', reduction_ops), dtypes=OpDTypes.supported,\n allowed_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16))\n def test_nan_policy_propagate(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests that nan is propagated to the output by default\"\"\"\n t = make_tensor((5,), device, dtype)\n t[2] = torch.nan\n args, kwargs = next(op.generate_args_kwargs(t))\n result = op(t, *args, **kwargs)\n self.assertTrue(result.isnan())\n\n @ops(filter(lambda op: op.nan_policy == 'omit', reduction_ops), dtypes=OpDTypes.supported,\n allowed_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16))\n def test_nan_policy_omit(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests that NaN values do not affect the result.\"\"\"\n t = make_tensor((10,), device, dtype)\n t[1::2] = torch.nan\n args, kwargs = next(op.generate_args_kwargs(t))\n result = op(t[::2], *args, **kwargs)\n result_with_nan = op(t, *args, **kwargs)\n self.assertEqual(result, result_with_nan)\n\n @ops(reduction_ops, dtypes=OpDTypes.supported)\n def test_result_dtype(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests that the result has the correct dtype\"\"\"\n t = make_tensor((5,), device, dtype)\n args, kwargs = next(op.generate_args_kwargs(t))\n result: torch.Tensor = op(t, *args, **kwargs)\n is_integral = dtype in integral_types_and(torch.bool)\n if op.promotes_int_to_float and is_integral:\n self.assertTrue(torch.is_floating_point(result))\n elif op.promotes_int_to_int64 and is_integral:\n self.assertEqual(result.dtype, torch.int64)\n elif op.result_dtype is not None:\n self.assertEqual(result.dtype, op.result_dtype)\n else:\n self.assertEqual(result.dtype, dtype)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_empty_tensor_empty_slice(self, device, op: ReductionOpInfo):\n \"\"\"Tests for consistent behavior when reducing over an empty slice.\n\n The rules for reducing over an empty slice are as follows:\n - Return the identity value if the operator has one\n - Otherwise, return NaN if the operator promotes integral dtype to\n floating point dtypes.\n - Otherwise, raise an error\n\n See discussion here https://github.com/pytorch/pytorch/issues/61901\n \"\"\"\n t = make_tensor((0, 2, 3), device, torch.float)\n for dim in [0] + [[0, 2]] if op.supports_multiple_dims else []:\n args, kwargs = next(op.generate_args_kwargs(t, dim=dim))\n if op.identity is not None:\n # Reducing along empty slice should return identity\n result = op(t, *args, dim=dim, **kwargs)\n self.assertEqual(result, torch.full_like(result, op.identity))\n elif op.promotes_int_to_float:\n # Reducing along empty slice 
should return NaN\n result = op(t, *args, dim=dim, **kwargs)\n self.assertEqual(result, torch.full_like(result, torch.nan))\n else:\n # Reducing along empty slice should raise an error\n with self.assertRaises(IndexError):\n op(t, *args, dim=dim, **kwargs)\n\n @ops(reduction_ops, dtypes=OpDTypes.none)\n def test_empty_tensor_nonempty_slice(self, device, op: ReductionOpInfo):\n \"\"\"Tests that reducing a nonempty slice of an empty tensor returns an\n empty tensor with the dimensions reduced.\"\"\"\n t = make_tensor((0, 2, 3), device, torch.float)\n for dim in [1] + [[1, 2]] if op.supports_multiple_dims else []:\n args, kwargs = next(op.generate_args_kwargs(t, dim=dim))\n result = op(t, *args, dim=dim, **kwargs)\n self.assertEqual(result.shape, _reduced_shape(t.shape, dim))\n\n def _test_noncontiguous(self, op: ReductionOpInfo, t: torch.Tensor, **reduction_kwargs):\n \"\"\"Helper method to test noncontiguous input tensors.\"\"\"\n assert not t.is_contiguous()\n\n t_contig = t.contiguous()\n for args, kwargs in op.generate_args_kwargs(t_contig, **reduction_kwargs):\n kwargs.update(reduction_kwargs)\n result = op(t, *args, **kwargs)\n expected = op(t_contig, *args, **kwargs)\n self.assertEqual(result, expected)\n\n @ops(reduction_ops)\n def test_noncontiguous_innermost(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests reducing along noncontiguous innermost dimension.\"\"\"\n t = make_tensor((10, 10), device, dtype, low=-1, high=1)\n self._test_noncontiguous(op, t[:, ::2], dim=1)\n\n @ops(reduction_ops)\n def test_noncontiguous_outermost(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests reducing along noncontiguous outermost dimension.\"\"\"\n t = make_tensor((10, 10), device, dtype, low=-1, high=1)\n self._test_noncontiguous(op, t[::2, :], dim=0)\n\n @ops(reduction_ops)\n def test_noncontiguous_all(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests reducing all dimensions of a noncontiguous tensor.\"\"\"\n t = make_tensor((5, 5, 5), device, dtype, low=-1, high=1)\n self._test_noncontiguous(op, t[::2, ::3, 1:-1:2])\n\n @ops(reduction_ops)\n def test_noncontiguous_transposed(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests reducing a transposed tensor.\"\"\"\n t = make_tensor((5, 5), device, dtype, low=-1, high=1)\n self._test_noncontiguous(op, t.T)\n\n @ops(reduction_ops)\n def test_noncontiguous_expanded(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Tests reducing a tensor with expanded singleton dimensions.\"\"\"\n t = make_tensor((2, 3), device, dtype, low=-1, high=1)\n self._test_noncontiguous(op, t.unsqueeze(1).expand(-1, 5, -1))\n\n # NumPy does not support BFloat16 so we don't test that against reference\n # implementations. 
We also don't compare dtypes or test for different\n # keepdim because we already have other tests covering those.\n # The test_reference_testing in test_ops.py only uses the samples from\n # sample_inputs_func which do not test as exhaustively as these tests.\n\n def _test_ref(self, op: ReductionOpInfo, t: torch.Tensor, **reduction_kwargs):\n \"\"\"Compares op against op.ref for the given input and reduction kwargs\"\"\"\n for args, kwargs in op.generate_args_kwargs(t, **reduction_kwargs):\n kwargs.update(reduction_kwargs)\n result = op(t, *args, **kwargs)\n expected = op.ref(t.detach().cpu().numpy(), *args, **kwargs)\n self.assertEqual(result, expected, exact_dtype=False)\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=get_all_dtypes(include_bfloat16=False))\n def test_ref_scalar_input(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for scalar input tensors\"\"\"\n self._test_ref(op, make_tensor([], device, dtype))\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=get_all_dtypes(include_bfloat16=False))\n def test_ref_small_input(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for small input tensors\"\"\"\n t = make_tensor((5, 3, 4, 2), device, dtype, low=-2, high=2, exclude_zero=True)\n self._test_ref(op, t)\n for dim in [0, 1, 3] + ([[0, 2], [1, 3]] if op.supports_multiple_dims else []):\n self._test_ref(op, t, dim=dim)\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=[torch.float64])\n def test_ref_large_input_1D(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for a large 1D input tensor to check stability\"\"\"\n self._test_ref(op, make_tensor((2 ** 20,), device, dtype, low=-1, high=1, exclude_zero=True))\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=[torch.float64])\n def test_ref_large_input_2D(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for a large 2D input tensor to test parallelism\"\"\"\n t = make_tensor((32, 2 ** 16), device, dtype, low=-1, high=1, exclude_zero=True)\n self._test_ref(op, t, dim=1)\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=[torch.float64])\n def test_ref_large_input_64bit_indexing(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for a very large input tensor that requires 64 bit indexing\"\"\"\n self._test_ref(op, make_tensor((275000000,), device, dtype, low=-1, high=1, exclude_zero=True))\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=get_all_dtypes(include_bfloat16=False))\n def test_ref_duplicate_values(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for input tensors with duplicate values\"\"\"\n t = make_tensor((4, 4), device, dtype, low=-2, high=2, exclude_zero=True)\n t[::2, ::2] = t[1::2, 1::2]\n self._test_ref(op, t)\n self._test_ref(op, t, dim=0)\n self._test_ref(op, t, dim=1)\n\n @ops(filter(lambda op: op.ref is not None, reduction_ops),\n allowed_dtypes=[torch.float32, torch.complex64])\n def test_ref_extremal_values(self, device, dtype, op: ReductionOpInfo):\n \"\"\"Compares op against reference for input tensors with extremal values\"\"\"\n t = make_tensor((5,), device, dtype, exclude_zero=True)\n extremals = [0, 1, nan, inf, -inf]\n for extremal in extremals:\n t[2] = extremal\n self._test_ref(op, t)\n\n 
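# Editor's illustrative sketch (not an upstream test): the reference tests\n    # above boil down to comparing a torch reduction against its NumPy\n    # counterpart on the same data. A minimal, self-contained version of that\n    # pattern, using only torch.sum and np.sum:\n    def test_ref_sum_minimal_sketch(self, device):\n        t = torch.arange(24, dtype=torch.float32, device=device).reshape(2, 3, 4)\n        for dim in (0, 1, 2):\n            result = torch.sum(t, dim=dim)\n            expected = np.sum(t.cpu().numpy(), axis=dim)\n            self.assertEqual(result, expected, exact_dtype=False)\n\n    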
###########################################################################\n    # TODO: Legacy tests - port to ReductionOpInfo\n    ###########################################################################\n\n    def test_var_unbiased(self, device):\n        tensor = torch.randn(100, device=device)\n        self.assertEqual(tensor.var(0), tensor.var(0, unbiased=True))\n        self.assertEqual(tensor.var(), tensor.var(unbiased=True))\n        self.assertEqual(tensor.var(unbiased=False), tensor.var(0, unbiased=False))\n\n        tensor = torch.tensor([1.0, 2.0], device=device)\n        self.assertEqual(tensor.var(unbiased=True), 0.5)\n        self.assertEqual(tensor.var(unbiased=False), 0.25)\n\n        tensor = torch.tensor([1.0, 2.0, 3.0], device=device)\n        self.assertEqual(tensor.var(unbiased=True), 1.0)\n        self.assertEqual(tensor.var(unbiased=False), 2.0 / 3.0)\n\n        tensor = torch.randn(100, device=device)\n        self.assertEqual(tensor.std(0), tensor.std(0, unbiased=True))\n        self.assertEqual(tensor.std(), tensor.std(unbiased=True))\n        self.assertEqual(tensor.std(unbiased=False), tensor.std(0, unbiased=False))\n\n    def test_var_stability(self, device):\n        tensor = torch.tensor([2281.5, 2281.25], device=device)\n        self.assertEqual(tensor.var(dim=0), 0.03125)\n        self.assertEqual(tensor.var(), 0.03125)\n\n    def test_sum_dim_reduction_uint8_overflow(self, device):\n        example = [[-1, 2, 1], [5, 3, 6]]\n        x = torch.tensor(example, dtype=torch.uint8, device=device)\n        self.assertEqual(x.sum(dtype=torch.uint8).item(), 16)\n        self.assertEqual(x.sum(0, dtype=torch.uint8), torch.tensor([4, 5, 7], dtype=torch.uint8, device=device))\n        self.assertEqual(x.sum(1, dtype=torch.uint8), torch.tensor([2, 14], dtype=torch.uint8, device=device))\n        y = torch.tensor(example, dtype=torch.uint8, device=device)\n        torch.sum(x, 0, out=y)\n        self.assertEqual(x.sum(0, dtype=torch.uint8), y)\n\n    def test_dim_reduction_less_than_64(self, device):\n        sizes = [1] * 65\n        x = torch.randn(sizes, device=device)\n        ops = [torch.mean, torch.sum, torch.nansum, torch.std, torch.logsumexp, torch.var,\n               torch.amin, torch.amax, torch.norm]\n        for op in ops:\n            with self.assertRaisesRegex(RuntimeError, \"only tensors with up to 64 dims are supported\"):\n                op(x, 64)\n            with self.assertRaisesRegex(RuntimeError, \"only tensors with up to 64 dims are supported\"):\n                op(x, -1)\n\n    @onlyCPU\n    @dtypes(torch.float, torch.bfloat16)\n    def test_dim_reduction_lastdim(self, device, dtype):\n        x = torch.randn(3, 5, 40, device=device, dtype=dtype)\n        x = x[:, :, 0:40:2]\n        x2 = x.contiguous()\n        ops = [torch.norm, torch.argmax, torch.argmin]\n        for op in ops:\n            y = op(x, dim=-1)\n            y2 = op(x2, dim=-1)\n            self.assertEqual(y, y2)\n\n    @skipIfNoSciPy\n    def test_logsumexp(self, device):\n        from scipy.special import logsumexp\n        a = torch.randn(5, 4, device=device)\n        a[0, 0] = inf\n        a[1, :] = -inf\n        actual = a.logsumexp(1)\n        expected = logsumexp(a.cpu().numpy(), 1)\n        self.assertEqual(expected.shape, actual.shape)\n        self.assertEqual(expected, actual)\n\n        # check that out is actually inplace\n        b = torch.zeros(5, 2, device=device)\n        c = b[:, 0]\n        torch.logsumexp(a, 1, out=c)\n        self.assertEqual(expected, b[:, 0])\n\n        # check that integral inputs are promoted to floating point\n        e = torch.randint(-100, 100, [5, 4], device=device)\n        actual = e.logsumexp(1).to(torch.float64)\n        expected = logsumexp(e.cpu().numpy(), 1)\n        self.assertEqual(expected.shape, actual.shape)\n        self.assertEqual(expected, actual)\n\n    @onlyCPU\n    def test_sum_parallel(self, device):\n        # To use parallel branches we'll need to compare on tensors\n        # that are relatively large. 
Even if this is run on a single-core\n        # machine, these tests will still give you a signal on\n        # correctness.\n\n        def _run_test(size):\n            for dim in range(len(size) + 1):\n                nv = np.round(np.random.rand(*size))  # 0s and 1s\n                tv = torch.from_numpy(nv)\n                # Parallelism is only used if numel is\n                # larger than grainsize defined in Parallel.h\n                self.assertTrue(tv.numel() > 32768)\n                if dim == len(size):\n                    nvs = nv.sum()\n                    tvs = tv.sum()\n                else:\n                    nvs = nv.sum(dim)\n                    tvs = tv.sum(dim)\n                diff = np.abs(nvs - tvs.numpy()).sum()\n                self.assertEqual(diff, 0)\n\n        _run_test([2, 3, 3, 3, 3, 2, 2, 3, 2, 3, 2, 3, 3])\n        _run_test([4, 4, 4, 4, 4, 4, 4, 4, 4, 4])\n        _run_test([1, 32 * 8 * 32 * 8])\n        _run_test([1, 32770])\n\n    # TODO: kill map2_ (and similar) uses and update to compare with NumPy\n    # only works on CPU since this uses map2_, which is only supported on CPU\n    def _testCSelection(self, torchfn, mathfn):\n        # Two tensors\n        size = (100, 100)\n        a = torch.rand(*size)\n        b = torch.rand(*size)\n        c = torchfn(a, b)\n        expected_c = torch.zeros(*size)\n        expected_c.map2_(a, b, lambda _, a, b: mathfn(a, b))\n        self.assertEqual(expected_c, c, atol=0, rtol=0)\n\n    @onlyCPU\n    def test_max_elementwise(self, device):\n        self._testCSelection(torch.max, max)\n\n    @onlyCPU\n    def test_min_elementwise(self, device):\n        self._testCSelection(torch.min, min)\n\n    def test_all_any(self, device):\n        def test(size):\n            x = torch.ones(*size, device=device).byte()\n            self.assertTrue(x.all())\n            self.assertTrue(x.any())\n\n            x[3] = 0\n            self.assertFalse(x.all())\n            self.assertTrue(x.any())\n\n            x.zero_()\n            self.assertFalse(x.all())\n            self.assertFalse(x.any())\n\n            x.fill_(2)\n            self.assertTrue(x.all())\n            self.assertTrue(x.any())\n\n            x = torch.ones(*size, device=device).bool()\n            self.assertTrue(x.all())\n            self.assertTrue(x.any())\n\n            x[3] = False\n            self.assertFalse(x.all())\n            self.assertTrue(x.any())\n\n        test((10,))\n        test((5, 5))\n\n    def test_all_any_with_dim(self, device):\n        def test(x):\n            r1 = x.prod(dim=0, keepdim=False).byte()\n            r2 = x.all(dim=0, keepdim=False)\n            self.assertEqual(r1.shape, r2.shape)\n            self.assertTrue((r1 == r2).all())\n\n            r3 = x.sum(dim=1, keepdim=True).clamp(0, 1).byte()\n            r4 = x.any(dim=1, keepdim=True)\n            self.assertEqual(r3.shape, r4.shape)\n            self.assertTrue((r3 == r4).all())\n\n        test(torch.tensor([[0, 0, 0],\n                           [0, 0, 1],\n                           [0, 1, 1],\n                           [1, 1, 1]], device=device, dtype=torch.uint8))\n\n    def test_numpy_named_args(self, device):\n        x1 = torch.randn(10, device=device)\n        x2 = torch.randn(10, device=device)\n        res1 = torch.add(input=x1, other=x2)\n        res2 = torch.add(x1=x1, x2=x2)\n        self.assertEqual(res1, res2)\n\n        x1 = torch.randn(10, 10, 10, device=device)\n        res1 = x1.sum(dim=(0, 2), keepdim=True)\n        res2 = x1.sum(axis=(0, 2), keepdims=True)\n        self.assertEqual(res1, res2)\n\n    # TODO: kill this and replace with common creation ops\n    def _make_tensors(self, shape, val_range=(-100, 100), use_floating=True, use_integral=True,\n                      use_complex=False) -> Dict[str, List[torch.Tensor]]:\n        float_types = [torch.double,\n                       torch.float]\n        int_types = [torch.int64,\n                     torch.int32,\n                     torch.int16]\n\n        complex_types = [torch.complex64,\n                         torch.complex128]\n\n        def make_contiguous(shape, dtype) -> torch.Tensor:\n            if dtype in float_types:\n                val = torch.randn(shape, dtype=dtype)\n                val = val * ((val_range[1] - val_range[0]) / (math.pi * 2.0))\n                val = val + ((val_range[1] - val_range[0]) / 2.0)\n                val = torch.clamp(val, min=val_range[0], max=val_range[1])\n                return val\n            result = torch.zeros(shape, dtype=dtype)\n            result.apply_(lambda x: 
random.randint(val_range[0], val_range[1]))\n return result\n\n def make_non_contiguous(shape, dtype) -> torch.Tensor:\n contig = make_contiguous(shape, dtype)\n non_contig = torch.empty(shape + (2, 2), dtype=dtype)[..., 0]\n non_contig = non_contig.select(-1, -1)\n non_contig.copy_(contig)\n self.assertFalse(non_contig.is_contiguous())\n return non_contig\n\n def make_contiguous_slice(size, dtype) -> torch.Tensor:\n contig = make_contiguous((1, size), dtype)\n non_contig = contig[:1, 1:size - 1]\n self.assertTrue(non_contig.is_contiguous())\n return contig\n\n types = []\n if use_floating:\n types += float_types\n if use_integral:\n types += int_types\n if use_complex:\n types += complex_types\n tensors: Dict[str, List[torch.Tensor]] = {\"cont\": [], \"noncont\": [], \"slice\": []}\n for dtype in types:\n tensors[\"cont\"].append(make_contiguous(shape, dtype))\n tensors[\"noncont\"].append(make_non_contiguous(shape, dtype))\n tensors[\"slice\"].append(make_contiguous_slice(sum(list(shape)), dtype))\n\n return tensors\n\n # TODO: refactor this to use comparators from common_utils\n def _assert_matches_numpy(self, t, n):\n self.assertEqual(n.shape, t.shape)\n if t.dtype == torch.float:\n self.assertEqual(n, t, rtol=1e-03, atol=1e-05, equal_nan=True)\n else:\n self.assertEqual(n, t, equal_nan=True)\n\n # TODO: update this and tests that use it to use the device argument properly\n def _test_dim_ops(self, pytorch_op, numpy_op,\n use_floating=True, use_integral=True, use_complex=False):\n def do_one(tensors_dict, dim):\n for category, tensors in tensors_dict.items():\n if category == \"slice\":\n dim = 0\n for tensor in tensors:\n # we have no control over NumPy warnings...\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n expected = numpy_op(tensor.cpu().numpy(), dim)\n actual = pytorch_op(tensor, dim)\n self._assert_matches_numpy(actual, expected)\n if torch.cuda.is_available():\n self._assert_matches_numpy(pytorch_op(tensor.cuda(), dim).cpu(), expected)\n do_one(self._make_tensors((5, 400000), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 1)\n do_one(self._make_tensors((3, 5, 7), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 0)\n do_one(self._make_tensors((3, 5, 7), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 1)\n do_one(self._make_tensors((3, 5, 7), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 2)\n do_one(self._make_tensors((100000, ), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), -1)\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 0)\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 1)\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), 2)\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), (1, 2))\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), (1, -1))\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), (0, 2))\n do_one(self._make_tensors((50, 50, 50), use_floating=use_floating,\n use_integral=use_integral, use_complex=use_complex), (0, 2, 1))\n\n 
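# Editor's illustrative sketch (not an upstream test): the make_non_contiguous\n    # helper in _make_tensors above over-allocates, takes a strided view, and\n    # copies the data in, so the values match a contiguous tensor while the\n    # strides do not. The same trick in isolation:\n    def test_non_contiguous_construction_sketch(self, device):\n        contig = torch.arange(6., device=device).reshape(2, 3)\n        non_contig = torch.empty((2, 3) + (2, 2), device=device)[..., 0].select(-1, -1)\n        non_contig.copy_(contig)\n        self.assertFalse(non_contig.is_contiguous())\n        self.assertEqual(contig, non_contig)\n\n    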
@slowTest\n @onlyCPU\n def test_sum_dim(self, device):\n self._test_dim_ops(\n lambda t, d: t.sum(d),\n lambda n, d: n.sum(d),\n use_floating=True, use_integral=True, use_complex=True)\n\n @onlyCPU\n def test_mean_dim(self, device):\n self._test_dim_ops(\n lambda t, d: t.mean(d),\n lambda n, d: n.mean(d),\n use_integral=False,\n use_complex=True)\n\n @onlyCPU\n def test_std_dim(self, device):\n for unbiased in [False, True]:\n self._test_dim_ops(\n lambda t, d: t.std(d, unbiased=unbiased),\n lambda n, d: n.std(d, ddof=1 if unbiased else 0),\n use_integral=False)\n\n @onlyCPU\n def test_var_dim(self, device):\n for unbiased in [False, True]:\n self._test_dim_ops(\n lambda t, d: t.var(d, unbiased=unbiased),\n lambda n, d: n.var(d, ddof=1 if unbiased else 0),\n use_integral=False)\n\n @onlyCPU\n @skipIfNoSciPy\n def test_logsumexp_dim(self, device):\n from scipy.special import logsumexp\n self._test_dim_ops(\n lambda t, d: t.logsumexp(d),\n lambda n, d: logsumexp(n, d),\n use_integral=False)\n\n # TODO: update this and tests that use it to handle device properly\n def _test_reduce_integer_upcast(self, fn, has_out=True, test_complex=True):\n shape = (3, 4, 5)\n reduced_shape = fn(torch.ones(shape)).shape\n\n def _test_out(dtype, other_dtype):\n out = torch.ones(reduced_shape, dtype=dtype)\n result = fn(x, out=out)\n self.assertIs(out.dtype, result.dtype)\n self.assertEqual(fn(x.to(dtype)), result, exact_dtype=False)\n result = fn(x, out=out, dtype=dtype)\n self.assertIs(out.dtype, result.dtype)\n self.assertEqual(fn(x.to(dtype)), result, exact_dtype=False)\n # 'out' is favored over dtype, check error\n self.assertRaises(RuntimeError, lambda: fn(x, out=out, dtype=other_dtype))\n\n for dtype in [dtype for dtype in get_all_math_dtypes('cpu') if dtype != torch.float16]:\n x = torch.ones(shape, dtype=dtype)\n expected_dtype = dtype if dtype.is_floating_point or dtype.is_complex else torch.int64\n self.assertIs(expected_dtype, fn(x).dtype)\n self.assertEqual(fn(x.to(expected_dtype)), fn(x))\n\n if dtype.is_floating_point:\n other_dtype = torch.float32 if dtype == torch.float64 else torch.float64\n elif dtype.is_complex:\n other_dtype = torch.complex64 if dtype == torch.complex128 else torch.complex128\n else:\n other_dtype = torch.int32 if dtype != torch.int32 else torch.int16\n self.assertIs(other_dtype, fn(x, dtype=other_dtype).dtype)\n self.assertEqual(fn(x.to(other_dtype)), fn(x, dtype=other_dtype), exact_dtype=False)\n\n # test mixed int/float/complex\n if dtype.is_floating_point:\n mixed_dtypes = [torch.int32, torch.complex64]\n elif dtype.is_complex:\n mixed_dtypes = [torch.int32, torch.float32]\n else:\n mixed_dtypes = [torch.float32, torch.complex64]\n\n for mixed_dtype in mixed_dtypes:\n self.assertIs(mixed_dtype, fn(x, dtype=mixed_dtype).dtype)\n self.assertEqual(fn(x.to(mixed_dtype)), fn(x, dtype=mixed_dtype), exact_dtype=False)\n\n if has_out:\n _test_out(dtype, other_dtype)\n _test_out(dtype, mixed_dtype)\n\n @onlyCPU\n def test_sum_integer_upcast(self, device):\n self._test_reduce_integer_upcast(lambda x, **kwargs: torch.sum(x, **kwargs), False)\n self._test_reduce_integer_upcast(lambda x, **kwargs: torch.sum(x, 0, **kwargs))\n\n @onlyCPU\n def test_prod_integer_upcast(self, device):\n self._test_reduce_integer_upcast(lambda x, **kwargs: torch.prod(x, **kwargs), False)\n self._test_reduce_integer_upcast(lambda x, **kwargs: torch.prod(x, 0, **kwargs))\n\n @onlyCPU\n def test_cumsum_integer_upcast(self, device):\n self._test_reduce_integer_upcast(lambda x, **kwargs: torch.cumsum(x, 0, 
**kwargs))\n\n @onlyCPU\n def test_cumprod_integer_upcast(self, device):\n self._test_reduce_integer_upcast(lambda x, **kwargs: torch.cumprod(x, 0, **kwargs))\n\n def test_mode(self, device):\n SIZE = 10\n x = torch.arange(1., SIZE * SIZE + 1, device=device).clone().resize_(SIZE, SIZE)\n x[:2] = 1\n x[:, :2] = 1\n x0 = x.clone()\n\n # Pre-calculated results.\n res1val = torch.ones(SIZE, device=device)\n # The indices are the position of the last appearance of the mode element.\n res1ind = torch.ones(SIZE, device=device, dtype=torch.long)\n res1ind[0] = SIZE - 1\n res1ind[1] = SIZE - 1\n\n res2val, res2ind = torch.mode(x, keepdim=False)\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n\n # Test use of result tensor\n res2val = torch.tensor((), device=device)\n res2ind = torch.tensor((), device=device, dtype=torch.long)\n torch.mode(x, keepdim=False, out=(res2val, res2ind))\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n\n # Test non-default dim\n res2val, res2ind = torch.mode(x, 0, False)\n self.assertEqual(res1val, res2val, atol=0, rtol=0)\n self.assertEqual(res1ind, res2ind, atol=0, rtol=0)\n\n # input unchanged\n self.assertEqual(x, x0, atol=0, rtol=0)\n\n def _test_mode_intervals(self, shape, intervals, device, v=1):\n x = torch.arange(0, shape[0] * shape[1], device=device)\n x[v] = x.numel()\n x = x.resize_(shape)\n\n # Set the value of each interval to the mode \"v\"\n for (beg, end) in intervals:\n x[:, beg:end] = v\n\n values, indices = torch.mode(x, -1, False)\n\n # Check whether the returned indices correspond to the returned values\n self.assertTrue((x.gather(1, indices.unsqueeze(1)).t() == values).all())\n # Check whether the returned values are the mode\n self.assertTrue((values == v).all().item())\n\n @onlyCUDA\n def test_mode_large(self, device):\n # i should be less than (d - 2) / 2\n def testset_for_shape(shape, i):\n d = shape[-1]\n # Mode only in the middle.\n self._test_mode_intervals(shape, [(i, d - i)], device)\n # Mode in discontiguous parts of the input.\n self._test_mode_intervals(shape, [(0, i), (i + 1, d - i - 1), (d - i, d)], device)\n\n # More than one line of (65535) thread blocks\n testset_for_shape((65536, 10), 3)\n\n # Max slice size (2048)\n testset_for_shape((10, 2048), 10)\n\n # Naive kernel for big slice sizes (> 2048)\n testset_for_shape((10, 4096), 10)\n\n @expectedFailureMeta # mode only supports CPU and CUDA device type\n @onlyNativeDeviceTypes\n def test_mode_wrong_dtype(self, device):\n def test_for_dtypes(x_ty, v_ty, i_ty, message):\n x = torch.ones(10, device=device, dtype=x_ty)\n v = torch.ones(10, device=device, dtype=v_ty)\n i = torch.ones(10, device=device, dtype=i_ty)\n\n with self.assertRaisesRegex(RuntimeError, message):\n torch.mode(x, -1, True, out=(v, i))\n\n err_msg = \"expected scalar type .* but got .* for \"\n values_err = err_msg + \"values\"\n indices_err = err_msg + \"indices\"\n\n test_for_dtypes(torch.uint8, torch.int8, torch.long, values_err)\n test_for_dtypes(torch.int8, torch.int16, torch.long, values_err)\n test_for_dtypes(torch.int32, torch.float32, torch.long, values_err)\n test_for_dtypes(torch.float32, torch.float64, torch.long, values_err)\n\n test_for_dtypes(torch.uint8, torch.uint8, torch.int8, indices_err)\n test_for_dtypes(torch.int8, torch.int8, torch.int16, indices_err)\n test_for_dtypes(torch.int32, torch.int32, torch.float32, indices_err)\n test_for_dtypes(torch.float32, torch.float32, 
torch.float64, indices_err)\n\n @onlyCUDA\n def test_mode_wrong_device(self, device):\n # CPU Input Tensor\n x = torch.ones(2)\n\n with self.assertRaisesRegex(RuntimeError,\n \"expected device .* but got .* for values\"):\n values = torch.tensor([], device=device)\n torch.mode(x, -1, True, out=(values, torch.tensor([], dtype=torch.long)))\n\n with self.assertRaisesRegex(RuntimeError,\n \"expected device .* but got .* for indices\"):\n indices = torch.tensor([], device=device)\n torch.mode(x, -1, True, out=(torch.tensor([]), indices))\n\n # TODO: make work on CUDA, too\n @onlyCPU\n def test_accreal_type(self, device) -> None:\n x = torch.ones(2, 3, 4)\n self.assertIsInstance(x.double().sum().item(), float)\n self.assertIsInstance(x.float().sum().item(), float)\n self.assertIsInstance(x.long().sum().item(), int)\n self.assertIsInstance(x.int().sum().item(), int)\n self.assertIsInstance(x.short().sum().item(), int)\n self.assertIsInstance(x.char().sum().item(), int)\n self.assertIsInstance(x.byte().sum().item(), int)\n\n def test_var_mean_some_dims(self, device):\n sizes = (4, 6, 7, 5, 3)\n dims = len(sizes)\n\n x = torch.rand(sizes, device=device)\n for num_of_dims in range(2, dims):\n dim_list = list(combinations(list(range(dims)), r=num_of_dims))\n for dim in dim_list:\n for unbiased in [False, True]:\n for keepdim in [False, True]:\n var1, mean1 = torch.var_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)\n var2 = x.var(dim=dim, unbiased=unbiased, keepdim=keepdim)\n mean2 = x.mean(dim=dim, keepdim=keepdim)\n self.assertEqual(var1, var2)\n self.assertEqual(mean1, mean2)\n\n # TODO: this should be a generic opinfo test\n def test_all_any_empty(self, device):\n x = torch.ByteTensor().to(device)\n self.assertTrue(x.all())\n self.assertFalse(x.any())\n\n x = torch.BoolTensor().to(device)\n self.assertTrue(x.all())\n self.assertFalse(x.any())\n\n @dtypesIfCUDA(torch.half, torch.bfloat16, torch.float, torch.double)\n @dtypes(torch.half, torch.bfloat16, torch.float, torch.double)\n def test_max_with_inf(self, device, dtype):\n a = torch.tensor([[-inf, -inf, inf, 3], [inf, inf, -inf, -1]], dtype=dtype, device=device)\n self.assertTrue(torch.all(torch.max(a, dim=1).values == inf).item())\n self.assertTrue(torch.all(torch.amax(a, dim=1) == inf).item())\n self.assertTrue(torch.max(a).item() == inf)\n self.assertTrue(torch.amax(a).item() == inf)\n\n @dtypesIfCUDA(torch.half, torch.bfloat16, torch.float, torch.double)\n @dtypes(torch.half, torch.float, torch.bfloat16, torch.double)\n def test_min_with_inf(self, device, dtype):\n a = torch.tensor([[-inf, -inf, inf, 3], [inf, inf, -inf, -1]], dtype=dtype, device=device)\n self.assertTrue(torch.all(torch.min(a, dim=1).values == (-inf)).item())\n self.assertTrue(torch.all(torch.amin(a, dim=1) == (-inf)).item())\n self.assertTrue(torch.min(a).item() == -inf)\n self.assertTrue(torch.amin(a).item() == -inf)\n\n def _test_minmax_helper(self, torchfn, reffn, device, dtype, skip_indices=False):\n def create_input(shape, device, dtype):\n if dtype.is_floating_point:\n return torch.randn(*shape, device=device, dtype=dtype)\n else:\n low = 0 if dtype == torch.bool else -1000\n high = 2 if dtype == torch.bool else 1000\n return torch.randint(low, high, shape, device=device, dtype=dtype)\n x = create_input((100, 100), device, dtype)\n self.compare_with_numpy(torchfn, reffn, x)\n # non contiguous\n x = create_input((10, 10, 10), device, dtype)\n x = x[:, 4]\n self.compare_with_numpy(torchfn, reffn, x)\n\n def get_values(x):\n if isinstance(x, tuple):\n return 
x[0]\n return x\n\n # indices\n if not skip_indices:\n size = 5\n x = create_input((size, size), device, dtype)\n inputs = (x, x.t())\n dims = (0, 1)\n for xinp, d in product(inputs, dims):\n self.compare_with_numpy(lambda x: get_values(torchfn(x, d, False)), lambda x: reffn(x, d, keepdims=False), xinp)\n result = torchfn(xinp, d, False)\n if isinstance(result, tuple):\n v, i = result\n if d == 1:\n self.assertEqual(xinp[torch.arange(size), i], v, atol=0, rtol=0)\n else:\n self.assertEqual(xinp[i, torch.arange(size)], v, atol=0, rtol=0)\n # nan\n if dtype.is_floating_point:\n for index in (0, 4, 99):\n x = create_input((100,), device, dtype)\n x[index] = nan\n if not skip_indices:\n result = torchfn(x, 0)\n v = get_values(result)\n self.assertEqual(v, nan)\n if isinstance(result, tuple):\n i = result[1]\n self.assertEqual(i, index)\n self.assertEqual(torchfn(x), nan)\n\n @dtypesIfCPU(torch.float, torch.double, torch.long, torch.bool, torch.half)\n @dtypesIfCUDA(torch.half, torch.float, torch.long, torch.bool)\n @dtypes(torch.half, torch.float, torch.double)\n def test_max(self, device, dtype):\n self._test_minmax_helper(torch.max, np.amax, device, dtype)\n\n @dtypesIfCPU(torch.float, torch.double, torch.long, torch.bool, torch.half)\n @dtypesIfCUDA(torch.half, torch.float, torch.long, torch.bool)\n @dtypes(torch.half, torch.float, torch.double)\n def test_min(self, device, dtype):\n self._test_minmax_helper(torch.min, np.amin, device, dtype)\n\n @dtypesIfCPU(torch.half, torch.float, torch.double, torch.int, torch.long, torch.bool)\n @dtypesIfCUDA(torch.half, torch.float, torch.int, torch.long, torch.bool)\n @dtypes(torch.half, torch.float, torch.double)\n def test_amin(self, device, dtype):\n self._test_minmax_helper(torch.amin, np.amin, device, dtype)\n\n @dtypesIfCPU(torch.half, torch.float, torch.double, torch.int, torch.long, torch.bool)\n @dtypesIfCUDA(torch.half, torch.float, torch.int, torch.long, torch.bool)\n @dtypes(torch.float, torch.double)\n def test_amax(self, device, dtype):\n self._test_minmax_helper(torch.amax, np.amax, device, dtype)\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float, torch.double)\n @dtypesIfCUDA(torch.half, torch.float, torch.bfloat16)\n def test_aminmax(self, device, dtype):\n\n def _amin_wrapper(x, dim=None, keepdims=False):\n with self.assertWarnsOnceRegex(UserWarning, \"_aminmax is deprecated\"):\n if dim is None:\n return torch._aminmax(x)[0]\n else:\n return torch._aminmax(x, dim, keepdims)[0]\n\n def _amax_wrapper(x, dim=None, keepdims=False):\n with self.assertWarnsOnceRegex(UserWarning, \"_aminmax is deprecated\"):\n if dim is None:\n return torch._aminmax(x)[1]\n else:\n return torch._aminmax(x, dim, keepdims)[1]\n\n self._test_minmax_helper(_amin_wrapper, np.amin, device, dtype)\n self._test_minmax_helper(_amax_wrapper, np.amax, device, dtype)\n\n # TODO: bincount isn't a classic reduction -- maybe this test suite is\n # reductions and summary ops?\n def test_bincount(self, device):\n # negative input throws\n with self.assertRaisesRegex(RuntimeError, '1-d non-negative integral'):\n torch.bincount(torch.tensor([1, -1], device=device))\n # n-d input, with n > 1 throws\n with self.assertRaisesRegex(RuntimeError, '1-d non-negative integral'):\n torch.bincount(torch.tensor([[1, 2], [3, 4]], device=device))\n # floating input type throws\n with self.assertRaisesRegex(RuntimeError, 'not implemented'):\n torch.bincount(torch.tensor([1., 0.3], device=device))\n # minlength < 0 throws\n with self.assertRaisesRegex(RuntimeError, 'minlength should be >= 
0'):\n torch.bincount(torch.tensor([1, 3], device=device),\n torch.tensor([.2, .2], device=device),\n minlength=-1)\n # input and weights dim mismatch\n with self.assertRaisesRegex(RuntimeError, 'same length'):\n torch.bincount(torch.tensor([1, 0], device=device),\n torch.tensor([1., 0.3, 0.5], device=device))\n # 1-d input with no elements and default minlength\n self.assertEqual(torch.bincount(torch.tensor([], device=device, dtype=torch.long)),\n torch.zeros(0, dtype=torch.long, device=device))\n # 1-d input with no elements and specified minlength\n self.assertEqual(torch.bincount(torch.tensor([], device=device, dtype=torch.long), minlength=10),\n torch.zeros(10, dtype=torch.long, device=device))\n\n # test tensor method without weights\n long_counts = torch.tensor(\n [0, 3, 2, 1, 3], dtype=torch.uint8, device=device).bincount()\n self.assertEqual(\n torch.tensor([1, 1, 1, 2], dtype=torch.int64, device=device),\n long_counts)\n # test minlength functionality\n int_counts = torch.bincount(\n torch.tensor([1, 1, 1, 1], device=device), minlength=5)\n self.assertEqual(\n torch.tensor([0, 4, 0, 0, 0], dtype=torch.int64, device=device),\n int_counts)\n # test weights\n byte_counts = torch.bincount(\n torch.tensor([0, 1, 1, 1, 4], device=device),\n torch.tensor([.1, .2, .3, .4, .5], device=device))\n self.assertEqual(\n torch.tensor([0.1, 0.9, 0, 0, 0.5], device=device), byte_counts)\n byte_counts = torch.bincount(\n torch.tensor([0, 1, 1, 1, 4], device=device),\n torch.tensor([1, 2, 3, 4, 5], dtype=torch.int8, device=device))\n self.assertEqual(\n torch.tensor([1, 9, 0, 0, 5], device=device, dtype=torch.float64), byte_counts)\n # test non-contiguous inputs and weights\n inputs = torch.tensor([[0, 0], [3, 1], [2, 1], [1, 1], [3, 4]], device=device)\n weights = torch.tensor([[.1, 1], [.2, 2], [.3, 3], [.4, 4], [.5, 5]], device=device)\n for i in [0, 1]:\n assert not inputs[:, i].is_contiguous(), \"Inputs are supposed to be non-contiguous\"\n assert not weights[:, i].is_contiguous(), \"Weights are supposed to be non-contiguous\"\n # inputs are non-contiguous but weights are contiguous\n self.assertEqual(inputs[:, 0].bincount(), torch.tensor([1, 1, 1, 2]))\n # inputs and weights are non-contiguous\n self.assertEqual(\n inputs[:, 1].bincount(weights[:, 1]),\n torch.tensor([1, 9, 0, 0, 5], dtype=torch.float32))\n # weights are non-contiguous but inputs are contiguous\n self.assertEqual(inputs[:, 1].contiguous().bincount(weights[:, 1]),\n torch.tensor([1, 9, 0, 0, 5], dtype=torch.float32))\n\n # test bincount on non-contiguous slices\n all0s = torch.zeros((32, 2), dtype=torch.int64, device=device)\n self.assertEqual(all0s[:, 0].bincount(), torch.tensor([32]))\n\n all1s = torch.ones((32, 2), dtype=torch.int64, device=device)\n self.assertEqual(all1s[:, 0].bincount(), torch.tensor([0, 32]))\n\n # test large number of bins - global memory use\n big_exp = torch.zeros(10000000, device=device)\n big_exp[-1] = 50.0\n big_w = torch.tensor([.5] * 100, device=device)\n big_out = torch.tensor([9999999] * 100, device=device).bincount(big_w)\n self.assertEqual(big_exp, big_out)\n # test large input size\n big_exp = torch.zeros(2, device=device, dtype=torch.int64)\n big_exp[1] = 1000000\n big_out = torch.ones(1000000, dtype=torch.int8, device=device).bincount()\n self.assertEqual(big_exp, big_out)\n\n # TODO: how many var stability tests are there?\n def test_var_stability2(self, device):\n tensor = torch.FloatTensor([2281.5, 2281.25]).to(device)\n\n # Stability for inner dim\n self.assertEqual(tensor.var(0), 
0.03125)\n\n # General stability\n self.assertEqual(tensor.var(), 0.03125)\n\n # Stability for outer dimensions\n tensor = tensor.unsqueeze(1)\n self.assertEqual(tensor.var(0), 0.03125)\n\n @onlyCPU\n @dtypes(torch.bool, torch.double)\n def test_sum_all(self, device, dtype) -> None:\n def check_sum_all(tensor: torch.Tensor) -> None:\n pylist = tensor.reshape(-1).tolist()\n self.assertEqual(tensor.sum(), sum(pylist))\n\n if dtype != torch.bool:\n check_sum_all(torch.tensor([1, 2, 3, 4, 5], dtype=dtype, device=device))\n check_sum_all(torch.randn(200000, dtype=dtype, device=device))\n check_sum_all(torch.randn(2000, 2, dtype=dtype, device=device)[:, 0])\n else:\n check_sum_all(torch.tensor([True, False, True], dtype=torch.bool, device=device))\n\n def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn,\n memory_format, compare_data=True, default_is_preserve=False):\n\n assert(memory_format == torch.channels_last or memory_format == torch.channels_last_3d)\n\n # xc is a channels last tensor\n xc = input_generator_fn(device)\n # xc is not memory dense, but looks like channels last\n if memory_format == torch.channels_last:\n xc = xc[..., ::2, ::2]\n else:\n xc = xc[..., ::2, ::2, ::2]\n\n clone = transformation_fn(xc, memory_format=torch.preserve_format)\n self.assertFalse(clone.is_contiguous())\n self.assertTrue(clone.is_contiguous(memory_format=memory_format))\n self.assertFalse(xc.is_contiguous())\n self.assertFalse(xc.is_contiguous(memory_format=memory_format))\n if compare_data:\n self.assertEqual(xc, clone.to(xc))\n\n xc = input_generator_fn(device)\n clone = transformation_fn(xc, memory_format=torch.contiguous_format)\n self.assertTrue(clone.is_contiguous())\n self.assertFalse(clone.is_contiguous(memory_format=memory_format))\n if compare_data:\n self.assertEqual(xc, clone.to(xc))\n\n xc = input_generator_fn(device)\n clone = transformation_fn(xc)\n\n if default_is_preserve:\n self.assertFalse(clone.is_contiguous())\n self.assertTrue(clone.is_contiguous(memory_format=memory_format))\n else:\n self.assertTrue(clone.is_contiguous())\n self.assertFalse(clone.is_contiguous(memory_format=memory_format))\n if compare_data:\n self.assertEqual(xc, clone.to(xc))\n\n x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device)\n for _ in range(10):\n permutation = list(range(len(x.shape)))\n random.shuffle(permutation)\n x = x.permute(permutation)\n self.assertEqual(x.stride(), transformation_fn(x, memory_format=torch.preserve_format).stride())\n\n @onlyCPU\n @dtypes(torch.double)\n def test_sum_out(self, device, dtype: torch.dtype) -> None:\n x = torch.rand(100, 100, dtype=dtype, device=device)\n res1 = torch.sum(x, 1)\n res2 = torch.tensor((), dtype=dtype, device=device)\n torch.sum(x, 1, out=res2)\n self.assertEqual(res1, res2)\n x = torch.rand(100, 100, 100, dtype=dtype, device=device)\n res1 = x.sum(2).sum(1)\n res2 = torch.tensor((), dtype=dtype, device=device)\n torch.sum(x, (2, 1), out=res2)\n self.assertEqual(res1, res2)\n\n @onlyCUDA\n @dtypes(torch.float16, torch.float32)\n def test_prod_gpu(self, device, dtype):\n x = torch.tensor([2, 3, 6, 9, 8], dtype=dtype, device=device)\n\n # Check all combinations: fp16 input - fp16 output, fp16 input - fp32\n # output, fp32 input - fp16 output, fp32 input - fp32 output\n for dtype_output in [torch.float16, torch.float32]:\n result_expected = torch.tensor(2592, dtype=dtype_output, device=device)\n output = torch.prod(x, dtype=dtype_output)\n self.assertEqual(output, result_expected)\n\n output = 
x.prod(dtype=dtype_output)\n self.assertEqual(output, result_expected)\n\n @onlyCPU\n @dtypes(torch.float)\n def test_prod(self, device, dtype):\n x = torch.rand(100, 100, dtype=dtype, device=device)\n res1 = torch.prod(x, 1)\n res2 = torch.tensor((), dtype=dtype, device=device)\n torch.prod(x, 1, out=res2)\n self.assertEqual(res1, res2)\n\n def test_prod_bool(self, device):\n vals = [[True, True], [True, False], [False, False], []]\n for val in vals:\n result = torch.prod(torch.tensor(val, device=device), dtype=torch.bool).item()\n expect = np.prod(np.array(val), dtype=np.bool)\n self.assertEqual(result, expect)\n\n result = torch.prod(torch.tensor(val, device=device)).item()\n expect = np.prod(np.array(val))\n self.assertEqual(result, expect)\n\n @onlyCPU\n def test_max_mixed_devices(self, device):\n a = torch.randn(10, device=device)\n if torch.cuda.is_available():\n values = torch.randn(10).cuda()\n indices = torch.cuda.LongTensor()\n self.assertRaises(RuntimeError,\n lambda: torch.max(a, 0, out=(values, indices)))\n self.assertRaises(RuntimeError,\n lambda: torch.amax(a, 0, out=values))\n\n @onlyCPU\n def test_min_mixed_devices(self, device):\n a = torch.randn(10, device=device)\n if torch.cuda.is_available():\n values = torch.randn(10).cuda()\n indices = torch.cuda.LongTensor()\n self.assertRaises(RuntimeError,\n lambda: torch.min(a, 0, out=(values, indices)))\n self.assertRaises(RuntimeError,\n lambda: torch.amin(a, 0, out=values))\n\n # TODO: consider refactoring with bincount test\n def test_bucketization(self, device):\n values_1d = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9], device=device)\n values_3d = torch.tensor([[[1, 3, 5], [2, 4, 6]], [[1, 2, 3], [4, 5, 6]]], device=device)\n\n # simple 1d boundary and 3d input value\n boundaries = torch.tensor([1, 2, 3, 4, 5, 6], device=device)\n expected_result = torch.tensor([[[0, 2, 4], [1, 3, 5]], [[0, 1, 2], [3, 4, 5]]], device=device)\n output = torch.empty(2, 2, 3, device=device, dtype=torch.int64)\n self.assertEqual(torch.bucketize(values_3d, boundaries), expected_result)\n self.assertEqual(torch.bucketize(values_3d, boundaries, out=output), expected_result)\n expected_result = torch.tensor([[[1, 3, 5], [2, 4, 6]], [[1, 2, 3], [4, 5, 6]]], device=device)\n self.assertEqual(torch.bucketize(values_3d, boundaries, right=True), expected_result)\n self.assertEqual(torch.bucketize(values_3d, boundaries, out=output, right=True), expected_result)\n\n # simple float 1d boundary and 1d input with output int32 type\n for dtype in [torch.float32, torch.float16]:\n values_1d_float = values_1d.to(dtype)\n boundaries = torch.tensor([0.9, 1, 2, 2, 3, 3, 4, 4.1, 9, 9], device=device, dtype=dtype)\n expected_result = torch.tensor([1, 2, 4, 6, 8, 8, 8, 8, 8], device=device, dtype=torch.int32)\n self.assertEqual(torch.searchsorted(boundaries, values_1d_float, out_int32=True), expected_result)\n self.assertEqual(torch.bucketize(values_1d_float, boundaries, out_int32=True), expected_result)\n\n # multiple dimension input with 0 elements\n boundaries = torch.tensor([1, 2, 3, 4, 5, 6], device=device, dtype=torch.int64)\n values_0_el = torch.tensor([[[]]], device=device, dtype=torch.int64)\n expected_result = values_0_el.to(torch.int64)\n self.assertEqual(torch.searchsorted(boundaries, values_0_el), expected_result)\n self.assertEqual(torch.bucketize(values_0_el, boundaries), expected_result)\n\n # nan input\n values_nan = torch.tensor([1.0, float('nan'), 2.0, float('nan')], device=device, dtype=torch.float64)\n boundaries = torch.tensor([0.0, 1.0, 2.0, 
3.0], device=device, dtype=torch.float64)\n        expected_result = torch.tensor([1, 4, 2, 4], device=device)\n        self.assertEqual(torch.searchsorted(boundaries, values_nan), expected_result)\n        expected_result = torch.tensor([2, 4, 3, 4], device=device)\n        self.assertEqual(torch.searchsorted(boundaries, values_nan, right=True), expected_result)\n        self.assertEqual(torch.searchsorted(boundaries, values_nan, side='right'), expected_result)\n\n        # type promotion and non-contiguous tensors\n        values_3d_permute = values_3d.permute(2, 1, 0).to(torch.int32)\n        boundaries_permute = values_3d.permute(2, 1, 0).to(torch.float64)\n        expected_result = torch.tensor([[[0, 0], [0, 1]], [[2, 0], [0, 1]], [[2, 0], [0, 0]]], device=device)\n        if self.device_type != 'xla':\n            self.assertWarnsRegex(\n                UserWarning, \"tensor is non-contiguous\",\n                lambda: self.assertEqual(torch.searchsorted(boundaries_permute, values_3d_permute), expected_result))\n        else:\n            # All tensors in XLA are contiguous even after permute, so no warning message is generated on XLA\n            self.assertEqual(torch.searchsorted(boundaries_permute, values_3d_permute), expected_result)\n\n        # scalar type\n        boundaries = torch.tensor([1.5, 2.5, 3.5], device=device)\n        expected_result = torch.tensor(1, device=device)\n        self.assertEqual(torch.searchsorted(boundaries, 2), expected_result)\n        self.assertEqual(torch.bucketize(torch.tensor(2, device=device), boundaries), expected_result)\n        expected_result = torch.tensor(3, device=device)\n        scalar_tensor_nan = torch.tensor(float('nan'), device=device)\n        self.assertEqual(torch.searchsorted(boundaries, scalar_tensor_nan), expected_result)\n        self.assertEqual(torch.bucketize(float('nan'), boundaries, right=True), expected_result)\n\n        # invalid input dimensions\n        boundaries = torch.tensor([[1, 2, 3], [4, 5, 6]], device=device)\n        with self.assertRaisesRegex(\n                RuntimeError, \"first N-1 dimensions of boundaries tensor and input value tensor must match\"):\n            torch.searchsorted(boundaries, values_3d)\n        with self.assertRaisesRegex(\n                RuntimeError, \"boundaries tensor must be 1 dimension\"):\n            torch.bucketize(values_3d, boundaries)\n        with self.assertRaisesRegex(\n                RuntimeError, \"only when boundaries tensor dimension is 1\"):\n            torch.searchsorted(boundaries, 1)\n\n        # incompatible output tensor dtype\n        def test_output_dtype(dtype, is_int32):\n            output = values_1d.to(dtype)\n            with self.assertRaisesRegex(\n                    RuntimeError, \"output tensor's dtype is wrong\"):\n                torch.searchsorted(values_1d, values_1d, out=output, out_int32=is_int32)\n\n        test_output_dtype(torch.float32, False)\n        test_output_dtype(torch.int32, False)\n        test_output_dtype(torch.int64, True)\n\n        # invalid side argument\n        with self.assertRaisesRegex(RuntimeError, \"side can only be 'left' or 'right'\"):\n            torch.searchsorted(values_1d, values_1d, side='bad')\n\n        # invalid sorter argument, wrong size\n        with self.assertRaisesRegex(RuntimeError, \"boundary and sorter must have the same size\"):\n            sequence = torch.rand_like(values_1d, dtype=torch.float)\n            _, sorted_idx = torch.sort(sequence)\n            torch.searchsorted(sequence, values_1d, sorter=sorted_idx[:-1])\n\n        # invalid sorter argument, not of long dtype\n        with self.assertRaisesRegex(RuntimeError, \"sorter must be a tensor of long dtype\"):\n            sequence = torch.rand_like(values_1d, dtype=torch.float)\n            _, sorted_idx = torch.sort(sequence)\n            torch.searchsorted(sequence, values_1d, sorter=sorted_idx.to(torch.float32))\n\n        # scalar type bfloat16\n        if self.device_type == 'cpu':\n            def test_dtype_bfloat16(values_bf16=False, 
boundaries_bf16=False):\n                values_1d_float = values_1d.to(torch.float32)\n                boundaries = torch.tensor([0.9, 1, 2, 2, 3, 3, 4, 4.1, 9, 9], device=device, dtype=torch.float32)\n                if values_bf16:\n                    values_1d_float = values_1d_float.to(torch.bfloat16)\n                if boundaries_bf16:\n                    boundaries = boundaries.to(torch.bfloat16)\n                expected_result = torch.tensor([1, 2, 4, 6, 8, 8, 8, 8, 8], device=device, dtype=torch.int32)\n                self.assertEqual(torch.bucketize(values_1d_float, boundaries, out_int32=True), expected_result)\n\n            test_dtype_bfloat16(True, False)\n            test_dtype_bfloat16(False, True)\n            test_dtype_bfloat16(True, True)\n\n    @dtypes(*get_all_dtypes(include_bool=False, include_complex=False))\n    def test_nansum(self, device, dtype):\n        args = product(\n            (True, False),  # noncontiguous\n            (0, 1, None),  # dim\n        )\n        zero = torch.zeros((), device=device, dtype=dtype)\n\n        for noncontiguous, dim in args:\n            # Randomly scale the values\n            scale = random.randint(10, 100)\n            x = make_tensor((17, 17), device=device, dtype=dtype,\n                            low=-scale, high=scale, noncontiguous=noncontiguous)\n\n            if dtype.is_floating_point:\n                nan_mask = x < 0.2 * scale\n                x_nonan = torch.where(nan_mask, zero, x)\n                x[nan_mask] = np.nan\n            else:\n                x_nonan = x\n\n            dim_kwargs = {} if dim is None else {\"dim\": dim}\n            expect = torch.sum(x_nonan, **dim_kwargs)\n            actual = torch.nansum(x, **dim_kwargs)\n            self.assertEqual(expect, actual)\n\n    def _test_reduction_function_with_numpy(self, torch_func, np_func, device, dtype,\n                                            with_extremal=False, atol=None, rtol=None,\n                                            exact_dtype=True, with_keepdim=False):\n        # Test 0-d to 3-d tensors.\n        for ndims in range(0, 4):\n            shape = _rand_shape(ndims, min_size=5, max_size=10)\n            for n in range(ndims + 1):\n                for c in combinations(list(range(ndims)), n):\n                    for count_dim in permutations(c):\n                        # Generate Input.\n                        x = _generate_input(shape, dtype, device, with_extremal)\n\n                        if count_dim == ():\n                            # Default `dims=None` case\n                            self.compare_with_numpy(torch_func, np_func, x, device=None, dtype=None,\n                                                    atol=atol, rtol=rtol, exact_dtype=exact_dtype)\n                        else:\n                            # With `dims: tuple of ints` case\n                            if with_keepdim:\n                                torch_func_partial = partial(torch_func, keepdim=True, dim=count_dim)\n                                np_func_partial = partial(np_func, keepdims=True, axis=count_dim)\n                            else:\n                                torch_func_partial = partial(torch_func, dim=count_dim)\n                                np_func_partial = partial(np_func, axis=count_dim)\n                            self.compare_with_numpy(torch_func_partial, np_func_partial, x, device=None, dtype=None,\n                                                    atol=atol, rtol=rtol, exact_dtype=exact_dtype)\n\n    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +\n              get_all_complex_dtypes()))\n    def test_count_nonzero(self, device, dtype):\n        self._test_reduction_function_with_numpy(torch.count_nonzero, np.count_nonzero, device, dtype)\n        self._test_reduction_function_with_numpy(torch.count_nonzero, np.count_nonzero, device, dtype, True)\n\n    def _test_sum_reduction_vs_numpy(self, torch_fn, np_fn, device, dtype, with_keepdim=False, with_extremal=False):\n        def is_integral(dtype):\n            return dtype in get_all_int_dtypes()\n\n        # On Windows CI, the current version of `numpy` promotes all lower integer\n        # dtypes to int32 while `torch` promotes them to int64. 
Hence we skip checking\n        # the exact dtype.\n        # Reference: https://dr.pytorch.org/api/view-log-full?build_id=122051580\n        # PR: https://github.com/pytorch/pytorch/pull/38628#issuecomment-655905370\n        exact_dtype = not (IS_WINDOWS and is_integral(dtype))\n\n        if dtype == torch.uint8:\n            with self.assertRaises(TypeError):\n                self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype, with_extremal=with_extremal)\n        else:\n            # TODO: Investigate why the output is not close to numpy.\n            if dtype == torch.float16:\n                atol = 0.4\n                rtol = 1e-2\n            elif dtype == torch.float32:\n                atol = 7e-05\n                rtol = 3e-06\n            else:\n                # Default values\n                atol = None\n                rtol = None\n            self._test_reduction_function_with_numpy(torch_fn, np_fn, device, dtype,\n                                                     atol=atol, rtol=rtol, exact_dtype=exact_dtype,\n                                                     with_keepdim=with_keepdim, with_extremal=with_extremal)\n\n    @onlyNativeDeviceTypes\n    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False)))\n    def test_sum_vs_numpy(self, device, dtype):\n        self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype)\n        self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype, with_extremal=True)\n        self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype, with_keepdim=True)\n\n    @onlyNativeDeviceTypes\n    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False)))\n    def test_nansum_vs_numpy(self, device, dtype):\n        self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype)\n        self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype, with_extremal=True)\n        self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype, with_keepdim=True)\n\n    @dtypes(*(get_all_complex_dtypes()))\n    def test_nansum_complex(self, device, dtype):\n        x = torch.randn((3, 3, 3), device=device, dtype=dtype)\n        with self.assertRaisesRegex(RuntimeError, \"nansum does not support complex inputs\"):\n            torch.nansum(x)\n\n    def test_nansum_out_dtype(self, device):\n        dtypes = list(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False))\n        for inp_dtype, out_dtype in combinations(dtypes, 2):\n            shape = _rand_shape(random.randint(2, 5), min_size=5, max_size=10)\n            x = _generate_input(shape, inp_dtype, device, with_extremal=False)\n            torch_fn = partial(torch.nansum, dtype=out_dtype)\n            np_out_dtype = torch_to_numpy_dtype_dict[out_dtype]\n            np_fn = partial(np.nansum, dtype=np_out_dtype)\n            self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)\n\n    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False)))\n    def test_argminmax_multiple(self, device, dtype):\n        # Case: All Ones\n        t = torch.ones(3, 3, device=device, dtype=dtype)\n        self.compare_with_numpy(torch.argmax, np.argmax, t)\n        self.compare_with_numpy(torch.argmin, np.argmin, t)\n\n        # Case: With single `nan` present.\n        if dtype in get_all_fp_dtypes():\n            t[2, 2] = float('nan')\n            self.compare_with_numpy(torch.argmax, np.argmax, t)\n            self.compare_with_numpy(torch.argmin, np.argmin, t)\n\n        # Case: Randomly Generated Tensors\n        for ndims in range(1, 5):\n            shape = _rand_shape(ndims, min_size=5, max_size=10)\n            for with_extremal in [False, True]:\n                for contiguous in [False, True]:\n                    # Generate Input.\n                    x = _generate_input(shape, dtype, device, with_extremal)\n\n                    if dtype == torch.half:\n                        max_val = torch.max(x.to(torch.float))\n                        min_val = torch.min(x.to(torch.float))\n                    else:\n                        max_val = torch.max(x)\n                        min_val = torch.min(x)\n\n                    mask = torch.randn(x.shape) > 0.5\n                    x[mask] = torch.tensor(max_val + 1, dtype=dtype)\n\n                    mask = 
torch.randn(x.shape) > 0.5\n                    x[mask] = torch.tensor(min_val - 1, dtype=dtype)\n\n                    if not contiguous:\n                        x = x.T\n\n                    self.compare_with_numpy(torch.argmax, np.argmax, x, device=None, dtype=None)\n                    self.compare_with_numpy(torch.argmin, np.argmin, x, device=None, dtype=None)\n\n                    # Verify indices returned by max and min.\n                    if dtype != torch.half:\n                        rand_dim = random.randint(0, ndims - 1)\n                        self.compare_with_numpy(lambda x: torch.max(x, dim=rand_dim)[1],\n                                                lambda x: np.argmax(x, axis=rand_dim), x, device=None, dtype=None)\n                        self.compare_with_numpy(lambda x: torch.min(x, dim=rand_dim)[1],\n                                                lambda x: np.argmin(x, axis=rand_dim), x, device=None, dtype=None)\n\n        def verify_against_numpy(t):\n            # Argmax\n            torch_fn = partial(torch.argmax, dim=1)\n            np_fn = partial(np.argmax, axis=1)\n            self.compare_with_numpy(torch_fn, np_fn, t)\n            # Non-contiguous input\n            self.compare_with_numpy(torch_fn, np_fn, t.T)\n\n            # Verify indices returned by max.\n            if dtype != torch.half:\n                self.compare_with_numpy(lambda x: torch.max(x, dim=1)[1], np_fn, x, device=None, dtype=None)\n                self.compare_with_numpy(lambda x: torch.max(x, dim=1)[1], np_fn, x.T, device=None, dtype=None)\n\n            # Argmin\n            torch_fn = partial(torch.argmin, dim=1)\n            np_fn = partial(np.argmin, axis=1)\n            self.compare_with_numpy(torch_fn, np_fn, t)\n            # Non-contiguous input\n            self.compare_with_numpy(torch_fn, np_fn, t.T)\n\n            # Verify indices returned by min.\n            if dtype != torch.half:\n                self.compare_with_numpy(lambda x: torch.min(x, dim=1)[1], np_fn, x, device=None, dtype=None)\n                self.compare_with_numpy(lambda x: torch.min(x, dim=1)[1], np_fn, x.T, device=None, dtype=None)\n\n        # Case: Sample from issue: https://github.com/pytorch/pytorch/issues/41998\n        t = torch.tensor([[1, 5],\n                          [2, 10],\n                          [3, 3]], device=device, dtype=dtype)\n        verify_against_numpy(t)\n\n        # Case: Sample from issue: https://github.com/pytorch/pytorch/issues/41998\n        t = torch.tensor([[1, 5],\n                          [2, 10],\n                          [0, 0]], device=device, dtype=dtype)\n        verify_against_numpy(t)\n\n    @dtypes(*(get_all_dtypes(include_half=True, include_bfloat16=False,\n                             include_bool=True, include_complex=True)))\n    def test_all_any_vs_numpy(self, device, dtype):\n        # Note [all, any uint8 compatibility]: these functions normally return bool tensors; however,\n        # for compatibility reasons, for `uint8` input they return a Tensor of the same dtype `uint8`.\n        # Reference: https://github.com/pytorch/pytorch/pull/47878#issuecomment-747108561\n        exact_dtype = dtype != torch.uint8\n\n        def _test_all_any(x):\n            self.compare_with_numpy(torch.all, np.all, x)\n            self.compare_with_numpy(torch.any, np.any, x)\n\n        def _test_all_any_with_dim(x, dim):\n            torch_fn = partial(torch.all, dim=dim)\n            np_fn = partial(np.all, axis=dim)\n            self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)\n\n            torch_fn = partial(torch.any, dim=dim)\n            np_fn = partial(np.any, axis=dim)\n            self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)\n\n        def _test_out_variant(x, dim):\n            out = torch.empty_like(x)\n            if dtype == torch.bool or dtype == torch.uint8:\n                expected = torch.all(x, dim)\n                torch.all(x, dim, out=out)\n                self.assertEqual(expected, out)\n\n                expected = torch.any(x, dim)\n                torch.any(x, dim, out=out)\n                self.assertEqual(expected, out)\n            else:\n                with self.assertRaisesRegex(RuntimeError, \"all only supports bool tensor for result, got\"):\n                    torch.all(x, dim, out=out)\n\n                with self.assertRaisesRegex(RuntimeError, \"any only supports bool tensor for result, got\"):\n                    torch.any(x, dim, out=out)\n\n        def _test_all_any_with_dim_keepdim(x, dim, keepdim):\n            torch_fn = partial(torch.all, dim=dim, 
keepdim=keepdim)\n            np_fn = partial(np.all, axis=dim, keepdims=keepdim)\n            self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)\n\n            torch_fn = partial(torch.any, dim=dim, keepdim=keepdim)\n            np_fn = partial(np.any, axis=dim, keepdims=keepdim)\n            self.compare_with_numpy(torch_fn, np_fn, x, exact_dtype=exact_dtype)\n\n        def _test_output_dtype(x):\n            # This test will fail once the functions return bool output\n            # for uint8 input.\n            expected_dtype = torch.uint8 if dtype == torch.uint8 else torch.bool\n            self.assertEqual(torch.all(x).dtype, expected_dtype)\n            self.assertEqual(torch.any(x).dtype, expected_dtype)\n\n            self.assertEqual(torch.all(x, dim=0).dtype, expected_dtype)\n            self.assertEqual(torch.any(x, dim=0).dtype, expected_dtype)\n\n        for ndim in range(5):\n            shape = _rand_shape(ndim, 1, 5)\n            x = _generate_input(shape, dtype, device, with_extremal=False)\n            _test_all_any(x)\n            _test_all_any(x.T)\n            _test_all_any(x[..., ::2])\n\n            x = _generate_input(shape, dtype, device, with_extremal=True)\n            _test_all_any(x)\n            _test_all_any(x.T)\n            _test_all_any(x[..., ::2])\n\n            x = torch.zeros_like(x)\n            _test_all_any(x)\n            _test_all_any(x.T)\n            _test_all_any(x[..., ::2])\n\n            x = torch.ones_like(x)\n            _test_all_any(x)\n            _test_all_any(x.T)\n            _test_all_any(x[..., ::2])\n            _test_output_dtype(x)\n            for dim in range(ndim):\n                x = _generate_input(shape, dtype, device, with_extremal=False)\n                _test_all_any_with_dim(x, dim)\n                _test_all_any_with_dim(x.T, dim)\n                _test_all_any_with_dim(x[..., ::2], dim)\n                _test_out_variant(x, dim)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=True)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=False)\n\n                x = _generate_input(shape, dtype, device, with_extremal=True)\n                _test_all_any_with_dim(x, dim)\n                _test_all_any_with_dim(x.T, dim)\n                _test_all_any_with_dim(x[..., ::2], dim)\n                _test_out_variant(x, dim)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=True)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=False)\n\n                x = torch.zeros_like(x)\n                _test_all_any_with_dim(x, dim)\n                _test_all_any_with_dim(x.T, dim)\n                _test_all_any_with_dim(x[..., ::2], dim)\n                _test_out_variant(x, dim)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=True)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=False)\n\n                x = torch.ones_like(x)\n                _test_all_any_with_dim(x, dim)\n                _test_all_any_with_dim(x.T, dim)\n                _test_all_any_with_dim(x[..., ::2], dim)\n                _test_out_variant(x, dim)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=True)\n                _test_all_any_with_dim_keepdim(x, dim, keepdim=False)\n\n    # TODO: part of this test covers torch.norm, which should be covered by test_linalg\n    @onlyNativeDeviceTypes\n    def test_repeated_dim(self, device):\n        ops = [torch.mean, torch.sum, torch.nansum, torch.std, torch.logsumexp, torch.std, torch.var,\n               torch.amin, torch.amax, torch.norm]\n        x = torch.randn(3, 3, 3, 3, device=device)\n\n        error_msg = r'appears multiple times in the list of dims'\n        norm_error_msg = r'Expected dims to be different, got'\n        for op in ops:\n            for dim in [(0, 0), (0, -4)]:\n                e_msg = norm_error_msg if op == torch.norm else error_msg\n                with self.assertRaisesRegex(RuntimeError, e_msg):\n                    op(x, dim=dim)\n\n    # TODO: update this test to compare against NumPy\n    @onlyCUDA\n    def test_var(self, device):\n        cpu_tensor = torch.randn(2, 3, 3)\n        device_tensor = cpu_tensor.to(device)\n        self.assertEqual(device_tensor.var(), cpu_tensor.var())\n        self.assertEqual(device_tensor.var(1), cpu_tensor.var(1))\n        self.assertEqual(device_tensor.var(2), cpu_tensor.var(2))\n        self.assertEqual(device_tensor.std(), cpu_tensor.std())\n        
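# additionally cover dim 0, which the other per-dim checks skip\n        self.assertEqual(device_tensor.std(0), cpu_tensor.std(0))\n        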
self.assertEqual(device_tensor.std(1), cpu_tensor.std(1))\n        self.assertEqual(device_tensor.std(2), cpu_tensor.std(2))\n\n        cpu_tensor = torch.randn(100)\n        device_tensor = cpu_tensor.to(device)\n        self.assertEqual(device_tensor.var(), cpu_tensor.var())\n\n    # TODO: update this test to compare against NumPy\n    @onlyCUDA\n    def test_var_large_input(self, device):\n        # Large, not-nice input\n        cpu_tensor = torch.randn(2 * 32 * 1024 + 1, 2, 67)\n        device_tensor = cpu_tensor.to(device)\n\n        self.assertEqual(cpu_tensor.var(2), device_tensor.var(2))\n\n    # TODO: update this to compare against NumPy instead of CPU\n    @onlyCUDA\n    @dtypes(torch.double)\n    def test_sum_noncontig(self, device, dtype):\n        x = torch.randn(1, 75, 57, 20, dtype=dtype, device=device).permute(0, 3, 1, 2)\n        y = x.cpu()\n        self.assertEqual(x.sum().cpu(), y.sum())\n        self.assertEqual(x.sum(dim=(-1, -2)).cpu(), y.sum(dim=(-1, -2)))\n        self.assertEqual(x.sum(dim=(1, 3)).cpu(), y.sum(dim=(1, 3)))\n\n    # TODO: update this to compare against NumPy instead of CPU\n    @onlyCUDA\n    def test_min_max_nan(self, device):\n        tests = [(lambda x: x.min(), 'min'),\n                 (lambda x: x.max(), 'max'),\n                 (lambda x: x.amin(), 'amin'),\n                 (lambda x: x.amax(), 'amax'),\n                 (lambda x: x.min(0).values, 'min_dim'),\n                 (lambda x: x.max(0).values, 'max_dim'),\n                 (lambda x: x.amin(0), 'amin_dim'),\n                 (lambda x: x.amax(0), 'amax_dim')]\n        for f, name in tests:\n            a = torch.arange(25.0).view(5, 5)\n            a[2, 2] = nan\n            actual = f(a.to(device)).cpu()\n            expected = f(a).cpu()\n            self.assertEqual(torch.isnan(actual), torch.isnan(expected), msg='nans for {}'.format(name))\n            self.assertEqual(actual[~torch.isnan(actual)],\n                             expected[~torch.isnan(expected)], msg='nans for {}'.format(name))\n\n    # TODO: make this test generic using OpInfos\n    @onlyCUDA\n    def test_sum_cpu_device_mismatch(self, device):\n        x = torch.randn(20, dtype=torch.float32, device=device)\n        y = torch.randn(1, dtype=torch.float32)\n\n        err_string = f\"Expected out tensor to have device {device}, but got cpu instead\"\n\n        with self.assertRaisesRegex(RuntimeError, err_string):\n            torch.sum(x, dim=[0], dtype=torch.float32, out=y)\n\n        # tests half to float promotion\n        if self.device_type == 'cuda':\n            x = x.half()\n            with self.assertRaisesRegex(RuntimeError, err_string):\n                torch.sum(x, dim=[0], dtype=torch.float32, out=y)\n\n    # The assert for an illegal dtype would not be raised on XLA\n    @onlyNativeDeviceTypes\n    def test_minmax_illegal_dtype(self, device):\n        x = torch.randn(5, 5, dtype=torch.float32, device=device)\n        valid_values = torch.empty(5, dtype=torch.float32, device=device)\n        valid_indices = torch.empty(5, dtype=torch.long, device=device)\n        illegal_values = torch.empty(5, dtype=torch.int, device=device)\n        illegal_indices = torch.empty(5, dtype=torch.double, device=device)\n        torch.max(x, dim=0, out=(valid_values, valid_indices))\n        torch.min(x, dim=0, out=(valid_values, valid_indices))\n        torch.amax(x, dim=0, out=valid_values)\n        torch.amin(x, dim=0, out=valid_values)\n        rmsg = r'scalar type|dtype'\n        with self.assertRaisesRegex(RuntimeError, rmsg):\n            torch.max(x, dim=0, out=(illegal_values, valid_indices))\n        with self.assertRaisesRegex(RuntimeError, rmsg):\n            torch.min(x, dim=0, out=(illegal_values, valid_indices))\n        with self.assertRaisesRegex(RuntimeError, rmsg):\n            torch.amax(x, dim=0, out=illegal_values)\n        with self.assertRaisesRegex(RuntimeError, rmsg):\n            torch.amin(x, dim=0, out=illegal_values)\n        with self.assertRaisesRegex(RuntimeError, rmsg):\n            torch.max(x, dim=0, out=(valid_values, illegal_indices))\n        with self.assertRaisesRegex(RuntimeError, 
rmsg):\n torch.min(x, dim=0, out=(valid_values, illegal_indices))\n with self.assertRaisesRegex(RuntimeError, rmsg):\n torch.max(x, dim=0, out=(illegal_values, illegal_indices))\n with self.assertRaisesRegex(RuntimeError, rmsg):\n torch.min(x, dim=0, out=(illegal_values, illegal_indices))\n\n @dtypes(*get_all_dtypes(include_bool=False, include_complex=False))\n def test_dim_arg_reduction_scalar(self, device, dtype):\n example = 4.0\n\n x = torch.tensor(example, device=device, dtype=dtype)\n self.assertEqual(x.argmax().item(), 0)\n self.assertEqual(x.argmax(dim=None).item(), 0)\n self.assertEqual(x.argmax(dim=0).item(), 0)\n self.assertEqual(x.argmax(dim=0, keepdim=True), torch.tensor(0, dtype=torch.int64))\n\n x = torch.tensor(example, device=device, dtype=dtype)\n self.assertEqual(x.argmin().item(), 0)\n self.assertEqual(x.argmin(dim=None).item(), 0)\n self.assertEqual(x.argmin(dim=0).item(), 0)\n self.assertEqual(x.argmin(dim=0, keepdim=True), torch.tensor(0, dtype=torch.int64))\n\n\n @precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2})\n @dtypes(*(set(get_all_dtypes(include_bool=False, include_complex=False)) - {torch.uint8}))\n def test_dim_reduction(self, device, dtype):\n example = [[-1, 2, 1], [5, 3, 6]]\n\n sum_dtype = {\n torch.bfloat16: torch.bfloat16,\n torch.double: torch.double,\n torch.float: torch.float,\n torch.half: torch.half,\n torch.int64: torch.int64,\n torch.int32: torch.int64,\n torch.int16: torch.int64,\n torch.int8: torch.int64\n }\n\n # This won't test for 256bit instructions, since we usually\n # only work on 1 cacheline (512bit) at a time and these\n # examples aren't big enough to trigger that.\n x = torch.tensor(example, device=device, dtype=dtype)\n self.assertEqual(x.sum().item(), 16)\n self.assertEqual(x.sum(0), torch.tensor([4, 5, 7], dtype=sum_dtype[dtype]))\n self.assertEqual(x.sum(1), torch.tensor([2, 14], dtype=sum_dtype[dtype]))\n y = torch.tensor(example, device=device, dtype=sum_dtype[dtype])\n torch.sum(x, 0, out=y)\n self.assertEqual(x.sum(0), y)\n\n # Mean not supported for Int types\n if dtype in [torch.float16, torch.bfloat16, torch.float32, torch.float64]:\n x = torch.tensor(example, device=device, dtype=dtype)\n self.assertEqual(x.mean().item(), 16.0 / 6)\n self.assertEqual(x.mean(0), torch.tensor([2.0, 2.5, 7.0 / 2], dtype=dtype))\n self.assertEqual(x.mean(1), torch.tensor([2.0 / 3, 14.0 / 3], dtype=dtype))\n self.assertEqual(x.mean(), x.mean((0, 1)))\n\n prod_dtype = {\n torch.bfloat16: torch.bfloat16,\n torch.double: torch.double,\n torch.float: torch.float,\n torch.float16: torch.float16,\n torch.int64: torch.int64,\n torch.int32: torch.int64,\n torch.int16: torch.int64,\n torch.int8: torch.int64,\n }\n\n # prod is not supported for float16 & bfloat16 on CPU\n if not (self.device_type == 'cpu' and dtype in [torch.float16, torch.bfloat16]):\n x = torch.tensor(example, device=device, dtype=dtype)\n self.assertEqual(x.prod().item(), -180)\n self.assertEqual(x.prod(0), torch.tensor([-5, 6, 6], dtype=prod_dtype[dtype]))\n self.assertEqual(x.prod(1), torch.tensor([-2, 90], dtype=prod_dtype[dtype]))\n\n x = torch.tensor(example, device=device, dtype=dtype)\n\n self.assertEqual(x.min().item(), -1)\n self.assertEqual(x.argmin().item(), 0)\n\n # TODO: torch.min does not support the same operation as argmin\n # for the same case, should we enable it?\n self.assertEqual(x.argmin(dim=None).item(), 0)\n\n self.assertEqual(x.min(0), (torch.tensor([-1, 2, 1], dtype=dtype),\n torch.tensor([0, 0, 0], dtype=torch.int64)))\n 
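# amin returns only the values; unlike min there are no indices to compare\n        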
self.assertEqual(x.amin(0), torch.tensor([-1, 2, 1], dtype=dtype))\n self.assertEqual(x.argmin(0), torch.tensor([0, 0, 0], dtype=torch.int64))\n\n self.assertEqual(x.min(dim=0, keepdim=True), (torch.tensor([[-1, 2, 1]], dtype=dtype),\n torch.tensor([[0, 0, 0]], dtype=torch.int64)))\n self.assertEqual(x.amin(dim=0, keepdim=True), torch.tensor([[-1, 2, 1]], dtype=dtype))\n self.assertEqual(x.argmin(dim=0, keepdim=True), torch.tensor([[0, 0, 0]], dtype=torch.int64))\n\n self.assertEqual(x.min(1), (torch.tensor([-1, 3], dtype=dtype),\n torch.tensor([0, 1], dtype=torch.int64)))\n self.assertEqual(x.amin(1), torch.tensor([-1, 3], dtype=dtype))\n self.assertEqual(x.argmin(1), torch.tensor([0, 1], dtype=torch.int64))\n\n self.assertEqual(x.min(dim=1, keepdim=True), (torch.tensor([[-1], [3]], dtype=dtype),\n torch.tensor([[0], [1]], dtype=torch.int64)))\n self.assertEqual(x.amin(dim=1, keepdim=True), torch.tensor([[-1], [3]], dtype=dtype))\n self.assertEqual(x.argmin(dim=1, keepdim=True), torch.tensor([[0], [1]], dtype=torch.int64))\n\n # test that non-contiguous tensors work\n self.assertEqual(x[:, :2].min().item(), -1)\n self.assertEqual(x[:, :2].amin().item(), -1)\n self.assertEqual(x[:, :2].argmin().item(), 0)\n\n x = torch.tensor(example, device=device, dtype=dtype)\n\n self.assertEqual(x.max().item(), 6)\n self.assertEqual(x.amax().item(), 6)\n self.assertEqual(x.argmax().item(), 5)\n\n self.assertEqual(x.max(0), (torch.tensor([5, 3, 6], dtype=dtype),\n torch.tensor([1, 1, 1], dtype=torch.int64)))\n self.assertEqual(x.amax(0), torch.tensor([5, 3, 6], dtype=dtype))\n self.assertEqual(x.argmax(dim=0), torch.tensor([1, 1, 1], dtype=torch.int64))\n\n self.assertEqual(x.max(dim=0, keepdim=True), (torch.tensor([[5, 3, 6]], dtype=dtype),\n torch.tensor([[1, 1, 1]], dtype=torch.int64)))\n self.assertEqual(x.amax(dim=0, keepdim=True), torch.tensor([[5, 3, 6]], dtype=dtype))\n self.assertEqual(x.argmax(dim=0, keepdim=True), torch.tensor([[1, 1, 1]], dtype=torch.int64))\n\n self.assertEqual(x.max(1), (torch.tensor([2, 6], dtype=dtype),\n torch.tensor([1, 2], dtype=torch.int64)))\n self.assertEqual(x.amax(1), torch.tensor([2, 6], dtype=dtype))\n self.assertEqual(x.argmax(dim=1), torch.tensor([1, 2], dtype=torch.int64))\n\n self.assertEqual(x.max(1, keepdim=True), (torch.tensor([[2], [6]], dtype=dtype),\n torch.tensor([[1], [2]], dtype=torch.int64)))\n self.assertEqual(x.amax(1, keepdim=True), torch.tensor([[2], [6]], dtype=dtype))\n self.assertEqual(x.argmax(dim=1, keepdim=True), torch.tensor([[1], [2]], dtype=torch.int64))\n\n # test that non-contiguous tensors work\n self.assertEqual(x[:, :2].max().item(), 5)\n self.assertEqual(x[:, :2].amax().item(), 5)\n self.assertEqual(x[:, :2].argmax().item(), 2)\n\n dim_red_fns = [\n \"mean\", \"median\", \"nanmedian\", \"mode\", \"norm\", \"prod\",\n \"std\", \"sum\", \"var\", \"max\", \"min\", \"amax\", \"amin\"]\n\n def normfn_attr(t, dim, keepdim=False, out=None):\n attr = torch.norm\n return attr(t, 2, dim, keepdim, out=out)\n\n for fn_name in dim_red_fns:\n fn_attr = getattr(torch, fn_name) if fn_name != \"norm\" else normfn_attr\n\n def fn(x, dim, keepdim=False, out=None):\n ans = fn_attr(x, dim, keepdim=keepdim, out=out)\n return ans if not isinstance(ans, tuple) else ans[0]\n\n def fn_tuple(x, dim, keepdim=False, out=None):\n return fn_attr(x, dim, keepdim=keepdim, out=out)\n\n def test_multidim(x, dim):\n self.assertEqual(fn(x, dim).unsqueeze(dim), fn(x, dim, keepdim=True))\n self.assertEqual(x.ndimension() - 1, fn(x, dim).ndimension())\n 
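# keepdim=True should preserve the reduced dimension with size 1\n                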
self.assertEqual(x.ndimension(), fn(x, dim, keepdim=True).ndimension())\n\n # general case\n x = torch.randn(3, 4, 5, device=device)\n dim = random.randint(0, 2)\n test_multidim(x, dim)\n\n # check 1-d behavior\n x = torch.randn(1, device=device)\n dim = 0\n self.assertEqual(fn(x, dim).shape, ())\n self.assertEqual(fn(x, dim, keepdim=True).shape, (1,))\n\n # check reducing of a singleton dimension\n dims = [3, 4, 5]\n singleton_dim = random.randint(0, 2)\n dims[singleton_dim] = 1\n x = torch.randn(dims, device=device)\n test_multidim(x, singleton_dim)\n\n # check reducing with output kwargs\n if fn_name in ['median', 'nanmedian', 'mode', 'max', 'min']:\n y = torch.randn(5, 3, device=device)\n values = torch.randn(5, 3, device=device)\n indices = torch.zeros(5, 3, device=device).long() - 1\n fn_tuple(y, 1, keepdim=False, out=(values[:, 1], indices[:, 1]))\n values_expected, indices_expected = fn_tuple(y, 1, keepdim=False)\n self.assertEqual(values[:, 1], values_expected,\n msg='{} values with out= kwarg'.format(fn_name))\n self.assertEqual(indices[:, 1], indices_expected,\n msg='{} indices with out= kwarg'.format(fn_name))\n continue\n\n x = torch.randn(5, 3, device=device)\n y = torch.randn(5, 3, device=device)\n fn(y, 1, keepdim=False, out=x[:, 1])\n expected = fn(y, 1, keepdim=False)\n self.assertEqual(x[:, 1], expected, msg='{} with out= kwarg'.format(fn_name))\n\n @onlyCUDA\n @largeTensorTest('10GB')\n def test_reduction_split(self, device):\n # Test reduction when there is a 32bit-indexing split\n # https://github.com/pytorch/pytorch/issues/37583\n input_ = torch.randn(5, 14400, 14400, device=device)\n result = input_.sum(dim=0)\n expect = input_[0] + input_[1] + input_[2] + input_[3] + input_[4]\n self.assertEqual(result, expect)\n\n @onlyCUDA\n @dtypes(torch.half, torch.float, torch.double, torch.bfloat16)\n def test_reduction_vectorize_along_input_corner(self, device, dtype):\n # 1D case: sum\n size = 1024 * 1024 * 64 + 3\n shift = 1\n x = torch.zeros(size, dtype=dtype, device=device)\n y = x[shift:]\n for i in range(100):\n x.zero_()\n x[i] = 1\n self.assertEqual(x.sum(), 1.0)\n if i < shift:\n self.assertEqual(y.sum(), 0.0)\n else:\n self.assertEqual(y.sum(), 1.0)\n for i in range(1, 100):\n x.zero_()\n x[-i] = 1\n self.assertEqual(x.sum(), 1.0)\n self.assertEqual(y.sum(), 1.0)\n # 1D case: argmax\n size = 1024 * 1024 * 64 + 3\n shift = 1\n ysize = size - shift\n x = torch.zeros(size, dtype=dtype, device=device)\n y = x[shift:]\n for i in range(100):\n x.zero_()\n x[i] = 1\n self.assertEqual(x.argmax().item(), i)\n if i >= shift:\n self.assertEqual(y.argmax().item(), i - shift)\n for i in range(1, 100):\n x.zero_()\n x[-i] = 1\n self.assertEqual(x.argmax().item(), size - i)\n self.assertEqual(y.argmax().item(), ysize - i)\n # 2D case: sum\n size = (7, 1024 * 1024 + 3)\n x = torch.zeros(size, dtype=dtype, device=device)\n for i in range(100):\n x.zero_()\n for j in range(7):\n x[j][i] = j\n xs = x.sum(dim=-1)\n for j in range(7):\n self.assertEqual(xs[j].item(), float(j))\n for i in range(100):\n x.zero_()\n for j in range(7):\n x[j][-i] = j\n xs = x.sum(dim=-1)\n for j in range(7):\n self.assertEqual(xs[j].item(), float(j))\n # 2D case: max/argmax\n size = (7, 1024 * 1024 + 3)\n x = torch.zeros(size, dtype=dtype, device=device)\n for i in range(100):\n x.zero_()\n for j in range(7):\n x[j][i] = j + 1\n xs1 = x.argmax(dim=-1)\n xs2 = x.max(dim=-1).indices\n for j in range(7):\n self.assertEqual(xs1[j].item(), i)\n self.assertEqual(xs2[j].item(), i)\n for i in range(1, 100):\n 
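# same probes as above, but measured from the end of each row via negative indexing\n            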
x.zero_()\n            for j in range(7):\n                x[j][-i] = j + 1\n            xs1 = x.argmax(dim=-1)\n            xs2 = x.max(dim=-1).indices\n            for j in range(7):\n                self.assertEqual(xs1[j].item(), size[1] - i)\n                self.assertEqual(xs2[j].item(), size[1] - i)\n        # 2D case: min/argmin\n        size = (7, 1024 * 1024 + 3)\n        x = torch.zeros(size, dtype=dtype, device=device)\n        for i in range(100):\n            x.zero_()\n            for j in range(7):\n                x[j][i] = -(j + 1)\n            xs1 = x.argmin(dim=-1)\n            xs2 = x.min(dim=-1).indices\n            for j in range(7):\n                self.assertEqual(xs1[j].item(), i)\n                self.assertEqual(xs2[j].item(), i)\n        for i in range(1, 100):\n            x.zero_()\n            for j in range(7):\n                x[j][-i] = -(j + 1)\n            xs1 = x.argmin(dim=-1)\n            xs2 = x.min(dim=-1).indices\n            for j in range(7):\n                self.assertEqual(xs1[j].item(), size[1] - i)\n                self.assertEqual(xs2[j].item(), size[1] - i)\n\n    @onlyCUDA\n    @dtypes(torch.half, torch.float, torch.double, torch.bfloat16)\n    def test_reduction_vectorize_along_output(self, device, dtype):\n        def run_test(input_):\n            M, N = input_.shape\n            input_.zero_()\n            for i in range(min(M, N)):\n                input_[i][i] = 1\n            output1 = input_.argmax(dim=0)\n            output2 = input_.sum(dim=0)\n            for i in range(min(M, N)):\n                self.assertEqual(output1[i], i)\n                self.assertEqual(output2[i], 1)\n        # vec 4\n        run_test(torch.zeros(64, 64, dtype=dtype, device=device))\n        # vec 2\n        run_test(torch.zeros(64 * 64 + 2, dtype=dtype, device=device)[2:].view(64, 64))\n        run_test(torch.zeros(64, 62, dtype=dtype, device=device))\n        run_test(torch.zeros(64, 2, dtype=dtype, device=device))\n        # vec 1\n        run_test(torch.zeros(64 * 64 + 1, dtype=dtype, device=device)[1:].view(64, 64))\n        run_test(torch.zeros(64, 61, dtype=dtype, device=device))\n        run_test(torch.zeros(64, 1, dtype=dtype, device=device))\n\n    @onlyCUDA\n    def test_argminmax_large_axis(self, device):\n        # Regression test for gh-32863\n        x = torch.zeros(2**31, device=device, dtype=torch.int8)\n        x[-1] = 1\n        self.assertEqual(x.argmax(0), x.shape[0] - 1)\n        self.assertEqual(x.max(0).indices, x.shape[0] - 1)\n        x[-1] = -1\n        self.assertEqual(x.argmin(0), x.shape[0] - 1)\n        self.assertEqual(x.min(0).indices, x.shape[0] - 1)\n\n    def test_argminmax_axis_with_dim_one(self, device):\n        # See: https://github.com/pytorch/pytorch/issues/38922\n        n = 32768\n        x = torch.zeros(1, n)\n        self.assertEqual(x.argmax(dim=0), torch.zeros(n, dtype=torch.int64))\n        self.assertEqual(x.argmin(dim=0), torch.zeros(n, dtype=torch.int64))\n\n        self.assertEqual(x.argmax(dim=-2), torch.zeros(n, dtype=torch.int64))\n        self.assertEqual(x.argmin(dim=-2), torch.zeros(n, dtype=torch.int64))\n\n        self.assertEqual(x.argmax(dim=0, keepdim=True), torch.zeros(1, n, dtype=torch.int64))\n        self.assertEqual(x.argmin(dim=0, keepdim=True), torch.zeros(1, n, dtype=torch.int64))\n\n        self.assertEqual(x.argmax(dim=-2, keepdim=True), torch.zeros(1, n, dtype=torch.int64))\n        self.assertEqual(x.argmin(dim=-2, keepdim=True), torch.zeros(1, n, dtype=torch.int64))\n\n    @dtypes(torch.int, torch.long, torch.float, torch.double)\n    @dtypesIfCUDA(torch.int, torch.long, torch.half, torch.float, torch.double)\n    def test_median_real_values(self, device, dtype):\n        # Generate random 0-3D sizes\n        sizes = [random.sample(range(1, 32), i) for i in range(4) for _ in range(2)]\n        for size in sizes:\n            # Create random input tensor\n            t = torch.randn(size, device=device).type(dtype)\n            t_numpy = t.cpu().numpy()\n            res = t.median()\n            self.assertEqual(res, t.nanmedian())\n            k = int((t.numel() - 1) / 2)\n            self.assertEqual(res, t.view(-1).sort()[0][k])\n            if t.numel() % 2 == 1:\n                # We can only test against numpy for odd reductions because numpy\n                # 
returns the mean of the two medians and torch returns the lower\n                self.assertEqual(res.cpu().numpy(), np.median(t_numpy))\n            for dim in range(t.ndim):\n                res = t.median(dim, True)\n                self.assertEqual(res, t.nanmedian(dim, True))\n                size = t.size(dim) if t.ndim > 0 else 1\n                k = int((size - 1) / 2)\n                self.assertEqual(res[0], (t.sort(dim)[0]).select(dim, k).unsqueeze_(dim))\n                self.assertEqual(res[0], t.gather(dim, res[1]))\n                if size % 2 == 1:\n                    # We can only test against numpy for odd reductions because numpy\n                    # returns the mean of the two medians and torch returns the lower\n                    self.assertEqual(res[0].cpu().numpy(), np.median(t_numpy, dim, keepdims=True), exact_dtype=False)\n\n    @dtypes(torch.float, torch.double)\n    @dtypesIfCUDA(torch.half, torch.float, torch.double)\n    def test_median_nan_values(self, device, dtype):\n        # Generate random 0-3D sizes\n        sizes = [random.sample(range(1, 32), i) for i in range(4) for _ in range(2)]\n        for size in sizes:\n            # Create random input tensor with nan values\n            t = torch.rand(size, device=device, dtype=dtype)\n            t.masked_fill_(t < 0.1, float('nan'))\n            t_numpy = t.cpu().numpy()\n            for op in [torch.median, torch.nanmedian]:\n                numpy_op = np.median if op == torch.median else np.nanmedian\n                res = op(t)\n                num_nan = t.isnan().sum()\n                if op == torch.median and num_nan > 0:\n                    k = t.numel() - 1\n                else:\n                    k = int((t.numel() - num_nan - 1) / 2)\n                self.assertEqual(res, t.view(-1).sort()[0][k])\n                if (t.numel() - num_nan) % 2 == 1:\n                    # We can only test against numpy for odd reductions because numpy\n                    # returns the mean of the two medians and torch returns the lower\n                    self.assertEqual(res.item(), numpy_op(t.cpu().numpy()))\n                for dim in range(t.ndim):\n                    res = op(t, dim, True)\n                    size = t.size(dim) if t.ndim > 0 else 1\n                    num_nan = t.isnan().sum(dim, True)\n                    if op == torch.median:\n                        k = torch.where(num_nan > 0, size - 1, int((size - 1) / 2))\n                    else:\n                        k = ((size - num_nan - 1) / 2).type(torch.long)\n                    self.assertEqual(res[0], (t.sort(dim)[0]).gather(dim, k))\n                    self.assertEqual(res[0], t.gather(dim, res[1]))\n                    # We can only test against numpy for odd reductions because numpy\n                    # returns the mean of the two medians and torch returns the lower\n                    mask = (size - num_nan) % 2 == 1\n                    res = res[0].masked_select(mask).cpu()\n                    ref = numpy_op(t_numpy, dim, keepdims=True)[mask.cpu().numpy()]\n                    self.assertEqual(res, torch.from_numpy(ref))\n\n    def test_median_corner_cases(self, device):\n        def check(op, a, args, key):\n            t = torch.tensor(a, device=device)\n            res = op(t, *args)\n            if not args:\n                key = torch.tensor(key, device=device)\n            else:\n                if len(key) == 1:\n                    key = torch.tensor(key[0], device=device)\n                    res = res[0]\n                else:\n                    key = (torch.tensor(key[0], device=device), torch.tensor(key[1], device=device))\n            self.assertEqual(res, key)\n\n        nan = float('nan')\n        check(torch.median, nan, [], nan)\n        check(torch.median, [], [], nan)\n        check(torch.nanmedian, nan, [], nan)\n        check(torch.median, nan, [0], [nan, 0])\n        check(torch.nanmedian, nan, [0], [nan, 0])\n        check(torch.median, [nan], [0, True], [[nan], [0]])\n        check(torch.nanmedian, [nan], [0, True], [[nan], [0]])\n\n        # Indices are not deterministic here so can only check values\n        check(torch.median, [[nan, nan], [1, 2]], [0], [[nan, nan]])\n        check(torch.nanmedian, [[nan, nan], [1, 2]], [0], [[1, 2.]])\n        check(torch.median, [[nan, nan], [1, 2]], [1], [[nan, 1]])\n        check(torch.nanmedian, [[nan, nan], [1, 2]], [1], [[nan, 1.]])\n\n        # Discontiguous and 
strided tensors\n a = torch.arange(12, device=device)\n self.assertEqual(a[::2].median(), torch.tensor(4, device=device))\n self.assertEqual(a[::2].nanmedian(), torch.tensor(4, device=device))\n\n a.resize_(3, 4)\n self.assertEqual(a.T.median(), torch.tensor(5, device=device))\n self.assertEqual(a.T.nanmedian(), torch.tensor(5, device=device))\n self.assertEqual(a[::2, ::2].median(-1)[0], torch.tensor([0, 8], device=device))\n self.assertEqual(a[::2, ::2].nanmedian(-1)[0], torch.tensor([0, 8], device=device))\n\n a.resize_(2, 3, 2)\n self.assertEqual(a.T.median(), torch.tensor(5, device=device))\n self.assertEqual(a.T.nanmedian(), torch.tensor(5, device=device))\n self.assertEqual(a[:, ::2, :].median(-1)[0], torch.tensor([[0, 4], [6, 10]], device=device))\n self.assertEqual(a[:, ::2, :].nanmedian(-1)[0], torch.tensor([[0, 4], [6, 10]], device=device))\n\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float, torch.double)\n def test_quantile(self, device, dtype):\n # Generate some random test cases\n ops = ['quantile', 'nanquantile']\n inputs = [tuple(np.random.randint(2, 10, size=i)) for i in range(1, 4)]\n quantiles = [tuple(np.random.rand(i)) for i in range(0, 5)]\n keepdims = [True, False]\n\n # Add corner cases\n inputs.extend([0.75, (1,), (1, 1), (1, 2, 1)])\n inputs.extend([[float('nan')], [[float('nan'), float('nan')], [1, 2]]])\n inputs.extend([[[float('nan'), float('nan')], [float('nan'), 2]]])\n quantiles.extend([0.5, [0., 1.], np.random.rand(10)])\n\n # Enumerate all input combinations\n for op, x, q, keepdim in product(ops, inputs, quantiles, keepdims):\n if type(x) is tuple:\n a = torch.randn(x, dtype=dtype, device=device)\n # Make some random elements NaN\n a.masked_fill_(torch.randint_like(a, 20) == 0, float('nan'))\n else:\n a = torch.tensor(x, dtype=dtype, device=device)\n\n q = torch.tensor(q, dtype=dtype, device=device)\n\n torch_op = getattr(torch, op)\n numpy_op = getattr(np, op)\n\n # Compute quantile along every dimension and flattened tensor\n interpolations = ('linear', 'lower', 'higher', 'midpoint', 'nearest')\n for interpolation, dim in product(interpolations,\n [None] + list(range(a.ndim))):\n result = torch_op(a, q, dim=dim, keepdim=keepdim, interpolation=interpolation)\n expected = numpy_op(a.cpu().numpy(), q.cpu().numpy(), dim,\n interpolation=interpolation, keepdims=keepdim)\n self.assertEqual(result.cpu(), torch.from_numpy(np.array(expected)).type(result.type()))\n\n # Test out variation\n out = torch.empty_like(result)\n torch_op(a, q, dim=dim, keepdim=keepdim, interpolation=interpolation, out=out)\n self.assertEqual(out.cpu(), result.cpu())\n\n def test_quantile_backward(self, device):\n def check(a, q, dim, expected_grad, ops=(torch.quantile, torch.nanquantile)):\n for op in ops:\n t = torch.tensor(a, device=device, requires_grad=True)\n op(t, torch.tensor(q, device=device), dim).sum().backward()\n self.assertEqual(t.grad, expected_grad)\n\n check([1., 2, 3], 0.5, 0, [0, 1, 0])\n check([1., 2, 3, 4], 0.5, 0, [0, 0.5, 0.5, 0])\n check([3., 1, 4, 2], 0.5, 0, [0.5, 0, 0, 0.5])\n check([1., 2, 3, 4], [0.25, 0.5, 0.75], 0, [0.25, 1.25, 1.25, 0.25])\n check([[1., 2], [2, 1]], 0., 0, [[1, 0], [0, 1]])\n check([[1., 2], [4, 3]], 1., 1, [[0, 1], [1, 0]])\n check([1, float('nan'), 2], 0.5, 0, [0, 1, 0], [torch.quantile])\n check([1, float('nan'), 2], 0.5, 0, [0.5, 0, 0.5], [torch.nanquantile])\n\n def test_quantile_error(self, device):\n def check(a, q, args, kwargs, message):\n with self.assertRaisesRegex(RuntimeError, r'quantile\\(\\) ' + message):\n at = 
torch.tensor(a, device=device)\n qt = torch.tensor(q, device=device) if isinstance(q, list) else q\n torch.quantile(at, qt, *args, **kwargs)\n\n check([], 0.5, [], {}, r'input tensor must be non-empty')\n check([1.], [[1.]], [], {}, r'q must be a scalar or 1D tensor')\n check([1], 0.5, [], {}, r'input tensor must be either float or double dtype')\n check([1.], [1], [], {}, r'q tensor must be same dtype as the input tensor')\n check([1.], -1., [], {}, r'q must be in the range \\[0, 1\\] but got -1')\n check([1.], 1.1, [], {}, r'q must be in the range \\[0, 1\\] but got 1.1')\n check([1.], 0.5, [], {'out': torch.empty([], dtype=torch.int32, device=device)},\n r'out tensor must be same dtype as the input tensor')\n check([1.], [1.], [None, False], {'interpolation': 'random_mode'},\n r\"interpolation must be one of linear, lower, higher, midpoint or nearest, but got random_mode\")\n\n if self.device_type == \"cpu\":\n check([1.], [0.5, 1.1, -1], [], {}, r'q values must be in the range \\[0, 1\\]')\n\n if self.device_type == \"cuda\":\n with self.assertRaisesRegex(\n RuntimeError, r'quantile\\(\\) q tensor must be on the same device as the input tensor'):\n torch.randn(1, device=device).quantile(torch.tensor(0.5))\n with self.assertRaisesRegex(\n RuntimeError, r'quantile\\(\\) out tensor must be on the same device as the input tensor'):\n torch.quantile(torch.randn(1, device=device), 0.5, out=torch.scalar_tensor(1))\n\n def test_std_mean(self, device):\n x = torch.rand(100, 50, 20, device=device)\n for dim in range(x.dim()):\n for unbiased in [False, True]:\n for keepdim in [False, True]:\n std1, mean1 = torch.std_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)\n std2 = x.std(dim=dim, unbiased=unbiased, keepdim=keepdim)\n mean2 = x.mean(dim=dim, keepdim=keepdim)\n self.assertEqual(std1, std2)\n self.assertEqual(mean1, mean2)\n\n def test_std_mean_all_dims(self, device):\n x = torch.rand(100, 50, 20, device=device)\n for unbiased in [False, True]:\n std1, mean1 = torch.std_mean(x, unbiased=unbiased)\n std2 = x.std(unbiased=unbiased)\n mean2 = x.mean()\n self.assertEqual(std1, std2)\n self.assertEqual(mean1, mean2)\n\n def test_var_mean(self, device):\n x = torch.rand(100, 300, 50, device=device)\n for dim in range(x.dim()):\n for unbiased in [False, True]:\n for keepdim in [False, True]:\n var1, mean1 = torch.var_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)\n var2 = x.var(dim=dim, unbiased=unbiased, keepdim=keepdim)\n mean2 = x.mean(dim=dim, keepdim=keepdim)\n self.assertEqual(var1, var2)\n self.assertEqual(mean1, mean2)\n\n def test_var_mean_all_dims(self, device):\n x = torch.rand(100, 50, 20, device=device)\n for unbiased in [False, True]:\n var1, mean1 = torch.var_mean(x, unbiased=unbiased)\n var2 = x.var(unbiased=unbiased)\n mean2 = x.mean()\n self.assertEqual(var1, var2)\n self.assertEqual(mean1, mean2)\n\n def test_std_mean_some_dims(self, device):\n sizes = (4, 6, 7, 5, 3)\n dims = len(sizes)\n x = torch.rand(sizes, device=device)\n for num_of_dims in range(2, dims):\n dim_list = list(combinations(list(range(dims)), r=num_of_dims))\n for dim in dim_list:\n for unbiased in [False, True]:\n for keepdim in [False, True]:\n std1, mean1 = torch.std_mean(x, dim=dim, unbiased=unbiased, keepdim=keepdim)\n std2 = x.std(dim=dim, unbiased=unbiased, keepdim=keepdim)\n mean2 = x.mean(dim=dim, keepdim=keepdim)\n self.assertEqual(std1, std2)\n self.assertEqual(mean1, mean2)\n\n def _compare_std_var_with_numpy(self, op, device, dtype, input, dim,\n keepdim, unbiased, use_out):\n a = 
input.cpu().numpy() if input.dtype is not torch.bfloat16 else input.float().cpu().numpy()\n numpy_kwargs = {\n 'axis' : dim,\n 'keepdims' : keepdim,\n 'ddof' : 1 if unbiased else 0,\n }\n\n if dim is None:\n del numpy_kwargs['axis']\n del numpy_kwargs['keepdims']\n\n if op == 'var':\n torch_op = torch.var\n numpy_op = np.var\n elif op == 'std':\n torch_op = torch.std\n numpy_op = np.std\n else:\n self.fail(\"Unknown op!\")\n\n numpy_result = numpy_op(a, **numpy_kwargs)\n\n if dim is None and use_out is False:\n torch_result = torch_op(input, unbiased)\n elif dim is not None and use_out is False:\n torch_result = torch_op(input, dim, unbiased, keepdim)\n elif dim is not None and use_out is True:\n out = torch.empty(0, device=device, dtype=dtype)\n torch_result = torch_op(input, dim, unbiased, keepdim, out=out)\n else:\n out = torch.empty(0, device=device, dtype=dtype)\n try:\n torch_result = torch_op(input, dim, unbiased, keepdim, out=out)\n except RuntimeError:\n return\n self.fail(\"Failed to hit RuntimeError!\")\n\n exact_dtype = input.dtype not in (torch.bfloat16, torch.complex32, torch.complex64, torch.complex128)\n self.assertEqual(torch_result, numpy_result, exact_dtype=exact_dtype)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_var_vs_numpy(self, device, dtype):\n _size = (20, 20)\n\n for test_case in product((torch.randn(_size, device=device, dtype=dtype),),\n (None, 0, 1),\n (False, True),\n (False, True),\n (False, True),):\n self._compare_std_var_with_numpy('var', device, dtype, *test_case)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_std_vs_numpy(self, device, dtype):\n _size = (20, 20)\n\n for test_case in product((torch.randn(_size, device=device, dtype=dtype),),\n (None, 0, 1),\n (False, True),\n (False, True),\n (False, True),):\n self._compare_std_var_with_numpy('std', device, dtype, *test_case)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_var_correction_vs_numpy(self, device, dtype):\n _size = (20, 20)\n test_args = [\n *product(\n # dim\n (None, 0, 1),\n # correction\n (None, 0, 10, 30),\n # keepdim\n (False, True),\n ),\n [None, -100, True], # Negative correction\n ]\n\n tensor = make_tensor(_size, device=device, dtype=dtype)\n array = tensor.cpu().numpy()\n\n for dim, correction, keepdim in test_args:\n numpy_kwargs = dict(axis=dim, ddof=correction, keepdims=keepdim)\n if correction is None:\n # NumPy default is not compatible with torch.std (gh-50010)\n numpy_kwargs['ddof'] = 1\n\n numpy_res = np.asarray(np.var(array, **numpy_kwargs))\n torch_res = torch.var(tensor, dim=dim, correction=correction, keepdim=keepdim)\n\n # inf vs. 
nan results are sensitive to machine precision,\n # just treat them as equivalent\n numpy_res[np.isinf(numpy_res)] = np.nan\n torch_res[torch_res.isinf()] = np.nan\n\n self.assertEqual(torch_res, numpy_res)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_std_correction_vs_numpy(self, device, dtype):\n _size = (20, 20)\n test_args = [\n *product(\n # dim\n (None, 0, 1),\n # correction\n (None, 0, 10, 30),\n # keepdim\n (False, True),\n ),\n [None, -100, True], # Negative correction\n ]\n\n tensor = make_tensor(_size, device=device, dtype=dtype)\n array = tensor.cpu().numpy()\n\n for dim, correction, keepdim in test_args:\n numpy_kwargs = dict(axis=dim, ddof=correction, keepdims=keepdim)\n if correction is None:\n # NumPy default is incompatible with torch.std (gh-50010)\n numpy_kwargs['ddof'] = 1\n\n numpy_res = np.asarray(np.std(array, **numpy_kwargs))\n torch_res = torch.std(tensor, dim=dim, correction=correction, keepdim=keepdim)\n\n # inf vs. nan results are sensitive to machine precision,\n # just treat them as equivalent\n numpy_res[np.isinf(numpy_res)] = np.nan\n torch_res[torch_res.isinf()] = np.nan\n\n self.assertEqual(torch_res, numpy_res)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_std_mean_correction(self, device, dtype):\n _size = (20, 20)\n test_args = [\n *product(\n # dim\n (None, 0, 1),\n # correction\n (None, 0, 10, 30),\n # keepdim\n (False, True),\n ),\n [None, -100, True], # Negative correction\n ]\n\n tensor = make_tensor(_size, device=device, dtype=dtype)\n\n for dim, correction, keepdim in test_args:\n kwargs = dict(dim=dim, correction=correction, keepdim=keepdim)\n std1 = torch.std(tensor, **kwargs)\n if dim is not None:\n mean1 = torch.mean(tensor, dim=dim, keepdim=keepdim)\n else:\n mean1 = torch.mean(tensor)\n if keepdim:\n mean1 = mean1.reshape((1,) * tensor.ndim)\n std2, mean2 = torch.std_mean(tensor, **kwargs)\n\n self.assertEqual(std1, std2)\n self.assertEqual(mean1, mean2)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_var_mean_correction(self, device, dtype):\n _size = (20, 20)\n test_args = [\n *product(\n # dim\n (None, 0, 1),\n # correction\n (None, 0, 10, 30),\n # keepdim\n (False, True),\n ),\n [None, -100, True], # Negative correction\n ]\n\n tensor = make_tensor(_size, device=device, dtype=dtype)\n\n for dim, correction, keepdim in test_args:\n kwargs = dict(dim=dim, correction=correction, keepdim=keepdim)\n var1 = torch.var(tensor, **kwargs)\n if dim is not None:\n mean1 = torch.mean(tensor, dim=dim, keepdim=keepdim)\n else:\n mean1 = torch.mean(tensor)\n if keepdim:\n mean1 = mean1.reshape((1,) * tensor.ndim)\n var2, mean2 = torch.var_mean(tensor, **kwargs)\n\n self.assertEqual(var1, var2)\n self.assertEqual(mean1, mean2)\n\n def test_amin_amax_some_dims(self, device):\n sizes = (4, 6, 7, 5, 3)\n dims = len(sizes)\n x = torch.rand(sizes, device=device)\n for num_of_dims in range(2, dims):\n dim_list = list(combinations(list(range(dims)), r=num_of_dims))\n for dim in dim_list:\n for keepdim in [False, True]:\n amin1 = torch.amin(x, dim=dim, keepdim=keepdim)\n amax1 = torch.amax(x, dim=dim, keepdim=keepdim)\n amin2 = x\n amax2 = x\n for i, d in enumerate(dim):\n if not keepdim:\n d -= i\n amin2 = torch.amin(amin2, dim=d, keepdim=keepdim)\n amax2 = torch.amax(amax2, dim=d, keepdim=keepdim)\n self.assertEqual(amin1, amin2)\n self.assertEqual(amax1, amax2)\n\n def test_histc(self, device):\n # negative nbins throws\n with 
self.assertRaisesRegex(RuntimeError, 'bins must be > 0'):\n            torch.histc(torch.tensor([1], dtype=torch.float, device=device), bins=-1)\n        # empty tensor\n        actual = torch.histc(torch.tensor([], device=device), min=0, max=3)\n        expected = torch.zeros(100, dtype=torch.float, device=device)\n        self.assertEqual(expected, actual)\n\n        # without nbins\n        actual = torch.histc(\n            torch.tensor([2, 5], dtype=torch.float, device=device))\n        expected = torch.zeros(100, dtype=torch.float, device=device)\n        expected[0] = 1\n        expected[99] = 1\n        self.assertEqual(expected, actual)\n        # tensor with the same element\n        actual = torch.histc(torch.ones(5, dtype=torch.float, device=device), bins=5)\n        self.assertEqual(\n            torch.tensor([0, 0, 5, 0, 0], dtype=torch.float, device=device),\n            actual)\n        # no element falls within [min, max]\n        actual = torch.histc(\n            torch.ones(5, dtype=torch.float, device=device), bins=5, min=2, max=3)\n        self.assertEqual(\n            torch.tensor([0, 0, 0, 0, 0], dtype=torch.float, device=device),\n            actual)\n        # elements fall within [min, max]; some bins stay empty\n        actual = torch.histc(\n            torch.tensor([2, 4, 2, 2, 5, 4], dtype=torch.float, device=device),\n            bins=5, min=1, max=5)\n        self.assertEqual(\n            torch.tensor([0, 3, 0, 2, 1], dtype=torch.float, device=device),\n            actual)\n        # non-integral bin size\n        actual = torch.histc(\n            torch.tensor([1, 2, 1], dtype=torch.float, device=device),\n            bins=4, min=0, max=3)\n        self.assertEqual(\n            torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),\n            actual)\n        # double input\n        actual = torch.histc(\n            torch.tensor([1, 2, 1], dtype=torch.double, device=device), bins=4, min=0, max=3)\n        self.assertEqual(\n            torch.tensor([0, 2, 1, 0], dtype=torch.double, device=device),\n            actual)\n        self.assertEqual(actual.dtype, torch.double)\n        # mixed input\n        actual = torch.histc(\n            torch.tensor([1., 2, 1], dtype=torch.float, device=device),\n            bins=4, min=0, max=3)\n        self.assertEqual(\n            torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),\n            actual)\n        self.assertEqual(actual.dtype, torch.float)\n        # scalar input and 1 bin -- should return a 1-dimensional tensor, not a scalar.\n        actual = torch.histc(\n            torch.tensor(0, dtype=torch.float, device=device),\n            bins=1, min=0, max=3)\n        self.assertEqual(\n            torch.tensor([1], dtype=torch.float, device=device),\n            actual)\n        # tensors with inf; min, max not provided -- should throw a RuntimeError\n        with self.assertRaisesRegex(RuntimeError, r'range of \\[inf, inf\\] is not finite'):\n            torch.histc(torch.tensor([float(\"inf\")], dtype=torch.float, device=device))\n        with self.assertRaisesRegex(RuntimeError, r'range of \\[1, inf\\] is not finite'):\n            torch.histc(torch.tensor([1., 2., float(\"inf\")], dtype=torch.float, device=device))\n        # tensors with inf; min, max provided\n        self.assertEqual(\n            torch.histc(torch.tensor([float(\"inf\")], dtype=torch.float, device=device),\n                        bins=1, min=0, max=3),\n            torch.tensor([0], dtype=torch.float, device=device))\n        self.assertEqual(\n            torch.histc(torch.tensor([1., 2., float(\"inf\")], dtype=torch.float, device=device),\n                        bins=4, max=3),\n            torch.tensor([0, 1, 1, 0], dtype=torch.float, device=device))\n        # tensor with nan -- should throw a RuntimeError\n        with self.assertRaisesRegex(RuntimeError, r'range of \\[nan, nan\\] is not finite'):\n            torch.histc(torch.tensor([float(\"nan\")], dtype=torch.float, device=device))\n        # tensors with min > max -- should throw a RuntimeError\n        with self.assertRaisesRegex(RuntimeError, \"max must be larger than min\"):\n            torch.histc(torch.tensor([1., 2., 3.], dtype=torch.float, 
device=device),\n                        bins=4, min=5, max=1)\n\n        # test against numpy.histogram()\n        def test_against_np(tensor, bins=100, min=0, max=0):\n            if min == 0 and max == 0:\n                min = tensor.min().item()\n                max = tensor.max().item()\n            nparr = tensor.cpu().numpy()\n            actual = torch.histc(tensor, bins=bins, min=min, max=max)\n            expected = torch.from_numpy(np.histogram(nparr, bins=bins, range=(min, max))[0])\n            actual_cpu = actual.cpu()\n            # NB: Numpy returns an int64 tensor, like normal people...\n            self.assertEqual(actual, expected.to(actual_cpu))\n\n        test_against_np(torch.tensor([1., 2, 1], device=device))\n        test_against_np(torch.randn(5000, device=device))\n\n        # Test bins arg\n        test_against_np(torch.randn(301, device=device), bins=10)\n\n        # Test truncated range\n        test_against_np(torch.randn(201, device=device), min=0.1, max=1)\n\n        noncontig = torch.randn(100, 3, device=device)[:, 2]\n        test_against_np(noncontig)\n\n        multidim = torch.randn(3, 5, 7, 2, device=device)\n        test_against_np(multidim)\n\n        expanded = torch.randn(1, 5, 1, 2, device=device).expand(3, 5, 7, 2)\n        test_against_np(expanded)\n\n    \"\"\"\n    Runs torch.histogram and numpy.histogram on the specified input parameters\n    and asserts that their output is equal.\n    \"\"\"\n    def _test_histogram_numpy(self, t, bins, bin_range, weights, density):\n        def to_np(t):\n            if not torch.is_tensor(t):\n                return t\n            else:\n                return t.cpu().numpy()\n\n        # Wrapper around numpy.histogram performing conversions between torch tensors and numpy arrays.\n        def reference_histogram(self, t, bins, bin_range, weights, density, dtype):\n            (np_t, np_bins, np_weights) = map(to_np, [t, bins, weights])\n            (np_hist, np_bin_edges) = np.histogram(np_t, np_bins, range=bin_range, weights=np_weights, density=density)\n            return (torch.from_numpy(np_hist).to(dtype), torch.from_numpy(np_bin_edges).to(dtype))\n\n        # Doesn't pass a 'range' kwarg unless necessary because the override of histogram with Tensor bins doesn't accept one\n        if bin_range:\n            (actual_hist, actual_bin_edges) = torch.histogram(t, bins, range=bin_range, weight=weights, density=density)\n        else:\n            (actual_hist, actual_bin_edges) = torch.histogram(t, bins, weight=weights, density=density)\n\n        (expected_hist, expected_bin_edges) = reference_histogram(self, t, bins, bin_range, weights, density, actual_hist.dtype)\n\n        \"\"\"\n        Works around linspace discrepancies by passing torch's constructed bin_edges to numpy.\n        When bin edges are not explicitly defined, histogram uses the linspace operator internally\n        to construct the sequence of bin edges. 
In some cases, torch.linspace output differs slightly\n from numpy.linspace output.\n Issue: https://github.com/pytorch/pytorch/issues/58758\n \"\"\"\n if not torch.is_tensor(bins):\n self.assertEqual(actual_bin_edges, expected_bin_edges, atol=1e-5, rtol=1e-5)\n # Calls numpy.histogram again, passing torch's actual_bin_edges as the bins argument\n (expected_hist, expected_bin_edges) = reference_histogram(\n self, t, actual_bin_edges, bin_range, weights, density, actual_hist.dtype)\n\n self.assertEqual(actual_hist, expected_hist)\n self.assertEqual(actual_bin_edges, expected_bin_edges)\n\n # Test passing non-contiguous output tensors\n hist_out = make_tensor(expected_hist.shape, device=expected_hist.device, dtype=expected_hist.dtype,\n noncontiguous=True)\n bin_edges_out = make_tensor(expected_bin_edges.shape, device=expected_bin_edges.device, dtype=expected_bin_edges.dtype,\n noncontiguous=True)\n\n # Doesn't pass a 'range' kwarg unless necessary because the override of histogram with Tensor bins doesn't accept one\n if bin_range:\n torch.histogram(t, bins, range=bin_range, weight=weights, density=density, out=(hist_out, bin_edges_out))\n else:\n torch.histogram(t, bins, weight=weights, density=density, out=(hist_out, bin_edges_out))\n\n self.assertEqual(hist_out, expected_hist)\n self.assertEqual(bin_edges_out, expected_bin_edges)\n\n @onlyCPU\n @dtypes(torch.float32)\n def test_histogram(self, device, dtype):\n shapes = (\n (),\n (0,),\n (1,),\n (1, 5),\n (3, 5),\n (1, 5, 1),\n (2, 3, 5))\n\n for contig, bins_contig, bin_ct, weighted, density, shape in \\\n product([True, False], [True, False], range(1, 10), [True, False], [True, False], shapes):\n values = make_tensor(shape, device, dtype, low=-9, high=9, noncontiguous=not contig)\n weights = make_tensor(shape, device, dtype, low=0, high=9, noncontiguous=not contig) if weighted else None\n\n # Tests passing just the bin_ct\n self._test_histogram_numpy(values, bin_ct, None, weights, density)\n\n # Tests with caller-specified histogram range\n bin_range = sorted((random.uniform(-9, 9), random.uniform(-9, 9)))\n self._test_histogram_numpy(values, bin_ct, bin_range, weights, density)\n\n # Tests with range min=max\n bin_range[1] = bin_range[0]\n self._test_histogram_numpy(values, bin_ct, bin_range, weights, density)\n\n # Tests with caller-specified bin edges\n bin_edges = make_tensor(bin_ct + 1, device, dtype, low=-9, high=9).msort()\n if not bins_contig:\n # Necessary because msort always produces contiguous output\n bin_edges_noncontig = make_tensor(bin_ct + 1, device, dtype, noncontiguous=not bins_contig)\n bin_edges_noncontig.copy_(bin_edges)\n bin_edges = bin_edges_noncontig\n self.assertEqual(bin_edges.is_contiguous(), bins_contig)\n self._test_histogram_numpy(values, bin_edges, None, weights, density)\n\n # Tests with input tensor in which all elements are equal\n elt = random.uniform(-9, 9)\n values = make_tensor(shape, device, dtype, low=elt, high=elt, noncontiguous=not contig)\n self._test_histogram_numpy(values, bin_ct, bin_range, weights, density)\n self._test_histogram_numpy(values, bin_edges, None, weights, density)\n\n # Tests with input equal to bin_edges\n weights = make_tensor(bin_ct + 1, device, dtype, low=0, high=9, noncontiguous=not contig) if weighted else None\n self._test_histogram_numpy(bin_edges, bin_edges, None, weights, density)\n\n # Tests values of default args\n for bin_ct, shape in product(range(1, 10), shapes):\n values = make_tensor(shape, device, dtype, low=-9, high=9)\n (actual_hist, actual_bin_edges) = 
torch.histogram(values, bin_ct)\n (expected_hist, expected_bin_edges) = torch.histogram(\n values, bin_ct, range=None, weight=None, density=False)\n self.assertEqual(actual_hist, expected_hist)\n self.assertEqual(actual_bin_edges, expected_bin_edges)\n\n \"\"\"\n Runs torch.histogramdd and numpy.histogramdd on the specified input parameters\n and asserts that their output is equal.\n \"\"\"\n def _test_histogramdd_numpy(self, t, bins, bin_range, weights, density):\n def to_np(t):\n if type(t) == list:\n return list(map(to_np, t))\n if not torch.is_tensor(t):\n return t\n return t.cpu().numpy()\n\n # Wrapper around numpy.histogram performing conversions between torch tensors and numpy arrays.\n def reference_histogramdd(t, bins, bin_range, weights, density, dtype):\n (np_t, np_bins, np_weights) = map(to_np, [t, bins, weights])\n\n # numpy.histogramdd accepts only (N, D) shapes\n D = np_t.shape[-1]\n N = np.prod(np_t.shape[:-1])\n reshaped_t = np.reshape(np_t, (N, D))\n reshaped_wt = np.reshape(np_weights, (N,)) if np_weights is not None else None\n\n # numpy.histogramdd throws an error for D=0\n if D == 0:\n return (torch.tensor(float('nan') if density else 0.), [])\n\n # numpy.histogramdd expects range to be specified as a sequence of D (lower, upper) tuples\n reshaped_range = None if not bin_range else [(bin_range[2 * i], bin_range[2 * i + 1]) for i in range(D)]\n\n (np_hist, np_bin_edges) = np.histogramdd(reshaped_t, np_bins,\n range=reshaped_range, weights=reshaped_wt, density=density)\n\n return (torch.from_numpy(np_hist).to(dtype), [torch.from_numpy(t).to(dtype) for t in np_bin_edges])\n\n (actual_hist, actual_bin_edges) = torch.histogramdd(t, bins, range=bin_range, weight=weights, density=density)\n (expected_hist, expected_bin_edges) = reference_histogramdd(t, bins, bin_range, weights, density, actual_hist.dtype)\n\n D = len(actual_bin_edges)\n self.assertEqual(D, len(expected_bin_edges))\n\n \"\"\"\n Works around linspace discrepancies by passing torch's constructed bin_edges to numpy.\n When bin edges are not explicitly defined, histogram uses the linspace operator internally\n to construct the sequence of bin edges. 
In some cases, torch.linspace output differs slightly\n from numpy.linspace output.\n Issue: https://github.com/pytorch/pytorch/issues/58758\n \"\"\"\n if not torch.is_tensor(bins):\n for dim in range(D):\n self.assertEqual(actual_bin_edges[dim], expected_bin_edges[dim], atol=1e-5, rtol=1e-5)\n # Calls numpy.histogram again, passing torch's actual_bin_edges as the bins argument\n (expected_hist, expected_bin_edges) = reference_histogramdd(\n t, actual_bin_edges, bin_range, weights, density, actual_hist.dtype)\n self.assertEqual(D, len(expected_bin_edges))\n\n self.assertEqual(actual_hist, expected_hist)\n for dim in range(D):\n self.assertEqual(actual_bin_edges[dim], expected_bin_edges[dim])\n\n @onlyCPU\n @dtypes(torch.float32)\n def test_histogramdd(self, device, dtype):\n shapes = (\n (1, 5),\n (3, 5),\n (1, 5, 1),\n (2, 3, 5),\n (7, 7, 7, 7),\n (16, 8, 4, 2),\n (10, 10, 10),\n (7, 0, 3),\n (5, 0),)\n\n for contig, bins_contig, weighted, density, shape in \\\n product([True, False], [True, False], [True, False], [True, False], shapes):\n D = shape[-1]\n\n values = make_tensor(shape, device, dtype, low=-9, high=9, noncontiguous=not contig)\n weights = make_tensor(shape[:-1], device, dtype, low=0, high=9, noncontiguous=not contig) if weighted else None\n\n # Tests passing a single bin count\n bin_ct = random.randint(1, 5)\n self._test_histogramdd_numpy(values, bin_ct, None, weights, density)\n\n # Tests passing a bin count for each dimension\n bin_ct = [random.randint(1, 5) for dim in range(D)]\n self._test_histogramdd_numpy(values, bin_ct, None, weights, density)\n\n # Tests with caller-specified histogram range\n bin_range_tuples = [sorted((random.uniform(-9, 9), random.uniform(-9, 9))) for dim in range(D)]\n bin_range = [elt for t in bin_range_tuples for elt in t]\n self._test_histogramdd_numpy(values, bin_ct, bin_range, weights, density)\n\n # Tests with range min=max\n for dim in range(D):\n bin_range[2 * dim + 1] = bin_range[2 * dim]\n self._test_histogramdd_numpy(values, bin_ct, bin_range, weights, density)\n\n # Tests with caller-specified bin edges\n bin_edges = [make_tensor(ct + 1, device, dtype, low=-9, high=9).msort() for ct in bin_ct]\n if not bins_contig:\n # Necessary because msort always produces contiguous output\n bin_edges_noncontig = [make_tensor(ct + 1, device, dtype, noncontiguous=not bins_contig) for ct in bin_ct]\n for dim in range(D):\n bin_edges_noncontig[dim].copy_(bin_edges[dim])\n bin_edges = bin_edges_noncontig\n for dim in range(D):\n self.assertEqual(bin_edges[dim].is_contiguous(), bins_contig)\n self._test_histogramdd_numpy(values, bin_edges, None, weights, density)\n\n @onlyCPU\n @dtypes(torch.float32)\n def test_histogram_error_handling(self, device, dtype):\n with self.assertRaisesRegex(RuntimeError, 'not implemented for'):\n values = make_tensor((), device, dtype=torch.int32)\n torch.histogram(values, 1)\n\n inconsistent_dtype = torch.float32 if dtype != torch.float32 else torch.float64\n\n with self.assertRaisesRegex(RuntimeError, 'input tensor and bins tensors should have the same dtype'):\n values = make_tensor((), device, dtype=dtype)\n bins = make_tensor((), device, dtype=inconsistent_dtype)\n torch.histogram(values, bins)\n\n with self.assertRaisesRegex(RuntimeError, 'input tensor and weight tensor should have the same dtype'):\n values = make_tensor((), device, dtype=dtype)\n weight = make_tensor((), device, dtype=inconsistent_dtype)\n torch.histogram(values, 1, weight=weight)\n\n with self.assertRaisesRegex(RuntimeError, 'input tensor and hist 
tensor should have the same dtype'):\n values = make_tensor((), device, dtype=dtype)\n hist = make_tensor((), device, dtype=inconsistent_dtype)\n bin_edges = make_tensor((), device, dtype=dtype)\n torch.histogram(values, 1, out=(hist, bin_edges))\n\n with self.assertRaisesRegex(RuntimeError, 'input tensor and bin_edges tensor should have the same dtype'):\n values = make_tensor((), device, dtype=dtype)\n hist = make_tensor((), device, dtype=dtype)\n bin_edges = make_tensor((), device, dtype=inconsistent_dtype)\n torch.histogram(values, 1, out=(hist, bin_edges))\n\n with self.assertRaisesRegex(RuntimeError, 'bins tensor should have one dimension'):\n t = make_tensor((2, 2), device, dtype=dtype)\n torch.histogram(t, t)\n\n with self.assertRaisesRegex(RuntimeError, 'bins tensor should have at least 1 element'):\n t = make_tensor((0), device, dtype=dtype)\n torch.histogram(t, t)\n\n with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'):\n values = make_tensor((), device, dtype=dtype)\n torch.histogram(values, -1)\n\n with self.assertRaisesRegex(RuntimeError, 'if weight tensor is provided it should have the same shape \\\nas the input tensor excluding its innermost dimension'):\n values = make_tensor((2, 2), device, dtype=dtype)\n weight = make_tensor((1), device, dtype=dtype)\n torch.histogram(values, 1, weight=weight)\n\n with self.assertRaisesRegex(TypeError, 'received an invalid combination of arguments'):\n values = make_tensor((), device, dtype=dtype)\n bin_edges = make_tensor((), device, dtype=dtype)\n torch.histogram(values, bin_edges, range=(0, 1))\n\n with self.assertRaisesRegex(RuntimeError, 'min should not exceed max'):\n values = make_tensor((), device, dtype=dtype)\n torch.histogram(values, 2, range=(1, 0))\n\n with self.assertRaisesRegex(RuntimeError, r'range \\[nan, nan\\] is not finite'):\n values = torch.tensor([float(\"nan\")], device=device, dtype=dtype)\n torch.histogram(values, 2)\n\n # Tests to ensure that reduction functions employing comparison operators are usable when there\n # exists a zero dimension (i.e. when the the tensors are empty) in the tensor. These tests specifically\n # cater to functions where specifying the `dim` parameter is necessary.\n def test_tensor_compare_ops_empty(self, device):\n shape = (2, 0, 4)\n master_input = torch.randn(shape, device=device)\n np_input = np.empty(shape)\n test_functions = [\n ('amax', torch.amax, np.amax),\n ('amin', torch.amin, np.amin),\n ('max', lambda *args, **kwargs: torch.max(*args, **kwargs).values, np.max),\n ('min', lambda *args, **kwargs: torch.min(*args, **kwargs).values, np.min),\n ('median', lambda *args, **kwargs: torch.median(*args, **kwargs).values, np.median),\n ]\n\n for name, fn, np_function in test_functions:\n # Check if reduction happens along the specified dim with and without keepdim. 
Check with\n # numpy to maintain compatibility with numpy functions.\n error_msg = f\"test function: {name}\"\n self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=2), msg=error_msg)\n self.assertEqual(np_function(np_input, axis=2),\n fn(master_input, dim=2).cpu().numpy(), msg=error_msg, exact_dtype=False)\n\n self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=-1), msg=error_msg)\n self.assertEqual(np_function(np_input, axis=-1),\n fn(master_input, dim=-1).cpu().numpy(), msg=error_msg, exact_dtype=False)\n\n self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=2, keepdim=True),\n msg=error_msg)\n self.assertEqual(np_function(np_input, axis=2, keepdims=True),\n fn(master_input, dim=2, keepdim=True).cpu().numpy(), msg=error_msg, exact_dtype=False)\n\n self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=-1, keepdim=True),\n msg=error_msg)\n self.assertEqual(np_function(np_input, axis=-1, keepdims=True),\n fn(master_input, dim=-1, keepdim=True).cpu().numpy(), msg=error_msg, exact_dtype=False)\n\n # Check if function raises error on specified zero'd dimension as reduction dim.\n self.assertRaisesRegex(IndexError, \"Expected reduction dim\", lambda: fn(master_input, dim=1))\n\n # Tests to ensure that reduction of zero-dim tensors (i.e. empty tensors) using comparison operators\n # raises an error if no `dim` parameter is specified. This exists separately from tests in\n # test_tensor_compare_ops_empty because not specifying a `dim` parameter in the former tests does\n # not throw errors. Also, checking the return type of argmax requires supplying a different dtype\n # argument than that for the input tensor. There is also variation in numpy testing.\n def test_tensor_compare_ops_argmax_argmin_kthvalue_dim_empty(self, device):\n shape = (2, 0, 4)\n master_input = torch.randn(shape, device=device)\n np_input = np.empty(shape)\n test_functions = [\n ('argmax', torch.argmax, {'dtype': torch.int64}, np.argmax),\n ('argmin', torch.argmin, {'dtype': torch.int64}, np.argmin),\n ('kthvalue', lambda *args, k=1, **kwargs: torch.kthvalue(*args, k=1, **kwargs).values,\n {}, lambda *args, k=1, axis=None, **kwargs: np.partition(*args, k, **kwargs).take(k - 1, axis=axis))\n ]\n\n for name, fn, dtype, np_function in test_functions:\n error_msg = f\"test function: {name}\"\n self.assertEqual(torch.empty((2, 0), device=device, **dtype), fn(master_input, dim=2), msg=error_msg)\n self.assertEqual(\n np_function(np_input, axis=2), fn(master_input, dim=2).cpu().numpy(), msg=error_msg, exact_dtype=False\n )\n\n self.assertEqual(torch.empty((2, 0), device=device, **dtype), fn(master_input, dim=-1), msg=error_msg)\n self.assertEqual(\n np_function(np_input, axis=-1), fn(master_input, dim=-1).cpu().numpy(), msg=error_msg, exact_dtype=False\n )\n\n # keepdim variant does not exist for numpy\n self.assertEqual(torch.empty((2, 0, 1), device=device, **dtype), fn(master_input, dim=2, keepdim=True),\n msg=error_msg)\n self.assertEqual(torch.empty((2, 0, 1), device=device, **dtype), fn(master_input, dim=-1, keepdim=True),\n msg=error_msg)\n\n # Check if function raises error on specified zero'd dimension as reduction dim.\n self.assertRaisesRegex(IndexError, \"Expected reduction dim\", lambda: fn(master_input, dim=1))\n if name != 'kthvalue':\n self.assertRaisesRegex(IndexError, \"Expected reduction dim\", lambda: fn(master_input))\n\n # Tests to ensure that reduction of zero-dim tensors (i.e. 
empty tensors) using math operators works when a\n # non-zero dim is specified for the reduction and throws an error when the dim specified is 0. Although\n # there is some repetition with test_tensor_compare_ops_optional_dim_empty and test_tensor_compare_ops_empty,\n # these tests are kept separate since tests for math operators also require checking for correctness of the\n # returned data using allclose() or isinf() which does not exists in the former tests.\n @skipIfNoSciPy\n def test_tensor_reduce_ops_empty(self, device):\n from scipy.special import logsumexp\n shape = (2, 0, 4)\n master_input = torch.randn(shape, device=device)\n np_input = np.empty(shape)\n test_functions = [\n ('prod', torch.prod, 1., np.prod),\n ('sum', torch.sum, 0., np.sum),\n ('norm', torch.norm, 0., np.linalg.norm),\n ('mean', torch.mean, nan, np.mean),\n ('var', torch.var, nan, np.var),\n ('std', torch.std, nan, np.std),\n ('logsumexp', torch.logsumexp, -inf, logsumexp),\n ]\n\n for name, fn, return_value, np_function in test_functions:\n # Check if reduction happens along the specified dimension.\n error_msg = f\"test function: {name}\"\n self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=2), msg=error_msg)\n self.assertEqual(np_function(np_input, axis=2), fn(master_input, dim=2).cpu().numpy(), msg=error_msg,\n exact_dtype=False)\n\n self.assertEqual(torch.empty((2, 0), device=device), fn(master_input, dim=-1), msg=error_msg)\n self.assertEqual(np_function(np_input, axis=-1), fn(master_input, dim=-1).cpu().numpy(), msg=error_msg,\n exact_dtype=False)\n\n self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=2, keepdim=True),\n msg=error_msg)\n self.assertEqual(np_function(np_input, axis=2, keepdims=True), fn(master_input, dim=2, keepdim=True),\n msg=error_msg, exact_dtype=False)\n\n self.assertEqual(torch.empty((2, 0, 1), device=device), fn(master_input, dim=-1, keepdim=True),\n msg=error_msg)\n self.assertEqual(np_function(np_input, axis=-1, keepdims=True), fn(master_input, dim=-1, keepdim=True),\n msg=error_msg, exact_dtype=False)\n\n self.assertEqual(torch.full((2, 4), return_value, device=device), fn(master_input, dim=1), msg=error_msg)\n self.assertEqual(torch.full((2, 4), return_value, device=device), fn(master_input, dim=-2), msg=error_msg)\n self.assertEqual(torch.full((2, 1, 4), return_value, device=device), fn(master_input, dim=1, keepdim=True),\n msg=error_msg)\n self.assertEqual(torch.full((2, 1, 4), return_value, device=device), fn(master_input, dim=-2, keepdim=True),\n msg=error_msg)\n\n if name != 'logsumexp':\n # The scipy function does not work for reduction the zero dimension\n self.assertEqual(np.float32(np_function(np_input, axis=1)), fn(master_input, dim=1).cpu().numpy(),\n msg=error_msg)\n self.assertEqual(np.float32(np_function(np_input, axis=-2)), fn(master_input, dim=-2).cpu().numpy(),\n msg=error_msg)\n self.assertEqual(np.float32(np_function(np_input, axis=1, keepdims=True)),\n fn(master_input, dim=1, keepdim=True).cpu().numpy(),\n msg=error_msg)\n self.assertEqual(np.float32(np_function(np_input, axis=-2, keepdims=True)),\n fn(master_input, dim=-2, keepdim=True).cpu().numpy(),\n msg=error_msg)\n\n # logsumexp throws a type error when not specifying dim so test separately.\n self.assertEqual(torch.full((), return_value, device=device), fn(master_input), msg=error_msg)\n else:\n self.assertRaises(TypeError, lambda: fn(master_input))\n\n # Tests to ensure that any() and all() functions work with zero-dim tensors. 
Kept separate from\n # other tests for checking reduction with zero-dim tensors because these tests have significantly\n # different testing behaviour than that used for the former tests.\n def test_reduction_empty_any_all(self, device):\n shape = (2, 0, 4)\n x = torch.randn(shape, device=device)\n\n for dtype in get_all_dtypes(include_half=True, include_bfloat16=False,\n include_bool=True, include_complex=True):\n # Refer: [all, any uint8 compatibility]\n if dtype == torch.uint8:\n out_dtype = torch.uint8\n else:\n out_dtype = torch.bool # output of all/any is bool irrespective of input dtype\n\n xb = x.to(dtype)\n yb = x.to(dtype)\n # any\n self.assertEqual((2, 0), xb.any(2).shape)\n self.assertEqual((2, 0, 1), xb.any(2, keepdim=True).shape)\n self.assertEqual(torch.zeros((2, 4), device=device, dtype=out_dtype), xb.any(1))\n self.assertEqual(torch.zeros((2, 1, 4), device=device, dtype=out_dtype), xb.any(1, keepdim=True))\n self.assertEqual(torch.zeros((), device=device, dtype=out_dtype), xb.any())\n\n # all\n self.assertEqual((2, 0), xb.all(2).shape)\n self.assertEqual((2, 0, 1), xb.all(2, keepdim=True).shape)\n self.assertEqual(torch.ones((2, 4), device=device, dtype=out_dtype), xb.all(1))\n self.assertEqual(torch.ones((2, 1, 4), device=device, dtype=out_dtype), xb.all(1, keepdim=True))\n self.assertEqual(torch.ones((), device=device, dtype=out_dtype), xb.all())\n\n # TODO: can these be merged with their respective OpInfos?\n def test_reduce_dtype(self, device):\n def test_reduction(op, has_no_dim, takes_dtype=True):\n x = torch.randn(3, 3, dtype=torch.float, requires_grad=True, device=device)\n\n if has_no_dim:\n grad1, = torch.autograd.grad([op(x)], [x])\n grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])\n self.assertEqual(grad1, grad2)\n self.assertEqual(grad2.dtype, torch.float)\n\n gi = torch.randn(op(x, dim=0).shape, dtype=torch.float, device=device)\n grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)\n if takes_dtype:\n grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())\n else:\n grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())\n self.assertEqual(grad1, grad2)\n self.assertEqual(grad2.dtype, torch.float)\n\n test_reduction(torch.sum, True)\n test_reduction(torch.prod, True)\n test_reduction(torch.cumsum, False)\n test_reduction(torch.cumprod, False)\n test_reduction(torch.logcumsumexp, False, takes_dtype=False)\n\n @ops(reference_masked_ops)\n def test_reference_masked(self, device, dtype, op):\n \"\"\"Test masked reduction operations on strided-only tensors using\n numpy reductions as reference.\n \"\"\"\n\n def to_numpy(input):\n if input.dtype is torch.bfloat16:\n return input.cpu().to(torch.float32).numpy()\n else:\n return input.cpu().numpy()\n\n samples = op.sample_inputs_func(op, device, dtype, requires_grad=False)\n for sample_input in samples:\n t = sample_input.input\n actual = op(t, *sample_input.args, **sample_input.kwargs)\n exact_dtype = not (t.dtype is torch.bfloat16\n or (op.promotes_int_to_float and not torch.is_floating_point(t)))\n expected = op.ref(to_numpy(t), *sample_input.args,\n **dict(\n # `identity` is mapped to numpy reduction `initial` argument\n identity=torch._masked._reduction_identity(op.name, t),\n **sample_input.kwargs))\n\n # Workaround https://github.com/pytorch/pytorch/issues/66556\n expected = np.asarray(expected) # transform numpy scalars to numpy.ndarray instances\n\n msg = (\"Failed to produce expected results! 
Input tensor was\"\n \" {0}, torch result is {1}, and reference result is\"\n \" {2}.\").format(t, actual, expected) if t.numel() < 10 else None\n\n self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)\n\n\ninstantiate_device_type_tests(TestReductions, globals())\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.ao.quantization.quantize.is_activation_post_process" ], [ "torch.prod", "numpy.random.rand", "numpy.median", "numpy.empty", "torch.histogram", "torch.tensor", "numpy.prod", "torch._aminmax", "torch.testing._internal.common_dtype.get_all_math_dtypes", "torch.min", "torch.clamp", "torch.full_like", "torch.std_mean", "torch.testing._internal.common_device_type.precisionOverride", "torch.BoolTensor", "torch.cumsum", "numpy.isinf", "torch.testing._internal.common_dtype.get_all_complex_dtypes", "torch.any", "torch.var_mean", "torch.from_numpy", "torch.scalar_tensor", "torch.testing._internal.common_dtype.get_all_fp_dtypes", "torch.testing._internal.common_utils.run_tests", "torch.kthvalue", "torch.isnan", "torch.testing._internal.common_dtype.floating_and_complex_types_and", "torch.testing._internal.common_device_type.ops", "torch.quantile", "torch.mode", "torch.sum", "scipy.special.logsumexp", "torch.randint", "torch.searchsorted", "torch.zeros_like", "torch.zeros", "numpy.array", "torch.max", "torch.testing._internal.common_dtype.integral_types_and", "torch.cumprod", "torch.rand", "torch.bucketize", "torch.rand_like", "torch._masked._reduction_identity", "torch.std", "torch.add", "torch.all", "torch.mean", "numpy.argmin", "torch.amax", "torch.testing.make_tensor", "numpy.histogramdd", "torch.randint_like", "torch.testing._internal.common_device_type.dtypesIfCPU", "torch.where", "numpy.histogram", "torch.testing._internal.common_device_type.largeTensorTest", "torch.var", "torch.is_tensor", "torch.testing._internal.common_dtype.get_all_int_dtypes", "torch.FloatTensor", "torch.nansum", "numpy.argmax", "numpy.std", "numpy.asarray", "torch.amin", "torch.histogramdd", "torch.median", "torch.ones", "torch.cuda.is_available", "torch.testing._internal.common_dtype.get_all_dtypes", "torch.ByteTensor", "numpy.random.randint", "numpy.partition", "torch.empty", "numpy.reshape", "torch.testing._internal.common_device_type.dtypesIfCUDA", "torch.cuda.LongTensor", "torch.full", "torch.sort", "torch.arange", "torch.histc", "torch.logsumexp", "torch.is_floating_point", "torch.ones_like", "torch.randn", "numpy.var", "torch.empty_like" ] ]
chengyi-wu/nlp-web
[ "a3250fb78e53f5a1f4422699160d33e5f52eb551" ]
[ "app/text_classifier/model.py" ]
[ "# coding: utf-8\nfrom __future__ import print_function\nimport os, sys\nimport tensorflow.contrib.keras as kr\nimport tensorflow as tf\nimport numpy as np\nfrom collections import Counter\nimport time\nfrom datetime import timedelta\nimport csv\nimport random\n\nif sys.version_info[0] > 2:\n is_py3 = True\nelse:\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n is_py3 = False\n\ndef get_time_dif(start_time):\n \"\"\"获取已使用时间\"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n\ndef native_word(word, encoding='utf-8'):\n \"\"\"如果在python2下面使用python3训练的模型,可考虑调用此函数转化一下字符编码\"\"\"\n if not is_py3:\n return word.encode(encoding)\n else:\n return word\n\ndef native_content(content):\n if not is_py3:\n return content.decode('utf-8')\n else:\n return content\n\ndef open_file(filename, mode='r'):\n \"\"\"\n 常用文件操作,可在python2和python3间切换.\n mode: 'r' or 'w' for read or write\n \"\"\"\n if is_py3:\n return open(filename, mode, encoding='utf-8', errors='ignore')\n else:\n return open(filename, mode)\n\ndef read_file(filename):\n \"\"\"读取文件数据\"\"\"\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label, content = line.strip().split('\\t')\n if content:\n contents.append(list(native_content(content)))\n labels.append(native_content(label))\n except:\n pass\n return contents, labels\n\ndef build_vocab(train_dir, vocab_dir, vocab_size=5000):\n \"\"\"根据训练集构建词汇表,存储\"\"\"\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size - 1)\n words, _ = list(zip(*count_pairs))\n # 添加一个 <PAD> 来将所有文本pad为同一长度\n words = ['<PAD>'] + list(words)\n open_file(vocab_dir, mode='w').write('\\n'.join(words) + '\\n')\n\ndef read_vocab(vocab_dir):\n \"\"\"读取词汇表\"\"\"\n # words = open_file(vocab_dir).read().strip().split('\\n')\n with open_file(vocab_dir) as fp:\n # 如果是py2 则每个值都转化为unicode\n words = [native_content(_.strip()) for _ in fp.readlines()]\n word_to_id = dict(zip(words, range(len(words))))\n return words, word_to_id\n\ndef process_file(filename, word_to_id, cat_to_id, max_length=600):\n \"\"\"将文件转换为id表示\"\"\"\n contents, labels = read_file(filename)\n\n data_id, label_id = [], []\n for i in range(len(contents)):\n data_id.append([word_to_id[x] for x in contents[i] if x in word_to_id])\n label_id.append(cat_to_id[labels[i]])\n\n # 使用keras提供的pad_sequences来将文本pad为固定长度\n x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)\n y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id)) # 将标签转换为one-hot表示\n\n return x_pad, y_pad\n\ndef batch_iter(x, y, batch_size=64):\n \"\"\"生成批次数据\"\"\"\n data_len = len(x)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n indices = np.random.permutation(np.arange(data_len))\n x_shuffle = x[indices]\n y_shuffle = y[indices]\n\n for i in range(num_batch):\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n yield x_shuffle[start_id:end_id], y_shuffle[start_id:end_id]\n\nclass TCNNConfig(object):\n embedding_dim = 64 # 词向量维度\n seq_length = 4000 # 序列长度\n num_classes = 0 # 类别数\n num_filters = 256 # 卷积核数目\n kernel_size = 5 # 卷积核尺寸\n vocab_size = 5000 # 词汇表达小\n\n hidden_dim = 128 # 全连接层神经元\n\n dropout_keep_prob = 0.5 # dropout保留比例\n learning_rate = 1e-3 # 学习率\n\n batch_size = 64 # 每批训练大小\n num_epochs = 2 # 总迭代轮次\n\n print_per_batch = 100 # 每多少轮输出一次结果\n\nclass TextCNN(object):\n \"\"\"文本分类,CNN模型\"\"\"\n\n def 
__init__(self, config):\n self.config = config\n\n # Three input placeholders\n self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')\n self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n\n self.cnn()\n\n def cnn(self):\n \"\"\"Build the CNN graph\"\"\"\n # Word embedding lookup\n with tf.device('/cpu:0'):\n embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.embedding_dim])\n embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)\n\n with tf.name_scope(\"cnn\"):\n # CNN layer\n conv = tf.layers.conv1d(embedding_inputs, self.config.num_filters, self.config.kernel_size, name='conv')\n # global max pooling layer\n gmp = tf.reduce_max(conv, reduction_indices=[1], name='gmp')\n\n with tf.name_scope(\"score\"):\n # Fully connected layer followed by dropout and ReLU activation\n fc = tf.layers.dense(gmp, self.config.hidden_dim, name='fc1')\n fc = tf.contrib.layers.dropout(fc, self.keep_prob)\n fc = tf.nn.relu(fc)\n\n # Classifier\n self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')\n self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1) # predicted class\n\n with tf.name_scope(\"optimize\"):\n # Cross-entropy loss\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)\n self.loss = tf.reduce_mean(cross_entropy)\n # Optimizer\n self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)\n\n with tf.name_scope(\"accuracy\"):\n # Accuracy\n correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)\n self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\nclass CnnModel(object):\n def __init__(self, vocab_dir, categories, config = None):\n self.vocab_dir = vocab_dir\n self.categories = categories\n self.cat_to_id = dict(zip(categories, range(len(categories))))\n self.words, self.word_to_id = read_vocab(self.vocab_dir)\n self.config = config\n if self.config is None:\n self.config = TCNNConfig()\n self.config.num_classes = len(categories)\n \n tf.reset_default_graph()\n self.model = TextCNN(self.config)\n c = tf.ConfigProto()\n c.gpu_options.allow_growth = True # Stop GPU from OOM\n self.session = tf.Session(config=c)\n self.session.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n\n def load(self, save_path):\n '''\n save_path: path to the best_validation\n '''\n print(\"Restore from %s\" % save_path)\n self.save_path = save_path\n self.saver.restore(sess=self.session, save_path=save_path)\n \n def predict(self, content):\n '''\n content: full text to be predicted\n '''\n data = [self.word_to_id[x] for x in content if x in self.word_to_id]\n\n feed_dict = {\n self.model.input_x: kr.preprocessing.sequence.pad_sequences([data], self.model.config.seq_length),\n self.model.keep_prob: 1.0\n }\n\n y_pred_cls = self.session.run(self.model.y_pred_cls, feed_dict=feed_dict)\n return self.categories[y_pred_cls[0]]\n\n @staticmethod\n def train(vocab_dir, categories, save_dir, train_dir, val_dir, config = None, full = False, num_epochs = 1):\n '''\n This is not supposed to be called by REST APIs\n\n vocab_dir: path to the vocab_dir\n categories: categories\n save_dir: parent dir to best_validation\n train_dir: path to the training file\n val_dir: path to the val file\n config: TCNNConfig\n full: whether to train from last time\n num_epochs: number of epochs\n '''\n save_path = os.path.join(save_dir, 'best_validation')\n if config is None:\n config = TCNNConfig()\n if full:\n print(\"Build vocab %s\" % 
vocab_dir)\n build_vocab(train_dir, vocab_dir, config.vocab_size)\n\n cnnModel = CnnModel(vocab_dir, categories, config)\n saver = cnnModel.saver\n session = cnnModel.session\n cat_to_id = cnnModel.cat_to_id\n word_to_id = cnnModel.word_to_id\n words = cnnModel.words\n config = cnnModel.config\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n if not full:\n cnnModel.load(save_path)\n\n model = cnnModel.model # <= TextCNN\n\n def feed_data(x_batch, y_batch, keep_prob):\n feed_dict = {\n model.input_x: x_batch,\n model.input_y: y_batch,\n model.keep_prob: keep_prob\n }\n return feed_dict\n\n def evaluate(sess, x_, y_):\n \"\"\"Evaluate accuracy and loss on the given data\"\"\"\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, 128)\n total_loss = 0.0\n total_acc = 0.0\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n feed_dict = feed_data(x_batch, y_batch, 1.0)\n loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n total_loss += loss * batch_len\n total_acc += acc * batch_len\n\n return total_loss / data_len, total_acc / data_len\n\n print(\"Loading training and validation data...\")\n # Load the training and validation sets\n start_time = time.time()\n x_train, y_train = process_file(train_dir, word_to_id, cat_to_id, config.seq_length)\n x_val, y_val = process_file(val_dir, word_to_id, cat_to_id, config.seq_length)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n print('Training and evaluating...')\n start_time = time.time()\n total_batch = 0 # total number of batches processed\n best_acc_val = 0.0 # best validation accuracy so far\n last_improved = 0 # batch index of the last improvement\n require_improvement = 1000 # stop training early if no improvement after 1000 batches\n\n flag = False\n for epoch in range(num_epochs):\n print('Epoch: %d / %d' % (epoch + 1, num_epochs))\n batch_train = batch_iter(x_train, y_train, config.batch_size)\n for x_batch, y_batch in batch_train:\n feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)\n\n if total_batch % config.print_per_batch == 0:\n # periodically report performance on the training and validation sets\n feed_dict[model.keep_prob] = 1.0\n loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)\n loss_val, acc_val = evaluate(session, x_val, y_val) # todo\n\n if acc_val > best_acc_val:\n # save the best result\n best_acc_val = acc_val\n last_improved = total_batch\n saver.save(sess=session, save_path=save_path)\n improved_str = '*'\n else:\n improved_str = ''\n\n time_dif = get_time_dif(start_time)\n msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \\\n + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'\n print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))\n\n session.run(model.optim, feed_dict=feed_dict) # run the optimization step\n total_batch += 1\n\n if total_batch - last_improved > require_improvement:\n # validation accuracy has not improved for a long time; stop training early\n print(\"No optimization for a long time, auto-stopping...\")\n flag = True\n break # break out of the batch loop\n if flag: # break out of the epoch loop as well\n break\n\ndef writetofile(rows, filename):\n '''\n rows: json from ES\n filename: location where the file is stored as csv\n '''\n with open_file(filename, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['sentiment', 'businessline', 'tag', 'content']) # header row must be passed as a single list\n for row in rows:\n content = row['content']\n sentiment = row['sentiment']\n business = row['businessline']\n tag = row['tag']\n writer.writerow([sentiment, business, tag, content])\n\ndef split_file(filename, base_dir, categories):\n '''\n filename: writetofile(filename)\n base_dir: base_dir to store the training files\n categories = {\n 'sentiment' : ['正面', '负面', '中立'],\n }\n '''\n for cat in categories:\n 
generate_training_files(filename, base_dir, cat, categories[cat])\n\ndef generate_training_files(filename, base_dir, category_name, categories, split_ratio=0.2):\n '''\n base_dir = /static/ => becomes /static/category_name/train.txt + /static/category_name/val.txt\n categories = ['正面', '负面', '中立']\n category_name = 'sentiment'\n split_ratio=0.2 : random.randint(1,10) <= 10 * (1 - split_ratio) goes to train\n '''\n base_dir = os.path.join(base_dir, category_name)\n train_file = os.path.join(base_dir, 'train.txt')\n val_file = os.path.join(base_dir, 'val.txt')\n\n train_file = open_file(train_file, 'w')\n val_file = open_file(val_file, 'w')\n\n x = 10 - int(10 * split_ratio)\n\n with open_file(filename, 'r') as f:\n reader = csv.reader(f)\n header = True\n pos = -1\n for row in reader:\n if header:\n for i, c in enumerate(row):\n if c == category_name:\n pos = i\n header = False # only the first row is the header; without this, no data rows are ever written\n else:\n if random.randint(1, 10) <= x:\n train_file.write('%s\\t%s\\n' % (row[pos], row[-1]))\n else:\n val_file.write('%s\\t%s\\n' % (row[pos], row[-1]))\n train_file.close()\n val_file.close()\n\nclass PSRModel(object):\n def __init__(self):\n self.safety_5 = ['死亡', '杀人', '绑架', '抢救无效', '衰竭', '被害人']\n self.safety_4 = ['故意伤害', '强奸', '性侵', '恶性案件', '自杀']\n self.safety_3 = ['抢救', '殴打', '猥亵', '较大财产损失', '生命危险', '救治', '车祸', '重伤', '重大事故']\n self.safety_2 = ['受伤', '肢体冲突', '人身骚扰', '事故', '损伤', '追尾', '纠纷', '刮蹭', '交通事故']\n self.safety_1 = []\n\n self.platform_5 = ['宕机', '瘫痪', '产品漏洞', '系统漏洞']\n self.platform_4 = ['定价不明', '价格不明', '定价不合理', '价格不合理', '制度不合理', '规则不合理', '管理混乱', '收入体系不合理']\n self.platform_3 = ['交通事故', '保险不赔', '人身安全', '财产受威胁']\n self.platform_2 = ['物品遗失', '物品丢失', '包丢失', '包遗失', '手机遗失', '手机丢失', '无法找回']\n self.platform_1 = []\n\n def identify_safety_level(self, content):\n nums = []\n freqs = []\n\n nums.append(0)\n freqs.append(0)\n\n num, freq = self.find_total_matching_times(content, self.safety_2)\n nums.append(num)\n freqs.append(freq)\n\n num, freq = self.find_total_matching_times(content, self.safety_3)\n nums.append(num)\n freqs.append(freq)\n\n num, freq = self.find_total_matching_times(content, self.safety_4)\n nums.append(num)\n freqs.append(freq)\n\n num, freq = self.find_total_matching_times(content, self.safety_5)\n nums.append(num)\n freqs.append(freq)\n\n safety_level = nums.index(max(nums)) + 1\n\n print('NUM:', nums, 'Max index=', nums.index(max(nums)), 'Max num=', max(nums))\n print('FREQ: ', freqs, 'Max index=', freqs.index(max(freqs)), 'Max freq=', max(freqs))\n\n return safety_level\n\n def identify_platform_level(self, content):\n nums = []\n freqs = []\n\n nums.append(0)\n freqs.append(0)\n\n num, freq = self.find_total_matching_times(content, self.platform_2)\n nums.append(num)\n freqs.append(freq)\n\n num, freq = self.find_total_matching_times(content, self.platform_3)\n nums.append(num)\n freqs.append(freq)\n\n num, freq = self.find_total_matching_times(content, self.platform_4)\n nums.append(num)\n freqs.append(freq)\n\n num, freq = self.find_total_matching_times(content, self.platform_5)\n nums.append(num)\n freqs.append(freq)\n\n platform_level = nums.index(max(nums)) + 1\n\n print('NUM:', nums, 'Max index=', nums.index(max(nums)), 'Max num=', max(nums))\n print('FREQ: ', freqs, 'Max index=', freqs.index(max(freqs)), 'Max freq=', max(freqs))\n\n return platform_level\n\n\n def find_total_matching_times(self, content, keywords):\n total_times = 0\n for keyword in keywords:\n times = self.find_matching_times(content, keyword)\n total_times = total_times + times\n frequency = total_times / len(keywords)\n return 
total_times, frequency\n\n\n def find_matching_times(self, content, keyword):\n # str.count returns the number of non-overlapping occurrences; this replaces a\n # hand-rolled scanner that also counted near-misses matching only the first\n # len(keyword) - 1 characters of the keyword.\n return content.count(keyword)\n\n def identify_propagation_level(self, media, duplicates, reads):\n if media == 5 or duplicates >= 50 or reads >= 100 * 1000:\n return 5\n\n if media == -1:\n return -1\n\n if media == 4 or duplicates >= 20 or reads >= 10 * 1000:\n return 4\n \n if media == 3 or duplicates >= 10 or reads >= 1000:\n return 3\n \n if media == 2 or duplicates >= 1 or reads >= 100:\n return 2\n \n return 1\n\n def identify_severity(self, P, S, R):\n '''\n P: {-1: unknown, 1 - 5}\n S: {0: non-negative, 1 - 5}\n R: {0: non-negative, 1 - 5}\n PSR: {-1: unknown, 0: non-negative, 1 - 125}\n Severity: {-1: unknown, 0: non-negative, 1 - 4, where 4 = A (very severe)}.\n If any of P, S, R is 5, severity is 4; otherwise, if P is unknown, severity is -1.\n '''\n severity = psr = P * S * R\n\n if 1 <= psr <= 10:\n severity = 1\n elif 10 < psr <= 30:\n severity = 2\n elif 30 < psr <=60:\n severity = 3\n elif 60 < psr <= 150:\n severity = 4\n\n if P == 5 or S == 5 or R == 5:\n severity = 4\n elif P == -1:\n severity = -1\n \n return severity" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.embedding_lookup", "tensorflow.nn.softmax", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.argmax", "tensorflow.train.Saver", "tensorflow.layers.conv1d", "tensorflow.ConfigProto", "numpy.arange", "tensorflow.layers.dense", "tensorflow.contrib.keras.preprocessing.sequence.pad_sequences", "tensorflow.train.AdamOptimizer", "tensorflow.nn.relu", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.get_variable", "tensorflow.name_scope", "tensorflow.contrib.layers.dropout", "tensorflow.reset_default_graph", "tensorflow.reduce_max", "tensorflow.device", "tensorflow.reduce_mean" ] ]
ryanjmccall/prod_mle_capstone
[ "027a62368703a52318354630114e59ac3012100c" ]
[ "tests/sentiment_classifier/task/test_checkpoint.py" ]
[ "import os\n\nimport numpy as np\nimport pandas as pd\nimport shutil\nimport unittest\n\nfrom sentiment_classifier.context import DATA_DIR\nfrom sentiment_classifier.task.checkpoint import (_CHECKPOINT_DF_FNAME, checkpoint_exists, load_checkpoint,\n write_checkpoint)\n\n\nclass TestCheckpoint(unittest.TestCase):\n\n def setUp(self) -> None:\n barray = np.array([1, 2, 3], dtype=np.float32).tobytes()\n self.df = pd.DataFrame({'foo': [1, 2], 'features': [barray, barray]})\n self.df.set_index('foo')\n self.checkpoint_dir = os.path.join(DATA_DIR, 'testing')\n self.checkpoint_file = os.path.join(self.checkpoint_dir, _CHECKPOINT_DF_FNAME)\n\n def tearDown(self) -> None:\n if os.path.exists(self.checkpoint_dir):\n shutil.rmtree(self.checkpoint_dir)\n\n def test_write_checkpoint(self):\n write_checkpoint.run(self.df, self.checkpoint_dir)\n\n assert os.path.exists(self.checkpoint_file)\n\n def test_checkpoint_exists_false(self):\n assert not checkpoint_exists.run(self.checkpoint_dir)\n\n def test_checkpoint_exists_true(self):\n write_checkpoint.run(self.df, self.checkpoint_dir)\n\n assert checkpoint_exists.run(self.checkpoint_dir)\n\n def test_load_checkpoint(self):\n write_checkpoint.run(self.df, self.checkpoint_dir)\n\n result = load_checkpoint.run(self.checkpoint_dir)\n\n self.df['features'] = self.df['features'].apply(lambda x: np.frombuffer(x, dtype=np.float32))\n assert self.df.equals(result)\n" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.frombuffer" ] ]
jsantoso2/Household_Amenity_Detection
[ "dd05fe86f31b3eb3478f7675080ebc0f59ef0b6c" ]
[ "docker_test/app.py" ]
[ "# common imports\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport streamlit as st\r\nfrom PIL import Image\r\nimport time\r\nimport cv2\r\nimport torch, torchvision\r\n\r\n# Some basic setup:\r\nimport detectron2\r\n\r\n# import some common detectron2 utilities\r\nfrom detectron2.engine import DefaultPredictor\r\nfrom detectron2.config import get_cfg\r\nfrom detectron2.utils.visualizer import Visualizer\r\nfrom detectron2.structures import Instances\r\nfrom detectron2.data.catalog import Metadata\r\n\r\n\r\[email protected]\r\ndef load_model():\r\n cfg = get_cfg()\r\n\r\n # add project-specific config\r\n cfg.merge_from_file('config_' + 'RN-101-3x' + '.yaml')\r\n\r\n # change inference type to cpu\r\n cfg.MODEL.DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n\r\n # load_model from saved file\r\n cfg.DATASETS.TEST = (\"data_test\", )\r\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model\r\n cfg.MODEL.WEIGHTS = 'model_' + 'RN-101-3x' + '_final.pth'\r\n\r\n return DefaultPredictor(cfg)\r\n\r\n\r\[email protected]\r\ndef inference(im, predictor, data_metadata, threshold):\r\n # Convert PIL image to array\r\n im = np.asarray(im)\r\n \r\n outputs = predictor(im)\r\n v = Visualizer(im[:, :, ::-1], \r\n metadata = data_metadata,\r\n scale = 0.5)\r\n\r\n # take only predictions with 25% confidence only for RetinaNet as they tend to overdraw\r\n filtered = outputs['instances'].to(\"cpu\")._fields\r\n filtered_idx = []\r\n for i in range(len(filtered['scores'])):\r\n if filtered['scores'][i] >= threshold:\r\n filtered_idx.append(i)\r\n\r\n filt_instance = Instances(image_size=(im.shape[0],im.shape[1]), pred_boxes = outputs['instances']._fields['pred_boxes'][filtered_idx], \r\n pred_classes = outputs['instances']._fields['pred_classes'][filtered_idx], \r\n scores = outputs['instances']._fields['scores'][filtered_idx])\r\n\r\n v = v.draw_instance_predictions(filt_instance.to(\"cpu\"))\r\n\r\n return v.get_image(), filt_instance\r\n\r\n\r\ndef main():\r\n st.title('Household Amenity Detection Project 👁')\r\n st.write(\"This Project is inspired by [Airbnb's machine learning powered amenity detection](https://medium.com/airbnb-engineering/amenity-detection-and-beyond-new-frontiers-of-computer-vision-at-airbnb-144a4441b72e).\")\r\n st.write(\"And also by [Daniel Bourke's Airbnb amenity detection replication](https://github.com/mrdbourke/airbnb-amenity-detection).\")\r\n\r\n st.subheader('How does it work?')\r\n st.write(\"1. Upload an image in either JPG or PNG or JPEG format.\")\r\n st.write(\"2. Pick a probability threshold to determine what object + boxes to render.\") \r\n st.write(\" Only objects with higher than threshold probability will be rendered.\")\r\n st.write(\"3. 
Click the Make Prediction Button to run the model.\")\r\n st.image(Image.open('demo.jpg'), use_column_width = True)\r\n\r\n\r\n st.subheader('Input File')\r\n\r\n objects = ['Bathtub', 'Bed', 'Billiard table', 'Ceiling fan', \\\r\n 'Coffeemaker', 'Couch', 'Countertop', 'Dishwasher', \\\r\n 'Fireplace', 'Fountain', 'Gas stove', 'Jacuzzi', \\\r\n 'Kitchen & dining room table', 'Microwave oven', \\\r\n 'Mirror', 'Oven', 'Pillow', 'Porch', 'Refrigerator', \\\r\n 'Shower', 'Sink', 'Sofa bed', 'Stairs', 'Swimming pool', \\\r\n 'Television', 'Toilet', 'Towel', 'Tree house', 'Washing machine', 'Wine rack']\r\n\r\n # load model\r\n predictor = load_model()\r\n\r\n # create metadata\r\n data_metadata = Metadata(name = 'data_train', evaluator_type='coco', \r\n thing_classes = objects)\r\n\r\n\r\n uploaded_file = st.file_uploader(\"Upload an Image\", type=[\"png\", \"jpg\", \"jpeg\", \"JPG\", \"PNG\", \"JPEG\"])\r\n if uploaded_file is not None:\r\n image = Image.open(uploaded_file)\r\n st.image(image, caption='Uploaded Image', use_column_width=True)\r\n\r\n # Make sure image is RGB\r\n image = image.convert(\"RGB\")\r\n\r\n st.subheader('Output:')\r\n st.write(\"Pick a prediction threshold where only objects with probabilities above the threshold will be displayed!\")\r\n pred_threshold = st.slider('Prediction Threshold:', 0.0, 1.0, 0.25)\r\n\r\n # get inference on image and display if button is clicked\r\n if st.button(\"Make Prediction\"):\r\n start_time = time.time()\r\n\r\n # Some number in the range 0-1 (probabilities)\r\n with st.spinner(\"Doing Prediction...\"):\r\n custom_pred, filt_instance = inference(image, predictor, data_metadata, pred_threshold)\r\n\r\n end_time = time.time()\r\n\r\n st.subheader('Predictions: ')\r\n # need to convert CV2 format to PIL format\r\n custom_pred = cv2.cvtColor(custom_pred, cv2.COLOR_RGB2BGR)\r\n st.image(custom_pred, caption = 'Predictions Image', use_column_width = True)\r\n\r\n st.write('Predicted Classes and Probabilities: ')\r\n # save predictions to dataframe\r\n pred_df = pd.DataFrame()\r\n object_name = []\r\n for elem in filt_instance.pred_classes.numpy():\r\n object_name.append(objects[elem])\r\n\r\n pred_df['Classes'] = object_name\r\n pred_df['Probabilities'] = filt_instance.scores.numpy()\r\n \r\n if pred_df.shape[0] == 0:\r\n st.write('No Objects Detected!')\r\n else:\r\n st.write(pred_df)\r\n\r\n # write prediction time\r\n pred_time = end_time - start_time\r\n st.write('Prediction Time: ' + ' {0:.2f}'.format(pred_time) + ' seconds')\r\n\r\n\r\n st.write(\"\")\r\n st.subheader(\"What is under the hood?\")\r\n st.write(\"Detectron2 RetinaNet model (PyTorch) and Streamlit web application\")\r\n st.image(Image.open('logo.jpg'), use_column_width = True)\r\n\r\n st.subheader(\"Supported Classes/Objects:\")\r\n st.write(\"• Bathtub • Bed • Billiard Table\")\r\n st.write(\"• Ceiling Fan • Coffeemaker • Couch\")\r\n st.write(\"• Countertop • Dishwasher • Fireplace\")\r\n st.write(\"• Fountain • Gas Stove • Jacuzzi\")\r\n st.write(\"• Dining Table • Microwave Oven • Mirror\")\r\n st.write(\"• Oven • Pillow • Porch\")\r\n st.write(\"• Refrigerator • Shower • Sink\")\r\n st.write(\"• Sofa bed • Stairs • Swimming Pool\")\r\n st.write(\"• Television • Toilet • Towel\")\r\n st.write(\"• Tree house • Washing Machine • Wine Rack\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()" ]
[ [ "pandas.DataFrame", "torch.cuda.is_available", "numpy.asarray" ] ]
iammosespaulr/crnn
[ "ec536e05b1eac25097d1e473800a5a33db3356f4" ]
[ "tool/create_dataset.py" ]
[ "import os\nimport lmdb # install lmdb by \"pip install lmdb\"\nimport cv2\nimport numpy as np\n\n\ndef checkImageIsValid(imageBin):\n if imageBin is None:\n return False\n imageBuf = np.fromstring(imageBin, dtype=np.uint8)\n img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)\n imgH, imgW = img.shape[0], img.shape[1]\n if imgH * imgW == 0:\n return False\n return True\n\n\ndef writeCache(env, cache):\n with env.begin(write=True) as txn:\n for k, v in cache.items():\n txn.put(k.encode())\n\n\ndef createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):\n \"\"\"\n Create LMDB dataset for CRNN training.\n\n ARGS:\n outputPath : LMDB output path\n imagePathList : list of image path\n labelList : list of corresponding groundtruth texts\n lexiconList : (optional) list of lexicon lists\n checkValid : if true, check the validity of every image\n \"\"\"\n assert(len(imagePathList) == len(labelList))\n nSamples = len(imagePathList)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n label = labelList[i]\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n if checkValid:\n if not checkImageIsValid(imageBin):\n print('%s is not a valid image' % imagePath)\n continue\n\n imageKey = 'image-%09d' % cnt\n labelKey = 'label-%09d' % cnt\n cache[imageKey] = imageBin\n cache[labelKey] = label\n if lexiconList:\n lexiconKey = 'lexicon-%09d' % cnt\n cache[lexiconKey] = ' '.join(lexiconList[i])\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt-1\n cache['num-samples'] = str(nSamples)\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "numpy.fromstring" ] ]
xiamo311/AquaSCALE
[ "28968d1b349c2370d8c20bda5b6675270e4ab65d" ]
[ "examples/resilience_metrics.py" ]
[ "from __future__ import print_function\nimport wntr\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\ndef topographic_metrics(wn):\n # Get a copy of the graph\n G = wn.get_graph_deep_copy()\n\n # Print general topographic information\n print(nx.info(G))\n\n # Plot node and edge attributes.\n junction_attr = wn.query_node_attribute('elevation',\n node_type=wntr.network.Junction)\n pipe_attr = wn.query_link_attribute('length', link_type=wntr.network.Pipe)\n wntr.graphics.plot_network(wn, node_attribute=junction_attr,\n link_attribute=pipe_attr,\n title='Node elevation and pipe length',\n node_size=40, link_width=2)\n\n # Compute link density\n print(\"Link density: \" + str(nx.density(G)))\n\n # Compute node degree\n node_degree = dict(G.degree())\n wntr.graphics.plot_network(wn, node_attribute=node_degree,\n title='Node Degree', node_size=40, node_range=[1,5])\n\n # Compute number of terminal nodes\n terminal_nodes = G.terminal_nodes()\n wntr.graphics.plot_network(wn, node_attribute=terminal_nodes,\n title='Terminal nodes', node_size=40, node_range=[0,1])\n print(\"Number of terminal nodes: \" + str(len(terminal_nodes)))\n print(\" \" + str(terminal_nodes))\n\n # Compute number of non-zero demand (NZD) nodes\n nzd_nodes = wn.query_node_attribute('base_demand', np.greater, 0.0)\n wntr.graphics.plot_network(wn, node_attribute=list(nzd_nodes.keys()),\n title='NZD nodes', node_size=40, node_range=[0,1])\n print(\"Number of NZD nodes: \" + str(len(nzd_nodes)))\n print(\" \" + str(nzd_nodes.keys()))\n\n # Compute pipes with diameter > threshold\n diameter = 0.508 # m (20 inches)\n pipes = wn.query_link_attribute('diameter', np.greater, diameter)\n wntr.graphics.plot_network(wn, link_attribute=list(pipes.keys()),\n title='Pipes > 20 inches', link_width=2,\n link_range=[0,1])\n print(\"Number of pipes > 20 inches: \" + str(len(pipes)))\n print(\" \" + str(pipes))\n\n # Compute nodes with elevation <= treshold\n elevation = 1.524 # m (5 feet)\n nodes = wn.query_node_attribute('elevation', np.less_equal, elevation)\n wntr.graphics.plot_network(wn, node_attribute=list(nodes.keys()),\n title='Nodes <= 5 ft elevation', node_size=40,\n node_range=[0,1])\n print(\"Number of nodes <= 5 ft elevation: \" + str(len(nodes)))\n print(\" \" + str(nodes))\n\n # Compute eccentricity, diameter, and average shortest path length\n # These all use an undirected graph\n uG = G.to_undirected() # undirected graph\n if nx.is_connected(uG):\n ecc = nx.eccentricity(uG)\n wntr.graphics.plot_network(wn, node_attribute=ecc, title='Eccentricity',\n node_size=40, node_range=[15, 30])\n\n print(\"Diameter: \" + str(nx.diameter(uG)))\n\n ASPL = nx.average_shortest_path_length(uG)\n print(\"Average shortest path length: \" + str(ASPL))\n\n # Compute cluster coefficient\n clust_coefficients = nx.clustering(nx.Graph(G))\n wntr.graphics.plot_network(wn, node_attribute=clust_coefficients,\n title='Clustering Coefficient', node_size=40)\n\n # Compute betweenness centrality\n bet_cen = nx.betweenness_centrality(G)\n wntr.graphics.plot_network(wn, node_attribute=bet_cen,\n title='Betweenness Centrality', node_size=40,\n node_range=[0, 0.4])\n central_pt_dom = G.central_point_dominance()\n print(\"Central point dominance: \" + str(central_pt_dom))\n\n # Compute articulation points\n Nap = list(nx.articulation_points(uG))\n Nap = list(set(Nap)) # get the unique nodes in Nap\n Nap_density = float(len(Nap))/uG.number_of_nodes()\n print(\"Density of articulation points: \" + str(Nap_density))\n 
wntr.graphics.plot_network(wn, node_attribute=Nap, title='Articulation Point',\n node_size=40, node_range=[0,1])\n\n # Compute bridges\n bridges = G.bridges()\n wntr.graphics.plot_network(wn, link_attribute=bridges, title='Bridges',\n link_width=2, link_range=[0,1])\n Nbr_density = float(len(bridges))/G.number_of_edges()\n print(\"Density of bridges: \" + str(Nbr_density))\n\n # Compute spectal gap\n spectral_gap = G.spectral_gap()\n print(\"Spectal gap: \" + str(spectral_gap))\n\n # Compute algebraic connectivity\n alg_con = G.algebraic_connectivity()\n print(\"Algebraic connectivity: \" + str(alg_con))\n\n # Critical ratio of defragmentation\n fc = G.critical_ratio_defrag()\n print(\"Critical ratio of defragmentation: \" + str(fc))\n\n # Compute closeness centrality\n clo_cen = nx.closeness_centrality(G)\n wntr.graphics.plot_network(wn, node_attribute=clo_cen,\n title='Closeness Centrality', node_size=40)\n\ndef hydraulic_metrics(wn):\n # Set nominal pressure\n for name, node in wn.junctions():\n node.nominal_pressure = 15\n\n # Simulate hydraulics\n sim = wntr.sim.WNTRSimulator(wn, mode='PDD')\n results = sim.run_sim()\n\n # Create list of node names\n junctions = [name for name, node in wn.junctions()]\n\n # Define pressure lower bound\n P_lower = 21.09 # m (30 psi)\n\n # Query pressure\n pressure = results.node.loc['pressure', :, junctions]\n mask = wntr.metrics.query(pressure, np.greater, P_lower)\n pressure_regulation = mask.all(axis=0).sum() # True over all time\n print(\"Fraction of nodes > 30 psi: \" + str(pressure_regulation))\n print(\"Average node pressure: \" +str(pressure.mean().mean()) + \" m\")\n wntr.graphics.plot_network(wn, node_attribute=pressure.min(axis=0), node_size=40,\n title= 'Min pressure')\n\n # Compute todini index\n todini = wntr.metrics.todini(results.node,results.link,wn, P_lower)\n plt.figure()\n plt.plot(todini)\n plt.ylabel('Todini Index')\n plt.xlabel('Time, hr')\n print(\"Todini Index\")\n print(\" Mean: \" + str(np.mean(todini)))\n print(\" Max: \" + str(np.max(todini)))\n print(\" Min: \" + str(np.min(todini)))\n\n # Create a weighted graph for flowrate at time 36 hours\n t = 36*3600\n attr = results.link.loc['flowrate', t, :]\n G_flowrate_36hrs = wn.get_graph_deep_copy()\n G_flowrate_36hrs.weight_graph(link_attribute=attr)\n\n # Compute betweenness-centrality at time 36 hours\n bet_cen = nx.betweenness_centrality(G_flowrate_36hrs)\n wntr.graphics.plot_network(wn, node_attribute=bet_cen,\n title='Betweenness Centrality', node_size=40)\n central_pt_dom = G_flowrate_36hrs.central_point_dominance()\n print(\"Central point dominance: \" + str(central_pt_dom))\n\n # Compute entropy at time 36, for node 185\n [S, Shat] = wntr.metrics.entropy(G_flowrate_36hrs, sources=None, sinks=['185'])\n\n # Plot all simple paths between the Lake/River and node 185\n link_count = G_flowrate_36hrs.links_in_simple_paths(sources=['Lake', 'River'], sinks=['185'])\n wntr.graphics.plot_network(wn, link_attribute=link_count, link_width=1,\n node_attribute = {'River': 1, 'Lake': 1, '185': 1},\n node_size=30, title='Link count in paths')\n\n # Calculate entropy for 1 day, all nodes\n shat = []\n G_flowrate_t = wn.get_graph_deep_copy()\n for t in np.arange(0, 24*3600+1,3600):\n attr = results.link.loc['flowrate', t, :]\n G_flowrate_t.weight_graph(link_attribute=attr)\n entropy = wntr.metrics.entropy(G_flowrate_t)\n shat.append(entropy[1])\n plt.figure()\n plt.plot(shat)\n plt.ylabel('System Entropy')\n plt.xlabel('Time, hr')\n print(\"Entropy\")\n print(\" Mean: \" + 
str(np.mean(shat)))\n print(\" Max: \" + str(np.nanmax(shat)))\n print(\" Min: \" + str(np.nanmin(shat)))\n\n # Compute fraction delivered volume and fraction delivered demand\n demand_factor = 0.9\n average_times = True\n average_nodes = False\n fdv = wntr.metrics.fdv(results.node, average_times, average_nodes)\n fdd = wntr.metrics.fdd(results.node, demand_factor, average_times, average_nodes)\n wntr.graphics.plot_network(wn, node_attribute=fdv, node_size=40,\n node_range=[0,1], title='FDV averaged over all times')\n wntr.graphics.plot_network(wn, node_attribute=fdd, node_size=40,\n node_range=[0,1], title='FDD averaged over all times')\n\n\ndef water_quality_metrics(wn):\n # Simulate hydraulics and water quality\n sim = wntr.sim.EpanetSimulator(wn)\n wn.options.quality = 'CHEMICAL'\n wn.add_pattern('SourcePattern', start_time=2*3600, end_time=15*3600)\n wn.add_source('Source1', '121', 'SETPOINT', 1000, 'SourcePattern')\n wn.add_source('Source2', '123', 'SETPOINT', 1000, 'SourcePattern')\n results_CHEM = sim.run_sim()\n \n wn.options.quality = 'AGE'\n results_AGE = sim.run_sim()\n \n wn.options.quality = 'TRACE'\n wn.options.quality_value = '111'\n results_TRACE = sim.run_sim()\n\n # plot chem scenario\n CHEM_at_5hr = results_CHEM.node.loc['quality', 5*3600, :]\n wntr.graphics.plot_network(wn, node_attribute=CHEM_at_5hr, node_size=20,\n title='Chemical concentration, time = 5 hours')\n CHEM_at_node = results_CHEM.node.loc['quality', :, '208']\n plt.figure()\n CHEM_at_node.plot(title='Chemical concentration, node 208')\n\n # Plot age scenario (convert to hours)\n AGE_at_5hr = results_AGE.node.loc['quality', 5*3600, :]/3600.0\n wntr.graphics.plot_network(wn, node_attribute=AGE_at_5hr, node_size=20,\n title='Water age (hrs), time = 5 hours')\n AGE_at_node = results_AGE.node.loc['quality', :, '208']/3600.0\n plt.figure()\n AGE_at_node.plot(title='Water age, node 208')\n\n # Plot trace scenario\n TRACE_at_5hr = results_TRACE.node.loc['quality', 5*3600, :]\n wntr.graphics.plot_network(wn, node_attribute=TRACE_at_5hr, node_size=20,\n title='Trace percent, time = 5 hours')\n TRACE_at_node = results_TRACE.node.loc['quality', :, '208']\n plt.figure()\n TRACE_at_node.plot(title='Trace percent, node 208')\n\n # Calculate average water age (last 48 hours)\n age = results_AGE.node.loc['quality',:,:]\n age_last_48h = age.loc[age.index[-1]-48*3600:age.index[-1]]/3600\n age_last_48h.index = age_last_48h.index/3600\n age_last_48h.plot(legend=False)\n plt.ylabel('Water age (h)')\n plt.xlabel('Time (h)')\n wntr.graphics.plot_network(wn, node_attribute=age_last_48h.mean(),\n title='Average water age (last 48 hours)', node_size=40)\n print(\"Average water age (last 48 hours): \" +str(age_last_48h.mean().mean()) + \" hr\")\n\n # Query concentration\n chem_upper_bound = 750\n chem = results_CHEM.node.loc['quality', :, :]\n mask = wntr.metrics.query(chem, np.greater, chem_upper_bound)\n chem_regulation = mask.any(axis=0) # True for any time\n wntr.graphics.plot_network(wn, node_attribute=chem_regulation, node_size=40,\n title= 'Nodes with conc > upper bound')\n wntr.graphics.plot_network(wn, node_attribute=chem.max(axis=0), node_size=40,\n title= 'Max concentration')\n print(\"Fraction of nodes > chem upper bound: \" + str(chem_regulation.sum()))\n print(\"Average node concentration: \" +str(chem.mean().mean()))\n\n quality_upper_bound = 0.0035 # kg/m3 (3.5 mg/L)\n average_times = True\n average_nodes = False\n fdq = wntr.metrics.fdq(results_CHEM.node, quality_upper_bound, average_times, average_nodes)\n 
wntr.graphics.plot_network(wn, node_attribute=fdq, node_size=40,\n node_range=[0,1], title='FDQ averaged over all times')\n\ndef water_security_metrics(wn):\n # Define WQ scenario\n wn.options.quality = 'CHEMICAL'\n wn.add_pattern('SourcePattern', start_time=2*3600, end_time=15*3600)\n wn.add_source('Source1', '121', 'SETPOINT', 1000, 'SourcePattern')\n\n # Simulate hydraulics and water quality for each scenario\n sim = wntr.sim.EpanetSimulator(wn)\n results_CHEM = sim.run_sim()\n\n MC = wntr.metrics.mass_contaminant_consumed(results_CHEM.node)\n VC = wntr.metrics.volume_contaminant_consumed(results_CHEM.node, 0.001)\n EC = wntr.metrics.extent_contaminant(results_CHEM.node, results_CHEM.link, wn, 0.001)\n\n wntr.graphics.plot_network(wn, node_attribute=MC.sum(axis=0), node_range = [0,400], node_size=40,\n title='Total mass consumed')\n\n plt.figure()\n EC.sum(axis=1).plot(title='Extent of contamination')\n\n\ndef population_impacted_metrics(wn):\n # Compute population per node\n pop = wntr.metrics.population(wn)\n total_population = pop.sum()\n print(\"Total population: \" + str(total_population))\n wntr.graphics.plot_network(wn, node_attribute=pop, node_range = [0,400], node_size=40,\n title='Population, Total = ' + str(total_population))\n\n # Find population and nodes impacted by pressure less than 40 m\n sim = wntr.sim.EpanetSimulator(wn)\n results = sim.run_sim()\n junctions = [name for name, node in wn.junctions()]\n pop_impacted = wntr.metrics.population_impacted(pop, results.node['pressure',:,junctions], np.less, 40)\n plt.figure()\n pop_impacted.sum(axis=1).plot(title='Total population with pressure < 40 m')\n nodes_impacted = wntr.metrics.query(results.node['pressure',:,junctions], np.less, 40)\n wntr.graphics.plot_network(wn, node_attribute=nodes_impacted.any(axis=0), node_size=40,\n title='Nodes impacted')\n\ndef cost_ghg_metrics(wn):\n # Compute network cost\n network_cost = wntr.metrics.cost(wn)\n print(\"Network cost: $\" + str(round(network_cost,2)))\n\n # Compute greenhouse gas emissions\n network_ghg = wntr.metrics.ghg_emissions(wn)\n print(\"Network GHG emissions: \" + str(round(network_ghg,2)))\n\n\nif __name__ == '__main__':\n # Create a water network model\n inp_file = 'networks/Net3.inp'\n wn = wntr.network.WaterNetworkModel(inp_file)\n\n # Compute resilience metrics\n topographic_metrics(wn)\n hydraulic_metrics(wn)\n water_quality_metrics(wn)\n water_security_metrics(wn)\n population_impacted_metrics(wn)\n cost_ghg_metrics(wn)\n" ]
[ [ "numpy.max", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "numpy.min", "numpy.mean", "matplotlib.pyplot.figure", "numpy.nanmin", "numpy.arange", "matplotlib.pyplot.ylabel", "numpy.nanmax" ] ]
rochi88/dshare
[ "9dc46baff822be2ae7a7541fa10535a0299fbb5e" ]
[ "bdshare/stock/trading.py" ]
[ "import time\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom bdshare.util import vars as vs\n\n\ndef get_current_trade_data(symbol=None, retry_count=1, pause=0.001):\n \"\"\"\n get last stock price.\n :param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'\n :return: dataframecd \n \"\"\"\n\n for _ in range(retry_count):\n time.sleep(pause)\n try:\n r = requests.get(vs.DSE_URL+vs.DSE_LSP_URL)\n if r.status_code != 200:\n r = requests.get(vs.DSE_ALT_URL+vs.DSE_LSP_URL)\n except Exception as e:\n print(e)\n else:\n soup = BeautifulSoup(r.content, 'html5lib')\n quotes = [] # a list to store quotes\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white shares-table fixedHeader'})\n\n # print(table)\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'symbol': cols[1].text.strip().replace(\",\", \"\"),\n 'ltp': cols[2].text.strip().replace(\",\", \"\"),\n 'high': cols[3].text.strip().replace(\",\", \"\"),\n 'low': cols[4].text.strip().replace(\",\", \"\"),\n 'close': cols[5].text.strip().replace(\",\", \"\"),\n 'ycp': cols[6].text.strip().replace(\",\", \"\"),\n 'change': cols[7].text.strip().replace(\"--\", \"0\"),\n 'trade': cols[8].text.strip().replace(\",\", \"\"),\n 'value': cols[9].text.strip().replace(\",\", \"\"),\n 'volume': cols[10].text.strip().replace(\",\", \"\")\n })\n df = pd.DataFrame(quotes)\n if symbol:\n df = df.loc[df.symbol == symbol.upper()]\n return df\n else:\n return df\n\n\ndef get_dsex_data(symbol=None, retry_count=1, pause=0.001):\n \"\"\"\n get dseX share price.\n :param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'\n :return: dataframe\n \"\"\"\n\n for _ in range(retry_count):\n time.sleep(pause)\n try:\n r = requests.get(vs.DSE_URL+vs.DSEX_INDEX_VALUE)\n if r.status_code != 200:\n r = requests.get(vs.DSE_ALT_URL+vs.DSEX_INDEX_VALUE)\n except Exception as e:\n print(e)\n else:\n soup = BeautifulSoup(r.content, 'html5lib')\n quotes = [] # a list to store quotes\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white shares-table'})\n\n # print(table)\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'symbol': cols[1].text.strip().replace(\",\", \"\"),\n 'ltp': cols[2].text.strip().replace(\",\", \"\"),\n 'high': cols[3].text.strip().replace(\",\", \"\"),\n 'low': cols[4].text.strip().replace(\",\", \"\"),\n 'close': cols[5].text.strip().replace(\",\", \"\"),\n 'ycp': cols[6].text.strip().replace(\",\", \"\"),\n 'change': cols[7].text.strip().replace(\"--\", \"0\"),\n 'trade': cols[8].text.strip().replace(\",\", \"\"),\n 'value': cols[9].text.strip().replace(\",\", \"\"),\n 'volume': cols[10].text.strip().replace(\",\", \"\")\n })\n df = pd.DataFrame(quotes)\n if symbol:\n df = df.loc[df.symbol == symbol.upper()]\n return df\n else:\n return df\n\n\ndef get_current_trading_code():\n \"\"\"\n get last stock codes.\n :return: dataframe\n \"\"\"\n try:\n r = requests.get(vs.DSE_URL+vs.DSE_LSP_URL)\n if r.status_code != 200:\n r = requests.get(vs.DSE_ALT_URL+vs.DSE_LSP_URL)\n except Exception as e:\n print(e)\n #soup = BeautifulSoup(r.text, 'html.parser')\n soup = BeautifulSoup(r.content, 'html5lib')\n quotes = [] # a list to store quotes\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white shares-table fixedHeader'})\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'symbol': cols[1].text.strip().replace(\",\", \"\")})\n df = 
pd.DataFrame(quotes)\n return df\n\n\ndef get_hist_data(start=None, end=None, code='All Instrument'):\n \"\"\"\n get historical stock price.\n :param start: str, Start date e.g.: '2020-03-01'\n :param end: str, End date e.g.: '2020-03-02'\n :param code: str, Instrument symbol e.g.: 'ACI'\n :return: dataframe\n \"\"\"\n # data to be sent to post request\n data = {'startDate': start,\n 'endDate': end,\n 'inst': code,\n 'archive': 'data'}\n try:\n r = requests.get(url=vs.DSE_URL+vs.DSE_DEA_URL, params=data)\n if r.status_code != 200:\n r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_DEA_URL, params=data)\n except Exception as e:\n print(e)\n\n #soup = BeautifulSoup(r.text, 'html.parser')\n soup = BeautifulSoup(r.content, 'html5lib')\n\n quotes = [] # a list to store quotes\n\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white shares-table fixedHeader'})\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'date': cols[1].text.strip().replace(\",\", \"\"),\n 'symbol': cols[2].text.strip().replace(\",\", \"\"),\n 'ltp': cols[3].text.strip().replace(\",\", \"\"),\n 'high': cols[4].text.strip().replace(\",\", \"\"),\n 'low': cols[5].text.strip().replace(\",\", \"\"),\n 'open': cols[6].text.strip().replace(\",\", \"\"),\n 'close': cols[7].text.strip().replace(\",\", \"\"),\n 'ycp': cols[8].text.strip().replace(\",\", \"\"),\n 'trade': cols[9].text.strip().replace(\",\", \"\"),\n 'value': cols[10].text.strip().replace(\",\", \"\"),\n 'volume': cols[11].text.strip().replace(\",\", \"\")\n })\n df = pd.DataFrame(quotes)\n if 'date' in df.columns:\n df = df.set_index('date')\n df = df.sort_index(ascending=False)\n else:\n print('No data found')\n return df\n\n\ndef get_basic_hist_data(start=None, end=None, code='All Instrument', index=None, retry_count=1, pause=0.001):\n \"\"\"\n get historical stock price.\n :param start: str, Start date e.g.: '2020-03-01'\n :param end: str, End date e.g.: '2020-03-02'\n :param code: str, Instrument symbol e.g.: 'ACI'\n :param retry_count : int, e.g.: 3\n :param pause : int, e.g.: 0\n :return: dataframe\n \"\"\"\n # data to be sent to post request\n data = {'startDate': start,\n 'endDate': end,\n 'inst': code,\n 'archive': 'data'}\n\n for _ in range(retry_count):\n time.sleep(pause)\n try:\n r = requests.get(url=vs.DSE_URL+vs.DSE_DEA_URL, params=data)\n if r.status_code != 200:\n r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_DEA_URL, params=data)\n except Exception as e:\n print(e)\n else:\n #soup = BeautifulSoup(r.text, 'html.parser')\n soup = BeautifulSoup(r.content, 'html5lib')\n\n # columns: date, open, high, close, low, volume\n quotes = [] # a list to store quotes\n\n table = soup.find('table', attrs={\n 'class': 'table table-bordered background-white shares-table fixedHeader'})\n\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'date': cols[1].text.strip().replace(\",\", \"\"),\n 'open': float(cols[6].text.strip().replace(\",\", \"\")),\n 'high': float(cols[4].text.strip().replace(\",\", \"\")),\n 'low': float(cols[5].text.strip().replace(\",\", \"\")),\n 'close': float(cols[7].text.strip().replace(\",\", \"\")),\n 'volume': int(cols[11].text.strip().replace(\",\", \"\"))\n })\n df = pd.DataFrame(quotes)\n if 'date' in df.columns:\n if (index == 'date'):\n df = df.set_index('date')\n df = df.sort_index(ascending=True)\n else:\n print('No data found')\n return df\n\n\ndef get_close_price_data(start=None, end=None, code='All 
Instrument'):\n \"\"\"\n get stock close price.\n :param start: str, Start date e.g.: '2020-03-01'\n :param end: str, End date e.g.: '2020-03-02'\n :param code: str, Instrument symbol e.g.: 'ACI'\n :return: dataframe\n \"\"\"\n # data to be sent to post request\n data = {'startDate': start,\n 'endDate': end,\n 'inst': code,\n 'archive': 'data'}\n try:\n r = requests.get(url=vs.DSE_URL+vs.DSE_CLOSE_PRICE_URL, params=data)\n if r.status_code != 200:\n r = requests.get(url=vs.DSE_ALT_URL+vs.DSE_CLOSE_PRICE_URL, params=data)\n except Exception as e:\n print(e)\n else:\n soup = BeautifulSoup(r.content, 'html5lib')\n\n # columns: date, open, high, close, low, volume\n quotes = [] # a list to store quotes\n\n table = soup.find(\n 'table', attrs={'class': 'table table-bordered background-white'})\n\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'date': cols[1].text.strip().replace(\",\", \"\"),\n 'symbol': cols[2].text.strip().replace(\",\", \"\"),\n 'close': cols[3].text.strip().replace(\",\", \"\"),\n 'ycp': cols[4].text.strip().replace(\",\", \"\")\n })\n df = pd.DataFrame(quotes)\n if 'date' in df.columns:\n df = df.set_index('date')\n df = df.sort_index(ascending=False)\n else:\n print('No data found')\n return df\n\n\ndef get_last_trade_price_data():\n df = pd.read_fwf('https://dsebd.org/datafile/quotes.txt', sep='\\t', skiprows=4)\n return df\n\n\ndef get_cse_current_trade_data(symbol=None):\n \"\"\"\n get last stock price.\n :param symbol: str, Instrument symbol e.g.: 'ACI' or 'aci'\n :return: dataframe\n \"\"\"\n try:\n r = requests.get(vs.CSE_URL+vs.CSE_LSP_URL)\n except Exception as e:\n print(e)\n soup = BeautifulSoup(r.text, 'html.parser')\n quotes = [] # a list to store quotes\n table = soup.find('table', attrs={'id': 'dataTable'})\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'symbol': cols[1].text.strip().replace(\",\", \"\"),\n 'ltp': cols[2].text.strip().replace(\",\", \"\"),\n 'open': cols[3].text.strip().replace(\",\", \"\"),\n 'high': cols[4].text.strip().replace(\",\", \"\"),\n 'low': cols[5].text.strip().replace(\",\", \"\"),\n 'ycp': cols[6].text.strip().replace(\",\", \"\"),\n 'trade': cols[7].text.strip().replace(\",\", \"\"),\n 'value': cols[8].text.strip().replace(\",\", \"\"),\n 'volume': cols[9].text.strip().replace(\",\", \"\")\n })\n df = pd.DataFrame(quotes)\n if symbol:\n df = df.loc[df.symbol == symbol.upper()]\n return df\n else:\n return df" ]
[ [ "pandas.DataFrame", "pandas.read_fwf" ] ]
MasterEndless/Final-year-project
[ "ee9fc9e31a3b0855668077231de12da881c09035" ]
[ "Audio model/Audio Network/test.py" ]
[ "#coding:utf8\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom config import opt\n\nfrom loader import get_loader\nfrom models import get_model\nimport numpy as np\n\ntest_loader = DataLoader(get_loader('imagenet','/home/liuhan/C3D/Mel_spectrum_resize/val'), batch_size=1, shuffle=False)\n \nmodel = get_model('ResNet', in_channels=3, img_rows=224, num_classes=7)\nmodel.cuda(0)\nmodel.eval()\nmodel.load(\"/home/liuhan/C3D/pytorch-classification-master/checkpoints/ResNet.ckpt\")\ncriterion = nn.CrossEntropyLoss().cuda()\n\ntest_loss = 0\ncorrect = 0\n\n\nall_result = np.zeros((len(test_loader.dataset),7))\nlabel_list = np.zeros(len(test_loader.dataset))\ncount = 0\nfor data, target in test_loader:\n data, target = Variable(data.cuda(0)), Variable(target.cuda(0))\n label_list[count] = np.array(target)\n output = model(data)\n probs = nn.Softmax(dim=1)(output)\n all_result[count] = np.array(probs.data)[0]\n count = count + 1\n \n test_loss += criterion(output, target).data[0]\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\nnp.save(\"Audio.npy\",all_result)\nprint(label_list.shape)\n\n" ]
[ [ "numpy.array", "torch.nn.Softmax", "torch.nn.CrossEntropyLoss", "numpy.save" ] ]
JulianYu123456/icnn
[ "0aaf4b5cd13d71d98b0d05f367e1f71657ea6eb8" ]
[ "RL/src/helper.py" ]
[ "import tensorflow as tf\n\ndef variable_summaries(var, name=None, suffix=None):\n if name is None:\n if suffix is None:\n name = var.name\n else:\n name = '/'.join(var.name.split('/')[:-1])+'/'+suffix\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.scalar_summary('mean/' + name, mean)\n with tf.name_scope('stdev'):\n stdev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.scalar_summary('stdev/' + name, stdev)\n tf.scalar_summary('max/' + name, tf.reduce_max(var))\n tf.scalar_summary('min/' + name, tf.reduce_min(var))\n tf.histogram_summary(name, var)\n" ]
[ [ "tensorflow.reduce_min", "tensorflow.histogram_summary", "tensorflow.scalar_summary", "tensorflow.reduce_max", "tensorflow.name_scope", "tensorflow.reduce_mean", "tensorflow.square" ] ]
thangnguyenminh/MaskRCNN
[ "ae6aa0018b9fbc146319d1e99caf807e331b4c64" ]
[ "mrcnn/utils.py" ]
[ "\"\"\"\nMask R-CNN\nCommon utility functions and classes.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport sys\nimport os\nimport logging\nimport math\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport scipy\nimport skimage.color\nimport skimage.io\nimport skimage.transform\ntry: #python3\n from urllib.request import urlopen\nexcept: #python2\n from urllib2 import urlopen\nimport shutil\nimport warnings\nfrom distutils.version import LooseVersion\n\n# URL from which to download the latest COCO trained weights\nCOCO_MODEL_URL = \"https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5\"\n\n\n############################################################\n# Bounding Boxes\n############################################################\n\ndef extract_bboxes(mask):\n \"\"\"Compute bounding boxes from masks.\n mask: [height, width, num_instances]. Mask pixels are either 1 or 0.\n\n Returns: bbox array [num_instances, (y1, x1, y2, x2)].\n \"\"\"\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([y1, x1, y2, x2])\n return boxes.astype(np.int32)\n\n\ndef compute_iou(box, boxes, box_area, boxes_area):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2]\n boxes: [boxes_count, (y1, x1, y2, x2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n\n Note: the areas are passed in rather than calculated here for\n efficiency. 
Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n return iou\n\n\ndef compute_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n\n For better performance, pass the largest set first and the smaller second.\n \"\"\"\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps\n\n\ndef compute_overlaps_masks(masks1, masks2):\n \"\"\"Computes IoU overlaps between two sets of masks.\n masks1, masks2: [Height, Width, instances]\n \"\"\"\n \n # If either set of masks is empty return empty result\n if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:\n return np.zeros((masks1.shape[-1], masks2.shape[-1]))\n # flatten masks and compute their areas\n masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)\n masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)\n area1 = np.sum(masks1, axis=0)\n area2 = np.sum(masks2, axis=0)\n\n # intersections and union\n intersections = np.dot(masks1.T, masks2)\n union = area1[:, None] + area2[None, :] - intersections\n overlaps = intersections / union\n\n return overlaps\n\n\ndef non_max_suppression(boxes, scores, threshold):\n \"\"\"Performs non-maximum suppression and returns indices of kept boxes.\n boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lies outside the box.\n scores: 1-D array of box scores.\n threshold: Float. IoU threshold to use for filtering.\n \"\"\"\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indices of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)\n\n\ndef apply_box_deltas(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)]. 
Note that (y2, x2) is outside the box.\n deltas: [N, (dy, dx, log(dh), log(dw))]\n \"\"\"\n boxes = boxes.astype(np.float32)\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= np.exp(deltas[:, 2])\n width *= np.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n return np.stack([y1, x1, y2, x2], axis=1)\n\n\ndef box_refinement_graph(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]\n \"\"\"\n box = tf.cast(box, tf.float32)\n gt_box = tf.cast(gt_box, tf.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = tf.log(gt_height / height)\n dw = tf.log(gt_width / width)\n\n result = tf.stack([dy, dx, dh, dw], axis=1)\n return result\n\n\ndef box_refinement(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is\n assumed to be outside the box.\n \"\"\"\n box = box.astype(np.float32)\n gt_box = gt_box.astype(np.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = np.log(gt_height / height)\n dw = np.log(gt_width / width)\n\n return np.stack([dy, dx, dh, dw], axis=1)\n\n\n############################################################\n# Dataset\n############################################################\n\nclass Dataset(object):\n \"\"\"The base class for dataset classes.\n To use it, create a new class that adds functions specific to the dataset\n you want to use. 
For example:\n\n class CatsAndDogsDataset(Dataset):\n def load_cats_and_dogs(self):\n ...\n def load_mask(self, image_id):\n ...\n def image_reference(self, image_id):\n ...\n\n See COCODataset and ShapesDataset as examples.\n \"\"\"\n\n def __init__(self, class_map=None):\n self._image_ids = []\n self.image_info = []\n # Background is always the first class\n self.class_info = [{\"source\": \"\", \"id\": 0, \"name\": \"BG\"}]\n self.source_class_ids = {}\n\n def add_class(self, source, class_id, class_name):\n assert \".\" not in source, \"Source name cannot contain a dot\"\n # Does the class exist already?\n for info in self.class_info:\n if info['source'] == source and info[\"id\"] == class_id:\n # source.class_id combination already available, skip\n return\n # Add the class\n self.class_info.append({\n \"source\": source,\n \"id\": class_id,\n \"name\": class_name,\n })\n\n def add_image(self, source, image_id, path, **kwargs):\n image_info = {\n \"id\": image_id,\n \"source\": source,\n \"path\": path,\n }\n image_info.update(kwargs)\n self.image_info.append(image_info)\n\n def image_reference(self, image_id):\n \"\"\"Return a link to the image in its source Website or details about\n the image that help looking it up or debugging it.\n\n Override for your dataset, but pass to this function\n if you encounter images not in your dataset.\n \"\"\"\n return \"\"\n\n def prepare(self, class_map=None):\n \"\"\"Prepares the Dataset class for use.\n\n TODO: class map is not supported yet. When done, it should handle mapping\n classes from different datasets to the same class ID.\n \"\"\"\n\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n # Mapping from source class and image IDs to internal IDs\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n self.image_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.image_info, self.image_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)\n\n def map_source_class_id(self, source_class_id):\n \"\"\"Takes a source class ID and returns the int class ID assigned to it.\n\n For example:\n dataset.map_source_class_id(\"coco.12\") -> 23\n \"\"\"\n return self.class_from_source_map[source_class_id]\n\n def get_source_class_id(self, class_id, source):\n \"\"\"Map an internal class ID to the corresponding class ID in the source dataset.\"\"\"\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']\n\n @property\n def image_ids(self):\n return self._image_ids\n\n def source_image_link(self, image_id):\n \"\"\"Returns the path or URL to the image.\n Override this to return a URL to the image if it's available 
online for easy\n debugging.\n \"\"\"\n return self.image_info[image_id][\"path\"]\n\n def load_image(self, image_id):\n \"\"\"Load the specified image and return a [H,W,3] Numpy array.\n \"\"\"\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n\n def load_mask(self, image_id):\n \"\"\"Load instance masks for the given image.\n\n Different datasets use different ways to store masks. Override this\n method to load instance masks and return them in the form of an\n array of binary masks of shape [height, width, instances].\n\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n a binary mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids\n\n\ndef resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode=\"square\"):\n \"\"\"Resizes an image keeping the aspect ratio unchanged.\n\n min_dim: if provided, resizes the image such that its smaller\n dimension == min_dim\n max_dim: if provided, ensures that the image longest side doesn't\n exceed this value.\n min_scale: if provided, ensure that the image is scaled up by at least\n this percent even if min_dim doesn't require it.\n mode: Resizing mode.\n none: No resizing. Return the image unchanged.\n square: Resize and pad with zeros to get a square image\n of size [max_dim, max_dim].\n pad64: Pads width and height with zeros to make them multiples of 64.\n If min_dim or min_scale are provided, it scales the image up\n before padding. max_dim is ignored in this mode.\n The multiple of 64 is needed to ensure smooth scaling of feature\n maps up and down the 6 levels of the FPN pyramid (2**6=64).\n crop: Picks random crops from the image. First, scales the image based\n on min_dim and min_scale, then picks a random crop of\n size min_dim x min_dim. Can be used in training only.\n max_dim is not used in this mode.\n\n Returns:\n image: the resized image\n window: (y1, x1, y2, x2). If max_dim is provided, padding might\n be inserted in the returned image. If so, this window is the\n coordinates of the image part of the full image (excluding\n the padding). 
The x2, y2 pixels are not included.\n scale: The scale factor used to resize the image\n padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0), (0, 0)]\n crop = None\n\n if mode == \"none\":\n return image, window, scale, padding, crop\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n if min_scale and scale < min_scale:\n scale = min_scale\n\n # Does it exceed max dim?\n if max_dim and mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = resize(image, (round(h * scale), round(w * scale)),\n preserve_range=True)\n\n # Need padding or cropping?\n if mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = random.randint(0, (h - min_dim))\n x = random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y:y + min_dim, x:x + min_dim]\n window = (0, 0, min_dim, min_dim)\n else:\n raise Exception(\"Mode {} not supported\".format(mode))\n return image.astype(image_dtype), window, scale, padding, crop\n\n\ndef resize_mask(mask, scale, padding, crop=None):\n \"\"\"Resizes a mask using the given scale and padding.\n Typically, you get the scale and padding from resize_image() to\n ensure both, the image and the mask, are resized consistently.\n\n scale: mask scaling factor\n padding: Padding to add to the mask in the form\n [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Suppress warning from scipy 0.13.0, the output shape of zoom() is\n # calculated with round() instead of int()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)\n if crop is not None:\n y, x, h, w = crop\n mask = mask[y:y + h, x:x + w]\n else:\n mask = np.pad(mask, padding, mode='constant', constant_values=0)\n return mask\n\n\ndef minimize_mask(bbox, mask, mini_shape):\n \"\"\"Resize masks to a smaller version to reduce memory load.\n Mini-masks can be resized back to image scale using expand_masks()\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mini_mask = np.zeros(mini_shape + 
(mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n # Pick slice and cast to bool in case load_mask() returned wrong dtype\n m = mask[:, :, i].astype(bool)\n y1, x1, y2, x2 = bbox[i][:4]\n m = m[y1:y2, x1:x2]\n if m.size == 0:\n raise Exception(\"Invalid bounding box with area of zero\")\n # Resize with bilinear interpolation\n m = resize(m, mini_shape)\n mini_mask[:, :, i] = np.around(m).astype(np.bool)\n return mini_mask\n\n\ndef expand_mask(bbox, mini_mask, image_shape):\n \"\"\"Resizes mini masks back to image size. Reverses the change\n of minimize_mask().\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mini_mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n h = y2 - y1\n w = x2 - x1\n # Resize with bilinear interpolation\n m = resize(m, (h, w))\n mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)\n return mask\n\n\n# TODO: Build and use this function to reduce code duplication\ndef mold_mask(mask, config):\n pass\n\n\ndef unmold_mask(mask, bbox, image_shape):\n \"\"\"Converts a mask generated by the neural network to a format similar\n to its original shape.\n mask: [height, width] of type float. A small, typically 28x28 mask.\n bbox: [y1, x1, y2, x2]. The box to fit the mask in.\n\n Returns a binary mask with the same size as the original image.\n \"\"\"\n threshold = 0.5\n y1, x1, y2, x2 = bbox\n mask = resize(mask, (y2 - y1, x2 - x1))\n mask = np.where(mask >= threshold, 1, 0).astype(np.bool)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2], dtype=np.bool)\n full_mask[y1:y2, x1:x2] = mask\n return full_mask\n\n\n############################################################\n# Anchors\n############################################################\n\ndef generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. 
For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))\n scales = scales.flatten()\n ratios = ratios.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales / np.sqrt(ratios)\n widths = scales * np.sqrt(ratios)\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride\n shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n\n # Reshape to get a list of (y, x) and a list of (h, w)\n box_centers = np.stack(\n [box_centers_y, box_centers_x], axis=2).reshape([-1, 2])\n box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])\n\n # Convert to corner coordinates (y1, x1, y2, x2)\n boxes = np.concatenate([box_centers - 0.5 * box_sizes,\n box_centers + 0.5 * box_sizes], axis=1)\n return boxes\n\n\ndef generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,\n anchor_stride):\n \"\"\"Generate anchors at different levels of a feature pyramid. Each scale\n is associated with a level of the pyramid, but each ratio is used in\n all levels of the pyramid.\n\n Returns:\n anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted\n with the same order of the given scales. So, anchors of scale[0] come\n first, then anchors of scale[1], and so on.\n \"\"\"\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n anchors = []\n for i in range(len(scales)):\n anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],\n feature_strides[i], anchor_stride))\n return np.concatenate(anchors, axis=0)\n\n\n############################################################\n# Miscellaneous\n############################################################\n\ndef trim_zeros(x):\n \"\"\"It's common to have tensors larger than the available data and\n pad with zeros. This function removes rows that are all zeros.\n\n x: [rows, columns].\n \"\"\"\n assert len(x.shape) == 2\n return x[~np.all(x == 0, axis=1)]\n\n\ndef compute_matches(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5, score_threshold=0.0):\n \"\"\"Finds matches between prediction and ground truth instances.\n\n Returns:\n gt_match: 1-D array. For each GT box it has the index of the matched\n predicted box.\n pred_match: 1-D array. 
For each predicted box, it has the index of\n the matched ground truth box.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Trim zero padding\n # TODO: cleaner to do zero unpadding upstream\n gt_boxes = trim_zeros(gt_boxes)\n gt_masks = gt_masks[..., :gt_boxes.shape[0]]\n pred_boxes = trim_zeros(pred_boxes)\n pred_scores = pred_scores[:pred_boxes.shape[0]]\n # Sort predictions by score from high to low\n indices = np.argsort(pred_scores)[::-1]\n pred_boxes = pred_boxes[indices]\n pred_class_ids = pred_class_ids[indices]\n pred_scores = pred_scores[indices]\n pred_masks = pred_masks[..., indices]\n\n # Compute IoU overlaps [pred_masks, gt_masks]\n overlaps = compute_overlaps_masks(pred_masks, gt_masks)\n\n # Loop through predictions and find matching ground truth boxes\n match_count = 0\n pred_match = -1 * np.ones([pred_boxes.shape[0]])\n gt_match = -1 * np.ones([gt_boxes.shape[0]])\n for i in range(len(pred_boxes)):\n # Find best matching ground truth box\n # 1. Sort matches by score\n sorted_ixs = np.argsort(overlaps[i])[::-1]\n # 2. Remove low scores\n low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]\n if low_score_idx.size > 0:\n sorted_ixs = sorted_ixs[:low_score_idx[0]]\n # 3. Find the match\n for j in sorted_ixs:\n # If ground truth box is already matched, go to next one\n if gt_match[j] > -1:\n continue\n # If we reach IoU smaller than the threshold, end the loop\n iou = overlaps[i, j]\n if iou < iou_threshold:\n break\n # Do we have a match?\n if pred_class_ids[i] == gt_class_ids[j]:\n match_count += 1\n gt_match[j] = i\n pred_match[i] = j\n break\n\n return gt_match, pred_match, overlaps\n\n\ndef compute_ap(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5):\n \"\"\"Compute Average Precision at a set IoU threshold (default 0.5).\n\n Returns:\n mAP: Mean Average Precision\n precisions: List of precisions at different class score thresholds.\n recalls: List of recall values at different class score thresholds.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Get matches and overlaps\n gt_match, pred_match, overlaps = compute_matches(\n gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold)\n\n # Compute precision and recall at each prediction box step\n precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)\n recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)\n\n # Pad with start and end values to simplify the math\n precisions = np.concatenate([[0], precisions, [0]])\n recalls = np.concatenate([[0], recalls, [1]])\n\n # Ensure precision values decrease but don't increase. This way, the\n # precision value at each recall threshold is the maximum it can be\n # for all following recall thresholds, as specified by the VOC paper.\n for i in range(len(precisions) - 2, -1, -1):\n precisions[i] = np.maximum(precisions[i], precisions[i + 1])\n\n # Compute mean AP over recall range\n indices = np.where(recalls[:-1] != recalls[1:])[0] + 1\n mAP = np.sum((recalls[indices] - recalls[indices - 1]) *\n precisions[indices])\n\n return mAP, precisions, recalls, overlaps\n\n\ndef compute_ap_range(gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_thresholds=None, verbose=1):\n \"\"\"Compute AP over a range or IoU thresholds. 
Default range is 0.5-0.95.\"\"\"\n # Default is 0.5 to 0.95 with increments of 0.05\n iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)\n \n # Compute AP over range of IoU thresholds\n AP = []\n for iou_threshold in iou_thresholds:\n ap, precisions, recalls, overlaps =\\\n compute_ap(gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold)\n if verbose:\n print(\"AP @{:.2f}:\\t {:.3f}\".format(iou_threshold, ap))\n AP.append(ap)\n AP = np.array(AP).mean()\n if verbose:\n print(\"AP @{:.2f}-{:.2f}:\\t {:.3f}\".format(\n iou_thresholds[0], iou_thresholds[-1], AP))\n return AP\n\n\ndef compute_recall(pred_boxes, gt_boxes, iou):\n \"\"\"Compute the recall at the given IoU threshold. It's an indication\n of how many GT boxes were found by the given prediction boxes.\n\n pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n \"\"\"\n # Measure overlaps\n overlaps = compute_overlaps(pred_boxes, gt_boxes)\n iou_max = np.max(overlaps, axis=1)\n iou_argmax = np.argmax(overlaps, axis=1)\n positive_ids = np.where(iou_max >= iou)[0]\n matched_gt_boxes = iou_argmax[positive_ids]\n\n recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]\n return recall, positive_ids\n\n\n# ## Batch Slicing\n# Some custom layers support a batch size of 1 only, and require a lot of work\n# to support batches greater than 1. This function slices an input tensor\n# across the batch dimension and feeds batches of size 1. Effectively,\n# an easy way to support batches > 1 quickly with little code modification.\n# In the long run, it's more efficient to modify the code to support large\n# batches and getting rid of this function. Consider this a temporary solution\ndef batch_slice(inputs, graph_fn, batch_size, names=None):\n \"\"\"Splits inputs into slices and feeds each slice to a copy of the given\n computation graph and then combines the results. It allows you to run a\n graph on a batch of inputs even if the graph is written to support one\n instance only.\n\n inputs: list of tensors. All must have the same first dimension length\n graph_fn: A function that returns a TF tensor that's part of a graph.\n batch_size: number of slices to divide the data into.\n names: If provided, assigns names to the resulting tensors.\n \"\"\"\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n outputs = []\n for i in range(batch_size):\n inputs_slice = [x[i] for x in inputs]\n output_slice = graph_fn(*inputs_slice)\n if not isinstance(output_slice, (tuple, list)):\n output_slice = [output_slice]\n outputs.append(output_slice)\n # Change outputs from a list of slices where each is\n # a list of outputs to a list of outputs and each has\n # a list of slices\n outputs = list(zip(*outputs))\n\n if names is None:\n names = [None] * len(outputs)\n\n result = [tf.stack(o, axis=0, name=n)\n for o, n in zip(outputs, names)]\n if len(result) == 1:\n result = result[0]\n\n return result\n\n\ndef download_trained_weights(coco_model_path, verbose=1):\n \"\"\"Download COCO trained weights from Releases.\n\n coco_model_path: local path of COCO trained weights\n \"\"\"\n if verbose > 0:\n print(\"Downloading pretrained model to \" + coco_model_path + \" ...\")\n with urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:\n shutil.copyfileobj(resp, out)\n if verbose > 0:\n print(\"... 
done downloading pretrained model!\")\n\n\ndef norm_boxes(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [N, (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [N, (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)\n\n\ndef denorm_boxes(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [N, (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [N, (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)\n\n\ndef resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,\n preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):\n \"\"\"A wrapper for Scikit-Image resize().\n\n Scikit-Image generates warnings on every call to resize() if it doesn't\n receive the right parameters. The right parameters depend on the version\n of skimage. This solves the problem by using different parameters per\n version. And it provides a central place to control resizing defaults.\n \"\"\"\n if LooseVersion(skimage.__version__) >= LooseVersion(\"0.14\"):\n # New in 0.14: anti_aliasing. Default it to False for backward\n # compatibility with skimage 0.13.\n return skimage.transform.resize(\n image, output_shape,\n order=order, mode=mode, cval=cval, clip=clip,\n preserve_range=preserve_range, anti_aliasing=anti_aliasing,\n anti_aliasing_sigma=anti_aliasing_sigma)\n else:\n return skimage.transform.resize(\n image, output_shape,\n order=order, mode=mode, cval=cval, clip=clip,\n preserve_range=preserve_range)\n" ]
[ [ "numpy.dot", "numpy.minimum", "numpy.exp", "numpy.multiply", "numpy.where", "tensorflow.stack", "numpy.cumsum", "tensorflow.cast", "numpy.concatenate", "numpy.max", "numpy.divide", "numpy.empty", "numpy.log", "numpy.argmax", "numpy.arange", "numpy.sqrt", "numpy.around", "numpy.array", "numpy.delete", "numpy.pad", "numpy.reshape", "numpy.zeros", "tensorflow.log", "numpy.stack", "numpy.argsort", "scipy.ndimage.zoom", "numpy.sum", "numpy.ones", "numpy.any", "numpy.all", "numpy.meshgrid", "numpy.maximum" ] ]
BywinTec/OpenKS
[ "379732a9a4a418c5960cd5c47391099147a15ca5" ]
[ "openks/models/model.py" ]
[ "# Copyright (c) 2021 OpenKS Authors, DCD Research Lab, Zhejiang University. \n# All Rights Reserved.\n\n\"\"\"\nAn abstract class for openks models to be trained with Paddle\n\"\"\"\nimport logging\nfrom typing import Tuple, List, Any\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nimport paddle.fluid as fluid\nfrom paddle.fluid import Variable\nfrom ..common.register import Register\nfrom ..abstract.mtg import MTG\nfrom ..abstract.mmd import MMD\n\nlogger = logging.getLogger(__name__)\n\n\nclass PaddleModel(Register):\n\tdef __init__(self, **kwargs):\n\t\tself.forward()\n\n\tdef forward(self, *args):\n\t\treturn NotImplemented\n\n\tdef train_forward(self, *args):\n\t\treturn NotImplemented\n\n\tdef test_forward(self, *args):\n\t\treturn NotImplemented\n\n\tdef backward(self, loss, opt):\n\t\treturn NotImplemented\n\n\tdef loss(self, *args):\n\t\treturn NotImplemented\n\n\t@staticmethod\n\tdef _algorithm(*args):\n\t\treturn NotImplemented\n\n\n\nclass TorchModel(nn.Module, Register):\n\tdef __init__(self, **kwargs):\n\t\tsuper(TorchModel, self).__init__()\n\n\tdef forward(self, *args):\n\t\treturn NotImplemented\n\n\tdef loss(self, *args):\n\t\treturn NotImplemented\n\n\tdef predict(self, *args):\n\t\treturn NotImplemented\n\n\tdef _algorithm(self, *args):\n\t\treturn NotImplemented\n\n\t# getter and setter for Ray distributed training\n\tdef get_weights(self):\n\t\treturn {k: v.cpu() for k, v in self.state_dict().items()}\n\n\tdef set_weights(self, weights):\n\t\tself.load_state_dict(weights)\n\n\tdef get_gradients(self):\n\t\tgrads = []\n\t\tfor p in self.parameters():\n\t\t\tgrad = None if p.grad is None else p.grad.data.cpu().numpy()\n\t\t\tgrads.append(grad)\n\t\treturn grads\n\n\tdef set_gradients(self, gradients):\n\t\tfor g, p in zip(gradients, self.parameters()):\n\t\t\tif g is not None:\n\t\t\t\tp.grad = torch.from_numpy(g)\n\n\nclass TorchDataset(data.Dataset):\n\tdef __init__(self, samples):\n\t\tself.samples = samples\n\n\tdef __len__(self):\n\t\treturn len(self.samples)\n\n\tdef __getitem__(self, index):\n\t\titem = self.samples[index]\n\t\treturn item\n\n\nclass TFModel(Register):\n\tdef __init__(self, **kwargs):\n\t\treturn NotImplemented\n\n\nclass MLModel(Register):\n\tdef __init__(self, **kwargs):\n\t\tself.process()\n\n\tdef process(self, *args):\n\t\treturn NotImplemented\n\n\nclass OpenKSModel(Register):\n\tdef __init__(self):\n\t\tpass\n\n\nclass KGLearnModel(OpenKSModel):\n\t''' Base class for knowledge graph representation learning trainer '''\n\tdef __init__(self, name: str = 'model-name', graph: MTG = None, args: List = None):\n\t\tself.name = name\n\t\tself.graph = graph\n\n\tdef parse_args(self):\n\t\treturn NotImplemented\n\n\tdef triples_reader(self, *args):\n\t\treturn NotImplemented\n\n\tdef triples_generator(self, *args):\n\t\treturn NotImplemented\n\n\tdef evaluate(self, *args):\n\t\treturn NotImplemented\n\n\tdef load_model(self, *args):\n\t\treturn NotImplemented\n\n\tdef save_model(self, *args):\n\t\treturn NotImplemented\n\n\tdef run(self, *args):\n\t\treturn NotImplemented\n\n\nclass KELearnModel(OpenKSModel):\n\t''' Base class for knowledge graph building trainer, such as text and image information extraction '''\n\tdef __init__(self, name: str = 'model-name', dataset: MMD = None, args: List = None):\n\t\tself.name = name\n\t\tself.dataset = dataset\n\n\tdef parse_args(self):\n\t\treturn NotImplemented\n\n\tdef data_reader(self, *args):\n\t\treturn NotImplemented\n\n\tdef evaluate(self, *args):\n\t\treturn 
NotImplemented\n\n\tdef load_model(self, *args):\n\t\treturn NotImplemented\n\n\tdef save_model(self, *args):\n\t\treturn NotImplemented\n\n\tdef run(self, *args):\n\t\treturn NotImplemented\n\n\nclass RecModel(OpenKSModel):\n\t''' Base class for recommendation trainer '''\n\tdef __init__(self, name: str = 'model-name', dataset: MMD = None, args: List = None):\n\t\tself.name = name\n\t\tself.dataset = dataset\n\n\tdef parse_args(self):\n\t\treturn NotImplemented\n\n\tdef data_reader(self, *args):\n\t\treturn NotImplemented\n\n\tdef evaluate(self, *args):\n\t\treturn NotImplemented\n\n\tdef load_model(self, *args):\n\t\treturn NotImplemented\n\n\tdef save_model(self, *args):\n\t\treturn NotImplemented\n\n\tdef run(self, *args):\n\t\treturn NotImplemented\n" ]
[ [ "torch.from_numpy" ] ]
yihui-he/KL-Loss
[ "962a687c7caca56b3b8562b437a8370077a59074" ]
[ "detectron/modeling/retinanet_heads.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"RetinaNet model heads and losses. See: https://arxiv.org/abs/1708.02002.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\n\nfrom detectron.core.config import cfg\nimport detectron.utils.blob as blob_utils\n\nfrom detectron.utils.c2 import const_fill\nfrom detectron.utils.c2 import gauss_fill\nimport detectron.utils.c2 as c2_utils\nfrom caffe2.python import workspace, core\n\ndef get_retinanet_bias_init(model):\n \"\"\"Initialize the biases for the conv ops that predict class probabilities.\n Initialization is performed such that at the start of training, all\n locations are predicted to be background with high probability\n (e.g., ~0.99 = 1 - cfg.RETINANET.PRIOR_PROB). See the Focal Loss paper for\n details.\n \"\"\"\n prior_prob = cfg.RETINANET.PRIOR_PROB\n scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE\n aspect_ratios = len(cfg.RETINANET.ASPECT_RATIOS)\n if cfg.RETINANET.SOFTMAX:\n # Multiclass softmax case\n bias = np.zeros((model.num_classes, 1), dtype=np.float32)\n bias[0] = np.log(\n (model.num_classes - 1) * (1 - prior_prob) / (prior_prob)\n )\n bias = np.vstack(\n [bias for _ in range(scales_per_octave * aspect_ratios)]\n )\n bias_init = (\n 'GivenTensorFill', {\n 'values': bias.astype(dtype=np.float32)\n }\n )\n else:\n # Per-class sigmoid (binary classification) case\n bias_init = (\n 'ConstantFill', {\n 'value': -np.log((1 - prior_prob) / prior_prob)\n }\n )\n return bias_init\n\n\ndef add_fpn_retinanet_outputs(model, blobs_in, dim_in, spatial_scales):\n \"\"\"RetinaNet head. For classification and box regression, we can chose to\n have the same conv tower or a separate tower. \"bl_feat_list\" stores the list\n of feature blobs for bbox prediction. 
These blobs can be shared cls feature\n blobs if we share the tower or else are independent blobs.\n \"\"\"\n dim_out = dim_in\n k_max = cfg.FPN.RPN_MAX_LEVEL # coarsest level of pyramid\n k_min = cfg.FPN.RPN_MIN_LEVEL # finest level of pyramid\n A = len(cfg.RETINANET.ASPECT_RATIOS) * cfg.RETINANET.SCALES_PER_OCTAVE\n\n # compute init for bias\n bias_init = get_retinanet_bias_init(model)\n\n assert len(blobs_in) == k_max - k_min + 1\n bbox_feat_list = []\n cls_pred_dim = (\n model.num_classes if cfg.RETINANET.SOFTMAX else (model.num_classes - 1)\n )\n # unpacked bbox feature and add prediction layers\n bbox_regr_dim = (\n 4 * (model.num_classes - 1) if cfg.RETINANET.CLASS_SPECIFIC_BBOX else 4\n )\n\n # ==========================================================================\n # classification tower with logits and prob prediction\n # ==========================================================================\n for lvl in range(k_min, k_max + 1):\n bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order\n # classification tower stack convolution starts\n for nconv in range(cfg.RETINANET.NUM_CONVS):\n suffix = 'n{}_fpn{}'.format(nconv, lvl)\n dim_in, dim_out = dim_in, dim_in\n if lvl == k_min:\n bl_out = model.Conv(\n bl_in,\n 'retnet_cls_conv_' + suffix,\n dim_in,\n dim_out,\n 3,\n stride=1,\n pad=1,\n weight_init=('GaussianFill', {\n 'std': 0.01\n }),\n bias_init=('ConstantFill', {\n 'value': 0.\n })\n )\n else:\n bl_out = model.ConvShared(\n bl_in,\n 'retnet_cls_conv_' + suffix,\n dim_in,\n dim_out,\n 3,\n stride=1,\n pad=1,\n weight='retnet_cls_conv_n{}_fpn{}_w'.format(nconv, k_min),\n bias='retnet_cls_conv_n{}_fpn{}_b'.format(nconv, k_min)\n )\n bl_in = model.Relu(bl_out, bl_out)\n bl_feat = bl_in\n # cls tower stack convolution ends. Add the logits layer now\n if lvl == k_min:\n retnet_cls_pred = model.Conv(\n bl_feat,\n 'retnet_cls_pred_fpn{}'.format(lvl),\n dim_in,\n cls_pred_dim * A,\n 3,\n pad=1,\n stride=1,\n weight_init=('GaussianFill', {\n 'std': 0.01\n }),\n bias_init=bias_init\n )\n else:\n retnet_cls_pred = model.ConvShared(\n bl_feat,\n 'retnet_cls_pred_fpn{}'.format(lvl),\n dim_in,\n cls_pred_dim * A,\n 3,\n pad=1,\n stride=1,\n weight='retnet_cls_pred_fpn{}_w'.format(k_min),\n bias='retnet_cls_pred_fpn{}_b'.format(k_min)\n )\n if not model.train:\n if cfg.RETINANET.SOFTMAX:\n model.net.GroupSpatialSoftmax(\n retnet_cls_pred,\n 'retnet_cls_prob_fpn{}'.format(lvl),\n num_classes=cls_pred_dim\n )\n else:\n model.net.Sigmoid(\n retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl)\n )\n if cfg.RETINANET.SHARE_CLS_BBOX_TOWER:\n bbox_feat_list.append(bl_feat)\n\n # ==========================================================================\n # bbox tower if not sharing features with the classification tower with\n # logits and prob prediction\n # ==========================================================================\n if not cfg.RETINANET.SHARE_CLS_BBOX_TOWER:\n for lvl in range(k_min, k_max + 1):\n bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order\n for nconv in range(cfg.RETINANET.NUM_CONVS):\n suffix = 'n{}_fpn{}'.format(nconv, lvl)\n dim_in, dim_out = dim_in, dim_in\n if lvl == k_min:\n bl_out = model.Conv(\n bl_in,\n 'retnet_bbox_conv_' + suffix,\n dim_in,\n dim_out,\n 3,\n stride=1,\n pad=1,\n weight_init=('GaussianFill', {\n 'std': 0.01\n }),\n bias_init=('ConstantFill', {\n 'value': 0.\n })\n )\n else:\n bl_out = model.ConvShared(\n bl_in,\n 'retnet_bbox_conv_' + suffix,\n dim_in,\n dim_out,\n 3,\n stride=1,\n pad=1,\n 
weight='retnet_bbox_conv_n{}_fpn{}_w'.format(\n nconv, k_min\n ),\n bias='retnet_bbox_conv_n{}_fpn{}_b'.format(\n nconv, k_min\n )\n )\n bl_in = model.Relu(bl_out, bl_out)\n # Add octave scales and aspect ratio\n # At least 1 convolution for dealing different aspect ratios\n bl_feat = bl_in\n bbox_feat_list.append(bl_feat)\n # Depending on the features [shared/separate] for bbox, add prediction layer\n for i, lvl in enumerate(range(k_min, k_max + 1)):\n bbox_pred = 'retnet_bbox_pred_fpn{}'.format(lvl)\n bl_feat = bbox_feat_list[i]\n if lvl == k_min:\n model.Conv(\n bl_feat,\n bbox_pred,\n dim_in,\n bbox_regr_dim * A,\n 3,\n pad=1,\n stride=1,\n weight_init=('GaussianFill', {\n 'std': 0.01\n }),\n bias_init=('ConstantFill', {\n 'value': 0.\n })\n )\n else:\n model.ConvShared(\n bl_feat,\n bbox_pred,\n dim_in,\n bbox_regr_dim * A,\n 3,\n pad=1,\n stride=1,\n weight='retnet_bbox_pred_fpn{}_w'.format(k_min),\n bias='retnet_bbox_pred_fpn{}_b'.format(k_min)\n )\n\ndef add_fpn_retinanet_losses(model):\n loss_gradients = {}\n gradients, losses = [], []\n\n k_max = cfg.FPN.RPN_MAX_LEVEL # coarsest level of pyramid\n k_min = cfg.FPN.RPN_MIN_LEVEL # finest level of pyramid\n\n model.AddMetrics(['retnet_fg_num', 'retnet_bg_num'])\n # ==========================================================================\n # bbox regression loss - SelectSmoothL1Loss for multiple anchors at a location\n # ==========================================================================\n for lvl in range(k_min, k_max + 1):\n suffix = 'fpn{}'.format(lvl)\n bbox_loss = model.net.SelectSmoothL1Loss(\n [\n 'retnet_bbox_pred_' + suffix,\n 'retnet_roi_bbox_targets_' + suffix,\n 'retnet_roi_fg_bbox_locs_' + suffix, 'retnet_fg_num'\n ],\n 'retnet_loss_bbox_' + suffix,\n beta=cfg.RETINANET.BBOX_REG_BETA,\n scale=model.GetLossScale() * cfg.RETINANET.BBOX_REG_WEIGHT\n )\n gradients.append(bbox_loss)\n losses.append('retnet_loss_bbox_' + suffix)\n\n # ==========================================================================\n # cls loss - depends on softmax/sigmoid outputs\n # ==========================================================================\n for lvl in range(k_min, k_max + 1):\n suffix = 'fpn{}'.format(lvl)\n cls_lvl_logits = 'retnet_cls_pred_' + suffix\n if not cfg.RETINANET.SOFTMAX:\n cls_focal_loss = model.net.SigmoidFocalLoss(\n [\n cls_lvl_logits, 'retnet_cls_labels_' + suffix,\n 'retnet_fg_num'\n ],\n ['fl_{}'.format(suffix)],\n gamma=cfg.RETINANET.LOSS_GAMMA,\n alpha=cfg.RETINANET.LOSS_ALPHA,\n scale=model.GetLossScale(),\n num_classes=model.num_classes - 1\n )\n gradients.append(cls_focal_loss)\n losses.append('fl_{}'.format(suffix))\n else:\n cls_focal_loss, gated_prob = model.net.SoftmaxFocalLoss(\n [\n cls_lvl_logits, 'retnet_cls_labels_' + suffix,\n 'retnet_fg_num'\n ],\n ['fl_{}'.format(suffix), 'retnet_prob_{}'.format(suffix)],\n gamma=cfg.RETINANET.LOSS_GAMMA,\n alpha=cfg.RETINANET.LOSS_ALPHA,\n scale=model.GetLossScale(),\n num_classes=model.num_classes\n )\n gradients.append(cls_focal_loss)\n losses.append('fl_{}'.format(suffix))\n\n loss_gradients.update(blob_utils.get_loss_gradients(model, gradients))\n model.AddLosses(losses)\n return loss_gradients\n" ]
[ [ "numpy.log", "numpy.zeros" ] ]
lutzkuen/statarb
[ "0da5e5a5c44e81fe7154e2aa7ef5d33a9ade17b1" ]
[ "analyst.py" ]
[ "#!/usr/bin/env python \n\nfrom pandas.stats.moments import ewma\n\nfrom loaddata import *\nfrom regress import *\nfrom util import *\n\n\ndef calc_rtg_daily(daily_df, horizon):\n print(\"Caculating daily rtg...\")\n result_df = filter_expandable(daily_df)\n print(\"Calculating rtg0...\")\n # result_df['dk'] = np.exp( -1.0 * halflife * (result_df['gdate'] - result_df['last']).astype('timedelta64[D]').astype(int) )\n\n result_df['cum_ret'] = pd.rolling_sum(result_df['log_ret'], horizon)\n\n result_df['sum'] = result_df['mean'] * result_df['count']\n result_df['det_diff'] = result_df['sum'].diff()\n result_df['det_diff_dk'] = ewma(result_df['det_diff'], halflife=horizon)\n result_df['rtg0'] = result_df['det_diff_dk'] * result_df['det_diff_dk']\n\n # result_df['median'] = -1.0 * (result_df['median'] - 3)\n # result_df['med_diff'] = result_df['median'].unstack().diff().stack()\n # result_df['med_diff_dk'] = pd.rolling_sum( result_df['dk'] * result_df['med_diff'], window=horizon )\n # result_df['rtg0'] = (np.sign(result_df['med_diff_dk']) * np.sign(result_df['cum_ret'])).clip(lower=0) * result_df['med_diff_dk']\n\n demean = lambda x: (x - x.mean())\n indgroups = result_df[['rtg0', 'gdate', 'ind1']].groupby(['gdate', 'ind1'], sort=True).transform(demean)\n result_df['rtg0_ma'] = indgroups['rtg0']\n\n # result_df['rtg0_ma'] = result_df['rtg0_ma'] * (np.sign(result_df['rtg0_ma']) * np.sign(result_df['cum_ret']))\n\n # result_df['rtg0_ma'] = result_df['rtg0']\n\n shift_df = result_df.unstack().shift(1).stack()\n result_df['rtg1_ma'] = shift_df['rtg0_ma']\n\n return result_df\n\n\ndef rtg_fits(daily_df, horizon, name, middate=None):\n insample_daily_df = daily_df\n if middate is not None:\n insample_daily_df = daily_df[daily_df.index.get_level_values('date') < middate]\n outsample_daily_df = daily_df[daily_df.index.get_level_values('date') >= middate]\n\n outsample_daily_df['rtg'] = np.nan\n\n fits_df = pd.DataFrame(columns=['horizon', 'coef', 'indep', 'tstat', 'nobs', 'stderr'])\n for ii in range(1, horizon + 1):\n fitresults_df = regress_alpha(insample_daily_df, 'rtg0_ma', ii, False, 'daily')\n fits_df = fits_df.append(fitresults_df, ignore_index=True)\n plot_fit(fits_df, \"rtg_daily_\" + name + \"_\" + df_dates(insample_daily_df))\n fits_df.set_index(keys=['indep', 'horizon'], inplace=True)\n\n coef0 = fits_df.ix['rtg0_ma'].ix[horizon].ix['coef']\n print(\"Coef{}: {}\".format(0, coef0))\n outsample_daily_df['rtg0_ma_coef'] = coef0\n\n outsample_daily_df['rtg'] = outsample_daily_df['rtg0_ma'] * outsample_daily_df['rtg0_ma_coef']\n\n return outsample_daily_df\n\n\ndef calc_rtg_forecast(daily_df, horizon, middate):\n daily_results_df = calc_rtg_daily(daily_df, horizon)\n forwards_df = calc_forward_returns(daily_df, horizon)\n daily_results_df = pd.concat([daily_results_df, forwards_df], axis=1)\n\n # results = list()\n # for sector_name in daily_results_df['sector_name'].dropna().unique():\n # print \"Running rtg for sector {}\".format(sector_name)\n # sector_df = daily_results_df[ daily_results_df['sector_name'] == sector_name ]\n # result_df = rtg_fits(sector_df, horizon, sector_name, middate)\n # results.append(result_df)\n # result_df = pd.concat(results, verify_integrity=True)\n\n result_df = rtg_fits(daily_results_df, horizon, \"\", middate)\n\n # res1 = rtg_fits( daily_results_df[ daily_results_df['med_diff_dk'] > 0 ], horizon, \"up\", middate)\n # res2 = rtg_fits( daily_results_df[ daily_results_df['med_diff_dk'] < 0 ], horizon, \"dn\", middate)\n # result_df = pd.concat([res1, res2], 
verify_integrity=True)\n\n return result_df\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='G')\n parser.add_argument(\"--start\", action=\"store\", dest=\"start\", default=None)\n parser.add_argument(\"--end\", action=\"store\", dest=\"end\", default=None)\n parser.add_argument(\"--mid\", action=\"store\", dest=\"mid\", default=None)\n parser.add_argument(\"--lag\", action=\"store\", dest=\"lag\", default=20)\n # parser.add_argument(\"--horizon\",action=\"store\",dest=\"horizon\",default=20)\n args = parser.parse_args()\n\n start = args.start\n end = args.end\n lookback = 30\n horizon = int(args.lag)\n pname = \"./rtg\" + start + \".\" + end\n start = dateparser.parse(start)\n end = dateparser.parse(end)\n middate = dateparser.parse(args.mid)\n lag = int(args.lag)\n\n loaded = False\n try:\n # daily data frames are expected to sit in the format rtg_{start}_{end}_daily.h5 in local dir\n daily_df = pd.read_hdf(pname + \"_daily.h5\", 'table')\n loaded = True\n except FileNotFoundError:\n print(\"Did not load cached data...\")\n\n if not loaded:\n # if not df present go ahead and construct it\n uni_df = get_uni(start, end, lookback)\n BARRA_COLS = ['ind1']\n barra_df = load_barra(uni_df, start, end, BARRA_COLS)\n PRICE_COLS = ['close']\n price_df = load_prices(uni_df, start, end, PRICE_COLS)\n\n daily_df = merge_barra_data(price_df, barra_df)\n analyst_df = load_ratings_hist(price_df[['ticker']], start, end)\n daily_df = merge_daily_calcs(analyst_df, daily_df)\n\n daily_df.to_hdf(pname + \"_daily.h5\", 'table', complib='zlib')\n\n result_df = calc_rtg_forecast(daily_df, horizon, middate)\n dump_daily_alpha(result_df, 'rtg')\n" ]
[ [ "pandas.stats.moments.ewma" ] ]
MichelML/ml-aging
[ "b54470c00450da7d5b50e7be4a1f162f1c4b8531" ]
[ "my_notebooks/efficientnet.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# ## Load libraries\n\n# In[1]:\n\n\nget_ipython().system('pip install -q -r requirements.txt')\n\n\n# In[1]:\n\n\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport rxrxutils.rxrx.io as rio\nfrom scipy import misc\n\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as D\nfrom torch.optim.lr_scheduler import ExponentialLR\nimport torch.nn.functional as F\n\nfrom torchvision import models, transforms\n\nfrom ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer\nfrom ignite.metrics import Loss, Accuracy\nfrom ignite.contrib.handlers.tqdm_logger import ProgressBar\nfrom ignite.contrib.handlers import LinearCyclicalScheduler\nfrom ignite.handlers import EarlyStopping, ModelCheckpoint\n\nfrom efficientnet_pytorch import EfficientNet\n\nfrom tqdm import tqdm_notebook\n\nfrom sklearn.model_selection import train_test_split\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom scripts.device import get_device\n\n# %matplotlib inline\n\n\n# In[2]:\n\n\nlearning_rate_str, model_name = ['10e-5', 'efficientnet-b4']\nlearning_rate = float(learning_rate_str)\n\nprint(f'learning rate: {learning_rate}')\nprint(f'model name: {model_name}')\n\n\n# ## Define dataset and model\n\n# In[3]:\n\n\nimg_dir = '../input/rxrxairgb'\npath_data = '../input/rxrxaicsv'\ndevice = get_device()\nbatch_size = 16\ntorch.manual_seed(0)\nprint(device)\n\n\n# In[4]:\n\n\nclass ImagesDS(D.Dataset):\n def __init__(self, df, img_dir=img_dir, mode='train', site=1, channels=[1,2,3,4,5,6]):\n self.records = df.to_records(index=False)\n self.channels = channels\n self.site = site\n self.mode = mode\n self.img_dir = img_dir\n self.len = df.shape[0]\n \n @staticmethod\n def _load_img_as_tensor(file_name):\n with Image.open(file_name) as img:\n return transforms.ToTensor()(img)\n\n def _get_img_path(self, index):\n experiment, well, plate = self.records[index].experiment, self.records[index].well, self.records[index].plate\n return f'{self.img_dir}/{self.mode}/{experiment}_{plate}_{well}_s{self.site}.jpeg'\n \n def __getitem__(self, index):\n img = self._load_img_as_tensor(self._get_img_path(index))\n if self.mode == 'train':\n return img, int(self.records[index].sirna)\n else:\n return img, self.records[index].id_code\n\n def __len__(self):\n return self.len\n\n\n# In[5]:\n\n\n# dataframes for training, cross-validation, and testing\ndf = pd.read_csv(path_data+'/train.csv')\ndf_train, df_val = train_test_split(df, test_size = 0.05, random_state=42)\ndf_test = pd.read_csv(path_data+'/test.csv')\n\n# pytorch training dataset & loader\nds = ImagesDS(df_train, mode='train')\nloader = D.DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=4)\n\n# pytorch cross-validation dataset & loader\nds_val = ImagesDS(df_val, mode='train')\nval_loader = D.DataLoader(ds_val, batch_size=batch_size, shuffle=True, num_workers=4)\n\n# pytorch test dataset & loader\nds_test = ImagesDS(df_test, mode='test')\ntloader = D.DataLoader(ds_test, batch_size=batch_size, shuffle=False, num_workers=4)\n\n\n# In[6]:\n\n\nclasses = 1108\nmodel = EfficientNet.from_pretrained('efficientnet-b4', num_classes=classes) \n\n\n# In[7]:\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n\n# In[8]:\n\n\nmetrics = {\n 'loss': Loss(criterion),\n 'accuracy': Accuracy(),\n}\n\ntrainer = create_supervised_trainer(model, optimizer, criterion, 
device=device)\nval_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)\n\n\n# In[9]:\n\n\[email protected](Events.EPOCH_COMPLETED)\ndef compute_and_display_val_metrics(engine):\n epoch = engine.state.epoch\n metrics = val_evaluator.run(val_loader).metrics\n print(\"Validation Results - Epoch: {} | LR: {:.4f} Average Loss: {:.4f} | Accuracy: {:.4f} \"\n .format(engine.state.epoch, optimizer.param_groups[0]['lr'], metrics['loss'], metrics['accuracy']))\n\n\n# In[10]:\n\n\n# lr_scheduler = ExponentialLR(optimizer, gamma=0.99)\n\n# @trainer.on(Events.EPOCH_COMPLETED)\n# def update_lr_scheduler(engine):\n# lr_scheduler.step()\n# lr = float(optimizer.param_groups[0]['lr'])\n# print(\"Learning rate: {}\".format(lr))\n\n# scheduler = LinearCyclicalScheduler(optimizer, 'lr', 35e-5, 15e-5, len(loader))\n# trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n\n\n# In[11]:\n\n\n# @trainer.on(Events.EPOCH_STARTED)\n# def turn_on_layers(engine):\n# epoch = engine.state.epoch\n# if epoch == 1:\n# for name, child in model.named_children():\n# if name == '_fc':\n# pbar.log_message(name + ' is unfrozen')\n# for param in child.parameters():\n# param.requires_grad = True\n# else:\n# pbar.log_message(name + ' is frozen')\n# for param in child.parameters():\n# param.requires_grad = False\n# if epoch == 3:\n# pbar.log_message(\"Turn on all the layers\")\n# for name, child in model.named_children():\n# for param in child.parameters():\n# param.requires_grad = True\n\n\n# In[12]:\n\n\nhandler = EarlyStopping(patience=6, score_function=lambda engine: engine.state.metrics['accuracy'], trainer=trainer)\nval_evaluator.add_event_handler(Events.COMPLETED, handler)\n\n\n# In[13]:\n\n\ncheckpoints = ModelCheckpoint('models', f'Model_{model_name}_3channels', save_interval=2, n_saved=10, create_dir=True)\ntrainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoints, {f'{learning_rate_str}': model})\n\n\n# In[14]:\n\n\npbar = ProgressBar(bar_format='')\npbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n\n# In[15]:\n\n\nprint('Training started')\ntrainer.run(loader, max_epochs=20)\n\n" ]
[ [ "torch.manual_seed", "torch.utils.data.DataLoader", "sklearn.model_selection.train_test_split", "pandas.read_csv", "torch.nn.CrossEntropyLoss" ] ]
rasmusbergpalm/pymc3
[ "7e464e59bcb0adb28df94f379b3e8d4af12bd4d1" ]
[ "pymc3/tests/test_tuning.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom numpy import inf\n\nfrom pymc3.step_methods.metropolis import tune\nfrom pymc3.tests import models\nfrom pymc3.tests.helpers import select_by_precision\nfrom pymc3.tuning import find_MAP, scaling\n\n\ndef test_adjust_precision():\n a = np.array([-10, -0.01, 0, 10, 1e300, -inf, inf])\n a1 = scaling.adjust_precision(a)\n assert all((a1 > 0) & (a1 < 1e200))\n\n\ndef test_guess_scaling():\n start, model, _ = models.non_normal(n=5)\n a1 = scaling.guess_scaling(start, model=model)\n assert all((a1 > 0) & (a1 < 1e200))\n\n\ndef test_mle_jacobian():\n \"\"\"Test MAP / MLE estimation for distributions with flat priors.\"\"\"\n truth = 10.0 # Simple normal model should give mu=10.0\n rtol = select_by_precision(float64=1e-6, float32=1e-4)\n\n start, model, _ = models.simple_normal(bounded_prior=False)\n with model:\n map_estimate = find_MAP(method=\"BFGS\", model=model)\n np.testing.assert_allclose(map_estimate[\"mu_i\"], truth, rtol=rtol)\n\n start, model, _ = models.simple_normal(bounded_prior=True)\n with model:\n map_estimate = find_MAP(method=\"BFGS\", model=model)\n np.testing.assert_allclose(map_estimate[\"mu_i\"], truth, rtol=rtol)\n\n\ndef test_tune_not_inplace():\n orig_scaling = np.array([0.001, 0.1])\n returned_scaling = tune(orig_scaling, acc_rate=0.6)\n assert not returned_scaling is orig_scaling\n assert np.all(orig_scaling == np.array([0.001, 0.1]))\n pass\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array" ] ]
arjunbhagoji/blackbox-attacks-eccv
[ "d577745c64dfa47963d02bad40d6c5e65b02845f" ]
[ "clarifai/attack_clarifai.py" ]
[ "from clarifai.rest import ClarifaiApp\nimport matplotlib.image as mpimg\nimport numpy as np\nfrom clarifai.rest import Image as ClImage\nimport time\nimport argparse\nimport StringIO\n\ndef dict_reader(concepts_list, preds_array):\n if args.target_model == 'moderation':\n preds_array[0]=filter(lambda concept: concept['name'] == 'safe', concepts_list)[0]['value']\n preds_array[1]=filter(lambda concept: concept['name'] == 'suggestive', concepts_list)[0]['value']\n preds_array[2]=filter(lambda concept: concept['name'] == 'explicit', concepts_list)[0]['value']\n preds_array[3]=filter(lambda concept: concept['name'] == 'drug', concepts_list)[0]['value']\n preds_array[4]=filter(lambda concept: concept['name'] == 'gore', concepts_list)[0]['value']\n elif args.target_model == 'nsfw-v1.0':\n preds_array[0]=filter(lambda concept: concept['name'] == 'sfw', concepts_list)[0]['value']\n preds_array[1]=filter(lambda concept: concept['name'] == 'nsfw', concepts_list)[0]['value']\n return preds_array\n\ndef nsfw_dict_reader(concepts_list, preds_array):\n preds_array[0]=filter(lambda concept: concept['name'] == 'sfw', concepts_list)[0]['value']\n preds_array[1]=filter(lambda concept: concept['name'] == 'nsfw', concepts_list)[0]['value']\n\ndef CW_est_batch(pred_plus_batch, pred_minus_batch, curr_target, max_index):\n logit_plus = np.log(pred_plus_batch)\n logit_plus_t = logit_plus[:, curr_target]\n logit_plus_max = logit_plus[:, max_index]\n logit_minus = np.log(pred_minus_batch)\n logit_minus_t = logit_minus[:, curr_target]\n logit_minus_max = logit_minus[:, max_index]\n\n logit_t_grad_est = (logit_plus_t - logit_minus_t) / delta / 2.0\n logit_max_grad_est = (logit_plus_max - logit_minus_max) / delta / 2.0\n return logit_max_grad_est - logit_t_grad_est\n\ndef xent_est_batch(pred_plus_batch, pred_minus_batch, curr_target):\n pred_plus_t = pred_plus_batch[:, curr_target]\n pred_minus_t = pred_minus_batch[:, curr_target]\n\n return (pred_plus_t - pred_minus_t) / delta / 2.0\n\ndef finite_diff_method(curr_sample, curr_target, p_t, max_index, U=None):\n # Randomly assign groups of group_size\n random_indices = np.random.permutation(dim)\n num_groups = dim / group_size\n print ('Num_groups: {}'.format(num_groups))\n group_indices = np.array_split(random_indices, num_groups)\n\n buffers = []\n\n for j in range(num_groups):\n # Create a perturbation for this group\n basis_vec = np.zeros((IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))\n basis_vec_flat = basis_vec.reshape(-1)\n basis_vec_flat[group_indices[j]] = 1.\n\n # Generate perturbed images\n image_plus_i = np.clip(curr_sample + delta * basis_vec, CLIP_MIN, CLIP_MAX)\n image_minus_i = np.clip(curr_sample - delta * basis_vec, CLIP_MIN, CLIP_MAX)\n\n # Serialize perturbed images for submission\n buf_plus = StringIO.StringIO()\n mpimg.imsave(buf_plus, np.round(image_plus_i).astype(np.uint8), format='png')\n buffers.append(buf_plus)\n buf_minus = StringIO.StringIO()\n mpimg.imsave(buf_minus, np.round(image_minus_i).astype(np.uint8), format='png')\n buffers.append(buf_minus)\n\n # Submit the perturbed images\n num_queries = num_groups * 2\n inputs = [ClImage(file_obj=buf) for buf in buffers]\n batch_size = 30\n num_batches = int(num_queries/batch_size)\n result = []\n if num_batches>0:\n for i in range(num_batches):\n curr_input = inputs[i*batch_size:(i+1)*batch_size]\n result.extend(model.predict(curr_input)['outputs'])\n curr_input = inputs[num_batches*batch_size:]\n result.extend(model.predict(curr_input)['outputs'])\n else:\n 
result.extend(model.predict(inputs)['outputs'])\n\n for buf in buffers:\n buf.close()\n\n # Extract the output\n pred_plus_batch = np.zeros((num_groups, num_classes))\n for pred_plus, output in zip(pred_plus_batch, result[0:num_queries:2]):\n dict_reader(output['data']['concepts'], pred_plus)\n pred_minus_batch = np.zeros((num_groups, num_classes))\n for pred_minus, output in zip(pred_minus_batch, result[1:num_queries:2]):\n dict_reader(output['data']['concepts'], pred_minus)\n\n # Do the actual finite difference gradient estimate\n group_grad_est = CW_est_batch(pred_plus_batch, pred_minus_batch, curr_target, max_index)\n grad_est = np.zeros((IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))\n grad_est_flat = grad_est.reshape(-1)\n for indices, single_grad_est in zip(group_indices, group_grad_est):\n grad_est_flat[indices] = single_grad_est\n \n # Getting gradient of the loss\n# loss_grad = -1.0 * grad_est/p_t\n loss_grad = grad_est\n\n return loss_grad\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"target_image_name\", help=\"Image to misclassify\")\nparser.add_argument(\"--target_model\", type=str, default='nsfw-v1.0', \n help=\"target model for attack\")\nparser.add_argument(\"--eps\", type=int, default=16, \n help=\"perturbation magnitude to use\")\nparser.add_argument(\"--num_iter\", type=int, default=5, \n help=\"number of iterations to run\")\nparser.add_argument(\"--group_size\", type=int, default=10000,\n help=\"Number of features to group together\")\nparser.add_argument(\"--delta\", type=float, default=1.0,\n help=\"local perturbation\")\n\nargs = parser.parse_args()\n\napp = ClarifaiApp()\n\nmodel = app.models.get(args.target_model)\n\ntime1 = time.time()\nsuccess = 0\navg_l2_perturb = 0\ncurr_image=args.target_image_name+'.jpg'\ncurr_sample = np.array(mpimg.imread(curr_image),dtype=float)\narray_shape = curr_sample.shape\nif len(curr_sample.shape)>2:\n curr_sample=curr_sample[:,:,:3]\nelse:\n curr_sample = curr_sample.reshape((array_shape[0],array_shape[1],1))\n\nBATCH_SIZE=1\nIMAGE_ROWS=curr_sample.shape[0]\nIMAGE_COLS=curr_sample.shape[1]\nNUM_CHANNELS=curr_sample.shape[2]\ndim=IMAGE_ROWS*IMAGE_COLS*NUM_CHANNELS\ndelta=args.delta\nCLIP_MIN=0\nCLIP_MAX=255\ngroup_size=args.group_size\neps=args.eps\nnorm='linf'\nalpha = float(args.eps/args.num_iter)\nif args.target_model == 'moderation':\n num_classes = 5\nelif args.target_model == 'nsfw-v1.0':\n num_classes = 2\n\ncurr_prediction = np.zeros((num_classes))\nimage_cl=ClImage(file_obj=open(curr_image,'rb'))\ncurr_predict_dict = model.predict([image_cl])['outputs'][0]['data']['concepts']\ncurr_prediction = dict_reader(curr_predict_dict, curr_prediction)\norig_index = np.argmax(curr_prediction)\nprint(\"Original prediction: {}\".format(curr_prediction))\n\ntemp_sample = curr_sample\ntemp_image = curr_image\ncurr_target = 0\n\nfor i in range(args.num_iter):\n image_cl=ClImage(file_obj=open(temp_image,'rb'))\n\n temp_prediction = np.zeros((num_classes))\n temp_predict_dict = model.predict([image_cl])['outputs'][0]['data']['concepts']\n temp_prediction = dict_reader(temp_predict_dict, temp_prediction)\n temp_logits = np.log(temp_prediction)\n max_index = np.argmax(temp_prediction)\n loss_value = temp_logits[orig_index] - temp_logits[curr_target]\n print('Current loss value: {}'.format(loss_value))\n print('Current prediction: {}'.format(temp_prediction))\n\n p_t = temp_prediction[curr_target]\n\n loss_grad = finite_diff_method(temp_sample, curr_target, p_t, max_index)\n\n # Getting signed gradient of loss\n if norm == 'linf':\n 
normed_loss_grad = np.sign(loss_grad)\n elif norm == 'l2':\n grad_norm = np.linalg.norm(loss_grad.reshape(dim))\n normed_loss_grad = np.zeros_like(curr_sample)\n normed_loss_grad = loss_grad/grad_norm\n\n # eps_mod = eps - args.alpha\n image_adv = temp_sample - alpha * normed_loss_grad\n r = np.clip(image_adv-curr_sample, -eps, eps)\n temp_sample = np.clip(curr_sample + r, CLIP_MIN, CLIP_MAX)\n temp_image = args.target_image_name+'temp.png'\n mpimg.imsave(temp_image, np.round(temp_sample).astype(np.uint8))\n\nx_adv = args.target_image_name+'_adv_'+str(args.eps)+'_'+str(args.num_iter)+'_'+str(args.delta)+'_'+str(args.group_size)+'.png'\nmpimg.imsave(x_adv, np.round(temp_sample).astype(np.uint8))\n\n# Getting the norm of the perturbation\nperturb_norm = np.linalg.norm((image_adv-curr_sample).reshape(dim))\nperturb_norm_batch = np.mean(perturb_norm)\navg_l2_perturb += perturb_norm_batch\n\nimage_adv_cl=ClImage(file_obj=open(x_adv,'rb'))\n\nadv_prediction = np.zeros((num_classes))\nadv_predict_dict = model.predict([image_adv_cl])['outputs'][0]['data']['concepts']\nadv_prediction = dict_reader(adv_predict_dict, adv_prediction)\nadv_logits = np.log(adv_prediction)\nloss_value = adv_logits[orig_index] - adv_logits[curr_target]\nsuccess += np.sum(np.argmax(adv_prediction) == curr_target)\n\nsuccess = 100.0 * float(success)\n\nprint('Final loss: {}'.format(loss_value))\nprint('Final prediction: {}'.format(adv_prediction))\nprint('Success: {}'.format(success))\n\nofile=open(args.target_image_name+'.txt','a')\n\nofile.write('eps: {}, num_iter: {}, group_size: {}, delta: {}, model: {} ---- success: {} \\n'.format(eps, args.num_iter, args.group_size, args.delta, args.target_model, success))\nofile.write(\"Original prediction: {} \\n\".format(curr_prediction))\nofile.write(\"Final prediction: {}\\n\".format(adv_prediction))\nofile.close()\n\n# success = 100.0 - success\n\ntime2 = time.time()\nprint('Average l2 perturbation: {}'.format(avg_l2_perturb))\nprint('Total time: {}'.format(time2-time1))" ]
[ [ "numpy.zeros_like", "numpy.log", "numpy.zeros", "numpy.round", "numpy.random.permutation", "matplotlib.image.imread", "numpy.mean", "numpy.sign", "numpy.argmax", "numpy.clip", "numpy.array_split" ] ]
nickp60/genvis_lite
[ "7ff9f66b3c1f6fb5cb445141685fe6dc1e9a9258" ]
[ "webapp/api/views.py" ]
[ "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.core.serializers.json import DjangoJSONEncoder\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom api.tags import method\nfrom core import settings\n\nimport json\nimport numpy as np\nimport pandas as pd\n\n\nclass NumpyEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NumpyEncoder, self).default(obj)\n\n\nall_data = pd.DataFrame.from_csv(\"../data/norm_data/norm_all.csv\")\ncolumn_components = pd.DataFrame.from_csv(\"../data/norm_data/norm_metadata.csv\")\nmouse_genemap = pd.read_csv(\"../data/mouse_geneid_map_GRCm38_081517.csv\", dtype=str)\n\ndef generate_data(dataset, xaxis, series, restrictions):\n data = all_data\n columns = column_components\n for field, op, value in restrictions:\n if field == dataset[\"index_name\"]:\n if op == \"eq\":\n data = data.loc[value]\n elif op == \"in\":\n data = data.loc[value]\n else:\n raise Exception(\"op {} is not valid for {}\".format(op, field))\n elif op == \"eq\":\n columns = columns[columns[field]==value]\n elif op == \"in\":\n columns = columns[columns[field].isin(value)]\n else:\n raise Exception(\"op {} is not valid for {}\".format(op, field))\n\n def calculate_field_values(values):\n if len(values) > 10:\n return {\"truncated\": True, \"values\": values[0:10]}\n else:\n return {\"truncated\": False, \"values\": values} \n\n field_values = {}\n field_values[dataset[\"index_name\"]] = calculate_field_values(sorted(data.index))\n for serie_name in dataset[\"series\"].keys():\n field_values[serie_name] = calculate_field_values(sorted(columns[serie_name].unique()))\n\n if xaxis == dataset[\"index_name\"]:\n xvalues = list(data.index)\n yaxes = sorted(set(columns[series]))\n result = []\n for current in yaxes:\n yindices = columns[columns[series]==current].index\n values = data[yindices]\n mean = list(values.mean(axis=1))\n std = list(values.std(axis=1))\n result.append((mean, std))\n return field_values, xvalues, list(zip(yaxes, result))\n \n if series == dataset[\"index_name\"]:\n xvalues = sorted(set(columns[xaxis].dropna()))\n yaxes = sorted(set(data.index))\n result = []\n for current in data.index:\n yvalues = []\n for xvalue in xvalues:\n yindices = columns[columns[xaxis]==xvalue].index\n values = data[yindices].loc[current]\n mean = values.mean()\n std = values.std()\n yvalues.append((mean, std))\n result.append(yvalues)\n return field_values, xvalues, list(zip(yaxes, result))\n \n else:\n xvalues = sorted(set(columns[xaxis].dropna()))\n yaxes = sorted(set(columns[series].dropna()))\n result = []\n for yaxis in yaxes:\n yvalues = []\n for current in xvalues:\n values = data[columns[(columns[series]==yaxis) & (columns[xaxis]==current)].index].stack()\n mean = values.mean()\n std = values.std()\n yvalues.append((mean, std))\n result.append(yvalues)\n return field_values, xvalues, list(zip(yaxes, result))\n\n\n@csrf_exempt\n@method(allowed=['POST'])\ndef time_series(request):\n body = json.loads(request.body.decode(\"utf-8\"))\n dataset_name = body.get(\"dataset\", None)\n dataset = settings.DATASETS.get(dataset_name, None)\n xaxis = body.get(\"xaxis\", None)\n series = body.get(\"series\", None)\n restrictions = body.get(\"restrictions\", [])\n\n print(\"*\" * 80)\n print(\"dataset: {}\".format(dataset))\n print(\"xaxis: {}\".format(xaxis))\n 
print(\"series: {}\".format(series))\n print(\"restr: {}\".format(restrictions))\n print(\"*\" * 80)\n \n if None in [dataset_name, dataset]:\n result = {\"ok\": False,\n \"message\": \"dataset not valid\"}\n return JsonResponse(result)\n\n if xaxis is None:\n result = {\"ok\": False,\n \"message\": \"xaxis not valid\"}\n return JsonResponse(result)\n\n if series is None:\n result = {\"ok\": False,\n \"message\": \"series not valid\"}\n return JsonResponse(result)\n\n field_values, xvalues, series_values = generate_data(dataset, xaxis, series, restrictions)\n result = {\"ok\": True,\n \"dataset\": dataset_name,\n \"field_values\": field_values,\n \"xvalues\": xvalues,\n \"series\": [{\"name\": v[0], \"values\": v[1]} for v in series_values]}\n return JsonResponse(result, encoder=NumpyEncoder)\n\n\nMAX_SERIE_DETAIL_VALUES = 25\n\n@csrf_exempt\n@method(allowed=['POST'])\ndef series_detail(request):\n body = json.loads(request.body.decode(\"utf-8\"))\n dataset_name = body.get(\"dataset\", None)\n dataset = settings.DATASETS.get(dataset_name, None)\n serie = body.get(\"serie\", None)\n\n if None in [dataset_name, dataset]:\n result = {\"ok\": False,\n \"message\": \"dataset not valid\"}\n return JsonResponse(result)\n\n if serie is None:\n result = {\"ok\": False,\n \"message\": \"serie not valid\"}\n return JsonResponse(result)\n \n if serie == dataset[\"index_name\"]:\n result = {\"ok\": True,\n \"wizard\": \"gene_wizard\"}\n return JsonResponse(result)\n\n wizard = dataset[\"series\"][serie][\"wizard\"]\n result = {\"ok\": True,\n \"values\": sorted(column_components[serie].unique())[0:MAX_SERIE_DETAIL_VALUES],\n \"wizard\": wizard}\n return JsonResponse(result)\n\n\nMAX_GENE_RESULT = 10\n\n@csrf_exempt\n@method(allowed=['POST'])\ndef series_find(request):\n body = json.loads(request.body.decode(\"utf-8\"))\n dataset_name = body.get(\"dataset\", None)\n dataset = settings.DATASETS.get(dataset_name, None)\n serie = body.get(\"serie\", None)\n text = body.get(\"text\", \"\")\n\n if None in [dataset_name, dataset]:\n result = {\"ok\": False,\n \"message\": \"dataset not valid\"}\n return JsonResponse(result)\n\n if serie is None:\n result = {\"ok\": False,\n \"message\": \"serie not valid\"}\n return JsonResponse(result)\n \n if serie == dataset[\"index_name\"]:\n data = mouse_genemap[\n (mouse_genemap[\"ensembl_gene_id\"].notnull() & mouse_genemap[\"ensembl_gene_id\"].str.contains(text)) |\n (mouse_genemap[\"external_gene_name\"].notnull() & mouse_genemap[\"external_gene_name\"].str.contains(text)) |\n (mouse_genemap[\"entrezgene\"].notnull() & mouse_genemap[\"entrezgene\"].str.contains(text))] \n result_values = []\n for i, row in enumerate(data[[\"ensembl_gene_id\", \"external_gene_name\", \"entrezgene\"]].iterrows()):\n if i > MAX_GENE_RESULT:\n break\n index, (ensembl_gene_id, external_gene_name, entrezgene) = row\n result_values.append((ensembl_gene_id, external_gene_name, entrezgene))\n\n result = {\"ok\": True,\n \"dataset\": dataset_name,\n \"result\": [list(s) for s in result_values]}\n print(\"*\" * 80)\n print(\"result: {}\".format(result))\n print(\"*\" * 80)\n return JsonResponse(result)\n\n else:\n result = {\"ok\": False,\n \"message\": \"not implemented\"}\n return JsonResponse(result)" ]
[ [ "pandas.read_csv", "pandas.DataFrame.from_csv" ] ]
AnnaNylander/exjobb
[ "74bdf299aa5e51adb7757364188e09a3e3986660" ]
[ "network/nn_practice/mnist/mnist_classifier.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\nimport pandas\nimport numpy\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom MnistDataSet import MnistDataSet\nfrom Network import Net\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, side, H, D_out = 20, 784, 28, 100, 10\nkernel_size = 5\nlearning_rate = 1e-5\n\n# Create Tensors to hold inputs and outputs, and wrap them in Variables\ntrain_dataset = MnistDataSet(csv_file = 'data/mnist_train.csv')\ntest_dataset = MnistDataSet(csv_file = 'data/mnist_test.csv')\ndataloader = DataLoader(train_dataset, batch_size=N, shuffle=True, num_workers=4)\ndataloader_test = DataLoader(test_dataset)\n\n# Construct our model by instantiating the class defined above\n#model = torch.nn.Sequential(\n #torch.nn.Linear(D_in,H),\n# torch.nn.Conv2d(1,1,3), #as input it wants a 4d tensor with (batch size, channels in, img width, img height)\n# torch.nn.ReLU(),\n# torch.nn.Linear(26,H),\n# torch.nn.ReLU(),\n# torch.nn.Linear(H, D_out)\n#)\n#model = torch.load('saved/model.pt')\nmodel = Net(side, H, D_out,kernel_size)\nmodel.cuda()\n\n#print(torch.cuda.is_available())\n\nloss_fn = torch.nn.MultiLabelSoftMarginLoss()\nloss_fn.cuda()\n\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nfor epoch in range(10):\n for batch_i, batch in enumerate(dataloader):\n\n\n images = (batch['image']).type(torch.cuda.FloatTensor)\n labels = (batch['label']).type(torch.cuda.FloatTensor)\n images = torch.unsqueeze(images, 1)\n x = Variable(images)\n y = Variable(labels)\n\n y_pred = model(x)\n\n loss = loss_fn(y_pred, y)\n #if batch_i%N == 0:\n #print(batch_i, loss.data[0])\n\n # nollställ gradienterna\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n test_loss = 0\n for batch_i, batch in enumerate(dataloader_test):\n images = (batch['image']).type(torch.cuda.FloatTensor)\n labels = (batch['label']).type(torch.cuda.FloatTensor)\n images = torch.unsqueeze(images, 1)\n x = Variable(images)\n y = Variable(labels)\n\n y_pred = model(x)\n\n loss = loss_fn(y_pred, y)\n test_loss = test_loss + loss.data[0]\n\n avg_loss = test_loss/len(test_dataset);\n print(avg_loss)\n\ntorch.save(model, 'saved/model3.pt')\n" ]
[ [ "torch.nn.MultiLabelSoftMarginLoss", "torch.autograd.Variable", "torch.save", "torch.unsqueeze", "torch.utils.data.DataLoader" ] ]
medvidov/PyNomaly
[ "789c0ca7587b86343f636b132dcf1f475ee6b90b" ]
[ "pynom-env/lib/python3.6/site-packages/pydataset/datasets_handler.py" ]
[ "# datasets_handler.py\n# dataset handling file\n\nimport pandas as pd\nfrom .utils import html2text\nfrom .locate_datasets import __items_dict, __docs_dict, __get_data_folder_path\n\nitems = __items_dict()\ndocs = __docs_dict()\n\n# make dataframe layout (of __datasets_desc()) terminal-friendly\npd.set_option('display.max_rows', 170)\npd.set_option('display.max_colwidth', 90)\n# for terminal, auto-detect\npd.set_option('display.width', None)\n\n\n# HELPER\n\ndef __filter_doc(raw):\n note = \"PyDataset Documentation (adopted from R Documentation. \" \\\n \"The displayed examples are in R)\"\n txt = raw.replace('R Documentation', note)\n return txt\n\n\ndef __read_docs(path):\n # raw html\n html = open(path, 'r').read()\n # html handler\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.ignore_images = True\n txt = h.handle(html)\n\n return txt\n\n\n# MAIN\n\ndef __get_csv_path(item):\n \"\"\"return the full path of the item's csv file\"\"\"\n return items[item]\n\n\ndef __read_csv(item):\n path = __get_csv_path(item)\n df = pd.read_csv(path, index_col=0)\n # display 'optional' log msg \"loaded: Titanic <class 'numpy.ndarray'>\"\n # print('loaded: {} {}'.format(item, type(df)))\n return df\n\n\ndef __get_doc_path(item):\n return docs[item]\n\n\ndef __print_item_docs(item):\n path = __get_doc_path(item)\n doc = __read_docs(path) # html format\n txt = __filter_doc(doc) # edit R related txt\n print(txt)\n\n\ndef __datasets_desc():\n \"\"\"return a df of the available datasets with description\"\"\"\n datasets = __get_data_folder_path() + 'datasets.csv'\n df = pd.read_csv(datasets)\n df = df[['Item', 'Title']]\n df.columns = ['dataset_id', 'title']\n # print('a list of the available datasets:')\n return df\n" ]
[ [ "pandas.read_csv", "pandas.set_option" ] ]
barnrang/omniglot
[ "c93d333687b1d182e1c20aa7e6798c7a0bcc2474" ]
[ "priorloader.py" ]
[ "import numpy as np\nfrom keras.utils import np_utils\nimport tensorflow\nimport keras\nimport random\nfrom python.dataloader import loader\n\nclass DataGenerator(tensorflow.keras.utils.Sequence):\n 'Generates data for Keras'\n def __init__(self, data_type='train', dim=(28,28), n_channels=1,\n way=20, shot=1, query=1, num_batch=500):\n 'Initialization'\n self.type = data_type\n # if self.type == 'train':\n # self.is_training = np.array([True for _ in range(batch_size)])\n # else:\n # self.is_training = np.array([False for _ in range(batch_size)])\n self.dim = dim\n #self.batch_size = batch_size\n self.n_channels = n_channels\n self.num_per_class = 20\n self.num_batch = num_batch\n #self.y_target = np.zeros(self.batch_size)\n self.build_data(self.type)\n self.on_epoch_end()\n self.way = way\n self.shot = shot\n self.query = query\n #TODO!!!!\n #self.hard_batch = np.zeros(batch_size, *dim, n_channels)\n\n def build_data(self, data_type):\n if data_type == 'train':\n self.class_data = np.load('python/train.npy')\n else:\n self.class_data = np.load('python/test.npy')\n\n self.n_classes = len(self.class_data)\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return self.num_batch\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n # Generate data\n X_sample, X_query, label = self.__data_generation()\n #way = np.ones((self.way * self.shot, 1)) * self.way\n\n\n return [X_sample, X_query], label\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n pass\n\n def __data_generation(self):\n 'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)\n # Initialization\n X_sample = np.empty((self.way, self.shot, *self.dim, self.n_channels))\n X_query = np.empty((self.way, self.query, *self.dim, self.n_channels))\n chosen_class = random.sample(range(self.n_classes), self.way)\n label = np.empty(self.way * self.query)\n # print(pos, neg)\n # print(self.class_data[pos][0].shape)\n # Generate data\n for i in range(self.way):\n sample_idx = random.sample(range(self.num_per_class), self.shot + self.query)\n sample_data = self.class_data[chosen_class[i]][sample_idx]/255.\n X_sample[i] = sample_data[:self.shot]\n X_query[i] = sample_data[self.shot:self.shot + self.query]\n label[i * self.query: (i+1) * self.query] = i\n return X_sample, X_query, np_utils.to_categorical(label)\n #return X, keras.utils.to_categorical(y, num_classes=self.n_classes)\n" ]
[ [ "numpy.empty", "numpy.load" ] ]
musicinmybrain/NeuroM
[ "8aa8813f7f1a4a8363863c9c2fc94a0a11d2b328" ]
[ "tests/geom/test_transform.py" ]
[ "# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project\n# All rights reserved.\n#\n# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of\n# its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport math\nfrom pathlib import Path\n\nimport neurom.geom.transform as gtr\nimport numpy as np\nfrom neurom import COLS, load_neuron\nfrom neurom.features import neuritefunc as _nf\n\nimport pytest\nfrom numpy.testing import assert_almost_equal\n\nTEST_UVEC = np.array([0.01856633, 0.37132666, 0.92831665])\nTEST_ANGLE = np.pi / 3.\nDATA_PATH = Path(__file__).parent.parent / 'data'\nH5_NRN_PATH = DATA_PATH / 'h5/v1/Neuron.h5'\nSWC_NRN_PATH = DATA_PATH / 'swc/Neuron.swc'\n\n\ndef _Rx(angle):\n sn = np.sin(angle)\n cs = np.cos(angle)\n return np.array([[1., 0., 0.],\n [0., cs, -sn],\n [0., sn, cs]])\n\n\ndef _Ry(angle):\n sn = np.sin(angle)\n cs = np.cos(angle)\n return np.array([[cs, 0., sn],\n [0., 1., 0.],\n [-sn, 0., cs]])\n\n\ndef _Rz(angle):\n sn = np.sin(angle)\n cs = np.cos(angle)\n return np.array([[cs, -sn, 0.],\n [sn, cs, 0.],\n [0., 0., 1.]])\n\n\ndef test_not_implemented_transform_call_raises():\n with pytest.raises(NotImplementedError):\n class Dummy(gtr.Transform3D):\n pass\n\n d = Dummy()\n d([1, 2, 3])\n\n\ndef test_translate_bad_type_raises():\n with pytest.raises(NotImplementedError):\n gtr.translate(\"hello\", [1, 2, 3])\n\n\ndef test_rotate_bad_type_raises():\n with pytest.raises(NotImplementedError):\n gtr.rotate(\"hello\", [1, 0, 0], math.pi)\n\n\ndef test_translate_point():\n\n t = gtr.Translation([100, -100, 100])\n point = [1, 2, 3]\n assert t(point).tolist() == [101, -98, 103]\n\n\ndef test_translate_points():\n\n t = gtr.Translation([100, -100, 100])\n points = np.array([[1, 2, 3], [11, 22, 33], [111, 222, 333]])\n assert np.all(t(points) == np.array([[101, -98, 103],\n [111, -78, 133],\n [211, 122, 433]]))\n\n\nROT_90 = np.array([[0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]])\n\nROT_180 = np.array([[-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1]])\n\nROT_270 = np.array([[0, 1, 0],\n [-1, 0, 0],\n [0, 0, 1]])\n\n\ndef 
test_rotate_point():\n\n rot = gtr.Rotation(ROT_90)\n assert rot([2, 0, 0]).tolist() == [0, 2, 0]\n assert rot([0, 2, 0]).tolist() == [-2, 0, 0]\n assert rot([0, 0, 2]).tolist() == [0, 0, 2]\n\n rot = gtr.Rotation(ROT_180)\n assert rot([2, 0, 0]).tolist() == [-2, 0, 0]\n assert rot([0, 2, 0]).tolist() == [0, -2, 0]\n assert rot([0, 0, 2]).tolist() == [0, 0, 2]\n\n rot = gtr.Rotation(ROT_270)\n assert rot([2, 0, 0]).tolist() == [0, -2, 0]\n assert rot([0, 2, 0]).tolist() == [2, 0, 0]\n assert rot([0, 0, 2]).tolist() == [0, 0, 2]\n\n\ndef test_rotate_points():\n\n rot = gtr.Rotation(ROT_90)\n\n points = np.array([[2, 0, 0],\n [0, 2, 0],\n [0, 0, 2],\n [3, 0, 3]])\n\n assert np.all(rot(points) == np.array([[0, 2, 0],\n [-2, 0, 0],\n [0, 0, 2],\n [0, 3, 3]]))\n\n rot = gtr.Rotation(ROT_180)\n assert np.all(rot(points) == np.array([[-2, 0, 0],\n [0, -2, 0],\n [0, 0, 2],\n [-3, 0, 3]]))\n\n rot = gtr.Rotation(ROT_270)\n assert np.all(rot(points) == np.array([[0, -2, 0],\n [2, 0, 0],\n [0, 0, 2],\n [0, -3, 3]]))\n\n\ndef test_pivot_rotate_point():\n\n point = [1, 2, 3]\n\n new_orig = np.array([10., 45., 50.])\n\n t = gtr.Translation(new_orig)\n t_inv = gtr.Translation(new_orig * -1)\n\n R = gtr._rodrigues_to_dcm(TEST_UVEC, np.pi)\n\n # change origin, rotate 180\n p1 = gtr.PivotRotation(R, new_orig)(point)\n\n # do the steps manually\n p2 = t_inv(point)\n p2 = gtr.Rotation(R)(p2)\n p2 = t(p2)\n\n assert p1.tolist() == p2.tolist()\n\n\ndef test_pivot_rotate_points():\n\n points = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12]])\n\n new_orig = np.array([10., 45., 50.])\n\n t = gtr.Translation(new_orig)\n t_inv = gtr.Translation(new_orig * -1)\n\n R = gtr._rodrigues_to_dcm(TEST_UVEC, np.pi)\n\n # change origin, rotate 180\n p1 = gtr.PivotRotation(R, new_orig)(points)\n\n # do the steps manually\n p2 = t_inv(points)\n p2 = gtr.Rotation(R)(p2)\n p2 = t(p2)\n\n assert np.all(p1 == p2)\n\n\ndef _check_fst_nrn_translate(nrn_a, nrn_b, t):\n\n # soma points\n assert np.allclose(\n (nrn_b.soma.points[:, COLS.XYZ] - nrn_a.soma.points[:, COLS.XYZ]), t)\n _check_fst_neurite_translate(nrn_a.neurites, nrn_b.neurites, t)\n\n\ndef _check_fst_neurite_translate(nrts_a, nrts_b, t):\n # neurite sections\n for sa, sb in zip(_nf.iter_sections(nrts_a),\n _nf.iter_sections(nrts_b)):\n assert np.allclose((sb.points[:, COLS.XYZ] - sa.points[:, COLS.XYZ]), t)\n\n\ndef test_translate_fst_neuron_swc():\n\n t = np.array([100., 100., 100.])\n nrn = load_neuron(SWC_NRN_PATH)\n tnrn = gtr.translate(nrn, t)\n _check_fst_nrn_translate(nrn, tnrn, t)\n\n\ndef test_transform_translate_neuron_swc():\n t = np.array([100., 100., 100.])\n nrn = load_neuron(SWC_NRN_PATH)\n tnrn = nrn.transform(gtr.Translation(t))\n _check_fst_nrn_translate(nrn, tnrn, t)\n\n\ndef test_translate_fst_neuron_h5():\n\n t = np.array([100., 100., 100.])\n nrn = load_neuron(H5_NRN_PATH)\n tnrn = gtr.translate(nrn, t)\n\n _check_fst_nrn_translate(nrn, tnrn, t)\n\n\ndef test_transform_translate_neuron_h5():\n t = np.array([100., 100., 100.])\n nrn = load_neuron(H5_NRN_PATH)\n tnrn = nrn.transform(gtr.Translation(t))\n _check_fst_nrn_translate(nrn, tnrn, t)\n\n\ndef _apply_rot(points, rot_mat):\n return np.dot(rot_mat, np.array(points).T).T\n\n\ndef _check_fst_nrn_rotate(nrn_a, nrn_b, rot_mat):\n\n # soma points\n assert np.allclose(_apply_rot(nrn_a.soma.points[:, COLS.XYZ], rot_mat),\n nrn_b.soma.points[:, COLS.XYZ])\n\n # neurite sections\n _check_fst_neurite_rotate(nrn_a.neurites, nrn_b.neurites, rot_mat)\n\n\ndef _check_fst_neurite_rotate(nrt_a, 
nrt_b, rot_mat):\n for sa, sb in zip(_nf.iter_sections(nrt_a),\n _nf.iter_sections(nrt_b)):\n assert np.allclose(sb.points[:, COLS.XYZ],\n _apply_rot(sa.points[:, COLS.XYZ], rot_mat))\n\n\ndef test_rotate_neuron_swc():\n nrn_a = load_neuron(SWC_NRN_PATH)\n nrn_b = gtr.rotate(nrn_a, [0, 0, 1], math.pi/2.0)\n rot = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)\n _check_fst_nrn_rotate(nrn_a, nrn_b, rot)\n\n\ndef test_transform_rotate_neuron_swc():\n rot = gtr.Rotation(ROT_90)\n nrn_a = load_neuron(SWC_NRN_PATH)\n nrn_b = nrn_a.transform(rot)\n _check_fst_nrn_rotate(nrn_a, nrn_b, ROT_90)\n\n\ndef test_rotate_neuron_h5():\n nrn_a = load_neuron(H5_NRN_PATH)\n nrn_b = gtr.rotate(nrn_a, [0, 0, 1], math.pi/2.0)\n rot = gtr._rodrigues_to_dcm([0, 0, 1], math.pi/2.0)\n _check_fst_nrn_rotate(nrn_a, nrn_b, rot)\n\n\ndef test_transform_rotate_neuron_h5():\n rot = gtr.Rotation(ROT_90)\n nrn_a = load_neuron(H5_NRN_PATH)\n nrn_b = nrn_a.transform(rot)\n _check_fst_nrn_rotate(nrn_a, nrn_b, ROT_90)\n\n\ndef test_rodrigues_to_dcm():\n\n RES = np.array([[0.50017235, -0.80049871, 0.33019604],\n [0.80739289, 0.56894174, 0.15627544],\n [-0.3129606, 0.18843328, 0.9308859]])\n\n R = gtr._rodrigues_to_dcm(TEST_UVEC, TEST_ANGLE)\n\n # assess rotation matrix properties:\n\n # detR = +=1\n assert_almost_equal(np.linalg.det(R), 1.)\n\n # R.T = R^-1\n assert np.allclose(np.linalg.inv(R), R.transpose())\n\n # check against calculated matrix\n assert np.allclose(R, RES)\n\n # check if opposite sign generates inverse\n Rinv = gtr._rodrigues_to_dcm(TEST_UVEC, -TEST_ANGLE)\n\n assert np.allclose(np.dot(Rinv, R), np.identity(3))\n\n # check basic rotations with a range of angles\n for angle in np.linspace(0., 2. * np.pi, 10):\n\n Rx = gtr._rodrigues_to_dcm(np.array([1., 0., 0.]), angle)\n Ry = gtr._rodrigues_to_dcm(np.array([0., 1., 0.]), angle)\n Rz = gtr._rodrigues_to_dcm(np.array([0., 0., 1.]), angle)\n\n assert np.allclose(Rx, _Rx(angle))\n assert np.allclose(Ry, _Ry(angle))\n assert np.allclose(Rz, _Rz(angle))\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.dot", "numpy.linalg.det", "numpy.identity", "numpy.allclose", "numpy.cos", "numpy.all", "numpy.linspace", "numpy.linalg.inv" ] ]
piojanu/neptune-contrib
[ "7793c325af1c225cbda972bc0f89fa45f8da6cf3" ]
[ "neptunecontrib/versioning/data.py" ]
[ "#\n# Copyright (c) 2019, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport hashlib\n\nimport boto3\nimport matplotlib.pyplot as plt\nimport neptune\nimport numpy as np\n\nfrom neptunecontrib.monitoring.utils import send_figure\n\n__all__ = [\n 'log_data_version',\n 'log_s3_data_version',\n 'log_image_dir_snapshots',\n]\n\ndef log_data_version(path, prefix='', experiment=None):\n \"\"\"Logs data version of file or folder to Neptune\n\n For a path it calculates the hash and logs it along with the path itself as a property to Neptune experiment.\n Path to dataset can be a file or directory.\n\n Args:\n path(str): path to the file or directory,\n prefix(str): Prefix that will be added before 'data_version' and 'data_path'\n experiment(neptune.experiemnts.Experiment or None): if the data should be logged to a particular\n neptune experiment it can be passed here. By default it is logged to the current experiment.\n\n Examples:\n Initialize Neptune::\n\n import neptune\n from neptunecontrib.versioning.data import log_data_version\n neptune.init('USER_NAME/PROJECT_NAME')\n\n Log data version from filepath::\n\n FILEPATH = '/path/to/data/my_data.csv'\n with neptune.create_experiment():\n log_data_version(FILEPATH)\n\n \"\"\"\n\n _exp = experiment if experiment else neptune\n\n _exp.set_property('{}data_path'.format(prefix), path)\n _exp.set_property('{}data_version'.format(prefix), _md5_hash_path(path))\n\n\ndef log_s3_data_version(bucket_name, path, prefix='', experiment=None):\n \"\"\"Logs data version of s3 bucket to Neptune\n\n For a bucket and path it calculates the hash and logs it along with the path itself as a property to\n Neptune experiment.\n Path is either the s3 bucket key to a file or the begining of a key (in case you use a \"folder\" structure).\n\n Args:\n bucket_name(str): name of the s3 bucket\n path(str): path to the file or directory on s3 bucket\n prefix(str): Prefix that will be added before 'data_version' and 'data_path'\n experiment(neptune.experiemnts.Experiment or None): if the data should be logged to a particular\n neptune experiment it can be passed here. 
By default it is logged to the current experiment.\n\n Examples:\n Initialize Neptune::\n\n import neptune\n from neptunecontrib.versioning.data import log_s3_data_version\n neptune.init('USER_NAME/PROJECT_NAME')\n\n Log data version from bucket::\n\n BUCKET = 'my-bucket'\n PATH = 'train_dir/'\n with neptune.create_experiment():\n log_s3_data_version(BUCKET, PATH)\n\n \"\"\"\n\n _exp = experiment if experiment else neptune\n\n _exp.set_property('{}data_path'.format(prefix), '{}/{}'.format(bucket_name, path))\n _exp.set_property('{}data_version'.format(prefix), _md5_hash_bucket(bucket_name, path))\n\n\ndef log_image_dir_snapshots(image_dir, channel_name='image_dir_snapshots', experiment=None, sample=16, seed=1234):\n \"\"\"Logs visual snapshot of the directory with image data to Neptune.\n\n For a given directory with images it logs a sample of images as figure to Neptune.\n If the `image_dir` specified contains multiple folders it will sample per folder and create\n multiple figures naming each figure with the folder name.\n See snapshots per class here https://ui.neptune.ai/jakub-czakon/examples/e/EX-95/channels.\n\n Args:\n image_dir(str): path to directory with images.\n sample(int): number of images that should be sampled for plotting.\n channel_name(str): name of the neptune channel. Default is 'image_dir_snapshots'.\n experiment(neptune.experiemnts.Experiment or None): if the data should be logged to a particular\n neptune experiment it can be passed here. By default it is logged to the current experiment.\n seed(int): random state for the sampling of images.\n\n Examples:\n Initialize Neptune::\n\n import neptune\n from neptunecontrib.versioning.data import log_image_dir_snapshots\n neptune.init('USER_NAME/PROJECT_NAME')\n\n Log visual snapshot of image directory::\n\n PATH = 'train_dir/'\n with neptune.create_experiment():\n log_image_dir_snapshots(PATH)\n\n \"\"\"\n _exp = experiment if experiment else neptune\n\n figs = _get_collated_images(image_dir, sample=sample, seed=seed)\n for fig in figs:\n send_figure(fig, channel_name=channel_name, experiment=_exp)\n\n\ndef _md5_hash_path(path):\n if os.path.isdir(path):\n return _md5_hash_dir(path)\n elif os.path.isfile(path):\n return _md5_hash_file(path)\n else:\n raise NotImplementedError\n\n\ndef _md5_hash_file(filepath):\n hash_md5 = hashlib.md5()\n hash_md5 = _update_hash_md5(hash_md5, filepath)\n return hash_md5.hexdigest()\n\n\ndef _md5_hash_dir(dirpath):\n hash_md5 = hashlib.md5()\n\n for root, _, files in os.walk(dirpath):\n for names in files:\n filepath = os.path.join(root, names)\n\n # Hash the path and add to the digest to account for empty files/directories\n hash_md5.update(hashlib.sha1(filepath[len(dirpath):].encode()).digest())\n\n if os.path.isfile(filepath):\n hash_md5 = _update_hash_md5(hash_md5, filepath)\n\n return hash_md5.hexdigest()\n\n\ndef _md5_hash_bucket(bucket_name, path):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(bucket_name)\n\n hash_md5 = hashlib.md5()\n\n for obj in bucket.objects.all():\n if obj.key.startswith(path):\n hash_md5.update(obj.e_tag.encode('utf-8'))\n\n return hash_md5.hexdigest()\n\n\ndef _update_hash_md5(hash_md5, filepath):\n with open(filepath, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5\n\n\ndef _get_collated_images(image_dir, sample, seed):\n np.random.seed(seed)\n\n labels = _get_labels(image_dir)\n filepaths = _get_filepaths(image_dir)\n\n figures = []\n if labels:\n for label in labels:\n label_paths = [path for 
path in filepaths\n if path.startswith(os.path.join(image_dir, label))]\n if len(label_paths) > sample:\n sample_paths = np.random.choice(label_paths, size=sample, replace=False)\n else:\n sample_paths = label_paths\n collated_image = _get_collated_image(sample_paths, label)\n figures.append(collated_image)\n else:\n if len(filepaths) > sample:\n sample_paths = np.random.choice(filepaths, size=sample, replace=False)\n else:\n sample_paths = filepaths\n collated_image = _get_collated_image(sample_paths)\n figures.append(collated_image)\n\n return figures\n\n\ndef _get_labels(dir_path):\n labels = []\n for fname in os.listdir(dir_path):\n if os.path.isdir(os.path.join(dir_path, fname)):\n labels.append(fname)\n return labels\n\n\ndef _get_filepaths(dir_path):\n filepaths = []\n for path, _, files in os.walk(dir_path):\n for name in files:\n filepaths.append(os.path.join(path, name))\n return filepaths\n\n\ndef _get_collated_image(filepaths, label=None, figsize=(16, 12), title_size=30):\n n = len(filepaths)\n yn = int(np.floor(np.sqrt(n)))\n xn = int(np.ceil(n / yn))\n\n fig, axs = plt.subplots(yn, xn, figsize=figsize)\n fig.suptitle(label, fontsize=title_size)\n\n for i, filepath in enumerate(filepaths):\n yi, xi = i // xn, i % xn\n image = plt.imread(filepath)\n axs[yi, xi].imshow(image)\n axs[yi, xi].set_xticks([])\n axs[yi, xi].set_yticks([])\n plt.tight_layout()\n fig.subplots_adjust(top=0.92)\n\n return fig\n" ]
[ [ "numpy.ceil", "numpy.random.choice", "numpy.random.seed", "matplotlib.pyplot.subplots", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.imread" ] ]
CaptorAB/OpenSeries
[ "d09cafb0d049d174c4c07b3b6493558ca18938ff" ]
[ "openseries/sim_price.py" ]
[ "# -*- coding: utf-8 -*-\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom openseries.stoch_processes import (\n ModelParameters,\n geometric_brownian_motion_log_returns,\n heston_model_levels,\n geometric_brownian_motion_jump_diffusion_levels,\n)\n\n\nclass ReturnSimulation(object):\n \"\"\"\n A general class to hold return simulations.\n \"\"\"\n\n number_of_sims: int\n trading_days: int\n trading_days_in_year: int\n mean_annual_return: float\n mean_annual_vol: float\n df: pd.DataFrame\n\n def __init__(self, d: dict):\n \"\"\"\n :param d: Dictionary containing class attributes set by class method.\n \"\"\"\n\n self.__dict__ = d\n\n @classmethod\n def from_normal(\n cls,\n n: int,\n d: int,\n mu: float,\n vol: float,\n t: int = 252,\n seed: Union[int, None] = 71,\n ):\n \"\"\"\n This function generates n number of random prices over\n t number of trading days.\n :param n: The number of simulations to generate.\n :param d: Number of trading days to simulate.\n :param mu: The mean return.\n :param vol: The mean standard deviation.\n :param t: The number of trading days used to annualize return and\n volatility.\n :param seed: This is the random seed going into numpy.random.seed().\n \"\"\"\n if seed:\n np.random.seed(seed)\n daily_returns = np.random.normal(\n loc=mu / t, scale=vol / np.sqrt(t), size=(n, d)\n )\n output = {\n \"number_of_sims\": n,\n \"trading_days\": d,\n \"trading_days_in_year\": t,\n \"mean_annual_return\": mu,\n \"mean_annual_vol\": vol,\n \"df\": pd.DataFrame(data=daily_returns),\n }\n return cls(d=output)\n\n @classmethod\n def from_lognormal(\n cls,\n n: int,\n d: int,\n mu: float,\n vol: float,\n t: int = 252,\n seed: Union[int, None] = 71,\n ):\n \"\"\"\n This function generates n number of random prices over\n t number of trading days.\n :param n: The number of simulations to generate.\n :param d: Number of trading days to simulate.\n :param mu: The mean return.\n :param vol: The mean standard deviation.\n :param t: The number of trading days used to annualize return and\n volatility.\n :param seed: This is the random seed going into numpy.random.seed().\n \"\"\"\n if seed:\n np.random.seed(seed)\n daily_returns = (\n np.random.lognormal(\n mean=mu / t, sigma=vol / np.sqrt(t), size=(n, d)\n )\n - 1\n )\n output = {\n \"number_of_sims\": n,\n \"trading_days\": d,\n \"trading_days_in_year\": t,\n \"mean_annual_return\": mu,\n \"mean_annual_vol\": vol,\n \"df\": pd.DataFrame(data=daily_returns),\n }\n return cls(d=output)\n\n @classmethod\n def from_gbm(\n cls,\n n: int,\n d: int,\n mu: float,\n vol: float,\n t: int = 252,\n seed: Union[int, None] = 71,\n ):\n \"\"\"\n This method constructs a sequence of log returns which, when\n exponentiated, produce a random Geometric Brownian Motion (GBM).\n :param n: The number of simulations to generate.\n :param d: Number of trading days to simulate.\n :param mu: The mean return.\n :param vol: The mean standard deviation.\n :param t: The number of trading days used to annualize return and\n volatility.\n :param seed: This is the random seed going into numpy.random.seed().\n \"\"\"\n mp = ModelParameters(\n all_s0=1, all_time=d, all_delta=1.0 / t, all_sigma=vol, gbm_mu=mu\n )\n if seed:\n np.random.seed(seed)\n daily_returns = []\n for i in range(n):\n daily_returns.append(geometric_brownian_motion_log_returns(mp))\n output = {\n \"number_of_sims\": n,\n \"trading_days\": d,\n \"trading_days_in_year\": t,\n \"mean_annual_return\": mu,\n \"mean_annual_vol\": vol,\n \"df\": 
pd.DataFrame(data=daily_returns),\n }\n return cls(d=output)\n\n @classmethod\n def from_heston(\n cls,\n n: int,\n d: int,\n mu: float,\n vol: float,\n heston_mu: float,\n heston_a: float,\n t: int = 252,\n seed: Union[int, None] = 71,\n ):\n \"\"\"\n NOTE - this method is dodgy! Need to debug!\n The Heston model is the geometric brownian motion model with\n stochastic volatility.\n :param n: The number of simulations to generate.\n :param d: Number of trading days to simulate.\n :param mu: The mean return.\n :param vol: This is the volatility of the stochastic processes and the\n starting volatility for the Heston model.\n :param heston_mu: This is the long run average volatility for\n the Heston model.\n :param heston_a: This is the rate of mean reversion for volatility in\n the Heston model.\n :param t: The number of trading days used to annualize return and\n volatility.\n :param seed: This is the random seed going into numpy.random.seed().\n \"\"\"\n mp = ModelParameters(\n all_s0=1,\n all_time=d,\n all_delta=1.0 / t,\n all_sigma=vol,\n gbm_mu=mu,\n heston_vol0=vol,\n heston_mu=heston_mu,\n heston_a=heston_a,\n )\n if seed:\n np.random.seed(seed)\n daily_returns = []\n for i in range(n):\n aray = heston_model_levels(mp)[0]\n r = aray[1:] / aray[:-1] - 1\n r = np.insert(r, 0, 0.0)\n daily_returns.append(r)\n output = {\n \"number_of_sims\": n,\n \"trading_days\": d,\n \"trading_days_in_year\": t,\n \"mean_annual_return\": mu,\n \"mean_annual_vol\": vol,\n \"df\": pd.DataFrame(data=daily_returns),\n }\n return cls(d=output)\n\n @classmethod\n def from_heston_vol(\n cls,\n n: int,\n d: int,\n mu: float,\n vol: float,\n heston_mu: float,\n heston_a: float,\n t: int = 252,\n seed: Union[int, None] = 71,\n ):\n \"\"\"\n\n :param n: The number of simulations to generate.\n :param d: Number of trading days to simulate.\n :param mu: The mean return.\n :param vol: This is the volatility of the stochastic processes and the\n starting volatility for the Heston model.\n :param heston_mu: This is the long run average volatility for\n the Heston model.\n :param heston_a: This is the rate of mean reversion for volatility in\n the Heston model.\n :param t: The number of trading days used to annualize return and\n volatility.\n :param seed: This is the random seed going into numpy.random.seed().\n \"\"\"\n mp = ModelParameters(\n all_s0=1,\n all_time=d,\n all_delta=1.0 / t,\n all_sigma=vol,\n gbm_mu=mu,\n heston_vol0=vol,\n heston_mu=heston_mu,\n heston_a=heston_a,\n )\n if seed:\n np.random.seed(seed)\n daily_returns = []\n for i in range(n):\n aray = heston_model_levels(mp)[1]\n r = aray[1:] / aray[:-1] - 1\n r = np.insert(r, 0, 0.0)\n daily_returns.append(r)\n output = {\n \"number_of_sims\": n,\n \"trading_days\": d,\n \"trading_days_in_year\": t,\n \"mean_annual_return\": mu,\n \"mean_annual_vol\": vol,\n \"df\": pd.DataFrame(data=daily_returns),\n }\n return cls(d=output)\n\n @classmethod\n def from_merton_jump_gbm(\n cls,\n n: int,\n d: int,\n mu: float,\n vol: float,\n jumps_lamda: float,\n jumps_sigma: float,\n jumps_mu: float,\n t: int = 252,\n seed: Union[int, None] = 71,\n ):\n \"\"\"\n\n :param n: The number of simulations to generate.\n :param d: Number of trading days to simulate.\n :param mu: The mean return.\n :param vol: This is the volatility of the stochastic processes and\n the starting volatility for the Heston model.\n :param jumps_lamda: This is the probability of a jump happening at\n each point in time.\n :param jumps_sigma: This is the volatility of the jump size.\n :param 
jumps_mu: This is the average jump size.\n :param t: The number of trading days used to annualize return and\n volatility.\n :param seed: This is the random seed going into numpy.random.seed().\n \"\"\"\n mp = ModelParameters(\n all_s0=1,\n all_time=d,\n all_delta=1.0 / t,\n all_sigma=vol,\n gbm_mu=mu,\n jumps_lamda=jumps_lamda,\n jumps_sigma=jumps_sigma,\n jumps_mu=jumps_mu,\n )\n if seed:\n np.random.seed(seed)\n daily_returns = []\n for i in range(n):\n aray = geometric_brownian_motion_jump_diffusion_levels(mp)\n r = aray[1:] / aray[:-1] - 1\n r = np.insert(r, 0, 0.0)\n daily_returns.append(r)\n output = {\n \"number_of_sims\": n,\n \"trading_days\": d,\n \"trading_days_in_year\": t,\n \"mean_annual_return\": mu,\n \"mean_annual_vol\": vol,\n \"df\": pd.DataFrame(data=daily_returns),\n }\n return cls(d=output)\n\n @property\n def results(self) -> pd.Series:\n return self.df.add(1.0).cumprod(axis=\"columns\").iloc[:, -1]\n\n @property\n def realized_mean_return(self) -> float:\n return (\n (self.results.mean() - 1)\n * self.trading_days_in_year\n / self.trading_days\n )\n\n @property\n def realized_vol(self) -> float:\n return self.results.add(1.0).std() / np.sqrt(self.trading_days_in_year)\n" ]
[ [ "numpy.random.seed", "pandas.DataFrame", "numpy.insert", "numpy.sqrt" ] ]
nicolaslrveiga/spark
[ "f079002aeec4f6d85ea367edf99c0ccb33928d27" ]
[ "python/pyspark/pandas/series.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA wrapper class for Spark Column to behave similar to pandas Series.\n\"\"\"\nimport datetime\nimport re\nimport inspect\nimport sys\nfrom collections.abc import Mapping\nfrom functools import partial, reduce\nfrom typing import (\n Any,\n Callable,\n Dict,\n Generic,\n IO,\n Iterable,\n List,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n cast,\n no_type_check,\n overload,\n TYPE_CHECKING,\n)\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.api.types import ( # type: ignore[attr-defined]\n is_list_like,\n is_hashable,\n CategoricalDtype,\n)\nfrom pandas.tseries.frequencies import DateOffset\nfrom pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame\nfrom pyspark.sql.types import (\n ArrayType,\n BooleanType,\n DecimalType,\n DoubleType,\n FloatType,\n IntegerType,\n IntegralType,\n LongType,\n NumericType,\n Row,\n StructType,\n TimestampType,\n)\nfrom pyspark.sql.window import Window\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Axis, Dtype, Label, Name, Scalar, T\nfrom pyspark.pandas.accessors import PandasOnSparkSeriesMethods\nfrom pyspark.pandas.categorical import CategoricalAccessor\nfrom pyspark.pandas.config import get_option\nfrom pyspark.pandas.base import IndexOpsMixin\nfrom pyspark.pandas.exceptions import SparkPandasIndexingError\nfrom pyspark.pandas.frame import DataFrame\nfrom pyspark.pandas.generic import Frame\nfrom pyspark.pandas.internal import (\n InternalField,\n InternalFrame,\n DEFAULT_SERIES_NAME,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_DEFAULT_INDEX_NAME,\n SPARK_DEFAULT_SERIES_NAME,\n)\nfrom pyspark.pandas.missing.series import MissingPandasLikeSeries\nfrom pyspark.pandas.plot import PandasOnSparkPlotAccessor\nfrom pyspark.pandas.ml import corr\nfrom pyspark.pandas.utils import (\n combine_frames,\n is_name_like_tuple,\n is_name_like_value,\n name_like_string,\n same_anchor,\n scol_for,\n sql_conf,\n validate_arguments_and_invoke_function,\n validate_axis,\n validate_bool_kwarg,\n verify_temp_column_name,\n SPARK_CONF_ARROW_ENABLED,\n log_advice,\n)\nfrom pyspark.pandas.datetimes import DatetimeMethods\nfrom pyspark.pandas.spark import functions as SF\nfrom pyspark.pandas.spark.accessors import SparkSeriesMethods\nfrom pyspark.pandas.strings import StringMethods\nfrom pyspark.pandas.typedef import (\n infer_return_type,\n spark_type_to_pandas_dtype,\n ScalarType,\n SeriesType,\n create_type_for_series_type,\n)\n\nif TYPE_CHECKING:\n from pyspark.sql._typing import ColumnOrName\n\n from pyspark.pandas.groupby import SeriesGroupBy\n from 
pyspark.pandas.indexes import Index\n from pyspark.pandas.spark.accessors import SparkIndexOpsMethods\n\n# This regular expression pattern is complied and defined here to avoid to compile the same\n# pattern every time it is used in _repr_ in Series.\n# This pattern basically seeks the footer string from pandas'\nREPR_PATTERN = re.compile(r\"Length: (?P<length>[0-9]+)\")\n\n_flex_doc_SERIES = \"\"\"\nReturn {desc} of series and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``\n\nParameters\n----------\nother : Series or scalar value\n\nReturns\n-------\nSeries\n The result of the operation.\n\nSee Also\n--------\nSeries.{reverse}\n\n{series_examples}\n\"\"\"\n\n_add_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.add(df.b)\na 4.0\nb NaN\nc 6.0\nd NaN\ndtype: float64\n\n>>> df.a.radd(df.b)\na 4.0\nb NaN\nc 6.0\nd NaN\ndtype: float64\n\"\"\"\n\n_sub_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.subtract(df.b)\na 0.0\nb NaN\nc 2.0\nd NaN\ndtype: float64\n\n>>> df.a.rsub(df.b)\na 0.0\nb NaN\nc -2.0\nd NaN\ndtype: float64\n\"\"\"\n\n_mul_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.multiply(df.b)\na 4.0\nb NaN\nc 8.0\nd NaN\ndtype: float64\n\n>>> df.a.rmul(df.b)\na 4.0\nb NaN\nc 8.0\nd NaN\ndtype: float64\n\"\"\"\n\n_div_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.divide(df.b)\na 1.0\nb NaN\nc 2.0\nd NaN\ndtype: float64\n\n>>> df.a.rdiv(df.b)\na 1.0\nb NaN\nc 0.5\nd NaN\ndtype: float64\n\"\"\"\n\n_pow_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.pow(df.b)\na 4.0\nb NaN\nc 16.0\nd NaN\ndtype: float64\n\n>>> df.a.rpow(df.b)\na 4.0\nb NaN\nc 16.0\nd NaN\ndtype: float64\n\"\"\"\n\n_mod_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.mod(df.b)\na 0.0\nb NaN\nc 0.0\nd NaN\ndtype: float64\n\n>>> df.a.rmod(df.b)\na 0.0\nb NaN\nc 2.0\nd NaN\ndtype: float64\n\"\"\"\n\n_floordiv_example_SERIES = \"\"\"\nExamples\n--------\n>>> df = ps.DataFrame({'a': [2, 2, 4, np.nan],\n... 'b': [2, np.nan, 2, np.nan]},\n... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n>>> df\n a b\na 2.0 2.0\nb 2.0 NaN\nc 4.0 2.0\nd NaN NaN\n\n>>> df.a.floordiv(df.b)\na 1.0\nb NaN\nc 2.0\nd NaN\ndtype: float64\n\n>>> df.a.rfloordiv(df.b)\na 1.0\nb NaN\nc 0.0\nd NaN\ndtype: float64\n\"\"\"\n\n# Needed to disambiguate Series.str and str type\nstr_type = str\n\n\nclass Series(Frame, IndexOpsMixin, Generic[T]):\n \"\"\"\n pandas-on-Spark Series that corresponds to pandas Series logically. This holds Spark Column\n internally.\n\n :ivar _internal: an internal immutable Frame to manage metadata.\n :type _internal: InternalFrame\n :ivar _psdf: Parent's pandas-on-Spark DataFrame\n :type _psdf: ps.DataFrame\n\n Parameters\n ----------\n data : array-like, dict, or scalar value, pandas Series\n Contains data stored in Series\n Note that if `data` is a pandas Series, other arguments should not be used.\n index : array-like or Index (1d)\n Values must be hashable and have the same length as `data`.\n Non-unique index values are allowed. Will default to\n RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index\n sequence are used, the index will override the keys found in the\n dict.\n dtype : numpy.dtype or None\n If None, dtype will be inferred\n copy : boolean, default False\n Copy input data\n \"\"\"\n\n def __init__( # type: ignore[no-untyped-def]\n self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False\n ):\n assert data is not None\n\n self._anchor: DataFrame\n self._col_label: Label\n if isinstance(data, DataFrame):\n assert dtype is None\n assert name is None\n assert not copy\n assert not fastpath\n\n self._anchor = data\n self._col_label = index\n else:\n if isinstance(data, pd.Series):\n assert index is None\n assert dtype is None\n assert name is None\n assert not copy\n assert not fastpath\n s = data\n else:\n s = pd.Series(\n data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath\n )\n internal = InternalFrame.from_pandas(pd.DataFrame(s))\n if s.name is None:\n internal = internal.copy(column_labels=[None])\n anchor = DataFrame(internal)\n\n self._anchor = anchor\n self._col_label = anchor._internal.column_labels[0]\n object.__setattr__(anchor, \"_psseries\", {self._column_label: self})\n\n @property\n def _psdf(self) -> DataFrame:\n return self._anchor\n\n @property\n def _internal(self) -> InternalFrame:\n return self._psdf._internal.select_column(self._column_label)\n\n @property\n def _column_label(self) -> Optional[Label]:\n return self._col_label\n\n def _update_anchor(self, psdf: DataFrame) -> None:\n assert psdf._internal.column_labels == [self._column_label], (\n psdf._internal.column_labels,\n [self._column_label],\n )\n self._anchor = psdf\n object.__setattr__(psdf, \"_psseries\", {self._column_label: self})\n\n def _with_new_scol(self, scol: Column, *, field: Optional[InternalField] = None) -> \"Series\":\n \"\"\"\n Copy pandas-on-Spark Series with the new Spark Column.\n\n :param scol: the new Spark Column\n :return: the copied Series\n \"\"\"\n name = name_like_string(self._column_label)\n internal = self._internal.copy(\n data_spark_columns=[scol.alias(name)],\n data_fields=[\n field if field is None or field.struct_field is None else field.copy(name=name)\n ],\n )\n return first_series(DataFrame(internal))\n\n spark: \"SparkIndexOpsMethods\" = CachedAccessor( # type: ignore[assignment]\n \"spark\", SparkSeriesMethods\n )\n\n @property\n def dtypes(self) -> Dtype:\n \"\"\"Return the dtype object of the underlying data.\n\n >>> s = 
ps.Series(list('abc'))\n >>> s.dtype == s.dtypes\n True\n \"\"\"\n return self.dtype\n\n @property\n def axes(self) -> List[\"Index\"]:\n \"\"\"\n Return a list of the row axis labels.\n\n Examples\n --------\n\n >>> psser = ps.Series([1, 2, 3])\n >>> psser.axes\n [Int64Index([0, 1, 2], dtype='int64')]\n \"\"\"\n return [self.index]\n\n # Arithmetic Operators\n def add(self, other: Any) -> \"Series\":\n return self + other\n\n add.__doc__ = _flex_doc_SERIES.format(\n desc=\"Addition\",\n op_name=\"+\",\n equiv=\"series + other\",\n reverse=\"radd\",\n series_examples=_add_example_SERIES,\n )\n\n def radd(self, other: Any) -> \"Series\":\n return other + self\n\n radd.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Addition\",\n op_name=\"+\",\n equiv=\"other + series\",\n reverse=\"add\",\n series_examples=_add_example_SERIES,\n )\n\n def div(self, other: Any) -> \"Series\":\n return self / other\n\n div.__doc__ = _flex_doc_SERIES.format(\n desc=\"Floating division\",\n op_name=\"/\",\n equiv=\"series / other\",\n reverse=\"rdiv\",\n series_examples=_div_example_SERIES,\n )\n\n divide = div\n\n def rdiv(self, other: Any) -> \"Series\":\n return other / self\n\n rdiv.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Floating division\",\n op_name=\"/\",\n equiv=\"other / series\",\n reverse=\"div\",\n series_examples=_div_example_SERIES,\n )\n\n def truediv(self, other: Any) -> \"Series\":\n return self / other\n\n truediv.__doc__ = _flex_doc_SERIES.format(\n desc=\"Floating division\",\n op_name=\"/\",\n equiv=\"series / other\",\n reverse=\"rtruediv\",\n series_examples=_div_example_SERIES,\n )\n\n def rtruediv(self, other: Any) -> \"Series\":\n return other / self\n\n rtruediv.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Floating division\",\n op_name=\"/\",\n equiv=\"other / series\",\n reverse=\"truediv\",\n series_examples=_div_example_SERIES,\n )\n\n def mul(self, other: Any) -> \"Series\":\n return self * other\n\n mul.__doc__ = _flex_doc_SERIES.format(\n desc=\"Multiplication\",\n op_name=\"*\",\n equiv=\"series * other\",\n reverse=\"rmul\",\n series_examples=_mul_example_SERIES,\n )\n\n multiply = mul\n\n def rmul(self, other: Any) -> \"Series\":\n return other * self\n\n rmul.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Multiplication\",\n op_name=\"*\",\n equiv=\"other * series\",\n reverse=\"mul\",\n series_examples=_mul_example_SERIES,\n )\n\n def sub(self, other: Any) -> \"Series\":\n return self - other\n\n sub.__doc__ = _flex_doc_SERIES.format(\n desc=\"Subtraction\",\n op_name=\"-\",\n equiv=\"series - other\",\n reverse=\"rsub\",\n series_examples=_sub_example_SERIES,\n )\n\n subtract = sub\n\n def rsub(self, other: Any) -> \"Series\":\n return other - self\n\n rsub.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Subtraction\",\n op_name=\"-\",\n equiv=\"other - series\",\n reverse=\"sub\",\n series_examples=_sub_example_SERIES,\n )\n\n def mod(self, other: Any) -> \"Series\":\n return self % other\n\n mod.__doc__ = _flex_doc_SERIES.format(\n desc=\"Modulo\",\n op_name=\"%\",\n equiv=\"series % other\",\n reverse=\"rmod\",\n series_examples=_mod_example_SERIES,\n )\n\n def rmod(self, other: Any) -> \"Series\":\n return other % self\n\n rmod.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Modulo\",\n op_name=\"%\",\n equiv=\"other % series\",\n reverse=\"mod\",\n series_examples=_mod_example_SERIES,\n )\n\n def pow(self, other: Any) -> \"Series\":\n return self ** other\n\n pow.__doc__ = _flex_doc_SERIES.format(\n desc=\"Exponential power of 
series\",\n op_name=\"**\",\n equiv=\"series ** other\",\n reverse=\"rpow\",\n series_examples=_pow_example_SERIES,\n )\n\n def rpow(self, other: Any) -> \"Series\":\n return other ** self\n\n rpow.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Exponential power\",\n op_name=\"**\",\n equiv=\"other ** series\",\n reverse=\"pow\",\n series_examples=_pow_example_SERIES,\n )\n\n def floordiv(self, other: Any) -> \"Series\":\n return self // other\n\n floordiv.__doc__ = _flex_doc_SERIES.format(\n desc=\"Integer division\",\n op_name=\"//\",\n equiv=\"series // other\",\n reverse=\"rfloordiv\",\n series_examples=_floordiv_example_SERIES,\n )\n\n def rfloordiv(self, other: Any) -> \"Series\":\n return other // self\n\n rfloordiv.__doc__ = _flex_doc_SERIES.format(\n desc=\"Reverse Integer division\",\n op_name=\"//\",\n equiv=\"other // series\",\n reverse=\"floordiv\",\n series_examples=_floordiv_example_SERIES,\n )\n\n # create accessor for pandas-on-Spark specific methods.\n pandas_on_spark = CachedAccessor(\"pandas_on_spark\", PandasOnSparkSeriesMethods)\n\n # keep the name \"koalas\" for backward compatibility.\n koalas = CachedAccessor(\"koalas\", PandasOnSparkSeriesMethods)\n\n # Comparison Operators\n def eq(self, other: Any) -> \"Series\":\n \"\"\"\n Compare if the current value is equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.a == 1\n a True\n b False\n c False\n d False\n Name: a, dtype: bool\n\n >>> df.b.eq(1)\n a True\n b False\n c True\n d False\n Name: b, dtype: bool\n \"\"\"\n return self == other\n\n equals = eq\n\n def gt(self, other: Any) -> \"Series\":\n \"\"\"\n Compare if the current value is greater than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.a > 1\n a False\n b True\n c True\n d True\n Name: a, dtype: bool\n\n >>> df.b.gt(1)\n a False\n b False\n c False\n d False\n Name: b, dtype: bool\n \"\"\"\n return self > other\n\n def ge(self, other: Any) -> \"Series\":\n \"\"\"\n Compare if the current value is greater than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.a >= 2\n a False\n b True\n c True\n d True\n Name: a, dtype: bool\n\n >>> df.b.ge(2)\n a False\n b False\n c False\n d False\n Name: b, dtype: bool\n \"\"\"\n return self >= other\n\n def lt(self, other: Any) -> \"Series\":\n \"\"\"\n Compare if the current value is less than the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.a < 1\n a False\n b False\n c False\n d False\n Name: a, dtype: bool\n\n >>> df.b.lt(2)\n a True\n b False\n c True\n d False\n Name: b, dtype: bool\n \"\"\"\n return self < other\n\n def le(self, other: Any) -> \"Series\":\n \"\"\"\n Compare if the current value is less than or equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.a <= 2\n a True\n b True\n c False\n d False\n Name: a, dtype: bool\n\n >>> df.b.le(2)\n a True\n b False\n c True\n d False\n Name: b, dtype: bool\n \"\"\"\n return self <= other\n\n def ne(self, other: Any) -> \"Series\":\n \"\"\"\n Compare if the current value is not equal to the other.\n\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4],\n ... 'b': [1, np.nan, 1, np.nan]},\n ... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])\n\n >>> df.a != 1\n a False\n b True\n c True\n d True\n Name: a, dtype: bool\n\n >>> df.b.ne(1)\n a False\n b True\n c False\n d True\n Name: b, dtype: bool\n \"\"\"\n return self != other\n\n def divmod(self, other: Any) -> Tuple[\"Series\", \"Series\"]:\n \"\"\"\n Return Integer division and modulo of series and other, element-wise\n (binary operator `divmod`).\n\n Parameters\n ----------\n other : Series or scalar value\n\n Returns\n -------\n 2-Tuple of Series\n The result of the operation.\n\n See Also\n --------\n Series.rdivmod\n \"\"\"\n return self.floordiv(other), self.mod(other)\n\n def rdivmod(self, other: Any) -> Tuple[\"Series\", \"Series\"]:\n \"\"\"\n Return Integer division and modulo of series and other, element-wise\n (binary operator `rdivmod`).\n\n Parameters\n ----------\n other : Series or scalar value\n\n Returns\n -------\n 2-Tuple of Series\n The result of the operation.\n\n See Also\n --------\n Series.divmod\n \"\"\"\n return self.rfloordiv(other), self.rmod(other)\n\n def between(self, left: Any, right: Any, inclusive: bool = True) -> \"Series\":\n \"\"\"\n Return boolean Series equivalent to left <= series <= right.\n This function returns a boolean vector containing `True` wherever the\n corresponding Series element is between the boundary values `left` and\n `right`. NA values are treated as `False`.\n\n Parameters\n ----------\n left : scalar or list-like\n Left boundary.\n right : scalar or list-like\n Right boundary.\n inclusive : bool, default True\n Include boundaries.\n\n Returns\n -------\n Series\n Series representing whether each element is between left and\n right (inclusive).\n\n See Also\n --------\n Series.gt : Greater than of series and other.\n Series.lt : Less than of series and other.\n\n Notes\n -----\n This function is equivalent to ``(left <= ser) & (ser <= right)``\n\n Examples\n --------\n >>> s = ps.Series([2, 0, 4, 8, np.nan])\n\n Boundary values are included by default:\n\n >>> s.between(1, 4)\n 0 True\n 1 False\n 2 True\n 3 False\n 4 False\n dtype: bool\n\n With `inclusive` set to ``False`` boundary values are excluded:\n\n >>> s.between(1, 4, inclusive=False)\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n `left` and `right` can be any scalar value:\n\n >>> s = ps.Series(['Alice', 'Bob', 'Carol', 'Eve'])\n >>> s.between('Anna', 'Daniel')\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n \"\"\"\n if inclusive:\n lmask = self >= left\n rmask = self <= right\n else:\n lmask = self > left\n rmask = self < right\n\n return lmask & rmask\n\n def cov(self, other: \"Series\", min_periods: Optional[int] = None) -> float:\n \"\"\"\n Compute covariance with Series, excluding missing values.\n\n .. 
versionadded:: 3.3.0\n\n Parameters\n ----------\n other : Series\n Series with which to compute the covariance.\n min_periods : int, optional\n Minimum number of observations needed to have a valid result.\n\n Returns\n -------\n float\n Covariance between Series and other\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> s1 = ps.Series([0.90010907, 0.13484424, 0.62036035])\n >>> s2 = ps.Series([0.12528585, 0.26962463, 0.51111198])\n >>> s1.cov(s2)\n -0.016857626527158744\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(other, Series):\n raise TypeError(\"unsupported type: %s\" % type(other))\n if not np.issubdtype(self.dtype, np.number): # type: ignore[arg-type]\n raise TypeError(\"unsupported dtype: %s\" % self.dtype)\n if not np.issubdtype(other.dtype, np.number): # type: ignore[arg-type]\n raise TypeError(\"unsupported dtype: %s\" % other.dtype)\n\n min_periods = 1 if min_periods is None else min_periods\n\n if same_anchor(self, other):\n sdf = self._internal.spark_frame.select(self.spark.column, other.spark.column)\n else:\n combined = combine_frames(self.to_frame(), other.to_frame())\n sdf = combined._internal.spark_frame.select(*combined._internal.data_spark_columns)\n\n sdf = sdf.dropna()\n\n if len(sdf.head(min_periods)) < min_periods:\n return np.nan\n else:\n return sdf.select(F.covar_samp(*sdf.columns)).head(1)[0][0]\n\n # TODO: NaN and None when ``arg`` is an empty dict\n # TODO: Support ps.Series ``arg``\n def map(\n self, arg: Union[Dict, Callable[[Any], Any], pd.Series], na_action: Optional[str] = None\n ) -> \"Series\":\n \"\"\"\n Map values of Series according to input correspondence.\n\n Used for substituting each value in a Series with another value,\n that may be derived from a function, a ``dict``.\n\n .. note:: make sure the size of the dictionary is not huge because it could\n downgrade the performance or throw OutOfMemoryError due to a huge\n expression within Spark. Consider the input as a functions as an\n alternative instead in this case.\n\n Parameters\n ----------\n arg : function, dict or pd.Series\n Mapping correspondence.\n na_action :\n If `ignore`, propagate NA values, without passing them to the mapping correspondence.\n\n Returns\n -------\n Series\n Same index as caller.\n\n See Also\n --------\n Series.apply : For applying more complex functions on a Series.\n DataFrame.applymap : Apply a function elementwise on a whole DataFrame.\n\n Notes\n -----\n When ``arg`` is a dictionary, values in Series that are not in the\n dictionary (as keys) are converted to ``None``. However, if the\n dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.\n provides a method for default values), then this default is used\n rather than ``None``.\n\n Examples\n --------\n >>> s = ps.Series(['cat', 'dog', None, 'rabbit'])\n >>> s\n 0 cat\n 1 dog\n 2 None\n 3 rabbit\n dtype: object\n\n ``map`` accepts a ``dict``. Values that are not found\n in the ``dict`` are converted to ``None``, unless the dict has a default\n value (e.g. ``defaultdict``):\n\n >>> s.map({'cat': 'kitten', 'dog': 'puppy'})\n 0 kitten\n 1 puppy\n 2 None\n 3 None\n dtype: object\n\n It also accepts a pandas Series:\n\n >>> pser = pd.Series(['kitten', 'puppy'], index=['cat', 'dog'])\n >>> s.map(pser)\n 0 kitten\n 1 puppy\n 2 None\n 3 None\n dtype: object\n\n It also accepts a function:\n\n >>> def format(x) -> str:\n ... 
return 'I am a {}'.format(x)\n\n >>> s.map(format)\n 0 I am a cat\n 1 I am a dog\n 2 I am a None\n 3 I am a rabbit\n dtype: object\n\n To avoid applying the function to missing values (and keep them as NaN)\n na_action='ignore' can be used:\n\n >>> s.map('I am a {}'.format, na_action='ignore')\n 0 I am a cat\n 1 I am a dog\n 2 None\n 3 I am a rabbit\n dtype: object\n \"\"\"\n if isinstance(arg, (dict, pd.Series)):\n is_start = True\n # In case dictionary is empty.\n current = F.when(SF.lit(False), SF.lit(None).cast(self.spark.data_type))\n\n for to_replace, value in arg.items():\n if is_start:\n current = F.when(self.spark.column == SF.lit(to_replace), value)\n is_start = False\n else:\n current = current.when(self.spark.column == SF.lit(to_replace), value)\n\n if hasattr(arg, \"__missing__\"):\n tmp_val = arg[np._NoValue] # type: ignore[attr-defined]\n # Remove in case it's set in defaultdict.\n del arg[np._NoValue] # type: ignore[attr-defined]\n current = current.otherwise(SF.lit(tmp_val))\n else:\n current = current.otherwise(SF.lit(None).cast(self.spark.data_type))\n return self._with_new_scol(current)\n else:\n return self.pandas_on_spark.transform_batch(lambda pser: pser.map(arg, na_action))\n\n @property\n def shape(self) -> Tuple[int]:\n \"\"\"Return a tuple of the shape of the underlying data.\"\"\"\n return (len(self),)\n\n @property\n def name(self) -> Name:\n \"\"\"Return name of the Series.\"\"\"\n name = self._column_label\n if name is not None and len(name) == 1:\n return name[0]\n else:\n return name\n\n @name.setter\n def name(self, name: Name) -> None:\n self.rename(name, inplace=True)\n\n # TODO: Currently, changing index labels taking dictionary/Series is not supported.\n def rename(\n self, index: Optional[Union[Name, Callable[[Any], Any]]] = None, **kwargs: Any\n ) -> \"Series\":\n \"\"\"\n Alter Series index labels or name.\n\n Parameters\n ----------\n index : scalar or function, optional\n Functions are transformations to apply to the index.\n Scalar will alter the Series.name attribute.\n\n inplace : bool, default False\n Whether to return a new Series. 
If True then value of copy is\n ignored.\n\n Returns\n -------\n Series\n Series with index labels or name altered.\n\n Examples\n --------\n\n >>> s = ps.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n \"\"\"\n if index is None:\n pass\n if callable(index):\n if kwargs.get(\"inplace\", False):\n raise ValueError(\"inplace True is not supported yet for a function 'index'\")\n frame = self.to_frame()\n new_index_name = verify_temp_column_name(frame, \"__index_name__\")\n frame[new_index_name] = self.index.map(index)\n frame.set_index(new_index_name, inplace=True)\n frame.index.name = self.index.name\n return first_series(frame).rename(self.name)\n elif isinstance(index, (pd.Series, dict)):\n raise ValueError(\"'index' of %s type is not supported yet\" % type(index).__name__)\n elif not is_hashable(index):\n raise TypeError(\"Series.name must be a hashable type\")\n elif not isinstance(index, tuple):\n index = (index,)\n name = name_like_string(index)\n scol = self.spark.column.alias(name)\n field = self._internal.data_fields[0].copy(name=name)\n\n internal = self._internal.copy(\n column_labels=[index],\n data_spark_columns=[scol],\n data_fields=[field],\n column_label_names=None,\n )\n psdf: DataFrame = DataFrame(internal)\n\n if kwargs.get(\"inplace\", False):\n self._col_label = index\n self._update_anchor(psdf)\n return self\n else:\n return first_series(psdf)\n\n def rename_axis(\n self, mapper: Optional[Any] = None, index: Optional[Any] = None, inplace: bool = False\n ) -> Optional[\"Series\"]:\n \"\"\"\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper, index : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to the index values.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series.\n\n Returns\n -------\n Series, or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Examples\n --------\n >>> s = ps.Series([\"dog\", \"cat\", \"monkey\"], name=\"animal\")\n >>> s # doctest: +NORMALIZE_WHITESPACE\n 0 dog\n 1 cat\n 2 monkey\n Name: animal, dtype: object\n >>> s.rename_axis(\"index\").sort_index() # doctest: +NORMALIZE_WHITESPACE\n index\n 0 dog\n 1 cat\n 2 monkey\n Name: animal, dtype: object\n\n **MultiIndex**\n\n >>> index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> s = ps.Series([4, 4, 2], index=index, name='num_legs')\n >>> s # doctest: +NORMALIZE_WHITESPACE\n type name\n mammal dog 4\n cat 4\n monkey 2\n Name: num_legs, dtype: int64\n >>> s.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE\n class name\n mammal cat 4\n dog 4\n monkey 2\n Name: num_legs, dtype: int64\n >>> s.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE\n TYPE NAME\n mammal cat 4\n dog 4\n monkey 2\n Name: num_legs, dtype: int64\n \"\"\"\n psdf = self.to_frame().rename_axis(mapper=mapper, index=index, inplace=False)\n if inplace:\n self._update_anchor(psdf)\n return None\n else:\n return first_series(psdf)\n\n @property\n def index(self) -> \"ps.Index\":\n \"\"\"The index (axis labels) Column of the Series.\n\n See Also\n --------\n Index\n \"\"\"\n return self._psdf.index\n\n @property\n def is_unique(self) -> bool:\n \"\"\"\n Return boolean if values in the object are unique\n\n Returns\n -------\n is_unique : boolean\n\n >>> ps.Series([1, 2, 3]).is_unique\n True\n >>> ps.Series([1, 2, 2]).is_unique\n False\n >>> ps.Series([1, 2, 3, None]).is_unique\n True\n \"\"\"\n scol = self.spark.column\n\n # Here we check:\n # 1. the distinct count without nulls and count without nulls for non-null values\n # 2. count null values and see if null is a distinct value.\n #\n # This workaround is in order to calculate the distinct count including nulls in\n # single pass. Note that COUNT(DISTINCT expr) in Spark is designed to ignore nulls.\n return self._internal.spark_frame.select(\n (F.count(scol) == F.countDistinct(scol))\n & (F.count(F.when(scol.isNull(), 1).otherwise(None)) <= 1)\n ).collect()[0][0]\n\n def reset_index(\n self,\n level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,\n drop: bool = False,\n name: Optional[Name] = None,\n inplace: bool = False,\n ) -> Optional[Union[\"Series\", DataFrame]]:\n \"\"\"\n Generate a new DataFrame or Series with the index reset.\n\n This is useful when the index needs to be treated as a column,\n or when the index is meaningless and needs to be reset\n to the default before another operation.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default optional\n For a Series with a MultiIndex, only remove the specified levels from the index.\n Removes all levels by default.\n drop : bool, default False\n Just reset the index, without inserting it as a column in the new DataFrame.\n name : object, optional\n The name to use for the column containing the original Series values.\n Uses self.name by default. 
This argument is ignored when drop is True.\n inplace : bool, default False\n Modify the Series in place (do not create a new object).\n\n Returns\n -------\n Series or DataFrame\n When `drop` is False (the default), a DataFrame is returned.\n The newly created columns will come first in the DataFrame,\n followed by the original Series values.\n When `drop` is True, a `Series` is returned.\n In either case, if ``inplace=True``, no value is returned.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))\n\n Generate a DataFrame with default index.\n\n >>> s.reset_index()\n idx 0\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To specify the name of the new column use `name`.\n\n >>> s.reset_index(name='values')\n idx values\n 0 a 1\n 1 b 2\n 2 c 3\n 3 d 4\n\n To generate a new Series with the default set `drop` to True.\n\n >>> s.reset_index(drop=True)\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n To update the Series in place, without generating a new one\n set `inplace` to True. Note that it also requires ``drop=True``.\n\n >>> s.reset_index(inplace=True, drop=True)\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace and not drop:\n raise TypeError(\"Cannot reset_index inplace on a Series to create a DataFrame\")\n\n if drop:\n psdf = self._psdf[[self.name]]\n else:\n psser = self\n if name is not None:\n psser = psser.rename(name)\n psdf = psser.to_frame()\n psdf = psdf.reset_index(level=level, drop=drop)\n if drop:\n if inplace:\n self._update_anchor(psdf)\n return None\n else:\n return first_series(psdf)\n else:\n return psdf\n\n def to_frame(self, name: Optional[Name] = None) -> DataFrame:\n \"\"\"\n Convert Series to DataFrame.\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame representation of Series.\n\n Examples\n --------\n >>> s = ps.Series([\"a\", \"b\", \"c\"])\n >>> s.to_frame()\n 0\n 0 a\n 1 b\n 2 c\n\n >>> s = ps.Series([\"a\", \"b\", \"c\"], name=\"vals\")\n >>> s.to_frame()\n vals\n 0 a\n 1 b\n 2 c\n \"\"\"\n if name is not None:\n renamed = self.rename(name)\n elif self._column_label is None:\n renamed = self.rename(DEFAULT_SERIES_NAME)\n else:\n renamed = self\n return DataFrame(renamed._internal)\n\n to_dataframe = to_frame\n\n def to_string(\n self,\n buf: Optional[IO[str]] = None,\n na_rep: str = \"NaN\",\n float_format: Optional[Callable[[float], str]] = None,\n header: bool = True,\n index: bool = True,\n length: bool = False,\n dtype: bool = False,\n name: bool = False,\n max_rows: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a string representation of the Series.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory. 
If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n buffer to write to\n na_rep : string, optional\n string representation of NAN to use, default 'NaN'\n float_format : one-parameter function, optional\n formatter function to apply to columns' elements if they are floats\n default None\n header : boolean, default True\n Add the Series header (index name)\n index : bool, optional\n Add index (row) labels, default True\n length : boolean, default False\n Add the Series length\n dtype : boolean, default False\n Add the Series dtype\n name : boolean, default False\n Add the Series name if not None\n max_rows : int, optional\n Maximum number of rows to show before truncating. If None, show\n all.\n\n Returns\n -------\n formatted : string (if not buffer passed)\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])\n >>> print(df['dogs'].to_string())\n 0 0.2\n 1 0.0\n 2 0.6\n 3 0.2\n\n >>> print(df['dogs'].to_string(max_rows=2))\n 0 0.2\n 1 0.0\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n psseries = self.head(max_rows)\n else:\n psseries = self\n\n return validate_arguments_and_invoke_function(\n psseries._to_internal_pandas(), self.to_string, pd.Series.to_string, args\n )\n\n def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:\n # Docstring defined below by reusing DataFrame.to_clipboard's.\n args = locals()\n psseries = self\n\n return validate_arguments_and_invoke_function(\n psseries._to_internal_pandas(), self.to_clipboard, pd.Series.to_clipboard, args\n )\n\n to_clipboard.__doc__ = DataFrame.to_clipboard.__doc__\n\n def to_dict(self, into: Type = dict) -> Mapping:\n \"\"\"\n Convert Series to {label -> value} dict or dict-like object.\n\n .. note:: This method should only be used if the resulting pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n into : class, default dict\n The collections.abc.Mapping subclass to use as the return\n object. Can be the actual class or an empty\n instance of the mapping type you want. 
If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n collections.abc.Mapping\n Key-value representation of Series.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4])\n >>> s_dict = s.to_dict()\n >>> sorted(s_dict.items())\n [(0, 1), (1, 2), (2, 3), (3, 4)]\n\n >>> from collections import OrderedDict, defaultdict\n >>> s.to_dict(OrderedDict)\n OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])\n\n >>> dd = defaultdict(list)\n >>> s.to_dict(dd) # doctest: +ELLIPSIS\n defaultdict(<class 'list'>, {...})\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n psseries = self\n return validate_arguments_and_invoke_function(\n psseries._to_internal_pandas(), self.to_dict, pd.Series.to_dict, args\n )\n\n def to_latex(\n self,\n buf: Optional[IO[str]] = None,\n columns: Optional[List[Name]] = None,\n col_space: Optional[int] = None,\n header: bool = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[\n Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]\n ] = None,\n float_format: Optional[Callable[[float], str]] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n bold_rows: bool = False,\n column_format: Optional[str] = None,\n longtable: Optional[bool] = None,\n escape: Optional[bool] = None,\n encoding: Optional[str] = None,\n decimal: str = \".\",\n multicolumn: Optional[bool] = None,\n multicolumn_format: Optional[str] = None,\n multirow: Optional[bool] = None,\n ) -> Optional[str]:\n\n args = locals()\n psseries = self\n return validate_arguments_and_invoke_function(\n psseries._to_internal_pandas(), self.to_latex, pd.Series.to_latex, args\n )\n\n to_latex.__doc__ = DataFrame.to_latex.__doc__\n\n def to_pandas(self) -> pd.Series:\n \"\"\"\n Return a pandas Series.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], columns=['dogs', 'cats'])\n >>> df['dogs'].to_pandas()\n 0 0.2\n 1 0.0\n 2 0.6\n 3 0.2\n Name: dogs, dtype: float64\n \"\"\"\n log_advice(\n \"`to_pandas` loads all data into the driver's memory. \"\n \"It should only be used if the resulting pandas Series is expected to be small.\"\n )\n return self._to_pandas()\n\n def _to_pandas(self) -> pd.Series:\n \"\"\"\n Same as `to_pandas()`, without issueing the advice log for internal usage.\n \"\"\"\n return self._to_internal_pandas().copy()\n\n def to_list(self) -> List:\n \"\"\"\n Return a list of the values.\n\n These are each a scalar type, which is a Python scalar\n (for str, int, float) or a pandas scalar\n (for Timestamp/Timedelta/Interval/Period)\n\n .. note:: This method should only be used if the resulting list is expected\n to be small, as all the data is loaded into the driver's memory.\n\n \"\"\"\n log_advice(\n \"`to_list` loads all data into the driver's memory. 
\"\n \"It should only be used if the resulting list is expected to be small.\"\n )\n return self._to_internal_pandas().tolist()\n\n tolist = to_list\n\n def drop_duplicates(self, keep: str = \"first\", inplace: bool = False) -> Optional[\"Series\"]:\n \"\"\"\n Return Series with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n Method to handle dropping duplicates:\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n inplace : bool, default ``False``\n If ``True``, performs operation inplace and returns None.\n\n Returns\n -------\n Series\n Series with duplicates dropped.\n\n Examples\n --------\n Generate a Series with duplicated entries.\n\n >>> s = ps.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],\n ... name='animal')\n >>> s.sort_index()\n 0 lama\n 1 cow\n 2 lama\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n With the 'keep' parameter, the selection behaviour of duplicated values\n can be changed. The value 'first' keeps the first occurrence for each\n set of duplicated entries. The default value of keep is 'first'.\n\n >>> s.drop_duplicates().sort_index()\n 0 lama\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n\n The value 'last' for parameter 'keep' keeps the last occurrence for\n each set of duplicated entries.\n\n >>> s.drop_duplicates(keep='last').sort_index()\n 1 cow\n 3 beetle\n 4 lama\n 5 hippo\n Name: animal, dtype: object\n\n The value ``False`` for parameter 'keep' discards all sets of\n duplicated entries. Setting the value of 'inplace' to ``True`` performs\n the operation inplace and returns ``None``.\n\n >>> s.drop_duplicates(keep=False, inplace=True)\n >>> s.sort_index()\n 1 cow\n 3 beetle\n 5 hippo\n Name: animal, dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n psdf = self._psdf[[self.name]].drop_duplicates(keep=keep)\n\n if inplace:\n self._update_anchor(psdf)\n return None\n else:\n return first_series(psdf)\n\n def reindex(self, index: Optional[Any] = None, fill_value: Optional[Any] = None) -> \"Series\":\n \"\"\"\n Conform Series to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced.\n\n Parameters\n ----------\n index: array-like, optional\n New labels / index to conform to, should be specified using keywords.\n Preferably an Index object to avoid duplicating data\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n\n Returns\n -------\n Series with changed index.\n\n See Also\n --------\n Series.reset_index : Remove row labels or move them to new columns.\n\n Examples\n --------\n\n Create a series with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> ser = ps.Series([200, 200, 404, 404, 301],\n ... index=index, name='http_status')\n >>> ser\n Firefox 200\n Chrome 200\n Safari 404\n IE10 404\n Konqueror 301\n Name: http_status, dtype: int64\n\n Create a new index and reindex the Series. By default\n values in the new index that do not have corresponding\n records in the Series are assigned ``NaN``.\n\n >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 
'Chrome']\n >>> ser.reindex(new_index).sort_index()\n Chrome 200.0\n Comodo Dragon NaN\n IE10 404.0\n Iceweasel NaN\n Safari 404.0\n Name: http_status, dtype: float64\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``.\n\n >>> ser.reindex(new_index, fill_value=0).sort_index()\n Chrome 200\n Comodo Dragon 0\n IE10 404\n Iceweasel 0\n Safari 404\n Name: http_status, dtype: int64\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a Series with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> ser2 = ps.Series([100, 101, np.nan, 100, 89, 88],\n ... name='prices', index=date_index)\n >>> ser2.sort_index()\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n Name: prices, dtype: float64\n\n Suppose we decide to expand the series to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> ser2.reindex(date_index2).sort_index()\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n Name: prices, dtype: float64\n \"\"\"\n\n return first_series(self.to_frame().reindex(index=index, fill_value=fill_value)).rename(\n self.name\n )\n\n def reindex_like(self, other: Union[\"Series\", \"DataFrame\"]) -> \"Series\":\n \"\"\"\n Return a Series with matching indices as other object.\n\n Conform the object to the same index on all axes. Places NA/NaN in locations\n having no value in the previous index.\n\n Parameters\n ----------\n other : Series or DataFrame\n Its row and column indices are used to define the new indices\n of this object.\n\n Returns\n -------\n Series\n Series with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, ...)``.\n\n Examples\n --------\n\n >>> s1 = ps.Series([24.3, 31.0, 22.0, 35.0],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'),\n ... name=\"temp_celsius\")\n >>> s1\n 2014-02-12 24.3\n 2014-02-13 31.0\n 2014-02-14 22.0\n 2014-02-15 35.0\n Name: temp_celsius, dtype: float64\n\n >>> s2 = ps.Series([\"low\", \"low\", \"medium\"],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']),\n ... name=\"winspeed\")\n >>> s2\n 2014-02-12 low\n 2014-02-13 low\n 2014-02-15 medium\n Name: winspeed, dtype: object\n\n >>> s2.reindex_like(s1).sort_index()\n 2014-02-12 low\n 2014-02-13 low\n 2014-02-14 None\n 2014-02-15 medium\n Name: winspeed, dtype: object\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n return self.reindex(index=other.index)\n else:\n raise TypeError(\"other must be a pandas-on-Spark Series or DataFrame\")\n\n def fillna(\n self,\n value: Optional[Any] = None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n inplace: bool = False,\n limit: Optional[int] = None,\n ) -> Optional[\"Series\"]:\n \"\"\"Fill NA/NaN values.\n\n .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window\n without specifying partition specification. 
This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series pad / ffill: propagate last valid\n observation forward to next valid backfill / bfill:\n use NEXT valid observation to fill gap\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n Series\n Series with NA entries filled.\n\n Examples\n --------\n >>> s = ps.Series([np.nan, 2, 3, 4, np.nan, 6], name='x')\n >>> s\n 0 NaN\n 1 2.0\n 2 3.0\n 3 4.0\n 4 NaN\n 5 6.0\n Name: x, dtype: float64\n\n Replace all NaN elements with 0s.\n\n >>> s.fillna(0)\n 0 0.0\n 1 2.0\n 2 3.0\n 3 4.0\n 4 0.0\n 5 6.0\n Name: x, dtype: float64\n\n We can also propagate non-null values forward or backward.\n\n >>> s.fillna(method='ffill')\n 0 NaN\n 1 2.0\n 2 3.0\n 3 4.0\n 4 4.0\n 5 6.0\n Name: x, dtype: float64\n\n >>> s = ps.Series([np.nan, 'a', 'b', 'c', np.nan], name='x')\n >>> s.fillna(method='ffill')\n 0 None\n 1 a\n 2 b\n 3 c\n 4 c\n Name: x, dtype: object\n \"\"\"\n psser = self._fillna(value=value, method=method, axis=axis, limit=limit)\n\n if method is not None:\n psser = DataFrame(psser._psdf._internal.resolved_copy)._psser_for(self._column_label)\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._psdf._update_internal_frame(psser._psdf._internal, requires_same_anchor=False)\n return None\n else:\n return psser.copy()\n\n def _fillna(\n self,\n value: Optional[Any] = None,\n method: Optional[str] = None,\n axis: Optional[Axis] = None,\n limit: Optional[int] = None,\n part_cols: Sequence[\"ColumnOrName\"] = (),\n ) -> \"Series\":\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError(\"fillna currently only works for axis=0 or axis='index'\")\n if (value is None) and (method is None):\n raise ValueError(\"Must specify a fillna 'value' or 'method' parameter.\")\n if (method is not None) and (method not in [\"ffill\", \"pad\", \"backfill\", \"bfill\"]):\n raise ValueError(\"Expecting 'pad', 'ffill', 'backfill' or 'bfill'.\")\n\n scol = self.spark.column\n\n if not self.spark.nullable and not isinstance(\n self.spark.data_type, (FloatType, DoubleType)\n ):\n return self._psdf.copy()._psser_for(self._column_label)\n\n cond = self.isnull().spark.column\n\n if value is not None:\n if not isinstance(value, (float, int, str, bool)):\n raise TypeError(\"Unsupported type %s\" % type(value).__name__)\n if limit is not None:\n raise NotImplementedError(\"limit parameter for value is not support now\")\n scol = F.when(cond, value).otherwise(scol)\n else:\n if method in [\"ffill\", \"pad\"]:\n func = F.last\n end = Window.currentRow - 1\n if limit is not None:\n begin = Window.currentRow 
- limit\n else:\n begin = Window.unboundedPreceding\n elif method in [\"bfill\", \"backfill\"]:\n func = F.first\n begin = Window.currentRow + 1\n if limit is not None:\n end = Window.currentRow + limit\n else:\n end = Window.unboundedFollowing\n\n window = (\n Window.partitionBy(*part_cols)\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(begin, end)\n )\n scol = F.when(cond, func(scol, True).over(window)).otherwise(scol)\n\n return DataFrame(\n self._psdf._internal.with_new_spark_column(\n self._column_label, scol.alias(name_like_string(self.name)) # TODO: dtype?\n )\n )._psser_for(self._column_label)\n\n def dropna(self, axis: Axis = 0, inplace: bool = False, **kwargs: Any) -> Optional[\"Series\"]:\n \"\"\"\n Return a new Series with missing values removed.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n There is only one axis to drop values from.\n inplace : bool, default False\n If True, do operation inplace and return None.\n **kwargs\n Not in use.\n\n Returns\n -------\n Series\n Series with NA entries dropped from it.\n\n Examples\n --------\n >>> ser = ps.Series([1., 2., np.nan])\n >>> ser\n 0 1.0\n 1 2.0\n 2 NaN\n dtype: float64\n\n Drop NA values from a Series.\n\n >>> ser.dropna()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> ser.dropna(inplace=True)\n >>> ser\n 0 1.0\n 1 2.0\n dtype: float64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n # TODO: last two examples from pandas produce different results.\n psdf = self._psdf[[self.name]].dropna(axis=axis, inplace=False)\n if inplace:\n self._update_anchor(psdf)\n return None\n else:\n return first_series(psdf)\n\n def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> \"Series\":\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. 
All values above this threshold will be set to it.\n\n Returns\n -------\n Series\n Series with the values outside the clip boundaries replaced\n\n Examples\n --------\n >>> ps.Series([0, 2, 4]).clip(1, 3)\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n `pd.Series(['a', 'b']).clip(0, 1)` will crash with \"TypeError: '<=' not supported between\n instances of 'str' and 'int'\" while `ps.Series(['a', 'b']).clip(0, 1)` will output the\n original Series, simply ignoring the incompatible types.\n \"\"\"\n if is_list_like(lower) or is_list_like(upper):\n raise TypeError(\n \"List-like value are not supported for 'lower' and 'upper' at the \" + \"moment\"\n )\n\n if lower is None and upper is None:\n return self\n\n if isinstance(self.spark.data_type, NumericType):\n scol = self.spark.column\n if lower is not None:\n scol = F.when(scol < lower, lower).otherwise(scol)\n if upper is not None:\n scol = F.when(scol > upper, upper).otherwise(scol)\n return self._with_new_scol(\n scol.alias(self._internal.data_spark_column_names[0]),\n field=self._internal.data_fields[0],\n )\n else:\n return self\n\n def drop(\n self,\n labels: Optional[Union[Name, List[Name]]] = None,\n index: Optional[Union[Name, List[Name]]] = None,\n level: Optional[int] = None,\n ) -> \"Series\":\n \"\"\"\n Return Series with specified index labels removed.\n\n Remove elements of a Series based on specifying the index labels.\n When using a multi-index, labels on different levels can be removed by specifying the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index labels to drop.\n index : None\n Redundant for application on Series, but index can be used instead of labels.\n level : int or level name, optional\n For MultiIndex, level for which the labels will be removed.\n\n Returns\n -------\n Series\n Series with specified index labels removed.\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])\n >>> s\n A 0\n B 1\n C 2\n dtype: int64\n\n Drop single label A\n\n >>> s.drop('A')\n B 1\n C 2\n dtype: int64\n\n Drop labels B and C\n\n >>> s.drop(labels=['B', 'C'])\n A 0\n dtype: int64\n\n With 'index' rather than 'labels' returns exactly same result.\n\n >>> s.drop(index='A')\n B 1\n C 2\n dtype: int64\n\n >>> s.drop(index=['B', 'C'])\n A 0\n dtype: int64\n\n Also support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... 
index=midx)\n >>> s\n lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.drop(labels='weight', level=1)\n lama speed 45.0\n length 1.2\n cow speed 30.0\n length 1.5\n falcon speed 320.0\n length 0.3\n dtype: float64\n\n >>> s.drop(('lama', 'weight'))\n lama speed 45.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.drop([('lama', 'speed'), ('falcon', 'weight')])\n lama weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n length 0.3\n dtype: float64\n \"\"\"\n return first_series(self._drop(labels=labels, index=index, level=level))\n\n def _drop(\n self,\n labels: Optional[Union[Name, List[Name]]] = None,\n index: Optional[Union[Name, List[Name]]] = None,\n level: Optional[int] = None,\n ) -> DataFrame:\n if labels is not None:\n if index is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'\")\n return self._drop(index=labels, level=level)\n if index is not None:\n internal = self._internal\n if level is None:\n level = 0\n if level >= internal.index_level:\n raise ValueError(\"'level' should be less than the number of indexes\")\n\n if is_name_like_tuple(index):\n index_list = [cast(Label, index)]\n elif is_name_like_value(index):\n index_list = [(index,)]\n elif all(is_name_like_value(idxes, allow_tuple=False) for idxes in index):\n index_list = [(idex,) for idex in index]\n elif not all(is_name_like_tuple(idxes) for idxes in index):\n raise ValueError(\n \"If the given index is a list, it \"\n \"should only contains names as all tuples or all non tuples \"\n \"that contain index names\"\n )\n else:\n index_list = cast(List[Label], index)\n\n drop_index_scols = []\n for idxes in index_list:\n try:\n index_scols = [\n internal.index_spark_columns[lvl] == idx\n for lvl, idx in enumerate(idxes, level)\n ]\n except IndexError:\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n internal.index_level, len(idxes)\n )\n )\n drop_index_scols.append(reduce(lambda x, y: x & y, index_scols))\n\n cond = ~reduce(lambda x, y: x | y, drop_index_scols)\n\n return DataFrame(internal.with_filter(cond))\n else:\n raise ValueError(\"Need to specify at least one of 'labels' or 'index'\")\n\n def head(self, n: int = 5) -> \"Series\":\n \"\"\"\n Return the first n rows.\n\n This function returns the first n rows for the object based on position.\n It is useful for quickly testing if your object has the right type of data in it.\n\n Parameters\n ----------\n n : Integer, default = 5\n\n Returns\n -------\n The first n rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion']})\n >>> df.animal.head(2) # doctest: +NORMALIZE_WHITESPACE\n 0 alligator\n 1 bee\n Name: animal, dtype: object\n \"\"\"\n return first_series(self.to_frame().head(n)).rename(self.name)\n\n def last(self, offset: Union[str, DateOffset]) -> \"Series\":\n \"\"\"\n Select final periods of time series data based on a date offset.\n\n When having a Series with dates as index, this function can\n select the last few elements based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. 
For instance,\n '3D' will display all the rows having their index within the last 3 days.\n\n Returns\n -------\n Series\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psser = ps.Series([1, 2, 3, 4], index=index)\n >>> psser\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n dtype: int64\n\n Get the rows for the last 3 days:\n\n >>> psser.last('3D')\n 2018-04-13 3\n 2018-04-15 4\n dtype: int64\n\n Notice the data for 3 last calendar days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n return first_series(self.to_frame().last(offset)).rename(self.name)\n\n def first(self, offset: Union[str, DateOffset]) -> \"Series\":\n \"\"\"\n Select first periods of time series data based on a date offset.\n\n When having a Series with dates as index, this function can\n select the first few elements based on a date offset.\n\n Parameters\n ----------\n offset : str or DateOffset\n The offset length of the data that will be selected. For instance,\n '3D' will display all the rows having their index within the first 3 days.\n\n Returns\n -------\n Series\n A subset of the caller.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n Examples\n --------\n >>> index = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> psser = ps.Series([1, 2, 3, 4], index=index)\n >>> psser\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n dtype: int64\n\n Get the rows for the first 3 days:\n\n >>> psser.first('3D')\n 2018-04-09 1\n 2018-04-11 2\n dtype: int64\n\n Notice the data for 3 first calendar days were returned, not the first\n 3 observed days in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n return first_series(self.to_frame().first(offset)).rename(self.name)\n\n # TODO: Categorical type isn't supported (due to PySpark's limitation) and\n # some doctests related with timestamps were not added.\n def unique(self) -> \"Series\":\n \"\"\"\n Return unique values of Series object.\n\n Uniques are returned in order of appearance. Hash table-based unique,\n therefore does NOT sort.\n\n .. note:: This method returns newly created Series whereas pandas returns\n the unique values as a NumPy array.\n\n Returns\n -------\n Returns the unique values as a Series.\n\n See Also\n --------\n Index.unique\n groupby.SeriesGroupBy.unique\n\n Examples\n --------\n >>> psser = ps.Series([2, 1, 3, 3], name='A')\n >>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n <BLANKLINE>\n ... 1\n ... 2\n ... 3\n Name: A, dtype: int64\n\n >>> ps.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()\n 0 2016-01-01\n dtype: datetime64[ns]\n\n >>> psser.name = ('x', 'a')\n >>> psser.unique().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n <BLANKLINE>\n ... 1\n ... 2\n ... 
3\n Name: (x, a), dtype: int64\n \"\"\"\n sdf = self._internal.spark_frame.select(self.spark.column).distinct()\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=None,\n column_labels=[self._column_label],\n data_spark_columns=[scol_for(sdf, self._internal.data_spark_column_names[0])],\n data_fields=[self._internal.data_fields[0]],\n column_label_names=self._internal.column_label_names,\n )\n return first_series(DataFrame(internal))\n\n def sort_values(\n self, ascending: bool = True, inplace: bool = False, na_position: str = \"last\"\n ) -> Optional[\"Series\"]:\n \"\"\"\n Sort by the values.\n\n Sort a Series in ascending or descending order by some criterion.\n\n Parameters\n ----------\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n\n Returns\n -------\n sorted_obj : Series ordered by values.\n\n Examples\n --------\n >>> s = ps.Series([np.nan, 1, 3, 10, 5])\n >>> s\n 0 NaN\n 1 1.0\n 2 3.0\n 3 10.0\n 4 5.0\n dtype: float64\n\n Sort values ascending order (default behaviour)\n\n >>> s.sort_values(ascending=True)\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n 0 NaN\n dtype: float64\n\n Sort values descending order\n\n >>> s.sort_values(ascending=False)\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values inplace\n\n >>> s.sort_values(ascending=False, inplace=True)\n >>> s\n 3 10.0\n 4 5.0\n 2 3.0\n 1 1.0\n 0 NaN\n dtype: float64\n\n Sort values putting NAs first\n\n >>> s.sort_values(na_position='first')\n 0 NaN\n 1 1.0\n 2 3.0\n 4 5.0\n 3 10.0\n dtype: float64\n\n Sort a series of strings\n\n >>> s = ps.Series(['z', 'b', 'd', 'a', 'c'])\n >>> s\n 0 z\n 1 b\n 2 d\n 3 a\n 4 c\n dtype: object\n\n >>> s.sort_values()\n 3 a\n 1 b\n 4 c\n 2 d\n 0 z\n dtype: object\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n psdf = self._psdf[[self.name]]._sort(\n by=[self.spark.column], ascending=ascending, na_position=na_position\n )\n\n if inplace:\n self._update_anchor(psdf)\n return None\n else:\n return first_series(psdf)\n\n def sort_index(\n self,\n axis: Axis = 0,\n level: Optional[Union[int, List[int]]] = None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = None,\n na_position: str = \"last\",\n ) -> Optional[\"Series\"]:\n \"\"\"\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n axis : index, columns to direct sorting. Currently, only axis = 0 is supported.\n level : int or level name or list of ints or list of level names\n if not None, sort on values in specified index level(s)\n ascending : boolean, default True\n Sort ascending vs. descending\n inplace : bool, default False\n if True, perform operation in-place\n kind : str, default None\n pandas-on-Spark does not allow specifying the sorting algorithm at the moment,\n default None\n na_position : {‘first’, ‘last’}, default ‘last’\n first puts NaNs at the beginning, last puts NaNs at the end. 
Not implemented for\n MultiIndex.\n\n Returns\n -------\n sorted_obj : Series\n\n Examples\n --------\n >>> df = ps.Series([2, 1, np.nan], index=['b', 'a', np.nan])\n\n >>> df.sort_index()\n a 1.0\n b 2.0\n NaN NaN\n dtype: float64\n\n >>> df.sort_index(ascending=False)\n b 2.0\n a 1.0\n NaN NaN\n dtype: float64\n\n >>> df.sort_index(na_position='first')\n NaN NaN\n a 1.0\n b 2.0\n dtype: float64\n\n >>> df.sort_index(inplace=True)\n >>> df\n a 1.0\n b 2.0\n NaN NaN\n dtype: float64\n\n >>> df = ps.Series(range(4), index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], name='0')\n\n >>> df.sort_index()\n a 0 3\n 1 2\n b 0 1\n 1 0\n Name: 0, dtype: int64\n\n >>> df.sort_index(level=1) # doctest: +SKIP\n a 0 3\n b 0 1\n a 1 2\n b 1 0\n Name: 0, dtype: int64\n\n >>> df.sort_index(level=[1, 0])\n a 0 3\n b 0 1\n a 1 2\n b 1 0\n Name: 0, dtype: int64\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n psdf = self._psdf[[self.name]].sort_index(\n axis=axis, level=level, ascending=ascending, kind=kind, na_position=na_position\n )\n\n if inplace:\n self._update_anchor(psdf)\n return None\n else:\n return first_series(psdf)\n\n def swaplevel(\n self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, copy: bool = True\n ) -> \"Series\":\n \"\"\"\n Swap levels i and j in a MultiIndex.\n Default is to swap the two innermost levels of the index.\n\n Parameters\n ----------\n i, j : int, str\n Level of the indices to be swapped. Can pass level name as string.\n copy : bool, default True\n Whether to copy underlying data. Must be True.\n\n Returns\n -------\n Series\n Series with levels swapped in MultiIndex.\n\n Examples\n --------\n >>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])\n >>> midx # doctest: +SKIP\n MultiIndex([('a', 1),\n ('b', 2)],\n names=['word', 'number'])\n >>> psser = ps.Series(['x', 'y'], index=midx)\n >>> psser\n word number\n a 1 x\n b 2 y\n dtype: object\n >>> psser.swaplevel()\n number word\n 1 a x\n 2 b y\n dtype: object\n >>> psser.swaplevel(0, 1)\n number word\n 1 a x\n 2 b y\n dtype: object\n >>> psser.swaplevel('number', 'word')\n number word\n 1 a x\n 2 b y\n dtype: object\n \"\"\"\n assert copy is True\n\n return first_series(self.to_frame().swaplevel(i, j, axis=0)).rename(self.name)\n\n def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> \"Series\":\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Parameters\n ----------\n i: {0 or 'index', 1 or 'columns'}. The axis to swap.\n j: {0 or 'index', 1 or 'columns'}. 
The axis to swap.\n copy : bool, default True.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> psser = ps.Series([1, 2, 3], index=[\"x\", \"y\", \"z\"])\n >>> psser\n x 1\n y 2\n z 3\n dtype: int64\n >>>\n >>> psser.swapaxes(0, 0)\n x 1\n y 2\n z 3\n dtype: int64\n \"\"\"\n assert copy is True\n\n i = validate_axis(i)\n j = validate_axis(j)\n if not i == j == 0:\n raise ValueError(\"Axis must be 0 for Series\")\n\n return self.copy()\n\n def add_prefix(self, prefix: str) -> \"Series\":\n \"\"\"\n Prefix labels with string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series\n New Series with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix column labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n \"\"\"\n assert isinstance(prefix, str)\n internal = self._internal.resolved_copy\n sdf = internal.spark_frame.select(\n [\n F.concat(SF.lit(prefix), index_spark_column).alias(index_spark_column_name)\n for index_spark_column, index_spark_column_name in zip(\n internal.index_spark_columns, internal.index_spark_column_names\n )\n ]\n + internal.data_spark_columns\n )\n return first_series(\n DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))\n )\n\n def add_suffix(self, suffix: str) -> \"Series\":\n \"\"\"\n Suffix labels with string suffix.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series\n New Series with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n \"\"\"\n assert isinstance(suffix, str)\n internal = self._internal.resolved_copy\n sdf = internal.spark_frame.select(\n [\n F.concat(index_spark_column, SF.lit(suffix)).alias(index_spark_column_name)\n for index_spark_column, index_spark_column_name in zip(\n internal.index_spark_columns, internal.index_spark_column_names\n )\n ]\n + internal.data_spark_columns\n )\n return first_series(\n DataFrame(internal.with_new_sdf(sdf, index_fields=([None] * internal.index_level)))\n )\n\n def corr(self, other: \"Series\", method: str = \"pearson\") -> float:\n \"\"\"\n Compute correlation with `other` Series, excluding missing values.\n\n Parameters\n ----------\n other : Series\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n correlation : float\n\n Examples\n --------\n >>> df = ps.DataFrame({'s1': [.2, .0, .6, .2],\n ... 
's2': [.3, .6, .0, .1]})\n >>> s1 = df.s1\n >>> s2 = df.s2\n >>> s1.corr(s2, method='pearson') # doctest: +ELLIPSIS\n -0.851064...\n\n >>> s1.corr(s2, method='spearman') # doctest: +ELLIPSIS\n -0.948683...\n\n Notes\n -----\n There are behavior differences between pandas-on-Spark and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. pandas-on-Spark will return an error.\n * pandas-on-Spark doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n \"\"\"\n # This implementation is suboptimal because it computes more than necessary,\n # but it should be a start\n columns = [\"__corr_arg1__\", \"__corr_arg2__\"]\n psdf = self._psdf.assign(__corr_arg1__=self, __corr_arg2__=other)[columns]\n psdf.columns = columns\n c = corr(psdf, method=method)\n return c.loc[tuple(columns)]\n\n def nsmallest(self, n: int = 5) -> \"Series\":\n \"\"\"\n Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n Return this many ascending sorted values.\n\n Returns\n -------\n Series\n The `n` smallest values in the Series, sorted in increasing order.\n\n See Also\n --------\n Series.nlargest: Get the `n` largest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values().head(n)`` for small `n` relative to\n the size of the ``Series`` object.\n In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Examples\n --------\n >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]\n >>> s = ps.Series(data)\n >>> s\n 0 1.0\n 1 2.0\n 2 3.0\n 3 4.0\n 4 NaN\n 5 6.0\n 6 7.0\n 7 8.0\n dtype: float64\n\n The `n` largest elements where ``n=5`` by default.\n\n >>> s.nsmallest()\n 0 1.0\n 1 2.0\n 2 3.0\n 3 4.0\n 5 6.0\n dtype: float64\n\n >>> s.nsmallest(3)\n 0 1.0\n 1 2.0\n 2 3.0\n dtype: float64\n \"\"\"\n return self.sort_values(ascending=True).head(n)\n\n def nlargest(self, n: int = 5) -> \"Series\":\n \"\"\"\n Return the largest `n` elements.\n\n Parameters\n ----------\n n : int, default 5\n\n Returns\n -------\n Series\n The `n` largest values in the Series, sorted in decreasing order.\n\n See Also\n --------\n Series.nsmallest: Get the `n` smallest elements.\n Series.sort_values: Sort Series by values.\n Series.head: Return the first `n` rows.\n\n Notes\n -----\n Faster than ``.sort_values(ascending=False).head(n)`` for small `n`\n relative to the size of the ``Series`` object.\n\n In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Examples\n --------\n >>> data = [1, 2, 3, 4, np.nan ,6, 7, 8]\n >>> s = ps.Series(data)\n >>> s\n 0 1.0\n 1 2.0\n 2 3.0\n 3 4.0\n 4 NaN\n 5 6.0\n 6 7.0\n 7 8.0\n dtype: float64\n\n The `n` largest elements where ``n=5`` by default.\n\n >>> s.nlargest()\n 7 8.0\n 6 7.0\n 5 6.0\n 3 4.0\n 2 3.0\n dtype: float64\n\n >>> s.nlargest(n=3)\n 7 8.0\n 6 7.0\n 5 6.0\n dtype: float64\n\n\n \"\"\"\n return self.sort_values(ascending=False).head(n)\n\n def append(\n self, to_append: \"Series\", ignore_index: bool = False, verify_integrity: bool = False\n ) -> \"Series\":\n \"\"\"\n Concatenate two or more Series.\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n ignore_index : boolean, default False\n If True, do not use the index labels.\n verify_integrity : boolean, default False\n If True, raise Exception on creating index with duplicates\n\n Returns\n -------\n 
appended : Series\n\n Examples\n --------\n >>> s1 = ps.Series([1, 2, 3])\n >>> s2 = ps.Series([4, 5, 6])\n >>> s3 = ps.Series([4, 5, 6], index=[3,4,5])\n\n >>> s1.append(s2)\n 0 1\n 1 2\n 2 3\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s1.append(s3)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n\n With ignore_index set to True:\n\n >>> s1.append(s2, ignore_index=True)\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n 5 6\n dtype: int64\n \"\"\"\n return first_series(\n self.to_frame().append(to_append.to_frame(), ignore_index, verify_integrity)\n ).rename(self.name)\n\n def sample(\n self,\n n: Optional[int] = None,\n frac: Optional[float] = None,\n replace: bool = False,\n random_state: Optional[int] = None,\n ) -> \"Series\":\n return first_series(\n self.to_frame().sample(n=n, frac=frac, replace=replace, random_state=random_state)\n ).rename(self.name)\n\n sample.__doc__ = DataFrame.sample.__doc__\n\n @no_type_check\n def hist(self, bins=10, **kwds):\n return self.plot.hist(bins, **kwds)\n\n hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__\n\n def apply(self, func: Callable, args: Sequence[Any] = (), **kwds: Any) -> \"Series\":\n \"\"\"\n Invoke function on values of Series.\n\n Can be a Python function that only works on the Series.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> np.int32:\n ... return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n Parameters\n ----------\n func : function\n Python function to apply. Note that type hint for return type is required.\n args : tuple\n Positional arguments passed to func after the series value.\n **kwds\n Additional keyword arguments passed to func.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.aggregate : Only perform aggregating type operations.\n Series.transform : Only perform transforming type operations.\n DataFrame.apply : The equivalent function for DataFrame.\n\n Examples\n --------\n Create a Series with typical summer temperatures for each city.\n\n >>> s = ps.Series([20, 21, 12],\n ... index=['London', 'New York', 'Helsinki'])\n >>> s\n London 20\n New York 21\n Helsinki 12\n dtype: int64\n\n\n Square the values by defining a function and passing it as an\n argument to ``apply()``.\n\n >>> def square(x) -> np.int64:\n ... return x ** 2\n >>> s.apply(square)\n London 400\n New York 441\n Helsinki 144\n dtype: int64\n\n\n Define a custom function that needs additional positional\n arguments and pass these additional arguments using the\n ``args`` keyword\n\n >>> def subtract_custom_value(x, custom_value) -> np.int64:\n ... return x - custom_value\n\n >>> s.apply(subtract_custom_value, args=(5,))\n London 15\n New York 16\n Helsinki 7\n dtype: int64\n\n\n Define a custom function that takes keyword arguments\n and pass these arguments to ``apply``\n\n >>> def add_custom_values(x, **kwargs) -> np.int64:\n ... for month in kwargs:\n ... x += kwargs[month]\n ... return x\n\n >>> s.apply(add_custom_values, june=30, july=20, august=25)\n London 95\n New York 96\n Helsinki 87\n dtype: int64\n\n\n Use a function from the Numpy library\n\n >>> def numpy_log(col) -> np.float64:\n ... 
return np.log(col)\n >>> s.apply(numpy_log)\n London 2.995732\n New York 3.044522\n Helsinki 2.484907\n dtype: float64\n\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> s.apply(np.log)\n London 2.995732\n New York 3.044522\n Helsinki 2.484907\n dtype: float64\n\n \"\"\"\n assert callable(func), \"the first argument should be a callable function.\"\n try:\n spec = inspect.getfullargspec(func)\n return_sig = spec.annotations.get(\"return\", None)\n should_infer_schema = return_sig is None\n except TypeError:\n # Falls back to schema inference if it fails to get signature.\n should_infer_schema = True\n\n def apply_each(s: Any) -> pd.Series:\n return s.apply(func, args=args, **kwds)\n\n if should_infer_schema:\n return self.pandas_on_spark._transform_batch(apply_each, None)\n else:\n sig_return = infer_return_type(func)\n if not isinstance(sig_return, ScalarType):\n raise ValueError(\n \"Expected the return type of this function to be of scalar type, \"\n \"but found type {}\".format(sig_return)\n )\n return_type = sig_return\n return self.pandas_on_spark._transform_batch(apply_each, return_type)\n\n # TODO: not all arguments are implemented comparing to pandas' for now.\n def aggregate(self, func: Union[str, List[str]]) -> Union[Scalar, \"Series\"]:\n \"\"\"Aggregate using one or more operations over the specified axis.\n\n Parameters\n ----------\n func : str or a list of str\n function name(s) as string apply to series.\n\n Returns\n -------\n scalar, Series\n The return can be:\n - scalar : when Series.agg is called with single function\n - Series : when Series.agg is called with several functions\n\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n Series.apply : Invoke function on a Series.\n Series.transform : Only perform transforming type operations.\n Series.groupby : Perform operations over groups.\n DataFrame.aggregate : The equivalent function for DataFrame.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4])\n >>> s.agg('min')\n 1\n\n >>> s.agg(['min', 'max']).sort_index()\n max 4\n min 1\n dtype: int64\n \"\"\"\n if isinstance(func, list):\n return first_series(self.to_frame().aggregate(func)).rename(self.name)\n elif isinstance(func, str):\n return getattr(self, func)()\n else:\n raise TypeError(\"func must be a string or list of strings\")\n\n agg = aggregate\n\n def transpose(self, *args: Any, **kwargs: Any) -> \"Series\":\n \"\"\"\n Return the transpose, which is by definition self.\n\n Examples\n --------\n It returns the same object as the transpose of the given series object, which is by\n definition self.\n\n >>> s = ps.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.transpose()\n 0 1\n 1 2\n 2 3\n dtype: int64\n \"\"\"\n return self.copy()\n\n T = property(transpose)\n\n def transform(\n self, func: Union[Callable, List[Callable]], axis: Axis = 0, *args: Any, **kwargs: Any\n ) -> Union[\"Series\", DataFrame]:\n \"\"\"\n Call ``func`` producing the same type as `self` with transformed values\n and that has the same axis length as input.\n\n .. note:: this API executes the function once to infer the type which is\n potentially expensive, for instance, when the dataset is created after\n aggregations or sorting.\n\n To avoid this, specify return type in ``func``, for instance, as below:\n\n >>> def square(x) -> np.int32:\n ... 
return x ** 2\n\n pandas-on-Spark uses return type hint and does not try to infer the type.\n\n Parameters\n ----------\n func : function or list\n A function or a list of functions to use for transforming the data.\n axis : int, default 0 or 'index'\n Can only be set to 0 at the moment.\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n An instance of the same type with `self` that must have the same length as input.\n\n See Also\n --------\n Series.aggregate : Only perform aggregating type operations.\n Series.apply : Invoke function on Series.\n DataFrame.transform : The equivalent function for DataFrame.\n\n Examples\n --------\n\n >>> s = ps.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n\n >>> def sqrt(x) -> float:\n ... return np.sqrt(x)\n >>> s.transform(sqrt)\n 0 0.000000\n 1 1.000000\n 2 1.414214\n dtype: float64\n\n Even though the resulting instance must have the same length as the\n input, it is possible to provide several input functions:\n\n >>> def exp(x) -> float:\n ... return np.exp(x)\n >>> s.transform([sqrt, exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n\n You can omit the type hint and let pandas-on-Spark infer its type.\n\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n if isinstance(func, list):\n applied = []\n for f in func:\n applied.append(self.apply(f, args=args, **kwargs).rename(f.__name__))\n\n internal = self._internal.with_new_columns(applied)\n return DataFrame(internal)\n else:\n return self.apply(func, args=args, **kwargs)\n\n def round(self, decimals: int = 0) -> \"Series\":\n \"\"\"\n Round each value in a Series to the given number of decimals.\n\n Parameters\n ----------\n decimals : int\n Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of\n positions to the left of the decimal point.\n\n Returns\n -------\n Series object\n\n See Also\n --------\n DataFrame.round\n\n Examples\n --------\n >>> df = ps.Series([0.028208, 0.038683, 0.877076], name='x')\n >>> df\n 0 0.028208\n 1 0.038683\n 2 0.877076\n Name: x, dtype: float64\n\n >>> df.round(2)\n 0 0.03\n 1 0.04\n 2 0.88\n Name: x, dtype: float64\n \"\"\"\n if not isinstance(decimals, int):\n raise TypeError(\"decimals must be an integer\")\n scol = F.round(self.spark.column, decimals)\n return self._with_new_scol(\n scol,\n field=(\n self._internal.data_fields[0].copy(nullable=True)\n if not isinstance(self.spark.data_type, DecimalType)\n else None\n ),\n )\n\n # TODO: add 'interpolation' parameter.\n def quantile(\n self, q: Union[float, Iterable[float]] = 0.5, accuracy: int = 10000\n ) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return value at the given quantile.\n\n .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile\n based upon approximate percentile computation because computing quantile across\n a large dataset is extremely expensive.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute.\n accuracy : int, optional\n Default accuracy of approximation. 
Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n float or Series\n If the current object is a Series and ``q`` is an array, a Series will be\n returned where the index is ``q`` and the values are the quantiles, otherwise\n a float will be returned.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4, 5])\n >>> s.quantile(.5)\n 3.0\n\n >>> (s + 1).quantile(.5)\n 4.0\n\n >>> s.quantile([.25, .5, .75])\n 0.25 2.0\n 0.50 3.0\n 0.75 4.0\n dtype: float64\n\n >>> (s + 1).quantile([.25, .5, .75])\n 0.25 3.0\n 0.50 4.0\n 0.75 5.0\n dtype: float64\n \"\"\"\n if isinstance(q, Iterable):\n return first_series(\n cast(\n \"ps.DataFrame\",\n self.to_frame().quantile(q=q, axis=0, numeric_only=False, accuracy=accuracy),\n )\n ).rename(self.name)\n else:\n if not isinstance(accuracy, int):\n raise TypeError(\n \"accuracy must be an integer; however, got [%s]\" % type(accuracy).__name__\n )\n\n if not isinstance(q, float):\n raise TypeError(\n \"q must be a float or an array of floats; however, [%s] found.\" % type(q)\n )\n q_float = q\n if q_float < 0.0 or q_float > 1.0:\n raise ValueError(\"percentiles should all be in the interval [0, 1].\")\n\n def quantile(psser: Series) -> Column:\n spark_type = psser.spark.data_type\n spark_column = psser.spark.column\n if isinstance(spark_type, (BooleanType, NumericType)):\n return F.percentile_approx(spark_column.cast(DoubleType()), q_float, accuracy)\n else:\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()\n )\n )\n\n return self._reduce_for_stat_function(quantile, name=\"quantile\")\n\n # TODO: add axis, numeric_only, pct, na_option parameter\n def rank(self, method: str = \"average\", ascending: bool = True) -> \"Series\":\n \"\"\"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n .. note:: the current implementation of rank uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. 
Avoid this method with very large datasets.

        Parameters
        ----------
        method : {'average', 'min', 'max', 'first', 'dense'}
            * average: average rank of group
            * min: lowest rank in group
            * max: highest rank in group
            * first: ranks assigned in order they appear in the array
            * dense: like 'min', but rank always increases by 1 between groups
        ascending : boolean, default True
            False ranks from high (1) to low (N).

        Returns
        -------
        ranks : same type as caller

        Examples
        --------
        >>> s = ps.Series([1, 2, 2, 3], name='A')
        >>> s
        0    1
        1    2
        2    2
        3    3
        Name: A, dtype: int64

        >>> s.rank()
        0    1.0
        1    2.5
        2    2.5
        3    4.0
        Name: A, dtype: float64

        If method is set to 'min', it uses the lowest rank in the group.

        >>> s.rank(method='min')
        0    1.0
        1    2.0
        2    2.0
        3    4.0
        Name: A, dtype: float64

        If method is set to 'max', it uses the highest rank in the group.

        >>> s.rank(method='max')
        0    1.0
        1    3.0
        2    3.0
        3    4.0
        Name: A, dtype: float64

        If method is set to 'first', ranks are assigned in the order the
        values appear in the array, without grouping.

        >>> s.rank(method='first')
        0    1.0
        1    2.0
        2    3.0
        3    4.0
        Name: A, dtype: float64

        If method is set to 'dense', it leaves no gaps between the ranks of
        consecutive groups.

        >>> s.rank(method='dense')
        0    1.0
        1    2.0
        2    2.0
        3    3.0
        Name: A, dtype: float64
        """
        return self._rank(method, ascending).spark.analyzed

    def _rank(
        self,
        method: str = "average",
        ascending: bool = True,
        *,
        part_cols: Sequence["ColumnOrName"] = (),
    ) -> "Series":
        if method not in ["average", "min", "max", "first", "dense"]:
            msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
            raise ValueError(msg)

        if self._internal.index_level > 1:
            raise NotImplementedError("rank does not support MultiIndex now")

        if ascending:
            asc_func = Column.asc
        else:
            asc_func = Column.desc

        if method == "first":
            window = (
                Window.orderBy(
                    asc_func(self.spark.column),
                    asc_func(F.col(NATURAL_ORDER_COLUMN_NAME)),
                )
                .partitionBy(*part_cols)
                .rowsBetween(Window.unboundedPreceding, Window.currentRow)
            )
            scol = F.row_number().over(window)
        elif method == "dense":
            window = (
                Window.orderBy(asc_func(self.spark.column))
                .partitionBy(*part_cols)
                .rowsBetween(Window.unboundedPreceding, Window.currentRow)
            )
            scol = F.dense_rank().over(window)
        else:
            if method == "average":
                stat_func = F.mean
            elif method == "min":
                stat_func = F.min
            elif method == "max":
                stat_func = F.max
            window1 = (
                Window.orderBy(asc_func(self.spark.column))
                .partitionBy(*part_cols)
                .rowsBetween(Window.unboundedPreceding, Window.currentRow)
            )

            window2 = Window.partitionBy(
                cast("List[ColumnOrName]", [self.spark.column]) + list(part_cols)
            ).rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
            scol = stat_func(F.row_number().over(window1)).over(window2)
        return self._with_new_scol(scol.cast(DoubleType()))

    def filter(
        self,
        items: Optional[Sequence[Any]] = None,
        like: Optional[str] = None,
        regex: Optional[str] = None,
        axis: Optional[Axis] = None,
    ) -> "Series":
        axis = validate_axis(axis)
        if axis == 1:
            raise ValueError("Series does not support columns axis.")
        return first_series(
            self.to_frame().filter(items=items, like=like, regex=regex, axis=axis),
        ).rename(self.name)

    filter.__doc__ = DataFrame.filter.__doc__

    def describe(self, percentiles: Optional[List[float]] = None) -> "Series":
        return first_series(self.to_frame().describe(percentiles)).rename(self.name)

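    # A minimal sketch, not part of the pandas-on-Spark API, of the plain
    # PySpark Window pattern that ``_rank`` above and ``_diff`` below share.
    # It assumes ``spark`` is a live SparkSession; the column names are
    # illustrative only.
    #
    #     from pyspark.sql import Window, functions as F
    #
    #     sdf = spark.createDataFrame(
    #         [(0, 1.0), (1, 2.0), (2, 2.0), (3, 3.0)], ["order", "x"]
    #     )
    #     # rank(method='first') ~ row_number over the value ordering
    #     sdf.withColumn("rank", F.row_number().over(Window.orderBy("x"))).show()
    #     # diff(periods=1) ~ current value minus the value one row back
    #     w = Window.orderBy("order")
    #     sdf.withColumn("diff", F.col("x") - F.lag("x", 1).over(w)).show()
    #
    # Neither window specifies ``partitionBy``, so Spark pulls every row into
    # a single partition; this is exactly why the docstrings above and below
    # warn against using these methods on very large datasets.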
describe.__doc__ = DataFrame.describe.__doc__\n\n def diff(self, periods: int = 1) -> \"Series\":\n \"\"\"\n First discrete difference of element.\n\n Calculates the difference of a Series element compared with another element in the\n DataFrame (default is the element in the same column of the previous row).\n\n .. note:: the current implementation of diff uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for calculating difference, accepts negative values.\n\n Returns\n -------\n diffed : Series\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.b.diff()\n 0 NaN\n 1 0.0\n 2 1.0\n 3 1.0\n 4 2.0\n 5 3.0\n Name: b, dtype: float64\n\n Difference with previous value\n\n >>> df.c.diff(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 15.0\n 4 21.0\n 5 27.0\n Name: c, dtype: float64\n\n Difference with following value\n\n >>> df.c.diff(periods=-1)\n 0 -3.0\n 1 -5.0\n 2 -7.0\n 3 -9.0\n 4 -11.0\n 5 NaN\n Name: c, dtype: float64\n \"\"\"\n return self._diff(periods).spark.analyzed\n\n def _diff(self, periods: int, *, part_cols: Sequence[\"ColumnOrName\"] = ()) -> \"Series\":\n if not isinstance(periods, int):\n raise TypeError(\"periods should be an int; however, got [%s]\" % type(periods).__name__)\n window = (\n Window.partitionBy(*part_cols)\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-periods, -periods)\n )\n scol = self.spark.column - F.lag(self.spark.column, periods).over(window)\n return self._with_new_scol(scol, field=self._internal.data_fields[0].copy(nullable=True))\n\n def idxmax(self, skipna: bool = True) -> Union[Tuple, Any]:\n \"\"\"\n Return the row label of the maximum value.\n\n If multiple values equal the maximum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n\n Returns\n -------\n Index\n Label of the maximum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n Series.idxmin : Return index *label* of the first occurrence\n of minimum of values.\n\n Examples\n --------\n >>> s = ps.Series(data=[1, None, 4, 3, 5],\n ... index=['A', 'B', 'C', 'D', 'E'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 3.0\n E 5.0\n dtype: float64\n\n >>> s.idxmax()\n 'E'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmax(skipna=False)\n nan\n\n In case of multi-index, you get a tuple:\n\n >>> index = pd.MultiIndex.from_arrays([\n ... 
['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))\n >>> s = ps.Series(data=[1, None, 4, 5], index=index)\n >>> s\n first second\n a c 1.0\n d NaN\n b e 4.0\n f 5.0\n dtype: float64\n\n >>> s.idxmax()\n ('b', 'f')\n\n If multiple values equal the maximum, the first row label with that\n value is returned.\n\n >>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])\n >>> s\n 10 1\n 3 100\n 5 1\n 2 100\n 1 1\n 8 100\n dtype: int64\n\n >>> s.idxmax()\n 3\n \"\"\"\n sdf = self._internal.spark_frame\n scol = self.spark.column\n index_scols = self._internal.index_spark_columns\n\n if skipna:\n sdf = sdf.orderBy(scol.desc_nulls_last(), NATURAL_ORDER_COLUMN_NAME)\n else:\n sdf = sdf.orderBy(scol.desc_nulls_first(), NATURAL_ORDER_COLUMN_NAME)\n\n results = sdf.select([scol] + index_scols).take(1)\n if len(results) == 0:\n raise ValueError(\"attempt to get idxmin of an empty sequence\")\n if results[0][0] is None:\n # This will only happens when skipna is False because we will\n # place nulls first.\n return np.nan\n values = list(results[0][1:])\n if len(values) == 1:\n return values[0]\n else:\n return tuple(values)\n\n def idxmin(self, skipna: bool = True) -> Union[Tuple, Any]:\n \"\"\"\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n\n Returns\n -------\n Index\n Label of the minimum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n Series.idxmax : Return index *label* of the first occurrence\n of maximum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``. This method\n returns the label of the minimum, while ``ndarray.argmin`` returns\n the position. To get the position, use ``series.values.argmin()``.\n\n Examples\n --------\n >>> s = ps.Series(data=[1, None, 4, 0],\n ... index=['A', 'B', 'C', 'D'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 0.0\n dtype: float64\n\n >>> s.idxmin()\n 'D'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmin(skipna=False)\n nan\n\n In case of multi-index, you get a tuple:\n\n >>> index = pd.MultiIndex.from_arrays([\n ... 
['a', 'a', 'b', 'b'], ['c', 'd', 'e', 'f']], names=('first', 'second'))\n >>> s = ps.Series(data=[1, None, 4, 0], index=index)\n >>> s\n first second\n a c 1.0\n d NaN\n b e 4.0\n f 0.0\n dtype: float64\n\n >>> s.idxmin()\n ('b', 'f')\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n >>> s = ps.Series([1, 100, 1, 100, 1, 100], index=[10, 3, 5, 2, 1, 8])\n >>> s\n 10 1\n 3 100\n 5 1\n 2 100\n 1 1\n 8 100\n dtype: int64\n\n >>> s.idxmin()\n 10\n \"\"\"\n sdf = self._internal.spark_frame\n scol = self.spark.column\n index_scols = self._internal.index_spark_columns\n\n if skipna:\n sdf = sdf.orderBy(scol.asc_nulls_last(), NATURAL_ORDER_COLUMN_NAME)\n else:\n sdf = sdf.orderBy(scol.asc_nulls_first(), NATURAL_ORDER_COLUMN_NAME)\n\n results = sdf.select([scol] + index_scols).take(1)\n if len(results) == 0:\n raise ValueError(\"attempt to get idxmin of an empty sequence\")\n if results[0][0] is None:\n # This will only happens when skipna is False because we will\n # place nulls first.\n return np.nan\n values = list(results[0][1:])\n if len(values) == 1:\n return values[0]\n else:\n return tuple(values)\n\n def pop(self, item: Name) -> Union[\"Series\", Scalar]:\n \"\"\"\n Return item and drop from series.\n\n Parameters\n ----------\n item : label\n Label of index to be popped.\n\n Returns\n -------\n Value that is popped from series.\n\n Examples\n --------\n >>> s = ps.Series(data=np.arange(3), index=['A', 'B', 'C'])\n >>> s\n A 0\n B 1\n C 2\n dtype: int64\n\n >>> s.pop('A')\n 0\n\n >>> s\n B 1\n C 2\n dtype: int64\n\n >>> s = ps.Series(data=np.arange(3), index=['A', 'A', 'C'])\n >>> s\n A 0\n A 1\n C 2\n dtype: int64\n\n >>> s.pop('A')\n A 0\n A 1\n dtype: int64\n\n >>> s\n C 2\n dtype: int64\n\n Also support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... index=midx)\n >>> s\n lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.pop('lama')\n speed 45.0\n weight 200.0\n length 1.2\n dtype: float64\n\n >>> s\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n Also support for MultiIndex with several indexs.\n\n >>> midx = pd.MultiIndex([['a', 'b', 'c'],\n ... ['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 0, 0, 0, 1, 1, 1],\n ... [0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 0, 2]]\n ... )\n >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... 
index=midx)\n >>> s\n a lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n b falcon speed 320.0\n speed 1.0\n length 0.3\n dtype: float64\n\n >>> s.pop(('a', 'lama'))\n speed 45.0\n weight 200.0\n length 1.2\n dtype: float64\n\n >>> s\n a cow speed 30.0\n weight 250.0\n length 1.5\n b falcon speed 320.0\n speed 1.0\n length 0.3\n dtype: float64\n\n >>> s.pop(('b', 'falcon', 'speed'))\n (b, falcon, speed) 320.0\n (b, falcon, speed) 1.0\n dtype: float64\n \"\"\"\n if not is_name_like_value(item):\n raise TypeError(\"'key' should be string or tuple that contains strings\")\n if not is_name_like_tuple(item):\n item = (item,)\n if self._internal.index_level < len(item):\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n len(item), self._internal.index_level\n )\n )\n\n internal = self._internal\n scols = internal.index_spark_columns[len(item) :] + [self.spark.column]\n rows = [internal.spark_columns[level] == index for level, index in enumerate(item)]\n sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)\n\n psdf = self._drop(item)\n self._update_anchor(psdf)\n\n if self._internal.index_level == len(item):\n # if spark_frame has one column and one data, return data only without frame\n pdf = sdf.limit(2).toPandas()\n length = len(pdf)\n if length == 1:\n val = pdf[internal.data_spark_column_names[0]].iloc[0]\n if isinstance(self.dtype, CategoricalDtype):\n return self.dtype.categories[val]\n else:\n return val\n\n item_string = name_like_string(item)\n sdf = sdf.withColumn(SPARK_DEFAULT_INDEX_NAME, SF.lit(str(item_string)))\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],\n column_labels=[self._column_label],\n data_fields=[self._internal.data_fields[0]],\n )\n return first_series(DataFrame(internal))\n else:\n internal = internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in internal.index_spark_column_names[len(item) :]\n ],\n index_fields=internal.index_fields[len(item) :],\n index_names=self._internal.index_names[len(item) :],\n data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],\n )\n return first_series(DataFrame(internal))\n\n def copy(self, deep: bool = True) -> \"Series\":\n \"\"\"\n Make a copy of this object's indices and data.\n\n Parameters\n ----------\n deep : bool, default True\n this parameter is not supported but just dummy parameter to match pandas.\n\n Returns\n -------\n copy : Series\n\n Examples\n --------\n >>> s = ps.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n \"\"\"\n return first_series(DataFrame(self._internal))\n\n def mode(self, dropna: bool = True) -> \"Series\":\n \"\"\"\n Return the mode(s) of the dataset.\n\n Always returns Series even if only one value is returned.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n Returns\n -------\n Series\n Modes of the Series.\n\n Examples\n --------\n >>> s = ps.Series([0, 0, 1, 1, 1, np.nan, np.nan, np.nan])\n >>> s\n 0 0.0\n 1 0.0\n 2 1.0\n 3 1.0\n 4 1.0\n 5 NaN\n 6 NaN\n 7 NaN\n dtype: float64\n\n >>> s.mode()\n 0 1.0\n dtype: float64\n\n If there are several same modes, all items are shown\n\n >>> s = ps.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,\n ... 
np.nan, np.nan, np.nan])\n >>> s\n 0 0.0\n 1 0.0\n 2 1.0\n 3 1.0\n 4 1.0\n 5 2.0\n 6 2.0\n 7 2.0\n 8 3.0\n 9 3.0\n 10 3.0\n 11 NaN\n 12 NaN\n 13 NaN\n dtype: float64\n\n >>> s.mode().sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n <BLANKLINE>\n ... 1.0\n ... 2.0\n ... 3.0\n dtype: float64\n\n With 'dropna' set to 'False', we can also see NaN in the result\n\n >>> s.mode(False).sort_values() # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS\n <BLANKLINE>\n ... 1.0\n ... 2.0\n ... 3.0\n ... NaN\n dtype: float64\n \"\"\"\n ser_count = self.value_counts(dropna=dropna, sort=False)\n sdf_count = ser_count._internal.spark_frame\n most_value = ser_count.max()\n sdf_most_value = sdf_count.filter(\"count == {}\".format(most_value))\n sdf = sdf_most_value.select(\n F.col(SPARK_DEFAULT_INDEX_NAME).alias(SPARK_DEFAULT_SERIES_NAME)\n )\n internal = InternalFrame(spark_frame=sdf, index_spark_columns=None, column_labels=[None])\n\n return first_series(DataFrame(internal))\n\n def keys(self) -> \"ps.Index\":\n \"\"\"\n Return alias for index.\n\n Returns\n -------\n Index\n Index of the Series.\n\n Examples\n --------\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> psser = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n\n >>> psser.keys() # doctest: +SKIP\n MultiIndex([( 'lama', 'speed'),\n ( 'lama', 'weight'),\n ( 'lama', 'length'),\n ( 'cow', 'speed'),\n ( 'cow', 'weight'),\n ( 'cow', 'length'),\n ('falcon', 'speed'),\n ('falcon', 'weight'),\n ('falcon', 'length')],\n )\n \"\"\"\n return self.index\n\n # TODO: introduce 'method', 'limit', 'in_place'; fully support 'regex'\n def replace(\n self,\n to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,\n value: Optional[Union[List, Tuple]] = None,\n regex: Union[str, bool] = False,\n ) -> \"Series\":\n \"\"\"\n Replace values given in to_replace with value.\n Values of the Series are replaced with other values dynamically.\n\n .. note:: For partial pattern matching, the replacement is against the whole string,\n which is different from pandas'. That's by the nature of underlying Spark API.\n\n Parameters\n ----------\n to_replace : str, list, tuple, dict, Series, int, float, or None\n How to find the values that will be replaced.\n * numeric, str:\n\n - numeric: numeric values equal to to_replace will be replaced with value\n - str: string exactly matching to_replace will be replaced with value\n\n * list of str or numeric:\n\n - if to_replace and value are both lists or tuples, they must be the same length.\n - str and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values for different\n existing values.\n For example, {'a': 'b', 'y': 'z'} replaces the value ‘a’ with ‘b’ and ‘y’\n with ‘z’. To use a dict in this way the value parameter should be None.\n - For a DataFrame a dict can specify that different values should be replaced\n in different columns. 
For example, {'a': 1, 'b': 'z'} looks for the value 1\n in column ‘a’ and the value ‘z’ in column ‘b’ and replaces these values with\n whatever is specified in value.\n The value parameter should not be None in this case.\n You can treat this as a special case of passing two lists except that you are\n specifying the column to search in.\n\n See the examples section for examples of each of these.\n\n value : scalar, dict, list, tuple, str default None\n Value to replace any values matching to_replace with.\n For a DataFrame a dict of values can be used to specify which value to use\n for each column (columns not in the dict will not be filled).\n Regular expressions, strings and lists or dicts of such objects are also allowed.\n\n regex: bool or str, default False\n Whether to interpret to_replace and/or value as regular expressions.\n If this is True then to_replace must be a string.\n Alternatively, this could be a regular expression in which case to_replace must be None.\n\n\n Returns\n -------\n Series\n Object after replacement.\n\n Examples\n --------\n\n Scalar `to_replace` and `value`\n\n >>> s = ps.Series([0, 1, 2, 3, 4])\n >>> s\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n List-like `to_replace`\n\n >>> s.replace([0, 4], 5000)\n 0 5000\n 1 1\n 2 2\n 3 3\n 4 5000\n dtype: int64\n\n >>> s.replace([1, 2, 3], [10, 20, 30])\n 0 0\n 1 10\n 2 20\n 3 30\n 4 4\n dtype: int64\n\n Dict-like `to_replace`\n\n >>> s.replace({1: 1000, 2: 2000, 3: 3000, 4: 4000})\n 0 0\n 1 1000\n 2 2000\n 3 3000\n 4 4000\n dtype: int64\n\n Also support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... 
index=midx)\n >>> s\n lama speed 45.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.replace(45, 450)\n lama speed 450.0\n weight 200.0\n length 1.2\n cow speed 30.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.replace([45, 30, 320], 500)\n lama speed 500.0\n weight 200.0\n length 1.2\n cow speed 500.0\n weight 250.0\n length 1.5\n falcon speed 500.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.replace({45: 450, 30: 300})\n lama speed 450.0\n weight 200.0\n length 1.2\n cow speed 300.0\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n Regular expression `to_replace`\n\n >>> psser = ps.Series(['bat', 'foo', 'bait', 'abc', 'bar', 'zoo'])\n >>> psser.replace(to_replace=r'^ba.$', value='new', regex=True)\n 0 new\n 1 foo\n 2 bait\n 3 abc\n 4 new\n 5 zoo\n dtype: object\n\n >>> psser.replace(value='new', regex=r'^.oo$')\n 0 bat\n 1 new\n 2 bait\n 3 abc\n 4 bar\n 5 new\n dtype: object\n\n For partial pattern matching, the replacement is against the whole string\n\n >>> psser.replace('ba', 'xx', regex=True)\n 0 xx\n 1 foo\n 2 xx\n 3 abc\n 4 xx\n 5 zoo\n dtype: object\n \"\"\"\n if isinstance(regex, str):\n if to_replace is not None:\n raise ValueError(\"'to_replace' must be 'None' if 'regex' is not a bool\")\n to_replace = regex\n regex = True\n elif not isinstance(regex, bool):\n raise NotImplementedError(\"'regex' of %s type is not supported\" % type(regex).__name__)\n elif regex is True:\n assert isinstance(\n to_replace, str\n ), \"If 'regex' is True then 'to_replace' must be a string\"\n\n if to_replace is None:\n return self.fillna(method=\"ffill\")\n if not isinstance(to_replace, (str, list, tuple, dict, int, float)):\n raise TypeError(\"'to_replace' should be one of str, list, tuple, dict, int, float\")\n\n to_replace = list(to_replace) if isinstance(to_replace, tuple) else to_replace\n value = list(value) if isinstance(value, tuple) else value\n if isinstance(to_replace, list) and isinstance(value, list):\n if not len(to_replace) == len(value):\n raise ValueError(\n \"Replacement lists must match in length. Expecting {} got {}\".format(\n len(to_replace), len(value)\n )\n )\n to_replace = {k: v for k, v in zip(to_replace, value)}\n if isinstance(to_replace, dict):\n is_start = True\n if len(to_replace) == 0:\n current = self.spark.column\n else:\n for to_replace_, value in to_replace.items():\n cond = (\n (F.isnan(self.spark.column) | self.spark.column.isNull())\n if pd.isna(to_replace_)\n else (self.spark.column == SF.lit(to_replace_))\n )\n if is_start:\n current = F.when(cond, value)\n is_start = False\n else:\n current = current.when(cond, value)\n current = current.otherwise(self.spark.column)\n else:\n if regex:\n # to_replace must be a string\n cond = self.spark.column.rlike(to_replace)\n else:\n cond = self.spark.column.isin(to_replace)\n # to_replace may be a scalar\n if np.array(pd.isna(to_replace)).any():\n cond = cond | F.isnan(self.spark.column) | self.spark.column.isNull()\n current = F.when(cond, value).otherwise(self.spark.column)\n\n return self._with_new_scol(current) # TODO: dtype?\n\n def update(self, other: \"Series\") -> None:\n \"\"\"\n Modify Series in place using non-NA values from passed Series. 
Aligns on index.\n\n Parameters\n ----------\n other : Series\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> s = ps.Series([1, 2, 3])\n >>> s.update(ps.Series([4, 5, 6]))\n >>> s.sort_index()\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s = ps.Series(['a', 'b', 'c'])\n >>> s.update(ps.Series(['d', 'e'], index=[0, 2]))\n >>> s.sort_index()\n 0 d\n 1 b\n 2 e\n dtype: object\n\n >>> s = ps.Series([1, 2, 3])\n >>> s.update(ps.Series([4, 5, 6, 7, 8]))\n >>> s.sort_index()\n 0 4\n 1 5\n 2 6\n dtype: int64\n\n >>> s = ps.Series([1, 2, 3], index=[10, 11, 12])\n >>> s\n 10 1\n 11 2\n 12 3\n dtype: int64\n\n >>> s.update(ps.Series([4, 5, 6]))\n >>> s.sort_index()\n 10 1\n 11 2\n 12 3\n dtype: int64\n\n >>> s.update(ps.Series([4, 5, 6], index=[11, 12, 13]))\n >>> s.sort_index()\n 10 1\n 11 4\n 12 5\n dtype: int64\n\n If ``other`` contains NaNs the corresponding values are not updated\n in the original Series.\n\n >>> s = ps.Series([1, 2, 3])\n >>> s.update(ps.Series([4, np.nan, 6]))\n >>> s.sort_index()\n 0 4.0\n 1 2.0\n 2 6.0\n dtype: float64\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if not isinstance(other, Series):\n raise TypeError(\"'other' must be a Series\")\n\n if same_anchor(self, other):\n scol = (\n F.when(other.spark.column.isNotNull(), other.spark.column)\n .otherwise(self.spark.column)\n .alias(self._psdf._internal.spark_column_name_for(self._column_label))\n )\n internal = self._psdf._internal.with_new_spark_column(\n self._column_label, scol # TODO: dtype?\n )\n self._psdf._update_internal_frame(internal)\n else:\n combined = combine_frames(self._psdf, other._psdf, how=\"leftouter\")\n\n this_scol = combined[\"this\"]._internal.spark_column_for(self._column_label)\n that_scol = combined[\"that\"]._internal.spark_column_for(other._column_label)\n\n scol = (\n F.when(that_scol.isNotNull(), that_scol)\n .otherwise(this_scol)\n .alias(self._psdf._internal.spark_column_name_for(self._column_label))\n )\n\n internal = combined[\"this\"]._internal.with_new_spark_column(\n self._column_label, scol # TODO: dtype?\n )\n\n self._psdf._update_internal_frame(internal.resolved_copy, requires_same_anchor=False)\n\n def where(self, cond: \"Series\", other: Any = np.nan) -> \"Series\":\n \"\"\"\n Replace values where the condition is False.\n\n Parameters\n ----------\n cond : boolean Series\n Where cond is True, keep the original value. 
Where False,\n replace with corresponding value from other.\n other : scalar, Series\n Entries where cond is False are replaced with corresponding value from other.\n\n Returns\n -------\n Series\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> s1 = ps.Series([0, 1, 2, 3, 4])\n >>> s2 = ps.Series([100, 200, 300, 400, 500])\n >>> s1.where(s1 > 0).sort_index()\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s1.where(s1 > 1, 10).sort_index()\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> s1.where(s1 > 1, s1 + 100).sort_index()\n 0 100\n 1 101\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> s1.where(s1 > 1, s2).sort_index()\n 0 100\n 1 200\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n assert isinstance(cond, Series)\n\n # We should check the DataFrame from both `cond` and `other`.\n should_try_ops_on_diff_frame = not same_anchor(cond, self) or (\n isinstance(other, Series) and not same_anchor(other, self)\n )\n\n if should_try_ops_on_diff_frame:\n # Try to perform it with 'compute.ops_on_diff_frame' option.\n psdf = self.to_frame()\n tmp_cond_col = verify_temp_column_name(psdf, \"__tmp_cond_col__\")\n tmp_other_col = verify_temp_column_name(psdf, \"__tmp_other_col__\")\n\n psdf[tmp_cond_col] = cond\n psdf[tmp_other_col] = other\n\n # above logic makes a Spark DataFrame looks like below:\n # +-----------------+---+----------------+-----------------+\n # |__index_level_0__| 0|__tmp_cond_col__|__tmp_other_col__|\n # +-----------------+---+----------------+-----------------+\n # | 0| 0| false| 100|\n # | 1| 1| false| 200|\n # | 3| 3| true| 400|\n # | 2| 2| true| 300|\n # | 4| 4| true| 500|\n # +-----------------+---+----------------+-----------------+\n condition = (\n F.when(\n psdf[tmp_cond_col].spark.column,\n psdf._psser_for(psdf._internal.column_labels[0]).spark.column,\n )\n .otherwise(psdf[tmp_other_col].spark.column)\n .alias(psdf._internal.data_spark_column_names[0])\n )\n\n internal = psdf._internal.with_new_columns(\n [condition], column_labels=self._internal.column_labels\n )\n return first_series(DataFrame(internal))\n else:\n if isinstance(other, Series):\n other = other.spark.column\n condition = (\n F.when(cond.spark.column, self.spark.column)\n .otherwise(other)\n .alias(self._internal.data_spark_column_names[0])\n )\n return self._with_new_scol(condition)\n\n def mask(self, cond: \"Series\", other: Any = np.nan) -> \"Series\":\n \"\"\"\n Replace values where the condition is True.\n\n Parameters\n ----------\n cond : boolean Series\n Where cond is False, keep the original value. 
Where True,\n replace with corresponding value from other.\n other : scalar, Series\n Entries where cond is True are replaced with corresponding value from other.\n\n Returns\n -------\n Series\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> s1 = ps.Series([0, 1, 2, 3, 4])\n >>> s2 = ps.Series([100, 200, 300, 400, 500])\n >>> s1.mask(s1 > 0).sort_index()\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s1.mask(s1 > 1, 10).sort_index()\n 0 0\n 1 1\n 2 10\n 3 10\n 4 10\n dtype: int64\n\n >>> s1.mask(s1 > 1, s1 + 100).sort_index()\n 0 0\n 1 1\n 2 102\n 3 103\n 4 104\n dtype: int64\n\n >>> s1.mask(s1 > 1, s2).sort_index()\n 0 0\n 1 1\n 2 300\n 3 400\n 4 500\n dtype: int64\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n return self.where(~cond, other)\n\n def xs(self, key: Name, level: Optional[int] = None) -> \"Series\":\n \"\"\"\n Return cross-section from the Series.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n\n Returns\n -------\n Series\n Cross-section from the original Series\n corresponding to the selected index levels.\n\n Examples\n --------\n >>> midx = pd.MultiIndex([['a', 'b', 'c'],\n ... ['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],\n ... 
index=midx)\n >>> s\n a lama speed 45.0\n weight 200.0\n length 1.2\n b cow speed 30.0\n weight 250.0\n length 1.5\n c falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n Get values at specified index\n\n >>> s.xs('a')\n lama speed 45.0\n weight 200.0\n length 1.2\n dtype: float64\n\n Get values at several indexes\n\n >>> s.xs(('a', 'lama'))\n speed 45.0\n weight 200.0\n length 1.2\n dtype: float64\n\n Get values at specified index and level\n\n >>> s.xs('lama', level=1)\n a speed 45.0\n weight 200.0\n length 1.2\n dtype: float64\n \"\"\"\n if not isinstance(key, tuple):\n key = (key,)\n if level is None:\n level = 0\n\n internal = self._internal\n scols = (\n internal.index_spark_columns[:level]\n + internal.index_spark_columns[level + len(key) :]\n + [self.spark.column]\n )\n rows = [internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)]\n sdf = internal.spark_frame.filter(reduce(lambda x, y: x & y, rows)).select(scols)\n\n if internal.index_level == len(key):\n # if spark_frame has one column and one data, return data only without frame\n pdf = sdf.limit(2).toPandas()\n length = len(pdf)\n if length == 1:\n return pdf[self._internal.data_spark_column_names[0]].iloc[0]\n\n index_spark_column_names = (\n internal.index_spark_column_names[:level]\n + internal.index_spark_column_names[level + len(key) :]\n )\n index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]\n index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]\n\n internal = internal.copy(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],\n index_names=index_names,\n index_fields=index_fields,\n data_spark_columns=[scol_for(sdf, internal.data_spark_column_names[0])],\n )\n return first_series(DataFrame(internal))\n\n def pct_change(self, periods: int = 1) -> \"Series\":\n \"\"\"\n Percentage change between the current and a prior element.\n\n .. note:: the current implementation of this API uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. 
Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n\n Returns\n -------\n Series\n\n Examples\n --------\n\n >>> psser = ps.Series([90, 91, 85], index=[2, 4, 1])\n >>> psser\n 2 90\n 4 91\n 1 85\n dtype: int64\n\n >>> psser.pct_change()\n 2 NaN\n 4 0.011111\n 1 -0.065934\n dtype: float64\n\n >>> psser.sort_index().pct_change()\n 1 NaN\n 2 0.058824\n 4 0.011111\n dtype: float64\n\n >>> psser.pct_change(periods=2)\n 2 NaN\n 4 NaN\n 1 -0.055556\n dtype: float64\n \"\"\"\n scol = self.spark.column\n\n window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)\n prev_row = F.lag(scol, periods).over(window)\n\n return self._with_new_scol((scol - prev_row) / prev_row).spark.analyzed\n\n def combine_first(self, other: \"Series\") -> \"Series\":\n \"\"\"\n Combine Series values, choosing the calling Series's values first.\n\n Parameters\n ----------\n other : Series\n The value(s) to be combined with the `Series`.\n\n Returns\n -------\n Series\n The result of combining the Series with the other object.\n\n See Also\n --------\n Series.combine : Perform elementwise operation on two Series\n using a given function.\n\n Notes\n -----\n Result index will be the union of the two indexes.\n\n Examples\n --------\n >>> s1 = ps.Series([1, np.nan])\n >>> s2 = ps.Series([3, 4])\n >>> with ps.option_context(\"compute.ops_on_diff_frames\", True):\n ... s1.combine_first(s2)\n 0 1.0\n 1 4.0\n dtype: float64\n \"\"\"\n if not isinstance(other, ps.Series):\n raise TypeError(\"`combine_first` only allows `Series` for parameter `other`\")\n if same_anchor(self, other):\n this = self.spark.column\n that = other.spark.column\n combined = self._psdf\n else:\n combined = combine_frames(self._psdf, other._psdf)\n this = combined[\"this\"]._internal.spark_column_for(self._column_label)\n that = combined[\"that\"]._internal.spark_column_for(other._column_label)\n # If `self` has missing value, use value of `other`\n cond = F.when(this.isNull(), that).otherwise(this)\n # If `self` and `other` come from same frame, the anchor should be kept\n if same_anchor(self, other):\n return self._with_new_scol(cond) # TODO: dtype?\n index_scols = combined._internal.index_spark_columns\n sdf = combined._internal.spark_frame.select(\n *index_scols, cond.alias(self._internal.data_spark_column_names[0])\n ).distinct()\n internal = self._internal.with_new_sdf(\n sdf, index_fields=combined._internal.index_fields, data_fields=[None] # TODO: dtype?\n )\n return first_series(DataFrame(internal))\n\n def dot(self, other: Union[\"Series\", DataFrame]) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Compute the dot product between the Series and the columns of other.\n\n This method computes the dot product between the Series and another\n one, or the Series and each columns of a DataFrame.\n\n It can also be called using `self @ other` in Python >= 3.5.\n\n .. note:: This API is slightly different from pandas when indexes from both Series\n are not aligned and config 'compute.eager_check' is False. 
pandas raises an exception;\n however, pandas-on-Spark just proceeds and performs by ignoring mismatches with NaN\n permissively.\n\n >>> pdf1 = pd.Series([1, 2, 3], index=[0, 1, 2])\n >>> pdf2 = pd.Series([1, 2, 3], index=[0, 1, 3])\n >>> pdf1.dot(pdf2) # doctest: +SKIP\n ...\n ValueError: matrices are not aligned\n\n >>> psdf1 = ps.Series([1, 2, 3], index=[0, 1, 2])\n >>> psdf2 = ps.Series([1, 2, 3], index=[0, 1, 3])\n >>> with ps.option_context(\"compute.eager_check\", False):\n ... psdf1.dot(psdf2) # doctest: +SKIP\n ...\n 5\n\n Parameters\n ----------\n other : Series, DataFrame.\n The other object to compute the dot product with its columns.\n\n Returns\n -------\n scalar, Series\n Return the dot product of the Series and other if other is a\n Series, the Series of the dot product of Series and each rows of\n other if other is a DataFrame.\n\n Notes\n -----\n The Series and other has to share the same index if other is a Series\n or a DataFrame.\n\n Examples\n --------\n >>> s = ps.Series([0, 1, 2, 3])\n\n >>> s.dot(s)\n 14\n\n >>> s @ s\n 14\n\n >>> psdf = ps.DataFrame({'x': [0, 1, 2, 3], 'y': [0, -1, -2, -3]})\n >>> psdf\n x y\n 0 0 0\n 1 1 -1\n 2 2 -2\n 3 3 -3\n\n >>> with ps.option_context(\"compute.ops_on_diff_frames\", True):\n ... s.dot(psdf)\n ...\n x 14\n y -14\n dtype: int64\n \"\"\"\n if not same_anchor(self, other):\n if get_option(\"compute.eager_check\") and not self.index.sort_values().equals(\n other.index.sort_values()\n ):\n raise ValueError(\"matrices are not aligned\")\n elif len(self.index) != len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n if isinstance(other, DataFrame):\n other_copy: DataFrame = other.copy()\n column_labels = other_copy._internal.column_labels\n\n self_column_label = verify_temp_column_name(other_copy, \"__self_column__\")\n other_copy[self_column_label] = self\n self_psser = other_copy._psser_for(self_column_label)\n\n product_pssers = [\n cast(Series, other_copy._psser_for(label) * self_psser) for label in column_labels\n ]\n\n dot_product_psser = DataFrame(\n other_copy._internal.with_new_columns(product_pssers, column_labels=column_labels)\n ).sum()\n\n return cast(Series, dot_product_psser).rename(self.name)\n\n else:\n assert isinstance(other, Series)\n return (self * other).sum()\n\n def __matmul__(self, other: Union[\"Series\", DataFrame]) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def repeat(self, repeats: Union[int, \"Series\"]) -> \"Series\":\n \"\"\"\n Repeat elements of a Series.\n\n Returns a new Series where each element of the current Series\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or Series\n The number of repetitions for each element. This should be a\n non-negative integer. 
Repeating 0 times will return an empty\n Series.\n\n Returns\n -------\n Series\n Newly created Series with repeated elements.\n\n See Also\n --------\n Index.repeat : Equivalent function for Index.\n\n Examples\n --------\n >>> s = ps.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> s.repeat(2)\n 0 a\n 1 b\n 2 c\n 0 a\n 1 b\n 2 c\n dtype: object\n >>> ps.Series([1, 2, 3]).repeat(0)\n Series([], dtype: int64)\n \"\"\"\n if not isinstance(repeats, (int, Series)):\n raise TypeError(\n \"`repeats` argument must be integer or Series, but got {}\".format(type(repeats))\n )\n\n if isinstance(repeats, Series):\n if not same_anchor(self, repeats):\n psdf = self.to_frame()\n temp_repeats = verify_temp_column_name(psdf, \"__temp_repeats__\")\n psdf[temp_repeats] = repeats\n return (\n psdf._psser_for(psdf._internal.column_labels[0])\n .repeat(psdf[temp_repeats])\n .rename(self.name)\n )\n else:\n scol = F.explode(\n F.array_repeat(self.spark.column, repeats.astype(\"int32\").spark.column)\n ).alias(name_like_string(self.name))\n sdf = self._internal.spark_frame.select(self._internal.index_spark_columns + [scol])\n internal = self._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n data_spark_columns=[scol_for(sdf, name_like_string(self.name))],\n )\n return first_series(DataFrame(internal))\n else:\n if repeats < 0:\n raise ValueError(\"negative dimensions are not allowed\")\n\n psdf = self._psdf[[self.name]]\n if repeats == 0:\n return first_series(DataFrame(psdf._internal.with_filter(SF.lit(False))))\n else:\n return first_series(cast(\"ps.DataFrame\", ps.concat([psdf] * repeats)))\n\n def asof(self, where: Union[Any, List]) -> Union[Scalar, \"Series\"]:\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n\n If there is no good value, NaN is returned.\n\n .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`\n which is expensive.\n\n Parameters\n ----------\n where : index or array-like of indices\n\n Returns\n -------\n scalar or Series\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like\n\n Return scalar or Series\n\n Notes\n -----\n Indices are assumed to be sorted. Raises if this is not the case and config\n 'compute.eager_check' is True. If 'compute.eager_check' is False pandas-on-Spark just\n proceeds and performs by ignoring the indeces's order\n\n Examples\n --------\n >>> s = ps.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n A scalar `where`.\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20]).sort_index()\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n >>> s = ps.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40])\n >>> with ps.option_context(\"compute.eager_check\", False):\n ... 
s.asof(20)\n ...\n 1.0\n \"\"\"\n should_return_series = True\n if isinstance(self.index, ps.MultiIndex):\n raise ValueError(\"asof is not supported for a MultiIndex\")\n if isinstance(where, (ps.Index, ps.Series, DataFrame)):\n raise ValueError(\"where cannot be an Index, Series or a DataFrame\")\n if get_option(\"compute.eager_check\") and not self.index.is_monotonic_increasing:\n raise ValueError(\"asof requires a sorted index\")\n if not is_list_like(where):\n should_return_series = False\n where = [where]\n internal = self._internal.resolved_copy\n index_scol = internal.index_spark_columns[0]\n index_type = internal.spark_type_for(index_scol)\n spark_column = internal.data_spark_columns[0]\n monotonically_increasing_id_column = verify_temp_column_name(\n internal.spark_frame, \"__monotonically_increasing_id__\"\n )\n cond = [\n F.max_by(\n spark_column,\n F.when(\n (index_scol <= SF.lit(index).cast(index_type)) & spark_column.isNotNull()\n if pd.notna(index)\n # If index is nan and the value of the col is not null\n # then return monotonically_increasing_id .This will let max by\n # to return last index value , which is the behaviour of pandas\n else spark_column.isNotNull(),\n monotonically_increasing_id_column,\n ),\n )\n for index in where\n ]\n\n sdf = internal.spark_frame.withColumn(\n monotonically_increasing_id_column, F.monotonically_increasing_id()\n ).select(cond)\n\n if not should_return_series:\n with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):\n # Disable Arrow to keep row ordering.\n result = sdf.limit(1).toPandas().iloc[0, 0]\n return result if result is not None else np.nan\n\n # The data is expected to be small so it's fine to transpose/use default index.\n with ps.option_context(\"compute.default_index_type\", \"distributed\", \"compute.max_rows\", 1):\n if len(where) == len(set(where)) and not isinstance(index_type, TimestampType):\n psdf: DataFrame = DataFrame(sdf)\n psdf.columns = pd.Index(where)\n return first_series(psdf.transpose()).rename(self.name)\n else:\n # If `where` has duplicate items, leverage the pandas directly\n # since pandas API on Spark doesn't support the duplicate column name.\n pdf: pd.DataFrame = sdf.limit(1).toPandas()\n pdf.columns = pd.Index(where)\n return first_series(DataFrame(pdf.transpose())).rename(self.name)\n\n def mad(self) -> float:\n \"\"\"\n Return the mean absolute deviation of values.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.mad()\n 1.0\n \"\"\"\n\n sdf = self._internal.spark_frame\n spark_column = self.spark.column\n avg = unpack_scalar(sdf.select(F.avg(spark_column)))\n mad = unpack_scalar(sdf.select(F.avg(F.abs(spark_column - avg))))\n\n return mad\n\n def unstack(self, level: int = -1) -> DataFrame:\n \"\"\"\n Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.\n The level involved will automatically get sorted.\n\n Notes\n -----\n Unlike pandas, pandas-on-Spark doesn't check whether an index is duplicated or not\n because the checking of duplicated index requires scanning whole data which\n can be quite expensive.\n\n Parameters\n ----------\n level : int, str, or list of these, default last level\n Level(s) to unstack, can pass level name.\n\n Returns\n -------\n DataFrame\n Unstacked Series.\n\n Examples\n --------\n >>> s = ps.Series([1, 2, 3, 4],\n ... index=pd.MultiIndex.from_product([['one', 'two'],\n ... 
['a', 'b']]))\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1).sort_index()\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0).sort_index()\n one two\n a 1 3\n b 2 4\n \"\"\"\n if not isinstance(self.index, ps.MultiIndex):\n raise ValueError(\"Series.unstack only support for a MultiIndex\")\n index_nlevels = self.index.nlevels\n if level > 0 and (level > index_nlevels - 1):\n raise IndexError(\n \"Too many levels: Index has only {} levels, not {}\".format(index_nlevels, level + 1)\n )\n elif level < 0 and (level < -index_nlevels):\n raise IndexError(\n \"Too many levels: Index has only {} levels, {} is not a valid level number\".format(\n index_nlevels, level\n )\n )\n\n internal = self._internal.resolved_copy\n\n index_map = list(\n zip(internal.index_spark_column_names, internal.index_names, internal.index_fields)\n )\n pivot_col, column_label_names, _ = index_map.pop(level)\n index_scol_names, index_names, index_fields = zip(*index_map)\n col = internal.data_spark_column_names[0]\n\n sdf = internal.spark_frame\n sdf = sdf.groupby(list(index_scol_names)).pivot(pivot_col).agg(F.first(scol_for(sdf, col)))\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[scol_for(sdf, col) for col in index_scol_names],\n index_names=list(index_names),\n index_fields=list(index_fields),\n column_label_names=[column_label_names],\n )\n internal = internal.copy(\n data_fields=[\n field.copy(dtype=self._internal.data_fields[0].dtype)\n for field in internal.data_fields\n ]\n )\n return DataFrame(internal)\n\n def item(self) -> Scalar:\n \"\"\"\n Return the first element of the underlying data as a Python scalar.\n\n Returns\n -------\n scalar\n The first element of Series.\n\n Raises\n ------\n ValueError\n If the data is not length-1.\n\n Examples\n --------\n >>> psser = ps.Series([10])\n >>> psser.item()\n 10\n \"\"\"\n return self.head(2)._to_internal_pandas().item()\n\n def iteritems(self) -> Iterable[Tuple[Name, Any]]:\n \"\"\"\n Lazily iterate over (index, value) tuples.\n\n This method returns an iterable tuple (index, value). This is\n convenient if you want to create a lazy iterator.\n\n .. note:: Unlike pandas', the iteritems in pandas-on-Spark returns generator rather\n zip object\n\n Returns\n -------\n iterable\n Iterable of tuples containing the (index, value) pairs from a\n Series.\n\n See Also\n --------\n DataFrame.items : Iterate over (column name, Series) pairs.\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.\n\n Examples\n --------\n >>> s = ps.Series(['A', 'B', 'C'])\n >>> for index, value in s.items():\n ... 
print(\"Index : {}, Value : {}\".format(index, value))\n Index : 0, Value : A\n Index : 1, Value : B\n Index : 2, Value : C\n \"\"\"\n internal_index_columns = self._internal.index_spark_column_names\n internal_data_column = self._internal.data_spark_column_names[0]\n\n def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:\n k = (\n row[internal_index_columns[0]]\n if len(internal_index_columns) == 1\n else tuple(row[c] for c in internal_index_columns)\n )\n v = row[internal_data_column]\n return k, v\n\n for k, v in map(\n extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()\n ):\n yield k, v\n\n def items(self) -> Iterable[Tuple[Name, Any]]:\n \"\"\"This is an alias of ``iteritems``.\"\"\"\n return self.iteritems()\n\n def droplevel(self, level: Union[int, Name, List[Union[int, Name]]]) -> \"Series\":\n \"\"\"\n Return Series with requested index level(s) removed.\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n Returns\n -------\n Series\n Series with requested index level(s) removed.\n\n Examples\n --------\n >>> psser = ps.Series(\n ... [1, 2, 3],\n ... index=pd.MultiIndex.from_tuples(\n ... [(\"x\", \"a\"), (\"x\", \"b\"), (\"y\", \"c\")], names=[\"level_1\", \"level_2\"]\n ... ),\n ... )\n >>> psser\n level_1 level_2\n x a 1\n b 2\n y c 3\n dtype: int64\n\n Removing specific index level by level\n\n >>> psser.droplevel(0)\n level_2\n a 1\n b 2\n c 3\n dtype: int64\n\n Removing specific index level by name\n\n >>> psser.droplevel(\"level_2\")\n level_1\n x 1\n x 2\n y 3\n dtype: int64\n \"\"\"\n return first_series(self.to_frame().droplevel(level=level, axis=0)).rename(self.name)\n\n def tail(self, n: int = 5) -> \"Series\":\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> psser = ps.Series([1, 2, 3, 4, 5])\n >>> psser\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> psser.tail(3) # doctest: +SKIP\n 2 3\n 3 4\n 4 5\n dtype: int64\n \"\"\"\n return first_series(self.to_frame().tail(n=n)).rename(self.name)\n\n def explode(self) -> \"Series\":\n \"\"\"\n Transform each element of a list-like to a row.\n\n Returns\n -------\n Series\n Exploded lists to rows; index will be duplicated for these rows.\n\n See Also\n --------\n Series.str.split : Split string values on specified separator.\n Series.unstack : Unstack, a.k.a. 
pivot, Series with MultiIndex\n to produce DataFrame.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n DataFrame.explode : Explode a DataFrame from list-like\n columns to long format.\n\n Examples\n --------\n >>> psser = ps.Series([[1, 2, 3], [], [3, 4]])\n >>> psser\n 0 [1, 2, 3]\n 1 []\n 2 [3, 4]\n dtype: object\n\n >>> psser.explode() # doctest: +SKIP\n 0 1.0\n 0 2.0\n 0 3.0\n 1 NaN\n 2 3.0\n 2 4.0\n dtype: float64\n \"\"\"\n if not isinstance(self.spark.data_type, ArrayType):\n return self.copy()\n\n scol = F.explode_outer(self.spark.column).alias(name_like_string(self._column_label))\n\n internal = self._internal.with_new_columns([scol], keep_order=False)\n return first_series(DataFrame(internal))\n\n def argsort(self) -> \"Series\":\n \"\"\"\n Return the integer indices that would sort the Series values.\n Unlike pandas, the index order is not preserved in the result.\n\n Returns\n -------\n Series\n Positions of values within the sort order with -1 indicating\n nan values.\n\n Examples\n --------\n >>> psser = ps.Series([3, 3, 4, 1, 6, 2, 3, 7, 8, 7, 10])\n >>> psser\n 0 3\n 1 3\n 2 4\n 3 1\n 4 6\n 5 2\n 6 3\n 7 7\n 8 8\n 9 7\n 10 10\n dtype: int64\n\n >>> psser.argsort().sort_index()\n 0 3\n 1 5\n 2 0\n 3 1\n 4 6\n 5 2\n 6 4\n 7 7\n 8 9\n 9 8\n 10 10\n dtype: int64\n \"\"\"\n notnull = self.loc[self.notnull()]\n\n sdf_for_index = notnull._internal.spark_frame.select(notnull._internal.index_spark_columns)\n\n tmp_join_key = verify_temp_column_name(sdf_for_index, \"__tmp_join_key__\")\n sdf_for_index = InternalFrame.attach_distributed_sequence_column(\n sdf_for_index, tmp_join_key\n )\n # sdf_for_index:\n # +----------------+-----------------+\n # |__tmp_join_key__|__index_level_0__|\n # +----------------+-----------------+\n # | 0| 0|\n # | 1| 1|\n # | 2| 2|\n # | 3| 3|\n # | 4| 4|\n # +----------------+-----------------+\n\n sdf_for_data = notnull._internal.spark_frame.select(\n notnull.spark.column.alias(\"values\"), NATURAL_ORDER_COLUMN_NAME\n )\n sdf_for_data = InternalFrame.attach_distributed_sequence_column(\n sdf_for_data, SPARK_DEFAULT_SERIES_NAME\n )\n # sdf_for_data:\n # +---+------+-----------------+\n # | 0|values|__natural_order__|\n # +---+------+-----------------+\n # | 0| 3| 25769803776|\n # | 1| 3| 51539607552|\n # | 2| 4| 77309411328|\n # | 3| 1| 103079215104|\n # | 4| 2| 128849018880|\n # +---+------+-----------------+\n\n sdf_for_data = sdf_for_data.sort(\n scol_for(sdf_for_data, \"values\"), NATURAL_ORDER_COLUMN_NAME\n ).drop(\"values\", NATURAL_ORDER_COLUMN_NAME)\n\n tmp_join_key = verify_temp_column_name(sdf_for_data, \"__tmp_join_key__\")\n sdf_for_data = InternalFrame.attach_distributed_sequence_column(sdf_for_data, tmp_join_key)\n # sdf_for_index: sdf_for_data:\n # +----------------+-----------------+ +----------------+---+\n # |__tmp_join_key__|__index_level_0__| |__tmp_join_key__| 0|\n # +----------------+-----------------+ +----------------+---+\n # | 0| 0| | 0| 3|\n # | 1| 1| | 1| 4|\n # | 2| 2| | 2| 0|\n # | 3| 3| | 3| 1|\n # | 4| 4| | 4| 2|\n # +----------------+-----------------+ +----------------+---+\n\n sdf = sdf_for_index.join(sdf_for_data, on=tmp_join_key).drop(tmp_join_key)\n\n internal = self._internal.with_new_sdf(\n spark_frame=sdf,\n data_columns=[SPARK_DEFAULT_SERIES_NAME],\n index_fields=[\n InternalField(dtype=field.dtype) for field in self._internal.index_fields\n ],\n data_fields=[None],\n )\n psser = first_series(DataFrame(internal))\n\n return cast(\n Series,\n ps.concat([psser, 
self.loc[self.isnull()].spark.transform(lambda _: SF.lit(-1))]),\n )\n\n def argmax(self) -> int:\n \"\"\"\n Return int position of the largest value in the Series.\n\n If the maximum is achieved in multiple locations,\n the first row position is returned.\n\n Returns\n -------\n int\n Row position of the maximum value.\n\n Examples\n --------\n Consider dataset containing cereal calories\n\n >>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,\n ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})\n >>> s # doctest: +SKIP\n Corn Flakes 100.0\n Almond Delight 110.0\n Cinnamon Toast Crunch 120.0\n Cocoa Puff 110.0\n dtype: float64\n\n >>> s.argmax() # doctest: +SKIP\n 2\n \"\"\"\n sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)\n max_value = sdf.select(\n F.max(scol_for(sdf, self._internal.data_spark_column_names[0])),\n F.first(NATURAL_ORDER_COLUMN_NAME),\n ).head()\n if max_value[1] is None:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n elif max_value[0] is None:\n return -1\n # We should remember the natural sequence started from 0\n seq_col_name = verify_temp_column_name(sdf, \"__distributed_sequence_column__\")\n sdf = InternalFrame.attach_distributed_sequence_column(\n sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name\n )\n # If the maximum is achieved in multiple locations, the first row position is returned.\n return sdf.filter(\n scol_for(sdf, self._internal.data_spark_column_names[0]) == max_value[0]\n ).head()[0]\n\n def argmin(self) -> int:\n \"\"\"\n Return int position of the smallest value in the Series.\n\n If the minimum is achieved in multiple locations,\n the first row position is returned.\n\n Returns\n -------\n int\n Row position of the minimum value.\n\n Examples\n --------\n Consider dataset containing cereal calories\n\n >>> s = ps.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0,\n ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0})\n >>> s # doctest: +SKIP\n Corn Flakes 100.0\n Almond Delight 110.0\n Cinnamon Toast Crunch 120.0\n Cocoa Puff 110.0\n dtype: float64\n\n >>> s.argmin() # doctest: +SKIP\n 0\n \"\"\"\n sdf = self._internal.spark_frame.select(self.spark.column, NATURAL_ORDER_COLUMN_NAME)\n min_value = sdf.select(\n F.min(scol_for(sdf, self._internal.data_spark_column_names[0])),\n F.first(NATURAL_ORDER_COLUMN_NAME),\n ).head()\n if min_value[1] is None:\n raise ValueError(\"attempt to get argmin of an empty sequence\")\n elif min_value[0] is None:\n return -1\n # We should remember the natural sequence started from 0\n seq_col_name = verify_temp_column_name(sdf, \"__distributed_sequence_column__\")\n sdf = InternalFrame.attach_distributed_sequence_column(\n sdf.drop(NATURAL_ORDER_COLUMN_NAME), seq_col_name\n )\n # If the minimum is achieved in multiple locations, the first row position is returned.\n return sdf.filter(\n scol_for(sdf, self._internal.data_spark_column_names[0]) == min_value[0]\n ).head()[0]\n\n def compare(\n self, other: \"Series\", keep_shape: bool = False, keep_equal: bool = False\n ) -> DataFrame:\n \"\"\"\n Compare to another Series and show the differences.\n\n .. note:: This API is slightly different from pandas when indexes from both Series\n are not identical and config 'compute.eager_check' is False. 
pandas raises an exception;\n however, pandas-on-Spark just proceeds and performs by ignoring mismatches.\n\n >>> psser1 = ps.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]))\n >>> psser2 = ps.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 4, 3, 6]))\n >>> psser1.compare(psser2) # doctest: +SKIP\n ...\n ValueError: Can only compare identically-labeled Series objects\n\n >>> with ps.option_context(\"compute.eager_check\", False):\n ... psser1.compare(psser2) # doctest: +SKIP\n ...\n self other\n 3 3.0 4.0\n 4 4.0 3.0\n 5 5.0 NaN\n 6 NaN 5.0\n\n Parameters\n ----------\n other : Series\n Object to compare with.\n keep_shape : bool, default False\n If true, all rows and columns are kept.\n Otherwise, only the ones with different values are kept.\n keep_equal : bool, default False\n If true, the result keeps values that are equal.\n Otherwise, equal values are shown as NaNs.\n\n Returns\n -------\n DataFrame\n\n Notes\n -----\n Matching NaNs will not appear as a difference.\n\n Examples\n --------\n\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n >>> s1 = ps.Series([\"a\", \"b\", \"c\", \"d\", \"e\"])\n >>> s2 = ps.Series([\"a\", \"a\", \"c\", \"b\", \"e\"])\n\n Align the differences on columns\n\n >>> s1.compare(s2).sort_index()\n self other\n 1 b a\n 3 d b\n\n Keep all original rows\n\n >>> s1.compare(s2, keep_shape=True).sort_index()\n self other\n 0 None None\n 1 b a\n 2 None None\n 3 d b\n 4 None None\n\n Keep all original rows and also all original values\n\n >>> s1.compare(s2, keep_shape=True, keep_equal=True).sort_index()\n self other\n 0 a a\n 1 b a\n 2 c c\n 3 d b\n 4 e e\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n combined: DataFrame\n if same_anchor(self, other):\n self_column_label = verify_temp_column_name(other.to_frame(), \"__self_column__\")\n other_column_label = verify_temp_column_name(self.to_frame(), \"__other_column__\")\n combined = DataFrame(\n self._internal.with_new_columns(\n [self.rename(self_column_label), other.rename(other_column_label)]\n )\n )\n else:\n if get_option(\"compute.eager_check\") and not self.index.equals(other.index):\n raise ValueError(\"Can only compare identically-labeled Series objects\")\n\n combined = combine_frames(self.to_frame(), other.to_frame())\n\n this_column_label = \"self\"\n that_column_label = \"other\"\n if keep_equal and keep_shape:\n combined.columns = pd.Index([this_column_label, that_column_label])\n return combined\n\n this_data_scol = combined._internal.data_spark_columns[0]\n that_data_scol = combined._internal.data_spark_columns[1]\n index_scols = combined._internal.index_spark_columns\n sdf = combined._internal.spark_frame\n if keep_shape:\n this_scol = (\n F.when(this_data_scol == that_data_scol, None)\n .otherwise(this_data_scol)\n .alias(this_column_label)\n )\n this_field = combined._internal.data_fields[0].copy(\n name=this_column_label, nullable=True\n )\n\n that_scol = (\n F.when(this_data_scol == that_data_scol, None)\n .otherwise(that_data_scol)\n .alias(that_column_label)\n )\n that_field = combined._internal.data_fields[1].copy(\n name=that_column_label, nullable=True\n )\n else:\n sdf = sdf.filter(~this_data_scol.eqNullSafe(that_data_scol))\n\n this_scol = this_data_scol.alias(this_column_label)\n this_field = combined._internal.data_fields[0].copy(name=this_column_label)\n\n that_scol = that_data_scol.alias(that_column_label)\n that_field = combined._internal.data_fields[1].copy(name=that_column_label)\n\n sdf 
= sdf.select(*index_scols, this_scol, that_scol, NATURAL_ORDER_COLUMN_NAME)\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in self._internal.index_spark_column_names\n ],\n index_names=self._internal.index_names,\n index_fields=combined._internal.index_fields,\n column_labels=[(this_column_label,), (that_column_label,)],\n data_spark_columns=[scol_for(sdf, this_column_label), scol_for(sdf, that_column_label)],\n data_fields=[this_field, that_field],\n column_label_names=[None],\n )\n return DataFrame(internal)\n\n def align(\n self,\n other: Union[DataFrame, \"Series\"],\n join: str = \"outer\",\n axis: Optional[Axis] = None,\n copy: bool = True,\n ) -> Tuple[\"Series\", Union[DataFrame, \"Series\"]]:\n \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n\n Returns\n -------\n (left, right) : (Series, type of other)\n Aligned objects.\n\n Examples\n --------\n >>> ps.set_option(\"compute.ops_on_diff_frames\", True)\n >>> s1 = ps.Series([7, 8, 9], index=[10, 11, 12])\n >>> s2 = ps.Series([\"g\", \"h\", \"i\"], index=[10, 20, 30])\n\n >>> aligned_l, aligned_r = s1.align(s2)\n >>> aligned_l.sort_index()\n 10 7.0\n 11 8.0\n 12 9.0\n 20 NaN\n 30 NaN\n dtype: float64\n >>> aligned_r.sort_index()\n 10 g\n 11 None\n 12 None\n 20 h\n 30 i\n dtype: object\n\n Align with the join type \"inner\":\n\n >>> aligned_l, aligned_r = s1.align(s2, join=\"inner\")\n >>> aligned_l.sort_index()\n 10 7\n dtype: int64\n >>> aligned_r.sort_index()\n 10 g\n dtype: object\n\n Align with a DataFrame:\n\n >>> df = ps.DataFrame({\"a\": [1, 2, 3], \"b\": [\"a\", \"b\", \"c\"]}, index=[10, 20, 30])\n >>> aligned_l, aligned_r = s1.align(df)\n >>> aligned_l.sort_index()\n 10 7.0\n 11 8.0\n 12 9.0\n 20 NaN\n 30 NaN\n dtype: float64\n >>> aligned_r.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n\n >>> ps.reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n axis = validate_axis(axis)\n if axis == 1:\n raise ValueError(\"Series does not support columns axis.\")\n\n self_df = self.to_frame()\n left, right = self_df.align(other, join=join, axis=axis, copy=False)\n\n if left is self_df:\n left_ser = self\n else:\n left_ser = first_series(left).rename(self.name)\n\n return (left_ser.copy(), right.copy()) if copy else (left_ser, right)\n\n def between_time(\n self,\n start_time: Union[datetime.time, str],\n end_time: Union[datetime.time, str],\n include_start: bool = True,\n include_end: bool = True,\n axis: Axis = 0,\n ) -> \"Series\":\n \"\"\"\n Select values between particular times of the day (example: 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n Initial time as a time filter limit.\n end_time : datetime.time or str\n End time as a time filter limit.\n include_start : bool, default True\n Whether the start time needs to be included in the result.\n include_end : bool, default True\n Whether the end time needs to be included in the result.\n axis : {0 
or 'index', 1 or 'columns'}, default 0\n Determine range time on index or columns value.\n\n Returns\n -------\n Series\n Data from the original object filtered to the specified dates range.\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> psser = ps.Series([1, 2, 3, 4], index=idx)\n >>> psser\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n dtype: int64\n\n >>> psser.between_time('0:15', '0:45')\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n dtype: int64\n \"\"\"\n return first_series(\n self.to_frame().between_time(start_time, end_time, include_start, include_end, axis)\n ).rename(self.name)\n\n def at_time(\n self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0\n ) -> \"Series\":\n \"\"\"\n Select values at particular time of day (example: 9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n Series\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> psser = ps.Series([1, 2, 3, 4], index=idx)\n >>> psser\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n dtype: int64\n\n >>> psser.at_time('12:00')\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n dtype: int64\n \"\"\"\n return first_series(self.to_frame().at_time(time, asof, axis)).rename(self.name)\n\n def _cum(\n self,\n func: Callable[[Column], Column],\n skipna: bool,\n part_cols: Sequence[\"ColumnOrName\"] = (),\n ascending: bool = True,\n ) -> \"Series\":\n # This is used to cummin, cummax, cumsum, etc.\n\n if ascending:\n window = (\n Window.orderBy(F.asc(NATURAL_ORDER_COLUMN_NAME))\n .partitionBy(*part_cols)\n .rowsBetween(Window.unboundedPreceding, Window.currentRow)\n )\n else:\n window = (\n Window.orderBy(F.desc(NATURAL_ORDER_COLUMN_NAME))\n .partitionBy(*part_cols)\n .rowsBetween(Window.unboundedPreceding, Window.currentRow)\n )\n\n if skipna:\n # There is a behavior difference between pandas and PySpark. In case of cummax,\n #\n # Input:\n # A B\n # 0 2.0 1.0\n # 1 5.0 NaN\n # 2 1.0 0.0\n # 3 2.0 4.0\n # 4 4.0 9.0\n #\n # pandas:\n # A B\n # 0 2.0 1.0\n # 1 5.0 NaN\n # 2 5.0 1.0\n # 3 5.0 4.0\n # 4 5.0 9.0\n #\n # PySpark:\n # A B\n # 0 2.0 1.0\n # 1 5.0 1.0\n # 2 5.0 1.0\n # 3 5.0 4.0\n # 4 5.0 9.0\n\n scol = F.when(\n # Manually sets nulls given the column defined above.\n self.spark.column.isNull(),\n SF.lit(None),\n ).otherwise(func(self.spark.column).over(window))\n else:\n # Here, we use two Windows.\n # One for real data.\n # The other one for setting nulls after the first null it meets.\n #\n # There is a behavior difference between pandas and PySpark. 
In case of cummax,\n #\n # Input:\n # A B\n # 0 2.0 1.0\n # 1 5.0 NaN\n # 2 1.0 0.0\n # 3 2.0 4.0\n # 4 4.0 9.0\n #\n # pandas:\n # A B\n # 0 2.0 1.0\n # 1 5.0 NaN\n # 2 5.0 NaN\n # 3 5.0 NaN\n # 4 5.0 NaN\n #\n # PySpark:\n # A B\n # 0 2.0 1.0\n # 1 5.0 1.0\n # 2 5.0 1.0\n # 3 5.0 4.0\n # 4 5.0 9.0\n scol = F.when(\n # By going through with max, it sets True after the first time it meets null.\n F.max(self.spark.column.isNull()).over(window),\n # Manually sets nulls given the column defined above.\n SF.lit(None),\n ).otherwise(func(self.spark.column).over(window))\n\n return self._with_new_scol(scol)\n\n def _cumsum(self, skipna: bool, part_cols: Sequence[\"ColumnOrName\"] = ()) -> \"Series\":\n psser = self\n if isinstance(psser.spark.data_type, BooleanType):\n psser = psser.spark.transform(lambda scol: scol.cast(LongType()))\n elif not isinstance(psser.spark.data_type, NumericType):\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(psser.spark.data_type),\n psser.spark.data_type.simpleString(),\n )\n )\n return psser._cum(F.sum, skipna, part_cols)\n\n def _cumprod(self, skipna: bool, part_cols: Sequence[\"ColumnOrName\"] = ()) -> \"Series\":\n if isinstance(self.spark.data_type, BooleanType):\n scol = self._cum(\n lambda scol: F.min(F.coalesce(scol, SF.lit(True))), skipna, part_cols\n ).spark.column.cast(LongType())\n elif isinstance(self.spark.data_type, NumericType):\n num_zeros = self._cum(\n lambda scol: F.sum(F.when(scol == 0, 1).otherwise(0)), skipna, part_cols\n ).spark.column\n num_negatives = self._cum(\n lambda scol: F.sum(F.when(scol < 0, 1).otherwise(0)), skipna, part_cols\n ).spark.column\n sign = F.when(num_negatives % 2 == 0, 1).otherwise(-1)\n\n abs_prod = F.exp(\n self._cum(lambda scol: F.sum(F.log(F.abs(scol))), skipna, part_cols).spark.column\n )\n\n scol = F.when(num_zeros > 0, 0).otherwise(sign * abs_prod)\n\n if isinstance(self.spark.data_type, IntegralType):\n scol = F.round(scol).cast(LongType())\n else:\n raise TypeError(\n \"Could not convert {} ({}) to numeric\".format(\n spark_type_to_pandas_dtype(self.spark.data_type),\n self.spark.data_type.simpleString(),\n )\n )\n\n return self._with_new_scol(scol)\n\n # ----------------------------------------------------------------------\n # Accessor Methods\n # ----------------------------------------------------------------------\n dt = CachedAccessor(\"dt\", DatetimeMethods)\n str = CachedAccessor(\"str\", StringMethods)\n cat = CachedAccessor(\"cat\", CategoricalAccessor)\n plot = CachedAccessor(\"plot\", PandasOnSparkPlotAccessor)\n\n # ----------------------------------------------------------------------\n\n def _apply_series_op(\n self, op: Callable[[\"Series\"], Union[\"Series\", Column]], should_resolve: bool = False\n ) -> \"Series\":\n psser_or_scol = op(self)\n if isinstance(psser_or_scol, Series):\n psser = psser_or_scol\n else:\n psser = self._with_new_scol(psser_or_scol)\n if should_resolve:\n internal = psser._internal.resolved_copy\n return first_series(DataFrame(internal))\n else:\n return psser.copy()\n\n def _reduce_for_stat_function(\n self,\n sfun: Callable[[\"Series\"], Column],\n name: str_type,\n axis: Optional[Axis] = None,\n numeric_only: bool = True,\n **kwargs: Any,\n ) -> Scalar:\n \"\"\"\n Applies sfun to the column and returns a scalar\n\n Parameters\n ----------\n sfun : the stats function to be used for aggregation\n name : original pandas API name.\n axis : used only for sanity check because series only support index axis.\n 
numeric_only : not used by this implementation, but passed down by stats functions\n \"\"\"\n axis = validate_axis(axis)\n if axis == 1:\n raise NotImplementedError(\"Series does not support columns axis.\")\n\n scol = sfun(self)\n\n min_count = kwargs.get(\"min_count\", 0)\n if min_count > 0:\n scol = F.when(Frame._count_expr(self) >= min_count, scol)\n\n result = unpack_scalar(self._internal.spark_frame.select(scol))\n return result if result is not None else np.nan\n\n # Override the `groupby` to specify the actual return type annotation.\n def groupby(\n self,\n by: Union[Name, \"Series\", List[Union[Name, \"Series\"]]],\n axis: Axis = 0,\n as_index: bool = True,\n dropna: bool = True,\n ) -> \"SeriesGroupBy\":\n return cast(\n \"SeriesGroupBy\", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)\n )\n\n groupby.__doc__ = Frame.groupby.__doc__\n\n def _build_groupby(\n self, by: List[Union[\"Series\", Label]], as_index: bool, dropna: bool\n ) -> \"SeriesGroupBy\":\n from pyspark.pandas.groupby import SeriesGroupBy\n\n return SeriesGroupBy._build(self, by, as_index=as_index, dropna=dropna)\n\n def __getitem__(self, key: Any) -> Any:\n try:\n if (isinstance(key, slice) and any(type(n) == int for n in [key.start, key.stop])) or (\n type(key) == int\n and not isinstance(self.index.spark.data_type, (IntegerType, LongType))\n ):\n # Seems like pandas Series always uses int as positional search when slicing\n # with ints, searches based on index values when the value is int.\n return self.iloc[key]\n return self.loc[key]\n except SparkPandasIndexingError:\n raise KeyError(\n \"Key length ({}) exceeds index depth ({})\".format(\n len(key), self._internal.index_level\n )\n )\n\n def __getattr__(self, item: str_type) -> Any:\n if item.startswith(\"__\"):\n raise AttributeError(item)\n if hasattr(MissingPandasLikeSeries, item):\n property_or_func = getattr(MissingPandasLikeSeries, item)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self)\n else:\n return partial(property_or_func, self)\n raise AttributeError(\"'Series' object has no attribute '{}'\".format(item))\n\n def _to_internal_pandas(self) -> pd.Series:\n \"\"\"\n Return a pandas Series directly from _internal to avoid overhead of copy.\n\n This method is for internal use only.\n \"\"\"\n return self._psdf._internal.to_pandas_frame[self.name]\n\n def __repr__(self) -> str_type:\n max_display_count = get_option(\"display.max_rows\")\n if max_display_count is None:\n return self._to_internal_pandas().to_string(\n name=bool(self.name), dtype=bool(self.dtype)\n )\n\n pser = self._psdf._get_or_create_repr_pandas_cache(max_display_count)[self.name]\n pser_length = len(pser)\n pser = pser.iloc[:max_display_count]\n if pser_length > max_display_count:\n repr_string = pser.to_string(length=True)\n rest, prev_footer = repr_string.rsplit(\"\\n\", 1)\n match = REPR_PATTERN.search(prev_footer)\n if match is not None:\n length = match.group(\"length\")\n dtype_name = str(self.dtype.name)\n if self.name is None:\n footer = \"\\ndtype: {dtype}\\nShowing only the first {length}\".format(\n length=length, dtype=pprint_thing(dtype_name)\n )\n else:\n footer = (\n \"\\nName: {name}, dtype: {dtype}\"\n \"\\nShowing only the first {length}\".format(\n length=length, name=self.name, dtype=pprint_thing(dtype_name)\n )\n )\n return rest + footer\n return pser.to_string(name=self.name, dtype=self.dtype)\n\n def __dir__(self) -> Iterable[str_type]:\n if not isinstance(self.spark.data_type, StructType):\n fields = []\n 
else:\n fields = [f for f in self.spark.data_type.fieldNames() if \" \" not in f]\n return list(super().__dir__()) + fields\n\n def __iter__(self) -> None:\n return MissingPandasLikeSeries.__iter__(self)\n\n if sys.version_info >= (3, 7):\n # In order to support the type hints such as Series[...]. See DataFrame.__class_getitem__.\n def __class_getitem__(cls, params: Any) -> Type[SeriesType]:\n return create_type_for_series_type(params)\n\n elif (3, 5) <= sys.version_info < (3, 7):\n # The implementation is in its metaclass so this flag is needed to distinguish\n # pandas-on-Spark Series.\n is_series = None\n\n\ndef unpack_scalar(sdf: SparkDataFrame) -> Any:\n \"\"\"\n Takes a dataframe that is supposed to contain a single row with a single scalar value,\n and returns this value.\n \"\"\"\n lst = sdf.limit(2).toPandas()\n assert len(lst) == 1, (sdf, lst)\n row = lst.iloc[0]\n lst2 = list(row)\n assert len(lst2) == 1, (row, lst2)\n return lst2[0]\n\n\n@overload\ndef first_series(df: DataFrame) -> Series:\n ...\n\n\n@overload\ndef first_series(df: pd.DataFrame) -> pd.Series:\n ...\n\n\ndef first_series(df: Union[DataFrame, pd.DataFrame]) -> Union[Series, pd.Series]:\n \"\"\"\n Takes a DataFrame and returns the first column of the DataFrame as a Series\n \"\"\"\n assert isinstance(df, (DataFrame, pd.DataFrame)), type(df)\n if isinstance(df, DataFrame):\n return df._psser_for(df._internal.column_labels[0])\n else:\n return df[df.columns[0]]\n\n\ndef _test() -> None:\n import os\n import doctest\n import sys\n from pyspark.sql import SparkSession\n import pyspark.pandas.series\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.series.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\").appName(\"pyspark.pandas.series tests\").getOrCreate()\n )\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.series,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.Index", "pandas.isna", "pandas.api.types.is_list_like", "pandas.api.types.is_hashable", "pandas.DataFrame", "pandas.notna", "pandas.core.accessor.CachedAccessor", "pandas.io.formats.printing.pprint_thing", "pandas.Series", "numpy.issubdtype" ] ]
captaincapsaicin/slip
[ "3c112f51cd11118f1e11c0c6fdd8c3d31d304d9b" ]
[ "models_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for models.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as np\n\nimport models\nimport utils\n\n\nclass ModelsTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n dict(\n testcase_name='one_sequence',\n sequences=[[1, 0, 0]],\n ),\n dict(\n testcase_name='two_sequences',\n sequences=[[0, 0, 0], [1, 0, 0]],\n ),\n dict(\n testcase_name='three_sequences',\n sequences=[[0, 0, 0], [1, 0, 0], [2, 2, 2]],\n ),\n )\n def test_fit_predict(self, sequences):\n sequence_length = 3\n vocab_size = 3\n model = models.KerasModelWrapper(\n models.build_cnn_model, sequence_length, vocab_size, fit_kwargs={})\n x = utils.onehot(np.array(sequences), num_classes=vocab_size)\n y = np.ones(len(sequences))\n model.fit(x, y)\n output_shape = (len(sequences),)\n self.assertEqual(model.predict(x).shape, output_shape)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.array" ] ]
lleonart1984/rendezvous
[ "f8f5e73fa1ede7c33d8cf08548bce1475a0cc8da" ]
[ "tests/test_gpu_buffer_wrap.py" ]
[ "import torch\nfrom rendering.manager import *\n\nprint(torch.cuda.is_available())\n\n\nt = torch.zeros(3, device=torch.device('cuda:0'))\n# t[0] = t[0].item()\n\nt2 = torch.ones(3, device=torch.device('cuda:0'))\nt2[0] = t2[0].item()\n\nimage_width = 512\nimage_height = 512\n\npresenter = create_presenter(width=image_width, height=image_height, format=Format.VEC4, mode=PresenterMode.OFFLINE,\n usage=ImageUsage.STORAGE | ImageUsage.TRANSFER_SRC,\n debug=True)\n\npresenter.copy_on_the_gpu(t2.storage().data_ptr(), t.storage().data_ptr(), 4*3)\nt[0] = 2.0\npresenter.copy_on_the_gpu(t.storage().data_ptr(), t2.storage().data_ptr(), 4*3)\n# presenter.copy_buffer_to_gpu_pointer(t2.storage().data_ptr(), buffer)\n\nprint(t2)\nprint(t)\n" ]
[ [ "torch.device", "torch.cuda.is_available" ] ]
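presenter.copy_on_the_gpu is specific to the rendezvous rendering manager, and the byte count 4 * 3 is three float32 values. For comparison, the same device-to-device round trip can be written in stock PyTorch with no raw pointers; a sketch, guarded so it also runs on CPU-only machines:

```python
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

t = torch.zeros(3, device=device)
t2 = torch.ones(3, device=device)

t.copy_(t2)   # device-to-device copy of 3 * 4 bytes of float32
t[0] = 2.0
t2.copy_(t)   # and back

print(t2)     # tensor([2., 1., 1.])
```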
sergiovitale/pansharpening-cnn-python-version
[ "5cd5949572d6e797a90694bf99010c6c97dba8e2" ]
[ "fir_filter_wind.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2018 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').\nAll rights reserved. This work should only be used for nonprofit purposes.\n\"\"\"\n\nimport numpy as np\n\ndef fir_filter_wind(Hd, w):\n    \"\"\"\n    Compute an FIR filter with the window method.\n    Hd: desired frequency response (2D)\n    w:  window (2D)\n    \"\"\"\n    # Bring the desired response to zero-centered order and invert to the spatial domain.\n    hd = np.rot90(np.fft.fftshift(np.rot90(Hd, 2)), 2)\n    h = np.fft.fftshift(np.fft.ifft2(hd))\n    h = np.rot90(h, 2)\n    # Apply the window and normalize the taps to unit DC gain.\n    h = h * w\n    h = h / np.sum(h)\n    return h\n" ]
[ [ "numpy.rot90", "numpy.sum", "numpy.fft.ifft2" ] ]
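A short usage sketch for fir_filter_wind: an ideal circular low-pass response windowed by a separable 2D Kaiser window. The import assumes the file above is on the path, and the cutoff and beta values are arbitrary illustrations:

```python
import numpy as np
from fir_filter_wind import fir_filter_wind  # assumes the module above is importable

N = 17
# Ideal circular low-pass response sampled on the centered frequency grid.
fx = np.fft.fftshift(np.fft.fftfreq(N))
FX, FY = np.meshgrid(fx, fx)
Hd = (np.sqrt(FX**2 + FY**2) <= 0.25).astype(float)

# Separable 2D window from the outer product of a 1D Kaiser window.
w1d = np.kaiser(N, beta=8.6)
w2d = np.outer(w1d, w1d)

h = fir_filter_wind(Hd, w2d)
print(h.shape)                   # (17, 17)
print(np.isclose(h.sum(), 1.0))  # True: taps are normalized to unit DC gain
```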
davxy/numeric
[ "1e8b44a72e1d570433a5ba81ae0795a750ce5921" ]
[ "python/chebyshev_poly.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.polynomial.chebyshev import Chebyshev, cheb2poly\n\n\nmindeg, maxdeg = 0, 5\n\n# One distinct color per degree.\ncmap = plt.get_cmap('rainbow')\ncolors = cmap(np.linspace(0, 1, maxdeg - mindeg + 1))\n\n# Coefficient list in the Chebyshev basis: the 1 at index mindeg selects T_mindeg.\ncoefs = list(np.zeros(mindeg, int)) + [1]\nxx = np.linspace(-1, 1, 100)\ntx = 0.2\nfor i, col in zip(range(mindeg, maxdeg + 1), colors):\n    c = Chebyshev(coefs)\n    print('T({}) = {}'.format(i, cheb2poly(c.coef)))\n    yy = [c(x) for x in xx]\n    plt.gcf().text(tx, 0.9, 'T({})'.format(i), color=col)\n    tx += 0.1\n    plt.plot(xx, yy, color=col)\n    coefs.insert(0, 0)  # shift the coefficients to step up one degree\nplt.grid(True)\nplt.show()\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.grid", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.plot", "matplotlib.pyplot.gcf", "matplotlib.pyplot.show", "numpy.linspace", "numpy.polynomial.chebyshev.cheb2poly", "numpy.polynomial.chebyshev.Chebyshev" ] ]
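The plotted family obeys the three-term recurrence T_{n+1}(x) = 2x T_n(x) - T_{n-1}(x), which is why shifting the coefficient list by one position steps the degree. A quick numerical check with the same numpy.polynomial machinery:

```python
import numpy as np
from numpy.polynomial.chebyshev import Chebyshev

x = np.linspace(-1, 1, 100)
T = [Chebyshev([0] * n + [1]) for n in range(7)]  # basis elements T_0 .. T_6

for n in range(1, 6):
    # T_{n+1} should equal 2x * T_n - T_{n-1} pointwise.
    assert np.allclose(T[n + 1](x), 2 * x * T[n](x) - T[n - 1](x))
print("recurrence verified for T_2 .. T_6")
```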
RobMulla/kaggle-ieee-fraud-detection
[ "00cff8865aeb3b4524d7b054fef42c661b56a958" ]
[ "scripts/M044.py" ]
[ "\"\"\"\nCreated by: Rob Mulla\nSep 26\n\nIEEE Fraud Detection Model\n\n- FE013\n- Yang's Features\n- Raddars Features\n- Remove AV bad features automatically\n\n\"\"\"\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport os\nimport sys\nimport matplotlib.pylab as plt\nfrom sklearn.model_selection import KFold\nfrom datetime import datetime\nimport time\nimport logging\nfrom sklearn.metrics import roc_auc_score\nfrom catboost import CatBoostClassifier, Pool\nfrom timeit import default_timer as timer\nimport lightgbm as lgb\nimport gc\n\nstart = timer()\n\n##################\n# PARAMETERS\n###################\nrun_id = \"{:%m%d_%H%M}\".format(datetime.now())\nKERNEL_RUN = False\nMODEL_NUMBER = os.path.basename(__file__).split('.')[0]\n\nif KERNEL_RUN:\n INPUT_DIR = '../input/champs-scalar-coupling/'\n FE_DIR = '../input/molecule-fe024/'\n FOLDS_DIR = '../input/champs-3fold-ids/'\n\n\nTARGET = \"isFraud\"\nN_ESTIMATORS = 100000\nN_META_ESTIMATORS = 500000\nLEARNING_RATE = 0.005\nVERBOSE = 100\nEARLY_STOPPING_ROUNDS = 100\nRANDOM_STATE = 529\nN_THREADS = 58\nDEPTH = -1 #14\nN_FOLDS = 5\nSHUFFLE = False\nFE_SET = 'FE013' # Feature Engineering Version\nAV_THRESHOLD = 0.8\n\nMODEL_TYPE = \"lightgbm\"\n\n#####################\n## SETUP LOGGER\n#####################\ndef get_logger():\n \"\"\"\n credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480\n \"\"\"\n os.environ[\"TZ\"] = \"US/Eastern\"\n time.tzset()\n FORMAT = \"[%(levelname)s]%(asctime)s:%(name)s:%(message)s\"\n logging.basicConfig(format=FORMAT)\n logger = logging.getLogger(\"main\")\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler(sys.stdout)\n fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')\n formatter = logging.Formatter(FORMAT)\n handler.setFormatter(formatter)\n# logger.addHandler(handler)\n logger.addHandler(fhandler)\n return logger\n\nlogger = get_logger()\n\nlogger.info(f'Running for Model Number {MODEL_NUMBER}')\n\n##################\n# PARAMETERS\n###################\n\nif MODEL_TYPE == 'xgboost':\n EVAL_METRIC = \"AUC\"\nelif MODEL_TYPE == 'lightgbm':\n EVAL_METRIC = 'auc'\nelif MODEL_TYPE == 'catboost':\n EVAL_METRIC = \"AUC\"\n\n##################\n# TRACKING FUNCTION\n###################\n\ndef update_tracking(run_id,\n field,\n value, csv_file=\"../tracking/tracking.csv\", integer=False, digits=None, drop_incomplete_rows=False):\n \"\"\"\n Function to update the tracking CSV with information about the model\n \"\"\"\n try:\n df = pd.read_csv(csv_file, index_col=[0])\n except FileNotFoundError:\n df = pd.DataFrame()\n if integer:\n value = round(value)\n elif digits is not None:\n value = round(value, digits)\n if drop_incomplete_rows:\n df = df.loc[~df['AUC'].isna()]\n df.loc[run_id, field] = value # Model number is index\n df.to_csv(csv_file)\n\nupdate_tracking(run_id, \"model_number\", MODEL_NUMBER, drop_incomplete_rows=True)\nupdate_tracking(run_id, \"n_estimators\", N_ESTIMATORS)\nupdate_tracking(run_id, \"early_stopping_rounds\", EARLY_STOPPING_ROUNDS)\nupdate_tracking(run_id, \"random_state\", RANDOM_STATE)\nupdate_tracking(run_id, \"n_threads\", N_THREADS)\nupdate_tracking(run_id, \"learning_rate\", LEARNING_RATE)\nupdate_tracking(run_id, \"n_fold\", N_FOLDS)\nupdate_tracking(run_id, \"model_type\", MODEL_TYPE)\nupdate_tracking(run_id, \"eval_metric\", EVAL_METRIC)\nupdate_tracking(run_id, \"depth\", DEPTH)\nupdate_tracking(run_id, \"shuffle\", SHUFFLE)\nupdate_tracking(run_id, \"fe\", 
FE_SET)\nupdate_tracking(run_id, \"av_threshold\", AV_THRESHOLD)\n\n#####################\n# PREPARE MODEL DATA\n#####################\nfolds = KFold(n_splits=N_FOLDS, random_state=RANDOM_STATE, shuffle=SHUFFLE)\n\nlogger.info('Loading Data...')\ntrain_df = pd.read_parquet(f'../data/train_{FE_SET}.parquet')\ntest_df = pd.read_parquet(f'../data/test_{FE_SET}.parquet')\nlogger.info('Done loading Data...')\n\n###########\n# FEATURES\n###########\n\nFEATURES = [ 'V85', 'bank_type_TransactionAmt_mean', 'D5_fq_enc', 'V12',\n 'V81', 'V282', 'bank_type_D7_std', 'id_15', 'V13', 'C12_fq_enc',\n 'anomaly', 'D7_DT_D_std_score', 'D3_DT_D_min_max', 'card4_count_full',\n 'D14_DT_D_min_max', 'card1_count_full', 'V169', 'D3_DT_M_min_max', 'V279',\n 'V91', 'bank_type_D10_std', 'D14', 'D6_DT_M_std_score', 'D4_DT_W_min_max',\n 'V152', 'V56', 'D3_intercept_bin0', 'D14_intercept_bin0', 'V220', 'V277',\n 'D12_intercept', 'ProductCD_W_00cents', 'D13_intercept_bin0', 'V291',\n 'V189', 'D15_DT_M_min_max', 'C5_fq_enc', 'D3_fq_enc', 'card5_fq_enc',\n 'addr1_count_full', 'V266', 'D11_intercept_bin2', 'V23',\n 'D4_intercept_bin3', 'bank_type_D10_mean', 'D2_intercept_bin3', 'V306',\n 'DeviceType', 'V285', 'D5_DT_W_std_score', 'V131', 'V37', 'V296',\n 'bank_type_D1_mean', 'V75', 'D3_DT_W_std_score', 'D10_DT_M_min_max',\n 'id_33_0', 'V67', 'D4_intercept_bin4', 'V256', 'V143', 'uid5_D6_std',\n 'ProductCD_target_mean', 'mxC3', 'V129', 'D13_DT_M_std_score', 'V24',\n 'D3_DT_M_std_score', 'mxC4', 'D9', 'id_30_version_fq_enc',\n 'D5_DT_D_std_score', 'D11_DT_M_std_score', 'uid5_D6_mean',\n 'D14_DT_M_std_score', 'card5_TransactionAmt_std', 'V20', 'C8_fq_enc',\n 'V70', 'V127', 'D6_intercept', 'D15_DT_W_min_max',\n 'sum_Cxx_binary_higher_than_q95', 'V156', 'uid4_D12_mean', 'C5',\n 'uid4_D12_std', 'id_30_fq_enc', 'V61', 'id_33', 'D15_to_std_addr1',\n 'bank_type_D9_mean', 'D5_intercept', 'D10_DT_W_min_max', 'V130',\n 'bank_type_D9_std', 'uid5_D7_std', 'bank_type_D14_mean',\n 'bank_type_D3_std', 'bank_type_D5_mean', 'ProductCD', 'M8', 'V44',\n 'D6_fq_enc', 'D15_DT_D_min_max', 'D11_intercept_bin0', 'V257',\n 'bank_type_D7_mean', 'V76', 'D15', 'V38', 'V55', 'V261', 'V149', 'D4',\n 'D8_intercept_bin0', 'M2', 'bank_type_D6_std', 'id_30_version',\n 'D4_intercept_bin1', 'D15_to_mean_card4', 'V82', 'D3_DT_D_std_score',\n 'D10_intercept_bin3', 'bank_type_D2_std', 'V77', 'M7', 'D11',\n 'D4_intercept_bin2', 'email_check', 'V294', 'V317', 'V308',\n 'id_33_fq_enc', 'bank_type_D5_std', 'D8_intercept', 'V62', 'V187',\n 'card5_TransactionAmt_mean', 'bank_type_D12_mean', 'id_33_count_dist',\n 'D2_intercept_bin2', 'C10', 'V86', 'D8_DT_M_min_max',\n 'D15_intercept_bin4', 'D6_DT_W_std_score', 'uid5_D7_mean', 'C9_fq_enc',\n 'mxC10', 'D14_DT_W_std_score', 'card2_count_full', 'V258',\n 'bank_type_D14_std', 'D10_intercept_bin4', 'V83', 'bank_type_D13_std',\n 'D8_DT_W_min_max', 'TransactionAmt', 'V312', 'D14_intercept', 'id_33_1',\n 'D15_intercept_bin2', 'D12_DT_W_std_score', 'V78', 'D8_D9_decimal_dist',\n 'M9', 'V281', 'bank_type_D12_std', 'V54', 'C9', 'M4_target_mean',\n 'sum_Cxx_binary_higher_than_q90', 'D10_DT_D_min_max', 'bank_type_D3_mean',\n 'bank_type_D8_mean', 'R_emaildomain_prefix', 'bank_type_D6_mean', 'V314',\n 'D11_DT_W_std_score', 'D10', 'D4_DT_D_min_max', 'V283',\n 'D10_intercept_bin2', 'D13_intercept', 'D8_DT_D_min_max', 'C2_fq_enc',\n 'V165', 'D1_intercept_bin4', 'bank_type_D13_mean', 'D3_intercept',\n 'TransactionAmt_2Dec', 'card3_div_Mean_D9_DOY', 'C12',\n 'D4_DT_M_std_score', 'D2_intercept_bin1', 'mxC8', 'D2_fq_enc',\n 
'addr1_third_digit', 'D4_fq_enc', 'D1_fq_enc', 'mxC12', 'D8',\n 'D10_intercept_bin1', 'id_01', 'id_09', 'id_03', 'addr1_second_digit',\n 'D15_to_mean_addr1', 'sum_Cxx_binary_higher_than_q80', 'V53',\n 'TransactionAmt_decimal', 'card3_div_Mean_D6_DOY', 'D15_intercept_bin3',\n 'V45', 'id_02_to_std_card4', 'addr2_div_Mean_D10_DOY_productCD',\n 'DeviceInfo_version', 'DeviceInfo_device', 'D1_intercept_bin3',\n 'D11_intercept', 'DeviceInfo_version_fq_enc', 'C6', 'uid5_D13_std',\n 'TransactionAmt_DT_M_min_max', 'dist2', 'C8', 'D15_intercept_bin1', 'M3',\n 'R_emaildomain_fq_enc', 'DeviceInfo_device_fq_enc', 'D6_DT_D_std_score',\n 'sum_Cxx_binary_higher_than_q60', 'D11__DeviceInfo',\n 'TranAmt_div_Mean_D12_DOY_productCD', 'D10_DT_M_std_score',\n 'uid5_D13_mean', 'mxC5', 'id_30', 'addr2_div_Mean_D4_DOY', 'uid2_D12_std',\n 'C11_fq_enc', 'id_06', 'uid2_D12_mean', 'sum_Cxx_binary_higher_than_q70',\n 'V310', 'V307', 'C6_fq_enc', 'D8_fq_enc', 'dist2_fq_enc',\n 'D2_intercept_bin0', 'addr1_div_Mean_D10_DOY_productCD',\n 'addr1_div_Mean_D10_DOY', 'addr1_div_Mean_D11_DOY', 'uid2_D8_std',\n 'id_02__id_20', 'V313', 'D4_intercept_bin0', 'D11_DT_D_std_score',\n 'Transaction_day_of_week', 'card6_div_Mean_D3_DOY', 'uid2_D1_std',\n 'uid5_D11_mean', 'uid_fq_enc', 'D14_DT_D_std_score', 'D12_DT_D_std_score',\n 'id_02_to_mean_card4', 'uid4_D13_std', 'D1_intercept_bin1',\n 'id_02_to_std_card1', 'uid5_D11_std', 'P_emaildomain_prefix', 'DT_day',\n 'D8_DT_M_std_score', 'uid2_D1_mean', 'TransactionAmt_to_mean_card4',\n 'card5_div_Mean_D11_DOY', 'D15_DT_M_std_score', 'V87', 'uid_D12_std',\n 'id_31_device_fq_enc', 'uid2_D11_mean', 'card3_DT_W_week_day_dist_best',\n 'uid5_D14_std', 'uid2_D15_mean', 'sum_Cxx_binary_higher_than_q50',\n 'id_13', 'card3_div_Mean_D11_DOY', 'C11',\n 'bank_type_DT_W_week_day_dist_best', 'card4_div_Mean_D11_DOY',\n 'addr1_div_Mean_D1_DOY', 'uid2_D4_mean', 'card2_div_Mean_D11_DOY',\n 'C13_fq_enc', 'uid4_D13_mean', 'card5_DT_W_week_day_dist_best', 'id_02',\n 'uid5_D14_mean', 'uid2_D10_mean', 'id_01_count_dist',\n 'D13_DT_W_std_score', 'C2', 'C14', 'addr2_div_Mean_D10_DOY',\n 'uid2_D11_std', 'addr1_div_Mean_D1_DOY_productCD', 'id_02_to_mean_card1',\n 'dist1_fq_enc', 'card1_div_Mean_D11_DOY', 'D15_to_std_card1',\n 'TransactionAmt_DT_M_std_score', 'uid2_D6_std',\n 'TransactionAmt_to_std_card4', 'uid2_D15_std', 'uid3_D8_std',\n 'card6_div_Mean_D11_DOY', 'TranAmt_div_Mean_D14_DOY',\n 'card3_div_Mean_D14_DOY', 'D2', 'D1', 'uid_D15_mean', 'uid4_D6_std',\n 'uid_D15_std', 'D10_intercept_bin0', 'DeviceInfo_fq_enc', 'uid2_D13_std',\n 'uid_D12_mean', 'uid4_D6_mean', 'uid_D1_std', 'D1_intercept_bin2',\n 'uid_D10_mean', 'card2__id_20', 'uid4_D7_std', 'uid3_D13_std',\n 'C14_fq_enc', 'uid_D8_std', 'uid3_D13_mean', 'uid2_D4_std',\n 'addr1_div_Mean_D4_DOY', 'uid_D4_mean', 'D4_DT_W_std_score',\n 'addr2_div_Mean_D1_DOY_productCD', 'uid_D11_mean', 'D15_intercept_bin0',\n 'uid2_D10_std', 'uid_D13_std', 'uid2_fq_enc', 'uid2_D13_mean',\n 'uid2_D2_mean', 'D2_intercept', 'uid_D11_std', 'card2', 'uid4_D14_std',\n 'C_sum_after_clip75', 'R_emaildomain', 'dist1', 'id_05',\n 'uid_TransactionAmt_mean', 'uid_D1_mean', 'uid3_D1_std', 'uid5_D8_std',\n 'uid3_D6_std', 'Transaction_hour_of_day', 'uid4_D14_mean', 'uid5_D10_std',\n 'uid3_D10_std', 'uid5_D1_std', 'uid5_D15_std', 'uid2_D7_mean',\n 'uid3_D11_std', 'uid4_D8_std', 'D13_DT_D_std_score', 'uid3_D11_mean',\n 'uid2_D14_std', 'uid2_D7_std', 'uid2_D14_mean', 'uid_D13_mean',\n 'uid_D10_std', 'uid2_D3_std', 'uid_D6_std', 'uid3_D15_std',\n 'addr1_fq_enc', 'id_31', 
'uid_TransactionAmt_std',\n 'card1_div_Mean_D4_DOY_productCD', 'uid2_TransactionAmt_mean',\n 'C_sum_after_clip90', 'uid2_TransactionAmt_std', 'uid4_D7_mean',\n 'uid2_D6_mean', 'uid3_D15_mean', 'D15_to_mean_card1', 'uid5_D15_mean',\n 'M4', 'uid3_D7_std', 'card2_div_Mean_D4_DOY',\n 'card5_div_Mean_D4_DOY_productCD', 'card5_div_Mean_D4_DOY',\n 'D4_intercept', 'uid_D4_std', 'card6_div_Mean_D4_DOY_productCD',\n 'card5__P_emaildomain', 'card1_fq_enc', 'uid5_D10_mean',\n 'card1_div_Mean_D4_DOY', 'C1', 'M6', 'uid2_D2_std',\n 'P_emaildomain_fq_enc', 'card1_TransactionAmt_mean', 'uid3_D10_mean',\n 'TransactionAmt_DT_W_min_max', 'uid5_D4_std',\n 'card1_div_Mean_D10_DOY_productCD', 'uid3_D1_mean',\n 'card1_div_Mean_D10_DOY', 'uid_D14_mean', 'mxC9',\n 'TranAmt_div_Mean_D4_DOY_productCD', 'D15_DT_W_std_score',\n 'DeviceInfo__P_emaildomain', 'uid3_D14_mean', 'bank_type_DT_M', 'mxC11',\n 'uid5_D1_mean', 'uid_D2_mean', 'D10_DT_W_std_score',\n 'card3_DT_M_month_day_dist_best', 'uid3_D2_std',\n 'TranAmt_div_Mean_D4_DOY', 'card1_TransactionAmt_std',\n 'card3_div_Mean_D4_DOY_productCD', 'D1_intercept_bin0', 'uid3_D4_std',\n 'card2_div_Mean_D10_DOY', 'uid_D2_std', 'uid3_D14_std', 'uid3_D4_mean',\n 'uid_D7_mean', 'uid5_D2_std', 'card4_div_Mean_D4_DOY_productCD',\n 'card6_div_Mean_D4_DOY', 'TranAmt_div_Mean_D10_DOY', 'uid2_D9_std',\n 'TransactionAmt_DT_W_std_score', 'C1_fq_enc', 'card1_div_Mean_D1_DOY',\n 'uid5_D4_mean', 'uid3_D6_mean', 'mxC14', 'uid5_D2_mean',\n 'card4_div_Mean_D4_DOY', 'card3_div_Mean_D4_DOY', 'uid_D14_std', 'M5',\n 'C13', 'mxC6', 'card5_div_Mean_D10_DOY_productCD',\n 'card3_DT_M_month_day_dist', 'card2_div_Mean_D10_DOY_productCD',\n 'uid_D7_std', 'card2_div_Mean_D4_DOY_productCD',\n 'bank_type_DT_M_month_day_dist', 'uid3_D7_mean', 'uid_D3_std',\n 'uid5_fq_enc', 'uid3_fq_enc', 'uid_D3_mean', 'D4_DT_D_std_score',\n 'uid3_D2_mean', 'uid4_D1_std', 'uid2_D5_std', 'uid4_D10_std',\n 'bank_type_DT_D_hour_dist_best', 'uid2_D8_mean',\n 'card6_div_Mean_D10_DOY_productCD', 'card1_div_Mean_D1_DOY_productCD',\n 'uid5_D9_std', 'card4_div_Mean_D10_DOY_productCD', 'uid2_D3_mean',\n 'uid_D6_mean', 'card2_div_Mean_D1_DOY', 'card5_div_Mean_D10_DOY', 'mxC2',\n 'card2_TransactionAmt_std', 'bank_type_DT_W_week_day_dist',\n 'card2_TransactionAmt_mean', 'uid4_D10_mean', 'id_31_count_dist',\n 'TranAmt_div_Mean_D1_DOY', 'uid3_D3_std', 'uid4_D15_std',\n 'card5_div_Mean_D1_DOY_productCD', 'card4_div_Mean_D10_DOY',\n 'card5_DT_D_hour_dist_best', 'uid4_D4_std', 'card5_DT_M_month_day_dist',\n 'bank_type_DT_W', 'addr1__card1', 'bank_type_DT_M_month_day_dist_best',\n 'card2_div_Mean_D1_DOY_productCD', 'card6_div_Mean_D10_DOY',\n 'uid2_D5_mean', 'uid_DT_M', 'card2__dist1', 'uid2_D9_mean',\n 'card5_DT_M_month_day_dist_best', 'TranAmt_div_Mean_D10_DOY_productCD',\n 'uid4_D11_std', 'uid_D5_mean', 'uid5_D3_std',\n 'TransactionAmt_DT_D_std_score', 'D8_DT_W_std_score',\n 'card5_DT_W_week_day_dist', 'uid5_D5_std', 'card3_DT_W_week_day_dist',\n 'uid4_D9_std', 'D10_intercept', 'uid3_D3_mean', 'uid4_D5_std',\n 'uid_D5_std', 'card5_div_Mean_D1_DOY', 'uid5_D3_mean', 'bank_type_DT_D',\n 'uid4_D1_mean', 'uid_D8_mean', 'uid3_D5_mean', 'D15_intercept',\n 'uid5_TransactionAmt_std', 'uid3_D5_std', 'uid4_D4_mean', 'uid4_D15_mean',\n 'uid5_D8_mean', 'uid5_D9_mean', 'uid_D9_std', 'uid_D9_mean',\n 'uid5_D5_mean', 'mtransamt', 'bank_type_DT_D_hour_dist', 'uid4_D11_mean',\n 'D15_DT_D_std_score', 'TransactionAmt_DT_D_min_max', 'uid4_D2_mean',\n 'ntrans', 'addr2_div_Mean_D1_DOY', 'uid5_TransactionAmt_mean',\n 'uid3_D9_std', 'TransactionAmt_Dec', 
'uid3_TransactionAmt_std',\n 'card5_DT_D_hour_dist', 'card1', 'card4_div_Mean_D1_DOY_productCD',\n 'P_emaildomain__C2', 'card3_div_Mean_D10_DOY', 'uid4_D3_std',\n 'card3_DT_D_hour_dist_best', 'uid4_D8_mean', 'uid4_D2_std',\n 'card6_div_Mean_D1_DOY_productCD', 'uid_DT_W', 'Sum_TransAmt_Day',\n 'uid4_D5_mean', 'card4_div_Mean_D1_DOY',\n 'card3_div_Mean_D10_DOY_productCD', 'uid3_D8_mean',\n 'TransactionAmt_userid_median', 'uid4_fq_enc', 'uid3_TransactionAmt_mean',\n 'uid3_D9_mean', 'card6_div_Mean_D1_DOY', 'Trans_Count_Day', 'mxC1',\n 'D10_DT_D_std_score', 'card3_div_Mean_D1_DOY',\n 'TransactionAmt_to_mean_card1', 'card2_fq_enc', 'product_type',\n 'card3_div_Mean_D1_DOY_productCD', 'TransactionAmt_to_std_card1',\n 'uid_DT_D', 'uid4_D9_mean', 'D1_intercept', 'card3_DT_D_hour_dist',\n 'TranAmt_div_Mean_D1_DOY_productCD', 'product_type_DT_M', 'uid4_D3_mean',\n 'uid4_TransactionAmt_mean', 'uid4_TransactionAmt_std',\n 'D8_DT_D_std_score', 'Mean_TransAmt_Day', 'minDT', 'product_type_DT_W',\n 'mintransamt', 'maxtransamt', 'TransactionAmt_userid_std',\n 'P_emaildomain', 'card1__card5', 'product_type_DT_D', 'mxC13', 'maxDT',\n 'id_19', 'DeviceInfo', 'id_20', 'addr1', 'userid_min_C1', 'userid_max_C1',\n 'userid_max_minus_min_C1', 'userid_unique_C1', 'userid_mean_C1',\n 'userid_min_C2', 'userid_max_C2', 'userid_max_minus_min_C2',\n 'userid_unique_C2', 'userid_mean_C2', 'userid_min_C3', 'userid_max_C3',\n 'userid_max_minus_min_C3', 'userid_unique_C3', 'userid_mean_C3',\n 'userid_min_C4', 'userid_max_C4', 'userid_max_minus_min_C4',\n 'userid_unique_C4', 'userid_mean_C4', 'userid_min_C5', 'userid_max_C5',\n 'userid_max_minus_min_C5', 'userid_unique_C5', 'userid_mean_C5',\n 'userid_min_C6', 'userid_max_C6', 'userid_max_minus_min_C6',\n 'userid_unique_C6', 'userid_mean_C6', 'userid_min_C7', 'userid_max_C7',\n 'userid_max_minus_min_C7', 'userid_unique_C7', 'userid_mean_C7',\n 'userid_min_C8', 'userid_max_C8', 'userid_max_minus_min_C8',\n 'userid_unique_C8', 'userid_mean_C8', 'userid_min_C9', 'userid_max_C9',\n 'userid_max_minus_min_C9', 'userid_unique_C9', 'userid_mean_C9',\n 'userid_min_C10', 'userid_max_C10', 'userid_max_minus_min_C10',\n 'userid_unique_C10', 'userid_mean_C10', 'userid_min_C11',\n 'userid_max_C11', 'userid_max_minus_min_C11', 'userid_unique_C11',\n 'userid_mean_C11', 'userid_min_C12', 'userid_max_C12',\n 'userid_max_minus_min_C12', 'userid_unique_C12', 'userid_mean_C12',\n 'userid_min_C13', 'userid_max_C13', 'userid_max_minus_min_C13',\n 'userid_unique_C13', 'userid_mean_C13', 'userid_min_C14',\n 'userid_max_C14', 'userid_max_minus_min_C14', 'userid_unique_C14',\n 'userid_mean_C14', 'hour', 'hour_sin', 'week', 'week_sin', 'week_cos',\n 'month', 'life_of_customer', 'addr1_broad_area',\n 'uid6_TransactionAmt_mean', 'uid6_TransactionAmt_std',\n 'hour_TransactionAmt_mean', 'hour_TransactionAmt_std',\n 'week_TransactionAmt_mean', 'week_TransactionAmt_std', 'D1_diff',\n 'D10_diff', 'D15_diff', 'new_identity_M5_mean', 'new_identity_M6_mean',\n 'new_identity_V315_mean', 'new_identity_D1_diff_mean',\n 'new_identity_D3_mean', 'new_identity_D10_diff_mean',\n 'new_identity_D15_diff_mean', 'addr1_addr2_new_identity_M5_mean_mean',\n 'addr1_addr2_new_identity_M5_mean_std',\n 'addr1_addr2_new_identity_M6_mean_mean',\n 'addr1_addr2_new_identity_M6_mean_std',\n 'addr1_addr2_new_identity_V315_mean_mean',\n 'addr1_addr2_new_identity_V315_mean_std',\n 'addr1_addr2_new_identity_D1_diff_mean_mean',\n 'addr1_addr2_new_identity_D1_diff_mean_std',\n 'addr1_addr2_new_identity_D10_diff_mean_mean',\n 
'addr1_addr2_new_identity_D10_diff_mean_std',\n 'addr1_addr2_new_identity_D15_diff_mean_mean',\n 'addr1_addr2_new_identity_D15_diff_mean_std',\n 'new_identity_ProductCD_TransactionAmt_mean', 'uid6_C1_mean',\n 'uid6_C1_std', 'uid6_V54_mean', 'uid6_V54_std', 'uid6_V281_mean',\n 'uid6_V281_std', 'uid6_C11_mean', 'uid6_C11_std', 'uid6_D4_mean',\n 'uid6_D4_std', 'uid6_V67_mean', 'uid6_V67_std', 'uid6_V320_mean',\n 'uid6_V320_std', 'uid6_M5_mean', 'uid6_M5_std', 'uid6_M6_mean',\n 'uid6_M6_std', 'uid3_V67_mean', 'uid3_V67_std', 'uid3_V83_mean',\n 'uid3_V83_std', 'uid6_fq_enc', 'card4_fq_enc', 'card6_fq_enc',\n 'ProductCD_fq_enc', 'M4_fq_enc', 'addr_fq_enc', 'R_emaildomain_V118_mean',\n 'R_emaildomain_V118_std', 'R_emaildomain_V119_mean',\n 'R_emaildomain_V119_std', 'card1_V20_mean', 'card1_V20_std',\n 'card1_V151_mean', 'card1_V151_std', 'card1_V67_mean', 'card1_V67_std',\n 'hour_V116_mean', 'hour_V116_std', 'V1max', 'V2max', 'V3max', 'V4max',\n 'V5max', 'V6max', 'V7max', 'V8max', 'V9max', 'V10max', 'V11max', 'V12max',\n 'V13max', 'V14max', 'V15max', 'V16max', 'V17max', 'V18max', 'V19max',\n 'V20max', 'V21max', 'V22max', 'V23max', 'V24max', 'V25max', 'V26max',\n 'V27max', 'V28max', 'V29max', 'V30max', 'V31max', 'V32max', 'V33max',\n 'V34max', 'V35max', 'V36max', 'V37max', 'V38max', 'V39max', 'V40max',\n 'V41max', 'V42max', 'V43max', 'V44max', 'V45max', 'V46max', 'V47max',\n 'V48max', 'V49max', 'V50max', 'V51max', 'V52max', 'V53max', 'V54max',\n 'V55max', 'V56max', 'V57max', 'V58max', 'V59max', 'V60max', 'V61max',\n 'V62max', 'V63max', 'V64max', 'V65max', 'V66max', 'V67max', 'V68max',\n 'V69max', 'V70max', 'V71max', 'V72max', 'V73max', 'V74max', 'V75max',\n 'V76max', 'V77max', 'V78max', 'V79max', 'V80max', 'V81max', 'V82max',\n 'V83max', 'V84max', 'V85max', 'V86max', 'V87max', 'V88max', 'V89max',\n 'V90max', 'V91max', 'V92max', 'V93max', 'V94max', 'V95max', 'V96max',\n 'V97max', 'V98max', 'V99max', 'V100max', 'V101max', 'V102max', 'V103max',\n 'V104max', 'V105max', 'V106max', 'V107max', 'V108max', 'V109max',\n 'V110max', 'V111max', 'V112max', 'V113max', 'V114max', 'V115max',\n 'V116max', 'V117max', 'V118max', 'V119max', 'V120max', 'V121max',\n 'V122max', 'V123max', 'V124max', 'V125max', 'V126max', 'V127max',\n 'V128max', 'V129max', 'V130max', 'V131max', 'V132max', 'V133max',\n 'V134max', 'V135max', 'V136max', 'V137max', 'V138max', 'V139max',\n 'V140max', 'V141max', 'V142max', 'V143max', 'V144max', 'V145max',\n 'V146max', 'V147max', 'V148max', 'V149max', 'V150max', 'V151max',\n 'V152max', 'V153max', 'V154max', 'V155max', 'V156max', 'V157max',\n 'V158max', 'V159max', 'V160max', 'V161max', 'V162max', 'V163max',\n 'V164max', 'V165max', 'V166max', 'V167max', 'V168max', 'V169max',\n 'V170max', 'V171max', 'V172max', 'V173max', 'V174max', 'V175max',\n 'V176max', 'V177max', 'V178max', 'V179max', 'V180max', 'V181max',\n 'V182max', 'V183max', 'V184max', 'V185max', 'V186max', 'V187max',\n 'V188max', 'V189max', 'V190max', 'V191max', 'V192max', 'V193max',\n 'V194max', 'V195max', 'V196max', 'V197max', 'V198max', 'V199max',\n 'V200max', 'V201max', 'V202max', 'V203max', 'V204max', 'V205max',\n 'V206max', 'V207max', 'V208max', 'V209max', 'V210max', 'V211max',\n 'V212max', 'V213max', 'V214max', 'V215max', 'V216max', 'V217max',\n 'V218max', 'V219max', 'V220max', 'V221max', 'V222max', 'V223max',\n 'V224max', 'V225max', 'V226max', 'V227max', 'V228max', 'V229max',\n 'V230max', 'V231max', 'V232max', 'V233max', 'V234max', 'V235max',\n 'V236max', 'V237max', 'V238max', 'V239max', 'V240max', 'V241max',\n 'V242max', 
'V243max', 'V244max', 'V245max', 'V246max', 'V247max',\n 'V248max', 'V249max', 'V250max', 'V251max', 'V252max', 'V253max',\n 'V254max', 'V255max', 'V256max', 'V257max', 'V258max', 'V259max',\n 'V260max', 'V261max', 'V262max', 'V263max', 'V264max', 'V265max',\n 'V266max', 'V267max', 'V268max', 'V269max', 'V270max', 'V271max',\n 'V272max', 'V273max', 'V274max', 'V275max', 'V276max', 'V277max',\n 'V278max', 'V279max', 'V280max', 'V281max', 'V282max', 'V283max',\n 'V284max', 'V285max', 'V286max', 'V287max', 'V288max', 'V289max',\n 'V290max', 'V291max', 'V292max', 'V293max', 'V294max', 'V295max',\n 'V296max', 'V297max', 'V298max', 'V299max', 'V300max', 'V301max',\n 'V302max', 'V303max', 'V304max', 'V305max', 'V306max', 'V307max',\n 'V308max', 'V309max', 'V310max', 'V311max', 'V312max', 'V313max',\n 'V314max', 'V315max', 'V316max', 'V317max', 'V318max', 'V319max',\n 'V320max', 'V321max', 'V322max', 'V323max', 'V324max', 'V325max',\n 'V326max', 'V327max', 'V328max', 'V329max', 'V330max', 'V331max',\n 'V332max', 'V333max', 'V334max', 'V335max', 'V336max', 'V337max',\n 'V338max', 'V339max', 'ntrans', 'min_amt', 'mean_amt', 'max_amt',\n 'num_trans_ints', 'minC1', 'minC2', 'minC3', 'minC4', 'minC5', 'minC6',\n 'minC7', 'minC8', 'minC9', 'minC10', 'minC11', 'minC12', 'minC13',\n 'minC14', 'maxC1', 'maxC2', 'maxC3', 'maxC4', 'maxC5', 'maxC6', 'maxC7',\n 'maxC8', 'maxC9', 'maxC10', 'maxC11', 'maxC12', 'maxC13', 'maxC14',\n 'countC1_inc', 'countC2_inc', 'countC3_inc', 'countC4_inc', 'countC5_inc',\n 'countC6_inc', 'countC7_inc', 'countC8_inc', 'countC9_inc',\n 'countC10_inc', 'countC11_inc', 'countC12_inc', 'countC13_inc',\n 'countC14_inc', 'ndistM1', 'ndistM2', 'ndistM3', 'ndistM4', 'ndistM5',\n 'ndistM6', 'ndistM7', 'ndistM8', 'ndistM9']\n\ndf_av = pd.read_csv('../notebooks/AV/av002-output.csv')\nBAD_AV_FEATURES = df_av.loc[df_av['cv'].replace('Running',1) >= AV_THRESHOLD]['feature'].tolist()\n\nFEATURES = [f for f in FEATURES if f not in BAD_AV_FEATURES]\n\nCAT_FEATURES = ['ProductCD', 'card4', 'card6', 'id_12', 'id_13', 'id_14', 'id_15', 'id_16',\n 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_23', 'id_24',\n 'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_32', 'id_34', 'id_35',\n 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'M4','P_emaildomain',\n 'R_emaildomain', 'addr1', 'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8',\n 'M9', 'ProductCD_W_95cents','ProductCD_W_00cents','ProductCD_W_50cents',\n 'ProductCD_W_50_95_0_cents','ProductCD_W_NOT_50_95_0_cents']\n\nCAT_FEATURES = [c for c in CAT_FEATURES if c in FEATURES]\n\nX = train_df[FEATURES].copy()\ny = train_df[TARGET].copy()\nX_test = test_df[FEATURES].copy()\n\nX = X.fillna(-9999)\nX_test = X_test.fillna(-9999)\n\n\nlogger.info('Running with features...')\nlogger.info(FEATURES)\nlogger.info(f'Target is {TARGET}')\n\n\nupdate_tracking(run_id, \"n_features\", len(FEATURES), integer=True)\n\n\n############################\n#### TRAIN MODELS FUNCTIONS\n############################\n\ndef train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):\n train_dataset = Pool(data=X_train, label=y_train, cat_features=CAT_FEATURES)\n valid_dataset = Pool(data=X_valid, label=y_valid, cat_features=CAT_FEATURES)\n test_dataset = Pool(data=X_test, cat_features=CAT_FEATURES)\n\n model = CatBoostClassifier(\n iterations=N_ESTIMATORS,\n learning_rate=LEARNING_RATE,\n depth=DEPTH,\n eval_metric=EVAL_METRIC,\n verbose=VERBOSE,\n random_state=RANDOM_STATE,\n thread_count=N_THREADS,\n 
task_type=\"GPU\")\n\n model.fit(\n train_dataset,\n eval_set=valid_dataset,\n early_stopping_rounds=EARLY_STOPPING_ROUNDS,\n )\n y_pred_valid = model.predict_proba(valid_dataset)[:,1]\n y_pred = model.predict_proba(test_dataset)[:,1]\n\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = model.feature_names_\n fold_importance[\"importance\"] = model.get_feature_importance()\n fold_importance[\"fold\"] = fold_n + 1\n feature_importance = pd.concat([feature_importance, fold_importance],\n axis=0)\n best_iteration = model.best_iteration_\n return y_pred, y_pred_valid, feature_importance, best_iteration\n\n\nlgb_params = {\n 'objective':'binary',\n 'boosting_type':'gbdt',\n 'metric': EVAL_METRIC,\n 'n_jobs':N_THREADS,\n 'learning_rate':LEARNING_RATE,\n 'num_leaves': 2**8,\n 'max_depth':DEPTH,\n 'tree_learner':'serial',\n 'colsample_bytree': 0.85,\n 'subsample_freq':1,\n 'subsample':0.85,\n 'n_estimators':N_ESTIMATORS,\n 'max_bin':255,\n 'verbose':-1,\n 'seed': RANDOM_STATE,\n #'early_stopping_rounds':EARLY_STOPPING_ROUNDS,\n 'reg_alpha':0.3,\n 'reg_lambda':0.243,\n #'categorical_feature': CAT_FEATURES\n }\n# lgb_params = {\n# 'min_data_in_leaf': 106,\n# 'num_leaves': 500,\n# 'learning_rate': LEARNING_RATE, #0.008,\n# 'min_child_weight': 0.03454472573214212,\n# 'bagging_fraction': 0.4181193142567742,\n# 'feature_fraction': 0.3797454081646243,\n# 'reg_lambda': 0.6485237330340494,\n# 'reg_alpha': 0.3899927210061127,\n# 'max_depth': DEPTH, #-1,\n# 'objective': 'binary',\n# 'seed': RANDOM_STATE, #13,\n# 'feature_fraction_seed': RANDOM_STATE, #13,\n# 'bagging_seed': RANDOM_STATE, #13,\n# 'drop_seed': RANDOM_STATE, #13,\n# 'data_random_seed': RANDOM_STATE, #13,\n# 'boosting_type': 'gbdt',\n# 'verbose': 1,\n# 'metric':'auc',\n# 'n_estimators':N_ESTIMATORS,\n# }\n\n\ndef train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):\n X_train = X_train.copy()\n X_valid = X_valid.copy()\n X_test = X_test.copy()\n if len(CAT_FEATURES) > 0:\n X_train[CAT_FEATURES] = X_train[CAT_FEATURES].astype('category')\n X_valid[CAT_FEATURES] = X_valid[CAT_FEATURES].astype('category')\n X_test[CAT_FEATURES] = X_test[CAT_FEATURES].astype('category')\n\n model = lgb.LGBMClassifier(**lgb_params)\n\n model.fit(X_train, y_train,\n eval_set = [(X_train, y_train),\n (X_valid, y_valid)],\n verbose = VERBOSE,\n early_stopping_rounds=EARLY_STOPPING_ROUNDS)\n\n y_pred_valid = model.predict_proba(X_valid)[:,1]\n y_pred = model.predict_proba(X_test)[:,1]\n\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = X_train.columns\n fold_importance[\"importance\"] = model.feature_importances_\n fold_importance[\"fold\"] = fold_n + 1\n feature_importance = pd.concat([feature_importance, fold_importance],\n axis=0)\n best_iteration = model.best_iteration_\n return y_pred, y_pred_valid, feature_importance, best_iteration\n\n################################\n# Dataframes for storing results\n#################################\n\nfeature_importance = pd.DataFrame()\noof = np.zeros(len(X))\npred = np.zeros(len(X_test))\noof_df = train_df[['isFraud']].copy()\noof_df['oof'] = np.nan\noof_df['fold'] = np.nan\nscores = []\nbest_iterations = []\n\ndel train_df, test_df\ngc.collect()\n\nfor fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y)):\n X_train = X.iloc[train_idx]\n y_train = y.iloc[train_idx]\n X_valid = X.iloc[valid_idx]\n y_valid = y.iloc[valid_idx]\n\n if MODEL_TYPE == \"catboost\":\n y_pred, y_pred_valid, feature_importance, best_iteration = 
train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)\n if MODEL_TYPE == 'lightgbm':\n y_pred, y_pred_valid, feature_importance, best_iteration = train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)\n best_iterations.append(best_iteration)\n\n fold_score = roc_auc_score(y_valid, y_pred_valid)\n scores.append(fold_score)\n\n update_tracking(run_id, \"AUC_f{}\".format(fold_n + 1),\n fold_score,\n integer=False,)\n logger.info('Fold {} of {} CV mean AUC score: {:.4f}. Best iteration {}'.format(fold_n + 1,\n N_FOLDS,\n fold_score,\n best_iteration))\n oof_df.iloc[valid_idx, oof_df.columns.get_loc('oof')] = y_pred_valid.reshape(-1)\n oof_df.iloc[valid_idx, oof_df.columns.get_loc('fold')] = fold_n + 1\n pred += y_pred\n\nupdate_tracking(run_id, 'avg_best_iteration',\n np.mean(best_iterations),\n integer=True)\n\n###############\n# Store Results\n###############\npred /= N_FOLDS\nscore = np.mean(scores)\nsub = pd.read_csv('../input/sample_submission.csv')\nsub['isFraud'] = pred\nsub.to_csv(f'../sub/sub_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv', index=False)\noof_df.to_csv(f'../oof/oof_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')\nlogger.info('CV mean AUC score: {:.4f}, std: {:.4f}.'.format(np.mean(scores),\n np.std(scores)))\ntotal_score = roc_auc_score(oof_df['isFraud'], oof_df['oof'])\nfeature_importance.to_csv(f'../fi/fi_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')\n\nupdate_tracking(run_id, \"AUC\",\n total_score,\n integer=False,)\nlogger.info('OOF AUC Score: {:.4f}'.format(total_score))\nend = timer()\nupdate_tracking(run_id, \"training_time\", (end - start), integer=True)\nlogger.info('Done!')\n" ]
[ [ "pandas.DataFrame", "numpy.mean", "numpy.std", "pandas.concat", "sklearn.model_selection.KFold", "pandas.read_csv", "pandas.read_parquet", "sklearn.metrics.roc_auc_score" ] ]
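The training loop in the record above follows the standard out-of-fold (OOF) pattern: each fold's validation predictions fill that fold's slice of an OOF vector, scored once at the end, while test predictions are accumulated across folds and averaged. A compact, self-contained sketch of the pattern on synthetic data, with logistic regression standing in for the LightGBM/CatBoost models:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold

rng = np.random.default_rng(529)
X = rng.normal(size=(1000, 5))
y = (X[:, 0] + rng.normal(scale=0.5, size=1000) > 0).astype(int)
X_test = rng.normal(size=(200, 5))

N_FOLDS = 5
oof = np.zeros(len(X))        # one out-of-fold prediction per training row
pred = np.zeros(len(X_test))  # accumulated test predictions

for train_idx, valid_idx in KFold(n_splits=N_FOLDS).split(X):
    model = LogisticRegression().fit(X[train_idx], y[train_idx])
    oof[valid_idx] = model.predict_proba(X[valid_idx])[:, 1]
    pred += model.predict_proba(X_test)[:, 1]

pred /= N_FOLDS  # average the per-fold test predictions
print("OOF AUC: {:.4f}".format(roc_auc_score(y, oof)))
```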