Columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
anubrata/PrototExNL
[ "f19e0a92f3a06771ef110fde28f6eeb4a0916ac7" ]
[ "preprocess.py" ]
[ "import pathlib\nfrom pathlib import Path\nimport numpy as np\nimport torch\n# from pytorch_pretrained_bert import BertTokenizer\n\nlabels_set={'Appeal_to_Authority',\n 'Appeal_to_fear-prejudice',\n 'Bandwagon',\n 'Black-and-White_Fallacy',\n 'Causal_Oversimplification',\n 'Doubt',\n 'Exaggeration,Minimisation',\n 'Flag-Waving',\n 'Loaded_Language',\n 'Name_Calling,Labeling',\n 'O',\n 'Obfuscation,Intentional_Vagueness,Confusion',\n 'Red_Herring',\n 'Reductio_ad_hitlerum',\n 'Repetition',\n 'Slogans',\n 'Straw_Men',\n 'Thought-terminating_Cliches',\n 'Whataboutism'}\n\n## Additional classes and functions for data preparation for ProtoTexNL\n\ndef create_labels(dataset):\n temp=[ set(i)-set(\"O\") for d in dataset[1] for i in d]\n return [ next(iter(i)) if len(i)>0 else \"O\" for i in temp]\n\nclass BinaryClassDataset(torch.utils.data.Dataset):\n def __init__(self, x,y,y_txt,tokenizer,it_is_train=1,pos_or_neg=None,fix_seq_len=256,balance=False,\n specific_label=None,for_protos=False):\n# temp=tokenizer(x)\n# self.input_ids,self.attention_mask=temp[\"input_ids\"],temp[\"attention_mask\"]\n self.x=[]\n self.attn_mask=[]\n self.labels_mask=[]\n self.y_txt=[]\n self.y=[]\n self.labels_ids={}\n for i in labels_set:\n self.labels_ids[i]=len(self.labels_ids)\n self.y_fine_int=[]\n it_is_train_proxy=it_is_train\n for split_sent,y_tags,y_sent in zip(x,y_txt,y):\n if specific_label is not None and specific_label!=y_sent: continue\n if pos_or_neg==\"pos\" and y_sent==\"O\": continue\n elif pos_or_neg==\"neg\" and y_sent!=\"O\": continue \n if y_sent==\"O\":\n it_is_train=0\n else:\n it_is_train=it_is_train_proxy \n tmp=tokenizer(split_sent,is_split_into_words=False)[\"input_ids\"]\n tmp_x=[]\n tmp_attn=[]\n tmp_y=[]\n for i,chunk in enumerate(tmp):\n if for_protos and y_tags[i]==\"O\":\n continue\n tmp_y.extend([y_tags[i]]*len(chunk))\n if y_tags[i]!=\"O\":\n mask=1\n else:\n if it_is_train:\n mask=0\n else:\n mask=1\n tmp_x.extend(chunk[1:-1])\n tmp_attn.extend([mask]*(len(chunk)-2))\n tmp_x.append(tokenizer.eos_token_id)\n tmp_x.insert(0,tokenizer.bos_token_id)\n tmp_attn.append(tmp_attn[-1])\n tmp_attn.insert(0,tmp_attn[0])\n self.x.append(tmp_x)\n self.attn_mask.append(tmp_attn)\n self.y_txt.append(tmp_y)\n self.y.append(1 if y_sent!=\"O\" else 0)\n self.y_fine_int.append(self.labels_ids[y_sent])\n for tokid_sent in self.x:\n tokid_sent.extend([tokenizer.pad_token_id]*(fix_seq_len-len(tokid_sent)))\n for mask_vec in self.attn_mask:\n mask_vec.extend([0]*(fix_seq_len-len(mask_vec)))\n# self.y=[1 if i!=\"O\" else 0 for i in y]\n if balance:\n num_pos=np.sum(self.y)\n assert num_pos<len(self.y_fine_int)//2\n# print(num_pos,len(self.y))\n \n pos_indices=np.random.choice([i for i in range(len(self.y)) if self.y[i]==1],\n size=len(self.y)-2*num_pos,replace=True)\n self.x.extend([self.x[i] for i in pos_indices])\n self.y.extend([1 for i in pos_indices])\n self.y_fine_int.extend([self.y_fine_int[i] for i in pos_indices])\n self.attn_mask.extend([self.attn_mask[i] for i in pos_indices])\n# print(np.sum(self.y),len(self.y))\n self.fix_seq_len=fix_seq_len\n def __len__(self):\n return len(self.x)\n def __getitem__(self, idx):\n return self.x[idx],self.attn_mask[idx],self.y[idx]\n def collate_fn(self,batch): \n return (torch.LongTensor([i[0] for i in batch]),\n torch.Tensor([i[1] for i in batch]),\n torch.LongTensor([i[2] for i in batch]))\n\n\n## Preprocess function from the Propaganda Detection paper \n## https://propaganda.math.unipd.it/fine-grained-propaganda-emnlp.html\ndef read_data(directory):\n ids 
= []\n texts = []\n labels = []\n for f in directory.glob('*.txt'):\n id = f.name.replace('article', '').replace('.txt','')\n ids.append(id)\n texts.append(f.read_text())\n labels.append(parse_label(f.as_posix().replace('.txt', '.labels.tsv')))\n # labels can be empty \n return ids, texts, labels\n\ndef parse_label(label_path):\n labels = []\n f= Path(label_path)\n \n if not f.exists():\n return labels\n\n for line in open(label_path):\n parts = line.strip().split('\\t')\n labels.append([int(parts[2]), int(parts[3]), parts[1], 0, 0])\n labels = sorted(labels) \n\n if labels:\n length = max([label[1] for label in labels]) \n visit = np.zeros(length)\n res = []\n for label in labels:\n if sum(visit[label[0]:label[1]]):\n label[3] = 1\n else:\n visit[label[0]:label[1]] = 1\n res.append(label)\n return res \n else:\n return labels\n\ndef clean_text(articles, ids):\n texts = []\n for article, id in zip(articles, ids):\n sentences = article.split('\\n')\n start = 0\n end = -1\n res = []\n for sentence in sentences:\n start = end + 1\n end = start + len(sentence) # length of sequence \n if sentence != \"\": # if not empty line\n res.append([id, sentence, start, end])\n texts.append(res)\n return texts\n\ndef make_dataset(directory):\n ids, texts, labels = read_data(directory)\n texts = clean_text(texts, ids)\n res = []\n for text, label in zip(texts, labels):\n # making positive examples\n tmp = [] \n pos_ind = [0] * len(text)\n for l in label:\n for i, sen in enumerate(text):\n if l[0] >= sen[2] and l[0] < sen[3] and l[1] > sen[3]:\n l[4] = 1\n tmp.append(sen + [l[0], sen[3], l[2], l[3], l[4]])\n pos_ind[i] = 1\n l[0] = sen[3] + 1\n elif l[0] != l[1] and l[0] >= sen[2] and l[0] < sen[3] and l[1] <= sen[3]: \n tmp.append(sen + l)\n pos_ind[i] = 1\n # making negative examples\n dummy = [0, 0, 'O', 0, 0]\n for k, sen in enumerate(text):\n if pos_ind[k] != 1:\n tmp.append(sen+dummy)\n res.append(tmp) \n return res\n \ndef make_bert_testset(dataset):\n\n words, tags, ids= [], [], []\n for article in dataset:\n tmp_doc, tmp_label, tmp_id = [], [], []\n tmp_sen = article[0][1]\n tmp_i = article[0][0]\n label = ['O'] * len(tmp_sen.split(' '))\n for sentence in article:\n tokens = sentence[1].split(' ')\n token_len = [len(token) for token in tokens]\n if len(sentence) == 9: # label exists\n if tmp_sen != sentence[1]:\n tmp_label.append(label)\n tmp_doc.append(tmp_sen.split(' '))\n tmp_id.append(tmp_i)\n label = ['O'] * len(token_len)\n start = sentence[4] - sentence[2] \n end = sentence[5] - sentence[2]\n if sentence[6] != 'O':\n for i in range(1, len(token_len)): \n token_len[i] += token_len[i-1] + 1\n token_len[-1] += 1\n token_len = np.asarray(token_len)\n s_ind = np.min(np.where(token_len > start))\n tmp = np.where(token_len >= end) \n if len(tmp[0]) != 0:\n e_ind = np.min(tmp)\n else: \n e_ind = s_ind\n for i in range(s_ind, e_ind+1):\n label[i] = sentence[6]\n tmp_sen = sentence[1]\n tmp_i = sentence[0]\n else:\n tmp_doc.append(tokens)\n tmp_id.append(sentence[0])\n if len(sentence) == 9:\n tmp_label.append(label)\n tmp_doc.append(tmp_sen.split(' '))\n tmp_id.append(tmp_i)\n words.append(tmp_doc) \n tags.append(tmp_label)\n ids.append(tmp_id)\n return words, tags, ids\n\n\ndef make_bert_dataset(dataset):\n words, tags, ids= [], [], []\n for article in dataset:\n tmp_doc, tmp_label, tmp_id = [], [], []\n tmp_sen = article[0][1]\n tmp_i = article[0][0]\n label = ['O'] * len(tmp_sen.split(' '))\n for sentence in article:\n tokens = sentence[1].split(' ')\n token_len = [len(token) for token in 
tokens]\n if len(sentence) == 9: # label exists\n if tmp_sen != sentence[1] or sentence[7]:\n tmp_label.append(label)\n tmp_doc.append(tmp_sen.split(' '))\n tmp_id.append(tmp_i)\n if tmp_sen != sentence[1]:\n label = ['O'] * len(token_len)\n start = sentence[4] - sentence[2] \n end = sentence[5] - sentence[2]\n if sentence[6] != 'O':\n for i in range(1, len(token_len)): \n token_len[i] += token_len[i-1] + 1\n token_len[-1] += 1\n token_len = np.asarray(token_len)\n s_ind = np.min(np.where(token_len > start))\n tmp = np.where(token_len >= end) \n if len(tmp[0]) != 0:\n e_ind = np.min(tmp)\n else: \n e_ind = s_ind\n for i in range(s_ind, e_ind+1):\n label[i] = sentence[6]\n tmp_sen = sentence[1]\n tmp_i = sentence[0]\n else:\n tmp_doc.append(tokens)\n tmp_id.append(sentence[0])\n if len(sentence) == 9:\n tmp_label.append(label)\n tmp_doc.append(tmp_sen.split(' '))\n tmp_id.append(tmp_i)\n words.append(tmp_doc) \n tags.append(tmp_label)\n ids.append(tmp_id)\n return words, tags, ids\n\n\ndef mda(dataset):\n words, tags, ids= [], [], []\n for article in dataset:\n tmp_doc, tmp_label, tmp_id = [], [], []\n for sentence in article:\n tokens = sentence[1].split(' ')\n token_len = [len(token) for token in tokens]\n if len(sentence) == 9: # label exists\n start = sentence[4] - sentence[2]\n end = sentence[5] - sentence[2]\n label = ['O'] * len(token_len)\n if sentence[6] != 'O':\n for i in range(1, len(token_len)):\n token_len[i] += token_len[i-1] + 1\n token_len[-1] += 1\n token_len = np.asarray(token_len)\n s_ind = np.min(np.where(token_len > start))\n tmp = np.where(token_len >= end) \n if len(tmp[0]) != 0:\n e_ind = np.min(tmp)\n else:\n e_ind = s_ind\n for i in range(s_ind, e_ind+1):\n label[i] = sentence[6]\n tmp_label.append(label)\n tmp_doc.append(tokens)\n tmp_id.append(sentence[0])\n words.append(tmp_doc)\n tags.append(tmp_label)\n ids.append(tmp_id)\n return words, tags, ids\n\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.min", "numpy.where", "torch.LongTensor", "torch.Tensor" ] ]
gdewael/cpg-transformer
[ "970248d192739af194c9e9df5823c9eda8c2fa75" ]
[ "cpg_transformer/cpgtransformer.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nfrom torchmetrics.functional import auroc, accuracy, f1\nfrom cpg_transformer.blocks import MultiDimWindowTransformerLayer\nfrom cpg_transformer.blocks import CnnL2h128, CnnL3h128, RnnL1, JointL2h512\n\n\nclass CpGEmbedder(nn.Module):\n def __init__(self, hidden_size, mode = 'binary'):\n super().__init__()\n if mode == 'binary':\n self.CpG_embed = nn.Embedding(3, hidden_size)\n self.forward = self.forward_binary\n elif mode == 'continuous':\n self.CpG_embed_linear = nn.Linear(1, hidden_size)\n self.mask_embed = self._init_mask(nn.Parameter(torch.Tensor(1, hidden_size)))\n self.forward = self.forward_continuous\n \n def forward_binary(self, y):\n return self.CpG_embed(y.long())\n \n def forward_continuous(self, y):\n z = self.CpG_embed_linear(y.unsqueeze(-1).to(self.CpG_embed_linear.weight.dtype) - 1)\n if (y == 0).any():\n z[(y == 0)] = self.mask_embed\n return z\n \n def _init_mask(self, mask):\n bound = 1/mask.size(1)**0.5\n return nn.init.uniform_(mask, -bound, bound) \n\nclass CpGTransformer(pl.LightningModule):\n def __init__(self, n_cells, RF=1001, n_conv_layers=2, CNN_do=.0, DNA_embed_size=32,\n cell_embed_size=32, CpG_embed_size=32, transf_hsz=64, transf_do=.20,\n act='relu', n_transformers=4, n_heads=8, head_dim=8, window=21,\n mode='axial', data_mode = 'binary', layernorm=True,\n lr=5e-4, lr_decay_factor=.90, warmup_steps=1000):\n super().__init__()\n assert (n_conv_layers == 2) or (n_conv_layers == 3), 'Number of conv layers should be 2 or 3.'\n self.RF = RF\n self.RF2 = int((self.RF-1)/2)\n \n # DNA embed:\n if n_conv_layers == 2:\n self.CNN = nn.Sequential(CnnL2h128(dropout=CNN_do, RF=RF), nn.ReLU(), nn.Linear(128,DNA_embed_size))\n else:\n self.CNN = nn.Sequential(CnnL3h128(dropout=CNN_do, RF=RF), nn.ReLU(), nn.Linear(128,DNA_embed_size))\n # cell embed:\n self.cell_embed = nn.Embedding(n_cells, cell_embed_size)\n # CpG embed:\n self.CpG_embed = CpGEmbedder(CpG_embed_size, mode = data_mode)\n \n self.combine_embeds = nn.Sequential(nn.Linear(cell_embed_size+CpG_embed_size+DNA_embed_size,\n transf_hsz), nn.ReLU())\n \n TF_layers = []\n for i in range(n_transformers):\n TF_layers += [MultiDimWindowTransformerLayer(transf_hsz, head_dim, n_heads,\n transf_hsz*4,dropout=transf_do,\n window=window, activation=act,\n layernorm=layernorm, mode=mode)]\n self.transformer = nn.Sequential(*TF_layers)\n self.output_head = nn.Linear(transf_hsz,1)\n \n self.save_hyperparameters()\n\n def process_batch(self, batch):\n x, y_orig, y_masked, pos, ind_train, cell_indices = batch\n x, y_orig = x.to(torch.long), y_orig.to(self.dtype)\n pos = pos.to(torch.long)\n return (x, y_masked, pos, cell_indices), (y_orig, ind_train)\n \n def forward(self, x, y_masked, pos, cells):\n bsz, seqlen, n_cells = y_masked.shape[:3]\n DNA_embed = self.CNN(x.view(-1,self.RF)).view(bsz, seqlen, -1) # bsz, seqlen, DNA_embed_size\n cell_embed = self.cell_embed(cells) # bsz, n_rep, embed_size\n CpG_embed = self.CpG_embed(y_masked) # bsz, seqlen, n_rep, cpg_size\n \n DNA_embed = DNA_embed.unsqueeze(-2).expand(-1,-1,n_cells,-1)\n cell_embed = cell_embed.unsqueeze(1).expand(-1,seqlen,-1,-1)\n x = torch.cat((CpG_embed, cell_embed, DNA_embed), -1)\n x = self.combine_embeds(x)\n\n x, _ = self.transformer((x, pos))\n return self.output_head(x).squeeze(-1)\n \n def training_step(self, batch, batch_idx):\n inputs, (y, ind_train) = self.process_batch(batch)\n y_hat = self(*inputs)\n\n y_hat = 
torch.diagonal(y_hat[:,ind_train[:,:,0], ind_train[:,:,1]]).reshape(-1)\n y = torch.diagonal(y[:,ind_train[:,:,0], ind_train[:,:,1]]).reshape(-1)\n \n \n if self.hparams.data_mode == 'binary':\n loss = F.binary_cross_entropy_with_logits(y_hat, y-1)\n elif self.hparams.data_mode == 'continuous':\n loss = F.mse_loss(y_hat, y-1)\n \n self.log('train_loss', loss, sync_dist=True)\n return loss\n \n def validation_step(self, batch, batch_idx):\n inputs, (y, ind_train) = self.process_batch(batch)\n y_hat = self(*inputs)\n\n y_hat = torch.diagonal(y_hat[:,ind_train[:,:,0], ind_train[:,:,1]]).reshape(-1)\n y = torch.diagonal(y[:,ind_train[:,:,0], ind_train[:,:,1]]).reshape(-1)\n return torch.stack((y_hat, y-1))\n \n \n def validation_epoch_end(self, validation_step_outputs):\n validation_step_outputs = torch.cat(validation_step_outputs,1)\n y_hat = validation_step_outputs[0]\n y = validation_step_outputs[1]\n \n if self.hparams.data_mode == 'binary':\n loss = F.binary_cross_entropy_with_logits(y_hat, y)\n y = y.to(torch.int)\n y_hat = torch.sigmoid(y_hat) \n self.log('val_loss', loss, sync_dist=True)\n self.log('AUROC', auroc(y_hat, y), sync_dist=True)\n self.log('F1', f1(y_hat, y), sync_dist=True)\n self.log('acc', accuracy(y_hat, y), sync_dist=True)\n \n elif self.hparams.data_mode == 'continuous':\n loss = F.mse_loss(y_hat, y)\n self.log('val_loss', loss, sync_dist=True)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr)\n lambd = lambda epoch: self.hparams.lr_decay_factor\n lr_scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lambd)\n return [optimizer], [lr_scheduler]\n \n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n # warm up lr\n if self.trainer.global_step < self.hparams.warmup_steps:\n lr_scale = min(1., float(self.trainer.global_step + 1) / self.hparams.warmup_steps)\n for pg in optimizer.param_groups:\n pg['lr'] = lr_scale * self.hparams.lr\n\n # update params\n optimizer.step(closure=optimizer_closure)\n \n def n_params(self):\n params_per_layer = [(name, p.numel()) for name, p in self.named_parameters()]\n total_params = sum(p.numel() for p in self.parameters())\n params_per_layer += [('total', total_params)]\n return params_per_layer\n \n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.cat", "torch.sigmoid", "torch.stack", "torch.diagonal", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.functional.mse_loss", "torch.optim.lr_scheduler.MultiplicativeLR", "torch.nn.init.uniform_", "torch.Tensor", "torch.nn.Embedding" ] ]
tmilliman/sirpy2
[ "f750e7ca861ff107e1bd9d081dcae315d547ecf8" ]
[ "sirpy2/sir2png.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nCreated Apr 7, 2017\nThis script creates a png image of the contents of a SIR file using\ndefault vmin/vmax values in file header\n\n@author: DGL at BYU\n\"\"\"\n\n# Imports\nimport os\nimport sys\nimport argparse\n\nimport numpy as np\n\nfrom .loadsir import loadsir\nimport png as png # PyPng module\nfrom matplotlib import pyplot as plt\n\n\n# Export function definition\ndef save_it(fname, img_dir=\".\"):\n \"\"\" Helper function to export figures \"\"\"\n if not os.access(img_dir, os.F_OK):\n os.mkdir(img_dir)\n plt.savefig(\"{0}/{1}.png\".format(img_dir, fname), dpi=150)\n # plt.savefig('{0}/{1}.pdf'.format(img_dir,fname))\n\n\n# Plot the lat/lon box over the sir\ndef sir2png(sir_fname, png_fname, vmin=0, vmax=0):\n \"\"\"\n sir2png(sir_fname,png_fname,vmin=vmin,vmax=vmax)\n\n sir_fname: input SIR file name\n png_fname: output png file name\n vmin,vmax: min,max values to display. uses SIR head default if vmin=vmax\n\n \"\"\"\n # Load in the SIR image\n sir = loadsir(sir_fname)\n # sir[0] is the image array; sir[1] is the header; sir[2]=iaopt; sir[3]=descript\n if vmin == vmax: # use default\n vmin = sir[1][49]\n vmax = sir[1][50]\n\n # create a .png image from the file data\n img = np.round((sir[0] - vmin) * 256.0 / (vmax - vmin))\n img[img > 255] = 255\n img[img < 0] = 0\n img = np.uint8(img)\n nsx = np.int(sir[1][0])\n nsy = np.int(sir[1][1])\n\n # write out as greyscale png\n f = open(png_fname, \"wb\")\n w = png.Writer(width=nsx, height=nsy, greyscale=True)\n w.write(f, img)\n f.close()\n print(\"png file %s created from %s\" % (png_fname, sir_fname))\n\n\ndef main(): # pass in a filename as argument\n\n \"\"\"\n convert SIR file to a PNG image\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Convert SIR file to PNG image\")\n\n # options\n parser.add_argument(\n \"--verbose\", \"-v\", help=\"Verbose output\", action=\"store_true\", default=False\n )\n\n parser.add_argument(\n \"--vmin\", help=\"minimum value\", nargs=1, default=0.0, type=float\n )\n\n parser.add_argument(\n \"--vmax\", help=\"maximum value\", nargs=1, default=0.0, type=float\n )\n\n parser.add_argument(\"--outfile\", \"-o\", help=\"Output PNG filename\", nargs=1)\n\n # positional arguments\n parser.add_argument(\"sirfile\", help=\"Input SIR filename\")\n\n # get arguments\n args = parser.parse_args()\n verbose = args.verbose\n vmin = args.vmin\n vmax = args.vmax\n sir_path = args.sirfile\n sir_file = os.path.basename(sir_path)\n sir_base, sir_ext = os.path.splitext(sir_file)\n if args.outfile is not None:\n outfile = args.outfile\n else:\n outfile = sir_base + \".png\"\n\n if verbose:\n print(\"verbose: {}\".format(verbose))\n print(\"vmin: {}\".format(vmin))\n print(\"vmax: {}\".format(vmax))\n print(\"SIR file: {}\".format(sir_file))\n print(\"PNG file: {}\".format(outfile))\n\n vmin = 0\n vmax = 0\n\n # else: # no argument?, use a test image\n # sir_fname = \"greeni.sir\"\n # # sir_fname = 'queh-a-E2N04-10-10.sir'\n # png_fname = sir_fname + \".png\"\n\n sir2png(sir_path, outfile, vmin=vmin, vmax=vmax)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.round", "numpy.uint8", "numpy.int" ] ]
usgs/gems-tools-pro
[ "5227bee243d50bfabc611dec3a9da6732b4c2909" ]
[ "Scripts/GeMS_GeolexCheck_AGP2.py" ]
[ "#! python3\r\n\r\n\"\"\"\r\nGeologic Names Check\r\nname-fullname - This version checks for Geolex names in both the name and fullname and then processes the set \r\nwith the fewest entries to minimize the number of Geolex names and usages that are reported.\r\n\r\nArguments: \r\n DMU - GeMS DescriptionOfMapUnits table. Geodatabase, CSV, tab delimeted TXT, or DBF. Required.\r\n Extent - one or more (comma separated) state or US region abbreviations. Required.\r\n open report - open the Excel report file when finished. True (default) or False. Optional.\r\n \r\nEnclose any arguments with spaces within double-quotes.\r\n\"\"\"\r\n\r\n# INSTRUCTIONS FOR PYINSTALLER \r\n# (notes to myself for my specific setup to build the exe file necessary for the ArcMap version of this tool - ET)\r\n# 1. copy this file to \\exe\r\n# 2. be sure gems-tools-arcmap is currently pointing to the right environment - as of 1/12/21 that is names-check\r\n# 3. comment out arcpy\r\n# 4. find/replace arcpy.AddMessage with print\r\n# 5. run makeexe.bat\r\n\r\nimport os, sys\r\nimport string\r\nimport arcpy\r\nimport requests\r\nimport json\r\nfrom distutils.util import strtobool\r\nimport re\r\nimport pandas as pd\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.styles.borders import Border, Side\r\nfrom openpyxl.styles import Font, PatternFill, Alignment\r\nimport tempfile\r\nfrom GeMS_utilityFunctions import *\r\n\r\nversionString = \"GeMS_GeolexCheck_AGP2.py, 2/19/2021\"\r\nrawurl = 'https://raw.githubusercontent.com/usgs/gems-tools-pro/master/Scripts/GeMS_GeolexCheck_AGP2.py'\r\ncheckVersion(versionString, rawurl, 'gems-tools-pro')\r\n\r\n# initialize empty list to collect usage matches in order to avoid \r\n# displaying redundant matches.\r\nusages = []\r\n \r\n# STRING AND USAGE\r\ndef sanitize_text(usage_text):\r\n \"\"\"Clean up usage text so that it only includes alphanumeric characters \"\"\"\r\n # entire text might be enclosed in parantheses\r\n if usage_text[0] == '(' and usage_text[-1] == ')':\r\n usage_text = usage_text[1:len(usage_text) - 1]\r\n \r\n # remove all paranthetical phrases, inside of parantheses or square brackets\r\n usage_text = re.sub(r'\\([^)]*\\)', '', usage_text)\r\n # or square brackets\r\n usage_text = re.sub(r'\\[[^)]*\\]', '', usage_text)\r\n \r\n # strip all non-alphanumeric characters from string except for periods and apostrophes\r\n # to allow names like St. 
John's\r\n usage_text = re.sub(r\"[^\\w.']+\", ' ', usage_text)\r\n \r\n # replace multiple spaces with a single space\r\n usage_text = re.sub('[\\t+\\s+]',' ',usage_text)\r\n \r\n return usage_text.strip()\r\n \r\ndef sanitize_matches(list1, nametext):\r\n \"\"\"Remove names that occur in other, longer names, eg.,\r\n Saddle in Saddle Mountains or Basin in Basin City, etc.\r\n and returns a list; [position of Geolex name in fullname, Geolex name]\r\n position is first item in list so that the list can be sorted\"\"\"\r\n \r\n # remove duplicates\r\n list1 = set(list1)\r\n \r\n # make a dictionary with keys from the list\r\n match_d = dict.fromkeys(list1, '')\r\n list2 = list1\r\n for item1 in list1:\r\n for item2 in list2:\r\n if (item2.startswith(item1 + ' ') or\r\n item2.endswith(' ' + item1) or\r\n item2.find(' ' + item1 + ' ') != -1 and\r\n len(item2) > len(item1)):\r\n match_d[item1] = 'delete'\r\n\r\n # and now sort by position in the name text\r\n list3 = [item for item in list1 if not match_d[item] == 'delete']\r\n list4 = []\r\n for name in list3:\r\n list4.append([nametext.find(name), name])\r\n \r\n return sorted(list4)\r\n \r\ndef check_usage(glx_usage, fooname):\r\n \"\"\"Look for exact or partial match between the supplied name and usage\"\"\"\r\n usage = sanitize_text(glx_usage)\r\n if usage == fooname.strip() or fooname.strip().find(usage) >= 0:\r\n return True\r\n else:\r\n # there is no usage match\r\n return False\r\n \r\ndef ext_check(states_list, fn_ext):\r\n states_list = [foo.lower() for foo in states_list]\r\n fn_ext = [bar.lower() for bar in fn_ext]\r\n if set(states_list).intersection(set(fn_ext)):\r\n return True\r\n else:\r\n return False\r\n\r\ndef parse_age(age_str):\r\n return age_str.replace('\\r\\n', '\\n')\r\n \r\n# API \r\ndef units_query(name):\r\n \"\"\"Prepare and send the GET request\"\"\"\r\n units_api = r\"https://ngmdb.usgs.gov/connect/apiv1/geolex/units/?\"\r\n params = {'units_in': name}\r\n response = requests.get(units_api, params) #.text\r\n \r\n if not response.status_code == 200:\r\n arcpy.AddMessage(\"\") \r\n arcpy.AddMessage(f\"Server error {response.status_code} with the following url:\")\r\n arcpy.AddMessage(response.url)\r\n arcpy.AddMessage(\"The server may be down. 
Try again later or write to [email protected]\")\r\n arcpy.AddMessage(\"\") \r\n raise SystemError\r\n else:\r\n return response.json()['results']\r\n \r\n# EXCEL\r\ndef table_to_pandas_data_frame(feature_class):\r\n \"\"\"\r\n Load data into a Pandas Data Frame for subsequent analysis.\r\n :param feature_class: Input ArcGIS Feature Class.\r\n :param field_list: Fields for input.\r\n :return: Pandas DataFrame object.\r\n \r\n from https://joelmccune.com/arcgis-to-pandas-data-frame/\r\n discussion after that was posted suggests using a Spatially Enabled Dataframe\r\n https://developers.arcgis.com/python/guide/introduction-to-the-spatially-enabled-dataframe/\r\n but that requires install of ArcGIS API for Python which I can't guarantee will be \r\n available on any other computer\r\n \"\"\"\r\n field_list = ['hierarchykey', 'mapunit', 'name', 'fullname', 'age']\r\n return pd.DataFrame(\r\n arcpy.da.TableToNumPyArray(\r\n in_table=feature_class,\r\n field_names=field_list,\r\n skip_nulls=False,\r\n null_value = ''\r\n )\r\n )\r\n\r\ndef frame_it(d_path, ext_format):\r\n \"\"\"convert table to pandas dataframe\r\n gdbs and excel files need special consideration\r\n but character-delimited text files can be opened with read_table\r\n \"\"\"\r\n types = {'hierarchykey': str, 'name': str, 'fullname': str}\r\n \r\n # attempt to allow all cases of column names\r\n flds = ['hierarchykey', 'name', 'age', 'fullname', 'mapunit']\r\n if ext_format == 'gdb':\r\n # Have to look for case where this is being run from the EXE\r\n # Pyinstaller adds frozen flag to sys to figure this out\r\n if getattr(sys, 'frozen', False):\r\n try:\r\n # in this case, arcpy is not installed. Try using ogr2ogr with the off-chance\r\n # that it is installed and in the PATH\r\n # write the converted CSV to a temporary directory and then cast that to a data frame\r\n t_dir = tempfile.mkdtemp()\r\n t_dmu = os.path.join(t_dir, 'dmu.csv')\r\n gdb_p = os.path.dirname(d_path)\r\n dmu_table = os.path.basename(d_path)\r\n ogr_com = f'ogr2ogr -f CSV {t_dmu} {gdb_p} {dmu_table}'\r\n os.system(ogr_com)\r\n dmu_df = pd.read_csv(t_dmu, usecols=lambda x: x.lower() in flds, dtype=types)\r\n\r\n except:\r\n print(\"\")\r\n print(\"Cannot find ogr2ogr to convert file GDB table.\")\r\n print(\"Install GDAL binaries and make sure the location is in your PATH environment\") \r\n print(\"or convert the dmu to a CSV or Excel file and try again\")\r\n print(\"\")\r\n raise SystemError\r\n \r\n else:\r\n # to cast file GDB tables into a pandas data frame use\r\n # arcpy.da.TableToNumPyArray\r\n dmu_df = table_to_pandas_data_frame(d_path)\r\n\r\n elif ext_format == 'xls':\r\n file = os.path.dirname(d_path)\r\n sheet = os.path.basename(d_path)\r\n if sheet[-1:] == '$':\r\n sheet = sheet[:-1]\r\n dmu_df = pd.read_excel(file, sheet_name=sheet, engine=\"openpyxl\", usecols=lambda x: x.lower() in flds, dtype=types)\r\n \r\n elif ext_format == 'csv':\r\n dmu_df = pd.read_csv(d_path, usecols=lambda x: x.lower() in flds, dtype=types, keep_default_na=False)\r\n \r\n else:\r\n dmu_df = pd.read_table(d_path, usecols=lambda x: x.lower() in flds, dtype=types, keep_default_na=False)\r\n \r\n # smash all column names to lower case because we can't be sure of the case\r\n # in the input dmu\r\n dmu_df.columns = [c.lower() for c in dmu_df.columns]\r\n \r\n return dmu_df\r\n\r\ndef link(cell, link, display='link'):\r\n cell.value = '=HYPERLINK(\"%s\", \"%s\")' % (link, display)\r\n cell.font = Font(u='single', color='0000EE')\r\n\r\ndef format_excel(xlf):\r\n 
\"\"\"Format the output Excel spreadsheet\"\"\"\r\n wb = load_workbook(filename = xlf)\r\n ws = wb['Sheet1']\r\n #ws.delete_cols(1)\r\n\r\n # this is the regular Excel border style but it has to be applied after\r\n # applying the colors, which erases all borders\r\n border = Border(left=Side(border_style='thin', color='D3D3D3'),\r\n right=Side(border_style='thin', color='D3D3D3'),\r\n top=Side(border_style='thin', color='D3D3D3'),\r\n bottom=Side(border_style='thin', color='D3D3D3'))\r\n \r\n # black outline border for header column names\r\n blackBorder = Border(left=Side(border_style='thin', color='000000'),\r\n right=Side(border_style='thin', color='000000'),\r\n top=Side(border_style='thin', color='000000'),\r\n bottom=Side(border_style='thin', color='000000'))\r\n\r\n greenFill = PatternFill(start_color='ebf1de',\r\n end_color = 'ebf1de',\r\n fill_type='solid')\r\n\r\n yellowFill = PatternFill(start_color='ffff99',\r\n end_color = 'ffff99',\r\n fill_type='solid')\r\n\r\n orangeFill = PatternFill(start_color='fabf8f',\r\n end_color = 'fabf8f',\r\n fill_type='solid')\r\n\r\n ws.insert_rows(1)\r\n ws['A1'] = \"DMU Contents\"\r\n ws['A1'].font = Font(bold=True)\r\n ws['A1'].alignment = Alignment(horizontal='center')\r\n ws.merge_cells('A1:F1')\r\n\r\n ws['G1'] = \"Geolex Results\"\r\n ws['G1'].font = Font(bold=True)\r\n ws['G1'].alignment = Alignment(horizontal='center')\r\n ws.merge_cells('G1:L1')\r\n\r\n ws['M1'] = \"Author Review\"\r\n ws['M1'].font = Font(bold=True)\r\n ws['M1'].alignment = Alignment(horizontal='center')\r\n ws.merge_cells('M1:Q1')\r\n\r\n maxRow = ws.max_row + 1\r\n \r\n # color the sections\r\n for colNum in range(1, 7):\r\n for rowNum in range(1, maxRow):\r\n ws.cell(row=rowNum, column=colNum).fill = greenFill\r\n ws.cell(row=rowNum, column=colNum).border = border\r\n \r\n for colNum in range(7, 13):\r\n for rowNum in range(1, maxRow):\r\n ws.cell(row=rowNum, column=colNum).fill = yellowFill\r\n ws.cell(row=rowNum, column=colNum).border = border\r\n \r\n for colNum in range(13, 18):\r\n for rowNum in range(1, maxRow):\r\n ws.cell(row=rowNum, column=colNum).fill = orangeFill\r\n ws.cell(row=rowNum, column=colNum).border = border\r\n \r\n # apply hyperlink styling to column K, URL\r\n for rowNum in range(3, maxRow):\r\n ws_cell = ws.cell(row=rowNum, column=12)\r\n if not ws_cell.value is None:\r\n link(ws_cell, ws_cell.value, ws_cell.value)\r\n \r\n # re-apply black borders around the header cells\r\n for colNum in range(1, 18):\r\n for rowNum in range(1, 3):\r\n ws.cell(row=rowNum, column=colNum).border = blackBorder\r\n \r\n # adjust the width of the cells\r\n for i in list(string.ascii_uppercase[0:17]):\r\n ws.column_dimensions[i].width = 15\r\n \r\n # materialized paths often get imported to Excel as dates.\r\n # try to ensure HierarchyKey as text, not date\r\n for rowNum in range (1, maxRow):\r\n ws.cell(row=rowNum, column=1).number_format = '@'\r\n \r\n wb.save(xlf)\r\n\r\n # START\r\n#------------------------------------------------------------------------\r\nif len(sys.argv) == 1:\r\n print(__doc__)\r\n quit()\r\n\r\narcpy.AddMessage(versionString)\r\n\r\n# collect the path to the DMU table\r\ndmu = sys.argv[1]\r\narcpy.AddMessage(f\"Evaluating {dmu}\")\r\n\r\nif '.xlsx' in dmu or '.xls' in dmu:\r\n if not dmu[-1:] == '$':\r\n dmu = dmu + '$'\r\n \r\n# get parent directory\r\ndmu_home = os.path.dirname(dmu)\r\n# figure out what the file format is\r\nif os.path.splitext(dmu_home)[1] == '.gdb':\r\n out_name = os.path.basename(dmu_home)[:-4]\r\n dmu_home = 
os.path.dirname(dmu_home)\r\n dmu_df = frame_it(dmu, 'gdb')\r\n\r\nelif os.path.splitext(dmu_home)[1] == '.xlsx':\r\n out_name = os.path.basename(dmu_home)[:-5]\r\n dmu_home = os.path.dirname(dmu_home)\r\n dmu_df = frame_it(dmu, 'xls')\r\n\r\nelif os.path.splitext(dmu_home)[1] == '.xls':\r\n arcpy.AddMessage(\"XLS format files cannot be read by the tool\\n\" +\r\n \"Please choose an ESRI file geodatabase table, an XLSX Excel spreadsheet,\\n\" +\r\n \"a comma-delimited text file, or a tab-delimited text file\")\r\n raise SystemError\r\n\r\nelif os.path.splitext(dmu)[1] == '.csv':\r\n out_name = os.path.basename(dmu)[:-4]\r\n dmu_df = frame_it(dmu, 'csv')\r\n\r\nelif os.path.splitext(dmu)[1] == '.txt':\r\n out_name = os.path.basename(dmu)[:-4]\r\n dmu_df = frame_it(dmu, 'txt')\r\n\r\nelse:\r\n arcpy.AddMessage(\"The DMU file cannot be read\\n\" +\r\n \"Choose an ESRI file geodatabase table, an XLSX Excel spreadsheet,\\n\" +\r\n \"a comma-delimited text file, or a tab-delimited text file\")\r\n \r\n# collect and clean the extent of the DMU. \r\n# can be single state or list of states, comma separated,\r\n# can be upper or lower case\r\ndmu_str = sys.argv[2]\r\ndmu_str = dmu_str.strip(\"\\'\")\r\ndmu_str = dmu_str.replace(\" \", \"\")\r\ndmu_exts = re.split(';|,',dmu_str)\r\n \r\n# open the report after running?\r\nif len(sys.argv) == 4:\r\n open_xl = bool(strtobool(sys.argv[3]))\r\nelse:\r\n open_xl = True\r\n\r\n# units table of geolex db\r\nthis_py = os.path.realpath(__file__)\r\ngeolex_db = os.path.join(os.path.dirname(this_py),'..', 'Resources', 'geolex_units.json')\r\n\r\n# set up a pandas data frame\r\nd={}\r\ndf = pd.DataFrame(columns=['HierarchyKey', 'MapUnit', 'Name', 'Fullname', 'Age', 'Extent', # DMU Contents\r\n 'GeolexID', 'Name', 'Usage', 'Age', 'Extent', 'URL', # Geolex Results\r\n 'Extent Match?', 'Usage Match?', 'Age Match?', 'Remarks', 'References']) # Author Review\r\n \r\ndf['HierarchyKey'] = df['HierarchyKey'].astype('object')\r\n\r\nfields = ['hierarchykey', 'mapunit', 'name', 'fullname', 'age']\r\n\r\nn = 0\r\nfor row in dmu_df.itertuples():\r\n # only proceed if there is either a Name or Fullname. 
This will check for Geolex names in headings\r\n if row.name or row.fullname: \r\n # get some values from the input\r\n # map unit abbreviation\r\n mu = row.mapunit \r\n \r\n # short map unit name\r\n if not(pd.isna(row.name) or row.name ==\"\"):\r\n sn = row.name\r\n sn_subbed = sanitize_text(sn).strip().lower()\r\n sn_lower = sn.lower()\r\n else:\r\n sn = ''\r\n sn_subbed = ''\r\n sn_lower = ''\r\n \r\n # full map unit name\r\n if not (pd.isna(row.fullname) or row.fullname == \"\"):\r\n fn = row.fullname\r\n fn_subbed = sanitize_text(fn).strip().lower()\r\n fn_lower = fn.lower()\r\n else:\r\n fn =''\r\n fn_subbed = ''\r\n fn_lower = ''\r\n \r\n age = row.age\r\n \r\n # pandas might read in materialized paths as dates.\r\n # try to ensure HierarchyKey is read in as text, not date or a number_format\r\n hkey = str(row.hierarchykey)\r\n \r\n # Collect the geolex names that are in fullname and name\r\n # Case 1: set of geolex names that are in name same as those in fullname\r\n # use the set of geolex names\r\n # Case 2: there are more geolex names in fullname than in name\r\n # use only the geolex names that are in name\r\n # Case 3: no geolex names in name but there are geolex names in fullname\r\n # use the set of geolex names that are in fullname\r\n\r\n # pass the current name and fullname to the api-query function\r\n sn_matches = None\r\n fn_matches = None\r\n \r\n sn_results = None\r\n fn_results = None\r\n if sn:\r\n sn_results = units_query(sn)\r\n \r\n if fn:\r\n fn_results = units_query(fn)\r\n \r\n # if there are name and fullname matches, take the intersection of the sets\r\n if (sn_results and fn_results) or (sn_results and not fn_results):\r\n results = sn_results\r\n check_name = sn\r\n \r\n # if there are only fullname matches, take those\r\n elif fn_results and not sn_results:\r\n results = fn_results \r\n check_name = fn\r\n \r\n # if none of those, there are no matches\r\n else:\r\n results = None\r\n \r\n # initiate this row filling out the first 6 columns\r\n # needs to be defined outside of 'if matches' statement below for the case where\r\n # there are no valid matches\r\n #unit_list = [mu, fn, fm, age, ext]\r\n unit_list = [hkey, mu, sn, fn, age, ', '.join(dmu_exts)]\r\n \r\n # initialize counter to determine contents of unit_list as matches are recorded\r\n i = 0 \r\n \r\n # initialize incrementing character to append to HierarchyKey in case there are more than one usage\r\n # this will allow the output table to be sorted on HierarchyKey correctly \r\n ch = \"a\"\r\n \r\n if results:\r\n arcpy.AddMessage(f\"Looking for GEOLEX names in {check_name}\")\r\n names_only = [result['unit_name'] for result in results]\r\n names_only = sanitize_matches(names_only, check_name)\r\n else:\r\n names_only = None\r\n \r\n if names_only:\r\n for name in names_only:\r\n for r in [result for result in results if result['unit_name'] == name[1]]:\r\n arcpy.AddMessage(f\"Evaluating usages for {name[1]}\")\r\n glx_id = r['id']\r\n glx_name = name[1]\r\n glx_age = parse_age(r['age_description'][0])\r\n glx_url = r['url']\r\n \r\n # begin iterating the usages\r\n n = 0\r\n for usage_i in r['usages']:\r\n if usage_i['usage'].lower().find('recognized') == -1 or usage_i['usage'].lower().find('notably') == -1:\r\n # check the extent\r\n ext_bool = ext_check(usage_i['states'], dmu_exts)\r\n if ext_bool:\r\n ext_str = 'yes'\r\n else:\r\n ext_str = 'no'\r\n \r\n glx_ext = ', '.join(usage_i['states'])\r\n\r\n # re-write unit_list if the first row has already been written\r\n if i == 1:\r\n 
sub_hkey = f\"{hkey}-{ch}\"\r\n # 4 of the first 5 columns are empty in all rows following the first row\r\n # append an incrementing character to hkey so that the table can be sorted properly\r\n unit_list = [sub_hkey,'','','','','']\r\n ch = chr(ord(ch) + 1)\r\n \r\n if n == 1:\r\n glx_id = ''\r\n glx_name = ''\r\n glx_url = ''\r\n \r\n # extend the list with Geolex results\r\n unit_list.extend([glx_id, glx_name, usage_i['usage'], glx_age, glx_ext, glx_url, ext_str, '', '', '', ''])\r\n \r\n # add list to dataframe\r\n unit_series = pd.Series(unit_list, index = df.columns)\r\n df = df.append(unit_series, ignore_index=True)\r\n \r\n n = 1\r\n i = 1\r\n \r\n # there is no match\r\n else:\r\n nomatch = unit_list.extend(['', '', '', '', '', '', 'no', '', '', '', ''])\r\n \r\n # add list to dataframe\r\n unit_series = pd.Series(unit_list, index = df.columns)\r\n df = df.append(unit_series, ignore_index=True)\r\n \r\nxl_path = os.path.join(dmu_home, f'{out_name}_namescheck.xlsx')\r\narcpy.AddMessage(f\"Saving {xl_path}\")\r\nif os.path.exists(xl_path):\r\n os.remove(xl_path)\r\n\r\ndf.to_excel(xl_path, freeze_panes = (2,0), index=False, engine=\"openpyxl\")\r\nformat_excel(xl_path)\r\nif open_xl == True:\r\n os.startfile(xl_path)" ]
[ [ "pandas.DataFrame", "pandas.isna", "pandas.Series" ] ]
septasset/deep-high-resolution-net.pytorch
[ "eb59426a20a944199f62ed07f93d266638b80af7" ]
[ "lib/dataset/poseX2.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport logging\nimport os\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nimport json_tricks as json\nimport numpy as np\n\nfrom dataset.JointsDataset import JointsDataset\nfrom nms.nms import oks_nms\nfrom nms.nms import soft_oks_nms\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PoseX2Dataset(JointsDataset):\n '''\n \"keypoints\": {\n 0: \"head\",\n 1: \"neck\",\n 2: \"spine3\",\n 3: \"spine2\",\n 4: \"spine1\",\n 5: \"left_shoulder\",\n 6: \"right_shoulder\",\n 7: \"left_elbow\",\n 8: \"right_elbow\",\n 9: \"left_wrist\",\n 10: \"right_wrist\",\n 11: \"left_hip\",\n 12: \"right_hip\",\n 13: \"left_knee\",\n 14: \"right_knee\",\n 15: \"left_ankle\",\n 16: \"right_ankle\"\n },\n\t\"skeleton\": [\n [16,14],[14,12],[17,15],[15,13],[5,4],[4,3],\n [3,6],[3,7],[6,8],[7,9],[8,10],[9,11],\n [3,2],[2,1]\n ]\n '''\n def __init__(self, cfg, root, image_set, is_train, transform=None):\n super().__init__(cfg, root, image_set, is_train, transform)\n self.nms_thre = cfg.TEST.NMS_THRE\n self.image_thre = cfg.TEST.IMAGE_THRE\n self.soft_nms = cfg.TEST.SOFT_NMS\n self.oks_thre = cfg.TEST.OKS_THRE\n self.in_vis_thre = cfg.TEST.IN_VIS_THRE\n self.bbox_file = cfg.TEST.COCO_BBOX_FILE\n self.use_gt_bbox = cfg.TEST.USE_GT_BBOX\n self.image_width = cfg.MODEL.IMAGE_SIZE[0]\n self.image_height = cfg.MODEL.IMAGE_SIZE[1]\n self.aspect_ratio = self.image_width * 1.0 / self.image_height\n self.pixel_std = 200\n\n self.coco = COCO(self._get_ann_file_keypoint())\n\n # deal with class names\n cats = [cat['name']\n for cat in self.coco.loadCats(self.coco.getCatIds())]\n self.classes = ['__background__'] + cats\n logger.info('=> classes: {}'.format(self.classes))\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n [\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:]\n ]\n )\n\n # load image file names\n self.image_set_index = self._load_image_set_index()\n self.num_images = len(self.image_set_index)\n logger.info('=> num_images: {}'.format(self.num_images))\n\n self.num_joints = 17\n self.flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],\n [9, 10], [11, 12], [13, 14], [15, 16]]\n self.parent_ids = None\n self.upper_body_ids = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n self.lower_body_ids = (11, 12, 13, 14, 15, 16)\n\n self.joints_weight = np.array(\n [\n 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2,\n 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, 1.5\n ],\n dtype=np.float32\n ).reshape((self.num_joints, 1))\n\n self.db = self._get_db()\n\n if is_train and cfg.DATASET.SELECT_DATA:\n self.db = self.select_data(self.db)\n\n logger.info('=> load {} samples'.format(len(self.db)))\n\n def _get_ann_file_keypoint(self):\n \"\"\" self.root / annotations / person_keypoints_train2017.json \"\"\"\n prefix = 'person_keypoints' \\\n if 'test' not in self.image_set else 'person_keypoints'\n return os.path.join(\n self.root,\n 'annotations',\n prefix + '_' + self.image_set + 
'.json'\n )\n\n def _load_image_set_index(self):\n \"\"\" image id: int \"\"\"\n image_ids = self.coco.getImgIds()\n return image_ids\n\n def _get_db(self):\n if self.is_train or self.use_gt_bbox:\n # use ground truth bbox\n gt_db = self._load_coco_keypoint_annotations()\n else:\n # use bbox from detection\n gt_db = self._load_coco_person_detection_results()\n return gt_db\n\n def _load_coco_keypoint_annotations(self):\n \"\"\" ground truth bbox and keypoints \"\"\"\n gt_db = []\n for index in self.image_set_index:\n gt_db.extend(self._load_coco_keypoint_annotation_kernal(index))\n return gt_db\n\n def _load_coco_keypoint_annotation_kernal(self, index):\n \"\"\"\n coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']\n iscrowd:\n crowd instances are handled by marking their overlaps with all categories to -1\n and later excluded in training\n bbox:\n [x1, y1, w, h]\n :param index: coco image id\n :return: db entry\n \"\"\"\n im_ann = self.coco.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)\n objs = self.coco.loadAnns(annIds)\n\n # sanitize bboxes\n valid_objs = []\n for obj in objs:\n x, y, w, h = obj['bbox']\n x1 = np.max((0, x))\n y1 = np.max((0, y))\n x2 = np.min((width - 1, x1 + np.max((0, w - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, h - 1))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2-x1, y2-y1]\n valid_objs.append(obj)\n objs = valid_objs\n\n rec = []\n for obj in objs:\n cls = self._coco_ind_to_class_ind[obj['category_id']]\n if cls != 1:\n continue\n\n # ignore objs without keypoints annotation\n if max(obj['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)\n for ipt in range(self.num_joints):\n joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = obj['keypoints'][ipt * 3 + 2]\n if t_vis > 1:\n t_vis = 1\n joints_3d_vis[ipt, 0] = t_vis\n joints_3d_vis[ipt, 1] = t_vis\n joints_3d_vis[ipt, 2] = 0\n\n center, scale = self._box2cs(obj['clean_bbox'][:4])\n rec.append({\n 'image': self.image_path_from_index(index),\n 'center': center,\n 'scale': scale,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n 'filename': '',\n 'imgnum': 0,\n })\n\n return rec\n\n def _box2cs(self, box):\n x, y, w, h = box[:4]\n return self._xywh2cs(x, y, w, h)\n\n def _xywh2cs(self, x, y, w, h):\n center = np.zeros((2), dtype=np.float32)\n center[0] = x + w * 0.5\n center[1] = y + h * 0.5\n\n if w > self.aspect_ratio * h:\n h = w * 1.0 / self.aspect_ratio\n elif w < self.aspect_ratio * h:\n w = h * self.aspect_ratio\n scale = np.array(\n [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n dtype=np.float32)\n if center[0] != -1:\n scale = scale * 1.25\n\n return center, scale\n\n def image_path_from_index(self, index):\n \"\"\" example: images / train2017 / 000000119993.jpg \"\"\"\n if self.data_format == \"jpg\":\n file_name = '%012d.jpg' % index\n elif self.data_format == \"png\":\n file_name = '%012d.png' % index\n if '2014' in self.image_set:\n file_name = 'COCO_%s_' % self.image_set + file_name\n\n # prefix = 'test2017' if 'test' in self.image_set else self.image_set\n prefix = self.image_set if 'test' in self.image_set else self.image_set\n\n data_name = prefix + '.zip@' if self.data_format == 'zip' else prefix\n\n image_path = 
os.path.join(\n self.root, 'images', data_name, file_name)\n\n return image_path\n\n def _load_coco_person_detection_results(self):\n all_boxes = None\n with open(self.bbox_file, 'r') as f:\n all_boxes = json.load(f)\n\n if not all_boxes:\n logger.error('=> Load %s fail!' % self.bbox_file)\n return None\n\n logger.info('=> Total boxes: {}'.format(len(all_boxes)))\n\n kpt_db = []\n num_boxes = 0\n for n_img in range(0, len(all_boxes)):\n det_res = all_boxes[n_img]\n if det_res['category_id'] != 1:\n continue\n img_name = self.image_path_from_index(det_res['image_id'])\n box = det_res['bbox']\n score = det_res['score']\n\n if score < self.image_thre:\n continue\n\n num_boxes = num_boxes + 1\n\n center, scale = self._box2cs(box)\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.ones(\n (self.num_joints, 3), dtype=np.float)\n kpt_db.append({\n 'image': img_name,\n 'center': center,\n 'scale': scale,\n 'score': score,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n })\n\n logger.info('=> Total boxes after fliter low score@{}: {}'.format(\n self.image_thre, num_boxes))\n return kpt_db\n\n def evaluate(self, cfg, preds, output_dir, all_boxes, img_path,\n *args, **kwargs):\n rank = cfg.RANK\n\n res_folder = os.path.join(output_dir, 'results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n\n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_{}.json'.format(\n self.image_set, rank)\n )\n\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n _kpts.append({\n 'keypoints': kpt,\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'image': int(img_path[idx][-16:-4])\n # 'image': int(img_path[idx].split(\"/\")[-1].strip(\".jpg\"))\n })\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n for img in kpts.keys():\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n\n if self.soft_nms:\n keep = soft_oks_nms(\n [img_kpts[i] for i in range(len(img_kpts))],\n oks_thre\n )\n else:\n keep = oks_nms(\n [img_kpts[i] for i in range(len(img_kpts))],\n oks_thre\n )\n\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file)\n if 'test' not in self.image_set:\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder)\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n else:\n # return {'Null': 0}, 0\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder)\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n data_pack = [\n {\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n }\n for cls_ind, cls in enumerate(self.classes) 
if not cls == '__background__'\n ]\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n logger.info('=> writing results json to %s' % res_file)\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n try:\n json.load(open(res_file))\n except Exception:\n content = []\n with open(res_file, 'r') as f:\n for line in f:\n content.append(line)\n content[-1] = ']'\n with open(res_file, 'w') as f:\n for c in content:\n f.write(c)\n\n def _coco_keypoint_results_one_category_kernel(self, data_pack):\n cat_id = data_pack['cat_id']\n keypoints = data_pack['keypoints']\n cat_results = []\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array([img_kpts[k]['keypoints']\n for k in range(len(img_kpts))])\n key_points = np.zeros(\n (_key_points.shape[0], self.num_joints * 3), dtype=np.float\n )\n\n for ipt in range(self.num_joints):\n key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0]\n key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1]\n key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score.\n\n result = [\n {\n 'image_id': img_kpts[k]['image'],\n 'category_id': cat_id,\n 'keypoints': list(key_points[k]),\n 'score': img_kpts[k]['score'],\n 'center': list(img_kpts[k]['center']),\n 'scale': list(img_kpts[k]['scale'])\n }\n for k in range(len(img_kpts))\n ]\n cat_results.extend(result)\n\n return cat_results\n\n def _do_python_keypoint_eval(self, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n # print(\"coco_eval.stats\", coco_eval.stats)\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']\n\n info_str = []\n for ind, name in enumerate(stats_names):\n info_str.append((name, coco_eval.stats[ind]))\n\n return info_str\n" ]
[ [ "numpy.max", "numpy.array", "numpy.ones", "numpy.zeros" ] ]
Karin-S/deeplab_modify
[ "7f704729ebdf3ab78f9ce0ef0ee0bc245ae5455c" ]
[ "modeling/deeplab.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nfrom modeling.aspp import build_aspp\nfrom modeling.decoder import build_decoder\nfrom modeling.backbone import build_backbone\n\n\nclass DeepLab(nn.Module):\n def __init__(self, backbone='resnet', output_stride=16, num_classes=21, sync_bn=True, freeze_bn=False):\n\n super(DeepLab, self).__init__()\n if backbone == 'drn':\n output_stride = 8\n\n if sync_bn == True:\n BatchNorm = SynchronizedBatchNorm2d\n else:\n BatchNorm = nn.BatchNorm2d\n self.backbone = build_backbone(backbone, output_stride, BatchNorm)\n self.aspp = build_aspp(backbone, output_stride, BatchNorm)\n self.decoder = build_decoder(num_classes, backbone, BatchNorm)\n\n if freeze_bn:\n self.freeze_bn()\n\n def forward(self, input):\n x, low_level_feat = self.backbone(input)\n x = self.aspp(x)\n x = self.decoder(x, low_level_feat)\n x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)\n\n return x\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, SynchronizedBatchNorm2d):\n m.eval()\n elif isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def get_1x_lr_params(self):\n modules = [self.backbone]\n for i in range(len(modules)):\n for m in modules[i].named_modules():\n if isinstance(m[1], nn.Conv2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n\n def get_10x_lr_params(self):\n modules = [self.aspp, self.decoder]\n for i in range(len(modules)):\n for m in modules[i].named_modules():\n if isinstance(m[1], nn.Conv2d):\n for p in m[1].parameters():\n if p.requires_grad:\n yield p\n\n\nif __name__ == \"__main__\":\n model = DeepLab(backbone='mobilenet', output_stride=16)\n model.eval()\n input = torch.rand(1, 3, 513, 513)\n output = model(input)\n print(output.size())\n\n\n" ]
[ [ "torch.rand" ] ]
joseruiz1989/detectron2
[ "be67214add717fbdd0157521b2e63ea025164284" ]
[ "detectron2/modeling/roi_heads/fast_rcnn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport logging\nfrom typing import Callable, Dict, List, Optional, Tuple, Union\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom detectron2.config import configurable\nfrom detectron2.data.detection_utils import get_fed_loss_cls_weights\nfrom detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple\nfrom detectron2.modeling.box_regression import Box2BoxTransform, _dense_box_regression_loss\nfrom detectron2.structures import Boxes, Instances\nfrom detectron2.utils.events import get_event_storage\n\n__all__ = [\"fast_rcnn_inference\", \"FastRCNNOutputLayers\"]\n\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nShape shorthand in this module:\n\n N: number of images in the minibatch\n R: number of ROIs, combined over all images, in the minibatch\n Ri: number of ROIs in image i\n K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.\n\nNaming convention:\n\n deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box\n transform (see :class:`box_regression.Box2BoxTransform`).\n\n pred_class_logits: predicted class scores in [-inf, +inf]; use\n softmax(pred_class_logits) to estimate P(class).\n\n gt_classes: ground-truth classification labels in [0, K], where [0, K) represent\n foreground object classes and K represents the background class.\n\n pred_proposal_deltas: predicted box2box transform deltas for transforming proposals\n to detection box predictions.\n\n gt_proposal_deltas: ground-truth box2box transform deltas\n\"\"\"\n\n\ndef fast_rcnn_inference(\n boxes: List[torch.Tensor],\n scores: List[torch.Tensor],\n image_shapes: List[Tuple[int, int]],\n score_thresh: float,\n nms_thresh: float,\n topk_per_image: int,\n):\n \"\"\"\n Call `fast_rcnn_inference_single_image` for all images.\n\n Args:\n boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic\n boxes for each image. Element i has shape (Ri, K * 4) if doing\n class-specific regression, or (Ri, 4) if doing class-agnostic\n regression, where Ri is the number of predicted objects for image i.\n This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.\n scores (list[Tensor]): A list of Tensors of predicted class scores for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of predicted objects\n for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.\n image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.\n score_thresh (float): Only return detections with a confidence score exceeding this\n threshold.\n nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].\n topk_per_image (int): The number of top scoring detections to return. 
Set < 0 to return\n all detections.\n\n Returns:\n instances: (list[Instances]): A list of N instances, one for each image in the batch,\n that stores the topk most confidence detections.\n kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates\n the corresponding boxes/scores index in [0, Ri) from the input, for image i.\n \"\"\"\n result_per_image = [\n fast_rcnn_inference_single_image(\n boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image\n )\n for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)\n ]\n return [x[0] for x in result_per_image], [x[1] for x in result_per_image]\n\n\ndef _log_classification_stats(pred_logits, gt_classes, prefix=\"fast_rcnn\"):\n \"\"\"\n Log the classification metrics to EventStorage.\n\n Args:\n pred_logits: Rx(K+1) logits. The last column is for background class.\n gt_classes: R labels\n \"\"\"\n num_instances = gt_classes.numel()\n if num_instances == 0:\n return\n pred_classes = pred_logits.argmax(dim=1)\n bg_class_ind = pred_logits.shape[1] - 1\n\n fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)\n num_fg = fg_inds.nonzero().numel()\n fg_gt_classes = gt_classes[fg_inds]\n fg_pred_classes = pred_classes[fg_inds]\n\n num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()\n num_accurate = (pred_classes == gt_classes).nonzero().numel()\n fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()\n\n storage = get_event_storage()\n storage.put_scalar(f\"{prefix}/cls_accuracy\", num_accurate / num_instances)\n if num_fg > 0:\n storage.put_scalar(f\"{prefix}/fg_cls_accuracy\", fg_num_accurate / num_fg)\n storage.put_scalar(f\"{prefix}/false_negative\", num_false_negative / num_fg)\n\n\ndef fast_rcnn_inference_single_image(\n boxes,\n scores,\n image_shape: Tuple[int, int],\n score_thresh: float,\n nms_thresh: float,\n topk_per_image: int,\n):\n \"\"\"\n Single-image inference. Return bounding-box detection results by thresholding\n on scores and applying non-maximum suppression (NMS).\n\n Args:\n Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes\n per image.\n\n Returns:\n Same as `fast_rcnn_inference`, but for only one image.\n \"\"\"\n valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)\n if not valid_mask.all():\n boxes = boxes[valid_mask]\n scores = scores[valid_mask]\n\n scores = scores[:, :-1]\n num_bbox_reg_classes = boxes.shape[1] // 4\n # Convert to Boxes to use the `clip` function ...\n boxes = Boxes(boxes.reshape(-1, 4))\n boxes.clip(image_shape)\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4\n\n # 1. Filter results based on detection scores. It can make NMS more efficient\n # by filtering out low-confidence detections.\n filter_mask = scores > score_thresh # R x K\n # R' x 2. First column contains indices of the R predictions;\n # Second column contains indices of classes.\n filter_inds = filter_mask.nonzero()\n if num_bbox_reg_classes == 1:\n boxes = boxes[filter_inds[:, 0], 0]\n else:\n boxes = boxes[filter_mask]\n scores = scores[filter_mask]\n\n # 2. 
Apply NMS for each class independently.\n keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)\n if topk_per_image >= 0:\n keep = keep[:topk_per_image]\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]\n\n result = Instances(image_shape)\n result.pred_boxes = Boxes(boxes)\n result.scores = scores\n result.pred_classes = filter_inds[:, 1]\n return result, filter_inds[:, 0]\n\n\nclass FastRCNNOutputLayers(nn.Module):\n \"\"\"\n Two linear layers for predicting Fast R-CNN outputs:\n\n 1. proposal-to-detection box regression deltas\n 2. classification scores\n \"\"\"\n\n @configurable\n def __init__(\n self,\n input_shape: ShapeSpec,\n *,\n box2box_transform,\n num_classes: int,\n test_score_thresh: float = 0.0,\n test_nms_thresh: float = 0.5,\n test_topk_per_image: int = 100,\n cls_agnostic_bbox_reg: bool = False,\n smooth_l1_beta: float = 0.0,\n box_reg_loss_type: str = \"smooth_l1\",\n loss_weight: Union[float, Dict[str, float]] = 1.0,\n use_fed_loss: bool = False,\n use_sigmoid_ce: bool = False,\n get_fed_loss_cls_weights: Optional[Callable] = None,\n fed_loss_num_classes: int = 50,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n input_shape (ShapeSpec): shape of the input feature to this module\n box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):\n num_classes (int): number of foreground classes\n test_score_thresh (float): threshold to filter predictions results.\n test_nms_thresh (float): NMS threshold for prediction results.\n test_topk_per_image (int): number of top predictions to produce per image.\n cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression\n smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\n `box_reg_loss_type` is \"smooth_l1\"\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\",\n \"diou\", \"ciou\"\n loss_weight (float|dict): weights to use for losses. Can be single float for weighting\n all losses, or a dict of individual weightings. Valid dict keys are:\n * \"loss_cls\": applied to classification loss\n * \"loss_box_reg\": applied to box regression loss\n use_fed_loss (bool): whether to use federated loss which samples additional negative\n classes to calculate the loss\n use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary\n cross entropy with logits. This could be used together with federated loss\n get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency\n weight power, and returns the probabilities to sample negative classes for\n federated loss. 
The implementation can be found in\n detectron2/data/detection_utils.py\n fed_loss_num_classes (int): number of federated classes to keep in total\n \"\"\"\n super().__init__()\n if isinstance(input_shape, int): # some backward compatibility\n input_shape = ShapeSpec(channels=input_shape)\n self.num_classes = num_classes\n input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\n # prediction layer for num_classes foreground classes and one background class (hence + 1)\n self.cls_score = nn.Linear(input_size, num_classes + 1)\n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\n box_dim = len(box2box_transform.weights)\n self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)\n\n nn.init.normal_(self.cls_score.weight, std=0.01)\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\n for l in [self.cls_score, self.bbox_pred]:\n nn.init.constant_(l.bias, 0)\n\n self.box2box_transform = box2box_transform\n self.smooth_l1_beta = smooth_l1_beta\n self.test_score_thresh = test_score_thresh\n self.test_nms_thresh = test_nms_thresh\n self.test_topk_per_image = test_topk_per_image\n self.box_reg_loss_type = box_reg_loss_type\n if isinstance(loss_weight, float):\n loss_weight = {\"loss_cls\": loss_weight, \"loss_box_reg\": loss_weight}\n self.loss_weight = loss_weight\n self.use_fed_loss = use_fed_loss\n self.use_sigmoid_ce = use_sigmoid_ce\n self.fed_loss_num_classes = fed_loss_num_classes\n\n if self.use_fed_loss:\n assert self.use_sigmoid_ce, \"Please use sigmoid cross entropy loss with federated loss\"\n fed_loss_cls_weights = get_fed_loss_cls_weights()\n assert (\n len(fed_loss_cls_weights) == self.num_classes\n ), \"Please check the provided fed_loss_cls_weights. Their size should match num_classes\"\n self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\n\n @classmethod\n def from_config(cls, cfg, input_shape):\n return {\n \"input_shape\": input_shape,\n \"box2box_transform\": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),\n # fmt: off\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\n \"cls_agnostic_bbox_reg\" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\n \"smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\n \"test_score_thresh\" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,\n \"test_nms_thresh\" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\n \"test_topk_per_image\" : cfg.TEST.DETECTIONS_PER_IMAGE,\n \"box_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,\n \"loss_weight\" : {\"loss_box_reg\": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa\n \"use_fed_loss\" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS,\n \"use_sigmoid_ce\" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE,\n \"get_fed_loss_cls_weights\" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa\n \"fed_loss_num_classes\" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES,\n # fmt: on\n }\n\n def forward(self, x):\n \"\"\"\n Args:\n x: per-region features of shape (N, ...) for N bounding boxes to predict.\n\n Returns:\n (Tensor, Tensor):\n First tensor: shape (N,K+1), scores for each of the N box. Each row contains the\n scores for K object categories and 1 background class.\n\n Second tensor: bounding box regression deltas for each box. 
Shape is shape (N,Kx4),\n or (N,4) for class-agnostic regression.\n \"\"\"\n if x.dim() > 2:\n x = torch.flatten(x, start_dim=1)\n scores = self.cls_score(x)\n proposal_deltas = self.bbox_pred(x)\n return scores, proposal_deltas\n\n def losses(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,\n ``gt_classes`` are expected.\n\n Returns:\n Dict[str, Tensor]: dict of losses\n \"\"\"\n scores, proposal_deltas = predictions\n\n # parse classification outputs\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n _log_classification_stats(scores, gt_classes)\n\n # parse box regression outputs\n if len(proposals):\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\n # should not be included in regression loss computation.\n # Here we just use proposal_boxes as an arbitrary placeholder because its\n # value won't be used in self.box_reg_loss().\n gt_boxes = cat(\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\n dim=0,\n )\n else:\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)\n\n if self.use_sigmoid_ce:\n loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)\n else:\n loss_cls = cross_entropy(scores, gt_classes, reduction=\"mean\")\n\n losses = {\n \"loss_cls\": loss_cls,\n \"loss_box_reg\": self.box_reg_loss(\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes\n ),\n }\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}\n\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa\n # with slight modifications\n def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\n \"\"\"\n Args:\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\n Will sample negative classes if number of unique gt_classes is smaller than this value.\n num_classes: number of foreground classes\n weight: probabilities used to sample negative classes\n\n Returns:\n Tensor:\n classes to keep when calculating the federated loss, including both unique gt\n classes and sampled negative classes.\n \"\"\"\n unique_gt_classes = torch.unique(gt_classes)\n prob = unique_gt_classes.new_ones(num_classes + 1).float()\n prob[-1] = 0\n if len(unique_gt_classes) < num_fed_loss_classes:\n prob[:num_classes] = weight.float().clone()\n prob[unique_gt_classes] = 0\n sampled_negative_classes = torch.multinomial(\n prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\n )\n fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])\n else:\n fed_loss_classes = unique_gt_classes\n return fed_loss_classes\n\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa\n # with slight modifications\n def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):\n \"\"\"\n Args:\n pred_class_logits: shape (N, K+1), scores for 
each of the N box. Each row contains the\n scores for K object categories and 1 background class\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n \"\"\"\n if pred_class_logits.numel() == 0:\n return pred_class_logits.new_zeros([1])[0]\n\n N = pred_class_logits.shape[0]\n K = pred_class_logits.shape[1] - 1\n\n target = pred_class_logits.new_zeros(N, K + 1)\n target[range(len(gt_classes)), gt_classes] = 1\n target = target[:, :K]\n\n cls_loss = F.binary_cross_entropy_with_logits(\n pred_class_logits[:, :-1], target, reduction=\"none\"\n )\n\n if self.use_fed_loss:\n fed_loss_classes = self.get_fed_loss_classes(\n gt_classes,\n num_fed_loss_classes=self.fed_loss_num_classes,\n num_classes=K,\n weight=self.fed_loss_cls_weights,\n )\n fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\n fed_loss_classes_mask[fed_loss_classes] = 1\n fed_loss_classes_mask = fed_loss_classes_mask[:K]\n weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\n else:\n weight = 1\n\n loss = torch.sum(cls_loss * weight) / N\n return loss\n\n def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):\n \"\"\"\n Args:\n proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).\n pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).\n gt_classes is a long tensor of shape R, the gt class label of each proposal.\n R shall be the number of proposals.\n \"\"\"\n box_dim = proposal_boxes.shape[1] # 4 or 5\n # Regression loss is only computed for foreground proposals (those matched to a GT)\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]\n if pred_deltas.shape[1] == box_dim: # cls-agnostic regression\n fg_pred_deltas = pred_deltas[fg_inds]\n else:\n fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[\n fg_inds, gt_classes[fg_inds]\n ]\n\n loss_box_reg = _dense_box_regression_loss(\n [proposal_boxes[fg_inds]],\n self.box2box_transform,\n [fg_pred_deltas.unsqueeze(0)],\n [gt_boxes[fg_inds]],\n ...,\n self.box_reg_loss_type,\n self.smooth_l1_beta,\n )\n\n # The reg loss is normalized using the total number of regions (R), not the number\n # of foreground regions even though the box regression loss is only defined on\n # foreground regions. Why? Because doing so gives equal training influence to\n # each foreground example. To see how, consider two different minibatches:\n # (1) Contains a single foreground region\n # (2) Contains 100 foreground regions\n # If we normalize by the number of foreground regions, the single example in\n # minibatch (1) will be given 100 times as much influence as each foreground\n # example in minibatch (2). Normalizing by the total number of regions, R,\n # means that the single example in minibatch (1) and each of the 100 examples\n # in minibatch (2) are given equal influence.\n return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty\n\n def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. 
The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Instances]: same as `fast_rcnn_inference`.\n list[Tensor]: same as `fast_rcnn_inference`.\n \"\"\"\n boxes = self.predict_boxes(predictions, proposals)\n scores = self.predict_probs(predictions, proposals)\n image_shapes = [x.image_size for x in proposals]\n return fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_topk_per_image,\n )\n\n def predict_boxes_for_gt_classes(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted boxes for GT classes in case of\n class-specific box head. Element i of the list has shape (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n scores, proposal_deltas = predictions\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\n N, B = proposal_boxes.shape\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas, proposal_boxes\n ) # Nx(KxB)\n\n K = predict_boxes.shape[1] // B\n if K > 1:\n gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)\n # Some proposals are ignored or have a background class. Their gt_classes\n # cannot be used as index.\n gt_classes = gt_classes.clamp_(0, K - 1)\n\n predict_boxes = predict_boxes.view(N, K, B)[\n torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes\n ]\n num_prop_per_image = [len(p) for p in proposals]\n return predict_boxes.split(num_prop_per_image)\n\n def predict_boxes(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions. The ``proposal_boxes`` field is expected.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class-specific or class-agnostic boxes\n for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is\n the number of proposals for image i and B is the box dimension (4 or 5)\n \"\"\"\n if not len(proposals):\n return []\n _, proposal_deltas = predictions\n num_prop_per_image = [len(p) for p in proposals]\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\n predict_boxes = self.box2box_transform.apply_deltas(\n proposal_deltas,\n proposal_boxes,\n ) # Nx(KxB)\n return predict_boxes.split(num_prop_per_image)\n\n def predict_probs(\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\n ):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were\n used to compute predictions.\n\n Returns:\n list[Tensor]:\n A list of Tensors of predicted class probabilities for each image.\n Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.\n \"\"\"\n scores, _ = predictions\n num_inst_per_image = [len(p) for p in proposals]\n if self.use_sigmoid_ce:\n probs = scores.sigmoid()\n else:\n probs = F.softmax(scores, dim=-1)\n return probs.split(num_inst_per_image, dim=0)\n" ]
[ [ "torch.nn.Linear", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.cat", "torch.unique", "torch.arange", "torch.nn.init.constant_", "torch.isfinite", "torch.nn.init.normal_", "torch.nn.functional.softmax", "torch.flatten", "torch.empty", "torch.sum" ] ]
whitemike889/tensorflow
[ "9aaf74d733a38cf587a75f2ffaa05d8a51d8c32b" ]
[ "tensorflow/python/keras/optimizer_v2/optimizer_v2.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Version 2 of class Optimizer.\"\"\"\n# pylint: disable=g-bad-name\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport functools\n\nimport six\n\nfrom tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.distribute import reduce_util as ds_reduce_util\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import revived_types\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\n\n\ndef _deduplicate_indexed_slices(values, indices):\n \"\"\"Sums `values` associated with any non-unique `indices`.\n\n Args:\n values: A `Tensor` with rank >= 1.\n indices: A one-dimensional integer `Tensor`, indexing into the first\n dimension of `values` (as in an IndexedSlices object).\n\n Returns:\n A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a\n de-duplicated version of `indices` and `summed_values` contains the sum of\n `values` slices associated with each unique index.\n \"\"\"\n unique_indices, new_index_positions = array_ops.unique(indices)\n summed_values = math_ops.unsorted_segment_sum(\n values, new_index_positions,\n array_ops.shape(unique_indices)[0])\n return (summed_values, unique_indices)\n\n\[email protected]_metaclass(abc.ABCMeta)\n@keras_export(\"keras.optimizers.Optimizer\")\nclass OptimizerV2(trackable.Trackable):\n \"\"\"Updated base class for optimizers.\n\n This class defines the API to add Ops to train a model. 
You never use this\n class directly, but instead instantiate one of its subclasses such as\n `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.\n\n ### Usage\n\n ```python\n # Create an optimizer with the desired parameters.\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n # `loss` is a callable that takes no argument and returns the value\n # to minimize.\n loss = lambda: 3 * var1 * var1 + 2 * var2 * var2\n # In graph mode, returns op that minimizes the loss by updating the listed\n # variables.\n opt_op = opt.minimize(loss, var_list=[var1, var2])\n opt_op.run()\n # In eager mode, simply call minimize to update the list of variables.\n opt.minimize(loss, var_list=[var1, var2])\n ```\n\n ### Custom training loop with Keras models\n\n In Keras models, sometimes variables are created when the model is first\n called, instead of construction time. Examples include 1) sequential models\n without input shape pre-defined, or 2) subclassed models. Pass var_list as\n callable in these cases.\n\n Example:\n ```python\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))\n model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))\n loss_fn = lambda: tf.keras.losses.mse(model(input), output)\n var_list_fn = lambda: model.trainable_weights\n for input, output in data:\n opt.minimize(loss_fn, var_list_fn)\n ```\n\n ### Processing gradients before applying them.\n\n Calling `minimize()` takes care of both computing the gradients and\n applying them to the variables. If you want to process the gradients\n before applying them you can instead use the optimizer in three steps:\n\n 1. Compute the gradients with `tf.GradientTape`.\n 2. Process the gradients as you wish.\n 3. Apply the processed gradients with `apply_gradients()`.\n\n Example:\n\n ```python\n # Create an optimizer.\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n\n # Compute the gradients for a list of variables.\n with tf.GradientTape() as tape:\n loss = <call_loss_function>\n vars = <list_of_variables>\n grads = tape.gradient(loss, vars)\n processed_grads = [process_gradient(g) for g in grads]\n grads_and_vars = zip(processed_grads, var_list)\n\n # grads_and_vars is a list of tuples (gradient, variable). Do whatever you\n # need to the 'gradient' part, for example cap them, etc.\n capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]\n\n # Ask the optimizer to apply the capped gradients.\n opt.apply_gradients(capped_grads_and_vars)\n ```\n\n ### Use with `tf.distribute.Strategy`.\n\n This optimizer class is `tf.distribute.Strategy` aware, which means it\n automatically sums gradients across all replicas. To average gradients,\n you divide your loss by the global batch size, which is done\n automatically if you use `tf.keras` built-in training or evaluation loops.\n See the `reduction` argument of your loss which should be set to\n `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or\n `tf.keras.losses.Reduction.SUM` for not.\n\n If you are not using these and you want to average gradients, you should use\n `tf.math.reduce_sum` to add up your per-example losses and then divide by the\n global batch size. Note that when using `tf.distribute.Strategy`, the first\n component of a tensor's shape is the *replica-local* batch size, which is off\n by a factor equal to the number of replicas being used to compute a single\n step. 
As a result, using `tf.math.reduce_mean` will give the wrong answer,\n resulting in gradients that can be many times too big.\n\n ### Variable Constraint\n\n All Keras optimizers respect variable constraints. If constraint function is\n passed to any variable, the constraint will be applied to the variable after\n the gradient has been applied to the variable.\n Important: If gradient is sparse tensor, variable constraint is not supported.\n\n ### Thread Compatibility\n\n The entire optimizer is currently thread compatible, not thread-safe. The user\n needs to perform synchronization if necessary.\n\n ### Slots\n\n Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage\n additional variables associated with the variables to train. These are called\n <i>Slots</i>. Slots have names and you can ask the optimizer for the names of\n the slots that it uses. Once you have a slot name you can ask the optimizer\n for the variable it created to hold the slot value.\n\n This can be useful if you want to log debug a training algorithm, report stats\n about the slots, etc.\n\n ### Hyper parameters\n\n These are arguments passed to the optimizer subclass constructor\n (the `__init__` method), and then passed to `self._set_hyper()`.\n They can be either regular Python values (like 1.0), tensors, or\n callables. If they are callable, the callable will be called during\n `apply_gradients()` to get the value for the hyper parameter.\n\n Hyper parameters can be overwritten through user code:\n\n Example:\n\n ```python\n # Create an optimizer with the desired parameters.\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n # `loss` is a callable that takes no argument and returns the value\n # to minimize.\n loss = lambda: 3 * var1 + 2 * var2\n # In eager mode, simply call minimize to update the list of variables.\n opt.minimize(loss, var_list=[var1, var2])\n # update learning rate\n opt.learning_rate = 0.05\n opt.minimize(loss, var_list=[var1, var2])\n ```\n\n ### Write a customized optimizer.\n If you intend to create your own optimization algorithm, simply inherit from\n this class and override the following methods:\n\n - resource_apply_dense (update variable given gradient tensor is dense)\n - resource_apply_sparse (update variable given gradient tensor is sparse)\n - create_slots (if your optimizer algorithm requires additional variables)\n - get_config (serialization of the optimizer, include all hyper parameters)\n \"\"\"\n\n def __init__(self, name, **kwargs):\n \"\"\"Create a new Optimizer.\n\n This must be called by the constructors of subclasses.\n Note that Optimizer instances should not bind to a single graph,\n and so shouldn't keep Tensors as member variables. Generally\n you should be able to use the _set_hyper()/state.get_hyper()\n facility instead.\n\n This class in stateful and thread-compatible.\n\n Args:\n name: A non-empty string. The name to use for accumulators created\n for the optimizer.\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. 
`lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n\n Raises:\n ValueError: If name is malformed.\n RuntimeError: If _create_slots has been overridden instead of\n _create_vars.\n \"\"\"\n allowed_kwargs = {\"clipnorm\", \"clipvalue\", \"lr\", \"decay\"}\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError(\"Unexpected keyword argument \"\n \"passed to optimizer: \" + str(k))\n # checks that all keyword arguments are non-negative.\n if kwargs[k] < 0:\n raise ValueError(\"Expected {} >= 0, received: {}\".format(k, kwargs[k]))\n\n self._use_locking = True\n self._init_set_name(name)\n self._hyper = {}\n # dict: {variable name : {slot name : variable}}\n self._slots = {}\n self._slot_names = []\n self._weights = []\n self._iterations = None\n\n # For implementing Trackable. Stores information about how to restore\n # slot variables which have not yet been created\n # (trackable._CheckpointPosition objects).\n # {slot_name :\n # {_var_key(variable_to_train): [checkpoint_position, ... ], ... },\n # ... }\n self._deferred_slot_restorations = {}\n\n decay = kwargs.pop(\"decay\", 0.0)\n if decay < 0.:\n raise ValueError(\"decay cannot be less than 0: {}\".format(decay))\n self._initial_decay = decay\n if \"clipnorm\" in kwargs:\n self.clipnorm = kwargs.pop(\"clipnorm\")\n if \"clipvalue\" in kwargs:\n self.clipvalue = kwargs.pop(\"clipvalue\")\n\n self._hypers_created = False\n\n def minimize(self, loss, var_list, grad_loss=None, name=None):\n \"\"\"Minimize `loss` by updating `var_list`.\n\n This method simply computes gradient using `tf.GradientTape` and calls\n `apply_gradients()`. If you want to process the gradient before applying\n then call `tf.GradientTape` and `apply_gradients()` explicitly instead\n of using this function.\n\n Args:\n loss: A callable taking no arguments which returns the value to minimize.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` since the variables are created at the first time `loss` is\n called.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n name: Optional name for the returned operation.\n\n Returns:\n An Operation that updates the variables in `var_list`. If `global_step`\n was not `None`, that operation also increments `global_step`.\n\n Raises:\n ValueError: If some of the variables are not `Variable` objects.\n\n \"\"\"\n grads_and_vars = self._compute_gradients(\n loss, var_list=var_list, grad_loss=grad_loss)\n\n return self.apply_gradients(grads_and_vars, name=name)\n\n def _compute_gradients(self, loss, var_list, grad_loss=None):\n \"\"\"Compute gradients of `loss` for the variables in `var_list`.\n\n This is the first part of `minimize()`. It returns a list\n of (gradient, variable) pairs where \"gradient\" is the gradient\n for \"variable\". Note that \"gradient\" can be a `Tensor`, an\n `IndexedSlices`, or `None` if there is no gradient for the\n given variable.\n\n Args:\n loss: A callable taking no arguments which returns the value to minimize.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` and the variables are created at the first time when `loss`\n is called.\n grad_loss: Optional. 
A `Tensor` holding the gradient computed for `loss`.\n\n Returns:\n A list of (gradient, variable) pairs. Variable is always present, but\n gradient can be `None`.\n\n Raises:\n TypeError: If `var_list` contains anything else than `Variable` objects.\n ValueError: If some arguments are invalid, or var_list is None.\n \"\"\"\n # TODO(josh11b): Test that we handle weight decay in a reasonable way.\n with backprop.GradientTape() as tape:\n if not callable(var_list):\n tape.watch(var_list)\n loss_value = loss()\n if callable(var_list):\n var_list = var_list()\n var_list = nest.flatten(var_list)\n with backend.name_scope(self._name + \"/gradients\"):\n grads = tape.gradient(loss_value, var_list, grad_loss)\n\n if hasattr(self, \"clipnorm\"):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, \"clipvalue\"):\n grads = [\n clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)\n for g in grads\n ]\n\n grads_and_vars = list(zip(grads, var_list))\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n\n return grads_and_vars\n\n def get_gradients(self, loss, params):\n \"\"\"Returns gradients of `loss` with respect to `params`.\n\n Arguments:\n loss: Loss tensor.\n params: List of variables.\n\n Returns:\n List of gradient tensors.\n\n Raises:\n ValueError: In case any gradient cannot be computed (e.g. if gradient\n function not implemented).\n \"\"\"\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name +\n \"/gradients\"):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError(\"Variable {} has `None` for gradient. \"\n \"Please make sure that all of your ops have a \"\n \"gradient defined (i.e. are differentiable). \"\n \"Common ops without gradient: \"\n \"K.argmax, K.round, K.eval.\".format(param))\n if hasattr(self, \"clipnorm\"):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, \"clipvalue\"):\n grads = [\n clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)\n for g in grads\n ]\n return grads\n\n def apply_gradients(self, grads_and_vars, name=None):\n \"\"\"Apply gradients to variables.\n\n This is the second part of `minimize()`. It returns an `Operation` that\n applies gradients.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n name: Optional name for the returned operation. Default to the name\n passed to the `Optimizer` constructor.\n\n Returns:\n An `Operation` that applies the specified gradients. 
If `global_step`\n was not None, that operation also increments `global_step`.\n\n Raises:\n TypeError: If `grads_and_vars` is malformed.\n ValueError: If none of the variables have gradients.\n \"\"\"\n grads_and_vars = _filter_grads(grads_and_vars)\n var_list = [v for (_, v) in grads_and_vars]\n\n with backend.name_scope(self._name):\n # Create iteration if necessary.\n with ops.init_scope():\n _ = self.iterations\n self._create_hypers()\n self._create_slots(var_list)\n\n apply_state = self._prepare(var_list)\n return distribute_ctx.get_replica_context().merge_call(\n functools.partial(self._distributed_apply, apply_state=apply_state),\n args=(grads_and_vars,),\n kwargs={\"name\": name})\n\n def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):\n \"\"\"`apply_gradients` using a `DistributionStrategy`.\"\"\"\n reduced_grads = distribution.extended.batch_reduce_to(\n ds_reduce_util.ReduceOp.SUM, grads_and_vars)\n var_list = [v for _, v in grads_and_vars]\n grads_and_vars = zip(reduced_grads, var_list)\n\n def apply_grad_to_update_var(var, grad):\n \"\"\"Apply gradient to variable.\"\"\"\n if isinstance(var, ops.Tensor):\n raise NotImplementedError(\"Trying to update a Tensor \", var)\n\n apply_kwargs = {}\n if isinstance(grad, ops.IndexedSlices):\n if var.constraint is not None:\n raise RuntimeError(\n \"Cannot use a constraint function on a sparse variable.\")\n if \"apply_state\" in self._sparse_apply_args:\n apply_kwargs[\"apply_state\"] = apply_state\n return self._resource_apply_sparse_duplicate_indices(\n grad.values, var, grad.indices, **apply_kwargs)\n\n if \"apply_state\" in self._dense_apply_args:\n apply_kwargs[\"apply_state\"] = apply_state\n update_op = self._resource_apply_dense(grad, var, **apply_kwargs)\n if var.constraint is not None:\n with ops.control_dependencies([update_op]):\n return var.assign(var.constraint(var))\n else:\n return update_op\n\n update_ops = []\n with backend.name_scope(name or self._name):\n for grad, var in grads_and_vars:\n scope_name = (\"\" if ops.executing_eagerly_outside_functions() else\n \"_\" + var.op.name)\n with backend.name_scope(\"update\" + scope_name):\n update_ops.extend(\n distribution.extended.update(\n var, apply_grad_to_update_var, args=(grad,), group=False))\n\n any_symbolic = any(isinstance(i, ops.Operation) or\n tf_utils.is_symbolic_tensor(i) for i in update_ops)\n if not context.executing_eagerly() or any_symbolic:\n # If the current context is graph mode or any of the update ops are\n # symbolic then the step update should be carried out under a graph\n # context. (eager updates execute immediately)\n with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access\n with ops.control_dependencies(update_ops):\n return self._iterations.assign_add(1).op\n\n return self._iterations.assign_add(1)\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n grads_and_vars = list(zip(grads, params))\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n return [self.apply_gradients(grads_and_vars)]\n\n def _set_hyper(self, name, value):\n \"\"\"set hyper `name` to value. 
value can be callable, tensor, numeric.\"\"\"\n if isinstance(value, trackable.Trackable):\n self._track_trackable(value, name, overwrite=True)\n if name not in self._hyper:\n self._hyper[name] = value\n else:\n prev_value = self._hyper[name]\n if (callable(prev_value)\n or isinstance(prev_value,\n (ops.Tensor, int, float,\n learning_rate_schedule.LearningRateSchedule))\n or isinstance(value, learning_rate_schedule.LearningRateSchedule)):\n self._hyper[name] = value\n else:\n backend.set_value(self._hyper[name], value)\n\n def _get_hyper(self, name, dtype=None):\n if not self._hypers_created:\n self._create_hypers()\n value = self._hyper[name]\n if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n return value\n if callable(value):\n value = value()\n if dtype:\n return math_ops.cast(value, dtype)\n else:\n return value\n\n def __getattribute__(self, name):\n \"\"\"Overridden to support hyperparameter access.\"\"\"\n try:\n return super(OptimizerV2, self).__getattribute__(name)\n except AttributeError as e:\n # Needed to avoid infinite recursion with __setattr__.\n if name == \"_hyper\":\n raise e\n # Backwards compatibility with Keras optimizers.\n if name == \"lr\":\n name = \"learning_rate\"\n if name in self._hyper:\n return self._get_hyper(name)\n raise e\n\n def __setattr__(self, name, value):\n \"\"\"Override setattr to support dynamic hyperparameter setting.\"\"\"\n # Backwards compatibility with Keras optimizers.\n if name == \"lr\":\n name = \"learning_rate\"\n if hasattr(self, \"_hyper\") and name in self._hyper:\n self._set_hyper(name, value)\n else:\n super(OptimizerV2, self).__setattr__(name, value)\n\n def get_slot_names(self):\n \"\"\"A list of names for this optimizer's slots.\"\"\"\n return self._slot_names\n\n def add_slot(self, var, slot_name, initializer=\"zeros\"):\n \"\"\"Add a new slot variable for `var`.\"\"\"\n if slot_name not in self._slot_names:\n self._slot_names.append(slot_name)\n var_key = _var_key(var)\n slot_dict = self._slots.setdefault(var_key, {})\n weight = slot_dict.get(slot_name, None)\n if weight is None:\n if isinstance(initializer, six.string_types) or callable(initializer):\n initializer = initializers.get(initializer)\n initial_value = functools.partial(\n initializer, shape=var.shape, dtype=var.dtype)\n else:\n initial_value = initializer\n strategy = distribute_ctx.get_strategy()\n with strategy.extended.colocate_vars_with(var):\n weight = tf_variables.Variable(\n name=\"%s/%s\" % (var._shared_name, slot_name), # pylint: disable=protected-access\n dtype=var.dtype,\n trainable=False,\n initial_value=initial_value)\n backend.track_variable(weight)\n slot_dict[slot_name] = weight\n self._restore_slot_variable(\n slot_name=slot_name, variable=var,\n slot_variable=weight)\n self._weights.append(weight)\n return weight\n\n def get_slot(self, var, slot_name):\n var_key = _var_key(var)\n slot_dict = self._slots[var_key]\n return slot_dict[slot_name]\n\n def _prepare(self, var_list):\n keys = set()\n for var in var_list:\n var_devices = (getattr(var, \"devices\", None) or # Distributed\n [var.device]) # Regular\n var_dtype = var.dtype.base_dtype\n for var_device in var_devices:\n keys.add((var_device, var_dtype))\n\n apply_state = {}\n for var_device, var_dtype in keys:\n apply_state[(var_device, var_dtype)] = {}\n with ops.device(var_device):\n self._prepare_local(var_device, var_dtype, apply_state)\n\n return apply_state\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n if \"learning_rate\" in self._hyper:\n lr_t = 
array_ops.identity(self._decayed_lr(var_dtype))\n apply_state[(var_device, var_dtype)][\"lr_t\"] = lr_t\n\n def _fallback_apply_state(self, var_device, var_dtype):\n \"\"\"Compatibility for subclasses that don't pass apply_state through.\"\"\"\n apply_state = {(var_device, var_dtype): {}}\n self._prepare_local(var_device, var_dtype, apply_state)\n return apply_state[(var_device, var_dtype)]\n\n def _create_hypers(self):\n if self._hypers_created:\n return\n # Iterate hyper values deterministically.\n for name, value in sorted(self._hyper.items()):\n if isinstance(value, ops.Tensor) or callable(value):\n continue\n else:\n self._hyper[name] = self.add_weight(\n name,\n shape=[],\n trainable=False,\n initializer=value,\n aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n self._hypers_created = True\n\n @property\n def iterations(self):\n \"\"\"Variable. The number of training steps this Optimizer has run.\"\"\"\n if self._iterations is None:\n self._iterations = self.add_weight(\n \"iter\",\n shape=[],\n dtype=dtypes.int64,\n trainable=False,\n aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n self._weights.append(self._iterations)\n return self._iterations\n\n @iterations.setter\n def iterations(self, variable):\n if self._iterations is not None:\n raise RuntimeError(\"Cannot set `iterations` to a new Variable after \"\n \"the Optimizer weights have been created\")\n self._iterations = variable\n self._weights.append(self._iterations)\n\n def _decayed_lr(self, var_dtype):\n \"\"\"Get decayed learning rate as a Tensor with dtype=var_dtype.\"\"\"\n lr_t = self._get_hyper(\"learning_rate\", var_dtype)\n if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):\n local_step = math_ops.cast(self.iterations, var_dtype)\n lr_t = math_ops.cast(lr_t(local_step), var_dtype)\n if self._initial_decay > 0.:\n local_step = math_ops.cast(self.iterations, var_dtype)\n decay_t = self._get_hyper(\"decay\", var_dtype)\n lr_t = lr_t / (1. 
+ decay_t * local_step)\n return lr_t\n\n @abc.abstractmethod\n def get_config(self):\n \"\"\"Returns the config of the optimimizer.\n\n An optimizer config is a Python dictionary (serializable)\n containing the configuration of an optimizer.\n The same optimizer can be reinstantiated later\n (without any saved state) from this configuration.\n\n Returns:\n Python dictionary.\n \"\"\"\n config = {\"name\": self._name}\n if hasattr(self, \"clipnorm\"):\n config[\"clipnorm\"] = self.clipnorm\n if hasattr(self, \"clipvalue\"):\n config[\"clipvalue\"] = self.clipvalue\n return config\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n \"\"\"Creates an optimizer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same optimizer from the config\n dictionary.\n\n Arguments:\n config: A Python dictionary, typically the output of get_config.\n custom_objects: A Python dictionary mapping names to additional Python\n objects used to create this optimizer, such as a function used for a\n hyperparameter.\n\n Returns:\n An optimizer instance.\n \"\"\"\n if \"lr\" in config:\n config[\"learning_rate\"] = config.pop(\"lr\")\n if \"learning_rate\" in config:\n if isinstance(config[\"learning_rate\"], dict):\n config[\"learning_rate\"] = learning_rate_schedule.deserialize(\n config[\"learning_rate\"], custom_objects=custom_objects)\n return cls(**config)\n\n def _serialize_hyperparameter(self, hyperparameter_name):\n \"\"\"Serialize a hyperparameter that can be a float, callable, or Tensor.\"\"\"\n value = self._hyper[hyperparameter_name]\n if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n return learning_rate_schedule.serialize(value)\n if callable(value):\n return value()\n if tensor_util.is_tensor(value):\n return backend.get_value(value)\n return value\n\n def variables(self):\n \"\"\"Returns variables of this Optimizer based on the order created.\"\"\"\n return self._weights\n\n @property\n def weights(self):\n \"\"\"Returns variables of this Optimizer based on the order created.\"\"\"\n return self._weights\n\n def get_weights(self):\n params = self.weights\n return backend.batch_get_value(params)\n\n # TODO(tanzheny): Maybe share this logic with base_layer.\n def set_weights(self, weights):\n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n \"You called `set_weights(weights)` on optimizer \" + self._name +\n \" with a weight list of length \" + str(len(weights)) +\n \", but the optimizer was expecting \" + str(len(params)) +\n \" weights. 
Provided weights: \" + str(weights)[:50] + \"...\")\n if not params:\n return\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\"Optimizer weight shape \" + str(pv.shape) +\n \" not compatible with \"\n \"provided weight shape \" + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n\n def add_weight(self,\n name,\n shape,\n dtype=None,\n initializer=\"zeros\",\n trainable=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE):\n\n if dtype is None:\n dtype = dtypes.float32\n if isinstance(initializer, six.string_types) or callable(initializer):\n initializer = initializers.get(initializer)\n\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError(\n \"Synchronization value can be set to \"\n \"VariableSynchronization.ON_READ only for non-trainable variables. \"\n \"You have specified trainable=True and \"\n \"synchronization=VariableSynchronization.ON_READ.\")\n else:\n # Set trainable to be false when variable is to be synced on read.\n trainable = False\n elif trainable is None:\n trainable = True\n\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n getter=base_layer_utils.make_variable,\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n trainable=trainable,\n use_resource=True,\n synchronization=synchronization,\n aggregation=aggregation)\n backend.track_variable(variable)\n\n return variable\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = backend.unique_object_name(\n generic_utils.to_snake_case(self.__class__.__name__),\n zero_based=zero_based)\n else:\n self._name = name\n\n def _assert_valid_dtypes(self, tensors):\n \"\"\"Asserts tensors are all valid types (see `_valid_dtypes`).\n\n Args:\n tensors: Tensors to check.\n\n Raises:\n ValueError: If any tensor is not a valid type.\n \"\"\"\n valid_dtypes = self._valid_dtypes()\n for t in tensors:\n dtype = t.dtype.base_dtype\n if dtype not in valid_dtypes:\n raise ValueError(\"Invalid type %r for %s, expected: %s.\" %\n (dtype, t.name, [v for v in valid_dtypes]))\n\n def _valid_dtypes(self):\n \"\"\"Valid types for loss, variables and gradients.\n\n Subclasses should override to allow other float types.\n\n Returns:\n Valid types for loss, variables and gradients.\n \"\"\"\n return set(\n [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])\n\n def _call_if_callable(self, param):\n \"\"\"Call the function if param is callable.\"\"\"\n return param() if callable(param) else param\n\n def _resource_apply_dense(self, grad, handle, apply_state):\n \"\"\"Add ops to apply dense gradients to the variable `handle`.\n\n Args:\n grad: a `Tensor` representing the gradient.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n apply_state: A dict which is used across multiple apply calls.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,\n **kwargs):\n \"\"\"Add ops to apply sparse gradients to `handle`, with repeated indices.\n\n Optimizers which override this method must deal with repeated indices. See\n the docstring of `_apply_sparse_duplicate_indices` for details. 
By default\n the correct behavior, to sum non-unique indices and their associated\n gradients, is enforced by first pre-processing `grad` and `indices` and\n passing them on to `_resource_apply_sparse`. Optimizers which deal correctly\n with duplicate indices may instead override this method to avoid the\n overhead of summing.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices may be repeated.\n **kwargs: May optionally contain `apply_state`\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n summed_grad, unique_indices = _deduplicate_indexed_slices(\n values=grad, indices=indices)\n return self._resource_apply_sparse(summed_grad, handle, unique_indices,\n **kwargs)\n\n def _resource_apply_sparse(self, grad, handle, indices, apply_state):\n \"\"\"Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has been\n de-duplicated. Optimizers which deal correctly with non-unique indices may\n instead override `_resource_apply_sparse_duplicate_indices` to avoid this\n overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices are unique.\n apply_state: A dict which is used across multiple apply calls.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_scatter_add(self, x, i, v):\n with ops.control_dependencies(\n [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\n return x.value()\n\n def _resource_scatter_update(self, x, i, v):\n with ops.control_dependencies(\n [resource_variable_ops.resource_scatter_update(x.handle, i, v)]):\n return x.value()\n\n @property\n @tracking.cached_per_instance\n def _dense_apply_args(self):\n return tf_inspect.getfullargspec(self._resource_apply_dense).args\n\n @property\n @tracking.cached_per_instance\n def _sparse_apply_args(self):\n return tf_inspect.getfullargspec(self._resource_apply_sparse).args\n\n # ---------------\n # For implementing the trackable interface\n # ---------------\n\n def _restore_slot_variable(self, slot_name, variable, slot_variable):\n \"\"\"Restore a newly created slot variable's value.\"\"\"\n variable_key = _var_key(variable)\n deferred_restorations = self._deferred_slot_restorations.get(\n slot_name, {}).pop(variable_key, [])\n # Iterate over restores, highest restore UID first to minimize the number\n # of assignments.\n deferred_restorations.sort(key=lambda position: position.restore_uid,\n reverse=True)\n for checkpoint_position in deferred_restorations:\n checkpoint_position.restore(slot_variable)\n\n def _create_or_restore_slot_variable(\n self, slot_variable_position, slot_name, variable):\n \"\"\"Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored. When executing eagerly, we create the slot variable with a\n restoring initializer.\n\n No new variables are created when graph building. Instead,\n _restore_slot_variable catches these after normal creation and adds restore\n ops to the graph. 
This method is nonetheless important when graph building\n for the case when a slot variable has already been created but `variable`\n has just been added to a dependency graph (causing us to realize that the\n slot variable needs to be restored).\n\n Args:\n slot_variable_position: A `trackable._CheckpointPosition` object\n indicating the slot variable `Trackable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n \"\"\"\n variable_key = _var_key(variable)\n slot_dict = self._slots.get(variable_key, {})\n slot_variable = slot_dict.get(slot_name, None)\n if (slot_variable is None and context.executing_eagerly() and\n slot_variable_position.is_simple_variable()\n # Defer slot variable creation if there is an active variable creator\n # scope. Generally we'd like to eagerly create/restore slot variables\n # when possible, but this may mean that scopes intended to catch\n # `variable` also catch its eagerly created slot variable\n # unintentionally (specifically make_template would add a dependency on\n # a slot variable if not for this case). Deferring is mostly harmless\n # (aside from double initialization), and makes variable creator scopes\n # behave the same way they do when graph building.\n and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access\n initializer = trackable.CheckpointInitialValue(\n checkpoint_position=slot_variable_position)\n slot_variable = self.add_slot(\n var=variable,\n initializer=initializer,\n slot_name=slot_name)\n # Slot variables are not owned by any one object (because we don't want to\n # save the slot variable if the optimizer is saved without the non-slot\n # variable, or if the non-slot variable is saved without the optimizer;\n # it's a dependency hypergraph with edges of the form (optimizer, non-slot\n # variable, variable)). So we don't _track_ slot variables anywhere, and\n # instead special-case this dependency and otherwise pretend it's a normal\n # graph.\n if slot_variable is not None:\n # If we've either made this slot variable, or if we've pulled out an\n # existing slot variable, we should restore it.\n slot_variable_position.restore(slot_variable)\n else:\n # We didn't make the slot variable. Defer restoring until it gets created\n # normally. 
We keep a list rather than the one with the highest restore\n # UID in case slot variables have their own dependencies, in which case\n # those could differ between restores.\n self._deferred_slot_restorations.setdefault(\n slot_name, {}).setdefault(variable_key, []).append(\n slot_variable_position)\n\n\ndef _filter_grads(grads_and_vars):\n \"\"\"Filter out iterable with grad equal to None.\"\"\"\n grads_and_vars = tuple(grads_and_vars)\n if not grads_and_vars:\n return grads_and_vars\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n if not filtered:\n raise ValueError(\"No gradients provided for any variable: %s.\" %\n ([v.name for _, v in grads_and_vars],))\n if vars_with_empty_grads:\n logging.warning(\n (\"Gradients does not exist for variables %s when minimizing the loss.\"),\n ([v.name for v in vars_with_empty_grads]))\n return filtered\n\n\ndef _var_key(var):\n \"\"\"Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Args:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n \"\"\"\n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if hasattr(var, \"_distributed_container\"):\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id\n\n\ndef _get_slot_key_from_var(var, slot_name):\n \"\"\"Get the slot key for the variable: var_name/slot_name.\"\"\"\n\n name = _var_key(var)\n return name + \"/\" + slot_name\n\n\nclass RestoredOptimizer(OptimizerV2):\n \"\"\"A non-functional Optimizer implementation for checkpoint compatibility.\n\n Holds slot variables and hyperparameters when an optimizer is restored from a\n SavedModel. These variables may be referenced in functions along with ops\n created by the original optimizer, but currently we do not support using the\n optimizer object iself (e.g. through `apply_gradients`).\n \"\"\"\n # TODO(allenl): Make the restored optimizer functional by tracing its apply\n # methods.\n\n def __init__(self):\n super(RestoredOptimizer, self).__init__(\"RestoredOptimizer\")\n self._hypers_created = True\n\n def get_config(self):\n # TODO(allenl): Save and restore the Optimizer's config\n raise NotImplementedError(\n \"Restoring functional Optimzers from SavedModels is not currently \"\n \"supported. Please file a feature request if this limitation bothers \"\n \"you.\")\n\nrevived_types.register_revived_type(\n \"optimizer\",\n lambda obj: isinstance(obj, OptimizerV2),\n versions=[revived_types.VersionedTypeRegistration(\n object_factory=lambda proto: RestoredOptimizer(),\n version=1,\n min_producer_version=1,\n min_consumer_version=1,\n setter=RestoredOptimizer._set_hyper # pylint: disable=protected-access\n )])\n" ]
[ [ "tensorflow.python.ops.variables.Variable", "tensorflow.python.keras.backend.get_graph", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.ops.array_ops.unique", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.util.nest.flatten", "tensorflow.python.framework.ops._get_graph_from_inputs", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.keras.backend.name_scope", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.utils.generic_utils.to_snake_case", "tensorflow.python.keras.optimizer_v2.learning_rate_schedule.deserialize", "tensorflow.python.distribute.distribution_strategy_context.get_replica_context", "tensorflow.python.keras.initializers.get", "tensorflow.python.ops.gradients.gradients", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor", "tensorflow.python.training.tracking.base.CheckpointInitialValue", "tensorflow.python.ops.clip_ops.clip_by_norm", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.ops.resource_variable_ops.resource_scatter_add", "tensorflow.python.keras.backend.set_value", "tensorflow.python.ops.resource_variable_ops.resource_scatter_update", "tensorflow.python.keras.backend.track_variable", "tensorflow.python.keras.optimizer_v2.learning_rate_schedule.serialize", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.python.ops.clip_ops.clip_by_value", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.keras.backend.get_value", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.keras.backend.batch_get_value" ] ]
Lain-progressivehouse/probspace-youtube
[ "04740862fb28fb9a38131554369d6c54eb560fc5" ]
[ "src/pseudo.py" ]
[ "import lightgbm as lgb\nfrom optuna.integration import lightgbm_tuner\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom scipy import stats\n\nfrom src import data_frame, feature_selection, learn_lgb\n\n\ndef get_predict_test(params, train_x, train_y, test_x, validation):\n preds_test = []\n for i, (tr_idx, va_idx) in enumerate(validation):\n tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]\n tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]\n\n dtrain = lgb.Dataset(tr_x, tr_y)\n dtest = lgb.Dataset(va_x, va_y, reference=dtrain)\n model = lgb.train(params, dtrain, 2000, valid_sets=dtest, verbose_eval=100)\n\n pred_test = model.predict(test_x, num_iteration=model.best_iteration)\n preds_test.append(pred_test)\n\n pred_test_mean = np.mean(preds_test, axis=0)\n pred_test_std = np.std(preds_test, axis=0)\n pred_test_std = stats.mstats.rankdata(pred_test_std) / test_x.shape[0]\n\n return pred_test_mean, pred_test_std\n\n\ndef get_pseudo_data_set(train_x, train_y, test_x: pd.DataFrame, threshold=0.2):\n sss = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)\n pred_test_mean, pred_test_std = get_predict_test(\n learn_lgb.params, train_x, train_y, test_x, sss.split(train_x, train_y // 2))\n\n train_x = pd.concat([train_x, test_x[pred_test_std < threshold].copy()]).reset_index(drop=True)\n train_y = pd.concat([train_y, pd.Series(pred_test_mean[pred_test_std < threshold])]).reset_index(drop=True)\n return train_x, train_y, test_x\n" ]
[ [ "sklearn.model_selection.StratifiedKFold", "numpy.mean", "numpy.std", "pandas.Series", "scipy.stats.mstats.rankdata" ] ]
mmaher22/iCV-SBR
[ "72effab621a9f8f5cee0d584b5a2f0e98524ffd6" ]
[ "Source Codes/CSRM_Tensorflow/csrm.py" ]
[ "# coding=utf-8\n\nfrom __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nfrom ome import OME\nimport data_process\nimport time\nimport os\nimport pandas as pd\nimport subprocess\ntf.set_random_seed(22)\nnp.random.seed(22)\n\ndef numpy_floatX(data):\n return np.asarray(data, dtype=np.float32)\n\nclass CSRM:\n def __init__(self, sess, n_items, dim_proj, hidden_units, memory_size,\n memory_dim, shift_range, controller_layer_numbers, batch_size,\n epoch, lr, keep_probability, no_dropout, display_frequency, \n item_freqs, expname, keval):\n self.sess = sess\n self.expname = expname\n self.n_items = n_items\n self.keval = keval\n self.dim_proj = dim_proj\n self.hidden_units = hidden_units\n self.memory_size = memory_size\n self.memory_dim = memory_dim\n self.shift_range = shift_range\n self.controller_layer_numbers = controller_layer_numbers\n self.batch_size = batch_size\n self.epoch = epoch\n self.lr = lr\n self.keep_probability = np.array([0.75, 0.5])\n self.no_dropout = np.array([1.0, 1.0])\n self.display_frequency = display_frequency\n self.controller_hidden_layer_size = 100\n self.controller_output_size = self.memory_dim + 1 + 1 + (self.shift_range * 2 + 1) + 1 + self.memory_dim * 3 + 1 + 1 + (self.shift_range * 2 + 1) + 1\n self.item_freqs = item_freqs\n self.train_loss_record = []\n self.valid_loss_record = []\n self.test_loss_record = []\n self.train_recall_record, self.train_mrr_record = [], []\n self.valid_recall_record, self.valid_mrr_record = [], []\n self.test_recall_record, self.test_mrr_record = [], []\n \n self.build_graph()\n\n\n def build_graph(self):\n self.params = self.init_params()\n self.x_input = tf.placeholder(tf.int64, [None, None])\n self.mask_x = tf.placeholder(tf.float32, [None, None])\n self.y_target = tf.placeholder(tf.int64, [None])\n self.len_x = tf.placeholder(tf.int64, [None])\n self.keep_prob = tf.placeholder(tf.float32, [None])\n self.starting = tf.placeholder(tf.bool)\n\n \"\"\" \n attention gru & global gru\n Output:\n global_session_representation\n attentive_session_represention\n \"\"\"\n self.n_timesteps = tf.shape(self.x_input)[1]\n self.n_samples = tf.shape(self.x_input)[0]\n\n emb = tf.nn.embedding_lookup(self.params['Wemb'], self.x_input)\n emb = tf.nn.dropout(emb, keep_prob=self.keep_prob[0])\n\n with tf.variable_scope('global_encoder'):\n cell_global = tf.nn.rnn_cell.GRUCell(self.hidden_units)\n init_state = cell_global.zero_state(self.n_samples, tf.float32)\n outputs_global, state_global = tf.nn.dynamic_rnn(cell_global, inputs=emb, sequence_length=self.len_x,\n initial_state=init_state, dtype=tf.float32)\n last_global = state_global # batch_size*hidden_units\n\n with tf.variable_scope('local_encoder'):\n cell_local = tf.nn.rnn_cell.GRUCell(self.hidden_units)\n init_statel = cell_local.zero_state(self.n_samples, tf.float32)\n outputs_local, state_local = tf.nn.dynamic_rnn(cell_local, inputs=emb, sequence_length=self.len_x,\n initial_state=init_statel, dtype=tf.float32)\n last_h = state_local # batch_size*hidden_units\n\n tmp_0 = tf.reshape(outputs_local, [-1, self.hidden_units])\n tmp_1 = tf.reshape(tf.matmul(tmp_0, self.params['W_encoder']),\n [self.n_samples, self.n_timesteps, self.hidden_units])\n tmp_2 = tf.expand_dims(tf.matmul(last_h, self.params['W_decoder']), 1) # batch_size*hidden_units\n tmp_3 = tf.reshape(tf.sigmoid(tmp_1 + tmp_2), [-1, self.hidden_units]) # batch_size,n_steps, hidden_units\n alpha = tf.matmul(tmp_3, tf.transpose(self.params['bl_vector']))\n res = tf.reduce_sum(alpha, axis=1)\n 
sim_matrix = tf.reshape(res, [self.n_samples, self.n_timesteps])\n\n att = tf.nn.softmax(sim_matrix * self.mask_x) * self.mask_x # batch_size*n_step\n p = tf.expand_dims(tf.reduce_sum(att, axis=1), 1)\n weight = att / p\n atttention_proj = tf.reduce_sum((outputs_local * tf.expand_dims(weight, 2)), 1)\n self.global_session_representation = last_global\n self.attentive_session_represention = atttention_proj\n\n self.ome_cell = OME(mem_size=(self.memory_size, self.memory_dim), shift_range=self.shift_range,\n hidden_units=self.hidden_units)\n\n self.state = tf.placeholder(dtype=tf.float32, shape=[None, self.hidden_units])\n self.memory_network_reads, self.memory_new_state = self.ome_cell(self.state, atttention_proj, self.starting)\n\n att_mean, att_var = tf.nn.moments(self.attentive_session_represention, axes=[1])\n self.attentive_session_represention = (self.attentive_session_represention - tf.expand_dims(att_mean, 1)) / tf.expand_dims(tf.sqrt(att_var + 1e-10), 1)\n glo_mean, glo_var = tf.nn.moments(self.global_session_representation, axes=[1])\n self.global_session_representation = (self.global_session_representation - tf.expand_dims(glo_mean, 1)) / tf.expand_dims(tf.sqrt(glo_var + 1e-10), 1)\n ntm_mean, ntm_var = tf.nn.moments(self.memory_network_reads, axes=[1])\n self.memory_network_reads = (self.memory_network_reads - tf.expand_dims(ntm_mean, 1)) / tf.expand_dims(tf.sqrt(ntm_var + 1e-10), 1)\n\n new_gate = tf.matmul(self.attentive_session_represention, self.params['inner_encoder']) + \\\n tf.matmul(self.memory_network_reads, self.params['outer_encoder']) + \\\n tf.matmul(self.global_session_representation, self.params['state_encoder'])\n new_gate = tf.nn.sigmoid(new_gate)\n self.narm_representation = tf.concat((self.attentive_session_represention, self.global_session_representation), axis=1)\n self.memory_representation = tf.concat((self.memory_network_reads, self.memory_network_reads), axis=1)\n final_representation = new_gate * self.narm_representation + (1 - new_gate) * self.memory_representation\n\n # prediction\n proj = tf.nn.dropout(final_representation, keep_prob=self.keep_prob[1])\n ytem = tf.matmul(self.params['Wemb'], self.params['bili']) # [n_items, 200]\n hypothesis = tf.matmul(proj, tf.transpose(ytem)) + 1e-10 # [batch_size, n_step, n_items]\n self.hypo = tf.nn.softmax(hypothesis)\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=hypothesis, labels=self.y_target))\n # optimize\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)\n\n self.saver = tf.train.Saver(max_to_keep=1)\n\n def init_weights(self, i_name, shape):\n sigma = np.sqrt(2. / shape[0])\n return tf.get_variable(name=i_name, dtype=tf.float32, initializer=tf.random_normal(shape) * sigma)\n\n def init_params(self):\n \"\"\"\n Global (not GRU) parameter. 
For the embeding and the classifier.\n \"\"\"\n params = dict()\n # embedding\n params['Wemb'] = self.init_weights('Wemb', (self.n_items, self.dim_proj))\n # attention\n params['W_encoder'] = self.init_weights('W_encoder', (self.hidden_units, self.hidden_units))\n params['W_decoder'] = self.init_weights('W_decoder', (self.hidden_units, self.hidden_units))\n params['bl_vector'] = self.init_weights('bl_vector', (1, self.hidden_units))\n # classifier\n params['bili'] = self.init_weights('bili', (self.dim_proj, 2 * self.hidden_units))\n # final gate\n params['inner_encoder'] = self.init_weights('inner_encoder', (self.hidden_units, 1))\n params['outer_encoder'] = self.init_weights('outer_encoder', (self.hidden_units, 1))\n params['state_encoder'] = self.init_weights('state_encoder', (self.hidden_units, 1))\n\n return params\n\n def get_minibatches_idx(self, n, minibatch_size, shuffle=False):\n \"\"\"\n Used to shuffle the dataset at each iteration.\n \"\"\"\n idx_list = np.arange(n, dtype=\"int32\")\n\n if shuffle:\n np.random.shuffle(idx_list)\n\n minibatches = []\n minibatch_start = 0\n for i in range(n // minibatch_size):\n minibatches.append(idx_list[minibatch_start: minibatch_start + minibatch_size])\n minibatch_start += minibatch_size\n\n if minibatch_start != n:\n # Make a minibatch out of what is left\n minibatches.append(idx_list[minibatch_start:])\n\n return zip(range(len(minibatches)), minibatches)\n\n def pred_evaluation(self, data, iterator, ntm_init_state, k=20):\n \"\"\"\n Compute recall@20 and mrr@20\n f_pred_prob: Theano fct computing the prediction\n prepare_data: usual prepare_data for that dataset.\n \"\"\"\n recall = 0.0\n mrr = 0.0\n evalutation_point_count = 0\n preds = []\n freqs = []\n for _, valid_index in iterator:\n batch_data = [data[0][t] for t in valid_index]\n batch_label = [data[1][t] for t in valid_index]\n feed_dict = self.construct_feeddict(batch_data, batch_label, self.no_dropout, ntm_init_state)\n pred, ntm_init_state = self.sess.run([self.hypo, self.memory_new_state], feed_dict=feed_dict)\n ranks = (pred.T > np.diag(pred.T[batch_label])).sum(axis=0) + 1 # np.diag(preds.T[targets]) each bacth target\"s score\n rank_ok = (ranks <= k)\n recall += rank_ok.sum()\n mrr += (1.0 / ranks[rank_ok]).sum()\n evalutation_point_count += len(ranks)\n for i in range(pred.shape[0]):\n series = pd.Series(data = pred[i])\n s = series.nlargest(k).index.values\n for r in s:\n preds.append(r)\n freqs.append(self.item_freqs[r])\n\n recall = numpy_floatX(recall) / evalutation_point_count\n mrr = numpy_floatX(mrr) / evalutation_point_count\n eval_score = (recall, mrr, len(list(set(preds))) / len(self.item_freqs.keys()), (np.mean(freqs) / max(self.item_freqs.values())) )\n\n return eval_score, ntm_init_state\n\n\n def construct_feeddict(self, batch_data, batch_label, keepprob, state, starting=False):\n x, mask, y, lengths = data_process.prepare_data(batch_data, batch_label)\n feed = {self.x_input: x, self.mask_x: mask, self.y_target: y, self.len_x: lengths, self.keep_prob: keepprob,\n self.state: state, self.starting: starting}\n # feed the initialized state into placeholder\n return feed\n\n\n def train(self, Train_data, Validation_data, Test_data, result_path='save/'):\n cPid = os.getpid()\n print(\" [*] Initialize all variables\")\n self.sess.run(tf.global_variables_initializer())\n print(\" [*] Initialization finished\")\n t1 = time.time()\n command_memory =\"python memoryLogger.py \"+ str(cPid) + \" \" + self.expname + \"train\"\n memory_task = subprocess.Popen(command_memory, 
stdout=subprocess.PIPE, shell=True)\n for epoch in range(self.epoch):\n epoch_loss = []\n session_memory_state = np.random.normal(0, 0.05, size=[1, self.hidden_units])\n starting = True\n kf = self.get_minibatches_idx(len(Train_data[0]), self.batch_size)\n for _, train_index in kf:\n # Select the random examples for this minibatch\n batch_label = [Train_data[1][t] for t in train_index]\n batch_data = [Train_data[0][t] for t in train_index]\n feed_dict = self.construct_feeddict(batch_data, batch_label, self.keep_probability, session_memory_state, starting)\n cost, _, session_memory_state = self.sess.run([self.loss, self.optimizer, self.memory_new_state], feed_dict=feed_dict)\n starting = False\n epoch_loss.append(cost)\n \n if epoch % 5 == 0:\n print('Epoch ', epoch, ' -- Loss ', np.mean(epoch_loss))\n print('**************************************************')\n \n train_time = time.time() - t1\n memory_task.kill()\n \n command_memory =\"python memoryLogger.py \"+ str(cPid) + \" \" + self.expname + \"train\"\n #memory_task = subprocess.Popen(command_memory, stdout=subprocess.PIPE, shell=True)\n #hit = [0.0, 0.0, 0.0, 0.0, 0.0]\n #MRR = [0.0, 0.0, 0.0, 0.0, 0.0]\n #cov = [[], [], [], [], []]\n #pop = [[], [], [], [], []]\n #Ks = [1, 3, 5, 10, 20]\n t1 = time.time()\n #for k in range(len(Ks)):\n kf_valid = self.get_minibatches_idx(len(Validation_data[0]), self.batch_size)\n valid_evaluation, _ = self.pred_evaluation(Validation_data, kf_valid, session_memory_state, k = self.keval)\n hit = valid_evaluation[0]\n MRR = valid_evaluation[1]\n cov = valid_evaluation[2]\n pop = valid_evaluation[3]\n test_time = time.time() - t1\n print('==================================================')\n #memory_task.kill()\n return hit, MRR, cov, pop, train_time, test_time\n\n\n\n\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.moments", "tensorflow.reshape", "numpy.mean", "tensorflow.sqrt", "tensorflow.nn.embedding_lookup", "tensorflow.nn.softmax", "tensorflow.global_variables_initializer", "tensorflow.random_normal", "tensorflow.set_random_seed", "numpy.random.normal", "tensorflow.shape", "tensorflow.concat", "tensorflow.sigmoid", "tensorflow.train.Saver", "tensorflow.transpose", "tensorflow.variable_scope", "numpy.arange", "numpy.sqrt", "tensorflow.nn.dynamic_rnn", "tensorflow.nn.sigmoid", "tensorflow.nn.dropout", "numpy.array", "tensorflow.train.AdamOptimizer", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.placeholder", "tensorflow.reduce_sum", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.asarray", "numpy.random.seed", "tensorflow.nn.rnn_cell.GRUCell", "pandas.Series", "numpy.diag" ] ]
deep110/aoc-solutions
[ "e016d25e8fa822fca45bef672af2a679dd3483f8" ]
[ "2018/day11/solution.py" ]
[ "from os import path\nimport numpy as np\n\n\nwith open(path.join(path.dirname(__file__), \"input.txt\")) as f:\n ms = f.readlines()\n\n\ngrid_serial = 7400\n\n\ndef power(x, y):\n rid = x + 10\n pl = rid * y + grid_serial\n\n t = pl * rid\n if len(str(t)) >= 3:\n return int(str(t)[-3]) - 5\n else:\n return -5\n\n\ngrid = np.zeros((300, 300), dtype=np.int)\n\nfor i, _x in enumerate(grid):\n for j, _y in enumerate(_x):\n grid[i, j] = power(i + 1, j + 1)\n\n\ndef find_largest(size):\n te_si = 300 - size\n q = np.zeros((te_si, te_si), dtype=np.int)\n\n for i in range(te_si):\n for j in range(te_si):\n q[i, j] = np.sum(grid[i: i + size, j:j + size])\n\n z = np.argmax(q)\n p, q = z // te_si, z % te_si\n\n return p+1, q+1, size, np.sum(grid[p: p + size, q:q + size])\n\n\nk = []\nfor i in range(1, 399):\n _o = find_largest(i)\n k.append(_o)\n\nprint(max(k, key=lambda x: x[3]))\n\n\n\ndef part1():\n pass\n\ndef part2():\n pass\n\nprint(\"Part1 solution: \", part1())\nprint(\"Part2 solution: \", part2())\n" ]
[ [ "numpy.sum", "numpy.argmax", "numpy.zeros" ] ]
gautam-sharma1/FacialKeypointDetection
[ "ad542bc5d01e1cdf7c0291850e29b5aa5767d8d3" ]
[ "feature_maps.py" ]
[ "# Construct the dataset\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import DataLoader\nfrom model import Net\nimport cv2 as cv2\nfrom transforms import *\n\ndata_transform = transforms.Compose([Rescale(250),\n RandomCrop(224), Normalize(), ToTensor()\n ])\n\ntest_dataset = FacialKeypointsDataset(csv_file='./data/test_frames_keypoints.csv',\n dataset_location='./data/test',\n transforms=data_transform)\ntest_loader = DataLoader(test_dataset,\n batch_size=10,\n shuffle=True,\n num_workers=2)\n\n\ndef main():\n dataset_iter = iter(test_loader)\n\n # get the first sample\n sample = next(dataset_iter)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n image = sample['image']\n image = image.float()\n model = Net()\n model.load_state_dict(torch.load('./saved_models/keypoints_model_1-2.pt',map_location=torch.device('cpu')))\n model.to(device)\n model.eval()\n\n\n with torch.no_grad():\n # convert filter to numpy\n first_conv2D_filter = model.conv5.weight.data.numpy()\n src_image = image[0].numpy()\n src_image = np.transpose(src_image, (1, 2, 0)) # transpose to go from torch to numpy image\n\n # select a 2D filter from a 4D filter output\n plt.imshow(first_conv2D_filter[0][0],cmap=\"gray\")\n plt.pause(0.001)\n filtered_image = cv2.filter2D(np.squeeze(src_image), -1, first_conv2D_filter[0][0])\n plt.imshow(filtered_image, cmap=\"gray\")\n plt.waitforbuttonpress()\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "matplotlib.pyplot.waitforbuttonpress", "torch.utils.data.DataLoader", "matplotlib.pyplot.pause", "matplotlib.pyplot.imshow" ] ]
tevenfeng/TjuAgvCar
[ "7ce94703d827a063acb89816f7141086c8a2ac98" ]
[ "TrainCNN/TensorflowUtil.py" ]
[ "#!/usr/bin/python2\r\n# coding:utf8\r\n\r\nimport tensorflow as tf\r\n\r\n\r\ndef weight_variable(shape):\r\n '''Initialize weight variable with specified shape.\r\n shape: [kernel_shape[0], kernel_shape[1], num_of_channels, num_of_kernels]'''\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)\r\n\r\n\r\ndef bias_variable(shape):\r\n '''Initialize bias variable with specified shape.\r\n shape: number of bias, equals to the number of kernels in the same layer.'''\r\n initial = tf.constant(0.0, shape=shape)\r\n return tf.Variable(initial)\r\n\r\n\r\ndef conv2d(x, W, stride):\r\n '''Get 2d convolutional layer with given parameters.\r\n x: input of the layer\r\n W: weight tensor\r\n stride: single number determining the stride\r\n padding: we set default padding to \\'SAME\\''''\r\n return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')\r\n\r\n\r\ndef conv_layer(x, kernel_shape=(3, 3), stride=1, num_of_kernels=32, use_bias=False):\r\n '''Function to form a whole group of convolutional layer including\r\n a conv2d layer and a relu layer(PS: currently max pooling layer is ignored).'''\r\n W = weight_variable([kernel_shape[0], kernel_shape[1], x.get_shape()[-1].value, num_of_kernels])\r\n if use_bias:\r\n b = bias_variable([num_of_kernels])\r\n return tf.nn.relu(conv2d(x, W, stride=stride) + b), W\r\n else:\r\n return tf.nn.relu(conv2d(x, W, stride=stride)), W\r\n\r\n\r\ndef fc_layer(x, num_of_neurons, activation=tf.tanh, use_bias=True, dropout=False):\r\n '''Function to form a fully connected layer with the given parameters.\r\n x: input of this fully connected layer\r\n num_of_neurons: number of neurons included in this layer\r\n activation: activation function type\r\n '''\r\n W = weight_variable([x.get_shape()[-1].value, num_of_neurons])\r\n h, b = None, None\r\n if use_bias:\r\n b = bias_variable([num_of_neurons])\r\n h = activation(tf.matmul(x, W) + b)\r\n else:\r\n h = activation(tf.matmul(x, W))\r\n\r\n if dropout:\r\n keep_prob = tf.placeholder(tf.float32)\r\n h_drop = tf.nn.dropout(h, keep_prob)\r\n return h_drop, W, b, keep_prob\r\n else:\r\n return h, W, b, None\r\n\r\n\r\ndef flattened(x):\r\n product = 1\r\n for d in x.get_shape():\r\n if d.value is not None:\r\n product *= d.value\r\n return tf.reshape(x, [-1, product])\r\n" ]
[ [ "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.Variable", "tensorflow.reshape", "tensorflow.truncated_normal", "tensorflow.constant", "tensorflow.placeholder", "tensorflow.nn.dropout" ] ]
adityabingi/U-Net
[ "4c8e1a9d44aba7bbd5acb6ac0cd4e7d76f5cc7ac" ]
[ "src/main.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport functools\nimport argparse\nimport os\nimport cv2\n\nfrom DataPreparation import get_baseline_dataset, split_data, augment\nfrom Model import Model\n\n\n_IMG_SHAPE = (512, 512, 3)\n_BATCH_SIZE = 1\n\n\nclass FilePaths:\n\tfnAccuracy = '../model/accuracy.txt'\n\tfnTrain = '../data/train/'\n\tfnLabels = '../data/train_masks/'\n\tfnLabelsCsv ='../data/train_masks.csv'\n\tfnInfer = '../data/test/'\n\tfnResults ='../results/'\n\n\ndef preprocess_function(train):\n\n\tif(train):\n\t\tcfg = {\n\t\t'resize': [_IMG_SHAPE[0], _IMG_SHAPE[1]],\n\t\t'scale': 1 / 255.,\n\t\t'hue_delta': 0.1,\n\t\t'horizontal_flip': True,\n\t\t'width_shift_range': 0.1,\n\t\t'height_shift_range': 0.1\n\t\t}\n\telse:\n\t\tcfg = {\n\t\t'resize': [_IMG_SHAPE[0], _IMG_SHAPE[1]],\n\t\t'scale': 1 / 255.\n\t\t}\n\n\tpreprocessing_fn = functools.partial(augment, **cfg)\n\n\treturn preprocessing_fn\n\n# Helper function to write u_net prediction to an image\n\ndef preds_to_img(pred, actual_img, fname):\n\n\tscale = 255.\n\tpred = np.reshape(pred,(_IMG_SHAPE[0], _IMG_SHAPE[1]))\n\tpred = pred[:,:]*scale\n\t#pred = pred.astype(int)\n\tpred = np.reshape(pred,(_IMG_SHAPE[0],_IMG_SHAPE[1],1))\n\tcv2.imwrite(os.path.join(FilePaths.fnResults, \"{}.jpg\".format(fname)), actual_img)\n\tcv2.imwrite(os.path.join(FilePaths.fnResults, \"{}_result.jpg\".format(fname)), pred)\n\n\ndef main():\n\tprint(\"Inside main\")\n\t# optional command line args\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--train\", help=\"train the NN\", action=\"store_true\")\n\t#parser.add_argument(\"--validate\", help=\"validate the NN\", action=\"store_true\")\n\tparser.add_argument(\"--predict\",nargs=1)\n\targs = parser.parse_args()\n\tif args.train:\n\t# load training data, create TF model\n\n\t\tx_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = split_data(FilePaths)\n\n\t\ttrain_batches_per_epoch = int(len(x_train_filenames)/_BATCH_SIZE) + 1\n\n\t\tno_of_val_batches = int(len(x_val_filenames)/_BATCH_SIZE) + 1\n\n\t\ttrain_ds = get_baseline_dataset(x_train_filenames,\n\t\t\ty_train_filenames,\n\t\t\tbatch_size=_BATCH_SIZE,\n\t\t\tpreproc_fn=preprocess_function(train=True),\n\t\t\t)\n\n\t\tval_ds = get_baseline_dataset(x_val_filenames,\n\t\t\ty_val_filenames,\n\t\t\tbatch_size=_BATCH_SIZE,\n\t\t\tpreproc_fn= preprocess_function(train=False),\n\t\t\t)\n\n\n\t\tmodel = Model(val_dataset =val_ds, train_dataset=train_ds, mustRestore = False)\n\n\t\tmodel.train(train_batches_per_epoch, no_of_val_batches, FilePaths)\n\t#elif args.validate:\n\t\t#model = Model(val_dataset =val_ds, mustRestore = False)\n\t\t#model.validate()\n\t# infer on test image\n\telif args.predict:\n\t# We pass test_img as dummy label to maintain dataset structure\n\t\tx_val_filenames, y_val_filenames = [args.predict[0]]*32, [args.predict[0]]*32\n\t\tval_ds = get_baseline_dataset(x_val_filenames,\n\t\t\ty_val_filenames, \n\t\t\tbatch_size=_BATCH_SIZE,\n\t\t\tpreproc_fn= preprocess_function(train=False),\n\t\t\tthreads=1)\n\t\tprint(open(FilePaths.fnAccuracy).read())\n\t\tmodel = Model(val_dataset =val_ds, mustRestore = True)\n\t\tprediction = model.infer()\n\t\tfname = args.predict[0].split('/')[-1].split('.')[0]\n\t\ttest_img = cv2.imread(args.predict[0])\n\t\ttest_img = cv2.resize(test_img, (_IMG_SHAPE[0], _IMG_SHAPE[1]))\n\t\tpreds_to_img(prediction, test_img, fname)\nif __name__ == '__main__':\n\tmain()\n" ]
[ [ "numpy.reshape" ] ]
afshinrahimi/mmner
[ "7e181594c23552799cec657b84476df3705210e0" ]
[ "config.py" ]
[ "import numpy as np\nimport os\nimport random\nimport logging\nimport tarfile\nimport pdb\n#from utils import collect_vocab_and_tags_panx, collect_embedding_vocabs, write_vocab_tags_chars_embs, trim_embs\nfrom collections import defaultdict\n# shared global variables to be imported from model also\nUNK = \"$UNK$\"\nNUM = \"$NUM$\"\nNONE = \"O\"\n\n\n# special error message\nclass MyIOError(Exception):\n def __init__(self, filename):\n # custom error message\n message = \"\"\"\nERROR: Unable to locate file {}.\n\nFIX: Have you tried running python build_data.py first?\nThis will build vocab file from your train, test and dev sets and\ntrimm your word vectors.\n\"\"\".format(filename)\n super(MyIOError, self).__init__(message)\n\n\n\nclass Config():\n def __init__(self, args, load=True):\n \"\"\"Initialize hyperparameters and load vocabs\n\n Args:\n load_embeddings: (bool) if True, load embeddings into\n np array, else None\n\n \"\"\"\n self.dir_output = args.dir_output\n self.dir_input = args.dir_input\n self.dir_wikiann = args.dir_ner\n self.dir_model = os.path.join(self.dir_output, \"weights\")\n self.dir_model_highres = os.path.join(self.dir_output, 'weights')\n self.dir_unlabeled = os.path.join(self.dir_output, 'multiannotations')\n self.dir_bccannotations = os.path.join(self.dir_output, 'bccannotations')\n self.langtaglang_file = os.path.join(self.dir_output, 'langtaglang.json')\n\n self.params_dir = self.dir_model\n self.filename_words = {}\n for lang in set(self.highres_langs + self.lowres_langs):\n self.filename_words[lang] = os.path.join(self.dir_input, f'builtdata_{lang}/words.txt')\n\n self.filename_tags = os.path.join(self.dir_input, 'tags.txt')\n self.filename_chars = os.path.join(self.dir_input, 'chars.txt')\n self.path_log = os.path.join(self.dir_output, \"log.txt\")\n self.filename_trimmed = {}\n for lang in set(self.highres_langs + self.lowres_langs):\n self.filename_trimmed[lang] = os.path.join(self.dir_input, f'builtdata_{lang}/trimmed_embs.npz')\n\n # directory for training outputs\n if not os.path.exists(self.dir_output):\n os.mkdir(self.dir_output)\n os.mkdir(self.dir_model)\n os.mkdir(self.dir_unlabeled)\n os.mkdir(self.dir_bccannotations)\n\n\n\n self.logger = get_logger(self.path_log)\n\n # load if requested (default)\n if load:\n for lang in set(self.highres_langs + self.lowres_langs):\n self.load(lang)\n\n\n def load(self, lang):\n \"\"\"Loads vocabulary, processing functions and embeddings\n\n Supposes that build_data.py has been run successfully and that\n the corresponding files have been created (vocab and trimmed GloVe\n vectors)\n\n \"\"\"\n logging.info('loading {}'.format(lang))\n # 1. vocabulary\n self.vocab_words[lang] = load_vocab(self.filename_words[lang])\n for lang, vocab_word in self.vocab_words.items():\n self.word_vocab[lang] = {v:k for k, v in vocab_word.items()}\n self.vocab_tags = load_vocab(self.filename_tags)\n self.vocab_chars = load_vocab(self.filename_chars)\n self.tag_vocab = {v:k for k, v in self.vocab_tags.items()}\n self.nwords[lang] = len(self.vocab_words[lang])\n self.nchars = len(self.vocab_chars)\n self.ntags = len(self.vocab_tags)\n\n # 2. get processing functions that map str -> id\n self.processing_word[lang] = get_processing_word(self.vocab_words[lang],\n self.vocab_chars, lowercase=False, chars=self.use_chars, trim_lang=True, lowerchars=True if self.lowres_langs==['de'] else False)\n self.processing_tag = get_processing_word(self.vocab_tags,\n lowercase=False, allow_unk=False, trim_lang=False)\n \n # 3. 
get pre-trained embeddings\n self.embeddings[lang] = (get_trimmed_glove_vectors(self.filename_trimmed[lang], self.capacity)\n if self.use_pretrained else None)\n #logging.info('loaded {} with vocab size {} and embedding size {}'.format(lang, self.nwords[lang], self.embeddings[lang].shape[0]))\n\n\n # general config\n highres_langs = ['af', 'ar', 'bg', 'bn', 'bs', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et',\n 'fa', 'fi', 'fr', 'he', 'hi', 'hr', 'hu', 'id', 'it', 'lt', 'lv', 'mk', 'ms', 'nl',\n 'no', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sv', 'ta', 'tl',\n 'tr', 'uk', 'vi']\n\n #highres_langs = ['en']\n #highres_langs = highres_langs[20:25]\n lowres_langs = ['af', 'ar', 'bg', 'bn', 'bs', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et',\n 'fa', 'fi', 'fr', 'he', 'hi', 'hr', 'hu', 'id', 'it', 'lt', 'lv', 'mk', 'ms', 'nl',\n 'no', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sv', 'ta', 'tl',\n 'tr', 'uk', 'vi']\n\n\n #filenames\n\n\n\n\n\n #lowres_langs = ['en']\n\n lang_script = {}\n # vocab (created from dataset with build_data.py)\n #filename_words = \"./datasets/builtdata_cmu8/words.txt\"\n vocab_words = {}\n word_vocab = {}\n nwords = {}\n embeddings = {}\n processing_word = {}\n capacity = 80000\n num_unsup_iter = 1000\n num_unsup_gold = 0\n num_unsup_epochs = 1000\n unsuplr = 5e-3\n unsupsuplr = 5e-4\n unsuptopk = 10\n # glove files\n #filename_glove = \"data/glove.6B/glove.6B.{}d.txt\".format(dim_word)\n # trimmed embeddings (created from glove_filename with build_data.py)\n #filename_trimmed = \"./datasets/builtdata_cmu8/trimmed_embs_cmu.npz\"\n use_pretrained = True\n\n\n # embeddings\n dim_word = 300\n dim_char = 100\n\n\n # dataset\n #filename_dev = \"datasets/ner/uk.dev.multi\"\n #filename_test = \"datasets/ner/uk.test.multi\"\n #filename_train = \"datasets/ner/uk.train.100.multi\"\n\n #filename_dev = filename_test = filename_train = \"ner/data/test.txt\" # test\n\n max_iter = None # if not None, max number of examples in Dataset\n\n #nepoch for highres 20 for lowres 100\n # training\n train_embeddings = False\n nepochs = 100\n dropout = 0.5\n batch_size = 1\n test_batch_size = 100\n lr_method = \"adam\"\n lr = 0.001\n lr_decay = 0.9\n clip = -1 # if negative, no clipping\n nepoch_no_imprv = 3\n lowres_nepoch_no_imprv = 5\n dev_dropout = 1.0\n\n # model hyperparameters\n hidden_size_char = 100 # lstm on chars\n hidden_size_lstm = 300 # lstm on word embeddings\n\n # NOTE: if both chars and crf, only 1.6x slower on GPU\n use_crf = True # if crf, training is 1.7x slower on CPU\n use_chars = True # if char embedding, training is 3.5x slower on CPU\n\n entropy_loss_weight = 0\n\ndef get_processing_word(vocab_words=None, vocab_chars=None,\n lowercase=False, chars=False, allow_unk=True, trim_lang=False, lowerchars=False):\n \"\"\"Return lambda function that transform a word (string) into list,\n or tuple of (list, id) of int corresponding to the ids of the word and\n its corresponding characters.\n\n Args:\n vocab: dict[word] = idx\n\n Returns:\n f(\"cat\") = ([12, 4, 32], 12345)\n = (list of char ids, word id)\n\n \"\"\"\n def f(word):\n # 0. get chars of words\n if vocab_chars is not None and chars == True:\n char_ids = []\n for char in word if (not trim_lang or word in ['$NUM$', '$UNK$']) else word[3:].lower() if lowerchars else word[3:]:\n # ignore chars out of vocabulary\n if char in vocab_chars:\n char_ids += [vocab_chars[char]]\n\n # 1. preprocess word\n if lowercase:\n word = word.lower()\n if word.isdigit():\n word = NUM\n\n # 2. 
get id of word\n if vocab_words is not None:\n if word in vocab_words:\n word = vocab_words[word]\n elif word.lower() in vocab_words:\n word = vocab_words[word.lower()]\n else:\n if allow_unk:\n word = vocab_words[UNK]\n else:\n raise Exception(\"Unknow key is not allowed. Check that \"\\\n \"your vocab (tags?) is correct word missing: {}\".format(word))\n\n # 3. return tuple char ids, word id\n if vocab_chars is not None and chars == True:\n return char_ids, word\n else:\n return word\n\n return f\n\ndef count_unks(datasets, config):\n print(\"lang sents words ne unk nerunk\")\n for lang in datasets:\n sentences = datasets[lang]['train'].data\n unkid = config.vocab_words[lang][UNK]\n total_unk_count = 0\n total_word_count = 0\n total_ounk_count = 0\n total_ne = 0\n for sentid, sentencelabel in enumerate(sentences):\n sentence, labelids = sentencelabel\n vocabids = [word[1] for word in sentence]\n unk_indices = [i for i in range(len(vocabids)) if vocabids[i] == unkid]\n unk_labels = [labelids[i] for i in unk_indices]\n o_unks = [1 for l in unk_labels if l==5]\n num_ne = sum([1 for l in labelids if l!=5])\n num_o_unks = sum(o_unks)\n total_ounk_count += num_o_unks\n unk_count = sum([1 if vid==unkid else 0 for vid in vocabids])\n word_count = len(vocabids)\n total_unk_count += unk_count\n total_word_count += word_count\n total_ne += num_ne\n total_NER_unk_count = total_unk_count - total_ounk_count\n num_sentences = sentid\n print(f\"{lang} {num_sentences+1} {total_word_count} {total_ne} {total_unk_count} {total_NER_unk_count}\")\n\n\n\ndef load_vocab(filename):\n \"\"\"Loads vocab from a file\n\n Args:\n filename: (string) the format of the file must be one word per line.\n\n Returns:\n d: dict[word] = index\n\n \"\"\"\n try:\n d = dict()\n with open(filename) as f:\n for idx, word in enumerate(f):\n word = word.strip()\n d[word] = idx\n\n except IOError:\n raise MyIOError(filename)\n return d\n\nclass CoNLLDatasetList(object):\n \"\"\"Class that iterates over CoNLL Dataset\n\n __iter__ method yields a tuple (words, tags)\n words: list of raw words\n tags: list of raw tags\n\n If processing_word and processing_tag are not None,\n optional preprocessing is appplied\n\n Example:\n ```python\n data = CoNLLDataset(filename)\n for sentence, tags in data:\n pass\n ```\n\n \"\"\"\n def __init__(self, filename, processing_word=None, processing_tag=None,\n max_iter=None):\n \"\"\"\n Args:\n filename: path to the file\n processing_words: (optional) function that takes a word as input\n processing_tags: (optional) function that takes a tag as input\n max_iter: (optional) max number of sentences to yield\n\n \"\"\"\n self.filename = filename\n self.processing_word = processing_word\n self.processing_tag = processing_tag\n self.max_iter = max_iter\n self.length = None\n self.data = []\n self.sentences = []\n self.nwords = 0\n self.nunks = 0\n self.loaddata()\n\n\n def __iter__(self):\n for i, item in enumerate(self.data):\n if self.max_iter and i > self.max_iter:\n return\n yield item[0], item[1]\n\n\n\n def loaddata(self):\n niter = 0\n if type(self.filename) == str:\n f = open(self.filename, 'r', encoding='utf-8')\n else:\n #an open file is passed on\n f = self.filename\n words, tags = [], []\n sentence = []\n for line in f:\n if type(self.filename) != str:\n line = line.decode('utf-8')\n line = line.strip()\n if (len(line) == 0 or line.startswith(\"-DOCSTART-\")):\n if len(words) != 0:\n niter += 1\n if self.max_iter is not None and niter > self.max_iter:\n break\n self.data.append([words, tags])\n 
words, tags = [], []\n self.sentences.append(sentence)\n sentence = []\n else:\n #Afshin: change the delimiter from ' ' to default because tabs\n ls = line.split()\n word, tag = ls[0],ls[-1]\n self.nwords += 1\n sentence += [word]\n if self.processing_word is not None:\n word = self.processing_word(word)\n if self.processing_tag is not None:\n tag = self.processing_tag(tag)\n words += [word]\n tags += [tag]\n f.close()\n def __len__(self):\n return len(self.data)\n\n def sample(self, num_instances):\n return random.sample(self.data[0:self.max_iter], k=num_instances)\n\n\n\n\ndef get_vocabs(datasets):\n \"\"\"Build vocabulary from an iterable of datasets objects\n\n Args:\n datasets: a list of dataset objects\n\n Returns:\n a set of all the words in the dataset\n\n \"\"\"\n print(\"Building vocab...\")\n vocab_words = set()\n vocab_tags = set()\n for dataset in datasets:\n for words, tags in dataset:\n vocab_words.update(words)\n vocab_tags.update(tags)\n print(\"- done. {} tokens\".format(len(vocab_words)))\n return vocab_words, vocab_tags\n\n\ndef get_char_vocab(dataset):\n \"\"\"Build char vocabulary from an iterable of datasets objects\n\n Args:\n dataset: a iterator yielding tuples (sentence, tags)\n\n Returns:\n a set of all the characters in the dataset\n\n \"\"\"\n vocab_char = set()\n for words, _ in dataset:\n for word in words:\n vocab_char.update(word)\n\n return vocab_char\n\ndef get_trimmed_glove_vectors(filename, capacity=0):\n \"\"\"\n Args:\n filename: path to the npz file\n\n Returns:\n matrix of embeddings (np array)\n\n \"\"\"\n try:\n with np.load(filename) as data:\n if capacity:\n assert capacity > data['embeddings'].shape[0], 'capacity less than vocab size'\n logging.info('load embeddings with shape {} from {}'.format(str(data['embeddings'].shape), filename))\n extra_rows = capacity - data['embeddings'].shape[0]\n if extra_rows > 0:\n extra_embs = np.zeros((extra_rows, data['embeddings'].shape[1]), dtype=np.float32)\n return np.vstack((data['embeddings'], extra_embs))\n else:\n logging.info('load embeddings with shape {} from {}'.format(str(data['embeddings'].shape), filename))\n return data[\"embeddings\"]\n\n except IOError:\n raise MyIOError(filename)\n\ndef get_logger(filename):\n \"\"\"Return a logger instance that writes in filename\n\n Args:\n filename: (string) path to log.txt\n\n Returns:\n logger: (instance of logger)\n\n \"\"\"\n logger = logging.getLogger('logger')\n logger.setLevel(logging.DEBUG)\n logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(logging.Formatter(\n '%(asctime)s:%(levelname)s: %(message)s'))\n logging.getLogger().addHandler(handler)\n\n return logger\n\n\n\n\n\ndef read_dataset_panx_individual(config, langid, max_iter=None):\n \"\"\"\n :param data_dir: where multilingual conll datasets are located (e.g. french dataset has fr- prefix)\n :param highres_langs: code for high resource languages (e.g. ['en', 'de', 'fr']\n :param lowres_lang: code for one low resource language e.g. 
'uk'\n :return: datasets for all languages in a dictionary where key is langid and value is the dataset records\n \"\"\"\n data_dir = config.dir_wikiann\n targz_file = os.path.join(data_dir, \"{}.tar.gz\".format(langid))\n tar = tarfile.open(targz_file, \"r:gz\")\n data_set = {}\n #for lowres_lang we need to load train dev test\n targz_file = os.path.join(data_dir, \"{}.tar.gz\".format(langid))\n tar = tarfile.open(targz_file, \"r:gz\")\n for member in tar.getmembers():\n #don't read extra data\n if member.name == 'extra':\n continue\n file_handle = tar.extractfile(member)\n dataset = CoNLLDatasetList(file_handle, config.processing_word[langid],\n config.processing_tag, max_iter if member.name in ['train', 'dev'] and max_iter else config.max_iter)\n data_set[member.name] = dataset\n\n return data_set\n\ndef read_dataset_panx_multiannotated(config, langid, max_iter=None):\n \"\"\"\n :param data_dir: where multilingual conll datasets are located (e.g. french dataset has fr- prefix)\n :param highres_langs: code for high resource languages (e.g. ['en', 'de', 'fr']\n :param lowres_lang: code for one low resource language e.g. 'uk'\n :return: datasets for all languages in a dictionary where key is langid and value is the dataset records\n \"\"\"\n data_dir = config.dir_unlabeled\n targz_file = os.path.join(data_dir, f'{langid}.tar.gz')\n tar = tarfile.open(targz_file, \"r:gz\")\n data_set = {}\n #for lowres_lang we need to load train dev test\n for member in tar.getmembers():\n logging.info('loading {} tagged by {} max_iter {}'.format(langid, member.name, max_iter))\n file_handle = tar.extractfile(member)\n dataset = CoNLLDatasetList(file_handle, config.processing_word[langid],\n config.processing_tag, max_iter if max_iter else config.max_iter)\n data_set[member.name] = dataset\n\n return data_set" ]
[ [ "numpy.load", "numpy.zeros", "numpy.vstack" ] ]
eeshakumar/bark-ml
[ "ce76ec880676e129358706ec1a050ecfa207590e" ]
[ "bark_ml/library_wrappers/lib_fqf_iqn_qrdqn/utils.py" ]
[ "from collections import deque\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\n\ndef update_params(optim, loss, networks, retain_graph=False, grad_cliping=None, count=0):\n optim.zero_grad()\n loss.backward(retain_graph=retain_graph)\n # Clip norms of gradients to stebilize training.\n if grad_cliping:\n for net in networks:\n torch.nn.utils.clip_grad_norm_(net.parameters(), grad_cliping)\n optim.step()\n return loss.retain_grad()\n\n\ndef plot_grad_flow(i, named_parameters):\n '''Plots the gradients flowing through different layers in the net during training.\n Can be used for checking for possible gradient vanishing / exploding problems.\n \n Usage: Plug this function in Trainer class after loss.backwards() as \n \"plot_grad_flow(self.model.named_parameters())\" to visualize the gradient flow'''\n ave_grads = []\n max_grads= []\n layers = []\n for n, p in named_parameters:\n # p.retain_grad = True\n # print(\"Parameter\", n, p, p.grad)\n if(p.requires_grad) and (\"bias\" not in n):\n layers.append(n)\n ave_grads.append(p.abs().mean().detach().numpy())\n max_grads.append(p.abs().max().detach().numpy())\n plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color=\"c\")\n plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color=\"b\")\n plt.hlines(0, 0, len(ave_grads)+1, lw=2, color=\"k\" )\n plt.xticks(range(0,len(ave_grads), 1), layers, rotation=\"vertical\")\n plt.xlim(left=0, right=len(ave_grads))\n plt.ylim(bottom = -0.001, top=0.02) # zoom in on the lower gradient regions\n plt.xlabel(\"Layers\")\n plt.ylabel(\"average gradient\")\n plt.title(\"Gradient flow\")\n plt.grid(True)\n plt.legend([plt.Line2D([0], [0], color=\"c\", lw=4),\n plt.Line2D([0], [0], color=\"b\", lw=4),\n plt.Line2D([0], [0], color=\"k\", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])\n plt.savefig(f\"{i}_grad_flow.png\")\n\n\ndef disable_gradients(network):\n # Disable calculations of gradients.\n for param in network.parameters():\n param.requires_grad = False\n\n\ndef get_onehot(input_list, columns):\n if isinstance(input_list, torch.Tensor):\n input_list = input_list.squeeze().type(torch.int8).cpu().detach().numpy()\n rows = input_list.shape[0]\n input_onehot = np.zeros((rows, columns))\n input_onehot[np.arange(rows), input_list] = 1.\n return input_onehot\n\n\ndef get_margin_loss(actions, total_actions, is_demos, margin, device):\n margins = (torch.ones(total_actions, total_actions) - torch.eye(total_actions)) * margin\n sampled_batch_margins = margins[actions.long()] \n return sampled_batch_margins.squeeze().to(device)\n\n\ndef calculate_huber_loss(td_errors, kappa=1.0):\n return torch.where(td_errors.abs() <= kappa, 0.5 * td_errors.pow(2),\n kappa * (td_errors.abs() - 0.5 * kappa))\n\n\ndef calculate_supervised_margin_classification_loss(current_q, actions, predicted_actions, total_actions, is_demos, device, \n margin=0.8):\n \"\"\"supervised margin loss to force Q value of all non expert actions to be lower\"\"\"\n sampled_batch_margin_loss = get_margin_loss(actions, total_actions, is_demos, margin, device)\n assert sampled_batch_margin_loss.shape == current_q.shape\n q1 = torch.max(current_q + sampled_batch_margin_loss, dim=1)[0]\n q2 = torch.diag(current_q[torch.arange(current_q.size(0)), actions.long()])\n q1 = q1.reshape(actions.shape)\n q2 = q2.reshape(actions.shape)\n assert q1.shape == q2.shape\n loss = is_demos * (q1 - q2)\n # net loss is mean of batch loss\n assert loss.shape == actions.shape\n return loss.mean()\n\n\ndef 
calculate_supervised_classification_quantile_loss(actions, states, online_net, taus, state_embeddings,\n next_state_embeddings, is_demos, total_actions, device,\n supervised_margin_weight=0.5, expert_margin=0.8):\n \"\"\"supervised classification loss for IQN quantiles\"\"\"\n sampled_batch_margin_loss = get_margin_loss(actions, total_actions, is_demos, expert_margin, device)\n weights = supervised_margin_weight * is_demos.squeeze()\n current_sa_quantiles = online_net.calculate_quantiles(taus, state_embeddings=state_embeddings)\n q = current_sa_quantiles.mean(dim=1)\n loss = calculate_expert_loss(q, sampled_batch_margin_loss, is_demos, actions, weights)\n return loss.mean()\n\n\ndef calculate_expert_loss(q, sampled_batch_margin_loss, is_demos, actions, weights):\n \"\"\"calculate expert supervised loss\"\"\"\n q1, _ = torch.max(q + sampled_batch_margin_loss, axis=1)\n expert_actions = is_demos * actions\n q2 = q.gather(1, expert_actions.long()).squeeze()\n loss = (q1 - q2) \n loss = weights * loss\n return loss\n\n\ndef calculate_l2_reg_loss(network, device, weight=1e-5):\n l2_reg_loss = torch.tensor(0., requires_grad=True, device=device)\n for W in network.parameters():\n l2_reg_loss = l2_reg_loss + torch.pow(W, 2).sum()\n return weight * l2_reg_loss\n\n\ndef calculate_quantile_huber_loss(td_errors, taus, weights=None, kappa=1.0):\n assert not taus.requires_grad\n batch_size, N, N_dash = td_errors.shape\n\n # Calculate huber loss element-wisely.\n element_wise_huber_loss = calculate_huber_loss(td_errors, kappa)\n assert element_wise_huber_loss.shape == (batch_size, N, N_dash)\n\n # Calculate quantile huber loss element-wisely.\n element_wise_quantile_huber_loss = torch.abs(taus[..., None] - (\n td_errors.detach() < 0).float()) * element_wise_huber_loss / kappa\n assert element_wise_quantile_huber_loss.shape == (batch_size, N, N_dash)\n\n # Quantile huber loss.\n batch_quantile_huber_loss = element_wise_quantile_huber_loss.sum(dim=1).mean(\n dim=1, keepdim=True)\n assert batch_quantile_huber_loss.shape == (batch_size, 1)\n\n if weights is not None:\n quantile_huber_loss = (batch_quantile_huber_loss * weights).mean()\n else:\n quantile_huber_loss = batch_quantile_huber_loss.mean()\n\n # print(\"Quantile huber loss\", quantile_huber_loss)\n return quantile_huber_loss\n\n\n# note: resume here**\ndef evaluate_quantile_at_action(s_quantiles, actions):\n # print(\"Shape quantiles\", s_quantiles.shape)\n assert s_quantiles.shape[0] == actions.shape[0]\n\n batch_size = s_quantiles.shape[0]\n N = s_quantiles.shape[1]\n # Expand actions into (batch_size, N, 1).\n action_index = actions[..., None].expand(batch_size, N, 1)\n\n # Calculate quantile values at specified actions.\n sa_quantiles = s_quantiles.gather(dim=2, index=action_index)\n\n return sa_quantiles\n\n\nclass RunningMeanStats:\n\n def __init__(self, n=10):\n self.n = n\n self.stats = deque(maxlen=n)\n\n def append(self, x):\n self.stats.append(x)\n\n def get(self):\n return np.mean(self.stats)\n\n\nclass LinearAnneaer:\n\n def __init__(self, start_value, end_value, num_steps):\n assert num_steps > 0 and isinstance(num_steps, int)\n\n self.steps = 0\n self.start_value = start_value\n self.end_value = end_value\n self.num_steps = num_steps\n\n self.a = (self.end_value - self.start_value) / self.num_steps\n self.b = self.start_value\n\n def step(self):\n self.steps = min(self.num_steps, self.steps + 1)\n\n def get(self):\n assert 0 < self.steps <= self.num_steps\n return self.a * self.steps + self.b\n" ]
[ [ "numpy.zeros", "torch.max", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "numpy.mean", "torch.ones", "torch.tensor", "torch.eye", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.Line2D", "numpy.arange", "torch.pow" ] ]
ziotom78/toast
[ "66aef04c833a28f0928a0bbc221da45882aae475" ]
[ "src/toast/todmap/sim_det_map.py" ]
[ "# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file.\n# All rights reserved. Use of this source code is governed by\n# a BSD-style license that can be found in the LICENSE file.\n\nimport numpy as np\n\nimport healpy as hp\n\nfrom ..timing import function_timer, GlobalTimers\n\nfrom .. import qarray as qa\n\nfrom .._libtoast import scan_map_float64, scan_map_float32\n\nfrom ..op import Operator\n\n\nclass OpSimGradient(Operator):\n \"\"\"Generate a fake sky signal as a gradient between the poles.\n\n This passes through each observation and creates a fake signal timestream\n based on the cartesian Z coordinate of the HEALPix pixel containing the\n detector pointing.\n\n Args:\n out (str): accumulate data to the cache with name <out>_<detector>.\n If the named cache objects do not exist, then they are created.\n nside (int): the HEALPix NSIDE value to use.\n min (float): the minimum value to use at the South Pole.\n max (float): the maximum value to use at the North Pole.\n nest (bool): whether to use NESTED ordering.\n \"\"\"\n\n def __init__(\n self,\n out=\"grad\",\n nside=512,\n min=-100.0,\n max=100.0,\n nest=False,\n flag_mask=255,\n common_flag_mask=255,\n keep_quats=False,\n ):\n # Call the parent class constructor\n super().__init__()\n self._nside = nside\n self._out = out\n self._min = min\n self._max = max\n self._nest = nest\n self._flag_mask = flag_mask\n self._common_flag_mask = common_flag_mask\n self._keep_quats = keep_quats\n\n @function_timer\n def exec(self, data):\n \"\"\"Create the gradient timestreams.\n\n This pixelizes each detector's pointing and then assigns a\n timestream value based on the cartesian Z coordinate of the pixel\n center.\n\n Args:\n data (toast.Data): The distributed data.\n\n \"\"\"\n zaxis = np.array([0, 0, 1], dtype=np.float64)\n nullquat = np.array([0, 0, 0, 1], dtype=np.float64)\n\n range = self._max - self._min\n\n for obs in data.obs:\n tod = obs[\"tod\"]\n\n offset, nsamp = tod.local_samples\n\n common = tod.local_common_flags() & self._common_flag_mask\n\n for det in tod.local_dets:\n flags = tod.local_flags(det) & self._flag_mask\n totflags = flags | common\n del flags\n\n pdata = tod.local_pointing(det).copy()\n pdata[totflags != 0, :] = nullquat\n\n dir = qa.rotate(pdata, zaxis)\n\n pixels = hp.vec2pix(\n self._nside, dir[:, 0], dir[:, 1], dir[:, 2], nest=self._nest\n )\n x, y, z = hp.pix2vec(self._nside, pixels, nest=self._nest)\n z += 1.0\n z *= 0.5\n z *= range\n z += self._min\n z[totflags != 0] = 0.0\n\n cachename = \"{}_{}\".format(self._out, det)\n if not tod.cache.exists(cachename):\n tod.cache.create(cachename, np.float64, (nsamp,))\n ref = tod.cache.reference(cachename)\n ref[:] += z\n del ref\n\n if not self._keep_quats:\n cachename = \"quat_{}\".format(det)\n tod.cache.destroy(cachename)\n\n del common\n return\n\n def sigmap(self):\n \"\"\"(array): Return the underlying signal map (full map on all processes).\n \"\"\"\n range = self._max - self._min\n pix = np.arange(0, 12 * self._nside * self._nside, dtype=np.int64)\n x, y, z = hp.pix2vec(self._nside, pix, nest=self._nest)\n z += 1.0\n z *= 0.5\n z *= range\n z += self._min\n return z\n\n\nclass OpSimScan(Operator):\n \"\"\"Operator which generates sky signal by scanning from a map.\n\n The signal to use should already be in a distributed pixel structure,\n and local pointing should already exist.\n\n Args:\n distmap (DistPixels): the distributed map domain data.\n pixels (str): the name of the cache object (<pixels>_<detector>)\n containing the pixel 
indices to use.\n weights (str): the name of the cache object (<weights>_<detector>)\n containing the pointing weights to use.\n out (str): accumulate data to the cache with name <out>_<detector>.\n If the named cache objects do not exist, then they are created.\n\n \"\"\"\n\n def __init__(\n self, distmap=None, pixels=\"pixels\", weights=\"weights\", out=\"scan\", dets=None\n ):\n # Call the parent class constructor\n super().__init__()\n self._map = distmap\n self._pixels = pixels\n self._weights = weights\n self._out = out\n self._dets = dets\n\n @function_timer\n def exec(self, data):\n \"\"\"Create the timestreams by scanning from the map.\n\n This loops over all observations and detectors and uses the pointing\n matrix to project the distributed map into a timestream.\n\n Args:\n data (toast.Data): The distributed data.\n\n Returns:\n None\n\n \"\"\"\n\n for obs in data.obs:\n tod = obs[\"tod\"]\n\n dets = tod.local_dets if self._dets is None else self._dets\n\n for det in dets:\n\n # get the pixels and weights from the cache\n\n pixelsname = \"{}_{}\".format(self._pixels, det)\n weightsname = \"{}_{}\".format(self._weights, det)\n pixels = tod.cache.reference(pixelsname)\n weights = tod.cache.reference(weightsname)\n\n nsamp, nnz = weights.shape\n\n gt = GlobalTimers.get()\n gt.start(\"OpSimScan.exec.global_to_local\")\n sm, lpix = self._map.global_to_local(pixels)\n gt.stop(\"OpSimScan.exec.global_to_local\")\n\n # f = (np.dot(weights[x], self._map.data[sm[x], lpix[x]])\n # if (lpix[x] >= 0) else 0\n # for x in range(tod.local_samples[1]))\n # maptod = np.fromiter(f, np.float64, count=tod.local_samples[1])\n maptod = np.zeros(nsamp)\n maptype = np.dtype(self._map.dtype)\n gt.start(\"OpSimScan.exec.scan_map\")\n if maptype.char == \"d\":\n scan_map_float64(\n self._map.submap,\n nnz,\n sm,\n lpix,\n self._map.flatdata,\n weights.astype(np.float64).reshape(-1),\n maptod,\n )\n elif maptype.char == \"f\":\n scan_map_float32(\n self._map.submap,\n nnz,\n sm,\n lpix,\n self._map.flatdata,\n weights.astype(np.float64).reshape(-1),\n maptod,\n )\n else:\n raise RuntimeError(\n \"Scanning from a map only supports float32 and float64 maps\"\n )\n gt.stop(\"OpSimScan.exec.scan_map\")\n\n cachename = \"{}_{}\".format(self._out, det)\n if not tod.cache.exists(cachename):\n tod.cache.create(cachename, np.float64, (nsamp,))\n ref = tod.cache.reference(cachename)\n ref[:] += maptod\n\n del ref\n del pixels\n del weights\n\n return\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.dtype", "numpy.zeros" ] ]
daugraph/estimator
[ "2248336960a2cc65d2b7d0b21aca76157ccd0e79" ]
[ "tensorflow_estimator/python/estimator/tpu/_tpu_estimator_embedding.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===================================================================\n\"\"\"Tooling for support TPU embedding in TPUEstimator.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport tensorflow as tf\n\nfrom tensorflow.python.feature_column import feature_column as core_fc\nfrom tensorflow.python.feature_column import feature_column_lib as core_fc_lib\nfrom tensorflow.python.feature_column import utils as fc_utils\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.tpu import feature_column as tpu_fc\nfrom tensorflow.python.tpu import feature_column_v2 as tpu_fc_v2\nfrom tensorflow.python.tpu import tpu_embedding\nfrom tensorflow.python.tpu.tpu_embedding import AdagradParameters\nfrom tensorflow.python.tpu.tpu_embedding import AdamParameters\nfrom tensorflow.python.tpu.tpu_embedding import FtrlParameters\nfrom tensorflow.python.tpu.tpu_embedding import MomentumParameters\nfrom tensorflow.python.tpu.tpu_embedding import RMSPropParameters\nfrom tensorflow.python.tpu.tpu_embedding import StochasticGradientDescentParameters\nfrom tensorflow.python.util.tf_export import estimator_export\nfrom tensorflow_estimator.python.estimator import model_fn as model_fn_lib\n\n# pylint: disable=protected-access\n_TPU_EMBEDDING_COLUMN_CLASSES = (tpu_fc._TPUEmbeddingColumn,\n tpu_fc._TPUSharedEmbeddingColumn,\n tpu_fc_v2._TPUEmbeddingColumnV2,\n tpu_fc_v2._TPUSharedEmbeddingColumnV2)\n_TPU_DEVICE_SPECIFIC_EMBEDDING_COLUMNS = (\n tpu_fc_v2._TPUDeviceSpecificEmbeddingColumnV2,\n tpu_fc_v2._TPUSharedDeviceSpecificEmbeddingColumnV2)\n_EMBEDDING_COLUMN_CLASSES = (core_fc._EmbeddingColumn,\n core_fc_lib.EmbeddingColumn,\n core_fc._SharedEmbeddingColumn)\n_SUPPORTED_FEATURE_COLUMNS = (core_fc._NumericColumn, core_fc_lib.NumericColumn)\n\n_SUPPORTED_OPTIMIZERS = (\n AdagradParameters,\n AdamParameters,\n FtrlParameters,\n StochasticGradientDescentParameters,\n MomentumParameters,\n RMSPropParameters,\n)\n\n# pylint: enable=protected-access\n\n_TABLE_NAME_PREFIX = 'tbl_'\n_LEN_TABLE_NAME_PREFIX = len(_TABLE_NAME_PREFIX)\n\n\ndef _get_table_name_from_embedding_var_name(embedding_var_name):\n return '{}{}'.format(_TABLE_NAME_PREFIX, embedding_var_name)\n\n\ndef _get_embedding_var_name_from_table_name(table_name):\n return table_name[_LEN_TABLE_NAME_PREFIX:]\n\n\ndef _get_embedding_variable_name(scope_name, var_name):\n if scope_name:\n scope_name = scope_name + '/'\n return '{}{}'.format(scope_name, var_name)\n\n\ndef _get_slot_variable_names(scope_name, var_name, optimization_parameters):\n \"\"\"Return embedding variable names which are consistent with CPU runs.\"\"\"\n if scope_name:\n scope_name = scope_name + '/'\n if isinstance(optimization_parameters,\n tf.compat.v1.tpu.experimental.AdagradParameters):\n return tpu_embedding.AdagradSlotVariableName('{}{}/Adagrad'.format(\n scope_name, 
var_name))\n elif isinstance(optimization_parameters,\n tf.compat.v1.tpu.experimental.AdamParameters):\n return tpu_embedding.AdamSlotVariableNames(\n '{}{}/Adam/m'.format(scope_name, var_name),\n '{}{}/Adam/v'.format(scope_name, var_name))\n elif isinstance(optimization_parameters,\n tf.compat.v1.tpu.experimental.FtrlParameters):\n return tpu_embedding.FtrlSlotVariableName(\n '{}{}/Ftrl'.format(scope_name, var_name), # accumulator\n '{}{}/Ftrl_1'.format(scope_name, var_name)) # linear\n elif isinstance(optimization_parameters, MomentumParameters):\n return tpu_embedding.MomentumSlotVariableName('{}{}/Momentum'.format(\n scope_name, var_name))\n elif isinstance(optimization_parameters, RMSPropParameters):\n return tpu_embedding.RMSPropSlotVariableNames(\n ms='{}{}/RMSProp/ms'.format(scope_name, var_name),\n mom='{}{}/RMSProp/mom'.format(scope_name, var_name),\n )\n elif isinstance(\n optimization_parameters,\n tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters):\n return None\n else:\n raise ValueError('Support to infer full variable name '\n 'for optimization_parameter {} has not been added.'.format(\n optimization_parameters))\n\n\ndef get_full_variable_names(graph,\n table_to_config_dict,\n optimization_parameters=None):\n \"\"\"Return embedding variable names and slot variables which are consistent with CPU runs.\"\"\"\n collection = graph.get_collection_ref(tpu_fc._TPU_FC_TO_SCOPE) # pylint: disable=protected-access\n if not collection:\n raise RuntimeError(\n 'Embedding feature column did not capture any thing. Make sure the '\n 'feature columns passed to TPUEstimator constructor is properly '\n 'used in model_fn.')\n\n embedding_variable_name_by_table = {}\n slot_variable_names_by_table = {}\n for table_name in table_to_config_dict:\n embedding_var_name = _get_embedding_var_name_from_table_name(table_name)\n (scope_name, var_name) = collection[0][embedding_var_name]\n embedding_variable_name_by_table[table_name] = (\n _get_embedding_variable_name(scope_name, var_name))\n if optimization_parameters:\n slot_variable_names_by_table[table_name] = _get_slot_variable_names(\n scope_name, var_name, optimization_parameters)\n\n graph.clear_collection(tpu_fc._TPU_FC_TO_SCOPE) # pylint: disable=protected-access\n return embedding_variable_name_by_table, slot_variable_names_by_table\n\n\ndef get_configs_from_feature_columns(feature_columns):\n \"\"\"Create configs for TPUEmbedding etc from a list of feature columns.\n\n Args:\n feature_columns: a list of supported feature columns.\n\n Returns:\n A tuple of dicts, the first maps tables to their config, the second maps\n features to their config, the third maps learning rate key to callback that\n takes global step and outputs dynamic learning rate.\n \"\"\"\n\n allowed = (\n tpu_fc_v2._TPUEmbeddingColumnV2, # pylint: disable=protected-access\n tpu_fc_v2._TPUSharedEmbeddingColumnV2) # pylint: disable=protected-access\n warn = (tpu_fc._TPUEmbeddingColumn, tpu_fc._TPUSharedEmbeddingColumn) # pylint: disable=protected-access\n\n for column in feature_columns:\n if not isinstance(column, allowed + warn):\n raise TypeError(\n 'Unsupported feature column {}. Supported types are {}.'.format(\n type(column), allowed))\n if isinstance(column, warn):\n tf.compat.v1.logging.warn(\n 'Columns of type {} are deprecated. 
Supported types are {}.'.format(\n type(column), allowed))\n\n table_to_config = {}\n feature_to_config = {}\n for column in feature_columns:\n feature_name = column.get_feature_key_name()\n table_name = _get_table_name_from_embedding_var_name(\n column.get_embedding_var_name())\n if feature_name in feature_to_config:\n raise ValueError(\n 'Feature column {} is used with multiple embeddings and this is '\n 'not supported.'.format(feature_name))\n feature_to_config[feature_name] = tpu_embedding.FeatureConfig(\n table_id=table_name,\n max_sequence_length=column.get_max_sequence_length(),\n weight_key=column.get_weight_key_name())\n vocabulary_size, dimension = column.get_embedding_table_size()\n table_to_config[table_name] = tpu_embedding.TableConfig(\n vocabulary_size=vocabulary_size,\n dimension=dimension,\n initializer=column.get_initializer(),\n combiner=column.get_combiner(),\n learning_rate_fn=column.get_learning_rate_fn())\n\n return table_to_config, feature_to_config\n\n\n@estimator_export(v1=['estimator.tpu.experimental.EmbeddingConfigSpec'])\nclass EmbeddingConfigSpec(\n collections.namedtuple('EmbeddingConfigSpec', [\n 'feature_columns', 'tensor_core_feature_columns',\n 'optimization_parameters', 'clipping_limit',\n 'pipeline_execution_with_tensor_core',\n 'experimental_gradient_multiplier_fn', 'feature_to_config_dict',\n 'table_to_config_dict', 'partition_strategy', 'profile_data_directory'\n ])):\n \"\"\"Class to keep track of the specification for TPU embeddings.\n\n Pass this class to `tf.estimator.tpu.TPUEstimator` via the\n `embedding_config_spec` parameter. At minimum you need to specify\n `feature_columns` and `optimization_parameters`. The feature columns passed\n should be created with some combination of\n `tf.tpu.experimental.embedding_column` and\n `tf.tpu.experimental.shared_embedding_columns`.\n\n TPU embeddings do not support arbitrary Tensorflow optimizers and the\n main optimizer you use for your model will be ignored for the embedding table\n variables. Instead TPU embeddigns support a fixed set of predefined optimizers\n that you can select from and set the parameters of. These include adagrad,\n adam and stochastic gradient descent. 
Each supported optimizer has a\n `Parameters` class in the `tf.tpu.experimental` namespace.\n\n ```\n column_a = tf.feature_column.categorical_column_with_identity(...)\n column_b = tf.feature_column.categorical_column_with_identity(...)\n column_c = tf.feature_column.categorical_column_with_identity(...)\n tpu_shared_columns = tf.tpu.experimental.shared_embedding_columns(\n [column_a, column_b], 10)\n tpu_non_shared_column = tf.tpu.experimental.embedding_column(\n column_c, 10)\n tpu_columns = [tpu_non_shared_column] + tpu_shared_columns\n ...\n def model_fn(features):\n dense_features = tf.keras.layers.DenseFeature(tpu_columns)\n embedded_feature = dense_features(features)\n ...\n\n estimator = tf.estimator.tpu.TPUEstimator(\n model_fn=model_fn,\n ...\n embedding_config_spec=tf.estimator.tpu.experimental.EmbeddingConfigSpec(\n column=tpu_columns,\n optimization_parameters=(\n tf.estimator.tpu.experimental.AdagradParameters(0.1))))\n ```\n \"\"\"\n\n def __new__(cls,\n feature_columns=None,\n optimization_parameters=None,\n clipping_limit=None,\n pipeline_execution_with_tensor_core=False,\n experimental_gradient_multiplier_fn=None,\n feature_to_config_dict=None,\n table_to_config_dict=None,\n partition_strategy='div',\n profile_data_directory=None):\n \"\"\"Creates an `EmbeddingConfigSpec` instance.\n\n Args:\n feature_columns: All embedding `FeatureColumn`s used by model.\n optimization_parameters: An instance of `AdagradParameters`,\n `AdamParameters` or `StochasticGradientDescentParameters`. This\n optimizer will be applied to all embedding variables specified by\n `feature_columns`.\n clipping_limit: (Optional) Clipping limit (absolute value).\n pipeline_execution_with_tensor_core: setting this to `True` makes training\n faster, but trained model will be different if step N and step N+1\n involve the same set of embedding IDs. Please see\n `tpu_embedding_configuration.proto` for details.\n experimental_gradient_multiplier_fn: (Optional) A Fn taking global step as\n input returning the current multiplier for all embedding gradients.\n feature_to_config_dict: A dictionary mapping feature names to instances of\n the class `FeatureConfig`. Either features_columns or the pair of\n `feature_to_config_dict` and `table_to_config_dict` must be specified.\n table_to_config_dict: A dictionary mapping feature names to instances of\n the class `TableConfig`. Either features_columns or the pair of\n `feature_to_config_dict` and `table_to_config_dict` must be specified.\n partition_strategy: A string, determining how tensors are sharded to the\n tpu hosts. See `tf.nn.safe_embedding_lookup_sparse` for more details.\n Allowed value are `\"div\"` and `\"mod\"'. If `\"mod\"` is used, evaluation\n and exporting the model to CPU will not work as expected.\n profile_data_directory: Directory where embedding lookup statistics are\n stored. These statistics summarize information about the inputs to the\n embedding lookup operation, in particular, the average number of\n embedding IDs per example and how well the embedding IDs are load\n balanced across the system. The lookup statistics are used during TPU\n initialization for embedding table partitioning. Collection of lookup\n statistics is done at runtime by profiling the embedding inputs: only\n 3% of input samples are profiled to minimize host CPU overhead. Once\n a suitable number of samples are profiled, the lookup statistics are\n saved to table-specific files in the profile data directory generally\n at the end of a TPU training loop. 
The filename corresponding to each\n table is obtained by hashing table specific parameters (e.g., table\n name and number of features) and global configuration parameters (e.g.,\n sharding strategy and task count). The same profile data directory can\n be shared among several models to reuse embedding lookup statistics.\n\n Returns:\n An `EmbeddingConfigSpec` instance.\n\n Raises:\n ValueError: If the feature_columns are not specified.\n TypeError: If the feature columns are not of ths correct type (one of\n _SUPPORTED_FEATURE_COLUMNS, _TPU_EMBEDDING_COLUMN_CLASSES OR\n _EMBEDDING_COLUMN_CLASSES).\n ValueError: If `optimization_parameters` is not one of the required types.\n \"\"\"\n if (not feature_columns and\n not (feature_to_config_dict and table_to_config_dict) or\n (feature_columns and\n (feature_to_config_dict and table_to_config_dict))):\n raise ValueError('Exactly one of `feature_columns` and the pair '\n '`feature_to_config_dict` and `table_to_config_dict` '\n 'must be be specified.')\n\n if partition_strategy not in ('div', 'mod'):\n raise ValueError('Invalid partition_strategy {}. Must be one of \"mod\" or '\n '\"div\".'.format(partition_strategy))\n\n tensor_core_feature_columns = None\n embedding_core_feature_columns = None\n if feature_columns:\n tensor_core_feature_columns = []\n embedding_core_feature_columns = []\n # It is unknown at this moment, whether the TPUEstimator is running in CPU\n # or TPU mode. So allow non-TPU embedding columns also.\n supported_classes = tuple(\n list(_SUPPORTED_FEATURE_COLUMNS) +\n list(_TPU_EMBEDDING_COLUMN_CLASSES) + list(_EMBEDDING_COLUMN_CLASSES))\n\n for column in feature_columns:\n if (isinstance(column, _TPU_DEVICE_SPECIFIC_EMBEDDING_COLUMNS) and\n (column._embedding_lookup_device == # pylint: disable=protected-access\n tpu_fc_v2.EmbeddingDevice.TPU_TENSOR_CORE)):\n tensor_core_feature_columns.append(column)\n else:\n embedding_core_feature_columns.append(column)\n if not isinstance(column, supported_classes):\n raise TypeError(\n 'All feature columns must be supported types in {}. Got {}'\n .format(supported_classes, type(column)))\n\n if not isinstance(optimization_parameters, _SUPPORTED_OPTIMIZERS):\n raise ValueError('optimization_parameters must be an instance of type '\n '{}. Got {}.'.format(_SUPPORTED_OPTIMIZERS,\n type(optimization_parameters)))\n else:\n for feature, config in feature_to_config_dict.items():\n if not isinstance(config, tpu_embedding.FeatureConfig):\n raise TypeError(\n 'Config for feature {} must be of type `FeatureConfig`. Got {}'\n .format(feature, type(config)))\n if config.table_id not in table_to_config_dict:\n raise ValueError('Feature {} refers to table {} which is not in the '\n 'table_to_config_dict.'.format(\n feature, config.table_id))\n for table, config in table_to_config_dict.items():\n if not isinstance(config, tpu_embedding.TableConfig):\n raise TypeError(\n 'Config for table {} must be of type `TableConfig`. 
Got '\n '{}'.format(table, type(config)))\n\n return super(EmbeddingConfigSpec, cls).__new__(\n cls,\n feature_columns=embedding_core_feature_columns,\n tensor_core_feature_columns=tensor_core_feature_columns,\n optimization_parameters=optimization_parameters,\n clipping_limit=clipping_limit,\n pipeline_execution_with_tensor_core=pipeline_execution_with_tensor_core,\n experimental_gradient_multiplier_fn=experimental_gradient_multiplier_fn,\n feature_to_config_dict=feature_to_config_dict,\n table_to_config_dict=table_to_config_dict,\n partition_strategy=partition_strategy,\n profile_data_directory=profile_data_directory)\n\n\nclass EmbeddingConfig(object):\n \"\"\"This is the internal immutable object for embedding config.\n\n `_EmbeddingConfig` is responsible to _translate_ user provided\n `EmbeddingConfigSpec` to internal data structures, mostly constructor\n arguments of `TPUEmbedding`.\n \"\"\"\n\n def __init__(self, embedding_config_spec, train_batch_size, eval_batch_size,\n num_hosts, num_cores, run_config):\n if not embedding_config_spec:\n raise ValueError('embedding_config_spec cannot be None.')\n\n self._embedding_config_spec = embedding_config_spec\n self._train_batch_size = train_batch_size\n self._eval_batch_size = eval_batch_size\n self._num_hosts = num_hosts\n self._num_cores = num_cores\n self._run_config = run_config\n\n if embedding_config_spec.feature_columns:\n self._table_to_config_dict, self._feature_to_config_dict = (\n get_configs_from_feature_columns(\n embedding_config_spec.feature_columns))\n else:\n self._table_to_config_dict = embedding_config_spec.table_to_config_dict\n self._feature_to_config_dict = embedding_config_spec.feature_to_config_dict\n self._partition_strategy = embedding_config_spec.partition_strategy\n self._mode_to_tpu_embedding_dict = {}\n self.dummy_table_variables = None\n\n self._grad_multiplier_fn = (\n embedding_config_spec.experimental_gradient_multiplier_fn)\n\n def get_grad_multiplier(self):\n if self._grad_multiplier_fn:\n return ops.convert_to_tensor(\n self._grad_multiplier_fn(tf.compat.v1.train.get_global_step()),\n dtype=tf.dtypes.float32)\n\n def has_embedding_tables(self):\n return bool(self._table_to_config_dict)\n\n def _create_tpu_embedding(self, mode):\n \"\"\"Create tpu_embedding.TPUEmbedding based on mode.\"\"\"\n if mode == model_fn_lib.ModeKeys.TRAIN:\n batch_size = self._train_batch_size\n else:\n batch_size = self._eval_batch_size\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n tpu_embedding_mode = tpu_embedding.TRAINING\n optimization_parameters = (\n self._embedding_config_spec.optimization_parameters)\n elif (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.PREDICT):\n tpu_embedding_mode = tpu_embedding.INFERENCE\n optimization_parameters = None\n else:\n raise ValueError('Mode {} is not supported.'.format(mode))\n\n if self._run_config.cluster:\n master = self._run_config.cluster.master()\n cluster_spec = self._run_config.cluster.cluster_spec()\n cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None\n else:\n master = (\n self._run_config.evaluation_master\n if mode == model_fn_lib.ModeKeys.EVAL else self._run_config.master)\n cluster_def = None\n master_job_name = None\n if self._run_config.tpu_config.tpu_job_name is not None:\n master_job_name = self._run_config.tpu_config.tpu_job_name\n tpu_embedding_ = tpu_embedding.TPUEmbedding(\n self._table_to_config_dict,\n self._feature_to_config_dict,\n batch_size,\n tpu_embedding_mode,\n master,\n optimization_parameters,\n cluster_def,\n 
pipeline_execution_with_tensor_core=self._embedding_config_spec\n .pipeline_execution_with_tensor_core,\n partition_strategy=self._partition_strategy,\n profile_data_directory=self._embedding_config_spec\n .profile_data_directory,\n master_job_name=master_job_name)\n return tpu_embedding_\n\n def get_tpu_embedding(self, mode):\n if mode not in self._mode_to_tpu_embedding_dict:\n self._mode_to_tpu_embedding_dict[mode] = (\n self._create_tpu_embedding(mode))\n return self._mode_to_tpu_embedding_dict[mode]\n\n\ndef _maybe_dense_to_sparse(tensor):\n \"\"\"Possibly convert a dense (rank 1 or 2) tensor to a SparseTensor.\"\"\"\n # If already sparse, return as is.\n if isinstance(tensor, tf.sparse.SparseTensor):\n return tensor\n indices = tf.compat.v1.where(tensor)\n values = tf.compat.v1.gather_nd(tensor, indices)\n shape = tf.compat.v1.shape(tensor, out_type=tf.dtypes.int64)\n return tf.sparse.SparseTensor(indices, values, shape)\n\n\ndef split_inputs(ctx, features, labels, num_cores_per_batch=1):\n \"\"\"Splits the dense and sparse tensors inside the features and labels.\"\"\"\n enqueue_datas = collections.OrderedDict()\n\n if ctx.embedding_config:\n tpu_embedding_ = ctx.embedding_config.tpu_embedding\n for feature_key in tpu_embedding_.feature_to_config_dict:\n sparse_feature = _get_sparse_feature_from_feature(feature_key, features)\n max_sequence_length = tpu_embedding_.feature_to_config_dict[\n feature_key].max_sequence_length\n combiner = tpu_embedding_._table_to_config_dict[\n tpu_embedding_._feature_to_config_dict[feature_key].table_id].combiner\n if max_sequence_length > 0:\n length_feature_name = (\n tpu_fc.get_sequence_length_feature_key_name_from_feature_key_name(\n feature_key))\n length_feature = tf.math.minimum(\n fc_utils.sequence_length_from_sparse_tensor(sparse_feature),\n max_sequence_length)\n length_feature.set_shape(ctx.batch_size_for_input_fn)\n features[length_feature_name] = length_feature\n weight_key = tpu_embedding_.feature_to_config_dict[feature_key].weight_key\n sparse_feature_split = _split_tensor(sparse_feature, num_cores_per_batch)\n if combiner is None and not isinstance(sparse_feature,\n tf.sparse.SparseTensor):\n # A dense tensor with no combiner was provided so we assume that each\n # of the embedding_indices belongs to a different sample (setting\n # sample_indices to None).\n if weight_key is not None:\n raise ValueError(\n 'Found weights {} for weighted_categorical_column, which is not'\n 'compatible with sparse feature {} enqueued as dense tensor.'\n .format(weight_key, feature_key))\n enqueue_data = []\n for i in range(num_cores_per_batch):\n enqueue_data.append(\n tpu_embedding.EnqueueData(sparse_feature_split[i]))\n else:\n weights = None\n if isinstance(sparse_feature, tf.sparse.SparseTensor):\n weights = _get_weights_from_features(weight_key, features)\n weights_split = _split_tensor(weights, num_cores_per_batch)\n enqueue_data = []\n for i in range(num_cores_per_batch):\n split_weights = weights_split[i] if weights else None\n enqueue_data.append(\n tpu_embedding.EnqueueData.from_sparse_tensor(\n _maybe_dense_to_sparse(sparse_feature_split[i]),\n weights=split_weights))\n enqueue_datas[feature_key] = enqueue_data\n if ctx.tensor_core_embedding_columns:\n # pylint: disable=protected-access\n for column in ctx.tensor_core_embedding_columns:\n feature_key = column.categorical_column.key\n sparse_feature = _get_sparse_feature_from_feature(feature_key, features)\n padded_values, padded_mask = (\n tpu_fc_v2.pad_sparse_embedding_lookup_indices(\n 
sparse_feature, column._tensor_core_shape[1]))\n padded_values.set_shape(\n [ctx.batch_size_for_input_fn, column._tensor_core_shape[1]])\n padded_mask.set_shape(\n [ctx.batch_size_for_input_fn, column._tensor_core_shape[1]])\n features[feature_key] = padded_values\n mask_key = feature_key + tpu_fc_v2._TENSOR_CORE_MASK_KEY_SUFFIX\n if mask_key in features:\n raise ValueError('Mask key {} for Tensor Core embedding is '\n 'already in use.'.format(mask_key))\n features[mask_key] = padded_mask\n # pylint: enable=protected-access\n\n # Transpose the enqueue_datas dict into a list of dicts\n enqueue_datas_list = []\n for i in range(num_cores_per_batch):\n enqueue_data = {}\n for key, value in enqueue_datas.items():\n enqueue_data[key] = value[i]\n enqueue_datas_list.append(enqueue_data)\n return features, labels, enqueue_datas_list\n\n\ndef _split_tensor(tensor, num_splits):\n \"\"\"Splits tensor into num_splits pieces, returns a list of pieces.\"\"\"\n if tensor is None:\n return [None] * num_splits\n elif num_splits <= 0:\n return ValueError(\n 'Tensors cannot be split into {} pieces.'.format(num_splits))\n elif num_splits == 1:\n return [tensor]\n elif isinstance(tensor, tf.sparse.SparseTensor):\n return tf.compat.v2.sparse.split(tensor, num_splits, axis=0)\n else:\n return tf.split(tensor, num_splits)\n\n\ndef _get_sparse_feature_from_feature(feature_key, features):\n \"\"\"Pop and return sparse feature.\"\"\"\n sparse_feature = features.pop(feature_key)\n if not sparse_feature.dtype.is_integer:\n raise ValueError('SparseTensor with string as values are not supported. '\n 'If you are using categorical_column_with_vocabulary_file '\n 'or categorical_column_with_vocabulary_list, please call '\n 'your_column.categorical_column._transform_feature({{'\n 'your_column.key: features[your_column.key]}}) in '\n 'your input_fn() to convert string to int. '\n 'feature_key = {}.'.format(feature_key))\n return sparse_feature\n\n\ndef _get_weights_from_features(weight_key_name, features):\n \"\"\"Pop and return feature for weights, possibly None.\"\"\"\n weights = None\n if weight_key_name is not None:\n if weight_key_name in features:\n weights = features.pop(weight_key_name)\n else:\n raise ValueError(\n 'Cannot find weights {} for weighted_categorical_column.'\n ' Please check if the weights are present in feature dict. Also'\n ' note weight-sharing among weighted_categorical_column is not '\n 'supported on TPU.'.format(weight_key_name))\n if not isinstance(weights, tf.sparse.SparseTensor):\n raise ValueError(\n 'weighted_categorical_column with weight key name {} has dense '\n 'weights. Dense weights are not supported on TPU. Please use '\n 'sparse weights instead.'.format(weight_key_name))\n if weights.dtype is not tf.dtypes.float32:\n weights = tf.cast(weights, dtype=tf.dtypes.float32)\n return weights\n\n\ndef get_tpu_embedding_columns(feature_columns):\n \"\"\"Get feature columns meant to use TPU embedding.\n\n Args:\n feature_columns: a list of feature columns.\n\n Returns:\n A list of feature columns which can be placed on TPU embedding.\n \"\"\"\n tpu_embedding_columns = []\n for column in feature_columns:\n if isinstance(column, _TPU_EMBEDDING_COLUMN_CLASSES):\n tpu_embedding_columns.append(column)\n return tpu_embedding_columns\n" ]
[ [ "tensorflow.python.util.tf_export.estimator_export", "tensorflow.python.tpu.tpu_embedding.TPUEmbedding", "tensorflow.compat.v1.where", "tensorflow.compat.v1.gather_nd", "tensorflow.sparse.SparseTensor", "tensorflow.compat.v2.sparse.split", "tensorflow.compat.v1.shape", "tensorflow.python.tpu.feature_column_v2.pad_sparse_embedding_lookup_indices", "tensorflow.compat.v1.train.get_global_step", "tensorflow.python.feature_column.utils.sequence_length_from_sparse_tensor", "tensorflow.python.tpu.tpu_embedding.EnqueueData", "tensorflow.split", "tensorflow.cast", "tensorflow.python.tpu.feature_column.get_sequence_length_feature_key_name_from_feature_key_name" ] ]
mingkaid/soft-Q-learning-for-text-generation
[ "e769cd9da43197cf3701710e734285d20ecc19c1" ]
[ "sql/modules.py" ]
[ "import torch\nimport texar.torch as tx\nfrom typing import Tuple, Dict, Union, Optional, Callable, Any, cast\n\nfrom sql.utils import ForwardMode\nfrom sql import utils as sql_utils\n# from sql import replay_buffer\nfrom sql.modules_base import SoftQModelBase\nfrom sql.types import (\n BatchType,\n HF_BatchType,\n FloatTensor,\n LongTensor)\nfrom modules.models import Transformer\nTexarModules = Union[Transformer]\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass TXSoftQModel(SoftQModelBase):\n\n def __init__(\n self,\n model_constructor: Callable[[], TexarModules],\n behavior_model_constructor: Optional[Callable[[], Optional[TexarModules]]],\n sql_loss_impl: str,\n target_update_method: Optional[str],\n target_learning_rate: float,\n reward_shaping: bool,\n reward_shaping_min: float,\n reward_shaping_max: float,\n beam_width: int,\n reward_name: str,\n sql_loss_coefficients: Optional[float] = None,\n sql_loss_margin_constant: Optional[float] = None,\n sql_loss_margin_coefficient: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n # Hacks not implemented in parent class\n hack_truncate_length_constant: Optional[int] = None,\n # Deprecated Arguments\n use_target_network: bool = True,\n target_sql_loss_impl: Optional[str] = None,\n ) -> None:\n \"\"\"\n Deprectaed Features:\n 1. Reply Buffer\n 2. Learn the target model, using different loss functions\n \"\"\"\n target_model_constructor = None\n actor_model_constructor = None\n if target_update_method in [\"copy\", \"polyak\"]:\n target_model_constructor = model_constructor\n\n if sql_loss_impl in [\"sac\"]:\n actor_model_constructor = model_constructor\n\n if use_target_network is False:\n raise ValueError(\"Deprecated\")\n if target_sql_loss_impl is not None:\n raise NotImplementedError\n\n super().__init__(\n model_constructor=model_constructor,\n target_model_constructor=target_model_constructor,\n actor_model_constructor=actor_model_constructor,\n behavior_model_constructor=behavior_model_constructor,\n sql_loss_impl=sql_loss_impl,\n target_update_method=target_update_method,\n target_learning_rate=target_learning_rate,\n reward_shaping=reward_shaping,\n reward_shaping_old_min=0,\n reward_shaping_old_max=100,\n reward_shaping_new_min=reward_shaping_min,\n reward_shaping_new_max=reward_shaping_max,\n sql_loss_coefficients=sql_loss_coefficients,\n sql_loss_margin_constant=sql_loss_margin_constant,\n sql_loss_margin_coefficient=sql_loss_margin_coefficient,\n top_k=top_k,\n top_p=top_p,\n beam_width=beam_width,\n reward_name=reward_name)\n\n if not isinstance(self._model, Transformer):\n raise TypeError\n if not isinstance(self._model_, Transformer):\n raise TypeError\n if self._actor_model is not None and not isinstance(self._actor_model, Transformer):\n raise TypeError\n if self._behavior_model is not None and not isinstance(self._behavior_model, Transformer):\n raise TypeError\n\n # Mypy stuff\n self._model: TexarModules\n self._model_: TexarModules\n self._actor_model: Optional[TexarModules]\n self._behavior_model: Optional[TexarModules]\n\n # Hacks\n if hack_truncate_length_constant is not None:\n sql_utils.colorful_warning(\n f\"Using hack_truncate_length_constant={hack_truncate_length_constant}\",\n bg=\"blue\")\n self._hack_truncate_length_constant = hack_truncate_length_constant\n\n def _decode_teacher_forcing(\n self,\n batch: Union[BatchType, HF_BatchType],\n use_target: bool,\n ) -> Tuple[FloatTensor,\n Optional[FloatTensor],\n Optional[FloatTensor],\n 
LongTensor,\n Optional[LongTensor],\n LongTensor,\n LongTensor]:\n\n batch = cast(BatchType, batch)\n outputs, _ = self._model(\n batch=batch,\n mode=ForwardMode.MLE)\n\n logits_ = None\n if use_target is True:\n outputs_, _ = self._model_(\n batch=batch,\n mode=ForwardMode.SQL_OFF_GT)\n logits_ = outputs_.logits.contiguous()\n\n logits_pi = None\n sampled_outputs_id = None\n if self.is_actor_critic is True:\n if self._actor_model is None:\n raise ValueError\n\n outputs_pi, _ = self._actor_model(\n batch=batch,\n mode=ForwardMode.SQL_OFF_GT)\n logits_pi = outputs_pi.logits.contiguous()\n\n # Add top-P/K here?\n sampled_outputs_id = (\n torch.distributions\n .Categorical(logits=logits_pi)\n .sample()\n .contiguous())\n\n return (\n outputs.logits.contiguous(),\n logits_,\n logits_pi,\n batch[\"target_text_ids\"][:, 1:].contiguous(),\n sampled_outputs_id,\n batch[\"target_text_ids\"][:, 1:].contiguous(),\n batch[\"target_length\"].contiguous() - 1,\n )\n\n def _decode_behavior_forcing(\n self,\n batch: Union[BatchType, HF_BatchType],\n use_target: bool,\n ) -> Tuple[FloatTensor,\n Optional[FloatTensor],\n Optional[FloatTensor],\n LongTensor,\n Optional[LongTensor],\n LongTensor,\n LongTensor]:\n\n batch = cast(BatchType, batch)\n if self._behavior_model is None:\n raise ValueError\n\n # We do not need gradients from the behavior model\n with torch.no_grad():\n behavior_outputs, behavior_sample_lengths = self._behavior_model(\n batch=batch,\n # `SQL_ON` and `PG` would be the same here\n mode=ForwardMode.SQL_ON,\n top_k=self._top_k,\n top_p=self._top_p)\n\n # Note:\n # Here the `behavior_batch[target_text]` will be populated\n # with `batch[target_text]` but `behavior_batch[target_text_*]`\n # will be populated with `outputs_id`. This is fine as when\n # calculating rewards, `target_text` will be used instead\n # of `target_text_ids`.\n behavior_batch = sql_utils.make_batch_from_outputs(\n batch=batch,\n outputs_id=behavior_outputs.sample_id,\n sequence_lengths=behavior_sample_lengths,\n target_vocab=self._model.target_vocab,\n include_target_text=\"ground-truth\")\n\n return self._decode_teacher_forcing(\n batch=behavior_batch,\n use_target=use_target)\n\n def _decode_sampling(\n self,\n batch: Union[BatchType, HF_BatchType],\n use_target: bool,\n ) -> Tuple[FloatTensor,\n Optional[FloatTensor],\n Optional[FloatTensor],\n LongTensor,\n Optional[LongTensor],\n LongTensor,\n LongTensor]:\n\n batch = cast(BatchType, batch)\n if not self.is_actor_critic:\n outputs, sample_lengths = self._model(\n batch=batch,\n mode=ForwardMode.SQL_ON,\n top_k=self._top_k,\n top_p=self._top_p)\n\n batch_ = sql_utils.make_batch_from_outputs(\n batch=batch,\n outputs_id=outputs.sample_id,\n sequence_lengths=sample_lengths,\n target_vocab=self._model.target_vocab)\n\n logits_pi = None\n sampled_outputs_id = None\n\n else:\n if self._actor_model is None:\n raise ValueError\n\n outputs_pi, sample_lengths_pi = self._actor_model(\n batch=batch,\n mode=ForwardMode.SQL_ON,\n top_k=self._top_k,\n top_p=self._top_p)\n\n batch_ = sql_utils.make_batch_from_outputs(\n batch=batch,\n outputs_id=outputs_pi.sample_id,\n sequence_lengths=sample_lengths_pi,\n target_vocab=self._model.target_vocab)\n\n outputs, sample_lengths = self._model(\n batch=batch_,\n mode=ForwardMode.SQL_OFF_GT)\n\n logits_pi = outputs_pi.logits.contiguous()\n sampled_outputs_id = outputs_pi.sample_id.contiguous()\n if not (sample_lengths == sample_lengths_pi).all().item():\n raise ValueError\n\n # Use off-policy because the target\n # has to follow the 
steps taken by the model\n outputs_, sample_lengths_ = self._model_(\n batch=batch_,\n mode=ForwardMode.SQL_OFF_GT)\n\n # Sanity check\n if sample_lengths_ is not None:\n # In `transformer`, this will be None, so skip the check.\n if not (sample_lengths == sample_lengths_).all().item():\n raise ValueError\n\n if self._hack_truncate_length_constant is not None:\n # Truncate length beyond a specified constant\n # beyond ground truth length.\n length_to_truncate = (\n batch[\"target_length\"] - 1 +\n self._hack_truncate_length_constant)\n sample_lengths = torch.minimum(\n sample_lengths,\n length_to_truncate)\n\n return (\n outputs.logits.contiguous(),\n outputs_.logits.contiguous(),\n logits_pi,\n outputs.sample_id.contiguous(),\n sampled_outputs_id,\n batch[\"target_text_ids\"][:, 1:].contiguous(),\n sample_lengths.contiguous(),\n )\n\n def _compute_rewards(\n self,\n batch: Union[BatchType, HF_BatchType],\n outputs_id: LongTensor,\n labels: LongTensor,\n sequence_lengths: LongTensor,\n ) -> Tuple[FloatTensor, FloatTensor, Dict[str, Any]]:\n # Decode the outputs\n source_texts = tx.utils.strip_special_tokens(\n batch[\"source_text\"],\n is_token_list=True)\n target_texts = tx.utils.strip_special_tokens(\n [text[1:] for text in batch[\"target_text\"]],\n is_token_list=True)\n output_texts = tx.data.vocabulary.map_ids_to_strs(\n ids=outputs_id.cpu(),\n vocab=self._model.target_vocab)\n\n if self._hack_truncate_length_constant is not None:\n # Special handling of when the `outputs_id` are\n # not truncated but the `sequence_lengths` are\n # truncated. This would cause some mismatch in\n # reward computation, so manually fix it here.\n output_texts = sql_utils.map_ids_to_strs_truncated(\n outputs_id=outputs_id.cpu(),\n sequence_lengths=sequence_lengths,\n vocab=self._model.target_vocab)\n\n rewards_tensor, rewards_log = self._reward_function(\n sources=[\" \".join(tokens) for tokens in source_texts],\n targets=[\" \".join(tokens) for tokens in target_texts],\n predictions=output_texts,\n to_tensor=True,\n mode=\"train\")\n\n rewards_tensor = rewards_tensor.to(device)\n shaped_rewards_tensor = self._reward_shaping_func(rewards_tensor)\n return rewards_tensor, shaped_rewards_tensor, rewards_log\n\n def _forward_decoding(\n self,\n batch: Union[BatchType, HF_BatchType],\n ) -> Tuple[Dict, Dict]:\n if not self.is_actor_critic:\n outputs = self._model(\n batch=batch,\n mode=ForwardMode.INFER,\n beam_width=self._beam_width)\n else:\n if self._actor_model is None:\n raise ValueError\n\n outputs = self._actor_model(\n batch=batch,\n mode=ForwardMode.INFER,\n beam_width=self._beam_width)\n\n return outputs, {}\n\n def forward(\n self,\n mode: ForwardMode,\n batch: BatchType,\n ) -> Tuple[Union[FloatTensor, Dict], Dict[str, Any]]:\n\n if mode == ForwardMode.INFER:\n return self._forward_decoding(batch=batch)\n\n _, loss, loss_log = self._forward(\n mode=mode,\n batch=batch)\n\n return loss, loss_log\n\n\n # def forward_SQL(self, batch: BatchType, mode: ForwardMode) -> Tuple[FloatTensor, Dict[str, Any]]:\n\n # if self._target_update_method == \"learn\":\n # # Detach target logits when computing model loss, and\n # # detach model logits when computing target loss\n # sql_loss, sql_loss_log = sql_losses.soft_q_loss_with_sparse_rewards(\n # implementation=self._sql_loss_impl,\n # actions=actions,\n # logits=logits,\n # logits_=logits_.detach(),\n # rewards=shaped_rewards,\n # sequence_length=sample_lengths,\n # coefficient=self._sql_loss_coefficients)\n\n # for i in 
range(len(target_outputs.logits_collections)):\n # target_loss, target_loss_log = sql_losses.soft_q_loss_with_sparse_rewards(\n # implementation=self._target_sql_loss_impl,\n # actions=actions,\n # logits=target_outputs.logits_collections[i],\n # logits_=logits.detach(),\n # rewards=shaped_rewards,\n # sequence_length=sample_lengths,\n # coefficient=self._sql_loss_coefficients)\n\n # sql_loss = sql_loss + target_loss\n # sql_utils.add_prefix_to_dict_keys_inplace(\n # target_loss_log,\n # prefix=f\"target-{i}/\")\n # sql_loss_log = unionize_dicts([\n # sql_loss_log, target_loss_log])\n" ]
[ [ "torch.distributions.Categorical", "torch.no_grad", "torch.cuda.is_available", "torch.minimum" ] ]
musicinmybrain/NiaAML
[ "58d879cb1d5f8a458bb5cca115e68c125418fc6e" ]
[ "niaaml/fitness/cohen_kappa.py" ]
[ "from sklearn.metrics import cohen_kappa_score\nfrom niaaml.fitness.fitness_function import FitnessFunction\n\n__all__ = [\"CohenKappa\"]\n\n\nclass CohenKappa(FitnessFunction):\n r\"\"\"Class representing the cohen's kappa as a fitness function.\n\n Date:\n 2020\n\n Author:\n Luka Pečnik\n\n License:\n MIT\n\n Documentation:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html\n\n See Also:\n * :class:`niaaml.fitness.FitnessFunction`\n \"\"\"\n Name = \"Cohen's Kappa\"\n\n def get_fitness(self, predicted, expected):\n r\"\"\"Return fitness value. The larger return value should represent a better fitness for the framework to work properly.\n\n Arguments:\n predicted (pandas.core.series.Series): Predicted values.\n expected (pandas.core.series.Series): Expected values.\n\n Returns:\n float: Calculated fitness value.\n \"\"\"\n return cohen_kappa_score(expected, predicted)\n" ]
[ [ "sklearn.metrics.cohen_kappa_score" ] ]
ZuhaoLiu/NAS-SCAM
[ "6fac20e97561c344a5f0775a11aa6baf04f10d11" ]
[ "network_rebuild.py" ]
[ "import torch\nfrom torch import sigmoid\nimport torch.nn.functional as F\nfrom torch.nn.functional import relu\nfrom primitives import SAM_PRIMITIVES, SAM_OPS, CAM_PRIMITIVES, CAM_OPS\nimport math\nfrom conv_modules import * \n\n\nclass MixedOp(nn.Module):\n '''\n the connection between two nodes in the rebuild architecture\n Args:\n input_C: the channel of input node\n output_C: the channel of output node\n PRIMITIVE: the list of operation name\n OPS: pytorch instance operation corresponds to operation name\n INDEX: the index of PRIMITIVE list\n '''\n def __init__(self, input_C, output_C, PRIMITIVE, OPS, INDEX):\n super(MixedOp, self).__init__()\n self.ops = OPS[PRIMITIVE[INDEX]](input_C, output_C)\n\n def forward(self, x, function):\n x = function(self.ops(x))\n return x\n\nclass Cell(nn.Module):\n '''\n the instance rebuild architecture of NAS-SAM or NAS-CAM\n Args:\n C: input channel number of search space\n operation_number: the total number of nodes except input node\n Index: the index of selected operation between each two nodes\n cell_type: 'SAM' for NAS-SAM and 'CAM' for NAS-CAM\n '''\n def __init__(self, C, operation_number, Index, cell_type = 'SAM'):\n super(Cell, self).__init__()\n self.operation_number = operation_number\n self.cell_type = cell_type\n channel_list = [32]*(operation_number+1) # set the channel number of intermidiate nodes, which is a prior parameter\n channel_list[0] = C\n channel_list[-1] = 1 # the channel of output node should be 1\n self.mixop_list = nn.ModuleList()\n total_index = 0\n for i in range(operation_number):\n for j in range(i+1):\n self.mixop_list.append(MixedOp(channel_list[j], channel_list[i+1],\n globals()[cell_type+'_PRIMITIVES'],\n globals()[cell_type+'_OPS'],\n Index[total_index]))\n total_index += 1\n self.function_list = ['relu']*math.factorial(operation_number) \n # add ReLU at the end of operations except the operations connected to the last node\n self.function_list[-operation_number:] = ['sigmoid']*operation_number \n # add sigmoid at end of operations connected to the last node\n def forward(self, input_x):\n x = input_x\n if self.cell_type == 'SAM':\n pass\n else:\n batch_size, channels, height, width = x.size()\n x = x.mean(dim=(2,3)) # for NAS-CAM, we need to do global average pooling first\n x = torch.unsqueeze(x, dim = 1)\n total_x = list()\n total_index = 0\n add_x = x\n for i in range(self.operation_number):\n total_x.append(add_x)\n now_x = 0\n for j in range(i+1):\n now_x = torch.add(self.mixop_list[total_index](total_x[j],\n globals()[self.function_list[total_index]],), now_x)\n total_index += 1\n add_x = now_x\n x = torch.div(add_x,self.operation_number)\n if self.cell_type == 'SAM':\n return torch.mul(input_x, x)\n else:\n return torch.mul(input_x, x.view(batch_size, channels, 1, 1))\n\nclass SCAM_P(nn.Module):\n def __init__(self, C, SAM_Index, CAM_Index):\n super(SCAM_P, self).__init__()\n self.SAM = Cell(C, 3, SAM_Index, cell_type = 'SAM')\n self.CAM = Cell(1, 3, CAM_Index, cell_type = 'CAM')\n def forward(self, x):\n x = torch.max(self.SAM(x), self.CAM(x))\n return x\n\nclass SCAM_S(nn.Module):\n def __init__(self, C, SAM_Index, CAM_Index):\n super(SCAM_S, self).__init__()\n self.SAM = Cell(C, 3, SAM_Index, cell_type = 'SAM')\n self.CAM = Cell(1, 3, CAM_Index, cell_type = 'CAM')\n def forward(self, x):\n x = self.SAM(x)\n x = self.CAM(x)\n return x\n\n\nclass MakeDownLayers(nn.Module):\n '''\n DOWN sampling blocks of the network\n '''\n def __init__(self, in_ch, layers_number, SAM_Index, CAM_Index, attention_type = 
'P'):\n super(MakeDownLayers, self).__init__()\n self.layers_number = layers_number\n self.down_list = nn.ModuleList()\n self.SCAM_list = nn.ModuleList()\n for i in range(self.layers_number):\n self.down_list.append(Down(in_ch, in_ch*2))\n self.SCAM_list.append(globals()['SCAM_'+attention_type](in_ch*2, SAM_Index[i], CAM_Index[i]))\n in_ch *= 2\n\n def forward(self, x):\n output_list = list()\n output_list.append(x)\n for i in range(self.layers_number):\n x = self.down_list[i](x)\n x = self.SCAM_list[i](x)\n output_list.append(x)\n return output_list\n\nclass MakeUpLayers(nn.Module):\n '''\n UP sampling blocks of the network\n '''\n def __init__(self, in_ch, layers_number, SAM_Index, CAM_Index, attention_type = 'P'):\n super(MakeUpLayers, self).__init__()\n self.layers_number = layers_number\n self.up_list = nn.ModuleList()\n self.SCAM_list = nn.ModuleList()\n for i in range(self.layers_number):\n self.up_list.append(Up(in_ch, in_ch // 2))\n self.SCAM_list.append(globals()['SCAM_'+attention_type](in_ch // 2, SAM_Index[i], CAM_Index[i]))\n in_ch //= 2\n def forward(self, x_list):\n x = x_list[-1]\n for i in range(self.layers_number):\n x = self.up_list[i](x, x_list[-i-2])\n x = self.SCAM_list[i](x)\n return x\n\nclass RebuildNet(nn.Module):\n '''\n Rebuilding network with serial NAS-SCAM or parallel NAS-SCAM\n '''\n def __init__(self, SAM_Index, CAM_Index, NChannels = 4, NClasses = 4, FeatureRoot = 16, SamplingNumber = 4):\n super(RebuildNet, self).__init__()\n self.SamplingNumber = SamplingNumber\n self.InConv = DoubleConv(NChannels, FeatureRoot)\n self.DownLayers = MakeDownLayers(FeatureRoot, SamplingNumber,\n SAM_Index[0:SamplingNumber], CAM_Index[0:SamplingNumber])\n self.Middle = DoubleConv(2**SamplingNumber*FeatureRoot, 2**SamplingNumber*FeatureRoot)\n self.UpLayers = MakeUpLayers(2**SamplingNumber*FeatureRoot, SamplingNumber,\n SAM_Index[SamplingNumber:], CAM_Index[SamplingNumber:])\n self.OutConv = nn.Conv2d(FeatureRoot, NClasses, 1)\n def forward(self, x):\n x = self.InConv(x)\n x = self.DownLayers(x)\n x[-1] = self.Middle(x[-1])\n x = self.UpLayers(x)\n x = torch.sigmoid(self.OutConv(x))\n return x\n\n\n\n\n\n" ]
[ [ "torch.div", "torch.mul", "torch.unsqueeze" ] ]
paragon520/cascoKNN
[ "1bee06200c5e8c86977bbdec896b624af2c2e05a" ]
[ "pyod/models/combination.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"A collection of model combination functionalities.\n\"\"\"\n# Author: Yue Zhao <[email protected]>\n# License: BSD 2 clause\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom numpy.random import RandomState\nfrom sklearn.utils import check_array\nfrom sklearn.utils import column_or_1d\n# noinspection PyProtectedMember\nfrom sklearn.utils import shuffle\nfrom sklearn.utils.random import sample_without_replacement\nfrom sklearn.utils.testing import assert_equal\nfrom ..utils.utility import check_parameter\n\n\ndef aom(scores, n_buckets=5, method='static', bootstrap_estimators=False,\n random_state=None):\n \"\"\"Average of Maximum - An ensemble method for combining multiple\n estimators. See :cite:`aggarwal2015theoretical` for details.\n\n First dividing estimators into subgroups, take the maximum score as the\n subgroup score. Finally, take the average of all subgroup outlier scores.\n\n :param scores: The score matrix outputted from various estimators\n :type scores: numpy array of shape (n_samples, n_estimators)\n\n :param n_buckets: The number of subgroups to build\n :type n_buckets: int, optional (default=5)\n\n :param method: {'static', 'dynamic'}, if 'dynamic', build subgroups\n randomly with dynamic bucket size.\n :type method: str, optional (default='static')\n\n :param bootstrap_estimators: Whether estimators are drawn with replacement.\n :type bootstrap_estimators: bool, optional (default=False)\n\n :param random_state: If int, random_state is the seed used by the\n random number generator; If RandomState instance, random_state is\n the random number generator; If None, the random number generator\n is the RandomState instance used by `np.random`.\n :type random_state: int, RandomState instance or None,\n optional (default=None)\n\n :return: The combined outlier scores.\n :rtype: Numpy array of shape (n_samples,)\n \"\"\"\n\n # TODO: add one more parameter for max number of estimators\n # use random_state instead\n # for now it is fixed at n_estimators/2\n scores = check_array(scores)\n n_estimators = scores.shape[1]\n check_parameter(n_buckets, 2, n_estimators, param_name='n_buckets')\n\n scores_aom = np.zeros([scores.shape[0], n_buckets])\n\n if method == 'static':\n\n n_estimators_per_bucket = int(n_estimators / n_buckets)\n if n_estimators % n_buckets != 0:\n raise ValueError('n_estimators / n_buckets has a remainder. 
Not '\n 'allowed in static mode.')\n\n if not bootstrap_estimators:\n # shuffle the estimator order\n shuffled_list = shuffle(list(range(0, n_estimators, 1)),\n random_state=random_state)\n\n head = 0\n for i in range(0, n_estimators, n_estimators_per_bucket):\n tail = i + n_estimators_per_bucket\n batch_ind = int(i / n_estimators_per_bucket)\n\n scores_aom[:, batch_ind] = np.max(\n scores[:, shuffled_list[head:tail]], axis=1)\n\n # increment indexes\n head = head + n_estimators_per_bucket\n # noinspection PyUnusedLocal\n tail = tail + n_estimators_per_bucket\n else:\n for i in range(n_buckets):\n ind = sample_without_replacement(n_estimators,\n n_estimators_per_bucket,\n random_state=random_state)\n scores_aom[:, i] = np.max(scores[:, ind], axis=1)\n\n elif method == 'dynamic': # random bucket size\n for i in range(n_buckets):\n # the number of estimators in a bucket should be 2 - n/2\n max_estimator_per_bucket = RandomState(seed=random_state).randint(\n 2, int(n_estimators / 2))\n ind = sample_without_replacement(n_estimators,\n max_estimator_per_bucket,\n random_state=random_state)\n scores_aom[:, i] = np.max(scores[:, ind], axis=1)\n\n else:\n raise NotImplementedError(\n '{method} is not implemented'.format(method=method))\n\n return np.mean(scores_aom, axis=1)\n\n\ndef moa(scores, n_buckets=5, method='static', bootstrap_estimators=False,\n random_state=None):\n \"\"\"Maximization of Average - An ensemble method for combining multiple\n estimators. See :cite:`aggarwal2015theoretical` for details.\n\n First dividing estimators into subgroups, take the average score as the\n subgroup score. Finally, take the maximization of all subgroup outlier\n scores.\n\n :param scores: The score matrix outputted from various estimators\n :type scores: numpy array of shape (n_samples, n_estimators)\n\n :param n_buckets: The number of subgroups to build\n :type n_buckets: int, optional (default=5)\n\n :param method: {'static', 'dynamic'}, if 'dynamic', build subgroups\n randomly with dynamic bucket size.\n :type method: str, optional (default='static')\n\n :param bootstrap_estimators: Whether estimators are drawn with replacement.\n :type bootstrap_estimators: bool, optional (default=False)\n\n :param random_state: If int, random_state is the seed used by the\n random number generator; If RandomState instance, random_state is\n the random number generator; If None, the random number generator\n is the RandomState instance used by `np.random`.\n :type random_state: int, RandomState instance or None,\n optional (default=None)\n\n :return: The combined outlier scores.\n :rtype: Numpy array of shape (n_samples,)\n \"\"\"\n\n # TODO: add one more parameter for max number of estimators\n # for now it is fixed to n_estimators/2\n scores = check_array(scores)\n n_estimators = scores.shape[1]\n check_parameter(n_buckets, 2, n_estimators, param_name='n_buckets')\n\n scores_moa = np.zeros([scores.shape[0], n_buckets])\n\n if method == 'static':\n\n n_estimators_per_bucket = int(n_estimators / n_buckets)\n if n_estimators % n_buckets != 0:\n raise ValueError('n_estimators / n_buckets has a remainder. 
Not '\n 'allowed in static mode.')\n\n if not bootstrap_estimators:\n # shuffle the estimator order\n shuffled_list = shuffle(list(range(0, n_estimators, 1)),\n random_state=random_state)\n\n head = 0\n for i in range(0, n_estimators, n_estimators_per_bucket):\n tail = i + n_estimators_per_bucket\n batch_ind = int(i / n_estimators_per_bucket)\n\n scores_moa[:, batch_ind] = np.mean(\n scores[:, shuffled_list[head:tail]], axis=1)\n\n # increment index\n head = head + n_estimators_per_bucket\n # noinspection PyUnusedLocal\n tail = tail + n_estimators_per_bucket\n else:\n for i in range(n_buckets):\n ind = sample_without_replacement(n_estimators,\n n_estimators_per_bucket,\n random_state=random_state)\n scores_moa[:, i] = np.mean(scores[:, ind], axis=1)\n\n elif method == 'dynamic': # random bucket size\n for i in range(n_buckets):\n # the number of estimators in a bucket should be 2 - n/2\n max_estimator_per_bucket = RandomState(seed=random_state).randint(\n 2, int(n_estimators / 2))\n ind = sample_without_replacement(n_estimators,\n max_estimator_per_bucket,\n random_state=random_state)\n scores_moa[:, i] = np.mean(scores[:, ind], axis=1)\n\n else:\n raise NotImplementedError(\n '{method} is not implemented'.format(method=method))\n\n return np.max(scores_moa, axis=1)\n\n\ndef average(scores, estimator_weight=None):\n \"\"\"\n Combine the outlier scores from multiple estimators by averaging\n\n :param scores: score matrix from multiple estimators on the same samples\n :type scores: numpy array of shape (n_samples, n_estimators)\n\n :param estimator_weight: if specified, using weighted average\n :type estimator_weight: list of shape (1, n_estimators)\n\n :return: the combined outlier scores\n :rtype: numpy array of shape (n_samples, )\n \"\"\"\n scores = check_array(scores)\n\n if estimator_weight is not None:\n estimator_weight = column_or_1d(estimator_weight).reshape(1, -1)\n assert_equal(scores.shape[1], estimator_weight.shape[1])\n\n # (d1*w1 + d2*w2 + ...+ dn*wn)/(w1+w2+...+wn)\n # generated weighted scores\n scores = np.sum(np.multiply(scores, estimator_weight),\n axis=1) / np.sum(\n estimator_weight)\n return scores.ravel()\n\n else:\n return np.mean(scores, axis=1).ravel()\n\n\ndef maximization(scores):\n \"\"\"\n Combine the outlier scores from multiple estimators by taking the maximum\n\n :param scores: score matrix from multiple estimators on the same samples\n :type scores: numpy array of shape (n_samples, n_estimators)\n\n :return: the combined outlier scores\n :rtype: numpy array of shape (n_samples, )\n \"\"\"\n scores = check_array(scores)\n return np.max(scores, axis=1).ravel()\n" ]
[ [ "numpy.max", "numpy.zeros", "numpy.random.RandomState", "numpy.sum", "sklearn.utils.random.sample_without_replacement", "numpy.mean", "sklearn.utils.testing.assert_equal", "numpy.multiply", "sklearn.utils.check_array", "sklearn.utils.column_or_1d" ] ]
gkebe/EventBERT
[ "63a77067629350c118b12d48884394c4b85416b9" ]
[ "inverse_cloze.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport pickle\nimport argparse\nimport json\nimport logging\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom file_utils import PYTORCH_PRETRAINED_BERT_CACHE\nfrom modeling import BertForNextSentencePrediction, BertConfig, WEIGHTS_NAME, CONFIG_NAME\nfrom tokenization import BertTokenizer\nfrom optimization import BertAdam, warmup_linear\nfrom schedulers import LinearWarmUpScheduler\nfrom apex import amp\nfrom sklearn.metrics import matthews_corrcoef, f1_score, recall_score, precision_score, classification_report, confusion_matrix, accuracy_score\nfrom utils import is_main_process\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef compute_metrics(task_name, preds, labels, label_names=None):\n if label_names is None:\n label_names = []\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mrpc\":\n return acc_and_f1(preds, labels)\n elif task_name == \"sts-b\":\n return pearson_and_spearman(preds, labels)\n elif task_name == \"qqp\":\n return acc_and_f1(preds, labels)\n elif task_name == \"mnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mnli-mm\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"qnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"rte\":\n return {\"acc\": acc_and_f1(preds, labels)}\n elif task_name == \"wnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"frames\":\n return metrics_frame(preds, labels, label_names)\n else:\n raise KeyError(task_name)\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\ndef simple_mrr(ranks):\n return np.sum([1/i for i in ranks])/len(ranks)\n\ndef metrics_frame(preds, labels, label_names):\n recall_micro = recall_score(labels, preds, average=\"micro\")\n recall_macro = recall_score(labels, preds, average=\"macro\")\n precision_micro = precision_score(labels, preds, average=\"micro\")\n precision_macro = precision_score(labels, preds, average=\"macro\")\n f1_micro = f1_score(labels, preds, average=\"micro\")\n f1_macro = f1_score(labels, preds, average=\"macro\")\n cr = classification_report(labels, preds, labels=list(range(len(label_names))), target_names=label_names)\n 
model_metrics = {\"Precision, Micro\": precision_micro, \"Precision, Macro\": precision_macro,\n \"Recall, Micro\": recall_micro, \"Recall, Macro\": recall_macro,\n \"F1 score, Micro\": f1_micro, \"F1 score, Macro\": f1_macro, \"Classification report\": cr}\n return model_metrics\n\n\ndef acc_and_f1(preds, labels):\n acc = simple_accuracy(preds, labels)\n f1 = f1_score(y_true=labels, y_pred=preds)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n }\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, guid, T, F1, F2, F3, F4, F5):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the instance.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the instance. This should be\n specified for train and dev instances, but not for test instances.\n \"\"\"\n self.guid = guid\n self.T = T\n self.F1 = F1\n self.F2 = F2\n self.F3 = F3\n self.F4 = F4\n self.F5 = F5\n\n\nclass InputInstance(object):\n \"\"\"A single training/test instance for simple sequence classification.\"\"\"\n\n def __init__(self, guid, T, F1, F2, F3, F4, F5):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the instance.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the instance. This should be\n specified for train and dev instances, but not for test instances.\n \"\"\"\n self.guid = guid\n self.T = T\n self.F1 = F1\n self.F2 = F2\n self.F3 = F3\n self.F4 = F4\n self.F5 = F5\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_instances(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n return self._create_instances(\n self._read_json(os.path.join(data_dir)))\n def get_num_events(self, data_dir):\n dataset = self._read_json(os.path.join(data_dir))\n return len(dataset[\"0\"][\"T\"])\n @classmethod\n def _read_json(cls, input_file):\n \"\"\"Reads a tab separated value file.\"\"\"\n dataset_json = \"\"\n with open(input_file, \"rb\") as f:\n dataset_json = f.read()\n return json.loads(dataset_json)\n\n def _create_instances(self, dataset):\n \"\"\"Creates instances for the training and dev sets.\"\"\"\n instances = []\n for i, value in dataset.items():\n guid = int(i)\n T = value[\"T\"]\n F1 = value[\"F1\"]\n F2 = value[\"F2\"]\n F3 = value[\"F3\"]\n F4 = value[\"F4\"]\n F5 = value[\"F5\"]\n instances.append(\n InputInstance(guid=guid, T=T, F1=F1, F2=F2, F3=F3, F4=F4, F5=F5))\n return instances\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef tokenize_sequence(sequence, max_seq_length, tokenizer):\n sequence_ = []\n first = \"\"\n for _, second in zip(sequence, sequence[1:]):\n first = \" \".join(sequence[:sequence.index(second)])\n tokens_a = tokenizer.tokenize(first)\n tokens_b = tokenizer.tokenize(second)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n input_mask = [1] * len(input_ids)\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n seq = dict()\n seq[\"input_ids\"] = input_ids\n seq[\"input_mask\"] = input_mask\n seq[\"segment_ids\"] = segment_ids\n sequence_.append(seq)\n return sequence_\n\n\ndef convert_instances_to_features(instances, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n features = []\n for (ex_index, instance) in enumerate(instances):\n features.append(\n InputFeatures(guid=instance.guid,\n T=tokenize_sequence(instance.T, max_seq_length, tokenizer),\n F1=tokenize_sequence(instance.F1, max_seq_length, tokenizer),\n F2=tokenize_sequence(instance.F2, max_seq_length, tokenizer),\n F3=tokenize_sequence(instance.F3, max_seq_length, tokenizer),\n F4=tokenize_sequence(instance.F4, max_seq_length, tokenizer),\n F5=tokenize_sequence(instance.F5, max_seq_length, tokenizer)))\n return features\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\n\nfrom apex.multi_tensor_apply import multi_tensor_applier\n\n\nclass GradientClipper:\n \"\"\"\n Clips gradient norm of an iterable of parameters.\n \"\"\"\n\n def __init__(self, max_grad_norm):\n self.max_norm = max_grad_norm\n if multi_tensor_applier.available:\n import amp_C\n self._overflow_buf = torch.cuda.IntTensor([0])\n self.multi_tensor_l2norm = amp_C.multi_tensor_l2norm\n self.multi_tensor_scale = amp_C.multi_tensor_scale\n else:\n raise RuntimeError('Gradient clipping requires cuda extensions')\n\n def step(self, parameters):\n l = [p.grad for p in parameters if p.grad is not None]\n total_norm, _ = multi_tensor_applier(self.multi_tensor_l2norm, self._overflow_buf, [l], False)\n total_norm = total_norm.item()\n if (total_norm == float('inf')): return\n clip_coef = self.max_norm / (total_norm + 1e-6)\n if clip_coef < 1:\n multi_tensor_applier(self.multi_tensor_scale, self._overflow_buf, [l, l], clip_coef)\n\n\ndef inverse_cloze(data_dir=\"data/inverse_cloze/cloze_dataset_weber.json\", bert_model=\"bert-base-uncased\", output_dir=\"results/inverse_cloze\", init_checkpoint=\"checkpoints/bert-base.pt\", vocab_file=\"data/download/google_pretrained_weights/uncased_L-12_H-768_A-12/vocab.txt\", config_file=\"bert_config.json\", cache_dir = \"\", max_seq_length=128,\n do_lower_case=True, no_cuda=False, local_rank=-1, seed=2, server_ip=\"\", 
server_port=\"\"):\n\n if server_ip and server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(server_ip, server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n\n\n if local_rank == -1 or no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(local_rank)\n device = torch.device(\"cuda\", local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device: {} n_gpu: {}\".format(\n device, n_gpu))\n\n\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(seed)\n\n if os.path.exists(output_dir) and os.listdir(output_dir):\n print(\"WARNING: Output directory ({}) already exists and is not empty.\".format(output_dir))\n if not os.path.exists(output_dir) and is_main_process():\n os.makedirs(output_dir)\n\n # tokenizer = BertTokenizer.from_pretrained(bert_model, do_lower_case=do_lower_case)\n tokenizer = BertTokenizer(vocab_file, do_lower_case=do_lower_case, max_len=128) # for bert large\n\n\n # Prepare model\n config = BertConfig.from_json_file(config_file)\n # Padding for divisibility by 8\n if config.vocab_size % 8 != 0:\n config.vocab_size += 8 - (config.vocab_size % 8)\n\n model = BertForNextSentencePrediction(config)\n print(\"USING CHECKPOINT from\", init_checkpoint)\n model.load_state_dict(torch.load(init_checkpoint, map_location='cpu')[\"model\"], strict=False)\n print(\"USED CHECKPOINT from\", init_checkpoint)\n\n model.to(device)\n # Prepare optimizer\n processor = DataProcessor()\n instances = processor.get_instances(data_dir)\n num_events = processor.get_num_events(data_dir)\n eval_features = convert_instances_to_features(\n instances, max_seq_length, tokenizer)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(instances))\n logger.info(\" Batch size = %d\", 24)\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n for f in eval_features:\n sequences = [f.T, f.F1, f.F2, f.F3, f.F4, f.F5]\n input_ids = []\n input_mask = []\n segment_ids = []\n for seq in sequences:\n input_ids += [s[\"input_ids\"] for s in seq]\n input_mask += [s[\"input_mask\"] for s in seq]\n segment_ids += [s[\"segment_ids\"] for s in seq]\n\n all_input_ids += input_ids\n all_input_mask += input_mask\n all_segment_ids += segment_ids\n\n all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n all_input_mask = torch.tensor(all_input_mask, dtype=torch.long)\n all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.long)\n eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)\n # Run prediction for full data\n eval_sampler = SequentialSampler(eval_data)\n\n eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=6*(num_events-1))\n\n model.eval()\n eval_loss, eval_accuracy = 0, 0\n nb_eval_steps, nb_eval_examples = 0, 0\n preds = []\n probs_ = []\n probs_prod_ = []\n ranks =[]\n\n for input_ids, input_mask, segment_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n with torch.no_grad():\n tmp_eval_loss = model(input_ids=input_ids, 
token_type_ids=segment_ids, attention_mask=input_mask, next_sentence_label=None)\n logits = model(input_ids, segment_ids, input_mask)\n\n probabilities = torch.softmax(logits, 1)\n probs = probabilities.tolist()\n probs = [i[0] for i in probs]\n probs_seq = [probs[x:x + (num_events-1)] for x in range(0, len(probs), (num_events-1))]\n probs_prod = [np.sum(np.log(i)) for i in probs_seq]\n\n label_indices = [i+1 for i, x in enumerate(sorted(probs_prod, reverse=True)) if x == probs_prod[0]]\n rank = random.choice(label_indices)\n pred = 1\n if rank == 1:\n pred = 0\n ranks.append(rank)\n preds.append(pred)\n probs_.append(probs_seq)\n probs_prod_.append(probs_prod)\n\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n\n accuracy = simple_accuracy(np.array(preds), np.array([0]*len(preds)))\n mrr = simple_mrr(ranks)\n eval_loss = eval_loss / nb_eval_steps\n\n instance_template = [\"T\", \"F1\", \"F2\", \"F3\", \"F4\", \"F5\"]\n\n\n results = {'eval_loss': eval_loss,\n 'accuracy': accuracy,\n 'MRR': mrr}\n\n output_eval_file = os.path.join(output_dir,\n \"eval_results_alt_\" + init_checkpoint.split(\"/\")[-1].split(\".\")[0] + \"_\"\n + data_dir.split(\"/\")[-1].split(\".\")[0] + \".txt\")\n with open(output_eval_file, \"w\") as writer:\n for i in range(len(preds)):\n seqs = [instances[i].T, instances[i].F1, instances[i].F2, instances[i].F3, instances[i].F4, instances[i].F5]\n for j in range(len(probs_prod_[i])):\n print(instance_template[j])\n writer.write(instance_template[j] + \"\\n\")\n for k in range(len(probs_[i][j])):\n print(\"\\t\"+seqs[j][k] +\" \"+ seqs[j][k+1] + \": \" + str(probs_[i][j][k]))\n writer.write(\"\\t\"+seqs[j][k] + \" \" + seqs[j][k+1] + \": \" + str(probs_[i][j][k])+\"\\n\")\n print(\"Sum = \" + str(probs_prod_[i][j]))\n writer.write(\"Sum = \" + str(probs_prod_[i][j])+\"\\n\")\n print()\n writer.write(\"\\n\")\n print(\"Predicted \" + instance_template[int(preds[i])])\n writer.write(\"Predicted \" + instance_template[int(preds[i])]+\"\\n\")\n print(\"Rank of T: \" + str(ranks[i]))\n writer.write(\"Rank of T: \" + str(ranks[i]) +\"\\n\")\n print()\n print()\n writer.write(\"\\n\\n\")\n logger.info(\"***** Eval results *****\")\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n writer.write(\"%s = %s\\n\" % (key, str(results[key])))\n return results\n" ]
[ [ "torch.cuda.is_available", "torch.load", "sklearn.metrics.f1_score", "numpy.log", "torch.distributed.init_process_group", "torch.manual_seed", "numpy.argmax", "torch.tensor", "torch.utils.data.DataLoader", "torch.device", "numpy.array", "torch.cuda.manual_seed_all", "sklearn.metrics.matthews_corrcoef", "torch.utils.data.SequentialSampler", "torch.cuda.IntTensor", "torch.cuda.device_count", "torch.cuda.set_device", "torch.utils.data.TensorDataset", "sklearn.metrics.recall_score", "numpy.random.seed", "numpy.sum", "torch.no_grad", "torch.softmax", "sklearn.metrics.precision_score" ] ]
carlos-aguayo/ray
[ "fedbdd5dc6a47aa9cba170816f8c0950193b4fd6" ]
[ "rllib/offline/mixed_input.py" ]
[ "import numpy as np\n\nfrom ray.rllib.offline.input_reader import InputReader\nfrom ray.rllib.offline.json_reader import JsonReader\nfrom ray.rllib.offline.io_context import IOContext\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\nfrom ray.rllib.utils.typing import SampleBatchType\nfrom typing import Dict\n\n\n@DeveloperAPI\nclass MixedInput(InputReader):\n \"\"\"Mixes input from a number of other input sources.\n\n Examples:\n >>> MixedInput({\n \"sampler\": 0.4,\n \"/tmp/experiences/*.json\": 0.4,\n \"s3://bucket/expert.json\": 0.2,\n }, ioctx)\n \"\"\"\n\n @DeveloperAPI\n def __init__(self, dist: Dict[JsonReader, float], ioctx: IOContext):\n \"\"\"Initialize a MixedInput.\n\n Arguments:\n dist (dict): dict mapping JSONReader paths or \"sampler\" to\n probabilities. The probabilities must sum to 1.0.\n ioctx (IOContext): current IO context object.\n \"\"\"\n if sum(dist.values()) != 1.0:\n raise ValueError(\"Values must sum to 1.0: {}\".format(dist))\n self.choices = []\n self.p = []\n for k, v in dist.items():\n if k == \"sampler\":\n self.choices.append(ioctx.default_sampler_input())\n else:\n self.choices.append(JsonReader(k))\n self.p.append(v)\n\n @override(InputReader)\n def next(self) -> SampleBatchType:\n source = np.random.choice(self.choices, p=self.p)\n return source.next()\n" ]
[ [ "numpy.random.choice" ] ]
fjbriones/deep-text-recognition-benchmark
[ "c85d12aa56495fe221656bac4c8cb159a28456b1" ]
[ "utils.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom torch import nn\nimport numpy as np\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass CTCLabelConverter(object):\n \"\"\" Convert between text-label and text-index \"\"\"\n\n def __init__(self, character):\n # character (str): set of the possible characters.\n dict_character = list(character)\n\n self.dict = {}\n for i, char in enumerate(dict_character):\n # NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss\n self.dict[char] = i + 1\n\n self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)\n\n def encode(self, text, batch_max_length=25):\n \"\"\"convert text-label into text-index.\n input:\n text: text labels of each image. [batch_size]\n batch_max_length: max length of text label in the batch. 25 by default\n\n output:\n text: text index for CTCLoss. [batch_size, batch_max_length]\n length: length of each text. [batch_size]\n \"\"\"\n length = [len(s) for s in text]\n\n # The index used for padding (=0) would not affect the CTC loss calculation.\n batch_text = torch.LongTensor(len(text), batch_max_length).fill_(0)\n for i, t in enumerate(text):\n text = list(t)\n text = [self.dict[char] for char in text]\n batch_text[i][:len(text)] = torch.LongTensor(text)\n return (batch_text.to(device), torch.IntTensor(length).to(device))\n\n def decode(self, text_index, length):\n \"\"\" convert text-index into text-label. \"\"\"\n texts = []\n for index, l in enumerate(length):\n t = text_index[index, :]\n\n char_list = []\n for i in range(l):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.\n char_list.append(self.character[t[i]])\n text = ''.join(char_list)\n\n texts.append(text)\n return texts\n\n\nclass CTCLabelConverterForBaiduWarpctc(object):\n \"\"\" Convert between text-label and text-index for baidu warpctc \"\"\"\n\n def __init__(self, character):\n # character (str): set of the possible characters.\n dict_character = list(character)\n\n self.dict = {}\n for i, char in enumerate(dict_character):\n # NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss\n self.dict[char] = i + 1\n\n self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)\n\n def encode(self, text, batch_max_length=25):\n \"\"\"convert text-label into text-index.\n input:\n text: text labels of each image. [batch_size]\n output:\n text: concatenated text index for CTCLoss.\n [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]\n length: length of each text. [batch_size]\n \"\"\"\n length = [len(s) for s in text]\n text = ''.join(text)\n text = [self.dict[char] for char in text]\n\n return (torch.IntTensor(text), torch.IntTensor(length))\n\n def decode(self, text_index, length):\n \"\"\" convert text-index into text-label. 
\"\"\"\n texts = []\n index = 0\n for l in length:\n t = text_index[index:index + l]\n\n char_list = []\n for i in range(l):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.\n char_list.append(self.character[t[i]])\n text = ''.join(char_list)\n\n texts.append(text)\n index += l\n return texts\n\n\nclass LinearLabelConverter(object):\n\n def __init__(self, character):\n list_token = [';']\n list_character = list(character)\n self.character = list_token + list_character\n\n self.dict = {}\n for i, char in enumerate(self.character):\n # print(i, char)\n self.dict[char] = i\n\n def encode(self, text, batch_max_length=25):\n length = [len(s) for s in text]\n batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0)\n for i, t in enumerate(text):\n text_list = list(t)\n text_index_list = [self.dict[char] for char in text_list]\n batch_text[i][:len(text_index_list)] = torch.LongTensor(text_index_list)\n return (batch_text.to(device), torch.IntTensor(length).to(device))\n\n def decode(self, text_index, length):\n texts = []\n for index, l in enumerate(length):\n text = ''.join([self.character[i] for i in text_index[index, :]])\n texts.append(text)\n return texts\n\n\n\nclass AttnLabelConverter(object):\n \"\"\" Convert between text-label and text-index \"\"\"\n\n def __init__(self, character):\n # character (str): set of the possible characters.\n # [GO] for the start token of the attention decoder. [s] for end-of-sentence token.\n list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]']\n list_character = list(character)\n self.character = list_token + list_character\n\n self.dict = {}\n for i, char in enumerate(self.character):\n # print(i, char)\n self.dict[char] = i\n\n def encode(self, text, batch_max_length=25):\n \"\"\" convert text-label into text-index.\n input:\n text: text labels of each image. [batch_size]\n batch_max_length: max length of text label in the batch. 25 by default\n\n output:\n text : the input of attention decoder. [batch_size x (max_length+2)] +1 for [GO] token and +1 for [s] token.\n text[:, 0] is [GO] token and text is padded with [GO] token after [s] token.\n length : the length of output of attention decoder, which count [s] token also. [3, 7, ....] [batch_size]\n \"\"\"\n length = [len(s) + 1 for s in text] # +1 for [s] at end of sentence.\n # batch_max_length = max(length) # this is not allowed for multi-gpu setting\n batch_max_length += 1\n # additional +1 for [GO] at first step. batch_text is padded with [GO] token after [s] token.\n batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0)\n for i, t in enumerate(text):\n text_list = list(t)\n text_list.append('[s]')\n text_list = [self.dict[char] for char in text_list]\n batch_text[i][1:1 + len(text_list)] = torch.LongTensor(text_list) # batch_text[:, 0] = [GO] token\n return (batch_text.to(device), torch.IntTensor(length).to(device))\n\n def decode(self, text_index, length):\n \"\"\" convert text-index into text-label. 
\"\"\"\n texts = []\n for index, l in enumerate(length):\n text = ''.join([self.character[i] for i in text_index[index, :]])\n texts.append(text)\n return texts\n\n\nclass Averager(object):\n \"\"\"Compute average for torch.Tensor, used for loss average.\"\"\"\n\n def __init__(self):\n self.reset()\n\n def add(self, v):\n count = v.data.numel()\n v = v.data.sum()\n self.n_count += count\n self.sum += v\n\n def reset(self):\n self.n_count = 0\n self.sum = 0\n\n def val(self):\n res = 0\n if self.n_count != 0:\n res = self.sum / float(self.n_count)\n return res\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n vs, pred = output.topk(maxk, 1, True, True)\n # print(vs.shape)\n \n pred = pred.t()\n vs = vs.t()\n # print(pred.shape)\n # print(vs[:2,:10])\n # print(pred[:2,:10])\n # print(target[:10])\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[k-1:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef info_nce_loss(features, batch_size, device, n_views=2, temperature=1, num_of_features=13):\n\n # num_of_features = 3\n # print(features.shape)\n batch_features = features.view(batch_size,-1,features.shape[1])\n limit_features_first = batch_features[:,:num_of_features*2-1,:]\n limit_features_last = batch_features[:,-1:,:]\n limit_features = torch.cat([limit_features_first, limit_features_last], dim=1)\n # limit_features = F.normalize(limit_features, dim=1)\n features = limit_features.view(-1, limit_features.shape[2])\n\n labels = torch.cat([torch.arange(batch_size*num_of_features) for i in range(n_views)], dim=0)\n labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()\n labels = labels.to(device)\n\n\n features = F.normalize(features, dim=1)\n\n similarity_matrix = torch.matmul(features, features.T)\n # assert similarity_matrix.shape == (\n # self.args.n_views * self.args.batch_size, self.args.n_views * self.args.batch_size)\n # assert similarity_matrix.shape == labels.shape\n\n # discard the main diagonal from both: labels and similarities matrix\n mask = torch.eye(labels.shape[0], dtype=torch.bool).to(device)\n labels = labels[~mask].view(labels.shape[0], -1)\n similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)\n # print(labels.shape)\n # print(similarity_matrix.shape)\n\n # assert similarity_matrix.shape == labels.shape\n\n # select and combine multiple positives\n positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)\n # select only the negatives the negatives\n negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)\n\n logits = torch.cat([positives, negatives], dim=1)\n labels = torch.zeros(logits.shape[0], dtype=torch.long).to(device)\n\n random_labels = torch.randint(low=0, high=logits.shape[1], size=(logits.shape[0],1)).to(device)\n index = torch.arange(logits.shape[0]).to(device).unsqueeze(1)\n\n labels_access = torch.cat([index, random_labels], 1)\n labels_access = torch.transpose(labels_access, 0, 1)\n\n temp = logits[tuple(labels_access)]\n\n logits[:,0] = temp\n logits[tuple(labels_access)] = positives.squeeze()\n\n labels = random_labels.squeeze().to(device)\n\n logits = logits / temperature\n return logits, labels\n\n" ]
[ [ "torch.zeros", "torch.nn.functional.normalize", "torch.cat", "torch.arange", "torch.IntTensor", "torch.no_grad", "torch.randint", "torch.cuda.is_available", "torch.LongTensor", "torch.eye", "torch.transpose", "torch.matmul" ] ]
chrhenning/snn_global_pattern_induction
[ "3ee4037a4568c393378eaec74483696d5281f376" ]
[ "src/evaluation.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2017 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n@title :evaluation.py\n@author :ch\n@contact :[email protected]\n@created :04/19/2017\n@version :1.0\n@python_version :3.5.2\n\nThe module provides several methods to evaluate the simulation results.\n\nPlease refer to the description of the static methods defined in this module\n\"\"\"\n\nimport configuration as config\nfrom util.config_exception import ConfigException\nfrom readout.svm import SVM\nfrom readout.highest_response import HighestResponse\n\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nfrom collections import Counter\nimport os\nimport json\nimport time\n\nimport logging\nlogger = logging.getLogger(config.logging_name)\n\n\"\"\"Contains the last trained readout classifier. Every time a new training\nbatch is evaluated, this classifier is retrained. (Note, this only applies for\nalternative classifiers. The standard readout demands no additional training.\n\"\"\"\nCLASSIFIER = None\n\ndef evaluate_output_rates(rates, labels, num_classes, tag, store=True,\n epoch=None):\n \"\"\"Evaluate the firing rates of the output layer.\n\n If the output layer size has not been specified by the user, each output\n neuron corresponds to a different class and the maximum firing rate\n determines the predicted label. If the user wishes to use another readout\n mechanism on the customized output layer, this method is used instead. An\n overview of alternative readout methods is given in the config file.\n\n This method computes measures for multiclass classification problems, such\n as accuracy and f-scores. The scores are logged. If desired, the scores can\n also be stored in a file (with more details). The filename will be:\n 'output_eval_<tag>.jsonl'\n\n Args:\n rates: A list of arrays. Each entry is an 1D array of spike counts\n (from the output layer).\n labels: A list of ground truth labels. Each entry corresponds to the\n rates given in the first parameter.\n num_classes: The number of output classes.\n tag: A name, that is associated with the data split, that is evaluated.\n This tag will define the filename. If an alternative readout\n classifier is used, this tag decides, wether this classifier has\n to be retrained.\n store: Whether to store the results or not. Results are appended as a\n single JSON object to a JSONL file.\n epoch: If specified, the epoch can later help to identify the results.\n\n Returns:\n The error (1-accuracy). (Or 1, if nothing can be evaluated)\n \"\"\"\n\n y_true = [] # Gound truth\n y_pred = [] # Predicted sample\n\n rates_arr = np.asarray(rates)\n labels_arr = np.asarray(labels)\n\n # We want to ignore those samples, that elicited no output spiking. 
(This\n # is decided by the user, as he can tweak the simulation to continue as\n # long as no output spikes appeared).\n # I.e., if all output rates are zero, the sample is ignored.\n valid_rates = np.where(np.sum(rates_arr, axis=1) != 0)[0]\n ignored = labels_arr.size - valid_rates.size\n # Delete invalid samples.\n rates_arr = rates_arr[valid_rates]\n labels_arr = labels_arr[valid_rates]\n # Simple renaming.\n y_true = labels_arr\n\n # Helper functions to print results.\n _perc = lambda x : round(100*x,2)\n _sperc = lambda x : '%.2f%%' % (_perc(x))\n\n if ignored > 0:\n logger.warning('%d (%s) samples have been ignored during ' \\\n % (ignored, _sperc(ignored/len(rates))) \\\n + 'evaluation, since there has been no output ' \\\n + 'activity.')\n\n # Just a flag that is set to False for alternative classifiers, as they\n # provide less evaluation metrics. (I.e., some metrics cannot be calculated\n # and stored when using alternative classifiers).\n extra_scores = False\n\n # If we need no special readout. Meaning each output neuron simply\n # represents a class and no further logic is needed to classify the output.\n # Note, that this option provides several eval measures (such as\n # ambiguousness) that the alternative classifiers do not provide.\n if config.output_size is None:\n extra_scores = True\n\n # Predicted samples, where ambiguous outcomes are not necessarily\n # misclassification. I.e., if the correct class has one of the highest\n # outputs, it is considered as correct classification.\n y_pred_with_ambig = []\n confidence = [] # Confidence of correct output\n unambiguousness = [] # Normalized distance to second best prediction\n # Ambiguous samples are samples, that have multiple outputs with the\n # same maximum firing rate. They are considered as misclassifications.\n ambiguous = 0\n\n for i in range(rates_arr.shape[0]):\n frates = rates_arr[i,:]\n label = labels_arr[i]\n\n pred = np.argmax(frates)\n pred_with_ambig = pred\n # Ensure, that output is not ambiguous.\n counts = Counter(frates)\n if counts[frates[pred]] > 1:\n ambiguous += 1\n # Choose index, that is not the correct one, but has maximum\n # confidence. I.e., enforce misclassification.\n pred = np.argmax(np.concatenate((frates[:label], [-1],\n frates[label+1:])))\n assert(pred != label)\n\n if frates[pred] == frates[label]:\n pred_with_ambig = label\n\n norm_frates = frates / np.linalg.norm(frates, 1)\n sec_best = np.argmax(np.concatenate((frates[:pred],\n frates[pred+1:])))\n\n y_pred.append(pred)\n y_pred_with_ambig.append(pred_with_ambig)\n\n confidence.append(norm_frates[label])\n unambiguousness.append(norm_frates[pred] - norm_frates[sec_best])\n\n y_pred = np.asarray(y_pred)\n\n # Use an alternative readout classifier to evaluate the output rates.\n # Retrain the classifier if tag == 'training'.\n else:\n global CLASSIFIER\n\n # Retrain if necessary.\n if tag == 'training':\n logger.debug('Retraining readout classifier according to method:' \\\n + ' \\'%s\\'' % config.classification_method)\n if config.classification_method == 'highest_response':\n CLASSIFIER = HighestResponse()\n CLASSIFIER.fit(rates_arr, labels_arr, num_classes=num_classes)\n elif config.classification_method == 'svm':\n CLASSIFIER = SVM()\n CLASSIFIER.fit(rates_arr, labels_arr, C=config.svm_C,\n kernel=config.svm_kernel)\n else:\n error_msg = 'Classification method \\'%s\\' is unknown. 
' \\\n % config.classification_method\n raise ConfigException(error_msg)\n\n # Predict outcome for given rates.\n y_pred = CLASSIFIER.predict(rates_arr)\n\n if y_true.size == 0:\n return 1\n\n json_obj = dict()\n json_obj['timestamp'] = time.time()\n if epoch is not None:\n json_obj['epoch'] = epoch\n json_obj['num_samples'] = len(rates)\n json_obj['ignored'] = ignored\n json_obj['classification_method'] = None\n if config.output_size is not None:\n json_obj['classification_method'] = config.classification_method\n\n if extra_scores:\n json_obj['ambiguous'] = ambiguous\n\n if ambiguous > 0:\n logger.debug('%d (%s) samples had more than one output neuron with'\\\n % (ambiguous, _sperc(ambiguous/len(rates)))\n + ' maximum confidence (ambiguous classification).')\n\n acc = accuracy_score(y_true, y_pred)\n json_obj['accuracy'] = acc\n logger.info('### %s accuracy: %s' % (tag, _sperc(acc)))\n\n if extra_scores and ambiguous > 0:\n acc_with_ambig = accuracy_score(y_true, y_pred_with_ambig)\n if acc_with_ambig != acc:\n json_obj['accuracy_with_ambiguous'] = acc_with_ambig\n logger.info('When ambiguous outcomes are allowed, the accuracy ' \\\n + 'would be: %s' % (_sperc(acc_with_ambig)))\n\n classes = list(range(num_classes))\n\n def _f_score(method):\n prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, \\\n labels=classes, average=method)\n json_obj['prec_'+method] = prec\n json_obj['rec_'+method] = rec\n json_obj['f1_'+method] = f1\n return f1\n\n f1_micro = _f_score('micro')\n f1_macro = _f_score('macro')\n f1_weighted = _f_score('weighted')\n\n logger.info('Micro/Macro/Weighted - F-Scores: %.4f, %.4f, %.4f.' \\\n % (round(f1_micro,4), round(f1_macro,4), round(f1_weighted,4)))\n\n # Label-wise f-scores.\n prec, rec, f1, supp = precision_recall_fscore_support(y_true, y_pred, \\\n labels=classes, average=None)\n json_obj['labels'] = classes\n json_obj['prec'] = prec.tolist()\n json_obj['rec'] = rec.tolist()\n json_obj['f1'] = f1.tolist()\n json_obj['support'] = supp.tolist()\n\n # Prediction confidence and unambiguousness.\n if extra_scores:\n conf_mean = np.mean(confidence)\n conf_std = np.std(confidence)\n unambig_mean = np.mean(unambiguousness)\n unambig_std = np.std(unambiguousness)\n json_obj['confidence_mean'] = conf_mean\n json_obj['confidence_std'] = conf_std\n json_obj['unambiguousness_mean'] = unambig_mean\n json_obj['unambiguousness_std'] = unambig_std\n\n logger.info('Confidence for correct label [mean (std)]: %.4f (%.4f).' \\\n % (round(conf_mean, 4), round(conf_std, 4)))\n\n logger.info('Unambiguousness of the predictions (distance of best to ' \\\n + 'second-best prediction) [mean (std)]: %.4f (%.4f).' \\\n % (round(unambig_mean, 4), round(unambig_std, 4)))\n\n # Store results.\n if store:\n if not os.path.isdir(config.eval_dir):\n os.makedirs(config.eval_dir)\n filename = os.path.join(config.eval_dir, 'output_eval_'+tag+'.jsonl')\n\n with open(filename, 'a') as f:\n json_str = json.dumps(json_obj)\n f.write(json_str + '\\n')\n f.flush()\n\n logger.debug('Appended output evaluations to %s.' % filename)\n\n return 1 - acc\n\nif __name__ == '__main__':\n pass\n\n\n" ]
[ [ "numpy.concatenate", "numpy.linalg.norm", "numpy.asarray", "numpy.sum", "numpy.mean", "sklearn.metrics.precision_recall_fscore_support", "sklearn.metrics.accuracy_score", "numpy.std", "numpy.argmax" ] ]
ScanLab-ossi/SDN-DCRNN
[ "ed74497717fc7d0a6bcadc1b3d48e2e848a60175" ]
[ "scripts/plot_predictions_multi_horizon.py" ]
[ "import numpy as np\nimport matplotlib.pylab as plt\nimport argparse\nimport logging\nfrom os.path import join as pj\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-p\", \"--predictions-file\", type=str, default=\"DCRNN npz file\"\n )\n parser.add_argument(\n \"-n\", \"--plot-nodes\", type=int, nargs='+', default=[0],\n help='Node numbers to plot for.'\n )\n parser.add_argument(\n \"-o\", \"--output-dir\", type=str, help=\"Output directory for PNG figures.\"\n )\n parser.add_argument('--horizons', type=int, nargs='+', default=[7, 30],\n help='Horizons to plot predictions for')\n parser.add_argument('-c', '--cut-off', type=int, default=3000,\n help='Cut off point of the plot after this amount of samples, for better readability')\n return parser.parse_args()\n\n\ndef plot_multi_horizon_predictions_vs_ground_truth(horizons, predictions, ground_truth, output_path, cut_off):\n logging.debug(\"plot_max_horizon_predictions_vs_ground_truth\")\n figure = plt.figure(figsize=(60, 15))\n axes = figure.add_subplot(111)\n plot_len = len(ground_truth[0])\n for horizon in horizons:\n # diff is created by the model in prediction calculation\n # used as start point to align all plots\n diff_to_max_horizon = max(horizons) - horizon\n # used to calculate end point to align all plots\n diff_to_min_horizon = horizon - min(horizons)\n # cut off end of plot if required\n end = min(cut_off, len(ground_truth[horizon-1]) - diff_to_min_horizon)\n plot_len = end - diff_to_max_horizon\n\n if horizon == min(horizons):\n # only need to plot ground truth once\n axes.plot(ground_truth[horizon-1][diff_to_max_horizon:end],\n label='ground truth')\n axes.plot(predictions[horizon-1][diff_to_max_horizon:end],\n label='prediction horizon {}'.format(horizon))\n axes.set_title('Horizon Predictions at {} vs Ground Truth'.format(horizons), fontsize=30)\n handles, labels = axes.get_legend_handles_labels()\n axes.legend(handles, labels, loc='upper center', fontsize=30)\n axes.set_xlabel(\"Sample Seconds\", fontsize=30)\n axes.set_xticks(range(0, plot_len, 300))\n axes.tick_params(labelsize=30)\n axes.set_xlim(0, plot_len)\n axes.set_ylabel(\"Prediction vs Truth\", fontsize=30)\n figure.savefig(output_path, bbox_inches='tight', pad_inches=0)\n plt.close(figure)\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n args = parse_args()\n\n data_set = np.load(args.predictions_file)\n\n horizons_len = data_set['predictions'].shape[0]\n logging.info(\"Found %d horizons in data\", horizons_len)\n if max(args.horizons) > horizons_len:\n logging.fatal(\"Requested horizons {} out of bound of horizons found in data {}\"\n .format(args.horizons, horizons_len))\n num_nodes = data_set['predictions'].shape[2]\n logging.info(\"Found %d nodes (ports) in data\", num_nodes)\n\n predictions = data_set['predictions'].transpose()\n ground_truth = data_set['groundtruth'].transpose()\n\n for node in args.plot_nodes:\n logging.info(\"Processing node #\" + str(node))\n output_path = pj(args.output_dir, \"predictions-vs-ground-truth-node-{}.png\".format(node))\n node_ground_truth = ground_truth[node].transpose()\n node_predictions = predictions[node].transpose()\n plot_multi_horizon_predictions_vs_ground_truth(args.horizons,\n node_predictions,\n node_ground_truth,\n output_path,\n args.cut_off)\n\n logging.info(\"Completed all plots, saved to %s\", args.output_dir)\n" ]
[ [ "matplotlib.pylab.figure", "numpy.load", "matplotlib.pylab.close" ] ]
Aerospace-AI/RL_ExoIntercept
[ "44514f96d7771437d8b29c220f55f9ed936aa1fd" ]
[ "RL_lib/Policies/PPO/policy_ppo_kf.py" ]
[ "\"\"\"\n Implements PPO\n\n PPO: https://arxiv.org/abs/1707.06347\n Modified from policy Written by Patrick Coady (pat-coady.github.io) to implement\n latest version of PPO with pessimistic ratio clipping\n\n o Has an option to servo both the learning rate and the clip_param to keep KL \n within a specified range. This helps on some control tasks\n (i.e., Mujoco Humanid-v2)\n \n o Uses approximate KL \n\n o Models distribution of actions as a Gaussian with variance not conditioned on state\n\n \n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport rl_utils\nimport advantage_utils\nfrom time import time\nimport sklearn.utils\n \nclass Policy(object):\n \"\"\" NN-based policy approximation \"\"\"\n def __init__(self, net, pd, adv_func=None, kl_targ=0.001, mask_neg_adv=False,\n epochs=20, init_func=rl_utils.default_init,\n test_mode=False, shuffle=True, shuffle_by_chunks=False, servo_kl=False, beta=0.1, max_grad_norm=999, \n obs_key='observes', scale_vector_obs=True, scale_image_obs=False, verbose=False, rollout_limit=1):\n \"\"\"\n Args:\n kl_targ: target KL divergence between pi_old and pi_new\n epochs: number of epochs per update\n test_mode: boolean, True removes all exploration noise\n shuffle: boolean, shuffles data each epoch \n servo_kl: boolean: set to False to not servo beta to KL, which is original PPO implementation\n beta: clipping parameter for pessimistic loss ratio\n \n \"\"\"\n print('Policy with vectorized sample')\n net.apply(init_func)\n\n self.net = net\n self.pd = pd\n\n if adv_func is None:\n self.adv_func = advantage_utils.Adv_default()\n else:\n self.adv_func = adv_func\n \n self.servo_kl = servo_kl\n self.test_mode = test_mode\n self.shuffle = shuffle\n self.shuffle_by_chunks = shuffle_by_chunks\n\n self.mask_neg_adv = mask_neg_adv \n if self.net.recurrent_steps > 1 and not self.shuffle_by_chunks:\n print('Policy: recurrent steps > 1, disabling shuffle')\n self.shuffle = False\n self.kl_stat = None\n self.entropy_stat = None\n self.kl_targ = kl_targ\n self.epochs = epochs \n self.lr_multiplier = 1.0 # dynamically adjust lr when D_KL out of control\n self.max_beta = 0.5\n self.min_beta = 0.01 \n self.max_grad_norm = max_grad_norm\n self.beta = beta\n self.obs_key = obs_key\n self.grad_monitor = rl_utils.Grad_monitor('Policy', net)\n self.vector_scaler = rl_utils.Scaler(net.obs_dim)\n self.image_scaler = rl_utils.Image_scaler(net.obs_dim)\n self.scale_image_obs = scale_image_obs\n self.scale_vector_obs = scale_vector_obs\n\n self.verbose = verbose \n self.rollout_limit = rollout_limit\n self.rollout_list = []\n\n self.calc_loss = self.calc_loss1\n\n if self.net.recurrent_steps > 1:\n self.use_padding = True\n else:\n self.use_padding = False\n\n self.optimizer = torch.optim.Adam(self.net.parameters(), self.net.lr)\n\n print('\\tTest Mode: ',self.test_mode)\n print('\\tClip Param: ',self.beta)\n print('\\tShuffle : ',self.shuffle)\n print('\\tShuffle by Chunks: ',self.shuffle_by_chunks)\n print('\\tMax Grad Norm: ',self.max_grad_norm)\n print('\\tRecurrent Steps: ',self.net.recurrent_steps)\n print('\\tRollout Limit: ',self.rollout_limit)\n print('\\tAdvantage Func: ',self.adv_func)\n print('\\tAdvantage Norm: ',self.adv_func.normalizer)\n print('\\tPD: ',self.pd)\n print('\\tLoss Function: ',self.calc_loss)\n\n def save_params(self,fname):\n fname = 'policy_' + fname + '.pt'\n param_dict = {}\n param_dict['image_scaler_u'] = self.image_scaler.means\n param_dict['image_scaler_var'] = self.image_scaler.vars\n 
param_dict['vector_scaler_u'] = self.vector_scaler.means\n param_dict['vector_scaler_var'] = self.vector_scaler.vars\n param_dict['net_state'] = self.net.state_dict()\n torch.save(param_dict, fname)\n\n def load_params(self,fname):\n fname = 'policy_' + fname + '.pt'\n param_dict = torch.load(fname)\n self.image_scaler.means = param_dict['image_scaler_u']\n self.image_scaler.vars = param_dict['image_scaler_var']\n self.vector_scaler.means = param_dict['vector_scaler_u']\n self.vector_scaler.vars = param_dict['vector_scaler_var']\n self.net.load_state_dict(param_dict['net_state'])\n\n\n\n def sample(self, image_obs, vector_obs, state):\n\n if self.scale_image_obs:\n image_obs = self.image_scaler.apply(image_obs)\n if self.scale_vector_obs:\n vector_obs = self.vector_scaler.apply(vector_obs)\n logits, log_vars, state = self.net.forward(image_obs, vector_obs, state, np.ones(1), np.zeros(1), return_tensor=False)\n action, env_action = self.pd.sample(logits, log_vars, self.test_mode)\n return action, env_action, state \n\n def update_scalers(self, rollouts):\n self.image_scaler.update(rollouts['image_observes'])\n self.vector_scaler.update(rollouts[self.obs_key])\n\n \n def update(self, rollouts, logger):\n if len(self.rollout_list) == self.rollout_limit:\n del self.rollout_list[0]\n self.rollout_list.append(rollouts)\n keys = self.rollout_list[0].keys()\n comb_rollouts = {}\n for k in keys:\n comb_rollouts[k] = np.concatenate([r[k] for r in self.rollout_list])\n self.update1(comb_rollouts, logger)\n \n def update1(self, rollouts, logger):\n \n if self.use_padding:\n key = 'padded_'\n else:\n key = '' \n image_observes = rollouts[key + 'image_observes']\n vector_observes = rollouts[key + self.obs_key]\n\n actions = rollouts[key + 'actions']\n states = rollouts[key + 'policy_states']\n vtarg = rollouts[key + 'disc_sum_rew']\n vpred = rollouts[key + 'vpreds']\n masks = rollouts[key + 'masks']\n flags = rollouts[key + 'flags']\n\n if self.scale_vector_obs:\n vector_observes = self.vector_scaler.apply(vector_observes)\n if self.scale_image_obs:\n image_observes = self.image_scaler.apply(image_observes)\n \n vtarg_unp = rollouts['disc_sum_rew']\n vpred_unp = rollouts['vpreds']\n\n actions_pt = self.pd.from_numpy(actions)\n\n with torch.no_grad():\n old_logits_pt, log_vars_pt, _ = self.net.forward(image_observes, vector_observes, states, masks, flags)\n\n old_logp_pt = self.pd.logp(actions_pt, old_logits_pt, log_vars_pt) \n old_logp = old_logp_pt.detach().numpy() \n loss, kl, entropy = 0, 0, 0\n\n advantages_unp = vtarg_unp - vpred_unp\n advantages = vtarg - vpred \n\n print('ADV1: ', np.mean(advantages), np.std(advantages), np.max(advantages), np.min(advantages))\n advantages = self.adv_func.calc_adv(advantages_unp, advantages)\n print('ADV2: ', np.mean(advantages), np.std(advantages), np.max(advantages), np.min(advantages))\n\n t0 = time()\n for e in range(self.epochs):\n\n if self.shuffle:\n if self.shuffle_by_chunks:\n image_observes, vector_observes, actions, advantages, states, masks, flags, old_logp = \\\n rl_utils.shuffle_list_by_chunks([image_observes, vector_observes, actions, advantages, states, masks, flags, old_logp], self.net.recurrent_steps)\n else:\n image_observes, vector_observes, actions, advantages, states, masks, flags, old_logp, = \\\n sklearn.utils.shuffle(image_observes, vector_observes, actions, advantages, states, masks, flags, old_logp )\n\n actions_pt = self.pd.from_numpy(actions)\n\n self.optimizer.zero_grad()\n logits_pt, log_vars_pt, _ = self.net.forward(image_observes, 
vector_observes, states, masks, flags, unroll=True)\n logp_pt = self.pd.logp(actions_pt, logits_pt, log_vars_pt)\n loss = self.calc_loss(logp_pt, torch.from_numpy(old_logp).float(), torch.from_numpy(advantages).float(), self.beta, masks)\n loss.backward()\n if self.max_grad_norm is not None:\n ng = nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)\n else:\n ng = None\n self.optimizer.step()\n self.grad_monitor.add(ng)\n\n kl = self.pd.kl(old_logp, logp_pt.detach().numpy(), log_vars_pt, masks)\n entropy = self.pd.entropy(logp_pt.detach().numpy(), log_vars_pt, masks) \n if kl > 4.0 * self.kl_targ and self.servo_kl:\n print(' *** BROKE *** ',e, kl)\n break \n\n t1 = time()\n \n if self.servo_kl:\n self.adjust_beta(kl)\n\n for g in self.optimizer.param_groups:\n g['lr'] = self.net.lr * self.lr_multiplier\n self.kl_stat = kl\n self.entropy_stat = entropy\n self.grad_monitor.show()\n\n if self.verbose:\n print('POLICY ROLLOUT LIST: ',len(self.rollout_list))\n print('POLICY Update: ',t1-t0,observes.shape)\n print('kl = ',kl, ' beta = ',self.beta,' lr_mult = ',self.lr_multiplier)\n print('u_adv: ',u_adv)\n print('std_adv: ',std_adv)\n\n logger.log({'PolicyLoss': loss,\n 'Policy_SD' : np.mean(self.pd.sd(logits_pt, log_vars_pt)), \n 'Policy_Entropy': entropy,\n 'Policy_KL': kl,\n 'Policy_Beta': self.beta,\n 'Policy_lr_mult': self.lr_multiplier})\n\n def adjust_beta(self,kl):\n if kl < self.kl_targ / 2:\n self.beta = np.minimum(self.max_beta, 1.5 * self.beta) # max clip beta\n #print('too low')\n if self.beta > (self.max_beta/2) and self.lr_multiplier < 10:\n self.lr_multiplier *= 1.5\n elif kl > self.kl_targ * 2:\n #print('too high')\n self.beta = np.maximum(self.min_beta, self.beta / 1.5) # min clip beta\n if self.beta <= (2*self.min_beta) and self.lr_multiplier > 0.1:\n self.lr_multiplier /= 1.5\n\n def calc_loss1(self,logp, old_logp, advantages, beta, masks):\n\n if self.mask_neg_adv:\n new_masks = masks * (advantages > 0)\n else:\n new_masks = masks\n\n if self.use_padding:\n logp, old_logp, advantages = rl_utils.unpad_list([logp, old_logp, advantages], new_masks)\n\n ratio = torch.exp(logp - old_logp)\n surr1 = advantages * ratio\n surr2 = advantages * torch.clamp(ratio, 1.0 - beta, 1.0 + beta)\n \n loss = -torch.mean(torch.min(surr1,surr2)) \n return loss\n\n def calc_loss2(self,logp, old_logp, advantages, beta, masks):\n\n if self.mask_neg_adv:\n new_masks = masks * (advantages > 0)\n else:\n new_masks = masks\n\n if self.use_padding:\n logp, old_logp, advantages = rl_utils.unpad_list([logp, old_logp, advantages], new_masks)\n\n advantages /= torch.sum(advantages)\n ratio = torch.exp(logp - old_logp)\n surr1 = advantages * ratio\n surr2 = advantages * torch.clamp(ratio, 1.0 - beta, 1.0 + beta)\n\n loss = -torch.sum(torch.min(surr1,surr2))\n return loss\n\n def calc_loss3(self,logp, old_logp, advantages, beta, masks):\n\n if self.mask_neg_adv:\n new_masks = masks * (advantages > 0)\n else:\n new_masks = masks\n\n if self.use_padding:\n logp, old_logp, advantages = rl_utils.unpad_list([logp, old_logp, advantages], new_masks)\n\n ratio = torch.exp(logp - old_logp)\n surr1 = advantages * ratio\n\n loss = -torch.mean(surr1)\n return loss\n\n\n" ]
[ [ "numpy.concatenate", "numpy.max", "torch.min", "numpy.zeros", "numpy.minimum", "torch.save", "numpy.ones", "torch.no_grad", "numpy.min", "numpy.mean", "torch.clamp", "torch.from_numpy", "numpy.std", "torch.load", "numpy.maximum", "torch.exp", "torch.mean", "torch.sum" ] ]
Ifyokoh/End-to-End-Machine-Learning
[ "f23f8034aa3fc02c6dd834de50a25d5603adc8d2" ]
[ "propertypro/propertypro.py" ]
[ "from numpy import NaN\nimport pandas as pd\nimport requests\nimport math\nimport re\n\nfrom bs4 import BeautifulSoup\n\n\nclass Propertypro:\n \"\"\"\n web-scraper tool for scraping data on propertypro.ng\n \n Parameters:\n num_samples (int): The number of samples of data to be scraped. \n location (list): list of keywords to scrape\n\n Returns:\n pd.DataFrame: Returns a dataframe with the following categories as columns:\n title, location, price, number of bedrooms, toilets, bathroom, whether it is furnished, serviced and newly built\n \n \"\"\"\n\n def __init__(self) -> None:\n self.no_samples = 0\n\n\n def process_data(self, dataframe: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n cleans data from the provided Dataframe.\n :param data: Scraped data .\n :return: pandas dataframe\n \"\"\"\n data = dataframe\n data = data.dropna()\n data['rooms'] = data['rooms'].str.split('\\n')\n data[['nothing', 'bedroom', 'bathroom', 'toilet', 'remove']] = pd.DataFrame(data['rooms'].tolist(), index= data.index)\n data['bedroom'] = data['bedroom'].str.strip('beds')\n data['bathroom'] = data['bathroom'].str.strip('baths')\n data['toilet'] = data['toilet'].str.strip('Toilets')\n data['price'] = data['price'].str.replace(r'[^0-9]+','')\n data['furnishing'] = data['furnishing'].str.split('\\n')\n data['newly_built'] = data['furnishing'].apply(lambda x: ''.join(['1' if \"Newly Built\" in x else '0']))\n data['furnished'] = data['furnishing'].apply(lambda x: ''.join(['1' if \"Furnished\" in x else '0']))\n data['serviced'] = data['furnishing'].apply(lambda x: ''.join(['1' if \"Serviced\" in x else '0']))\n data = data.drop(columns=['rooms', 'nothing', 'remove', 'furnishing'])\n return data\n\n\n def scrape_data(self, no_samples, keywords):\n \"\"\"\n Scrapes data from provided urls\n :param : no_samples, keywords\n :return: pandas dataFrame.\n \"\"\"\n\n data = {\"title\": [], \"location\": [], \"furnishing\": [], \"rooms\": [], \"price\": []}\n for keyword in keywords:\n page_url = []\n for i in range(0,round((no_samples/22))):\n page_url.append('https://www.propertypro.ng/property-for-rent/in/' + keyword + '?search=&type=&bedroom=&min_price=&max_price=&page=' + str(i))\n for links in page_url:\n response = requests.get(links)\n soup = BeautifulSoup(response.content, 'html.parser')\n \n for title in soup.find_all('h2', { 'class':\"listings-property-title\" }):\n data[\"title\"].append(title.text)\n data[\"location\"].append(keyword)\n for furnishing in soup.find_all('div', {'class': \"furnished-btn\"}):\n data[\"furnishing\"].append(furnishing.text)\n for rooms in soup.find_all('div', {'class': \"fur-areea\"}):\n data[\"rooms\"].append(rooms.text)\n for price in soup.find_all('h3', { 'class': 'listings-price' }):\n data[\"price\"].append(price.text)\n page_url.clear()\n\n # df = pd.DataFrame(data)\n df = pd.DataFrame.from_dict(data, orient='index')\n df = df.transpose()\n pd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n df = self.process_data(df)\n return df\n\n" ]
[ [ "pandas.DataFrame.from_dict", "pandas.set_option" ] ]
geosharma/pyrotd
[ "f1c1637ef990c88737a08add496b923013a83d23" ]
[ "tests/test_osc_resp.py" ]
[ "\"\"\"Test oscillator response calculation.\"\"\"\nimport numpy as np\nimport pytest\n\nimport pyrotd\nfrom .test_spectra import load_at2\n\nosc_freq = 10\n\n\ndef calc_oscillator_resp(motion, osc_damping, resp):\n return pyrotd.calc_oscillator_resp(\n motion[\"freqs\"],\n motion[\"fourier_amps\"],\n osc_damping,\n osc_freq,\n peak_resp_only=True,\n osc_type=resp,\n )\n\n\[email protected]\ndef motion():\n time_step, accels = load_at2(\"RSN8883_14383980_13849090.AT2\")\n fourier_amps = np.fft.rfft(accels)\n freqs = np.linspace(0, 1.0 / (2 * time_step), num=fourier_amps.size)\n\n return {\n \"time_step\": time_step,\n \"accels\": accels,\n \"freqs\": freqs,\n \"fourier_amps\": fourier_amps,\n }\n\n\[email protected](\n \"resp,power\",\n [\n (\"sa\", 0),\n (\"psv\", 1),\n (\"sv\", 1),\n (\"sd\", 2),\n ],\n)\ndef test_osc_resp(motion, resp, power):\n # For very light damping everything should be the same\n osc_damping = 0.005\n\n ref_psa = calc_oscillator_resp(motion, osc_damping, \"psa\")\n\n calc_resp = pyrotd.calc_oscillator_resp(\n motion[\"freqs\"],\n motion[\"fourier_amps\"],\n osc_damping,\n osc_freq,\n peak_resp_only=True,\n osc_type=resp,\n )\n # Convert to PSA\n calc_psa = calc_resp * (2 * np.pi * osc_freq) ** power\n np.testing.assert_allclose(calc_psa, ref_psa, rtol=1e-1)\n\n\[email protected](strict=True)\ndef test_sa(motion):\n osc_damping = 0.010\n ref_psa = calc_oscillator_resp(motion, osc_damping, \"psa\")\n\n calc_psa = pyrotd.calc_oscillator_resp(\n motion[\"freqs\"],\n motion[\"fourier_amps\"],\n osc_damping,\n osc_freq,\n peak_resp_only=True,\n osc_type=\"sa\",\n )\n np.testing.assert_allclose(calc_psa, ref_psa, rtol=1e-2)\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.fft.rfft", "numpy.linspace" ] ]
wthirskgaskill/codonPython
[ "8bbb97fa73a9f663f602859ac310bc83f1224f85" ]
[ "codonPython/nhsNumber.py" ]
[ "import random\nimport numpy as np\n\n\ndef nhsNumberValidator(number: int) -> bool:\n \"\"\"\n Validate NHS Number according to modulus 11 checks as recorded in the data dictionary.\n https://www.datadictionary.nhs.uk/data_dictionary/attributes/n/nhs/nhs_number_de.asp?shownav=1\n\n Parameters\n ----------\n number : int\n 10 digit integer to validate.\n\n Returns\n ----------\n bool\n If the number passes modulus 11 checks a.k.a. is valid.\n\n Examples\n ---------\n >>> nhsNumberValidator(8429141456)\n True\n >>> nhsNumberValidator(8429141457)\n False\n \"\"\"\n\n if not isinstance(number, int):\n raise ValueError(\n \"Please input a positive 10 digit integer to validate.\")\n if number < 0:\n raise ValueError(\n \"Please input a postitive 10 digit integer to validate.\")\n digits = [int(digit) for digit in str(number)]\n # NHS Numbers are 10 digits long.\n if not len(digits) == 10:\n raise ValueError(\n \"Please input a postitive 10 digit integer to validate.\")\n # Apply weighting to first 9 digits\n weighted_digits = np.dot(np.array(digits[:9]), np.arange(10, 1, -1))\n # Validity is based on the check digit, which has to be equal to `remainder`\n remainder = weighted_digits % 11\n check_digit = 11 - remainder\n if check_digit == 11:\n check_digit = 0\n if check_digit == digits[-1]:\n return True\n else:\n return False\n\n\ndef nhsNumberGenerator(to_generate: int, random_state: int = None) -> list:\n \"\"\"\n Generates up to 1M random NHS numbers compliant with modulus 11 checks as recorded \n in the data dictonary.\n https://www.datadictionary.nhs.uk/data_dictionary/attributes/n/nhs/nhs_number_de.asp?shownav=1\n\n Parameters\n ----------\n to_generate : int\n number of NHS numbers to generate\n random_state : int, default : None\n Optional seed for random number generation, for testing and reproducibility.\n\n Returns\n ----------\n generated : list\n List of randomly generated NHS numbers\n\n Examples\n ---------\n >>> nhsNumberGenerator(2, random_state=42)\n [8429141456, 2625792787]\n \"\"\"\n\n if random_state:\n random.seed(random_state)\n if not isinstance(to_generate, int):\n raise ValueError(\n \"Please input a positive integer to generate numbers.\")\n if to_generate > 1000000:\n raise ValueError(\"More than one million values requested\")\n if to_generate < 0:\n raise ValueError(\n \"Please input a postitive integer to generate numbers.\")\n\n generated = []\n while len(generated) < to_generate:\n # Random 10 digit integer, starting with non-zero digit\n number = random.randint(1000000000, 9999999999)\n if nhsNumberValidator(number):\n generated.append(number)\n return generated\n" ]
[ [ "numpy.array", "numpy.arange" ] ]
yala/NeuraCrypt
[ "6c9862d1076095d76779af03a3a9ffd2cfec748a" ]
[ "sandstone/datasets/chest_xray.py" ]
[ "import os\nfrom collections import Counter\nimport torch\nfrom sandstone.datasets.factory import RegisterDataset\nfrom sandstone.datasets.abstract_dataset import Abstract_Dataset\nimport sandstone.utils\nimport tqdm\nfrom random import shuffle\nimport copy\nimport numpy as np\nimport datetime\nimport pdb\n\nMIMIC_METADATA_PATH = \"data/mimic.json\"\nSTANFORD_METADATA_PATH = \"data/chexpert.json\"\nALL_METADATA_PATH = \"data/combined.json\"\n\nSUMMARY_MSG = \"Contructed CXR {} {} dataset with {} records, {} exams, {} patients, and the following class balance \\n {}\"\n\nclass Abstract_Cxr(Abstract_Dataset):\n '''\n Working dataset for suvival analysis.\n '''\n def create_dataset(self, split_group, img_dir):\n \"\"\"\n Return the dataset from the paths and labels in the json.\n\n :split_group: - ['train'|'dev'|'test'].\n :img_dir: - The path to the dir containing the images.\n \"\"\"\n dataset = []\n for row in tqdm.tqdm(self.metadata_json):\n ssn, split, exam = row['pid'], row['split_group'], row['exam']\n split = 'dev' if row['split_group'] == 'validate' else row['split_group']\n\n ## Use challenge splits for attack mode. Only implemented for CheXpert (i.e Stanford)\n if self.args.lightning_name == \"adversarial_attack\" and 'challenge_split' in row:\n if split_group in ['dev','train']:\n if not row['challenge_split'] == 'public':\n continue\n else:\n assert split_group == 'test'\n if not row['challenge_split'] == 'private-encoded':\n continue\n else:\n if split != split_group:\n continue\n\n if self.check_label(row):\n label = self.get_label(row)\n source = 0\n if self.args.rlc_private_multi_host or self.args.use_adv:\n if not 'source' in row:\n row['source'] = 'mimic' if 'MIMIC' in self.METADATA_FILENAME else 'stanford'\n assert row['source'] in ['mimic', 'stanford']\n source = 1 if row['source'] == 'mimic' else 0\n dataset.append({\n 'path': row['path'],\n 'y': label,\n 'additional': {},\n 'exam': exam,\n 'source': source,\n 'ssn': ssn\n })\n\n if self.args.use_shuffle_pairs:\n shuffle_dataset = copy.deepcopy(dataset)\n np.random.shuffle(shuffle_dataset)\n\n for source, target in zip(dataset, shuffle_dataset):\n source['paths'] = [source['path'], target['path']]\n source['additionals'] = []\n del source['additional']\n del source['path']\n dataset = dataset[:128]\n\n return dataset\n\n def get_summary_statement(self, dataset, split_group):\n class_balance = Counter([d['y'] for d in dataset])\n exams = set([d['exam'] for d in dataset])\n patients = set([d['ssn'] for d in dataset])\n statement = SUMMARY_MSG.format(self.task, split_group, len(dataset), len(exams), len(patients), class_balance)\n return statement\n\n def get_label(self, row):\n return row['label_dict'][self.task] == \"1.0\"\n\n\n def check_label(self, row):\n if self.args.lightning_name == \"adversarial_attack\":\n return True\n return row['label_dict'][self.task] in [\"1.0\", \"0.0\"] or ( 'No Finding' in row['label_dict'] and row['label_dict']['No Finding'] == \"1.0\")\n\n @staticmethod\n def set_args(args):\n args.num_classes = 2\n args.num_chan = 1\n args.num_hospitals = 2\n args.img_size = (256, 256)\n args.img_mean = [43.9]\n args.img_std = [63.2]\n args.input_loader_name = 'default_image_loader'\n args.image_augmentations = [\"scale_2d\"]\n args.tensor_augmentations = [\"normalize_2d\"]\n args.test_image_augmentations = [\"scale_2d\"]\n args.test_tensor_augmentations = [\"normalize_2d\"]\n\n if args.use_shuffle_pairs:\n args.multi_image = True\n args.num_images = 2\n\nclass Abstract_Mimic_Cxr(Abstract_Cxr):\n 
@property\n def METADATA_FILENAME(self):\n return MIMIC_METADATA_PATH\n\nclass Abstract_Stanford_Cxr(Abstract_Cxr):\n @property\n def METADATA_FILENAME(self):\n return STANFORD_METADATA_PATH\n\nclass Abstract_Combined_Cxr(Abstract_Cxr):\n @property\n def METADATA_FILENAME(self):\n return ALL_METADATA_PATH\n\n\n## Mimic Datasets\n@RegisterDataset(\"mimic_pneumothorax\")\nclass Mimic_Cxr_Pneumothorax(Abstract_Mimic_Cxr):\n @property\n def task(self):\n return \"Pneumothorax\"\n\n@RegisterDataset(\"mimic_cxr_edema\")\nclass Mimic_Cxr_Edema(Abstract_Mimic_Cxr):\n @property\n def task(self):\n return 'Edema'\n\n@RegisterDataset(\"mimic_cxr_consolidation\")\nclass Mimic_Cxr_Consolidation(Abstract_Mimic_Cxr):\n @property\n def task(self):\n return 'Consolidation'\n\n@RegisterDataset(\"mimic_cxr_cardiomegaly\")\nclass Mimic_Cxr_Cardiomegaly(Abstract_Mimic_Cxr):\n @property\n def task(self):\n return 'Cardiomegaly'\n\n@RegisterDataset(\"mimic_cxr_atelectasis\")\nclass Mimic_Cxr_Atelectasis(Abstract_Mimic_Cxr):\n @property\n def task(self):\n return 'Atelectasis'\n\n\n## Stanford Datasets\n@RegisterDataset(\"stanford_pneumothorax\")\nclass Stanford_Cxr_Pneumothorax(Abstract_Stanford_Cxr):\n @property\n def task(self):\n return \"Pneumothorax\"\n\n@RegisterDataset(\"stanford_cxr_edema\")\nclass Stanford_Cxr_Edema(Abstract_Stanford_Cxr):\n @property\n def task(self):\n return 'Edema'\n\n@RegisterDataset(\"stanford_cxr_consolidation\")\nclass Stanford_Cxr_Consolidation(Abstract_Stanford_Cxr):\n @property\n def task(self):\n return 'Consolidation'\n\n@RegisterDataset(\"stanford_cxr_cardiomegaly\")\nclass Stanford_Cxr_Cardiomegaly(Abstract_Stanford_Cxr):\n @property\n def task(self):\n return 'Cardiomegaly'\n\n@RegisterDataset(\"stanford_cxr_atelectasis\")\nclass Stanford_Cxr_Atelectasis(Abstract_Stanford_Cxr):\n @property\n def task(self):\n return 'Atelectasis'\n\n\n## Combined Datasets\n@RegisterDataset(\"combined_pneumothorax\")\nclass Combined_Cxr_Pneumothorax(Abstract_Combined_Cxr):\n @property\n def task(self):\n return \"Pneumothorax\"\n\n@RegisterDataset(\"combined_cxr_edema\")\nclass Combined_Cxr_Edema(Abstract_Combined_Cxr):\n @property\n def task(self):\n return 'Edema'\n\n@RegisterDataset(\"combined_cxr_consolidation\")\nclass Combined_Cxr_Consolidation(Abstract_Combined_Cxr):\n @property\n def task(self):\n return 'Consolidation'\n\n@RegisterDataset(\"combined_cxr_cardiomegaly\")\nclass Combined_Cxr_Cardiomegaly(Abstract_Combined_Cxr):\n @property\n def task(self):\n return 'Cardiomegaly'\n\n@RegisterDataset(\"combined_cxr_atelectasis\")\nclass Combined_Cxr_Atelectasis(Abstract_Combined_Cxr):\n @property\n def task(self):\n return 'Atelectasis'\n" ]
[ [ "numpy.random.shuffle" ] ]
marcobarilari/nighres
[ "e503bb96a6a73f73020c5d9d7b540bc5f17699a8" ]
[ "nighres/parcellation/massp.py" ]
[ "import numpy\nimport nibabel\nimport os\nimport sys\nimport json\nimport nighresjava\nfrom ..io import load_volume, save_volume\nfrom ..utils import _output_dir_4saving, _fname_4saving, \\\n _check_topology_lut_dir, _check_available_memory\nfrom nighres.global_settings import DEFAULT_MASSP_ATLAS, DEFAULT_MASSP_HIST, \\\n DEFAULT_MASSP_SPATIAL_PROBA, DEFAULT_MASSP_SPATIAL_LABEL, \\\n DEFAULT_MASSP_SKEL_PROBA, DEFAULT_MASSP_SKEL_LABEL\nfrom nighres.data.download_data import download_MASSP_atlas\n\n# labels for the 17 structures anatomical parcellation atlas\nlabels_17structures = ['Str-l','Str-r','STN-l','STN-r','SN-l','SN-r',\\\n 'RN-l','RN-r','GPi-l','GPi-r','GPe-l','GPe-r',\\\n 'Tha-l','Tha-r','LV-l','LV-r','3V','4V','Amg-l','Amg-r',\\\n 'ic-l','ic-r','VTA-l','VTA-r','fx','PAG-l','PAG-r',\\\n 'PPN-l','PPN-r','Cl-l','Cl-r']\n\ndef massp_17structures_label(name):\n return 1+labels_17structures.index(name)\n\ndef massp_17structures_list():\n return labels_17structures\n \ndef massp(target_images, structures=31,\n shape_atlas_probas=None, shape_atlas_labels=None, \n intensity_atlas_hist=None,\n skeleton_atlas_probas=None, skeleton_atlas_labels=None, \n map_to_target=None,\n max_iterations=80, max_difference=0.1, volume_scaling=1.0,\n atlas_file=None, intensity_prior=1.0,\n save_data=False, overwrite=False, output_dir=None,\n file_name=None):\n \"\"\" Multi-contrast Anatomical Subcortical Structure parcellation (MASSP)\n\n Estimates subcortical structures based on a multi-atlas approach on shape\n\n Parameters\n ----------\n target_images: [niimg]\n Input images to perform the parcellation from\n structures: int\n Number of structures to parcellate\n shape_atlas_probas: niimg (opt)\n Pre-computed shape atlas probabilities (default is loaded from nighres atlas)\n shape_atlas_labels: niimg (opt)\n Pre-computed shape atlas labels (default is loaded from nighres atlas)\n intensity_atlas_hist: niimg (opt)\n Pre-computed intensity atlas from the contrast images (default is loaded from nighres atlas)\n skeleton_atlas_probas: niimg (opt)\n Pre-computed skeleton atlas probabilities (default is loaded from nighres atlas)\n skeleton_atlas_labels: niimg (opt)\n Pre-computed skeleton atlas labels (default is loaded from nighres atlas)\n map_to_target: niimg\n Coordinate mapping from the atlas to the target (opt)\n max_iterations: int\n Maximum number of diffusion iterations to perform\n max_difference: float\n Maximum difference between diffusion steps\n atlas_file: json\n File with atlas labels and metadata (opt)\n intensity_prior: float\n Importance scaling factor for the intensities in [0,1] (default is 1.0)\n save_data: bool\n Save output data to file (default is False)\n overwrite: bool\n Overwrite existing results (default is False)\n output_dir: str, optional\n Path to desired output directory, will be created if it doesn't exist\n file_name: str, optional\n Desired base name for output files with file extension\n (suffixes will be added)\n\n Returns\n ----------\n dict\n Dictionary collecting outputs under the following keys\n (suffix of output files in brackets)\n\n * max_proba (niimg): Maximum probability map (_massp-proba)\n * max_label (niimg): Maximum probability labels (_massp-label)\n\n Notes\n ----------\n Original Java module by Pierre-Louis Bazin.\n \"\"\"\n\n print('\\nMASSP')\n\n # check topology_lut_dir and set default if not given\n topology_lut_dir = _check_topology_lut_dir(None)\n\n # make sure that saving related parameters are correct\n if save_data:\n output_dir = 
_output_dir_4saving(output_dir, target_images[0])\n\n proba_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=target_images[0],\n suffix='massp-proba', ))\n\n label_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=target_images[0],\n suffix='massp-label'))\n\n if overwrite is False \\\n and os.path.isfile(proba_file) \\\n and os.path.isfile(label_file):\n \n print(\"skip computation (use existing results)\")\n output = {'max_proba': proba_file, \n 'max_label': label_file}\n return output\n\n contrasts = len(target_images)\n\n # start virtual machine, if not already running\n try:\n mem = _check_available_memory()\n nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])\n except ValueError:\n pass\n # create instance\n massp = nighresjava.ConditionalShapeSegmentation()\n\n # set parameters\n massp.setNumberOfSubjectsObjectsBgAndContrasts(1,structures,1,contrasts)\n massp.setOptions(True, False, False, False, True)\n massp.setDiffusionParameters(max_iterations, max_difference)\n massp.setIntensityImportancePrior(intensity_prior)\n \n # load atlas metadata, if given (after setting up the numbers above!!)\n if atlas_file is not None:\n f = open(atlas_file)\n metadata = json.load(f)\n f.close()\n \n # structures = metadata['MASSP Labels']\n contrastList = numpy.zeros(structures*contrasts, dtype=int)\n for st in range(structures):\n #print('Label '+str(st+1)+\": \"+str(metadata[metadata['Label '+str(st+1)][1]]))\n for c in metadata[metadata['Label '+str(st+1)][1]]:\n contrastList[st*contrasts+c] = 1\n massp.setContrastList(nighresjava.JArray('int')(\n (contrastList.flatten('F')).astype(int).tolist()))\n\n # load target image for parameters\n print(\"load: \"+str(target_images[0]))\n img = load_volume(target_images[0])\n data = img.get_data()\n trg_affine = img.get_affine()\n trg_header = img.get_header()\n trg_resolution = [x.item() for x in trg_header.get_zooms()]\n trg_dimensions = data.shape\n\n massp.setTargetDimensions(trg_dimensions[0], trg_dimensions[1], trg_dimensions[2])\n massp.setTargetResolutions(trg_resolution[0], trg_resolution[1], trg_resolution[2])\n\n # target image 1\n massp.setTargetImageAt(0, nighresjava.JArray('float')(\n (data.flatten('F')).astype(float)))\n \n # if further contrasts are specified, input them\n for contrast in range(1,contrasts): \n print(\"load: \"+str(target_images[contrast]))\n data = load_volume(target_images[contrast]).get_data()\n massp.setTargetImageAt(contrast, nighresjava.JArray('float')(\n (data.flatten('F')).astype(float)))\n\n # if not specified, check if standard atlases are available or download them\n if ( (shape_atlas_probas is None) or (shape_atlas_labels is None)\n or (skeleton_atlas_probas is None) or (skeleton_atlas_labels is None)):\n \n if (not (os.path.exists(DEFAULT_MASSP_ATLAS) \n and os.path.exists(DEFAULT_MASSP_SPATIAL_PROBA) \n and os.path.exists(DEFAULT_MASSP_SPATIAL_LABEL) \n and os.path.exists(DEFAULT_MASSP_SKEL_PROBA) \n and os.path.exists(DEFAULT_MASSP_SKEL_LABEL) \n and os.path.exists(DEFAULT_MASSP_HIST))):\n download_MASSP_atlas(overwrite=False)\n \n shape_atlas_probas = DEFAULT_MASSP_SPATIAL_PROBA\n shape_atlas_labels = DEFAULT_MASSP_SPATIAL_LABEL\n skeleton_atlas_probas = DEFAULT_MASSP_SKEL_PROBA\n skeleton_atlas_labels = DEFAULT_MASSP_SKEL_LABEL\n\n # allow for different default atlases for intensities\n if (intensity_atlas_hist is not None):\n if not os.path.isfile(intensity_atlas_hist):\n intensity_atlas_hist = 
os.path.join(DEFAULT_MASSP_ATLAS,intensity_atlas_hist)\n else:\n intensity_atlas_hist = DEFAULT_MASSP_HIST\n \n # load the shape and intensity atlases\n print(\"load: \"+str(intensity_atlas_hist))\n hist = load_volume(intensity_atlas_hist).get_data()\n massp.setConditionalHistogram(nighresjava.JArray('float')(\n (hist.flatten('F')).astype(float)))\n\n print(\"load: \"+str(shape_atlas_probas))\n \n # load a first image for dim, res\n img = load_volume(shape_atlas_probas)\n pdata = img.get_data()\n header = img.get_header()\n affine = img.get_affine()\n resolution = [x.item() for x in header.get_zooms()]\n dimensions = pdata.shape\n \n massp.setAtlasDimensions(dimensions[0], dimensions[1], dimensions[2])\n massp.setAtlasResolutions(resolution[0], resolution[1], resolution[2])\n\n print(\"load: \"+str(shape_atlas_labels))\n ldata = load_volume(shape_atlas_labels).get_data()\n \n if map_to_target is not None:\n print(\"map atlas to subject\")\n print(\"load: \"+str(map_to_target))\n mdata = load_volume(map_to_target).get_data()\n massp.setMappingToTarget(nighresjava.JArray('float')(\n (mdata.flatten('F')).astype(float)))\n \n massp.setShapeAtlasProbasAndLabels(nighresjava.JArray('float')(\n (pdata.flatten('F')).astype(float)),\n nighresjava.JArray('int')(\n (ldata.flatten('F')).astype(int).tolist()))\n\n print(\"load: \"+str(skeleton_atlas_probas))\n pdata = load_volume(skeleton_atlas_probas).get_data()\n \n print(\"load: \"+str(skeleton_atlas_labels))\n ldata = load_volume(skeleton_atlas_labels).get_data()\n\n massp.setSkeletonAtlasProbasAndLabels(nighresjava.JArray('float')(\n (pdata.flatten('F')).astype(float)),\n nighresjava.JArray('int')(\n (ldata.flatten('F')).astype(int).tolist()))\n\n # execute\n try:\n massp.estimateTarget()\n massp.fastSimilarityDiffusion(4)\n massp.collapseToJointMaps()\n massp.precomputeStoppingStatistics(3.0)\n massp.topologyBoundaryDefinition(\"wcs\", topology_lut_dir)\n massp.conditionalPrecomputedDirectVolumeGrowth(3.0)\n massp.collapseSpatialPriorMaps()\n\n except:\n # if the Java module fails, reraise the error it throws\n print(\"\\n The underlying Java code did not execute cleanly: \")\n print(sys.exc_info()[0])\n raise\n return\n\n # reshape output to what nibabel likes\n dims3Dtrg = (trg_dimensions[0],trg_dimensions[1],trg_dimensions[2])\n\n proba_data = numpy.reshape(numpy.array(massp.getFinalProba(),\n dtype=numpy.float32), dims3Dtrg, 'F')\n\n label_data = numpy.reshape(numpy.array(massp.getFinalLabel(),\n dtype=numpy.int32), dims3Dtrg, 'F')\n\n # adapt header max for each image so that correct max is displayed\n # and create nifti objects\n trg_header['cal_max'] = numpy.nanmax(proba_data)\n proba = nibabel.Nifti1Image(proba_data, trg_affine, trg_header)\n\n trg_header['cal_max'] = numpy.nanmax(label_data)\n label = nibabel.Nifti1Image(label_data, trg_affine, trg_header)\n\n if save_data:\n save_volume(proba_file, proba)\n save_volume(label_file, label)\n\n output= {'max_proba': proba_file, 'max_label': label_file}\n return output\n else:\n output= {'max_proba': proba, 'max_label': label}\n return output\n\n\ndef massp_atlasing(subjects, structures, contrasts, \n levelset_images=None, skeleton_images=None, \n contrast_images=None, \n save_data=False, overwrite=False, output_dir=None,\n file_name=None):\n \"\"\" MASSP Atlasing\n\n Builds a multi-atlas prior for MASSP\n\n Parameters\n ----------\n subjects: int\n Number of atlas subjects\n structures: int\n Number of structures to parcellate\n contrasts: int\n Number of image intensity contrasts\n 
levelset_images: [niimg]\n Atlas shape levelsets indexed by (subjects,structures)\n skeleton_images: [niimg]\n Atlas shape skeletons indexed by (subjects,structures)\n contrast_images: [niimg]\n Atlas images to use in the parcellation, indexed by (subjects, contrasts)\n save_data: bool\n Save output data to file (default is False)\n overwrite: bool\n Overwrite existing results (default is False)\n output_dir: str, optional\n Path to desired output directory, will be created if it doesn't exist\n file_name: str, optional\n Desired base name for output files with file extension\n (suffixes will be added)\n\n Returns\n ----------\n dict\n Dictionary collecting outputs under the following keys\n (suffix of output files in brackets)\n\n * max_spatial_proba (niimg): Maximum spatial probability map (_massp-sproba)\n * max_spatial_label (niimg): Maximum spatial probability labels (_massp-slabel)\n * cond_hist (niimg): Conditional intensity histograms (_massp-chist)\n * max_skeleton_proba (niimg): Maximum skeleton probability map (_massp-kproba)\n * max_skeleton_label (niimg): Maximum skeleton probability labels (_massp-klabel)\n\n Notes\n ----------\n Original Java module by Pierre-Louis Bazin.\n \"\"\"\n\n print('\\nMASSP Atlasing')\n\n # make sure that saving related parameters are correct\n if save_data:\n output_dir = _output_dir_4saving(output_dir, contrast_images[0][0])\n\n spatial_proba_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=contrast_images[0][0],\n suffix='massp-sproba', ))\n\n spatial_label_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=contrast_images[0][0],\n suffix='massp-slabel'))\n\n condhist_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=contrast_images[0][0],\n suffix='massp-chist'))\n \n skeleton_proba_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=contrast_images[0][0],\n suffix='massp-kproba', ))\n\n skeleton_label_file = os.path.join(output_dir, \n _fname_4saving(module=__name__,file_name=file_name,\n rootfile=contrast_images[0][0],\n suffix='massp-klabel'))\n\n \n if overwrite is False \\\n and os.path.isfile(spatial_proba_file) \\\n and os.path.isfile(spatial_label_file) \\\n and os.path.isfile(condhist_file) \\\n and os.path.isfile(skeleton_proba_file) \\\n and os.path.isfile(skeleton_label_file):\n \n print(\"skip computation (use existing results)\")\n output = {'max_spatial_proba': spatial_proba_file, \n 'max_spatial_label': spatial_label_file,\n 'cond_hist': condhist_file,\n 'max_skeleton_proba': skeleton_proba_file, \n 'max_skeleton_label': skeleton_label_file}\n\n return output\n\n\n # start virtual machine, if not already running\n try:\n mem = _check_available_memory()\n nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])\n except ValueError:\n pass\n # create instance\n massp = nighresjava.ConditionalShapeSegmentation()\n\n # set parameters\n massp.setNumberOfSubjectsObjectsBgAndContrasts(subjects,structures,1,contrasts)\n massp.setOptions(True, False, False, False, True)\n \n # load target image for parameters\n # load a first image for dim, res\n img = load_volume(contrast_images[0][0])\n data = img.get_data()\n header = img.get_header()\n affine = img.get_affine()\n trg_resolution = [x.item() for x in header.get_zooms()]\n trg_dimensions = data.shape\n \n massp.setTargetDimensions(trg_dimensions[0], trg_dimensions[1], trg_dimensions[2])\n 
massp.setTargetResolutions(trg_resolution[0], trg_resolution[1], trg_resolution[2])\n\n resolution = trg_resolution\n dimensions = trg_dimensions\n \n massp.setAtlasDimensions(dimensions[0], dimensions[1], dimensions[2])\n massp.setAtlasResolutions(resolution[0], resolution[1], resolution[2])\n \n # load the atlas structures and contrasts, if needed\n for sub in range(subjects):\n for struct in range(structures):\n print(\"load: \"+str(levelset_images[sub][struct]))\n data = load_volume(levelset_images[sub][struct]).get_data()\n massp.setLevelsetImageAt(sub, struct, nighresjava.JArray('float')(\n (data.flatten('F')).astype(float)))\n for contrast in range(contrasts):\n print(\"load: \"+str(contrast_images[sub][contrast]))\n data = load_volume(contrast_images[sub][contrast]).get_data()\n massp.setContrastImageAt(sub, contrast, nighresjava.JArray('float')(\n (data.flatten('F')).astype(float)))\n # execute first step\n scale = 1.0\n try:\n scale = massp.computeAtlasPriors()\n \n except:\n # if the Java module fails, reraise the error it throws\n print(\"\\n The underlying Java code did not execute cleanly: \")\n print(sys.exc_info()[0])\n raise\n return\n\n # clean up and go to second step\n levelset_images = None\n contrast_images = None\n \n for sub in range(subjects):\n for struct in range(structures):\n print(\"load: \"+str(skeleton_images[sub][struct]))\n data = load_volume(skeleton_images[sub][struct]).get_data()\n massp.setSkeletonImageAt(sub, struct, nighresjava.JArray('float')(\n (data.flatten('F')).astype(float)))\n \n try:\n massp.computeSkeletonPriors(scale)\n \n except:\n # if the Java module fails, reraise the error it throws\n print(\"\\n The underlying Java code did not execute cleanly: \")\n print(sys.exc_info()[0])\n raise\n return\n\n skeleton_images = None\n\n # reshape output to what nibabel likes\n dimensions = (dimensions[0],dimensions[1],dimensions[2],massp.getBestDimension())\n dimskel = (dimensions[0],dimensions[1],dimensions[2],int(massp.getBestDimension()/4))\n dims3Dtrg = (trg_dimensions[0],trg_dimensions[1],trg_dimensions[2])\n\n intens_dims = (structures+1,structures+1,contrasts)\n intens_hist_dims = ((structures+1)*(structures+1),massp.getNumberOfBins()+6,contrasts)\n\n spatial_proba_data = numpy.reshape(numpy.array(massp.getBestSpatialProbabilityMaps(dimensions[3]),\n dtype=numpy.float32), dimensions, 'F')\n\n spatial_label_data = numpy.reshape(numpy.array(massp.getBestSpatialProbabilityLabels(dimensions[3]),\n dtype=numpy.int32), dimensions, 'F') \n\n intens_hist_data = numpy.reshape(numpy.array(massp.getConditionalHistogram(),\n dtype=numpy.float32), intens_hist_dims, 'F')\n\n skeleton_proba_data = numpy.reshape(numpy.array(massp.getBestSkeletonProbabilityMaps(dimskel[3]),\n dtype=numpy.float32), dimskel, 'F')\n\n skeleton_label_data = numpy.reshape(numpy.array(massp.getBestSkeletonProbabilityLabels(dimskel[3]),\n dtype=numpy.int32), dimskel, 'F') \n\n\n # adapt header max for each image so that correct max is displayed\n # and create nifti objects\n header['cal_max'] = numpy.nanmax(spatial_proba_data)\n spatial_proba = nibabel.Nifti1Image(spatial_proba_data, affine, header)\n\n header['cal_max'] = numpy.nanmax(spatial_label_data)\n spatial_label = nibabel.Nifti1Image(spatial_label_data, affine, header)\n\n chist = nibabel.Nifti1Image(intens_hist_data, None, None)\n\n header['cal_max'] = numpy.nanmax(skeleton_proba_data)\n skeleton_proba = nibabel.Nifti1Image(skeleton_proba_data, affine, header)\n\n header['cal_max'] = numpy.nanmax(skeleton_label_data)\n 
skeleton_label = nibabel.Nifti1Image(skeleton_label_data, affine, header)\n\n if save_data:\n save_volume(spatial_proba_file, spatial_proba)\n save_volume(spatial_label_file, spatial_label)\n save_volume(condhist_file, chist)\n save_volume(skeleton_proba_file, skeleton_proba)\n save_volume(skeleton_label_file, skeleton_label)\n output= {'max_spatial_proba': spatial_proba_file, \n 'max_spatial_label': spatial_label_file, \n 'cond_hist': condhist_file,\n 'max_skeleton_proba': skeleton_proba_file, \n 'max_skeleton_label': skeleton_label_file}\n return output\n else:\n output= {'max_spatial_proba': spatial_proba, \n 'max_spatial_label': spatial_label, \n 'cond_hist': chist,\n 'max_skeleton_proba': skeleton_proba, \n 'max_skeleton_label': skeleton_label}\n return output\n" ]
[ [ "numpy.nanmax", "numpy.zeros" ] ]
namjmnam/Compounder
[ "5bde1877459c968327aab1bfb9839d370e17b416" ]
[ "elbow.py" ]
[ "# coding=UTF-8\r\nimport math\r\nimport re\r\nimport pandas\r\n\r\nclass CorpusBuilder:\r\n def __init__(self, inputPath):\r\n self.corpusDocList = list(pandas.read_csv(inputPath)['NEWS_BODY'])\r\n self.corpusText = ' '.join(self.corpusDocList)\r\n temp = cleanText(self.corpusText).split(' ')\r\n self.corpusEoList = [i for i in temp if len(re.sub(r'[0-9]+', '-', i)) >= 3] # 3자 이상의 어절만\r\n\r\n# 텍스트 클렌징\r\ndef cleanText(text):\r\n text = text.replace(u'\\xa0', u' ')\r\n\r\n pattern = '([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+)'\r\n text = re.sub(pattern=pattern, repl=' ', string=text)\r\n pattern = '<script.*script>'\r\n text = re.sub(pattern=pattern, repl=' ', string=text)\r\n pattern = '(http|ftp|https)://(?:[-\\w.]|(?:%[\\da-fA-F]{2}))+'\r\n text = re.sub(pattern=pattern, repl=' ', string=text)\r\n pattern = '([ㄱ-ㅎㅏ-ㅣ]+)'\r\n text = re.sub(pattern=pattern, repl=' ', string=text)\r\n pattern = '<[^>]*>'\r\n text = re.sub(pattern=pattern, repl=' ', string=text)\r\n # pattern = '''[^-/.()&*+%$·,`'\"‘’▶\\w\\s]''' # 좀 더 관대한 필터링\r\n # pattern = \"[^-/.()&*+%$\\w\\s]\" # 괄호를 걸러내지 않는 필터링\r\n pattern = \"[^-/.&*+%$\\w\\s]\"\r\n # pattern = \"[^\\w\\s]\" # 엄격한 필터링\r\n text = re.sub(pattern=pattern, repl=' ', string=text)\r\n\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('\\t', ' ')\r\n text = text.replace('\\r', ' ')\r\n\r\n text = re.sub(' +', ' ', text)\r\n return text\r\n\r\n# 한 어절에 대해서 모든 2자 이상의 LP를 나열한 리스트\r\ndef genLP(eojeol):\r\n out = []\r\n # 추출된 어절 끝에 마침표가 오는 경우 제거\r\n if eojeol[-1] == '.': eojeol = eojeol[:-1]\r\n for i in range(2, len(eojeol)+1):\r\n if len(eojeol[:i].replace('.', '')) > 1 and eojeol[:1][-1] != '.':\r\n out.append(eojeol[:i])\r\n return out\r\n\r\n# 어절 리스트를 대상으로 입력 문자열을 각 어절의 좌측에서 검색하여 나온 결과를 출력\r\ndef leftSearcher(word, eoList):\r\n out = []\r\n max = len(word)\r\n for i in eoList:\r\n if len(i) >= max and i[0:max] == word: out.append(i)\r\n return len(out)\r\n\r\n# TF-IDF calculation\r\ndef calcTFIDF(text, doc, corpusDocList):\r\n # calculate TF\r\n tf = math.log(doc.count(text) + 1)\r\n if tf == 0: return 0\r\n # calculate IDF\r\n denominator = sum(text in s for s in corpusDocList)\r\n idf = math.log(len(corpusDocList) / denominator)\r\n return tf*idf\r\n\r\n# 추출 프로세스\r\ndef extOutput(corpusText, corpusDocList, corpusEoList, index=0):\r\n rawDocument = corpusDocList[index]\r\n\r\n # eoList: 어절 리스트\r\n cleansedDocument = cleanText(rawDocument)\r\n temp = cleansedDocument.split(' ')\r\n eoList = [i for i in temp if len(re.sub(r'[0-9]+', '-', i)) >= 3] # 3자 이상의 어절만\r\n\r\n temp = cleanText(corpusText).split(' ')\r\n corpusEoList = [i for i in temp if len(re.sub(r'[0-9]+', '-', i)) >= 3] # 3자 이상의 어절만\r\n\r\n # lplist: 모든 어절의 2자 이상의 LP부분 리스트: [[\"어절1LP1\", \"어절1LP2\", ...], [\"어절2LP1\", \"어절2LP2\", ...], ...]\r\n lplist = []\r\n iter = eoList[:]\r\n iter = list(dict.fromkeys(iter))\r\n for i in iter:\r\n if len(i) > 1: lplist.append(genLP(i))\r\n\r\n # 명사로 추정되는 문자열 리스트 추출 -> extractednouns\r\n extractedNouns = []\r\n for i in lplist:\r\n scores = []\r\n chosen = []\r\n for j in range(len(i)):\r\n scores.append(leftSearcher(i[j], corpusEoList))\r\n scores.append(scores[-1] * 0.8) # 임시방편1\r\n # 빈도수의 엘보 포인트(elbow point)에서 명사로 등록\r\n if scores[0] > scores[1] * 1.1: chosen.append(i[0]) # 임시방편2\r\n for j in range(1, len(i)):\r\n scoreBefore = scores[j-1]\r\n scoreCurrent = scores[j]\r\n scoreAfter = scores[j+1]\r\n if scoreBefore - scoreCurrent < scoreCurrent - scoreAfter: chosen.append(i[j])\r\n\r\n for j in range(len(chosen)):\r\n if 
rawDocument.count(chosen[j]) >= 2: extractedNouns.append(chosen[j])\r\n extractedNouns = list(dict.fromkeys(extractedNouns))\r\n \r\n temp = []\r\n for j in extractedNouns:\r\n if calcTFIDF(j, rawDocument, corpusDocList) > 3.5: temp.append(j) # TF-IDF가 3.5 초과인 경우만 등록\r\n extractedNouns = temp\r\n return extractedNouns\r\n\r\ninputPath = r\"C:/comfinder/longtext.csv\"\r\ncb = CorpusBuilder(inputPath)\r\n\r\noutput = []\r\nfor i in range(20):\r\n out = extOutput(cb.corpusText, cb.corpusDocList, cb.corpusEoList, i)\r\n # print(out)\r\n print(i)\r\n for j in out:\r\n output.append(j)\r\noutput = list(dict.fromkeys(output))\r\noutput.sort()\r\nprint(output)\r\n\r\nf = open(\"C:/comfinder/out.txt\", 'w', encoding='utf8')\r\nf.write('\\n'.join(output))\r\nf.close()\r\n" ]
[ [ "pandas.read_csv" ] ]
pramitmallick/jiant
[ "1e203c0c226731f218a2be96d2b03f0c6313eb5e" ]
[ "src/modules/seq2seq_decoder.py" ]
[ "# This is a slightly modified version of the AllenNLP SimpleSeq2Seq class:\n# https://github.com/allenai/allennlp/blob/master/allennlp/models/encoder_decoders/simple_seq2seq.py\n\nfrom typing import Dict\n\nimport numpy\nfrom overrides import overrides\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.modules.rnn import LSTMCell\nfrom torch.nn.modules.linear import Linear\nimport torch.nn.functional as F\n\nfrom allennlp.common import Params\nfrom allennlp.common.util import START_SYMBOL, END_SYMBOL\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder\nfrom allennlp.modules.attention import BilinearAttention\nfrom allennlp.modules.similarity_functions import SimilarityFunction\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.models.model import Model\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits, weighted_sum\n\nfrom .modules import Pooler\n\nimport pdb\n\nclass Actor(Model):\n def __init__(self, vocab: Vocabulary, hidden_size: int, attn_size: int) -> None:\n super(Actor, self).__init__(vocab)\n self._hidden_size = hidden_size\n self._attn_size = attn_size\n self._gru = nn.GRU(hidden_size + attn_size, hidden_size)\n self._fc = nn.Linear(hidden_size, hidden_size)\n self._softmax = nn.LogSoftmax(dim=1)\n def forward(self, hidden, seq_hidden, attn):\n # inp = torch.cat((hidden, attn), 1)\n # pdb.set_trace()\n # inp = torch.cat((hidden, attn.transpose(1, 0)), 1)\n # inp = torch.unsqueeze(inp, 0)\n inp = torch.cat((seq_hidden, attn.transpose(1, 0)), 2)\n output, hidden = self.gru(inp, hidden)\n output = self.softmax(self.fc(output[0]))\n return output, hidden\n\n\nclass Seq2SeqDecoder(Model):\n \"\"\"\n This is a slightly modified version of AllenNLP SimpleSeq2Seq class\n \"\"\"\n\n def __init__(self,\n vocab: Vocabulary,\n input_dim: int,\n decoder_hidden_size: int,\n max_decoding_steps: int,\n output_proj_input_dim: int,\n target_namespace: str = \"targets\",\n target_embedding_dim: int = None,\n attention: str = \"none\",\n dropout: float = 0.0,\n scheduled_sampling_ratio: float = 0.0,\n ) -> None:\n super(Seq2SeqDecoder, self).__init__(vocab)\n self._max_decoding_steps = max_decoding_steps\n self._target_namespace = target_namespace\n\n # We need the start symbol to provide as the input at the first timestep of decoding, and\n # end symbol as a way to indicate the end of the decoded sequence.\n self._start_index = self.vocab.get_token_index(START_SYMBOL, self._target_namespace)\n self._end_index = self.vocab.get_token_index(END_SYMBOL, self._target_namespace)\n self._unk_index = self.vocab.get_token_index(\"@@UNKNOWN@@\", self._target_namespace)\n num_classes = self.vocab.get_vocab_size(self._target_namespace)\n\n # Decoder output dim needs to be the same as the encoder output dim since we initialize the\n # hidden state of the decoder with that of the final hidden states of the encoder. 
Also, if\n # we're using attention with ``DotProductSimilarity``, this is needed.\n self._encoder_output_dim = input_dim\n self._decoder_hidden_dim = decoder_hidden_size\n if self._encoder_output_dim != self._decoder_hidden_dim:\n self._projection_encoder_out = Linear(\n self._encoder_output_dim, self._decoder_hidden_dim)\n else:\n self._projection_encoder_out = lambda x: x\n self._decoder_output_dim = self._decoder_hidden_dim\n self._output_proj_input_dim = output_proj_input_dim\n self._target_embedding_dim = target_embedding_dim\n self._target_embedder = Embedding(num_classes, self._target_embedding_dim)\n\n # Used to get an initial hidden state from the encoder states\n self._sent_pooler = Pooler.from_params(\n d_inp=input_dim, d_proj=decoder_hidden_size, project=True)\n\n if attention == \"bilinear\":\n self._decoder_attention = BilinearAttention(decoder_hidden_size, input_dim)\n # The output of attention, a weighted average over encoder outputs, will be\n # concatenated to the input vector of the decoder at each time step.\n self._decoder_input_dim = input_dim + target_embedding_dim\n elif attention == \"none\":\n self._decoder_attention = None\n self._decoder_input_dim = target_embedding_dim\n else:\n raise Exception(\"attention not implemented {}\".format(attention))\n\n self._decoder_cell = LSTMCell(self._decoder_input_dim, self._decoder_hidden_dim)\n # Allow for a bottleneck layer between encoder outputs and distribution over vocab\n # The bottleneck layer consists of a linear transform and helps to reduce\n # number of parameters\n if self._output_proj_input_dim != self._decoder_output_dim:\n self._projection_bottleneck = Linear(\n self._decoder_output_dim, self._output_proj_input_dim)\n else:\n self._projection_bottleneck = lambda x: x\n self._output_projection_layer = Linear(self._output_proj_input_dim, num_classes)\n self._dropout = torch.nn.Dropout(p=dropout)\n\n self._actor1 = Actor(vocab, decoder_hidden_size, self._decoder_input_dim)\n self._actor2 = LSTMCell(self._decoder_hidden_dim * 3, self._decoder_hidden_dim)\n # self._actor_hx = torch.zeros(batch_size, self._decoder_hidden_dim)\n self._actor_hx = Variable(torch.randn(8, 1024), requires_grad=True).cuda()\n # self._actor_cx = torch.zeros(batch_size, self._decoder_hidden_dim)\n self._actor_cx = Variable(torch.randn(8, 1024), requires_grad=True).cuda()\n\n def _initalize_hidden_context_states(self, encoder_outputs, encoder_outputs_mask):\n \"\"\"\n Initialization of the decoder state, based on the encoder output.\n Parameters\n ----------\n encoder_outputs: torch.FloatTensor, [bs, T, h]\n encoder_outputs_mask: torch.LongTensor, [bs, T, 1]\n \"\"\"\n\n if self._decoder_attention is not None:\n encoder_outputs = self._projection_encoder_out(encoder_outputs)\n encoder_outputs.data.masked_fill_(1 - encoder_outputs_mask.byte().data, -float('inf'))\n\n decoder_hidden = encoder_outputs.new_zeros(\n encoder_outputs_mask.size(0), self._decoder_hidden_dim)\n decoder_context = encoder_outputs.max(dim=1)[0]\n else:\n decoder_hidden = self._sent_pooler(encoder_outputs, encoder_outputs_mask)\n decoder_context = encoder_outputs.new_zeros(\n encoder_outputs_mask.size(0), self._decoder_hidden_dim)\n\n return decoder_hidden, decoder_context\n\n @overrides\n def forward(self, # type: ignore\n encoder_outputs, # type: ignore\n encoder_outputs_mask, # type: ignore\n target_tokens: Dict[str, torch.LongTensor] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n Decoder logic for producing the entire target 
sequence at train time.\n\n Parameters\n ----------\n encoder_outputs : torch.FloatTensor, [bs, T, h]\n encoder_outputs_mask : torch.LongTensor, [bs, T, 1]\n target_tokens : Dict[str, torch.LongTensor]\n \"\"\"\n # TODO: target_tokens is not optional.\n batch_size, _, _ = encoder_outputs.size()\n\n if target_tokens is not None:\n targets = target_tokens[\"words\"]\n target_sequence_length = targets.size()[1]\n num_decoding_steps = target_sequence_length - 1\n else:\n num_decoding_steps = self._max_decoding_steps\n\n decoder_hidden, decoder_context = self._initalize_hidden_context_states(\n encoder_outputs, encoder_outputs_mask)\n\n step_logits = []\n\n for timestep in range(num_decoding_steps):\n input_choices = targets[:, timestep]\n decoder_input = self._prepare_decode_step_input(\n input_choices, decoder_hidden,\n encoder_outputs, encoder_outputs_mask)\n decoder_hidden, decoder_context = self._decoder_cell(\n decoder_input, (decoder_hidden, decoder_context))\n\n # output projection\n proj_input = self._projection_bottleneck(decoder_hidden)\n # (batch_size, num_classes)\n output_projections = self._output_projection_layer(proj_input)\n\n # list of (batch_size, 1, num_classes)\n step_logit = output_projections.unsqueeze(1)\n step_logits.append(step_logit)\n\n # (batch_size, num_decoding_steps, num_classes)\n logits = torch.cat(step_logits, 1)\n\n # pdb.set_trace()\n\n if batch_size == 8:\n context = decoder_context\n hidden = decoder_hidden\n last_enc = encoder_outputs[:,-1,:]\n inp = torch.cat([context, hidden, last_enc], 1)\n self._actor_hx, self._actor_cx = self._actor2(inp, (self._actor_hx, self._actor_cx))\n\n output_dict = {\"logits\": logits}\n\n if target_tokens:\n target_mask = get_text_field_mask(target_tokens)\n loss = self._get_loss(logits, targets, target_mask)\n output_dict[\"loss\"] = loss\n\n return output_dict\n\n def _decoder_step(self,\n decoder_input,\n decoder_hidden,\n decoder_context):\n \"\"\"\n Applies one step of the decoder. This is used by beam search.\n\n Parameters\n ----------\n decoder_input: torch.FloatTensor\n decoder_hidden: torch.FloatTensor\n decoder_context: torch.FloatTensor\n \"\"\"\n decoder_hidden, decoder_context = self._decoder_cell(\n decoder_input, (decoder_hidden, decoder_context))\n\n logits = self._output_projection_layer(decoder_hidden)\n\n return logits, (decoder_hidden, decoder_context)\n\n def _prepare_decode_step_input(\n self,\n input_indices: torch.LongTensor,\n decoder_hidden_state: torch.LongTensor = None,\n encoder_outputs: torch.LongTensor = None,\n encoder_outputs_mask: torch.LongTensor = None) -> torch.LongTensor:\n \"\"\"\n Given the input indices for the current timestep of the decoder, and all the encoder\n outputs, compute the input at the current timestep. Note: This method is agnostic to\n whether the indices are gold indices or the predictions made by the decoder at the last\n timestep.\n\n If we're not using attention, the output of this method is just an embedding of the input\n indices. If we are, the output will be a concatenation of the embedding and an attended\n average of the encoder inputs.\n\n Parameters\n ----------\n input_indices : torch.LongTensor\n Indices of either the gold inputs to the decoder or the predicted labels from the\n previous timestep.\n decoder_hidden_state : torch.LongTensor, optional (not needed if no attention)\n Output from the decoder at the last time step. 
Needed only if using attention.\n encoder_outputs : torch.LongTensor, optional (not needed if no attention)\n Encoder outputs from all time steps. Needed only if using attention.\n encoder_outputs_mask : torch.LongTensor, optional (not needed if no attention)\n Masks on encoder outputs. Needed only if using attention.\n \"\"\"\n input_indices = input_indices.long()\n # input_indices : (batch_size,) since we are processing these one timestep at a time.\n # (batch_size, target_embedding_dim)\n embedded_input = self._target_embedder(input_indices)\n\n if self._decoder_attention is not None:\n # encoder_outputs : (batch_size, input_sequence_length, encoder_output_dim)\n # Ensuring mask is also a FloatTensor. Or else the multiplication within attention will\n # complain.\n\n # important - need to use zero-masking instead of -inf for attention\n # I've checked that doing this doesn't significantly increase time\n # per batch, but should consider only doing once\n encoder_outputs.data.masked_fill_(\n 1 - encoder_outputs_mask.byte().data, 0.0)\n\n encoder_outputs = 0.5 * encoder_outputs\n encoder_outputs_mask = encoder_outputs_mask.float()\n encoder_outputs_mask = encoder_outputs_mask[:, :, 0]\n # (batch_size, input_sequence_length)\n input_weights = self._decoder_attention(\n decoder_hidden_state, encoder_outputs, encoder_outputs_mask)\n # (batch_size, input_dim)\n attended_input = weighted_sum(encoder_outputs, input_weights)\n # (batch_size, input_dim + target_embedding_dim)\n return torch.cat((attended_input, embedded_input), -1)\n else:\n return embedded_input\n\n @staticmethod\n def _get_loss(logits: torch.LongTensor,\n targets: torch.LongTensor,\n target_mask: torch.LongTensor) -> torch.LongTensor:\n \"\"\"\n Takes logits (unnormalized outputs from the decoder) of size (batch_size,\n num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)\n and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross\n entropy loss while taking the mask into account.\n\n The length of ``targets`` is expected to be greater than that of ``logits`` because the\n decoder does not need to compute the output corresponding to the last timestep of\n ``targets``. This method aligns the inputs appropriately to compute the loss.\n\n During training, we want the logit corresponding to timestep i to be similar to the target\n token from timestep i + 1. That is, the targets should be shifted by one timestep for\n appropriate comparison. Consider a single example where the target has 3 words, and\n padding is to 7 tokens.\n The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>\n and the mask would be 1 1 1 1 1 0 0\n and let the logits be l1 l2 l3 l4 l5 l6\n We actually need to compare:\n the sequence w1 w2 w3 <E> <P> <P>\n with masks 1 1 1 1 0 0\n against l1 l2 l3 l4 l5 l6\n (where the input was) <S> w1 w2 w3 <E> <P>\n \"\"\"\n relevant_targets = targets[:, 1:].contiguous() # (batch_size, num_decoding_steps)\n relevant_mask = target_mask[:, 1:].contiguous() # (batch_size, num_decoding_steps)\n loss = sequence_cross_entropy_with_logits(logits, relevant_targets, relevant_mask)\n return loss\n" ]
[ [ "torch.nn.Linear", "torch.nn.modules.linear.Linear", "torch.nn.LogSoftmax", "torch.nn.modules.rnn.LSTMCell", "torch.nn.Dropout", "torch.nn.GRU", "torch.cat", "torch.randn" ] ]
rtphokie/stemwizardapi
[ "85d14da95d14601caeb7fb169e3ae574638628c2" ]
[ "stemwizard_automation.py" ]
[ "#!/usr/bin/env python3\n\nfrom STEMWizard import STEMWizardAPI\nimport pandas as pd\nimport argparse\n\ndef stats(listname, df, filename, columns):\n print(f\"created {filename}\")\n for column in columns:\n print(f'\\n{column.lower()}')\n c = df[f\"{column.upper()}\"].value_counts(dropna=False)\n p = df[f\"{column.upper()}\"].value_counts(dropna=False, normalize=True).mul(100).round(1)\n print(pd.concat([c, p], axis=1, keys=[listname, '%']))\n print()\n print(f\"total {listname}: {df.shape[0]}\")\n\n\ndef get_args():\n global args\n parser = argparse.ArgumentParser(description='synchronize with STEM Wizard')\n parser.add_argument('-students', help='gather student data', action='store_true')\n parser.add_argument('-judges', help='gather judge data', action='store_true')\n parser.add_argument('-volunteers', help='gather volunteer data', action='store_true')\n parser.add_argument('-files', help='fetch files and forms metadata', action='store_true')\n parser.add_argument('-download', help='download files and forms', action='store_true')\n parser.add_argument('--configfile', help='download files and forms', default='stemwizardapi.yaml')\n args = parser.parse_args()\n\n\nif __name__ == '__main__':\n get_args()\n\n sw = STEMWizardAPI(configfile=args.configfile)\n if args.download:\n data = sw.getStudentData_by_category(fileinfo=True, download=False)\n elif args.judges:\n filename, df = sw.export_judge_list()\n stats('judges', df, filename, ['HIGHEST DEGREE ATTAINED', 'SPECIAL AWARD JUDGE',\n 'CITY', 'ORGANIZATION / EMPLOYER', 'REGISTRATION STATUS'])\n elif args.students:\n filename, df = sw.export_student_list()\n stats('students', df, filename, ['approval status',\n 'payment status', 'final status'])\n else:\n print('not implemented')\n\n\n" ]
[ [ "pandas.concat" ] ]
huggingface/hf_benchmarks
[ "2a4367b003d4e363a7e3c6485c4a6bdbfd8f95f0" ]
[ "tests/test_hub.py" ]
[ "import os\nfrom unittest import TestCase\n\nimport pandas as pd\nfrom huggingface_hub import HfFolder\n\nfrom hf_benchmarks import extract_tags, get_benchmark_repos\n\nfrom .testing_utils import (\n BOGUS_BENCHMARK_NAME,\n DUMMY_BENCHMARK_NAME,\n DUMMY_EVALUATION_ID,\n DUMMY_MODEL_ID,\n DUMMY_PREDICTION_ID,\n)\n\n\nclass ExtractTagsTest(TestCase):\n def test_no_tags(self):\n repo_info = {\"modelId\": \"bert-base-uncased\"}\n tags = extract_tags(repo_info)\n self.assertDictEqual(tags, {})\n\n def test_no_keyed_tags(self):\n repo_info = {\"modelId\": \"bert-base-uncased\", \"tags\": [\"exbert\"]}\n tags = extract_tags(repo_info)\n self.assertDictEqual(tags, {})\n\n def test_keyed_tags(self):\n repo_info = {\"modelId\": \"bert-base-uncased\", \"tags\": [\"benchmark:glue\", \"dataset:wikipedia\"]}\n tags = extract_tags(repo_info)\n self.assertDictEqual(tags, {\"benchmark\": \"glue\", \"dataset\": \"wikipedia\"})\n\n def test_keyed_tags_with_multiple_colons(self):\n repo_info = {\"modelId\": \"bert-base-uncased\", \"tags\": [\"benchmark:glue:superglue\", \"dataset:wikipedia\"]}\n tags = extract_tags(repo_info)\n self.assertDictEqual(tags, {\"benchmark\": \"glue:superglue\", \"dataset\": \"wikipedia\"})\n\n def test_mixed_tags(self):\n repo_info = {\"modelId\": \"bert-base-uncased\", \"tags\": [\"exbert\", \"benchmark:glue\", \"dataset:wikipedia\"]}\n tags = extract_tags(repo_info)\n self.assertDictEqual(tags, {\"benchmark\": \"glue\", \"dataset\": \"wikipedia\"})\n\n\nclass GetBenchmarkReposTest(TestCase):\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Share this valid token in all tests below. Needed for CI\n \"\"\"\n token = os.getenv(\"HF_HUB_TOKEN\")\n if token:\n HfFolder.save_token(token)\n\n def test_no_datasets_repo(self):\n data = get_benchmark_repos(\n benchmark=BOGUS_BENCHMARK_NAME, use_auth_token=True, endpoint=\"datasets\", repo_type=\"prediction\"\n )\n self.assertEqual(len(data), 0)\n\n def test_no_models_repo(self):\n data = get_benchmark_repos(\n benchmark=BOGUS_BENCHMARK_NAME, use_auth_token=True, endpoint=\"models\", repo_type=\"prediction\"\n )\n self.assertEqual(len(data), 0)\n\n def test_prediction_repo(self):\n data = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint=\"datasets\", repo_type=\"prediction\"\n )\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"id\"], DUMMY_PREDICTION_ID)\n\n def test_evaluation_repo(self):\n data = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint=\"datasets\", repo_type=\"evaluation\"\n )\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"id\"], DUMMY_EVALUATION_ID)\n\n def test_model_upload_repo(self):\n data = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint=\"models\", repo_type=\"model\"\n )\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"modelId\"], DUMMY_MODEL_ID)\n\n def test_repo_in_submission_window(self):\n # Grab repo to extract timestamp\n # TODO(lewtun): Use HfApi.dataset_info if we bump huggingface-hub in AutoNLP backend\n repo = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint=\"datasets\", repo_type=\"prediction\"\n )\n submission_time = pd.to_datetime(repo[0].get(\"lastModified\"))\n start_date = (submission_time - pd.Timedelta(days=1)).date()\n end_date = (submission_time + pd.Timedelta(days=1)).date()\n data = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME,\n use_auth_token=True,\n endpoint=\"datasets\",\n repo_type=\"prediction\",\n 
start_date=start_date,\n end_date=end_date,\n )\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"id\"], DUMMY_PREDICTION_ID)\n\n def test_repo_outside_submission_window(self):\n # Grab repo to extract timestamp\n # TODO(lewtun): Use HfApi.dataset_info if we bump huggingface-hub in AutoNLP backend\n repo = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME, use_auth_token=True, endpoint=\"datasets\", repo_type=\"prediction\"\n )\n submission_time = pd.to_datetime(repo[0].get(\"lastModified\"))\n start_date = (submission_time + pd.Timedelta(days=1)).date()\n end_date = (submission_time + pd.Timedelta(days=2)).date()\n data = get_benchmark_repos(\n benchmark=DUMMY_BENCHMARK_NAME,\n use_auth_token=True,\n endpoint=\"datasets\",\n repo_type=\"prediction\",\n start_date=start_date,\n end_date=end_date,\n )\n self.assertEqual(len(data), 0)\n" ]
[ [ "pandas.Timedelta" ] ]
DavidCai1993/coursera-deep-learning
[ "fd01dd25bf4b5521732ab1e9f723dc716ac26691", "fd01dd25bf4b5521732ab1e9f723dc716ac26691" ]
[ "improving-deep-neural-networks/week-2/Optimization+methods.py", "convolutional-neural-networks/week-1/Convolution+model+-+Step+by+Step+-+v2.py" ]
[ "\n# coding: utf-8\n\n# # Optimization Methods\n# \n# Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. \n# \n# Gradient descent goes \"downhill\" on a cost function $J$. Think of it as trying to do this: \n# <img src=\"images/cost.jpg\" style=\"width:650px;height:300px;\">\n# <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>\n# \n# **Notations**: As usual, $\\frac{\\partial J}{\\partial a } = $ `da` for any variable `a`.\n# \n# To get started, run the following code to import the libraries you will need.\n\n# In[6]:\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nimport math\nimport sklearn\nimport sklearn.datasets\n\nfrom opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation\nfrom opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset\nfrom testCases import *\n\nget_ipython().magic('matplotlib inline')\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n\n# ## 1 - Gradient Descent\n# \n# A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. \n# \n# **Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$: \n# $$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{1}$$\n# $$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{2}$$\n# \n# where L is the number of layers and $\\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.\n\n# In[9]:\n\n# GRADED FUNCTION: update_parameters_with_gd\n\ndef update_parameters_with_gd(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using one step of gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters to be updated:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients to update each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n learning_rate -- the learning rate, scalar.\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters\n\n\n# In[10]:\n\nparameters, grads, learning_rate = update_parameters_with_gd_test_case()\n\nparameters = update_parameters_with_gd(parameters, grads, learning_rate)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\n\n\n# **Expected Output**:\n# \n# <table> \n# <tr>\n# <td > **W1** </td> \n# <td > [[ 1.63535156 -0.62320365 -0.53718766]\n# [-1.07799357 0.85639907 -2.29470142]] </td> \n# </tr> \n# \n# <tr>\n# <td > **b1** </td> \n# <td > [[ 1.74604067]\n# [-0.75184921]] </td> \n# </tr> \n# \n# <tr>\n# <td > **W2** </td> \n# <td > [[ 0.32171798 -0.25467393 1.46902454]\n# [-2.05617317 -0.31554548 -0.3756023 ]\n# [ 1.1404819 -1.09976462 -0.1612551 ]] </td> \n# </tr> \n# \n# <tr>\n# <td > **b2** </td> \n# <td > [[-0.88020257]\n# [ 0.02561572]\n# [ 0.57539477]] </td> \n# </tr> \n# </table>\n# \n\n# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent. \n# \n# - **(Batch) Gradient Descent**:\n# \n# ``` python\n# X = data_input\n# Y = labels\n# parameters = initialize_parameters(layers_dims)\n# for i in range(0, num_iterations):\n# # Forward propagation\n# a, caches = forward_propagation(X, parameters)\n# # Compute cost.\n# cost = compute_cost(a, Y)\n# # Backward propagation.\n# grads = backward_propagation(a, caches, parameters)\n# # Update parameters.\n# parameters = update_parameters(parameters, grads)\n# \n# ```\n# \n# - **Stochastic Gradient Descent**:\n# \n# ```python\n# X = data_input\n# Y = labels\n# parameters = initialize_parameters(layers_dims)\n# for i in range(0, num_iterations):\n# for j in range(0, m):\n# # Forward propagation\n# a, caches = forward_propagation(X[:,j], parameters)\n# # Compute cost\n# cost = compute_cost(a, Y[:,j])\n# # Backward propagation\n# grads = backward_propagation(a, caches, parameters)\n# # Update parameters.\n# parameters = update_parameters(parameters, grads)\n# ```\n# \n\n# In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will \"oscillate\" toward the minimum rather than converge smoothly. Here is an illustration of this: \n# \n# <img src=\"images/kiank_sgd.png\" style=\"width:750px;height:250px;\">\n# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> \"+\" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>\n# \n# **Note** also that implementing SGD requires 3 for-loops in total:\n# 1. Over the number of iterations\n# 2. Over the $m$ training examples\n# 3. 
Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)\n# \n# In practice, you'll often get faster results if you use neither the whole training set nor only one training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.\n# \n# <img src=\"images/kiank_minibatch.png\" style=\"width:750px;height:250px;\">\n# <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> \"+\" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>\n# \n# <font color='blue'>\n# **What you should remember**:\n# - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.\n# - You have to tune a learning rate hyperparameter $\\alpha$.\n# - With a well-tuned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).\n\n# ## 2 - Mini-Batch Gradient descent\n# \n# Let's learn how to build mini-batches from the training set (X, Y).\n# \n# There are two steps:\n# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y, such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches. \n# \n# <img src=\"images/kiank_shuffle.png\" style=\"width:550px;height:300px;\">\n# \n# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this: \n# \n# <img src=\"images/kiank_partition.png\" style=\"width:550px;height:300px;\">\n# \n# **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:\n# ```python\n# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]\n# second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]\n# ...\n# ```\n# \n# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\\lfloor s \\rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\\lfloor \\frac{m}{mini\\_batch\\_size}\\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m - mini\\_batch\\_size \\times \\lfloor \\frac{m}{mini\\_batch\\_size}\\rfloor$). 
\n\n# In[17]:\n\n# GRADED FUNCTION: random_mini_batches\n\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n mini_batch_size -- size of the mini-batches, integer\n \n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n \n np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n m = X.shape[1] # number of training examples\n mini_batches = []\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning\n for k in range(0, num_complete_minibatches):\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, k * mini_batch_size: (k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size: (k + 1) * mini_batch_size]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size :]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size :]\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches\n\n\n# In[18]:\n\nX_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()\nmini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)\n\nprint (\"shape of the 1st mini_batch_X: \" + str(mini_batches[0][0].shape))\nprint (\"shape of the 2nd mini_batch_X: \" + str(mini_batches[1][0].shape))\nprint (\"shape of the 3rd mini_batch_X: \" + str(mini_batches[2][0].shape))\nprint (\"shape of the 1st mini_batch_Y: \" + str(mini_batches[0][1].shape))\nprint (\"shape of the 2nd mini_batch_Y: \" + str(mini_batches[1][1].shape)) \nprint (\"shape of the 3rd mini_batch_Y: \" + str(mini_batches[2][1].shape))\nprint (\"mini batch sanity check: \" + str(mini_batches[0][0][0][0:3]))\n\n\n# **Expected Output**:\n# \n# <table style=\"width:50%\"> \n# <tr>\n# <td > **shape of the 1st mini_batch_X** </td> \n# <td > (12288, 64) </td> \n# </tr> \n# \n# <tr>\n# <td > **shape of the 2nd mini_batch_X** </td> \n# <td > (12288, 64) </td> \n# </tr> \n# \n# <tr>\n# <td > **shape of the 3rd mini_batch_X** </td> \n# <td > (12288, 20) </td> \n# </tr>\n# <tr>\n# <td > **shape of the 1st mini_batch_Y** </td> \n# <td > (1, 64) </td> \n# </tr> \n# <tr>\n# <td > **shape of the 2nd mini_batch_Y** </td> \n# <td > (1, 64) </td> \n# </tr> \n# <tr>\n# <td > **shape of the 3rd mini_batch_Y** </td> \n# <td > (1, 20) </td> \n# </tr> \n# <tr>\n# <td > **mini batch sanity check** </td> \n# <td > [ 0.90085595 -0.7612069 0.2344157 ] </td> \n# </tr>\n# \n# </table>\n\n# <font color='blue'>\n# **What you should remember**:\n# - Shuffling and Partitioning are the two steps required to build mini-batches\n# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.\n\n# ## 3 - Momentum\n# \n# Because mini-batch gradient descent 
makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will \"oscillate\" toward convergence. Using momentum can reduce these oscillations. \n# \n# Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the \"velocity\" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill. \n# \n# <img src=\"images/opt_momentum.png\" style=\"width:400px;height:250px;\">\n# <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>\n# \n# \n# **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:\n# for $l =1,...,L$:\n# ```python\n# v[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\n# v[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n# ```\n# **Note** that the iterator l starts at 0 in the for loop while the first parameters are v[\"dW1\"] and v[\"db1\"] (that's a \"one\" on the superscript). This is why we are shifting l to l+1 in the `for` loop.\n\n# In[27]:\n\n# GRADED FUNCTION: initialize_velocity\n\ndef initialize_velocity(parameters):\n \"\"\"\n Initializes the velocity as a python dictionary with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n \n Returns:\n v -- python dictionary containing the current velocity.\n v['dW' + str(l)] = velocity of dWl\n v['db' + str(l)] = velocity of dbl\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n \n # Initialize velocity\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n ### END CODE HERE ###\n \n return v\n\n\n# In[28]:\n\nparameters = initialize_velocity_test_case()\n\nv = initialize_velocity(parameters)\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\n\n\n# **Expected Output**:\n# \n# <table style=\"width:40%\"> \n# <tr>\n# <td > **v[\"dW1\"]** </td> \n# <td > [[ 0. 0. 0.]\n# [ 0. 0. 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db1\"]** </td> \n# <td > [[ 0.]\n# [ 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"dW2\"]** </td> \n# <td > [[ 0. 0. 0.]\n# [ 0. 0. 0.]\n# [ 0. 0. 
0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db2\"]** </td> \n# <td > [[ 0.]\n# [ 0.]\n# [ 0.]] </td> \n# </tr> \n# </table>\n# \n\n# **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$: \n# \n# $$ \\begin{cases}\n# v_{dW^{[l]}} = \\beta v_{dW^{[l]}} + (1 - \\beta) dW^{[l]} \\\\\n# W^{[l]} = W^{[l]} - \\alpha v_{dW^{[l]}}\n# \\end{cases}\\tag{3}$$\n# \n# $$\\begin{cases}\n# v_{db^{[l]}} = \\beta v_{db^{[l]}} + (1 - \\beta) db^{[l]} \\\\\n# b^{[l]} = b^{[l]} - \\alpha v_{db^{[l]}} \n# \\end{cases}\\tag{4}$$\n# \n# where L is the number of layers, $\\beta$ is the momentum and $\\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a \"one\" on the superscript). So you will need to shift `l` to `l+1` when coding.\n\n# In[31]:\n\n# GRADED FUNCTION: update_parameters_with_momentum\n\ndef update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n \"\"\"\n Update parameters using Momentum\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- python dictionary containing the current velocity:\n v['dW' + str(l)] = ...\n v['db' + str(l)] = ...\n beta -- the momentum hyperparameter, scalar\n learning_rate -- the learning rate, scalar\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- python dictionary containing your updated velocities\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n \n # Momentum update for each parameter\n for l in range(L):\n \n ### START CODE HERE ### (approx. 
4 lines)\n # compute velocities\n v[\"dW\" + str(l+1)] = beta * v[\"dW\" + str(l+1)] + (1 - beta) * grads[\"dW\" + str(l+1)]\n v[\"db\" + str(l+1)] = beta * v[\"db\" + str(l+1)] + (1 - beta) * grads[\"db\" + str(l+1)]\n # update parameters\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * v[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * v[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters, v\n\n\n# In[32]:\n\nparameters, grads, v = update_parameters_with_momentum_test_case()\n\nparameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\n\n\n# **Expected Output**:\n# \n# <table style=\"width:90%\"> \n# <tr>\n# <td > **W1** </td> \n# <td > [[ 1.62544598 -0.61290114 -0.52907334]\n# [-1.07347112 0.86450677 -2.30085497]] </td> \n# </tr> \n# \n# <tr>\n# <td > **b1** </td> \n# <td > [[ 1.74493465]\n# [-0.76027113]] </td> \n# </tr> \n# \n# <tr>\n# <td > **W2** </td> \n# <td > [[ 0.31930698 -0.24990073 1.4627996 ]\n# [-2.05974396 -0.32173003 -0.38320915]\n# [ 1.13444069 -1.0998786 -0.1713109 ]] </td> \n# </tr> \n# \n# <tr>\n# <td > **b2** </td> \n# <td > [[-0.87809283]\n# [ 0.04055394]\n# [ 0.58207317]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"dW1\"]** </td> \n# <td > [[-0.11006192 0.11447237 0.09015907]\n# [ 0.05024943 0.09008559 -0.06837279]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db1\"]** </td> \n# <td > [[-0.01228902]\n# [-0.09357694]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"dW2\"]** </td> \n# <td > [[-0.02678881 0.05303555 -0.06916608]\n# [-0.03967535 -0.06871727 -0.08452056]\n# [-0.06712461 -0.00126646 -0.11173103]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db2\"]** </td> \n# <td > [[ 0.02344157]\n# [ 0.16598022]\n# [ 0.07420442]]</td> \n# </tr> \n# </table>\n# \n# \n\n# **Note** that:\n# - The velocity is initialized with zeros. So the algorithm will take a few iterations to \"build up\" velocity and start to take bigger steps.\n# - If $\\beta = 0$, then this just becomes standard gradient descent without momentum. \n# \n# **How do you choose $\\beta$?**\n# \n# - The larger the momentum $\\beta$ is, the smoother the update, because we take more of the past gradients into account. But if $\\beta$ is too big, it could also smooth out the updates too much. \n# - Common values for $\\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\\beta = 0.9$ is often a reasonable default. \n# - Tuning the optimal $\\beta$ for your model may require trying several values to see what works best in terms of reducing the value of the cost function $J$. \n\n# <font color='blue'>\n# **What you should remember**:\n# - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.\n# - You have to tune a momentum hyperparameter $\\beta$ and a learning rate $\\alpha$.\n\n# ## 4 - Adam\n# \n# Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum. 
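\n# \n# As a quick aside (an illustration with made-up numbers, not part of the graded assignment), the exponentially weighted average that both Momentum and Adam rely on can be computed by hand:\n# ```python\n# # Illustration only: smoothing a toy sequence of \"gradients\" with the momentum recurrence\n# v, beta = 0.0, 0.9\n# for g in [1.0, -0.2, 0.9, 1.1, -0.1]:\n#     v = beta * v + (1 - beta) * g  # same rule as for v_dW above\n#     print(round(v, 3))             # 0.1, 0.07, 0.153, 0.248, 0.213\n# ```\n# Notice how $v$ builds up gradually and damps the sign flips of the raw sequence -- exactly the \"velocity\" behavior described above.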
\n# \n# **How does Adam work?**\n# 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction). \n# 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction). \n# 3. It updates parameters in a direction based on combining information from \"1\" and \"2\".\n# \n# The update rule is, for $l = 1, ..., L$: \n# \n# $$\\begin{cases}\n# v_{dW^{[l]}} = \\beta_1 v_{dW^{[l]}} + (1 - \\beta_1) \\frac{\\partial \\mathcal{J} }{ \\partial W^{[l]} } \\\\\n# v^{corrected}_{dW^{[l]}} = \\frac{v_{dW^{[l]}}}{1 - (\\beta_1)^t} \\\\\n# s_{dW^{[l]}} = \\beta_2 s_{dW^{[l]}} + (1 - \\beta_2) (\\frac{\\partial \\mathcal{J} }{\\partial W^{[l]} })^2 \\\\\n# s^{corrected}_{dW^{[l]}} = \\frac{s_{dW^{[l]}}}{1 - (\\beta_2)^t} \\\\\n# W^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}_{dW^{[l]}}}{\\sqrt{s^{corrected}_{dW^{[l]}}} + \\varepsilon}\n# \\end{cases}$$\n# where:\n# - $t$ counts the number of Adam update steps taken so far \n# - $L$ is the number of layers\n# - $\\beta_1$ and $\\beta_2$ are hyperparameters that control the two exponentially weighted averages. \n# - $\\alpha$ is the learning rate\n# - $\\varepsilon$ is a very small number to avoid dividing by zero\n# \n# As usual, we will store all parameters in the `parameters` dictionary. \n\n# **Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.\n# \n# **Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:\n# for $l = 1, ..., L$:\n# ```python\n# v[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\n# v[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n# s[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\n# s[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n# \n# ```\n\n# In[43]:\n\n# GRADED FUNCTION: initialize_adam\n\ndef initialize_adam(parameters):\n \"\"\"\n Initializes v and s as two python dictionaries with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n \n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters[\"W\" + str(l)] = Wl\n parameters[\"b\" + str(l)] = bl\n \n Returns: \n v -- python dictionary that will contain the exponentially weighted average of the gradient.\n v[\"dW\" + str(l)] = ...\n v[\"db\" + str(l)] = ...\n s -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n s[\"dW\" + str(l)] = ...\n s[\"db\" + str(l)] = ...\n\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n s = {}\n \n # Initialize v, s. Input: \"parameters\". Outputs: \"v, s\".\n for l in range(L):\n ### START CODE HERE ### (approx. 
4 lines)\n v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n s[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n s[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n ### END CODE HERE ###\n \n return v, s\n\n\n# In[44]:\n\nparameters = initialize_adam_test_case()\n\nv, s = initialize_adam(parameters)\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \" + str(s[\"db2\"]))\n\n\n# **Expected Output**:\n# \n# <table style=\"width:40%\"> \n# <tr>\n# <td > **v[\"dW1\"]** </td> \n# <td > [[ 0. 0. 0.]\n# [ 0. 0. 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db1\"]** </td> \n# <td > [[ 0.]\n# [ 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"dW2\"]** </td> \n# <td > [[ 0. 0. 0.]\n# [ 0. 0. 0.]\n# [ 0. 0. 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db2\"]** </td> \n# <td > [[ 0.]\n# [ 0.]\n# [ 0.]] </td> \n# </tr> \n# <tr>\n# <td > **s[\"dW1\"]** </td> \n# <td > [[ 0. 0. 0.]\n# [ 0. 0. 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **s[\"db1\"]** </td> \n# <td > [[ 0.]\n# [ 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **s[\"dW2\"]** </td> \n# <td > [[ 0. 0. 0.]\n# [ 0. 0. 0.]\n# [ 0. 0. 0.]] </td> \n# </tr> \n# \n# <tr>\n# <td > **s[\"db2\"]** </td> \n# <td > [[ 0.]\n# [ 0.]\n# [ 0.]] </td> \n# </tr>\n# \n# </table>\n# \n\n# **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$: \n# \n# $$\\begin{cases}\n# v_{W^{[l]}} = \\beta_1 v_{W^{[l]}} + (1 - \\beta_1) \\frac{\\partial J }{ \\partial W^{[l]} } \\\\\n# v^{corrected}_{W^{[l]}} = \\frac{v_{W^{[l]}}}{1 - (\\beta_1)^t} \\\\\n# s_{W^{[l]}} = \\beta_2 s_{W^{[l]}} + (1 - \\beta_2) (\\frac{\\partial J }{\\partial W^{[l]} })^2 \\\\\n# s^{corrected}_{W^{[l]}} = \\frac{s_{W^{[l]}}}{1 - (\\beta_2)^t} \\\\\n# W^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}_{W^{[l]}}}{\\sqrt{s^{corrected}_{W^{[l]}}}+\\varepsilon}\n# \\end{cases}$$\n# \n# \n# **Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. 
You need to shift `l` to `l+1` when coding.\n\n# In[63]:\n\n# GRADED FUNCTION: update_parameters_with_adam\n\ndef update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n \"\"\"\n Update parameters using Adam\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n learning_rate -- the learning rate, scalar.\n beta1 -- Exponential decay hyperparameter for the first moment estimates \n beta2 -- Exponential decay hyperparameter for the second moment estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n \n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = beta1 * v[\"dW\" + str(l+1)] + (1 - beta1) * grads['dW' + str(l + 1)]\n v[\"db\" + str(l+1)] = beta1 * v[\"db\" + str(l+1)] + (1 - beta1) * grads['db' + str(l + 1)]\n ### END CODE HERE ###\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)] / (1 - pow(beta1, t))\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)] / (1 - pow(beta1, t))\n ### END CODE HERE ###\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n ### START CODE HERE ### (approx. 2 lines)\n s[\"dW\" + str(l+1)] = beta2 * s[\"dW\" + str(l+1)] + (1 - beta2) * np.power(grads['dW' + str(l + 1)], 2)\n s[\"db\" + str(l+1)] = beta2 * s[\"db\" + str(l+1)] + (1 - beta2) * np.power(grads['db' + str(l + 1)], 2)\n ### END CODE HERE ###\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)] / (1 - pow(beta2, t))\n s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)] / (1 - pow(beta2, t))\n ### END CODE HERE ###\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * (v_corrected[\"dW\" + str(l+1)] / ((np.sqrt(s_corrected[\"dW\" + str(l+1)])) + epsilon))\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * (v_corrected[\"db\" + str(l+1)] / ((np.sqrt(s_corrected[\"db\" + str(l+1)])) + epsilon))\n ### END CODE HERE ###\n\n return parameters, v, s\n\n\n# In[64]:\n\nparameters, grads, v, s = update_parameters_with_adam_test_case()\nparameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \" + str(s[\"db2\"]))\n\n\n# **Expected Output**:\n# \n# <table> \n# <tr>\n# <td > **W1** </td> \n# <td > [[ 1.63178673 -0.61919778 -0.53561312]\n# [-1.08040999 0.85796626 -2.29409733]] </td> \n# </tr> \n# \n# <tr>\n# <td > **b1** </td> \n# <td > [[ 1.75225313]\n# [-0.75376553]] </td> \n# </tr> \n# \n# <tr>\n# <td > **W2** </td> \n# <td > [[ 0.32648046 -0.25681174 1.46954931]\n# [-2.05269934 -0.31497584 -0.37661299]\n# [ 1.14121081 -1.09245036 -0.16498684]] </td> \n# </tr> \n# \n# <tr>\n# <td > **b2** </td> \n# <td > [[-0.88529978]\n# [ 0.03477238]\n# [ 0.57537385]] </td> \n# </tr> \n# <tr>\n# <td > **v[\"dW1\"]** </td> \n# <td > [[-0.11006192 0.11447237 0.09015907]\n# [ 0.05024943 0.09008559 -0.06837279]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db1\"]** </td> \n# <td > [[-0.01228902]\n# [-0.09357694]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"dW2\"]** </td> \n# <td > [[-0.02678881 0.05303555 -0.06916608]\n# [-0.03967535 -0.06871727 -0.08452056]\n# [-0.06712461 -0.00126646 -0.11173103]] </td> \n# </tr> \n# \n# <tr>\n# <td > **v[\"db2\"]** </td> \n# <td > [[ 0.02344157]\n# [ 0.16598022]\n# [ 0.07420442]] </td> \n# </tr> \n# <tr>\n# <td > **s[\"dW1\"]** </td> \n# <td > [[ 0.00121136 0.00131039 0.00081287]\n# [ 0.0002525 0.00081154 0.00046748]] </td> \n# </tr> \n# \n# <tr>\n# <td > **s[\"db1\"]** </td> \n# <td > [[ 1.51020075e-05]\n# [ 8.75664434e-04]] </td> \n# </tr> \n# \n# <tr>\n# <td > **s[\"dW2\"]** </td> \n# <td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]\n# [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]\n# [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td> \n# </tr> \n# \n# <tr>\n# <td > **s[\"db2\"]** </td> \n# <td > [[ 5.49507194e-05]\n# [ 2.75494327e-03]\n# [ 5.50629536e-04]] </td> \n# </tr>\n# </table>\n# \n\n# You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.\n\n# ## 5 - Model with different optimization algorithms\n# \n# Lets use the following \"moons\" dataset to test the different optimization methods. (The dataset is named \"moons\" because the data from each of the two classes looks a bit like a crescent-shaped moon.) \n\n# In[65]:\n\ntrain_X, train_Y = load_dataset()\n\n\n# We have already implemented a 3-layer neural network. 
You will train it with: \n# - Mini-batch **Gradient Descent**: it will call your function:\n# - `update_parameters_with_gd()`\n# - Mini-batch **Momentum**: it will call your functions:\n# - `initialize_velocity()` and `update_parameters_with_momentum()`\n# - Mini-batch **Adam**: it will call your functions:\n# - `initialize_adam()` and `update_parameters_with_adam()`\n\n# In[66]:\n\ndef model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):\n \"\"\"\n 3-layer neural network model which can be run in different optimizer modes.\n \n Arguments:\n X -- input data, of shape (2, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n layers_dims -- python list, containing the size of each layer\n learning_rate -- the learning rate, scalar.\n mini_batch_size -- the size of a mini batch\n beta -- Momentum hyperparameter\n beta1 -- Exponential decay hyperparameter for the past gradients estimates \n beta2 -- Exponential decay hyperparameter for the past squared gradients estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n num_epochs -- number of epochs\n print_cost -- True to print the cost every 1000 epochs\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(layers_dims) # number of layers in the neural networks\n costs = [] # to keep track of the cost\n t = 0 # initializing the counter required for Adam update\n seed = 10 # For grading purposes, so that your \"random\" minibatches are the same as ours\n \n # Initialize parameters\n parameters = initialize_parameters(layers_dims)\n\n # Initialize the optimizer\n if optimizer == \"gd\":\n pass # no initialization required for gradient descent\n elif optimizer == \"momentum\":\n v = initialize_velocity(parameters)\n elif optimizer == \"adam\":\n v, s = initialize_adam(parameters)\n \n # Optimization loop\n for i in range(num_epochs):\n \n # Define the random minibatches. 
We increment the seed to reshuffle the dataset differently after each epoch\n seed = seed + 1\n minibatches = random_mini_batches(X, Y, mini_batch_size, seed)\n\n for minibatch in minibatches:\n\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n\n # Forward propagation\n a3, caches = forward_propagation(minibatch_X, parameters)\n\n # Compute cost\n cost = compute_cost(a3, minibatch_Y)\n\n # Backward propagation\n grads = backward_propagation(minibatch_X, minibatch_Y, caches)\n\n # Update parameters\n if optimizer == \"gd\":\n parameters = update_parameters_with_gd(parameters, grads, learning_rate)\n elif optimizer == \"momentum\":\n parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)\n elif optimizer == \"adam\":\n t = t + 1 # Adam counter\n parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,\n t, learning_rate, beta1, beta2, epsilon)\n \n # Print the cost every 1000 epochs\n if print_cost and i % 1000 == 0:\n print(\"Cost after epoch %i: %f\" % (i, cost))\n if print_cost and i % 100 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('epochs (per 100)')\n plt.title(\"Learning rate = \" + str(learning_rate))\n plt.show()\n\n return parameters\n\n\n# You will now run this 3-layer neural network with each of the 3 optimization methods.\n# \n# ### 5.1 - Mini-batch Gradient descent\n# \n# Run the following code to see how the model does with mini-batch gradient descent.\n\n# In[67]:\n\n# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"gd\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Gradient Descent optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)\n\n\n# ### 5.2 - Mini-batch gradient descent with momentum\n# \n# Run the following code to see how the model does with momentum. 
Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.\n\n# In[68]:\n\n# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = \"momentum\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Momentum optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)\n\n\n# ### 5.3 - Mini-batch gradient descent with Adam\n# \n# Run the following code to see how the model does with Adam.\n\n# In[69]:\n\n# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"adam\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Adam optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)\n\n\n# ### 5.4 - Summary\n# \n# <table> \n# <tr>\n# <td>\n# **optimization method**\n# </td>\n# <td>\n# **accuracy**\n# </td>\n# <td>\n# **cost shape**\n# </td>\n# \n# </tr>\n# <tr>\n# <td>\n# Gradient descent\n# </td>\n# <td>\n# 79.7%\n# </td>\n# <td>\n# oscillations\n# </td>\n# </tr>\n# <tr>\n# <td>\n# Momentum\n# </td>\n# <td>\n# 79.7%\n# </td>\n# <td>\n# oscillations\n# </td>\n# </tr>\n# <tr>\n# <td>\n# Adam\n# </td>\n# <td>\n# 94%\n# </td>\n# <td>\n# smoother\n# </td>\n# </tr>\n# </table> \n# \n# Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.\n# \n# Adam, on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.\n# \n# Some advantages of Adam include:\n# - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum) \n# - Usually works well even with little tuning of hyperparameters (except $\\alpha$)\n\n# **References**:\n# \n# - Adam paper: https://arxiv.org/pdf/1412.6980.pdf\n", "\n# coding: utf-8\n\n# # Convolutional Neural Networks: Step by Step\n# \n# Welcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation. \n# \n# **Notation**:\n# - Superscript $[l]$ denotes an object of the $l^{th}$ layer. \n# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.\n# \n# \n# - Superscript $(i)$ denotes an object from the $i^{th}$ example. \n# - Example: $x^{(i)}$ is the $i^{th}$ training example input.\n# \n# \n# - Subscript $i$ denotes the $i^{th}$ entry of a vector.\n# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.\n# \n# \n# - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. 
If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$. \n# - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$. \n# \n# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!\n\n# ## 1 - Packages\n# \n# Let's first import all the packages that you will need during this assignment. \n# - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.\n# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n# - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.\n\n# In[1]:\n\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\nget_ipython().magic('matplotlib inline')\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nget_ipython().magic('load_ext autoreload')\nget_ipython().magic('autoreload 2')\n\nnp.random.seed(1)\n\n\n# ## 2 - Outline of the Assignment\n# \n# You will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:\n# \n# - Convolution functions, including:\n# - Zero Padding\n# - Convolve window \n# - Convolution forward\n# - Convolution backward (optional)\n# - Pooling functions, including:\n# - Pooling forward\n# - Create mask \n# - Distribute value\n# - Pooling backward (optional)\n# \n# This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:\n# \n# <img src=\"images/model.png\" style=\"width:800px;height:300px;\">\n# \n# **Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation. \n\n# ## 3 - Convolutional Neural Networks\n# \n# Although programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below. \n# \n# <img src=\"images/conv_nn.png\" style=\"width:350px;height:200px;\">\n# \n# In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself. \n\n# ### 3.1 - Zero-Padding\n# \n# Zero-padding adds zeros around the border of an image:\n# \n# <img src=\"images/PAD.png\" style=\"width:600px;height:400px;\">\n# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>\n# \n# The main benefits of padding are the following:\n# \n# - It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. 
An important special case is the \"same\" convolution, in which the height/width is exactly preserved after one layer. \n# \n# - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels as the edges of an image.\n# \n# **Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array \"a\" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:\n# ```python\n# a = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))\n# ```\n\n# In[8]:\n\n# GRADED FUNCTION: zero_pad\n\ndef zero_pad(X, pad):\n \"\"\"\n Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image, \n as illustrated in Figure 1.\n \n Argument:\n X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images\n pad -- integer, amount of padding around each image on vertical and horizontal dimensions\n \n Returns:\n X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line)\n X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)\n ### END CODE HERE ###\n \n return X_pad\n\n\n# In[9]:\n\nnp.random.seed(1)\nx = np.random.randn(4, 3, 3, 2)\nx_pad = zero_pad(x, 2)\nprint (\"x.shape =\", x.shape)\nprint (\"x_pad.shape =\", x_pad.shape)\nprint (\"x[1,1] =\", x[1,1])\nprint (\"x_pad[1,1] =\", x_pad[1,1])\n\nfig, axarr = plt.subplots(1, 2)\naxarr[0].set_title('x')\naxarr[0].imshow(x[0,:,:,0])\naxarr[1].set_title('x_pad')\naxarr[1].imshow(x_pad[0,:,:,0])\n\n\n# **Expected Output**:\n# \n# <table>\n# <tr>\n# <td>\n# **x.shape**:\n# </td>\n# <td>\n# (4, 3, 3, 2)\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **x_pad.shape**:\n# </td>\n# <td>\n# (4, 7, 7, 2)\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **x[1,1]**:\n# </td>\n# <td>\n# [[ 0.90085595 -0.68372786]\n# [-0.12289023 -0.93576943]\n# [-0.26788808 0.53035547]]\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **x_pad[1,1]**:\n# </td>\n# <td>\n# [[ 0. 0.]\n# [ 0. 0.]\n# [ 0. 0.]\n# [ 0. 0.]\n# [ 0. 0.]\n# [ 0. 0.]\n# [ 0. 0.]]\n# </td>\n# </tr>\n# \n# </table>\n\n# ### 3.2 - Single step of convolution \n# \n# In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which: \n# \n# - Takes an input volume \n# - Applies a filter at every position of the input\n# - Outputs another volume (usually of different size)\n# \n# <img src=\"images/Convolution_schematic.gif\" style=\"width:500px;height:300px;\">\n# <caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>\n# \n# In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up and adding a bias. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output. 
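\n# \n# For concreteness (an illustrative example with arbitrary numbers, not taken from the assignment): convolving a $2 \\times 2$ slice $\\begin{bmatrix} 1 & 2 \\\\ 3 & 0 \\end{bmatrix}$ with the filter $\\begin{bmatrix} 0 & 1 \\\\ -1 & 2 \\end{bmatrix}$ and bias $b = 0.5$ in a single step gives $1 \\cdot 0 + 2 \\cdot 1 + 3 \\cdot (-1) + 0 \\cdot 2 + 0.5 = -0.5$.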
\n# \n# Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation. \n# \n# **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).\n# \n\n# In[18]:\n\n# GRADED FUNCTION: conv_single_step\n\ndef conv_single_step(a_slice_prev, W, b):\n \"\"\"\n Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation \n of the previous layer.\n \n Arguments:\n a_slice_prev -- slice of input data of shape (f, f, n_C_prev)\n W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)\n b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)\n \n Returns:\n Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data\n \"\"\"\n\n ### START CODE HERE ### (≈ 2 lines of code)\n # Element-wise product between a_slice and W. Do not add the bias yet.\n s = np.multiply(a_slice_prev, W)\n # Sum over all entries of the volume s.\n Z = np.sum(s)\n # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.\n Z = Z + float(b)\n ### END CODE HERE ###\n\n return Z\n\n\n# In[19]:\n\nnp.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)\n\n\n# **Expected Output**:\n# <table>\n# <tr>\n# <td>\n# **Z**\n# </td>\n# <td>\n# -6.99908945068\n# </td>\n# </tr>\n# \n# </table>\n\n# ### 3.3 - Convolutional Neural Networks - Forward pass\n# \n# In the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume: \n# \n# <center>\n# <video width=\"620\" height=\"440\" src=\"images/conv_kiank.mp4\" type=\"video/mp4\" controls>\n# </video>\n# </center>\n# \n# **Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding. \n# \n# **Hint**: \n# 1. To select a 2x2 slice at the upper left corner of a matrix \"a_prev\" (shape (5,5,3)), you would do:\n# ```python\n# a_slice_prev = a_prev[0:2,0:2,:]\n# ```\n# This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.\n# 2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.\n# \n# <img src=\"images/vert_horiz_kiank.png\" style=\"width:400px;height:300px;\">\n# <caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. 
</center></caption>\n# \n# \n# **Reminder**:\n# The formulas relating the output shape of the convolution to the input shape is:\n# $$ n_H = \\lfloor \\frac{n_{H_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n# $$ n_W = \\lfloor \\frac{n_{W_{prev}} - f + 2 \\times pad}{stride} \\rfloor +1 $$\n# $$ n_C = \\text{number of filters used in the convolution}$$\n# \n# For this exercise, we won't worry about vectorization, and will just implement everything with for-loops.\n\n# In[24]:\n\n# GRADED FUNCTION: conv_forward\n\ndef conv_forward(A_prev, W, b, hparameters):\n \"\"\"\n Implements the forward propagation for a convolution function\n \n Arguments:\n A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n b -- Biases, numpy array of shape (1, 1, 1, n_C)\n hparameters -- python dictionary containing \"stride\" and \"pad\"\n \n Returns:\n Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward() function\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from A_prev's shape (≈1 line) \n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve dimensions from W's shape (≈1 line)\n (f, f, n_C_prev, n_C) = W.shape\n \n # Retrieve information from \"hparameters\" (≈2 lines)\n stride = hparameters['stride']\n pad = hparameters['pad']\n \n # Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)\n n_H = int((n_H_prev - f + 2 * pad) / stride) + 1\n n_W = int((n_W_prev - f + 2 * pad) / stride) + 1\n \n # Initialize the output volume Z with zeros. (≈1 line)\n Z = np.zeros((m, n_H, n_W, n_C))\n \n # Create A_prev_pad by padding A_prev\n A_prev_pad = zero_pad(A_prev, pad)\n \n for i in range(m): # loop over the batch of training examples\n a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation\n for h in range(n_H): # loop over vertical axis of the output volume\n for w in range(n_W): # loop over horizontal axis of the output volume\n for c in range(n_C): # loop over channels (= #filters) of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)\n a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n \n # Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. 
(≈1 line)\n Z[i, h, w, c] = conv_single_step(a_slice_prev, W[...,c] , b[...,c])\n \n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(Z.shape == (m, n_H, n_W, n_C))\n \n # Save information in \"cache\" for the backprop\n cache = (A_prev, W, b, hparameters)\n \n return Z, cache\n\n\n# In[25]:\n\nnp.random.seed(1)\nA_prev = np.random.randn(10,4,4,3)\nW = np.random.randn(2,2,3,8)\nb = np.random.randn(1,1,1,8)\nhparameters = {\"pad\" : 2,\n \"stride\": 2}\n\nZ, cache_conv = conv_forward(A_prev, W, b, hparameters)\nprint(\"Z's mean =\", np.mean(Z))\nprint(\"Z[3,2,1] =\", Z[3,2,1])\nprint(\"cache_conv[0][1][2][3] =\", cache_conv[0][1][2][3])\n\n\n# **Expected Output**:\n# \n# <table>\n# <tr>\n# <td>\n# **Z's mean**\n# </td>\n# <td>\n# 0.0489952035289\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **Z[3,2,1]**\n# </td>\n# <td>\n# [-0.61490741 -6.7439236 -2.55153897 1.75698377 3.56208902 0.53036437\n# 5.18531798 8.75898442]\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **cache_conv[0][1][2][3]**\n# </td>\n# <td>\n# [-0.20075807 0.18656139 0.41005165]\n# </td>\n# </tr>\n# \n# </table>\n# \n\n# Finally, CONV layer should also contain an activation, in which case we would add the following line of code:\n# \n# ```python\n# # Convolve the window to get back one output neuron\n# Z[i, h, w, c] = ...\n# # Apply activation\n# A[i, h, w, c] = activation(Z[i, h, w, c])\n# ```\n# \n# You don't need to do it here. \n# \n\n# ## 4 - Pooling layer \n# \n# The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are: \n# \n# - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.\n# \n# - Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.\n# \n# <table>\n# <td>\n# <img src=\"images/max_pool1.png\" style=\"width:500px;height:300px;\">\n# <td>\n# \n# <td>\n# <img src=\"images/a_pool.png\" style=\"width:500px;height:300px;\">\n# <td>\n# </table>\n# \n# These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over. \n# \n# ### 4.1 - Forward Pooling\n# Now, you are going to implement MAX-POOL and AVG-POOL, in the same function. \n# \n# **Exercise**: Implement the forward pass of the pooling layer. 
Follow the hints in the comments below.\n# \n# **Reminder**:\n# As there's no padding, the formulas relating the output shape of the pooling layer to the input shape are:\n# $$ n_H = \\lfloor \\frac{n_{H_{prev}} - f}{stride} \\rfloor +1 $$\n# $$ n_W = \\lfloor \\frac{n_{W_{prev}} - f}{stride} \\rfloor +1 $$\n# $$ n_C = n_{C_{prev}}$$\n\n# In[26]:\n\n# GRADED FUNCTION: pool_forward\n\ndef pool_forward(A_prev, hparameters, mode = \"max\"):\n \"\"\"\n Implements the forward pass of the pooling layer\n \n Arguments:\n A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n hparameters -- python dictionary containing \"f\" and \"stride\"\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters \n \"\"\"\n \n # Retrieve dimensions from the input shape\n (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n \n # Retrieve hyperparameters from \"hparameters\"\n f = hparameters[\"f\"]\n stride = hparameters[\"stride\"]\n \n # Define the dimensions of the output\n n_H = int(1 + (n_H_prev - f) / stride)\n n_W = int(1 + (n_W_prev - f) / stride)\n n_C = n_C_prev\n \n # Initialize output matrix A\n A = np.zeros((m, n_H, n_W, n_C)) \n \n ### START CODE HERE ###\n for i in range(m): # loop over the training examples\n for h in range(n_H): # loop on the vertical axis of the output volume\n for w in range(n_W): # loop on the horizontal axis of the output volume\n for c in range(n_C): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = h * stride\n vert_end = vert_start + f\n horiz_start = w * stride\n horiz_end = horiz_start + f\n \n # Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)\n a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]\n \n # Compute the pooling operation on the slice. Use an if statement to differentiate the modes. Use np.max/np.mean.\n if mode == \"max\":\n A[i, h, w, c] = np.max(a_prev_slice)\n elif mode == \"average\":\n A[i, h, w, c] = np.mean(a_prev_slice)\n \n ### END CODE HERE ###\n \n # Store the input and hparameters in \"cache\" for pool_backward()\n cache = (A_prev, hparameters)\n \n # Making sure your output shape is correct\n assert(A.shape == (m, n_H, n_W, n_C))\n \n return A, cache\n\n\n# In[27]:\n\nnp.random.seed(1)\nA_prev = np.random.randn(2, 4, 4, 3)\nhparameters = {\"stride\" : 2, \"f\": 3}\n\nA, cache = pool_forward(A_prev, hparameters)\nprint(\"mode = max\")\nprint(\"A =\", A)\nprint()\nA, cache = pool_forward(A_prev, hparameters, mode = \"average\")\nprint(\"mode = average\")\nprint(\"A =\", A)\n\n\n# **Expected Output:**\n# <table>\n# \n# <tr>\n# <td>\n# A =\n# </td>\n# <td>\n# [[[[ 1.74481176 0.86540763 1.13376944]]]\n# \n# \n# [[[ 1.13162939 1.51981682 2.18557541]]]]\n# \n# </td>\n# </tr>\n# <tr>\n# <td>\n# A =\n# </td>\n# <td>\n# [[[[ 0.02105773 -0.20328806 -0.40389855]]]\n# \n# \n# [[[-0.22154621 0.51716526 0.48155844]]]]\n# \n# </td>\n# </tr>\n# \n# </table>\n# \n\n# Congratulations! You have now implemented the forward passes of all the layers of a convolutional network. 
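\n# \n# As a quick sanity check on the shape formulas above (a side note, not part of the original assignment): the test cell used $n_{H_{prev}} = n_{W_{prev}} = 4$, $f = 3$ and a stride of 2, so $n_H = \\lfloor (4 - 3)/2 \\rfloor + 1 = 1$ and likewise $n_W = 1$, matching the $(2, 1, 1, 3)$ shape of the pooled outputs printed above.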
\n# \n# The remainder of this notebook is optional, and will not be graded.\n# \n\n# ## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)\n# \n# In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish, however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like. \n# \n# When you implemented a simple (fully connected) neural network in an earlier course, you used backpropagation to compute the derivatives with respect to the cost in order to update the parameters. Similarly, in convolutional neural networks you need to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we present them briefly below.\n# \n# ### 5.1 - Convolutional layer backward pass \n# \n# Let's start by implementing the backward pass for a CONV layer. \n# \n# #### 5.1.1 - Computing dA:\n# This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:\n# \n# $$ dA += \\sum_{h=0}^{n_H} \\sum_{w=0}^{n_W} W_c \\times dZ_{hw} \\tag{1}$$\n# \n# Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed with a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices. \n# \n# In code, inside the appropriate for-loops, this formula translates into:\n# ```python\n# da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n# ```\n# \n# #### 5.1.2 - Computing dW:\n# This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:\n# \n# $$ dW_c += \\sum_{h=0}^{n_H} \\sum_{w=0}^{n_W} a_{slice} \\times dZ_{hw} \\tag{2}$$\n# \n# Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$. \n# \n# In code, inside the appropriate for-loops, this formula translates into:\n# ```python\n# dW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n# ```\n# \n# #### 5.1.3 - Computing db:\n# \n# This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:\n# \n# $$ db = \\sum_h \\sum_w dZ_{hw} \\tag{3}$$\n# \n# As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost. \n# \n# In code, inside the appropriate for-loops, this formula translates into:\n# ```python\n# db[:,:,:,c] += dZ[i, h, w, c]\n# ```\n# \n# **Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. 
You should then compute the derivatives using formulas 1, 2 and 3 above. \n\n# In[ ]:\n\ndef conv_backward(dZ, cache):\n \"\"\"\n Implement the backward propagation for a convolution function\n \n Arguments:\n dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward(), output of conv_forward()\n \n Returns:\n dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),\n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n dW -- gradient of the cost with respect to the weights of the conv layer (W)\n numpy array of shape (f, f, n_C_prev, n_C)\n db -- gradient of the cost with respect to the biases of the conv layer (b)\n numpy array of shape (1, 1, 1, n_C)\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve information from \"cache\"\n (A_prev, W, b, hparameters) = None\n \n # Retrieve dimensions from A_prev's shape\n (m, n_H_prev, n_W_prev, n_C_prev) = None\n \n # Retrieve dimensions from W's shape\n (f, f, n_C_prev, n_C) = None\n \n # Retrieve information from \"hparameters\"\n stride = None\n pad = None\n \n # Retrieve dimensions from dZ's shape\n (m, n_H, n_W, n_C) = None\n \n # Initialize dA_prev, dW, db with the correct shapes\n dA_prev = None \n dW = None\n db = None\n\n # Pad A_prev and dA_prev\n A_prev_pad = None\n dA_prev_pad = None\n \n for i in range(None): # loop over the training examples\n \n # select ith training example from A_prev_pad and dA_prev_pad\n a_prev_pad = None\n da_prev_pad = None\n \n for h in range(None): # loop over vertical axis of the output volume\n for w in range(None): # loop over horizontal axis of the output volume\n for c in range(None): # loop over the channels of the output volume\n \n # Find the corners of the current \"slice\"\n vert_start = None\n vert_end = None\n horiz_start = None\n horiz_end = None\n \n # Use the corners to define the slice from a_prev_pad\n a_slice = None\n\n # Update gradients for the window and the filter's parameters using the code formulas given above\n da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += None\n dW[:,:,:,c] += None\n db[:,:,:,c] += None\n \n # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])\n dA_prev[i, :, :, :] = None\n ### END CODE HERE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))\n \n return dA_prev, dW, db\n\n\n# In[ ]:\n\nnp.random.seed(1)\ndA, dW, db = conv_backward(Z, cache_conv)\nprint(\"dA_mean =\", np.mean(dA))\nprint(\"dW_mean =\", np.mean(dW))\nprint(\"db_mean =\", np.mean(db))\n\n\n# **Expected Output:**\n# <table>\n# <tr>\n# <td>\n# **dA_mean**\n# </td>\n# <td>\n# 1.45243777754\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **dW_mean**\n# </td>\n# <td>\n# 1.72699145831\n# </td>\n# </tr>\n# <tr>\n# <td>\n# **db_mean**\n# </td>\n# <td>\n# 7.83923256462\n# </td>\n# </tr>\n# \n# </table>\n# \n\n# ## 5.2 Pooling layer - backward pass\n# \n# Next, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer. 
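\n# \n# Before moving on, here is one way the `conv_backward` skeleton above could be completed (a sketch that simply follows the in-code hints and formulas 1, 2 and 3; it is ungraded and not necessarily identical to the official solution):\n# ```python\n# (A_prev, W, b, hparameters) = cache\n# (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape\n# (f, f, n_C_prev, n_C) = W.shape\n# stride = hparameters[\"stride\"]\n# pad = hparameters[\"pad\"]\n# (m, n_H, n_W, n_C) = dZ.shape\n# dA_prev = np.zeros(A_prev.shape)  # gradients accumulate, so start from zero\n# dW = np.zeros(W.shape)\n# db = np.zeros(b.shape)\n# A_prev_pad = zero_pad(A_prev, pad)  # pad exactly as in the forward pass\n# dA_prev_pad = zero_pad(dA_prev, pad)\n# for i in range(m):\n#     a_prev_pad = A_prev_pad[i]\n#     da_prev_pad = dA_prev_pad[i]\n#     for h in range(n_H):\n#         for w in range(n_W):\n#             for c in range(n_C):\n#                 vert_start = h * stride\n#                 vert_end = vert_start + f\n#                 horiz_start = w * stride\n#                 horiz_end = horiz_start + f\n#                 a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n#                 # formulas 1, 2 and 3 from above\n#                 da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]\n#                 dW[:,:,:,c] += a_slice * dZ[i, h, w, c]\n#                 db[:,:,:,c] += dZ[i, h, w, c]\n#     dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]  # strip the padding back off\n# ```\n# Each output gradient $dZ_{hw}$ is scattered back onto the padded window it came from, which is why the padding is stripped only at the end.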
\n# \n# ### 5.2.1 Max pooling - backward pass \n# \n# Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following: \n# \n# $$ X = \\begin{bmatrix}\n# 1 && 3 \\\\\n# 4 && 2\n# \\end{bmatrix} \\quad \\rightarrow \\quad M =\\begin{bmatrix}\n# 0 && 0 \\\\\n# 1 && 0\n# \\end{bmatrix}\\tag{4}$$\n# \n# As you can see, this function creates a \"mask\" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask. \n# \n# **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward. \n# Hints:\n# - [np.max()]() may be helpful. It computes the maximum of an array.\n# - If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:\n# ```\n# A[i,j] = True if X[i,j] = x\n# A[i,j] = False if X[i,j] != x\n# ```\n# - Here, you don't need to consider cases where there are several maxima in a matrix.\n\n# In[ ]:\n\ndef create_mask_from_window(x):\n \"\"\"\n Creates a mask from an input matrix x, to identify the max entry of x.\n \n Arguments:\n x -- Array of shape (f, f)\n \n Returns:\n mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.\n \"\"\"\n \n ### START CODE HERE ### (≈1 line)\n mask = None\n ### END CODE HERE ###\n \n return mask\n\n\n# In[ ]:\n\nnp.random.seed(1)\nx = np.random.randn(2,3)\nmask = create_mask_from_window(x)\nprint('x = ', x)\nprint(\"mask = \", mask)\n\n\n# **Expected Output:** \n# \n# <table> \n# <tr> \n# <td>\n# \n# **x =**\n# </td>\n# \n# <td>\n# \n# [[ 1.62434536 -0.61175641 -0.52817175] <br>\n# [-1.07296862 0.86540763 -2.3015387 ]]\n# \n# </td>\n# </tr>\n# \n# <tr> \n# <td>\n# **mask =**\n# </td>\n# <td>\n# [[ True False False] <br>\n# [False False False]]\n# </td>\n# </tr>\n# \n# \n# </table>\n\n# Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will \"propagate\" the gradient back to this particular input value that had influenced the cost. \n\n# ### 5.2.2 - Average pooling - backward pass \n# \n# In max pooling, for each input window, all the \"influence\" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.\n# \n# For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like: \n# $$ dZ = 1 \\quad \\rightarrow \\quad dZ =\\begin{bmatrix}\n# 1/4 && 1/4 \\\\\n# 1/4 && 1/4\n# \\end{bmatrix}\\tag{5}$$\n# \n# This implies that each position in the $dZ$ matrix contributes equally to output because in the forward pass, we took an average. \n# \n# **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. 
[Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)\n\n# In[ ]:\n\ndef distribute_value(dz, shape):\n \"\"\"\n Distributes the input value in the matrix of dimension shape\n \n Arguments:\n dz -- input scalar\n shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz\n \n Returns:\n a -- Array of size (n_H, n_W) for which we distributed the value of dz\n \"\"\"\n \n ### START CODE HERE ###\n # Retrieve dimensions from shape (≈1 line)\n (n_H, n_W) = None\n \n # Compute the value to distribute on the matrix (≈1 line)\n average = None\n \n # Create a matrix where every entry is the \"average\" value (≈1 line)\n a = None\n ### END CODE HERE ###\n \n return a\n\n\n# In[ ]:\n\na = distribute_value(2, (2,2))\nprint('distributed value =', a)\n\n\n# **Expected Output**: \n# \n# <table> \n# <tr> \n# <td>\n# distributed_value =\n# </td>\n# <td>\n# [[ 0.5 0.5]\n# <br\\> \n# [ 0.5 0.5]]\n# </td>\n# </tr>\n# </table>\n\n# ### 5.2.3 Putting it together: Pooling backward \n# \n# You now have everything you need to compute backward propagation on a pooling layer.\n# \n# **Exercise**: Implement the `pool_backward` function in both modes (`\"max\"` and `\"average\"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.\n\n# In[ ]:\n\ndef pool_backward(dA, cache, mode = \"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n \n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters \n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n \n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n \n ### START CODE HERE ###\n \n # Retrieve information from cache (≈1 line)\n (A_prev, hparameters) = None\n \n # Retrieve hyperparameters from \"hparameters\" (≈2 lines)\n stride = None\n f = None\n \n # Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)\n m, n_H_prev, n_W_prev, n_C_prev = None\n m, n_H, n_W, n_C = None\n \n # Initialize dA_prev with zeros (≈1 line)\n dA_prev = None\n \n for i in range(None): # loop over the training examples\n \n # select training example from A_prev (≈1 line)\n a_prev = None\n \n for h in range(None): # loop on the vertical axis\n for w in range(None): # loop on the horizontal axis\n for c in range(None): # loop over the channels (depth)\n \n # Find the corners of the current \"slice\" (≈4 lines)\n vert_start = None\n vert_end = None\n horiz_start = None\n horiz_end = None\n \n # Compute the backward propagation in both modes.\n if mode == \"max\":\n \n # Use the corners and \"c\" to define the current slice from a_prev (≈1 line)\n a_prev_slice = None\n # Create the mask from a_prev_slice (≈1 line)\n mask = None\n # Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n elif mode == 
\"average\":\n \n # Get the value a from dA (≈1 line)\n da = None\n # Define the shape of the filter as fxf (≈1 line)\n shape = None\n # Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += None\n \n ### END CODE ###\n \n # Making sure your output shape is correct\n assert(dA_prev.shape == A_prev.shape)\n \n return dA_prev\n\n\n# In[ ]:\n\nnp.random.seed(1)\nA_prev = np.random.randn(5, 5, 3, 2)\nhparameters = {\"stride\" : 1, \"f\": 2}\nA, cache = pool_forward(A_prev, hparameters)\ndA = np.random.randn(5, 4, 2, 2)\n\ndA_prev = pool_backward(dA, cache, mode = \"max\")\nprint(\"mode = max\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) \nprint()\ndA_prev = pool_backward(dA, cache, mode = \"average\")\nprint(\"mode = average\")\nprint('mean of dA = ', np.mean(dA))\nprint('dA_prev[1,1] = ', dA_prev[1,1]) \n\n\n# **Expected Output**: \n# \n# mode = max:\n# <table> \n# <tr> \n# <td>\n# \n# **mean of dA =**\n# </td>\n# \n# <td>\n# \n# 0.145713902729\n# \n# </td>\n# </tr>\n# \n# <tr> \n# <td>\n# **dA_prev[1,1] =** \n# </td>\n# <td>\n# [[ 0. 0. ] <br>\n# [ 5.05844394 -1.68282702] <br>\n# [ 0. 0. ]]\n# </td>\n# </tr>\n# </table>\n# \n# mode = average\n# <table> \n# <tr> \n# <td>\n# \n# **mean of dA =**\n# </td>\n# \n# <td>\n# \n# 0.145713902729\n# \n# </td>\n# </tr>\n# \n# <tr> \n# <td>\n# **dA_prev[1,1] =** \n# </td>\n# <td>\n# [[ 0.08485462 0.2787552 ] <br>\n# [ 1.26461098 -0.25749373] <br>\n# [ 1.17975636 -0.53624893]]\n# </td>\n# </tr>\n# </table>\n\n# ### Congratulations !\n# \n# Congratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.xlabel", "numpy.random.permutation", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.gca" ], [ "numpy.max", "numpy.pad", "numpy.zeros", "numpy.random.seed", "numpy.sum", "numpy.random.randn", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.multiply" ] ]
0000marcosg/Person_remover
[ "a3e9ba198b3bdc6645e53e0d652430534c362442" ]
[ "yolo/yolov3/models.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import (\n Add,\n Concatenate,\n Conv2D,\n Input,\n Lambda,\n LeakyReLU,\n MaxPool2D,\n UpSampling2D,\n ZeroPadding2D,\n)\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.losses import (\n binary_crossentropy,\n sparse_categorical_crossentropy\n)\nfrom .batch_norm import BatchNormalization\nfrom .utils import broadcast_iou\n\nyolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),\n (59, 119), (116, 90), (156, 198), (373, 326)],\n np.float32) / 416\nyolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\n\nyolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),\n (81, 82), (135, 169), (344, 319)],\n np.float32) / 416\nyolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])\n\n\ndef DarknetConv(x, filters, size, strides=1, batch_norm=True):\n if strides == 1:\n padding = 'same'\n else:\n x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding\n padding = 'valid'\n x = Conv2D(filters=filters, kernel_size=size,\n strides=strides, padding=padding,\n use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)\n if batch_norm:\n x = BatchNormalization()(x)\n x = LeakyReLU(alpha=0.1)(x)\n return x\n\n\ndef DarknetResidual(x, filters):\n prev = x\n x = DarknetConv(x, filters // 2, 1)\n x = DarknetConv(x, filters, 3)\n x = Add()([prev, x])\n return x\n\n\ndef DarknetBlock(x, filters, blocks):\n x = DarknetConv(x, filters, 3, strides=2)\n for _ in range(blocks):\n x = DarknetResidual(x, filters)\n return x\n\n\ndef Darknet(name=None):\n x = inputs = Input([None, None, 3])\n x = DarknetConv(x, 32, 3)\n x = DarknetBlock(x, 64, 1)\n x = DarknetBlock(x, 128, 2) # skip connection\n x = x_36 = DarknetBlock(x, 256, 8) # skip connection\n x = x_61 = DarknetBlock(x, 512, 8)\n x = DarknetBlock(x, 1024, 4)\n return tf.keras.Model(inputs, (x_36, x_61, x), name=name)\n\n\ndef DarknetTiny(name=None):\n x = inputs = Input([None, None, 3])\n x = DarknetConv(x, 16, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 32, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 64, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 128, 3)\n x = MaxPool2D(2, 2, 'same')(x)\n x = x_8 = DarknetConv(x, 256, 3) # skip connection\n x = MaxPool2D(2, 2, 'same')(x)\n x = DarknetConv(x, 512, 3)\n x = MaxPool2D(2, 1, 'same')(x)\n x = DarknetConv(x, 1024, 3)\n return tf.keras.Model(inputs, (x_8, x), name=name)\n\n\ndef YoloConv(filters, name=None):\n def yolo_conv(x_in):\n if isinstance(x_in, tuple):\n inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\n x, x_skip = inputs\n\n # concat with skip connection\n x = DarknetConv(x, filters, 1)\n x = UpSampling2D(2)(x)\n x = Concatenate()([x, x_skip])\n else:\n x = inputs = Input(x_in.shape[1:])\n\n x = DarknetConv(x, filters, 1)\n x = DarknetConv(x, filters * 2, 3)\n x = DarknetConv(x, filters, 1)\n x = DarknetConv(x, filters * 2, 3)\n x = DarknetConv(x, filters, 1)\n return Model(inputs, x, name=name)(x_in)\n return yolo_conv\n\n\ndef YoloConvTiny(filters, name=None):\n def yolo_conv(x_in):\n if isinstance(x_in, tuple):\n inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])\n x, x_skip = inputs\n\n # concat with skip connection\n x = DarknetConv(x, filters, 1)\n x = UpSampling2D(2)(x)\n x = Concatenate()([x, x_skip])\n else:\n x = inputs = Input(x_in.shape[1:])\n x = DarknetConv(x, filters, 1)\n\n return Model(inputs, x, name=name)(x_in)\n return yolo_conv\n\n\ndef YoloOutput(filters, 
anchors, classes, name=None):\n def yolo_output(x_in):\n x = inputs = Input(x_in.shape[1:])\n x = DarknetConv(x, filters * 2, 3)\n x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)\n x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],\n anchors, classes + 5)))(x)\n return tf.keras.Model(inputs, x, name=name)(x_in)\n return yolo_output\n\n\ndef yolo_boxes(pred, anchors, classes):\n # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))\n grid_size = tf.shape(pred)[1]\n box_xy, box_wh, objectness, class_probs = tf.split(\n pred, (2, 2, 1, classes), axis=-1)\n\n box_xy = tf.sigmoid(box_xy)\n objectness = tf.sigmoid(objectness)\n class_probs = tf.sigmoid(class_probs)\n pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss\n\n # !!! grid[x][y] == (y, x)\n grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]\n\n box_xy = (box_xy + tf.cast(grid, tf.float32)) / \\\n tf.cast(grid_size, tf.float32)\n box_wh = tf.exp(box_wh) * anchors\n\n box_x1y1 = box_xy - box_wh / 2\n box_x2y2 = box_xy + box_wh / 2\n bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)\n\n return bbox, objectness, class_probs, pred_box\n\n\ndef yolo_nms(outputs, anchors, masks, classes):\n # boxes, conf, type\n b, c, t = [], [], []\n\n for o in outputs:\n b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))\n c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))\n t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))\n\n bbox = tf.concat(b, axis=1)\n confidence = tf.concat(c, axis=1)\n class_probs = tf.concat(t, axis=1)\n\n scores = confidence * class_probs\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),\n scores=tf.reshape(\n scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),\n max_output_size_per_class=100,\n max_total_size=100,\n iou_threshold=0.5,\n score_threshold=0.5\n )\n\n return boxes, scores, classes, valid_detections\n\n\ndef YoloV3(size=None, channels=3, anchors=yolo_anchors,\n masks=yolo_anchor_masks, classes=80, training=False):\n x = inputs = Input([size, size, channels])\n\n x_36, x_61, x = Darknet(name='yolo_darknet')(x)\n\n x = YoloConv(512, name='yolo_conv_0')(x)\n output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)\n\n x = YoloConv(256, name='yolo_conv_1')((x, x_61))\n output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)\n\n x = YoloConv(128, name='yolo_conv_2')((x, x_36))\n output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)\n\n if training:\n return Model(inputs, (output_0, output_1, output_2), name='yolov3')\n\n boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\n name='yolo_boxes_0')(output_0)\n boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\n name='yolo_boxes_1')(output_1)\n boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),\n name='yolo_boxes_2')(output_2)\n\n outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\n name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))\n\n return Model(inputs, outputs, name='yolov3')\n\n\ndef YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,\n masks=yolo_tiny_anchor_masks, classes=80, training=False):\n x = inputs = Input([size, size, channels])\n\n x_8, x = DarknetTiny(name='yolo_darknet')(x)\n\n x = YoloConvTiny(256, 
name='yolo_conv_0')(x)\n output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)\n\n x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))\n output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)\n\n if training:\n return Model(inputs, (output_0, output_1), name='yolov3')\n\n boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),\n name='yolo_boxes_0')(output_0)\n boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),\n name='yolo_boxes_1')(output_1)\n outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),\n name='yolo_nms')((boxes_0[:3], boxes_1[:3]))\n return Model(inputs, outputs, name='yolov3_tiny')\n\n\ndef YoloLoss(anchors, classes=80, ignore_thresh=0.5):\n def yolo_loss(y_true, y_pred):\n # 1. transform all pred outputs\n # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))\n pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(\n y_pred, anchors, classes)\n pred_xy = pred_xywh[..., 0:2]\n pred_wh = pred_xywh[..., 2:4]\n\n # 2. transform all true outputs\n # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))\n true_box, true_obj, true_class_idx = tf.split(\n y_true, (4, 1, 1), axis=-1)\n true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2\n true_wh = true_box[..., 2:4] - true_box[..., 0:2]\n\n # give higher weights to small boxes\n box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]\n\n # 3. inverting the pred box equations\n grid_size = tf.shape(y_true)[1]\n grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)\n true_xy = true_xy * tf.cast(grid_size, tf.float32) - \\\n tf.cast(grid, tf.float32)\n true_wh = tf.math.log(true_wh / anchors)\n true_wh = tf.where(tf.math.is_inf(true_wh),\n tf.zeros_like(true_wh), true_wh)\n\n # 4. calculate all masks\n obj_mask = tf.squeeze(true_obj, -1)\n # ignore false positive when iou is over threshold\n true_box_flat = tf.boolean_mask(true_box, tf.cast(obj_mask, tf.bool))\n best_iou = tf.reduce_max(broadcast_iou(\n pred_box, true_box_flat), axis=-1)\n ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)\n\n # 5. calculate all losses\n xy_loss = obj_mask * box_loss_scale * \\\n tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)\n wh_loss = obj_mask * box_loss_scale * \\\n tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)\n obj_loss = binary_crossentropy(true_obj, pred_obj)\n obj_loss = obj_mask * obj_loss + \\\n (1 - obj_mask) * ignore_mask * obj_loss\n # TODO: use binary_crossentropy instead\n class_loss = obj_mask * sparse_categorical_crossentropy(\n true_class_idx, pred_class)\n\n # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)\n xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))\n wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))\n obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))\n class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))\n\n return xy_loss + wh_loss + obj_loss + class_loss\n return yolo_loss\n" ]
[ [ "tensorflow.exp", "tensorflow.keras.layers.Add", "tensorflow.keras.Model", "tensorflow.zeros_like", "tensorflow.stack", "tensorflow.cast", "tensorflow.shape", "tensorflow.concat", "tensorflow.sigmoid", "tensorflow.math.log", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.losses.sparse_categorical_crossentropy", "tensorflow.squeeze", "tensorflow.split", "tensorflow.keras.layers.Concatenate", "numpy.array", "tensorflow.range", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.layers.MaxPool2D", "tensorflow.reduce_sum", "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.losses.binary_crossentropy", "tensorflow.keras.layers.Input", "tensorflow.square", "tensorflow.math.is_inf" ] ]
gatarelib/PyTorchCV
[ "5191d0ddc5c42a4cc8dc5451aa14c263c2f3e77f" ]
[ "methods/det/single_shot_detector.py" ]
[ "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You ([email protected])\n# Class Definition for Single Shot Detector.\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport torch\nimport torch.backends.cudnn as cudnn\n\nfrom datasets.det_data_loader import DetDataLoader\nfrom datasets.tools.data_transformer import DataTransformer\nfrom loss.det_loss_manager import DetLossManager\nfrom methods.det.single_shot_detector_test import SingleShotDetectorTest\nfrom methods.tools.module_utilizer import ModuleUtilizer\nfrom methods.tools.optim_scheduler import OptimScheduler\nfrom models.det_model_manager import DetModelManager\nfrom utils.layers.det.ssd_priorbox_layer import SSDPriorBoxLayer\nfrom utils.layers.det.ssd_target_generator import SSDTargetGenerator\nfrom utils.tools.average_meter import AverageMeter\nfrom utils.tools.logger import Logger as Log\nfrom val.scripts.det.det_running_score import DetRunningScore\nfrom vis.visualizer.det_visualizer import DetVisualizer\n\n\nclass SingleShotDetector(object):\n \"\"\"\n The class for Single Shot Detector. Include train, val, test & predict.\n \"\"\"\n def __init__(self, configer):\n self.configer = configer\n self.batch_time = AverageMeter()\n self.data_time = AverageMeter()\n self.train_losses = AverageMeter()\n self.val_losses = AverageMeter()\n self.det_visualizer = DetVisualizer(configer)\n self.det_loss_manager = DetLossManager(configer)\n self.det_model_manager = DetModelManager(configer)\n self.det_data_loader = DetDataLoader(configer)\n self.ssd_target_generator = SSDTargetGenerator(configer)\n self.ssd_priorbox_layer = SSDPriorBoxLayer(configer)\n self.det_running_score = DetRunningScore(configer)\n self.module_utilizer = ModuleUtilizer(configer)\n self.optim_scheduler = OptimScheduler(configer)\n self.data_transformer = DataTransformer(configer)\n\n self.det_net = None\n self.train_loader = None\n self.val_loader = None\n self.optimizer = None\n self.scheduler = None\n\n self._init_model()\n\n def _init_model(self):\n self.det_net = self.det_model_manager.object_detector()\n self.det_net = self.module_utilizer.load_net(self.det_net)\n\n self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(self._get_parameters())\n\n self.train_loader = self.det_data_loader.get_trainloader()\n self.val_loader = self.det_data_loader.get_valloader()\n\n self.det_loss = self.det_loss_manager.get_det_loss('ssd_multibox_loss')\n\n def _get_parameters(self):\n\n return self.det_net.parameters()\n\n def warm_lr(self, batch_len):\n \"\"\"Sets the learning rate\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n warm_iters = self.configer.get('lr', 'warm')['warm_epoch'] * batch_len\n warm_lr = self.configer.get('lr', 'warm')['warm_lr']\n if self.configer.get('iters') < warm_iters:\n lr_delta = (self.configer.get('lr', 'base_lr') - warm_lr) * self.configer.get('iters') / warm_iters\n lr = warm_lr + lr_delta\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n def __train(self):\n \"\"\"\n Train function of every epoch during train phase.\n \"\"\"\n self.det_net.train()\n start_time = time.time()\n # Adjust the learning rate after every epoch.\n self.configer.plus_one('epoch')\n self.scheduler.step(self.configer.get('epoch'))\n\n # data_tuple: (inputs, heatmap, maskmap, vecmap)\n for i, data_dict in enumerate(self.train_loader):\n if not 
self.configer.is_empty('lr', 'is_warm') and self.configer.get('lr', 'is_warm'):\n self.warm_lr(len(self.train_loader))\n\n inputs = data_dict['img']\n batch_gt_bboxes = data_dict['bboxes']\n batch_gt_labels = data_dict['labels']\n # Change the data type.\n inputs = self.module_utilizer.to_device(inputs)\n\n self.data_time.update(time.time() - start_time)\n # Forward pass.\n feat_list, loc, cls = self.det_net(inputs)\n\n bboxes, labels = self.ssd_target_generator(feat_list, batch_gt_bboxes,\n batch_gt_labels, [inputs.size(3), inputs.size(2)])\n\n bboxes, labels = self.module_utilizer.to_device(bboxes, labels)\n # Compute the loss of the train batch & backward.\n loss = self.det_loss(loc, bboxes, cls, labels)\n\n self.train_losses.update(loss.item(), inputs.size(0))\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update the vars of the train phase.\n self.batch_time.update(time.time() - start_time)\n start_time = time.time()\n self.configer.plus_one('iters')\n\n # Print the log info & reset the states.\n if self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0:\n Log.info('Train Epoch: {0}\\tTrain Iteration: {1}\\t'\n 'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\\t'\n 'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\\n'\n 'Learning rate = {3}\\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\\n'.format(\n self.configer.get('epoch'), self.configer.get('iters'),\n self.configer.get('solver', 'display_iter'),\n self.scheduler.get_lr(), batch_time=self.batch_time,\n data_time=self.data_time, loss=self.train_losses))\n self.batch_time.reset()\n self.data_time.reset()\n self.train_losses.reset()\n\n # Check to val the current model.\n if self.val_loader is not None and \\\n (self.configer.get('iters')) % self.configer.get('solver', 'test_interval') == 0:\n self.__val()\n\n def __val(self):\n \"\"\"\n Validation function during the train phase.\n \"\"\"\n self.det_net.eval()\n start_time = time.time()\n with torch.no_grad():\n for j, data_dict in enumerate(self.val_loader):\n inputs = data_dict['img']\n batch_gt_bboxes = data_dict['bboxes']\n batch_gt_labels = data_dict['labels']\n inputs = self.module_utilizer.to_device(inputs)\n input_size = [inputs.size(3), inputs.size(2)]\n # Forward pass.\n feat_list, loc, cls = self.det_net(inputs)\n bboxes, labels = self.ssd_target_generator(feat_list, batch_gt_bboxes,\n batch_gt_labels, input_size)\n\n bboxes, labels = self.module_utilizer.to_device(bboxes, labels)\n # Compute the loss of the val batch.\n loss = self.det_loss(loc, bboxes, cls, labels)\n self.val_losses.update(loss.item(), inputs.size(0))\n\n batch_detections = SingleShotDetectorTest.decode(loc, cls,\n self.ssd_priorbox_layer(feat_list, input_size),\n self.configer, input_size)\n batch_pred_bboxes = self.__get_object_list(batch_detections)\n # batch_pred_bboxes = self._get_gt_object_list(batch_gt_bboxes, batch_gt_labels)\n self.det_running_score.update(batch_pred_bboxes, batch_gt_bboxes, batch_gt_labels)\n\n # Update the vars of the val phase.\n self.batch_time.update(time.time() - start_time)\n start_time = time.time()\n\n self.module_utilizer.save_net(self.det_net, save_mode='iters')\n # Print the log info & reset the states.\n Log.info(\n 'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\\t'\n 'Loss {loss.avg:.8f}\\n'.format(\n batch_time=self.batch_time, loss=self.val_losses))\n Log.info('Val mAP: {}'.format(self.det_running_score.get_mAP()))\n self.det_running_score.reset()\n 
self.batch_time.reset()\n self.val_losses.reset()\n self.det_net.train()\n\n def _get_gt_object_list(self, batch_gt_bboxes, batch_gt_labels):\n batch_pred_bboxes = list()\n for i in range(len(batch_gt_bboxes)):\n object_list = list()\n if batch_gt_bboxes[i].numel() > 0:\n for j in range(batch_gt_bboxes[i].size(0)):\n object_list.append([batch_gt_bboxes[i][j][0].item(), batch_gt_bboxes[i][j][1].item(),\n batch_gt_bboxes[i][j][2].item(), batch_gt_bboxes[i][j][3].item(),\n batch_gt_labels[i][j].item(), 1.0])\n\n batch_pred_bboxes.append(object_list)\n return batch_pred_bboxes\n\n def __get_object_list(self, batch_detections):\n batch_pred_bboxes = list()\n for idx, detections in enumerate(batch_detections):\n object_list = list()\n if detections is not None:\n for x1, y1, x2, y2, conf, cls_pred in detections:\n xmin = x1.cpu().item()\n ymin = y1.cpu().item()\n xmax = x2.cpu().item()\n ymax = y2.cpu().item()\n cf = conf.cpu().item()\n cls_pred = cls_pred.cpu().item() - 1\n object_list.append([xmin, ymin, xmax, ymax, int(cls_pred), float('%.2f' % cf)])\n\n batch_pred_bboxes.append(object_list)\n\n return batch_pred_bboxes\n\n def train(self):\n cudnn.benchmark = True\n if self.configer.get('network', 'resume') is not None and self.configer.get('network', 'resume_val'):\n self.__val()\n\n while self.configer.get('epoch') < self.configer.get('solver', 'max_epoch'):\n self.__train()\n if self.configer.get('epoch') == self.configer.get('solver', 'max_epoch'):\n break\n\n\nif __name__ == \"__main__\":\n # Test class for pose estimator.\n pass\n" ]
[ [ "torch.no_grad" ] ]
JeffHeard/terrapyn
[ "8d883b6c8a729e972c12d6f0c3b2a34ea86dcd88" ]
[ "ows/tasks.py" ]
[ "#!/usr/bin/python\n\nfrom celery.task import Task\nfrom celery.task.sets import subtask\nfrom osgeo import gdal\n\nfrom terrapyn.ows.views import common\n\n\ntry:\n import cairo\n have_cairo = True\nexcept ImportError:\n have_cairo = False\n\ntry:\n import scipy\n have_scipy = True\nexcept ImportError:\n have_scipy = False\n\nimport tempfile\n\nclass DeferredRenderer(Task):\n \"\"\"A deferred renderer abstract class that allows a map provider to use Celery to defer some of the rendering over\n a wide cluster. There are two basic ways to use a Deferred Renderer:\n\n * As a slave of a WMS instance, by assigning :const:task to the WMS view subclass.\n * As a way to pre-cache rendered maps.\n * As an independent renderer for pre-caching or rendering out tiles.\n\n For the first case, something like this works well. In views.py::\n\n class CensusCountyWMSView(WMS):\n title = '2010 TigerLINE Census Counties'\n adapter = GeoDjangoWMSAdapter(CensusCounty, styles = {\n 'default' : default_county_styler\n })\n\n class CensusCountyDeferredWMSView(CensusCountyWMSView):\n task = census_county_renderer\n\n In tasks.py::\n\n @task\n class CountyDeferredRenderer(DeferredRenderer):\n adapter = GeoDjangoWMSAdapter(CensusCounty, styles = {\n 'default' : default_county_styler\n })\n census_county_renderer = registry.tasks[CountyDeferredRenderer.name]\n\n For the second case, you call the DeferredRenderer with cache_only=True and GetMapMixin.Parameters' cleaned data::\n\n for parms in parms_generator:\n census_county_renderer.delay(parms, cache_only=True)\n\n For the third case, you call with a Celery task as callback::\n\n for parms in pyramid_generator.parameter_sequence:\n census_county_renderer.delay(parms, callback=pyramid_generator.task)\n\n In all cases you **must** derive a new class from DeferredRenderer. This is necessary because the renderer shares\n the same WMSAdapter code as the WMS instance. This insures that you have exactly the same map tiles whether you\n render them in a distributed or thread-local fashion.\n \"\"\"\n abstract=True\n\n #: A WMSAdapterBase subclass instance to render your map.\n adapter=None\n\n def run(self, parms, callback=None, cache_only=False):\n \"\"\"\n :param parms: A dict containing the parameters in :class:ga_ows.views.wms.WMSAdapterBase\n :param callback: A Celery subtask, optional, that takes the place of simply returning the rendered data.\n :param cache_only: If true, return no result and only use this task to cache the data calculated. Useful for pre-calculating tiles.\n :return: A binary stream containing data formatted in a particular file format, such as JPEG, GeoTIFF... anything GDAL can write.\n \"\"\"\n if parms['format'].startswith('image/'):\n format = parms['format'][len('image/'):]\n else:\n format = parms['format']\n\n filter = None\n if parms['filter']:\n filter = json.loads(parms['filter'])\n\n ds = self.adapter.get_2d_dataset(\n layers=parms['layers'],\n srs=parms['srs'],\n bbox=parms['bbox'],\n width=parms['width'],\n height=parms['height'],\n styles=parms['styles'],\n bgcolor=parms['bgcolor'],\n transparent=parms['transparent'],\n time=parms['time'],\n elevation=parms['elevation'],\n v=parms['v'],\n filter = filter\n )\n\n tmp = None\n ret = None\n if not isinstance(ds, gdal.Dataset): # then it == a Cairo imagesurface or numpy array, or at least... 
it'd BETTER be\n if have_cairo and isinstance(ds, cairo.Surface):\n tmp = tempfile.NamedTemporaryFile(suffix='.png')\n ds.write_to_png(tmp.name)\n ds = gdal.Open(tmp.name)\n # TODO add all the appropriate metadata from the request into the dataset if this is being returned as a GeoTIFF\n elif isinstance(ds, file):\n ret = ds\n elif isinstance(ds, StringIO):\n ret = ds\n elif have_scipy:\n tmp = tempfile.NamedTemporaryFile(suffix='.tif')\n scipy.misc.imsave(tmp.name, ds)\n ds = gdal.Open(tmp.name)\n # TODO add all the appropriate metadata from the request into the dataset if this is being returned as a GeoTIFF\n \n if ret:\n return ret\n\n if format == 'tiff' or format == 'geotiff':\n driver = gdal.GetDriverByName('GTiff')\n elif format == 'jpg' or format == 'jpeg':\n driver = gdal.GetDriverByName('jpeg')\n elif format == 'jp2k' or format == 'jpeg2000':\n tmp = tempfile.NamedTemporaryFile(suffix='.jp2')\n driver = gdal.GetDriverByName('jpeg2000')\n else:\n driver = gdal.GetDriverByName(format.encode('ascii'))\n try:\n tmp = tempfile.NamedTemporaryFile(suffix='.' + format)\n ds2 = driver.CreateCopy(tmp.name, ds)\n del ds2\n tmp.seek(0)\n ret = tmp.read()\n if callback:\n subtask(callback).delay(ret, parms)\n return None\n elif cache_only:\n self.adapter.cache_result(ret, **parms)\n return None\n else:\n self.adapter.cache_result(ret, **parms)\n return ret\n except Exception as ex:\n del tmp\n raise common.NoApplicableCode(str(ex))\n\n\n\n" ]
[ [ "scipy.misc.imsave" ] ]
intel/Theano-dev
[ "6ca6fd4646f9e958058c7bce52cd51923c05c2f4" ]
[ "theano/tensor/fft.py" ]
[ "from __future__ import absolute_import, print_function, division\nimport numpy as np\nfrom theano import gof\nimport theano.tensor as T\nfrom theano.gradient import DisconnectedType\n\n\nclass RFFTOp(gof.Op):\n\n __props__ = ()\n\n def output_type(self, inp):\n # add extra dim for real/imag\n return T.TensorType(inp.dtype,\n broadcastable=[False] * (inp.type.ndim + 1))\n\n def make_node(self, a, s=None):\n a = T.as_tensor_variable(a)\n if a.ndim < 2:\n raise TypeError('%s: input must have dimension > 2, with first dimension batches' %\n self.__class__.__name__)\n\n if s is None:\n s = a.shape[1:]\n s = T.as_tensor_variable(s)\n else:\n s = T.as_tensor_variable(s)\n if (not s.dtype.startswith('int')) and \\\n (not s.dtype.startswith('uint')):\n raise TypeError('%s: length of the transformed axis must be'\n ' of type integer' % self.__class__.__name__)\n return gof.Apply(self, [a, s], [self.output_type(a)()])\n\n def perform(self, node, inputs, output_storage):\n a = inputs[0]\n s = inputs[1]\n\n A = np.fft.rfftn(a, s=tuple(s))\n # Format output with two extra dimensions for real and imaginary\n # parts.\n out = np.zeros(A.shape + (2,), dtype=a.dtype)\n out[..., 0], out[..., 1] = np.real(A), np.imag(A)\n output_storage[0][0] = out\n\n def grad(self, inputs, output_grads):\n gout, = output_grads\n s = inputs[1]\n # Divide the last dimension of the output gradients by 2, they are\n # double-counted by the real-IFFT due to symmetry, except the first\n # and last elements (for even transforms) which are unique.\n idx = [slice(None)] * (gout.ndim - 2) \\\n + [slice(1, (s[-1] // 2) + (s[-1] % 2))] + [slice(None)]\n gout = T.set_subtensor(gout[idx], gout[idx] * 0.5)\n return [irfft_op(gout, s), DisconnectedType()()]\n\n def connection_pattern(self, node):\n # Specificy that shape input parameter has no connection to graph and gradients.\n return [[True], [False]]\n\nrfft_op = RFFTOp()\n\n\nclass IRFFTOp(gof.Op):\n\n __props__ = ()\n\n def output_type(self, inp):\n # remove extra dim for real/imag\n return T.TensorType(inp.dtype,\n broadcastable=[False] * (inp.type.ndim - 1))\n\n def make_node(self, a, s=None):\n a = T.as_tensor_variable(a)\n if a.ndim < 3:\n raise TypeError('%s: input must have dimension >= 3, with ' %\n self.__class__.__name__ +\n 'first dimension batches and last real/imag parts')\n\n if s is None:\n s = a.shape[1:-1]\n s = T.set_subtensor(s[-1], (s[-1] - 1) * 2)\n s = T.as_tensor_variable(s)\n else:\n s = T.as_tensor_variable(s)\n if (not s.dtype.startswith('int')) and \\\n (not s.dtype.startswith('uint')):\n raise TypeError('%s: length of the transformed axis must be'\n ' of type integer' % self.__class__.__name__)\n return gof.Apply(self, [a, s], [self.output_type(a)()])\n\n def perform(self, node, inputs, output_storage):\n a = inputs[0]\n s = inputs[1]\n\n # Reconstruct complex array from two float dimensions\n inp = a[..., 0] + 1j * a[..., 1]\n out = np.fft.irfftn(inp, s=tuple(s))\n # Remove numpy's default normalization\n # Cast to input type (numpy outputs float64 by default)\n output_storage[0][0] = (out * s.prod()).astype(a.dtype)\n\n def grad(self, inputs, output_grads):\n gout, = output_grads\n s = inputs[1]\n gf = rfft_op(gout, s)\n # Multiply the last dimension of the gradient by 2, they represent\n # both positive and negative frequencies, except the first\n # and last elements (for even transforms) which are unique.\n idx = [slice(None)] * (gf.ndim - 2) \\\n + [slice(1, (s[-1] // 2) + (s[-1] % 2))] + [slice(None)]\n gf = T.set_subtensor(gf[idx], gf[idx] * 2)\n 
return [gf, DisconnectedType()()]\n\n def connection_pattern(self, node):\n # Specify that the shape input parameter has no connection to the graph and gradients.\n return [[True], [False]]\n\nirfft_op = IRFFTOp()\n\n\ndef rfft(inp, norm=None):\n \"\"\"\n Performs the fast Fourier transform of a real-valued input.\n\n The input must be a real-valued variable of dimensions (m, ..., n).\n It performs FFTs of size (..., n) on m batches.\n\n The output is a tensor of dimensions (m, ..., n//2+1, 2). The second to\n last dimension of the output contains the n//2+1 non-trivial elements of\n the real-valued FFTs. The real and imaginary parts are stored as a pair of\n float arrays.\n\n Parameters\n ----------\n inp\n Array of floats of size (m, ..., n), containing m inputs of\n size (..., n).\n norm : {None, 'ortho', 'no_norm'}\n Normalization of transform. Following numpy, default *None* normalizes\n only the inverse transform by n, 'ortho' yields the unitary transform\n (:math:`1/\\sqrt n` forward and inverse). In addition, 'no_norm' leaves\n the transform unnormalized.\n\n \"\"\"\n\n s = inp.shape[1:]\n cond_norm = _unitary(norm)\n scaling = 1\n if cond_norm == \"ortho\":\n scaling = T.sqrt(s.prod().astype(inp.dtype))\n\n return rfft_op(inp, s) / scaling\n\n\ndef irfft(inp, norm=None, is_odd=False):\n \"\"\"\n Performs the inverse fast Fourier Transform with real-valued output.\n\n The input is a variable of dimensions (m, ..., n//2+1, 2)\n representing the non-trivial elements of m real-valued Fourier transforms\n of initial size (..., n). The real and imaginary parts are stored as a\n pair of float arrays.\n\n The output is a real-valued variable of dimensions (m, ..., n)\n giving the m inverse FFTs.\n\n Parameters\n ----------\n inp\n Array of size (m, ..., n//2+1, 2), containing m inputs\n with n//2+1 non-trivial elements on the last dimension and real\n and imaginary parts stored as separate real arrays.\n norm : {None, 'ortho', 'no_norm'}\n Normalization of transform. Following numpy, default *None* normalizes\n only the inverse transform by n, 'ortho' yields the unitary transform\n (:math:`1/\\sqrt n` forward and inverse). In addition, 'no_norm' leaves\n the transform unnormalized.\n is_odd : {True, False}\n Set to True to get a real inverse transform output with an odd last dimension\n of length (N-1)*2 + 1 for an input last dimension of length N.\n\n \"\"\"\n\n if is_odd not in (True, False):\n raise ValueError(\"Invalid value %s for is_odd, must be True or False\" % is_odd)\n\n s = inp.shape[1:-1]\n if is_odd:\n s = T.set_subtensor(s[-1], (s[-1] - 1) * 2 + 1)\n else:\n s = T.set_subtensor(s[-1], (s[-1] - 1) * 2)\n\n cond_norm = _unitary(norm)\n scaling = 1\n # Numpy's default normalization is 1/N on the inverse transform.\n if cond_norm is None:\n scaling = s.prod().astype(inp.dtype)\n elif cond_norm == \"ortho\":\n scaling = T.sqrt(s.prod().astype(inp.dtype))\n\n return irfft_op(inp, s) / scaling\n\n\ndef _unitary(norm):\n if norm not in (None, \"ortho\", \"no_norm\"):\n raise ValueError(\"Invalid value %s for norm, must be None, 'ortho' or \"\n \"'no_norm'\" % norm)\n return norm\n" ]
[ [ "numpy.imag", "numpy.real", "numpy.zeros" ] ]
ameier3/geopandas
[ "0435306e74c71b870c06ea4e26dc4d4ee85ea9d9" ]
[ "geopandas/geodataframe.py" ]
[ "import json\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom pandas.core.accessor import CachedAccessor\n\nfrom shapely.geometry import mapping, shape\nfrom shapely.geometry.base import BaseGeometry\n\nfrom pyproj import CRS\n\nfrom geopandas.array import GeometryArray, GeometryDtype, from_shapely, to_wkb, to_wkt\nfrom geopandas.base import GeoPandasBase, is_geometry_type\nfrom geopandas.geoseries import GeoSeries, _geoseries_constructor_with_fallback\nimport geopandas.io\nfrom geopandas.explore import _explore\nfrom . import _compat as compat\nfrom ._decorator import doc\n\n\nDEFAULT_GEO_COLUMN_NAME = \"geometry\"\n\n\ndef _geodataframe_constructor_with_fallback(*args, **kwargs):\n \"\"\"\n A flexible constructor for GeoDataFrame._constructor, which falls back\n to returning a DataFrame (if a certain operation does not preserve the\n geometry column)\n \"\"\"\n df = GeoDataFrame(*args, **kwargs)\n geometry_cols_mask = df.dtypes == \"geometry\"\n if len(geometry_cols_mask) == 0 or geometry_cols_mask.sum() == 0:\n df = pd.DataFrame(df)\n\n return df\n\n\ndef _ensure_geometry(data, crs=None):\n \"\"\"\n Ensure the data is of geometry dtype or converted to it.\n\n If input is a (Geo)Series, output is a GeoSeries, otherwise output\n is GeometryArray.\n\n If the input is a GeometryDtype with a set CRS, `crs` is ignored.\n \"\"\"\n if is_geometry_type(data):\n if isinstance(data, Series):\n data = GeoSeries(data)\n if data.crs is None:\n data.crs = crs\n return data\n else:\n if isinstance(data, Series):\n out = from_shapely(np.asarray(data), crs=crs)\n return GeoSeries(out, index=data.index, name=data.name)\n else:\n out = from_shapely(data, crs=crs)\n return out\n\n\ncrs_mismatch_error = (\n \"CRS mismatch between CRS of the passed geometries \"\n \"and 'crs'. Use 'GeoDataFrame.set_crs(crs, \"\n \"allow_override=True)' to overwrite CRS or \"\n \"'GeoDataFrame.to_crs(crs)' to reproject geometries. \"\n)\n\n\nclass GeoDataFrame(GeoPandasBase, DataFrame):\n \"\"\"\n A GeoDataFrame object is a pandas.DataFrame that has a column\n with geometry. In addition to the standard DataFrame constructor arguments,\n GeoDataFrame also accepts the following keyword arguments:\n\n Parameters\n ----------\n crs : value (optional)\n Coordinate Reference System of the geometry objects. Can be anything accepted by\n :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n geometry : str or array (optional)\n If str, column to use as geometry. 
If array, will be set as 'geometry'\n column on GeoDataFrame.\n\n Examples\n --------\n Constructing GeoDataFrame from a dictionary.\n\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d, crs=\"EPSG:4326\")\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n\n Notice that the inferred dtype of 'geometry' columns is geometry.\n\n >>> gdf.dtypes\n col1 object\n geometry geometry\n dtype: object\n\n Constructing GeoDataFrame from a pandas DataFrame with a column of WKT geometries:\n\n >>> import pandas as pd\n >>> d = {'col1': ['name1', 'name2'], 'wkt': ['POINT (1 2)', 'POINT (2 1)']}\n >>> df = pd.DataFrame(d)\n >>> gs = geopandas.GeoSeries.from_wkt(df['wkt'])\n >>> gdf = geopandas.GeoDataFrame(df, geometry=gs, crs=\"EPSG:4326\")\n >>> gdf\n col1 wkt geometry\n 0 name1 POINT (1 2) POINT (1.00000 2.00000)\n 1 name2 POINT (2 1) POINT (2.00000 1.00000)\n\n See also\n --------\n GeoSeries : Series object designed to store shapely geometry objects\n \"\"\"\n\n _metadata = [\"_crs\", \"_geometry_column_name\"]\n\n _geometry_column_name = DEFAULT_GEO_COLUMN_NAME\n\n def __init__(self, data=None, *args, geometry=None, crs=None, **kwargs):\n with compat.ignore_shapely2_warnings():\n super().__init__(data, *args, **kwargs)\n\n # need to set this before calling self['geometry'], because\n # getitem accesses crs\n self._crs = CRS.from_user_input(crs) if crs else None\n\n # set_geometry ensures the geometry data have the proper dtype,\n # but is not called if `geometry=None` ('geometry' column present\n # in the data), so therefore need to ensure it here manually\n # but within a try/except because currently non-geometries are\n # allowed in that case\n # TODO do we want to raise / return normal DataFrame in this case?\n\n # if gdf passed in and geo_col is set, we use that for geometry\n if geometry is None and isinstance(data, GeoDataFrame):\n self._geometry_column_name = data._geometry_column_name\n if crs is not None and data.crs != crs:\n raise ValueError(crs_mismatch_error)\n\n if geometry is None and \"geometry\" in self.columns:\n # Check for multiple columns with name \"geometry\". If there are,\n # self[\"geometry\"] is a gdf and constructor gets recursively recalled\n # by pandas internals trying to access this\n if (self.columns == \"geometry\").sum() > 1:\n raise ValueError(\n \"GeoDataFrame does not support multiple columns \"\n \"using the geometry column name 'geometry'.\"\n )\n\n # only if we have actual geometry values -> call set_geometry\n index = self.index\n try:\n if (\n hasattr(self[\"geometry\"].values, \"crs\")\n and self[\"geometry\"].values.crs\n and crs\n and not self[\"geometry\"].values.crs == crs\n ):\n raise ValueError(crs_mismatch_error)\n self[\"geometry\"] = _ensure_geometry(self[\"geometry\"].values, crs)\n except TypeError:\n pass\n else:\n if self.index is not index:\n # With pandas < 1.0 and an empty frame (no rows), the index\n # gets reset to a default RangeIndex -> set back the original\n # index if needed\n self.index = index\n geometry = \"geometry\"\n\n if geometry is not None:\n if (\n hasattr(geometry, \"crs\")\n and geometry.crs\n and crs\n and not geometry.crs == crs\n ):\n raise ValueError(crs_mismatch_error)\n\n self.set_geometry(geometry, inplace=True)\n\n if geometry is None and crs:\n raise ValueError(\n \"Assigning CRS to a GeoDataFrame without a geometry column is not \"\n \"supported. 
Supply geometry using the 'geometry=' keyword argument, \"\n \"or by providing a DataFrame with column name 'geometry'\",\n )\n\n def __setattr__(self, attr, val):\n # have to special case geometry b/c pandas tries to use as column...\n if attr == \"geometry\":\n object.__setattr__(self, attr, val)\n else:\n super().__setattr__(attr, val)\n\n def _get_geometry(self):\n if self._geometry_column_name not in self:\n if self._geometry_column_name is None:\n msg = (\n \"You are calling a geospatial method on the GeoDataFrame, \"\n \"but the active geometry column to use has not been set. \"\n )\n else:\n msg = (\n \"You are calling a geospatial method on the GeoDataFrame, \"\n f\"but the active geometry column ('{self._geometry_column_name}') \"\n \"is not present. \"\n )\n geo_cols = list(self.columns[self.dtypes == \"geometry\"])\n if len(geo_cols) > 0:\n msg += (\n f\"\\nThere are columns with geometry data type ({geo_cols}), and \"\n \"you can either set one as the active geometry with \"\n 'df.set_geometry(\"name\") or access the column as a '\n 'GeoSeries (df[\"name\"]) and call the method directly on it.'\n )\n else:\n msg += (\n \"\\nThere are no existing columns with geometry data type. You can \"\n \"add a geometry column as the active geometry column with \"\n \"df.set_geometry. \"\n )\n\n raise AttributeError(msg)\n return self[self._geometry_column_name]\n\n def _set_geometry(self, col):\n if not pd.api.types.is_list_like(col):\n raise ValueError(\"Must use a list-like to set the geometry property\")\n self.set_geometry(col, inplace=True)\n\n geometry = property(\n fget=_get_geometry, fset=_set_geometry, doc=\"Geometry data for GeoDataFrame\"\n )\n\n def set_geometry(self, col, drop=False, inplace=False, crs=None):\n \"\"\"\n Set the GeoDataFrame geometry using either an existing column or\n the specified input. By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n col : column label or array\n drop : boolean, default False\n Delete column to be used as the new geometry\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n crs : pyproj.CRS, optional\n Coordinate system to use. 
The value can be anything accepted\n by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n If passed, overrides both DataFrame and col's crs.\n Otherwise, tries to get crs from passed col values or DataFrame.\n\n Examples\n --------\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d, crs=\"EPSG:4326\")\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n\n Passing an array:\n\n >>> df1 = gdf.set_geometry([Point(0,0), Point(1,1)])\n >>> df1\n col1 geometry\n 0 name1 POINT (0.00000 0.00000)\n 1 name2 POINT (1.00000 1.00000)\n\n Using existing column:\n\n >>> gdf[\"buffered\"] = gdf.buffer(2)\n >>> df2 = gdf.set_geometry(\"buffered\")\n >>> df2.geometry\n 0 POLYGON ((3.00000 2.00000, 2.99037 1.80397, 2....\n 1 POLYGON ((4.00000 1.00000, 3.99037 0.80397, 3....\n Name: buffered, dtype: geometry\n\n Returns\n -------\n GeoDataFrame\n\n See also\n --------\n GeoDataFrame.rename_geometry : rename an active geometry column\n \"\"\"\n # Most of the code here is taken from DataFrame.set_index()\n if inplace:\n frame = self\n else:\n frame = self.copy()\n # if there is no previous self.geometry, self.copy() will downcast\n if type(frame) == DataFrame:\n frame = GeoDataFrame(frame)\n\n to_remove = None\n geo_column_name = self._geometry_column_name\n if isinstance(col, (Series, list, np.ndarray, GeometryArray)):\n level = col\n elif hasattr(col, \"ndim\") and col.ndim > 1:\n raise ValueError(\"Must pass array with one dimension only.\")\n else:\n try:\n level = frame[col]\n except KeyError:\n raise ValueError(\"Unknown column %s\" % col)\n except Exception:\n raise\n if isinstance(level, DataFrame):\n raise ValueError(\n \"GeoDataFrame does not support setting the geometry column where \"\n \"the column name is shared by multiple columns.\"\n )\n\n if drop:\n to_remove = col\n geo_column_name = self._geometry_column_name\n else:\n geo_column_name = col\n\n if to_remove:\n del frame[to_remove]\n\n if not crs:\n level_crs = getattr(level, \"crs\", None)\n crs = level_crs if level_crs is not None else self._crs\n\n if isinstance(level, (GeoSeries, GeometryArray)) and level.crs != crs:\n # Avoids caching issues/crs sharing issues\n level = level.copy()\n level.crs = crs\n\n # Check that we are using a listlike of geometries\n level = _ensure_geometry(level, crs=crs)\n index = frame.index\n frame[geo_column_name] = level\n if frame.index is not index and len(frame.index) == len(index):\n # With pandas < 1.0 and an empty frame (no rows), the index gets reset\n # to a default RangeIndex -> set back the original index if needed\n frame.index = index\n frame._geometry_column_name = geo_column_name\n frame.crs = crs\n if not inplace:\n return frame\n\n def rename_geometry(self, col, inplace=False):\n \"\"\"\n Renames the GeoDataFrame geometry column to\n the specified name. 
By default yields a new object.\n\n The original geometry column is replaced with the input.\n\n Parameters\n ----------\n col : new geometry column label\n inplace : boolean, default False\n Modify the GeoDataFrame in place (do not create a new object)\n\n Examples\n --------\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> df = geopandas.GeoDataFrame(d, crs=\"EPSG:4326\")\n >>> df1 = df.rename_geometry('geom1')\n >>> df1.geometry.name\n 'geom1'\n >>> df.rename_geometry('geom1', inplace=True)\n >>> df.geometry.name\n 'geom1'\n\n Returns\n -------\n geodataframe : GeoDataFrame\n\n See also\n --------\n GeoDataFrame.set_geometry : set the active geometry\n \"\"\"\n geometry_col = self.geometry.name\n if col in self.columns:\n raise ValueError(f\"Column named {col} already exists\")\n else:\n if not inplace:\n return self.rename(columns={geometry_col: col}).set_geometry(\n col, inplace\n )\n self.rename(columns={geometry_col: col}, inplace=inplace)\n self.set_geometry(col, inplace=inplace)\n\n @property\n def crs(self):\n \"\"\"\n The Coordinate Reference System (CRS) represented as a ``pyproj.CRS``\n object.\n\n Returns None if the CRS is not set, and to set the value it\n :getter: Returns a ``pyproj.CRS`` or None. When setting, the value\n can be anything accepted by\n :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n\n Examples\n --------\n\n >>> gdf.crs # doctest: +SKIP\n <Geographic 2D CRS: EPSG:4326>\n Name: WGS 84\n Axis Info [ellipsoidal]:\n - Lat[north]: Geodetic latitude (degree)\n - Lon[east]: Geodetic longitude (degree)\n Area of Use:\n - name: World\n - bounds: (-180.0, -90.0, 180.0, 90.0)\n Datum: World Geodetic System 1984\n - Ellipsoid: WGS 84\n - Prime Meridian: Greenwich\n\n See also\n --------\n GeoDataFrame.set_crs : assign CRS\n GeoDataFrame.to_crs : re-project to another CRS\n\n \"\"\"\n return self._crs\n\n @crs.setter\n def crs(self, value):\n \"\"\"Sets the value of the crs\"\"\"\n if self._geometry_column_name not in self:\n raise ValueError(\n \"Assigning CRS to a GeoDataFrame without a geometry column is not \"\n \"supported. 
Use GeoDataFrame.set_geometry to set the active \"\n \"geometry column.\",\n )\n else:\n if hasattr(self.geometry.values, \"crs\"):\n self.geometry.values.crs = value\n self._crs = self.geometry.values.crs\n else:\n # column called 'geometry' without geometry\n self._crs = None if not value else CRS.from_user_input(value)\n\n def __setstate__(self, state):\n # overriding DataFrame method for compat with older pickles (CRS handling)\n if isinstance(state, dict):\n if \"_metadata\" in state and \"crs\" in state[\"_metadata\"]:\n metadata = state[\"_metadata\"]\n metadata[metadata.index(\"crs\")] = \"_crs\"\n if \"crs\" in state and \"_crs\" not in state:\n crs = state.pop(\"crs\")\n state[\"_crs\"] = CRS.from_user_input(crs) if crs is not None else crs\n\n super().__setstate__(state)\n\n # for some versions that didn't yet have CRS at array level -> crs is set\n # at GeoDataFrame level with '_crs' (and not 'crs'), so without propagating\n # to the GeoSeries/GeometryArray\n try:\n if self.crs is not None:\n if self.geometry.values.crs is None:\n self.crs = self.crs\n except Exception:\n pass\n\n @classmethod\n def from_dict(cls, data, geometry=None, crs=None, **kwargs):\n \"\"\"\n Construct GeoDataFrame from dict of array-like or dicts by\n overriding DataFrame.from_dict method with geometry and crs\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n geometry : str or array (optional)\n If str, column to use as geometry. If array, will be set as 'geometry'\n column on GeoDataFrame.\n crs : str or dict (optional)\n Coordinate reference system to set on the resulting frame.\n kwargs : key-word arguments\n These arguments are passed to DataFrame.from_dict\n\n Returns\n -------\n GeoDataFrame\n\n \"\"\"\n dataframe = DataFrame.from_dict(data, **kwargs)\n return GeoDataFrame(dataframe, geometry=geometry, crs=crs)\n\n @classmethod\n def from_file(cls, filename, **kwargs):\n \"\"\"Alternate constructor to create a ``GeoDataFrame`` from a file.\n\n It is recommended to use :func:`geopandas.read_file` instead.\n\n Can load a ``GeoDataFrame`` from a file in any format recognized by\n `fiona`. See http://fiona.readthedocs.io/en/latest/manual.html for details.\n\n Parameters\n ----------\n filename : str\n File path or file handle to read from. Depending on which kwargs\n are included, the content of filename may vary. 
See\n http://fiona.readthedocs.io/en/latest/README.html#usage for usage details.\n kwargs : key-word arguments\n These arguments are passed to fiona.open, and can be used to\n access multi-layer data, data stored within archives (zip files),\n etc.\n\n Examples\n --------\n\n >>> path = geopandas.datasets.get_path('nybb')\n >>> gdf = geopandas.GeoDataFrame.from_file(path)\n >>> gdf # doctest: +SKIP\n BoroCode BoroName Shape_Leng Shape_Area \\\n geometry\n 0 5 Staten Island 330470.010332 1.623820e+09 MULTIPOLYGON ((\\\n(970217.022 145643.332, 970227....\n 1 4 Queens 896344.047763 3.045213e+09 MULTIPOLYGON ((\\\n(1029606.077 156073.814, 102957...\n 2 3 Brooklyn 741080.523166 1.937479e+09 MULTIPOLYGON ((\\\n(1021176.479 151374.797, 102100...\n 3 1 Manhattan 359299.096471 6.364715e+08 MULTIPOLYGON ((\\\n(981219.056 188655.316, 980940....\n 4 2 Bronx 464392.991824 1.186925e+09 MULTIPOLYGON ((\\\n(1012821.806 229228.265, 101278...\n\n The recommended method of reading files is :func:`geopandas.read_file`:\n\n >>> gdf = geopandas.read_file(path)\n\n See also\n --------\n read_file : read file to GeoDataFame\n GeoDataFrame.to_file : write GeoDataFrame to file\n\n \"\"\"\n return geopandas.io.file._read_file(filename, **kwargs)\n\n @classmethod\n def from_features(cls, features, crs=None, columns=None):\n \"\"\"\n Alternate constructor to create GeoDataFrame from an iterable of\n features or a feature collection.\n\n Parameters\n ----------\n features\n - Iterable of features, where each element must be a feature\n dictionary or implement the __geo_interface__.\n - Feature collection, where the 'features' key contains an\n iterable of features.\n - Object holding a feature collection that implements the\n ``__geo_interface__``.\n crs : str or dict (optional)\n Coordinate reference system to set on the resulting frame.\n columns : list of column names, optional\n Optionally specify the column names to include in the output frame.\n This does not overwrite the property names of the input, but can\n ensure a consistent output format.\n\n Returns\n -------\n GeoDataFrame\n\n Notes\n -----\n For more information about the ``__geo_interface__``, see\n https://gist.github.com/sgillies/2217756\n\n Examples\n --------\n >>> feature_coll = {\n ... \"type\": \"FeatureCollection\",\n ... \"features\": [\n ... {\n ... \"id\": \"0\",\n ... \"type\": \"Feature\",\n ... \"properties\": {\"col1\": \"name1\"},\n ... \"geometry\": {\"type\": \"Point\", \"coordinates\": (1.0, 2.0)},\n ... \"bbox\": (1.0, 2.0, 1.0, 2.0),\n ... },\n ... {\n ... \"id\": \"1\",\n ... \"type\": \"Feature\",\n ... \"properties\": {\"col1\": \"name2\"},\n ... \"geometry\": {\"type\": \"Point\", \"coordinates\": (2.0, 1.0)},\n ... \"bbox\": (2.0, 1.0, 2.0, 1.0),\n ... },\n ... ],\n ... \"bbox\": (1.0, 1.0, 2.0, 2.0),\n ... 
}\n >>> df = geopandas.GeoDataFrame.from_features(feature_coll)\n >>> df\n geometry col1\n 0 POINT (1.00000 2.00000) name1\n 1 POINT (2.00000 1.00000) name2\n\n \"\"\"\n # Handle feature collections\n if hasattr(features, \"__geo_interface__\"):\n fs = features.__geo_interface__\n else:\n fs = features\n\n if isinstance(fs, dict) and fs.get(\"type\") == \"FeatureCollection\":\n features_lst = fs[\"features\"]\n else:\n features_lst = features\n\n rows = []\n for feature in features_lst:\n # load geometry\n if hasattr(feature, \"__geo_interface__\"):\n feature = feature.__geo_interface__\n row = {\n \"geometry\": shape(feature[\"geometry\"]) if feature[\"geometry\"] else None\n }\n # load properties\n row.update(feature[\"properties\"])\n rows.append(row)\n return GeoDataFrame(rows, columns=columns, crs=crs)\n\n @classmethod\n def from_postgis(\n cls,\n sql,\n con,\n geom_col=\"geom\",\n crs=None,\n index_col=None,\n coerce_float=True,\n parse_dates=None,\n params=None,\n chunksize=None,\n ):\n \"\"\"\n Alternate constructor to create a ``GeoDataFrame`` from a sql query\n containing a geometry column in WKB representation.\n\n Parameters\n ----------\n sql : string\n con : sqlalchemy.engine.Connection or sqlalchemy.engine.Engine\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n crs : optional\n Coordinate reference system to use for the returned GeoDataFrame\n index_col : string or list of strings, optional, default: None\n Column(s) to set as index(MultiIndex)\n coerce_float : boolean, default True\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets\n parse_dates : list or dict, default None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict\n corresponds to the keyword arguments of\n :func:`pandas.to_datetime`. 
Especially useful with databases\n without native Datetime support, such as SQLite.\n params : list, tuple or dict, optional, default None\n List of parameters to pass to execute method.\n chunksize : int, default None\n If specified, return an iterator where chunksize is the number\n of rows to include in each chunk.\n\n Examples\n --------\n PostGIS\n\n >>> from sqlalchemy import create_engine # doctest: +SKIP\n >>> db_connection_url = \"postgresql://myusername:mypassword@myhost:5432/mydb\"\n >>> con = create_engine(db_connection_url) # doctest: +SKIP\n >>> sql = \"SELECT geom, highway FROM roads\"\n >>> df = geopandas.GeoDataFrame.from_postgis(sql, con) # doctest: +SKIP\n\n SpatiaLite\n\n >>> sql = \"SELECT ST_Binary(geom) AS geom, highway FROM roads\"\n >>> df = geopandas.GeoDataFrame.from_postgis(sql, con) # doctest: +SKIP\n\n The recommended method of reading from PostGIS is\n :func:`geopandas.read_postgis`:\n\n >>> df = geopandas.read_postgis(sql, con) # doctest: +SKIP\n\n See also\n --------\n geopandas.read_postgis : read PostGIS database to GeoDataFrame\n \"\"\"\n\n df = geopandas.io.sql._read_postgis(\n sql,\n con,\n geom_col=geom_col,\n crs=crs,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n params=params,\n chunksize=chunksize,\n )\n\n return df\n\n def to_json(self, na=\"null\", show_bbox=False, drop_id=False, **kwargs):\n \"\"\"\n Returns a GeoJSON representation of the ``GeoDataFrame`` as a string.\n\n Parameters\n ----------\n na : {'null', 'drop', 'keep'}, default 'null'\n Indicates how to output missing (NaN) values in the GeoDataFrame.\n See below.\n show_bbox : bool, optional, default: False\n Include bbox (bounds) in the geojson\n drop_id : bool, default: False\n Whether to retain the index of the GeoDataFrame as the id property\n in the generated GeoJSON. Default is False, but may want True\n if the index is just arbitrary row numbers.\n\n Notes\n -----\n The remaining *kwargs* are passed to json.dumps().\n\n Missing (NaN) values in the GeoDataFrame can be represented as follows:\n\n - ``null``: output the missing entries as JSON null.\n - ``drop``: remove the property from the feature. This applies to each\n feature individually so that features may have different properties.\n - ``keep``: output the missing entries as NaN.\n\n Examples\n --------\n\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d, crs=\"EPSG:4326\")\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n\n >>> gdf.to_json()\n '{\"type\": \"FeatureCollection\", \"features\": [{\"id\": \"0\", \"type\": \"Feature\", \\\n\"properties\": {\"col1\": \"name1\"}, \"geometry\": {\"type\": \"Point\", \"coordinates\": [1.0,\\\n 2.0]}}, {\"id\": \"1\", \"type\": \"Feature\", \"properties\": {\"col1\": \"name2\"}, \"geometry\"\\\n: {\"type\": \"Point\", \"coordinates\": [2.0, 1.0]}}]}'\n\n Alternatively, you can write GeoJSON to file:\n\n >>> gdf.to_file(path, driver=\"GeoJSON\") # doctest: +SKIP\n\n See also\n --------\n GeoDataFrame.to_file : write GeoDataFrame to file\n\n \"\"\"\n return json.dumps(\n self._to_geo(na=na, show_bbox=show_bbox, drop_id=drop_id), **kwargs\n )\n\n @property\n def __geo_interface__(self):\n \"\"\"Returns a ``GeoDataFrame`` as a python feature collection.\n\n Implements the `geo_interface`. 
The returned python data structure\n represents the ``GeoDataFrame`` as a GeoJSON-like\n ``FeatureCollection``.\n\n This differs from `_to_geo()` only in that it is a property with\n default args instead of a method\n\n Examples\n --------\n\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d, crs=\"EPSG:4326\")\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n\n >>> gdf.__geo_interface__\n {'type': 'FeatureCollection', 'features': [{'id': '0', 'type': 'Feature', \\\n'properties': {'col1': 'name1'}, 'geometry': {'type': 'Point', 'coordinates': (1.0\\\n, 2.0)}, 'bbox': (1.0, 2.0, 1.0, 2.0)}, {'id': '1', 'type': 'Feature', 'properties\\\n': {'col1': 'name2'}, 'geometry': {'type': 'Point', 'coordinates': (2.0, 1.0)}, 'b\\\nbox': (2.0, 1.0, 2.0, 1.0)}], 'bbox': (1.0, 1.0, 2.0, 2.0)}\n\n\n \"\"\"\n return self._to_geo(na=\"null\", show_bbox=True, drop_id=False)\n\n def iterfeatures(self, na=\"null\", show_bbox=False, drop_id=False):\n \"\"\"\n Returns an iterator that yields feature dictionaries that comply with\n __geo_interface__\n\n Parameters\n ----------\n na : str, optional\n Options are {'null', 'drop', 'keep'}, default 'null'.\n Indicates how to output missing (NaN) values in the GeoDataFrame\n\n - null: output the missing entries as JSON null\n - drop: remove the property from the feature. This applies to each feature \\\nindividually so that features may have different properties\n - keep: output the missing entries as NaN\n\n show_bbox : bool, optional\n Include bbox (bounds) in the geojson. Default False.\n drop_id : bool, default: False\n Whether to retain the index of the GeoDataFrame as the id property\n in the generated GeoJSON. 
Default is False, but may want True\n if the index is just arbitrary row numbers.\n\n Examples\n --------\n\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d, crs=\"EPSG:4326\")\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n\n >>> feature = next(gdf.iterfeatures())\n >>> feature\n {'id': '0', 'type': 'Feature', 'properties': {'col1': 'name1'}, 'geometry': {\\\n'type': 'Point', 'coordinates': (1.0, 2.0)}}\n \"\"\"\n if na not in [\"null\", \"drop\", \"keep\"]:\n raise ValueError(\"Unknown na method {0}\".format(na))\n\n if self._geometry_column_name not in self:\n raise AttributeError(\n \"No geometry data set (expected in\"\n \" column '%s').\" % self._geometry_column_name\n )\n\n ids = np.array(self.index, copy=False)\n geometries = np.array(self[self._geometry_column_name], copy=False)\n\n if not self.columns.is_unique:\n raise ValueError(\"GeoDataFrame cannot contain duplicated column names.\")\n\n properties_cols = self.columns.difference([self._geometry_column_name])\n\n if len(properties_cols) > 0:\n # convert to object to get python scalars.\n properties = self[properties_cols].astype(object).values\n if na == \"null\":\n properties[pd.isnull(self[properties_cols]).values] = None\n\n for i, row in enumerate(properties):\n geom = geometries[i]\n\n if na == \"drop\":\n properties_items = {\n k: v for k, v in zip(properties_cols, row) if not pd.isnull(v)\n }\n else:\n properties_items = {k: v for k, v in zip(properties_cols, row)}\n\n if drop_id:\n feature = {}\n else:\n feature = {\"id\": str(ids[i])}\n\n feature[\"type\"] = \"Feature\"\n feature[\"properties\"] = properties_items\n feature[\"geometry\"] = mapping(geom) if geom else None\n\n if show_bbox:\n feature[\"bbox\"] = geom.bounds if geom else None\n\n yield feature\n\n else:\n for fid, geom in zip(ids, geometries):\n\n if drop_id:\n feature = {}\n else:\n feature = {\"id\": str(fid)}\n\n feature[\"type\"] = \"Feature\"\n feature[\"properties\"] = {}\n feature[\"geometry\"] = mapping(geom) if geom else None\n\n if show_bbox:\n feature[\"bbox\"] = geom.bounds if geom else None\n\n yield feature\n\n def _to_geo(self, **kwargs):\n \"\"\"\n Returns a python feature collection (i.e. 
the geointerface)\n representation of the GeoDataFrame.\n\n \"\"\"\n geo = {\n \"type\": \"FeatureCollection\",\n \"features\": list(self.iterfeatures(**kwargs)),\n }\n\n if kwargs.get(\"show_bbox\", False):\n geo[\"bbox\"] = tuple(self.total_bounds)\n\n return geo\n\n def to_wkb(self, hex=False, **kwargs):\n \"\"\"\n Encode all geometry columns in the GeoDataFrame to WKB.\n\n Parameters\n ----------\n hex : bool\n If true, export the WKB as a hexadecimal string.\n The default is to return a binary bytes object.\n kwargs\n Additional keyword args will be passed to\n :func:`pygeos.to_wkb` if pygeos is installed.\n\n Returns\n -------\n DataFrame\n geometry columns are encoded to WKB\n \"\"\"\n\n df = DataFrame(self.copy())\n\n # Encode all geometry columns to WKB\n for col in df.columns[df.dtypes == \"geometry\"]:\n df[col] = to_wkb(df[col].values, hex=hex, **kwargs)\n\n return df\n\n def to_wkt(self, **kwargs):\n \"\"\"\n Encode all geometry columns in the GeoDataFrame to WKT.\n\n Parameters\n ----------\n kwargs\n Keyword args will be passed to :func:`pygeos.to_wkt`\n if pygeos is installed.\n\n Returns\n -------\n DataFrame\n geometry columns are encoded to WKT\n \"\"\"\n\n df = DataFrame(self.copy())\n\n # Encode all geometry columns to WKT\n for col in df.columns[df.dtypes == \"geometry\"]:\n df[col] = to_wkt(df[col].values, **kwargs)\n\n return df\n\n def to_parquet(self, path, index=None, compression=\"snappy\", **kwargs):\n \"\"\"Write a GeoDataFrame to the Parquet format.\n\n Any geometry columns present are serialized to WKB format in the file.\n\n Requires 'pyarrow'.\n\n WARNING: this is an initial implementation of Parquet file support and\n associated metadata. This is tracking version 0.1.0 of the metadata\n specification at:\n https://github.com/geopandas/geo-arrow-spec\n\n This metadata specification does not yet make stability promises. As such,\n we do not yet recommend using this in a production setting unless you are\n able to rewrite your Parquet files.\n\n .. versionadded:: 0.8\n\n Parameters\n ----------\n path : str, path object\n index : bool, default None\n If ``True``, always include the dataframe's index(es) as columns\n in the file output.\n If ``False``, the index(es) will not be written to the file.\n If ``None``, the index(es) will be included as columns in the file\n output except `RangeIndex` which is stored as metadata only.\n compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'\n Name of the compression to use. Use ``None`` for no compression.\n kwargs\n Additional keyword arguments passed to :func:`pyarrow.parquet.write_table`.\n\n Examples\n --------\n\n >>> gdf.to_parquet('data.parquet') # doctest: +SKIP\n\n See also\n --------\n GeoDataFrame.to_feather : write GeoDataFrame to feather\n GeoDataFrame.to_file : write GeoDataFrame to file\n \"\"\"\n\n from geopandas.io.arrow import _to_parquet\n\n _to_parquet(self, path, compression=compression, index=index, **kwargs)\n\n def to_feather(self, path, index=None, compression=None, **kwargs):\n \"\"\"Write a GeoDataFrame to the Feather format.\n\n Any geometry columns present are serialized to WKB format in the file.\n\n Requires 'pyarrow' >= 0.17.\n\n WARNING: this is an initial implementation of Feather file support and\n associated metadata. This is tracking version 0.1.0 of the metadata\n specification at:\n https://github.com/geopandas/geo-arrow-spec\n\n This metadata specification does not yet make stability promises. 
As such,\n we do not yet recommend using this in a production setting unless you are\n able to rewrite your Feather files.\n\n .. versionadded:: 0.8\n\n Parameters\n ----------\n path : str, path object\n index : bool, default None\n If ``True``, always include the dataframe's index(es) as columns\n in the file output.\n If ``False``, the index(es) will not be written to the file.\n If ``None``, the index(es) will be included as columns in the file\n output except `RangeIndex` which is stored as metadata only.\n compression : {'zstd', 'lz4', 'uncompressed'}, optional\n Name of the compression to use. Use ``\"uncompressed\"`` for no\n compression. By default uses LZ4 if available, otherwise uncompressed.\n kwargs\n Additional keyword arguments passed to\n :func:`pyarrow.feather.write_feather`.\n\n Examples\n --------\n\n >>> gdf.to_feather('data.feather') # doctest: +SKIP\n\n See also\n --------\n GeoDataFrame.to_parquet : write GeoDataFrame to parquet\n GeoDataFrame.to_file : write GeoDataFrame to file\n \"\"\"\n\n from geopandas.io.arrow import _to_feather\n\n _to_feather(self, path, index=index, compression=compression, **kwargs)\n\n def to_file(self, filename, driver=None, schema=None, index=None, **kwargs):\n \"\"\"Write the ``GeoDataFrame`` to a file.\n\n By default, an ESRI shapefile is written, but any OGR data source\n supported by Fiona can be written. A dictionary of supported OGR\n providers is available via:\n\n >>> import fiona\n >>> fiona.supported_drivers # doctest: +SKIP\n\n Parameters\n ----------\n filename : string\n File path or file handle to write to.\n driver : string, default None\n The OGR format driver used to write the vector file.\n If not specified, it attempts to infer it from the file extension.\n If no extension is specified, it saves ESRI Shapefile to a folder.\n schema : dict, default: None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written.\n index : bool, default None\n If True, write index into one or more columns (for MultiIndex).\n Default None writes the index into one or more columns only if\n the index is named, is a MultiIndex, or has a non-integer data\n type. If False, no index is written.\n\n .. versionadded:: 0.7\n Previously the index was not written.\n\n Notes\n -----\n The extra keyword arguments ``**kwargs`` are passed to fiona.open and\n can be used to write to multi-layer data, store data within archives\n (zip files), etc.\n\n The format drivers will attempt to detect the encoding of your data, but\n may fail. In this case, the proper encoding can be specified explicitly\n by using the encoding keyword parameter, e.g. 
``encoding='utf-8'``.\n\n See Also\n --------\n GeoSeries.to_file\n GeoDataFrame.to_postgis : write GeoDataFrame to PostGIS database\n GeoDataFrame.to_parquet : write GeoDataFrame to parquet\n GeoDataFrame.to_feather : write GeoDataFrame to feather\n\n Examples\n --------\n\n >>> gdf.to_file('dataframe.shp') # doctest: +SKIP\n\n >>> gdf.to_file('dataframe.gpkg', driver='GPKG', layer='name') # doctest: +SKIP\n\n >>> gdf.to_file('dataframe.geojson', driver='GeoJSON') # doctest: +SKIP\n\n With selected drivers you can also append to a file with `mode=\"a\"`:\n\n >>> gdf.to_file('dataframe.shp', mode=\"a\") # doctest: +SKIP\n \"\"\"\n from geopandas.io.file import _to_file\n\n _to_file(self, filename, driver, schema, index, **kwargs)\n\n def set_crs(self, crs=None, epsg=None, inplace=False, allow_override=False):\n \"\"\"\n Set the Coordinate Reference System (CRS) of the ``GeoDataFrame``.\n\n If there are multiple geometry columns within the GeoDataFrame, only\n the CRS of the active geometry column is set.\n\n NOTE: The underlying geometries are not transformed to this CRS. To\n transform the geometries to a new CRS, use the ``to_crs`` method.\n\n Parameters\n ----------\n crs : pyproj.CRS, optional if `epsg` is specified\n The value can be anything accepted\n by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n epsg : int, optional if `crs` is specified\n EPSG code specifying the projection.\n inplace : bool, default False\n If True, the CRS of the GeoDataFrame will be changed in place\n (while still returning the result) instead of making a copy of\n the GeoDataFrame.\n allow_override : bool, default False\n If the GeoDataFrame already has a CRS, allow replacing the\n existing CRS, even when both are not equal.\n\n Examples\n --------\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d)\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n\n Setting CRS to a GeoDataFrame without one:\n\n >>> gdf.crs is None\n True\n\n >>> gdf = gdf.set_crs('epsg:3857')\n >>> gdf.crs # doctest: +SKIP\n <Projected CRS: EPSG:3857>\n Name: WGS 84 / Pseudo-Mercator\n Axis Info [cartesian]:\n - X[east]: Easting (metre)\n - Y[north]: Northing (metre)\n Area of Use:\n - name: World - 85°S to 85°N\n - bounds: (-180.0, -85.06, 180.0, 85.06)\n Coordinate Operation:\n - name: Popular Visualisation Pseudo-Mercator\n - method: Popular Visualisation Pseudo Mercator\n Datum: World Geodetic System 1984\n - Ellipsoid: WGS 84\n - Prime Meridian: Greenwich\n\n Overriding existing CRS:\n\n >>> gdf = gdf.set_crs(4326, allow_override=True)\n\n Without ``allow_override=True``, ``set_crs`` returns an error if you try to\n override CRS.\n\n See also\n --------\n GeoDataFrame.to_crs : re-project to another CRS\n\n \"\"\"\n if not inplace:\n df = self.copy()\n else:\n df = self\n df.geometry = df.geometry.set_crs(\n crs=crs, epsg=epsg, allow_override=allow_override, inplace=True\n )\n return df\n\n def to_crs(self, crs=None, epsg=None, inplace=False):\n \"\"\"Transform geometries to a new coordinate reference system.\n\n Transform all geometries in an active geometry column to a different coordinate\n reference system. The ``crs`` attribute on the current GeoSeries must\n be set. Either ``crs`` or ``epsg`` may be specified for output.\n\n This method will transform all points in all objects. 
It has no notion\n of projecting entire geometries. All segments joining points are\n assumed to be lines in the current projection, not geodesics. Objects\n crossing the dateline (or other projection boundary) will have\n undesirable behavior.\n\n Parameters\n ----------\n crs : pyproj.CRS, optional if `epsg` is specified\n The value can be anything accepted by\n :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n epsg : int, optional if `crs` is specified\n EPSG code specifying output projection.\n inplace : bool, optional, default: False\n Whether to return a new GeoDataFrame or do the transformation in\n place.\n\n Returns\n -------\n GeoDataFrame\n\n Examples\n --------\n >>> from shapely.geometry import Point\n >>> d = {'col1': ['name1', 'name2'], 'geometry': [Point(1, 2), Point(2, 1)]}\n >>> gdf = geopandas.GeoDataFrame(d, crs=4326)\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n >>> gdf.crs # doctest: +SKIP\n <Geographic 2D CRS: EPSG:4326>\n Name: WGS 84\n Axis Info [ellipsoidal]:\n - Lat[north]: Geodetic latitude (degree)\n - Lon[east]: Geodetic longitude (degree)\n Area of Use:\n - name: World\n - bounds: (-180.0, -90.0, 180.0, 90.0)\n Datum: World Geodetic System 1984\n - Ellipsoid: WGS 84\n - Prime Meridian: Greenwich\n\n >>> gdf = gdf.to_crs(3857)\n >>> gdf\n col1 geometry\n 0 name1 POINT (111319.491 222684.209)\n 1 name2 POINT (222638.982 111325.143)\n >>> gdf.crs # doctest: +SKIP\n <Projected CRS: EPSG:3857>\n Name: WGS 84 / Pseudo-Mercator\n Axis Info [cartesian]:\n - X[east]: Easting (metre)\n - Y[north]: Northing (metre)\n Area of Use:\n - name: World - 85°S to 85°N\n - bounds: (-180.0, -85.06, 180.0, 85.06)\n Coordinate Operation:\n - name: Popular Visualisation Pseudo-Mercator\n - method: Popular Visualisation Pseudo Mercator\n Datum: World Geodetic System 1984\n - Ellipsoid: WGS 84\n - Prime Meridian: Greenwich\n\n See also\n --------\n GeoDataFrame.set_crs : assign CRS without re-projection\n \"\"\"\n if inplace:\n df = self\n else:\n df = self.copy()\n geom = df.geometry.to_crs(crs=crs, epsg=epsg)\n df.geometry = geom\n df.crs = geom.crs\n if not inplace:\n return df\n\n def estimate_utm_crs(self, datum_name=\"WGS 84\"):\n \"\"\"Returns the estimated UTM CRS based on the bounds of the dataset.\n\n .. versionadded:: 0.9\n\n .. note:: Requires pyproj 3+\n\n Parameters\n ----------\n datum_name : str, optional\n The name of the datum to use in the query. Default is WGS 84.\n\n Returns\n -------\n pyproj.CRS\n\n Examples\n --------\n >>> world = geopandas.read_file(\n ... geopandas.datasets.get_path(\"naturalearth_lowres\")\n ... )\n >>> germany = world.loc[world.name == \"Germany\"]\n >>> germany.estimate_utm_crs() # doctest: +SKIP\n <Projected CRS: EPSG:32632>\n Name: WGS 84 / UTM zone 32N\n Axis Info [cartesian]:\n - E[east]: Easting (metre)\n - N[north]: Northing (metre)\n Area of Use:\n - name: World - N hemisphere - 6°E to 12°E - by country\n - bounds: (6.0, 0.0, 12.0, 84.0)\n Coordinate Operation:\n - name: UTM zone 32N\n - method: Transverse Mercator\n Datum: World Geodetic System 1984\n - Ellipsoid: WGS 84\n - Prime Meridian: Greenwich\n \"\"\"\n return self.geometry.estimate_utm_crs(datum_name=datum_name)\n\n def __getitem__(self, key):\n \"\"\"\n If the result is a column containing only 'geometry', return a\n GeoSeries. 
If it's a DataFrame with any columns of GeometryDtype,\n return a GeoDataFrame.\n \"\"\"\n result = super().__getitem__(key)\n geo_col = self._geometry_column_name\n if isinstance(result, Series) and isinstance(result.dtype, GeometryDtype):\n result.__class__ = GeoSeries\n elif isinstance(result, DataFrame):\n if (result.dtypes == \"geometry\").sum() > 0:\n result.__class__ = GeoDataFrame\n if geo_col in result:\n result._geometry_column_name = geo_col\n else:\n result._geometry_column_name = None\n result._crs = None\n else:\n result.__class__ = DataFrame\n return result\n\n def __setitem__(self, key, value):\n \"\"\"\n Overwritten to preserve CRS of GeometryArray in cases like\n df['geometry'] = [geom... for geom in df.geometry]\n \"\"\"\n if not pd.api.types.is_list_like(key) and key == self._geometry_column_name:\n if pd.api.types.is_scalar(value) or isinstance(value, BaseGeometry):\n value = [value] * self.shape[0]\n try:\n value = _ensure_geometry(value, crs=self.crs)\n self._crs = value.crs\n except TypeError:\n warnings.warn(\"Geometry column does not contain geometry.\")\n super().__setitem__(key, value)\n\n #\n # Implement pandas methods\n #\n\n def merge(self, *args, **kwargs):\n r\"\"\"Merge two ``GeoDataFrame`` objects with a database-style join.\n\n Returns a ``GeoDataFrame`` if a geometry column is present; otherwise,\n returns a pandas ``DataFrame``.\n\n Returns\n -------\n GeoDataFrame or DataFrame\n\n Notes\n -----\n The extra arguments ``*args`` and keyword arguments ``**kwargs`` are\n passed to DataFrame.merge.\n\n Reference\n ---------\n https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas\\\n .DataFrame.merge.html\n\n \"\"\"\n result = DataFrame.merge(self, *args, **kwargs)\n geo_col = self._geometry_column_name\n if isinstance(result, DataFrame) and geo_col in result:\n result.__class__ = GeoDataFrame\n result.crs = self.crs\n result._geometry_column_name = geo_col\n elif isinstance(result, DataFrame) and geo_col not in result:\n result.__class__ = DataFrame\n return result\n\n @doc(pd.DataFrame)\n def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwargs):\n result = super().apply(\n func, axis=axis, raw=raw, result_type=result_type, args=args, **kwargs\n )\n # Reconstruct gdf if it was lost by apply\n if (\n isinstance(result, DataFrame)\n and self._geometry_column_name in result.columns\n ):\n # axis=1 apply will split GeometryDType to object, try and cast back\n try:\n result = result.set_geometry(self._geometry_column_name)\n except TypeError:\n pass\n else:\n if self.crs is not None and result.crs is None:\n result.set_crs(self.crs, inplace=True)\n\n return result\n\n @property\n def _constructor(self):\n return _geodataframe_constructor_with_fallback\n\n @property\n def _constructor_sliced(self):\n return _geoseries_constructor_with_fallback\n\n def __finalize__(self, other, method=None, **kwargs):\n \"\"\"propagate metadata from other to self\"\"\"\n self = super().__finalize__(other, method=method, **kwargs)\n\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n\n if (self.columns == self._geometry_column_name).sum() > 1:\n raise ValueError(\n \"Concat operation has resulted in multiple columns using \"\n f\"the geometry 
column name '{self._geometry_column_name}'.\\n\"\n f\"Please ensure this column from the first DataFrame is not \"\n f\"repeated.\"\n )\n elif method == \"unstack\":\n # unstack adds multiindex columns and reshapes data.\n # it never makes sense to retain geometry column\n self._geometry_column_name = None\n self._crs = None\n return self\n\n def dissolve(\n self,\n by=None,\n aggfunc=\"first\",\n as_index=True,\n level=None,\n sort=True,\n observed=False,\n dropna=True,\n ):\n \"\"\"\n Dissolve geometries within `groupby` into single observation.\n This is accomplished by applying the `unary_union` method\n to all geometries within a group.\n\n Observations associated with each `groupby` group will be aggregated\n using the `aggfunc`.\n\n Parameters\n ----------\n by : string, default None\n Column whose values define groups to be dissolved. If None,\n whole GeoDataFrame is considered a single group.\n aggfunc : function or string, default \"first\"\n Aggregation function for manipulation of data associated\n with each group. Passed to pandas `groupby.agg` method.\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. [np.sum, 'mean']\n - dict of axis labels -> functions, function names or list of such.\n as_index : boolean, default True\n If true, groupby columns become index of result.\n level : int or str or sequence of int or sequence of str, default None\n If the axis is a MultiIndex (hierarchical), group by a\n particular level or levels.\n\n .. versionadded:: 0.9.0\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within\n each group. Groupby preserves the order of rows within each group.\n\n .. versionadded:: 0.9.0\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.9.0\n dropna : bool, default True\n If True, and if group keys contain NA values, NA values\n together with row/column will be dropped. If False, NA\n values will also be treated as the key in groups.\n\n This parameter is not supported for pandas < 1.1.0.\n A warning will be emitted for earlier pandas versions\n if a non-default value is given for this parameter.\n\n .. versionadded:: 0.9.0\n\n Returns\n -------\n GeoDataFrame\n\n Examples\n --------\n >>> from shapely.geometry import Point\n >>> d = {\n ... \"col1\": [\"name1\", \"name2\", \"name1\"],\n ... \"geometry\": [Point(1, 2), Point(2, 1), Point(0, 1)],\n ... 
}\n >>> gdf = geopandas.GeoDataFrame(d, crs=4326)\n >>> gdf\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name2 POINT (2.00000 1.00000)\n 2 name1 POINT (0.00000 1.00000)\n\n >>> dissolved = gdf.dissolve('col1')\n >>> dissolved # doctest: +SKIP\n geometry\n col1\n name1 MULTIPOINT (0.00000 1.00000, 1.00000 2.00000)\n name2 POINT (2.00000 1.00000)\n\n See also\n --------\n GeoDataFrame.explode : explode multi-part geometries into single geometries\n\n \"\"\"\n\n if by is None and level is None:\n by = np.zeros(len(self), dtype=\"int64\")\n\n groupby_kwargs = dict(\n by=by, level=level, sort=sort, observed=observed, dropna=dropna\n )\n if not compat.PANDAS_GE_11:\n groupby_kwargs.pop(\"dropna\")\n\n if not dropna: # If they passed a non-default dropna value\n warnings.warn(\"dropna kwarg is not supported for pandas < 1.1.0\")\n\n # Process non-spatial component\n data = self.drop(labels=self.geometry.name, axis=1)\n aggregated_data = data.groupby(**groupby_kwargs).agg(aggfunc)\n aggregated_data.columns = aggregated_data.columns.to_flat_index()\n\n # Process spatial component\n def merge_geometries(block):\n merged_geom = block.unary_union\n return merged_geom\n\n g = self.groupby(group_keys=False, **groupby_kwargs)[self.geometry.name].agg(\n merge_geometries\n )\n\n # Aggregate\n aggregated_geometry = GeoDataFrame(g, geometry=self.geometry.name, crs=self.crs)\n # Recombine\n aggregated = aggregated_geometry.join(aggregated_data)\n\n # Reset if requested\n if not as_index:\n aggregated = aggregated.reset_index()\n\n return aggregated\n\n # overrides the pandas native explode method to break up features geometrically\n def explode(self, column=None, ignore_index=False, index_parts=None, **kwargs):\n \"\"\"\n Explode multi-part geometries into multiple single geometries.\n\n Each row containing a multi-part geometry will be split into\n multiple rows with single geometries, thereby increasing the vertical\n size of the GeoDataFrame.\n\n .. note:: ignore_index requires pandas 1.1.0 or newer.\n\n Parameters\n ----------\n column : string, default None\n Column to explode. In the case of a geometry column, multi-part\n geometries are converted to single-part.\n If None, the active geometry column is used.\n ignore_index : bool, default False\n If True, the resulting index will be labelled 0, 1, …, n - 1,\n ignoring `index_parts`.\n index_parts : boolean, default True\n If True, the resulting index will be a multi-index (original\n index with an additional level indicating the multiple\n geometries: a new zero-based index for each single part geometry\n per multi-part geometry).\n\n Returns\n -------\n GeoDataFrame\n Exploded geodataframe with each single geometry\n as a separate entry in the geodataframe.\n\n Examples\n --------\n\n >>> from shapely.geometry import MultiPoint\n >>> d = {\n ... \"col1\": [\"name1\", \"name2\"],\n ... \"geometry\": [\n ... MultiPoint([(1, 2), (3, 4)]),\n ... MultiPoint([(2, 1), (0, 0)]),\n ... ],\n ... 
}\n >>> gdf = geopandas.GeoDataFrame(d, crs=4326)\n >>> gdf\n col1 geometry\n 0 name1 MULTIPOINT (1.00000 2.00000, 3.00000 4.00000)\n 1 name2 MULTIPOINT (2.00000 1.00000, 0.00000 0.00000)\n\n >>> exploded = gdf.explode(index_parts=True)\n >>> exploded\n col1 geometry\n 0 0 name1 POINT (1.00000 2.00000)\n 1 name1 POINT (3.00000 4.00000)\n 1 0 name2 POINT (2.00000 1.00000)\n 1 name2 POINT (0.00000 0.00000)\n\n >>> exploded = gdf.explode(index_parts=False)\n >>> exploded\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 0 name1 POINT (3.00000 4.00000)\n 1 name2 POINT (2.00000 1.00000)\n 1 name2 POINT (0.00000 0.00000)\n\n >>> exploded = gdf.explode(ignore_index=True)\n >>> exploded\n col1 geometry\n 0 name1 POINT (1.00000 2.00000)\n 1 name1 POINT (3.00000 4.00000)\n 2 name2 POINT (2.00000 1.00000)\n 3 name2 POINT (0.00000 0.00000)\n\n See also\n --------\n GeoDataFrame.dissolve : dissolve geometries into a single observation.\n\n \"\"\"\n\n # If no column is specified then default to the active geometry column\n if column is None:\n column = self.geometry.name\n # If the specified column is not a geometry dtype use pandas explode\n if not isinstance(self[column].dtype, GeometryDtype):\n if compat.PANDAS_GE_11:\n return super().explode(column, ignore_index=ignore_index, **kwargs)\n else:\n return super().explode(column, **kwargs)\n\n if index_parts is None:\n if not ignore_index:\n warnings.warn(\n \"Currently, index_parts defaults to True, but in the future, \"\n \"it will default to False to be consistent with Pandas. \"\n \"Use `index_parts=True` to keep the current behavior and \"\n \"True/False to silence the warning.\",\n FutureWarning,\n stacklevel=2,\n )\n index_parts = True\n\n exploded_geom = self.geometry.reset_index(drop=True).explode(index_parts=True)\n\n df = GeoDataFrame(\n self.drop(self._geometry_column_name, axis=1).take(\n exploded_geom.index.droplevel(-1)\n ),\n geometry=exploded_geom.values,\n ).__finalize__(self)\n\n if ignore_index:\n df.reset_index(inplace=True, drop=True)\n elif index_parts:\n # reset to MultiIndex, otherwise df index is only first level of\n # exploded GeoSeries index.\n df = df.set_index(\n exploded_geom.index.droplevel(\n list(range(exploded_geom.index.nlevels - 1))\n ),\n append=True,\n )\n\n return df\n\n # overrides the pandas astype method to ensure the correct return type\n def astype(self, dtype, copy=True, errors=\"raise\", **kwargs):\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Returns a GeoDataFrame when the geometry column is kept as geometries,\n otherwise returns a pandas DataFrame.\n\n See the pandas.DataFrame.astype docstring for more details.\n\n Returns\n -------\n GeoDataFrame or DataFrame\n \"\"\"\n df = super().astype(dtype, copy=copy, errors=errors, **kwargs)\n\n try:\n geoms = df[self._geometry_column_name]\n if is_geometry_type(geoms):\n return geopandas.GeoDataFrame(df, geometry=self._geometry_column_name)\n except KeyError:\n pass\n # if the geometry column is converted to non-geometries or did not exist\n # do not return a GeoDataFrame\n return pd.DataFrame(df)\n\n def convert_dtypes(self, *args, **kwargs):\n \"\"\"\n Convert columns to best possible dtypes using dtypes supporting ``pd.NA``.\n\n Always returns a GeoDataFrame as no conversions are applied to the\n geometry column.\n\n See the pandas.DataFrame.convert_dtypes docstring for more details.\n\n Returns\n -------\n GeoDataFrame\n\n \"\"\"\n # Overridden to fix GH1870, that return type is not preserved always\n # (and where it was, geometry 
col was not)\n\n return GeoDataFrame(\n super().convert_dtypes(*args, **kwargs),\n geometry=self.geometry.name,\n crs=self.crs,\n )\n\n def to_postgis(\n self,\n name,\n con,\n schema=None,\n if_exists=\"fail\",\n index=False,\n index_label=None,\n chunksize=None,\n dtype=None,\n ):\n \"\"\"\n Upload GeoDataFrame into PostGIS database.\n\n This method requires SQLAlchemy and GeoAlchemy2, and a PostgreSQL\n Python driver (e.g. psycopg2) to be installed.\n\n Parameters\n ----------\n name : str\n Name of the target table.\n con : sqlalchemy.engine.Connection or sqlalchemy.engine.Engine\n Active connection to the PostGIS database.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists:\n\n - fail: Raise a ValueError.\n - replace: Drop the table before inserting new values.\n - append: Insert new values to the existing table.\n schema : string, optional\n Specify the schema. If None, use default schema: 'public'.\n index : bool, default False\n Write DataFrame index as a column.\n Uses *index_label* as the column name in the table.\n index_label : string or sequence, default None\n Column label for index column(s).\n If None is given (default) and index is True,\n then the index names are used.\n chunksize : int, optional\n Rows will be written in batches of this size at a time.\n By default, all rows will be written at once.\n dtype : dict of column name to SQL type, default None\n Specifying the datatype for columns.\n The keys should be the column names and the values\n should be the SQLAlchemy types.\n\n Examples\n --------\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine(\"postgresql://myusername:mypassword@myhost:5432\\\n/mydatabase\") # doctest: +SKIP\n >>> gdf.to_postgis(\"my_table\", engine) # doctest: +SKIP\n\n See also\n --------\n GeoDataFrame.to_file : write GeoDataFrame to file\n read_postgis : read PostGIS database to GeoDataFrame\n\n \"\"\"\n geopandas.io.sql._write_postgis(\n self, name, con, schema, if_exists, index, index_label, chunksize, dtype\n )\n\n #\n # Implement standard operators for GeoSeries\n #\n\n def __xor__(self, other):\n \"\"\"Implement ^ operator as for builtin set type\"\"\"\n warnings.warn(\n \"'^' operator will be deprecated. Use the 'symmetric_difference' \"\n \"method instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.geometry.symmetric_difference(other)\n\n def __or__(self, other):\n \"\"\"Implement | operator as for builtin set type\"\"\"\n warnings.warn(\n \"'|' operator will be deprecated. Use the 'union' method instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.geometry.union(other)\n\n def __and__(self, other):\n \"\"\"Implement & operator as for builtin set type\"\"\"\n warnings.warn(\n \"'&' operator will be deprecated. Use the 'intersection' method instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.geometry.intersection(other)\n\n def __sub__(self, other):\n \"\"\"Implement - operator as for builtin set type\"\"\"\n warnings.warn(\n \"'-' operator will be deprecated. 
Use the 'difference' method instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.geometry.difference(other)\n\n plot = CachedAccessor(\"plot\", geopandas.plotting.GeoplotAccessor)\n\n @doc(_explore)\n def explore(self, *args, **kwargs):\n \"\"\"Interactive map based on folium/leaflet.js\"\"\"\n return _explore(self, *args, **kwargs)\n\n def sjoin(self, df, *args, **kwargs):\n \"\"\"Spatial join of two GeoDataFrames.\n\n See the User Guide page :doc:`../../user_guide/mergingdata` for details.\n\n Parameters\n ----------\n df : GeoDataFrame\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n\n predicate : string, default 'intersects'\n Binary predicate. Valid values are determined by the spatial index used.\n You can check the valid values in left_df or right_df as\n ``left_df.sindex.valid_query_predicates`` or\n ``right_df.sindex.valid_query_predicates``\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n Examples\n --------\n >>> countries = geopandas.read_file( \\\n geopandas.datasets.get_path(\"naturalearth_lowres\"))\n >>> cities = geopandas.read_file( \\\n geopandas.datasets.get_path(\"naturalearth_cities\"))\n >>> countries.head() # doctest: +SKIP\n pop_est continent name \\\n iso_a3 gdp_md_est geometry\n 0 920938 Oceania Fiji FJI 8374.0 \\\n MULTIPOLYGON (((180.00000 -16.06713, 180.00000...\n 1 53950935 Africa Tanzania TZA 150600.0 \\\n POLYGON ((33.90371 -0.95000, 34.07262 -1.05982...\n 2 603253 Africa W. Sahara ESH 906.5 \\\n POLYGON ((-8.66559 27.65643, -8.66512 27.58948...\n 3 35623680 North America Canada CAN 1674000.0 \\\n MULTIPOLYGON (((-122.84000 49.00000, -122.9742...\n 4 326625791 North America United States of America USA 18560000.0 \\\n MULTIPOLYGON (((-122.84000 49.00000, -120.0000...\n >>> cities.head()\n name geometry\n 0 Vatican City POINT (12.45339 41.90328)\n 1 San Marino POINT (12.44177 43.93610)\n 2 Vaduz POINT (9.51667 47.13372)\n 3 Luxembourg POINT (6.13000 49.61166)\n 4 Palikir POINT (158.14997 6.91664)\n\n >>> cities_w_country_data = cities.sjoin(countries)\n >>> cities_w_country_data.head() # doctest: +SKIP\n name_left geometry index_right pop_est \\\n continent name_right iso_a3 gdp_md_est\n 0 Vatican City POINT (12.45339 41.90328) 141 62137802 \\\n Europe Italy ITA 2221000.0\n 1 San Marino POINT (12.44177 43.93610) 141 62137802 \\\n Europe Italy ITA 2221000.0\n 192 Rome POINT (12.48131 41.89790) 141 62137802 \\\n Europe Italy ITA 2221000.0\n 2 Vaduz POINT (9.51667 47.13372) 114 8754413 \\\n Europe Austria AUT 416600.0\n 184 Vienna POINT (16.36469 48.20196) 114 8754413 \\\n Europe Austria AUT 416600.0\n\n Notes\n ------\n Every operation in GeoPandas is planar, i.e. 
the potential third\n dimension is not taken into account.\n\n See also\n --------\n GeoDataFrame.sjoin_nearest : nearest neighbor join\n sjoin : equivalent top-level function\n \"\"\"\n return geopandas.sjoin(left_df=self, right_df=df, *args, **kwargs)\n\n def sjoin_nearest(\n self,\n right,\n how=\"inner\",\n max_distance=None,\n lsuffix=\"left\",\n rsuffix=\"right\",\n distance_col=None,\n ):\n \"\"\"\n Spatial join of two GeoDataFrames based on the distance between their\n geometries.\n\n Results will include multiple output records for a single input record\n where there are multiple equidistant nearest or intersected neighbors.\n\n See the User Guide page\n https://geopandas.readthedocs.io/en/latest/docs/user_guide/mergingdata.html\n for more details.\n\n\n Parameters\n ----------\n right : GeoDataFrame\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n\n max_distance : float, default None\n Maximum distance within which to query for nearest geometry.\n Must be greater than 0.\n The max_distance used to search for nearest items in the tree may have a\n significant impact on performance by reducing the number of input\n geometries that are evaluated for nearest items in the tree.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n distance_col : string, default None\n If set, save the distances computed between matching geometries under a\n column of this name in the joined GeoDataFrame.\n\n Examples\n --------\n >>> countries = geopandas.read_file(geopandas.datasets.get_\\\npath(\"naturalearth_lowres\"))\n >>> cities = geopandas.read_file(geopandas.datasets.get_path(\"naturalearth_citi\\\nes\"))\n >>> countries.head(2).name # doctest: +SKIP\n pop_est continent name \\\n iso_a3 gdp_md_est geometry\n 0 920938 Oceania Fiji FJI 8374.0 MULTI\\\n POLYGON (((180.00000 -16.06713, 180.00000...\n 1 53950935 Africa Tanzania TZA 150600.0 POLYG\\\n ON ((33.90371 -0.95000, 34.07262 -1.05982...\n >>> cities.head(2).name # doctest: +SKIP\n name geometry\n 0 Vatican City POINT (12.45339 41.90328)\n 1 San Marino POINT (12.44177 43.93610)\n\n >>> cities_w_country_data = cities.sjoin_nearest(countries)\n >>> cities_w_country_data[['name_left', 'name_right']].head(2) # doctest: +SKIP\n name_left geometry index_right pop_est continent n\\\n ame_right iso_a3 gdp_md_est\n 0 Vatican City POINT (12.45339 41.90328) 141 62137802 Europe \\\n Italy ITA 2221000.0\n 1 San Marino POINT (12.44177 43.93610) 141 62137802 Europe \\\n Italy ITA 2221000.0\n\n To include the distances:\n\n >>> cities_w_country_data = cities.sjoin_nearest(countries, \\\ndistance_col=\"distances\")\n >>> cities_w_country_data[[\"name_left\", \"name_right\", \\\n\"distances\"]].head(2) # doctest: +SKIP\n name_left name_right distances\n 0 Vatican City Italy 0.0\n 1 San Marino Italy 0.0\n\n In the following example, we get multiple cities for Italy because all results\n are equidistant (in this case zero because they intersect).\n In fact, we get 3 results in total:\n\n >>> countries_w_city_data = cities.sjoin_nearest(countries, \\\ndistance_col=\"distances\", how=\"right\")\n >>> italy_results = 
\\\ncountries_w_city_data[countries_w_city_data[\"name_left\"] == \"Italy\"]\n >>> italy_results # doctest: +SKIP\n name_x name_y\n 141 Vatican City Italy\n 141 San Marino Italy\n 141 Rome Italy\n\n See also\n --------\n GeoDataFrame.sjoin : binary predicate joins\n sjoin_nearest : equivalent top-level function\n\n Notes\n -----\n Since this join relies on distances, results will be inaccurate\n if your geometries are in a geographic CRS.\n\n Every operation in GeoPandas is planar, i.e. the potential third\n dimension is not taken into account.\n \"\"\"\n return geopandas.sjoin_nearest(\n self,\n right,\n how=how,\n max_distance=max_distance,\n lsuffix=lsuffix,\n rsuffix=rsuffix,\n distance_col=distance_col,\n )\n\n def clip(self, mask, keep_geom_type=False):\n \"\"\"Clip points, lines, or polygon geometries to the mask extent.\n\n Both layers must be in the same Coordinate Reference System (CRS).\n The GeoDataFrame will be clipped to the full extent of the `mask` object.\n\n If there are multiple polygons in mask, data from the GeoDataFrame will be\n clipped to the total boundary of all polygons in mask.\n\n Parameters\n ----------\n mask : GeoDataFrame, GeoSeries, (Multi)Polygon\n Polygon vector layer used to clip `gdf`.\n The mask's geometry is dissolved into one geometric feature\n and intersected with `gdf`.\n keep_geom_type : boolean, default False\n If True, return only geometries of original type in case of intersection\n resulting in multiple geometry types or GeometryCollections.\n If False, return all resulting geometries (potentially mixed types).\n\n Returns\n -------\n GeoDataFrame\n Vector data (points, lines, polygons) from `gdf` clipped to\n polygon boundary from mask.\n\n See also\n --------\n clip : equivalent top-level function\n\n Examples\n --------\n Clip points (global cities) with a polygon (the South American continent):\n\n >>> world = geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_lowres'))\n >>> south_america = world[world['continent'] == \"South America\"]\n >>> capitals = geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_cities'))\n >>> capitals.shape\n (202, 2)\n\n >>> sa_capitals = capitals.clip(south_america)\n >>> sa_capitals.shape\n (12, 2)\n \"\"\"\n return geopandas.clip(self, mask=mask, keep_geom_type=keep_geom_type)\n\n def overlay(self, right, how=\"intersection\", keep_geom_type=None, make_valid=True):\n \"\"\"Perform spatial overlay between GeoDataFrames.\n\n Currently only supports data GeoDataFrames with uniform geometry types,\n i.e. containing only (Multi)Polygons, or only (Multi)Points, or a\n combination of (Multi)LineString and LinearRing shapes.\n Implements several methods that are all effectively subsets of the union.\n\n See the User Guide page :doc:`../../user_guide/set_operations` for details.\n\n Parameters\n ----------\n right : GeoDataFrame\n how : string\n Method of spatial overlay: 'intersection', 'union',\n 'identity', 'symmetric_difference' or 'difference'.\n keep_geom_type : bool\n If True, return only geometries of the same geometry type the GeoDataFrame\n has, if False, return all resulting geometries. 
Default is None,\n which will set keep_geom_type to True but warn upon dropping\n geometries.\n make_valid : bool, default True\n If True, any invalid input geometries are corrected with a call to\n `buffer(0)`, if False, a `ValueError` is raised if any input geometries\n are invalid.\n\n Returns\n -------\n df : GeoDataFrame\n GeoDataFrame with new set of polygons and attributes\n resulting from the overlay\n\n Examples\n --------\n >>> from shapely.geometry import Polygon\n >>> polys1 = geopandas.GeoSeries([Polygon([(0,0), (2,0), (2,2), (0,2)]),\n ... Polygon([(2,2), (4,2), (4,4), (2,4)])])\n >>> polys2 = geopandas.GeoSeries([Polygon([(1,1), (3,1), (3,3), (1,3)]),\n ... Polygon([(3,3), (5,3), (5,5), (3,5)])])\n >>> df1 = geopandas.GeoDataFrame({'geometry': polys1, 'df1_data':[1,2]})\n >>> df2 = geopandas.GeoDataFrame({'geometry': polys2, 'df2_data':[1,2]})\n\n >>> df1.overlay(df2, how='union')\n df1_data df2_data geometry\n 0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n 3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n 5 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...\n 6 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....\n\n >>> df1.overlay(df2, how='intersection')\n df1_data df2_data geometry\n 0 1 1 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2 1 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2 2 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n\n >>> df1.overlay(df2, how='symmetric_difference')\n df1_data df2_data geometry\n 0 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 1 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n 2 NaN 1.0 MULTIPOLYGON (((2.00000 2.00000, 3.00000 2.000...\n 3 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....\n\n >>> df1.overlay(df2, how='difference')\n geometry df1_data\n 0 POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0.... 1\n 1 MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000... 2\n\n >>> df1.overlay(df2, how='identity')\n df1_data df2_data geometry\n 0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....\n 1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....\n 2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....\n 3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....\n 4 2.0 NaN MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000...\n\n See also\n --------\n GeoDataFrame.sjoin : spatial join\n overlay : equivalent top-level function\n\n Notes\n ------\n Every operation in GeoPandas is planar, i.e. the potential third\n dimension is not taken into account.\n \"\"\"\n return geopandas.overlay(\n self, right, how=how, keep_geom_type=keep_geom_type, make_valid=make_valid\n )\n\n\ndef _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):\n if inplace:\n raise ValueError(\n \"Can't do inplace setting when converting from DataFrame to GeoDataFrame\"\n )\n gf = GeoDataFrame(self)\n # this will copy so that BlockManager gets copied\n return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)\n\n\nDataFrame.set_geometry = _dataframe_set_geometry\n\nif not compat.PANDAS_GE_11: # i.e. on pandas 1.0.x\n _geodataframe_constructor_with_fallback._from_axes = GeoDataFrame._from_axes\n" ]
[ [ "pandas.isnull", "numpy.array", "pandas.api.types.is_list_like", "numpy.asarray", "pandas.api.types.is_scalar", "pandas.DataFrame.from_dict", "pandas.DataFrame", "pandas.core.accessor.CachedAccessor", "pandas.DataFrame.merge" ] ]
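Note on the row above: it carries the GeoDataFrame I/O and CRS surface of geopandas (to_json/iterfeatures, WKB/WKT/Parquet/Feather/file writers, set_crs/to_crs, dissolve/explode/sjoin/overlay). A minimal, self-contained sketch of the GeoJSON and re-projection round-trip those docstrings describe follows; it is illustrative only and not part of the archived source (it assumes geopandas, shapely, and pyproj are installed, as the row's code already requires).

import json

import geopandas
from shapely.geometry import Point

# Tiny GeoDataFrame with a geographic CRS, mirroring the doctests above.
gdf = geopandas.GeoDataFrame(
    {"col1": ["name1", "name2"], "geometry": [Point(1, 2), Point(2, 1)]},
    crs="EPSG:4326",
)

# to_json() serializes through iterfeatures()/_to_geo() into a FeatureCollection.
feature_collection = json.loads(gdf.to_json())
assert feature_collection["type"] == "FeatureCollection"
assert len(feature_collection["features"]) == 2

# to_crs() re-projects the coordinates; set_crs() would only relabel them.
projected = gdf.to_crs(epsg=3857)
print(projected.geometry.iloc[0])  # POINT (111319.49... 222684.20...)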
Yaakoubi/Struct-CKN
[ "fa007fa71310866584bdf2e5b038e6663b94e965" ]
[ "SDCA/sdca4crf/parameters/dense_weights.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom SDCA.sdca4crf.parameters.sparse_weights import SparsePrimalDirection\nfrom SDCA.sdca4crf.parameters.weights import WeightsWithoutEmission\nfrom SDCA.sdca4crf.utils import letters2wordimage\n\n\nclass DenseWeights(WeightsWithoutEmission):\n \"\"\"Implement the weights of the model.\n\n Support all the operations necessary for the CRF and the optimization.\n Is also used to store the primal direction for dense data.\n \"\"\"\n\n def __init__(self, emission=None, bias=None, transition=None,\n nb_labels=0, nb_features=0, is_dataset_sparse=False):\n\n super().__init__(bias=bias, transition=transition, nb_labels=nb_labels)\n\n self.is_dataset_sparse = is_dataset_sparse\n self.emission = np.zeros([nb_labels, nb_features]) if emission is None else emission\n\n # BUILD THE WEIGHTS FROM DATA\n def add_datapoint(self, points_sequence, labels_sequence):\n super().add_datapoint(points_sequence, labels_sequence)\n\n if self.is_dataset_sparse:\n for point, label in zip(points_sequence, labels_sequence):\n self.emission[label, point[point >= 0]] += 1\n else:\n for point, label in zip(points_sequence, labels_sequence):\n self.emission[label] += point\n\n def add_centroid(self, points_sequence, marginals):\n if marginals.islog:\n marginals = marginals.exp()\n\n super().add_centroid(points_sequence, marginals)\n if self.is_dataset_sparse: # slow?\n for point, unimarginal in zip(points_sequence, marginals.unary):\n self.emission[:, point[point >= 0]] += unimarginal[:, np.newaxis]\n else:\n self.emission += np.dot(marginals.unary.T, points_sequence)\n\n @classmethod\n def from_marginals(cls, points_sequence, marginals):\n \"\"\"Initialize the primal direction.\"\"\"\n # This being called means that the data set is dense\n weights = cls(nb_features=points_sequence.shape[1], nb_labels=marginals.nb_labels,\n is_dataset_sparse=False)\n weights.add_centroid(points_sequence, marginals)\n return weights\n\n # USE THE MODEL ON DATA\n def scores(self, points_sequence):\n unary_scores, binary_scores = super().scores(points_sequence)\n\n if self.is_dataset_sparse: # slow?\n for t, point in enumerate(points_sequence):\n unary_scores[t] += self.emission[:, point[point >= 0]].sum(axis=1)\n else:\n unary_scores += np.dot(points_sequence, self.emission.T)\n\n return unary_scores, binary_scores\n\n # ARITHMETIC OPERATIONS\n def __mul__(self, scalar):\n tmp = super().__mul__(scalar)\n emission = scalar * self.emission\n return DenseWeights(emission, tmp.bias, tmp.transition,\n is_dataset_sparse=self.is_dataset_sparse)\n\n def __iadd__(self, other):\n tmp = super().__iadd__(other)\n if isinstance(other, SparsePrimalDirection):\n self.emission[:, other.sparse_emission.active_set] += other.sparse_emission.values\n else:\n self.emission += other.emission\n return DenseWeights(self.emission, tmp.bias, tmp.transition,\n is_dataset_sparse=self.is_dataset_sparse)\n\n def __sub__(self, other):\n tmp = super().__sub__(other)\n emission = self.emission - other.emission\n return DenseWeights(emission, tmp.bias, tmp.transition,\n is_dataset_sparse=self.is_dataset_sparse)\n\n def squared_norm(self):\n return super().squared_norm() + np.sum(self.emission ** 2)\n\n def inner_product(self, other):\n ans = super().inner_product(other)\n if isinstance(other, SparsePrimalDirection):\n return ans + np.sum(self.emission[:, other.sparse_emission.active_set] *\n other.sparse_emission.values)\n else:\n return ans + np.sum(self.emission * other.emission)\n\n # MISCELLANEOUS\n def 
display(self):\n super().display()\n if self.is_dataset_sparse:\n emissions = letters2wordimage(self.emission)\n plt.matshow(emissions, cmap=\"Greys\")\n ticks_positions = np.linspace(0, emissions.shape[1],\n self.emission.shape[0] + 2).astype(int)[1:-1]\n plt.xticks(ticks_positions, np.arange(self.emission.shape[0]))\n plt.colorbar(fraction=0.046, pad=0.04)\n\n def to_array(self):\n return [self.transition, self.bias, self.emission]\n" ]
[ [ "matplotlib.pyplot.colorbar", "numpy.dot", "matplotlib.pyplot.matshow", "numpy.zeros", "numpy.sum", "numpy.arange", "numpy.linspace" ] ]
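In the dense branch of DenseWeights.scores above, the emission contribution to the unary scores is a single matrix product between the point sequence and the emission matrix. A standalone numpy sketch of just that step (toy shapes chosen for illustration; this is not importable Struct-CKN code):

import numpy as np

nb_labels, nb_features, seq_len = 26, 128, 10  # hypothetical sizes
emission = np.random.rand(nb_labels, nb_features)
points_sequence = np.random.rand(seq_len, nb_features)

# One dot product yields a (seq_len, nb_labels) table of unary emission scores.
unary_scores = np.dot(points_sequence, emission.T)
assert unary_scores.shape == (seq_len, nb_labels)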
ldylab/Facial-Recognition-Project-GDUT
[ "a5842cbfad7b8d6f906d50f39711ba55c663ea76" ]
[ "Deep Learning Method/T4_efficientnetV2/train.py" ]
[ "import os\nimport math\nimport argparse\nimport torch\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import transforms\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom model import efficientnetv2_s as create_model\nfrom my_dataset import MyDataSet\nfrom utils import read_split_data, train_one_epoch, evaluate\nimport pandas as pd\nimport time\n\ndef main(args):\n device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n\n print(args)\n print('Start Tensorboard with \"tensorboard --logdir=runs\", view at http://localhost:6006/')\n tb_writer = SummaryWriter()\n if os.path.exists(\"./weights\") is False:\n os.makedirs(\"./weights\")\n\n train_images_path, train_images_label, val_images_path, val_images_label = read_split_data(args.data_path)\n\n img_size = {\"s\": [300, 384], # train_size, val_size\n \"m\": [384, 480],\n \"l\": [384, 480]}\n num_model = \"s\"\n\n data_transform = {\n \"train\": transforms.Compose([transforms.RandomResizedCrop(img_size[num_model][0]),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),\n \"val\": transforms.Compose([transforms.Resize(img_size[num_model][1]),\n transforms.CenterCrop(img_size[num_model][1]),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])}\n\n # 实例化训练数据集\n train_dataset = MyDataSet(images_path=train_images_path,\n images_class=train_images_label,\n transform=data_transform[\"train\"])\n\n # 实例化验证数据集\n val_dataset = MyDataSet(images_path=val_images_path,\n images_class=val_images_label,\n transform=data_transform[\"val\"])\n\n batch_size = args.batch_size\n nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers\n print('Using {} dataloader workers every process'.format(nw))\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=nw,\n collate_fn=train_dataset.collate_fn)\n\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=batch_size,\n shuffle=False,\n pin_memory=True,\n num_workers=nw,\n collate_fn=val_dataset.collate_fn)\n\n # 如果存在预训练权重则载入\n model = create_model(num_classes=args.num_classes).to(device)\n if args.weights != \"\":\n if os.path.exists(args.weights):\n weights_dict = torch.load(args.weights, map_location=device)\n load_weights_dict = {k: v for k, v in weights_dict.items()\n if model.state_dict()[k].numel() == v.numel()}\n print(model.load_state_dict(load_weights_dict, strict=False))\n else:\n raise FileNotFoundError(\"not found weights file: {}\".format(args.weights))\n\n # 是否冻结权重\n if args.freeze_layers:\n for name, para in model.named_parameters():\n # 除head外,其他权重全部冻结\n if \"head\" not in name:\n para.requires_grad_(False)\n else:\n print(\"training {}\".format(name))\n\n pg = [p for p in model.parameters() if p.requires_grad]\n optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=1E-4)\n # Scheduler https://arxiv.org/pdf/1812.01187.pdf\n lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf # cosine\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n\n train_csv = pd.DataFrame(columns=['Loss', 'Acc'])\n\n for epoch in range(args.epochs):\n # train\n train_loss, train_acc = train_one_epoch(model=model,\n optimizer=optimizer,\n data_loader=train_loader,\n device=device,\n epoch=epoch)\n\n scheduler.step()\n\n # validate\n val_loss, val_acc = 
evaluate(model=model,\n data_loader=val_loader,\n device=device,\n epoch=epoch)\n\n tags = [\"train_loss\", \"train_acc\", \"val_loss\", \"val_acc\", \"learning_rate\"]\n tb_writer.add_scalar(tags[0], train_loss, epoch)\n tb_writer.add_scalar(tags[1], train_acc, epoch)\n tb_writer.add_scalar(tags[2], val_loss, epoch)\n tb_writer.add_scalar(tags[3], val_acc, epoch)\n tb_writer.add_scalar(tags[4], optimizer.param_groups[0][\"lr\"], epoch)\n\n train_csv = train_csv.append({'Loss': train_loss, 'Acc': val_acc}, ignore_index=True)\n\n torch.save(model.state_dict(), \"./weights/model-{}.pth\".format(epoch))\n\n csv_path = os.getcwd() + \"/DataSave/CSV/\"\n now = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime(time.time()))\n save_name = csv_path + now + csv_note + r\"--EfficientNetV2.csv\"\n train_csv.to_csv(save_name, mode='a', header=True, index=True)\n\nif __name__ == '__main__':\n epoch_times = int(input(\"Epoch Times = \"))\n csv_note = input(\"Notes for csv = \")\n image_root = lambda x: \"/Dataset/all/dataset_expression/\" if x == \"E\" else \"/Dataset/all/dataset_gender/\"\n image_path = os.path.dirname(os.getcwd()) + image_root(input(\"Gender(G) or Expression(E) = \")) # 表情识别\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_classes', type=int, default=2)\n parser.add_argument('--epochs', type=int, default=epoch_times)\n parser.add_argument('--batch-size', type=int, default=16)\n parser.add_argument('--lr', type=float, default=0.01)\n parser.add_argument('--lrf', type=float, default=0.01)\n\n # 数据集所在根目录\n # http://download.tensorflow.org/example_images/flower_photos.tgz\n parser.add_argument('--data-path', type=str,\n default=image_path)\n\n # download model weights\n # 链接: https://pan.baidu.com/s/1uZX36rvrfEss-JGj4yfzbQ 密码: 5gu1\n parser.add_argument('--weights', type=str, default='./pre_efficientnetv2-s.pth',\n help='initial weights path')\n parser.add_argument('--freeze-layers', type=bool, default=False)\n parser.add_argument('--device', default='cuda:0', help='device id (i.e. 0 or 0,1 or cpu)') # cuda:0\n\n opt = parser.parse_args()\n\n main(opt)\n" ]
[ [ "pandas.DataFrame", "torch.optim.SGD", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.optim.lr_scheduler.LambdaLR", "torch.utils.tensorboard.SummaryWriter" ] ]
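The train.py above anneals the learning rate with the cosine schedule cited in its comment (https://arxiv.org/pdf/1812.01187.pdf) through lr_scheduler.LambdaLR. A standalone check of that lambda's endpoints (lrf matches the script's 0.01 default; the epoch count is illustrative, since the script reads it from stdin):

import math

epochs, lrf = 30, 0.01
lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf

assert abs(lf(0) - 1.0) < 1e-9       # epoch 0: multiplier 1.0, full base LR
assert abs(lf(epochs) - lrf) < 1e-9  # end of schedule: multiplier decays to lrf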
miszkur/data-efficient-NLP
[ "7ad9d988cd17e769ab97802adbdeb6893429af10" ]
[ "src/run_data_aug_experiment.py" ]
[ "import numpy as np\nimport pandas as pd\nimport ml_collections\nimport time\nimport pickle\nimport os\nimport torch\n\nfrom codecarbon import EmissionsTracker\nfrom torch.utils.data import ConcatDataset, Subset, DataLoader\n\nfrom data_processing.ev_parser import create_dataloader, create_dataset\nfrom models.bert import BertClassifier\nfrom learner import Learner\n\n\ndef initialize_results_dict(classes_to_track):\n results = {\n 'accuracy': [], \n 'f1_score':[], \n 'train_time': [], \n 'training_emissions': []\n }\n for class_index in classes_to_track:\n results[class_index] = {\n 'accuracy': [], 'f1_score':[], 'recall': [], 'precision':[], 'incorrect_predictions':[]}\n return results\n\ndef train_all_data(\n config: ml_collections.ConfigDict, \n device: str,\n classes_to_track=[0,1,2,3,4,5,6,7],\n with_augmentations=True):\n\n valid_loader, test_loader = create_dataloader(config, 'valid')\n results = initialize_results_dict(classes_to_track)\n\n train_dataset = create_dataset()\n if with_augmentations:\n augmented_dataset = create_dataset(split='augmented')\n train_dataset = ConcatDataset([train_dataset, augmented_dataset])\n\n for _ in range(5):\n train_loader = DataLoader(\n train_dataset, \n batch_size=config.batch_size,\n shuffle=True)\n\n model = BertClassifier(config=config.bert) \n model.to(device)\n # Train\n learner = Learner(device, model, config.results_dir)\n tracker = EmissionsTracker()\n tracker.start()\n train_start_time = time.time()\n \n learner.train(\n config,\n train_loader=train_loader,\n validation_loader=valid_loader)\n\n results['train_time'].append(time.time() - train_start_time)\n results['training_emissions'].append(tracker.stop())\n \n # Evaluate\n metrics = learner.evaluate(test_loader, classes=classes_to_track)\n loss = metrics['loss']\n accuracy = metrics['accuracy']\n f1_score = metrics['f1_score']\n print(f'Test loss: {loss}, accuracy: {accuracy}, f1 score: {f1_score}')\n results['accuracy'].append(accuracy)\n results['f1_score'].append(f1_score)\n for class_index in classes_to_track:\n for metric_name, value in metrics['classes'][class_index].items():\n if metric_name in list(results[class_index].keys()):\n results[class_index][metric_name].append(value)\n\n print('Saving results..')\n results_path = os.path.join(config.results_dir, f'SUPERVISED_aug_{with_augmentations}.pkl')\n with open(results_path, 'wb') as fp:\n pickle.dump(results, fp)\n\n\n\ndef train_limited_data(\n config: ml_collections.ConfigDict, \n device: str,\n with_augmentations=True,\n data_size=300,\n classes_to_track=[0,1,2,3,4,5,6,7],\n num_splits = 2, # How many AL splits to take\n labeled_indexes=None):\n\n valid_loader, test_loader = create_dataloader(config, 'valid')\n results = initialize_results_dict(classes_to_track)\n\n train_data_path = os.path.join(config.data_dir, 'train_final.csv')\n aug_data_path = os.path.join(config.data_dir, 'augmented_final.csv')\n df_train = pd.read_csv(train_data_path)\n df_aug = pd.read_csv(aug_data_path)\n\n for i in range(5):\n if labeled_indexes is None:\n selected_sample = df_train.sample(n=data_size, random_state=config.seeds[i]) \n else:\n indexes = labeled_indexes[i][:num_splits] \n selected_indexes = np.array(indexes).flatten()\n selected_sample = df_train.iloc[selected_indexes]\n data_size = selected_sample.shape[0]\n if with_augmentations:\n augmented_sample = df_aug[np.isin(df_aug.id, selected_sample.id)]\n selected_sample = pd.concat([selected_sample, augmented_sample])\n selected_sample.drop(columns=['id'], inplace=True)\n 
selected_sample.reset_index(inplace=True)\n train_dataset = create_dataset(df=selected_sample)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, \n batch_size=config.batch_size,\n shuffle=True)\n\n model = BertClassifier(config=config.bert) \n model.to(device)\n # Train\n learner = Learner(device, model, config.results_dir)\n tracker = EmissionsTracker()\n tracker.start()\n train_start_time = time.time()\n \n learner.train(\n config,\n train_loader=train_loader,\n validation_loader=valid_loader)\n\n results['train_time'].append(time.time() - train_start_time)\n results['training_emissions'].append(tracker.stop())\n \n # Evaluate\n metrics = learner.evaluate(test_loader, classes=classes_to_track)\n loss = metrics['loss']\n accuracy = metrics['accuracy']\n f1_score = metrics['f1_score']\n print(f'Test loss: {loss}, accuracy: {accuracy}, f1 score: {f1_score}')\n results['accuracy'].append(accuracy)\n results['f1_score'].append(f1_score)\n for class_index in classes_to_track:\n for metric_name, value in metrics['classes'][class_index].items():\n if metric_name in list(results[class_index].keys()):\n results[class_index][metric_name].append(value)\n\n results_path = os.path.join(\n config.results_dir, f'SUPERVISED_{data_size}_aug_{with_augmentations}.pkl')\n print('Saving results to', results_path)\n with open(results_path, 'wb') as fp:\n pickle.dump(results, fp)\n" ]
[ [ "torch.utils.data.ConcatDataset", "numpy.array", "torch.utils.data.DataLoader", "pandas.concat", "pandas.read_csv", "numpy.isin" ] ]
eugenevinitsky/sequential_social_dilemma_games
[ "6de7a797b73e956315d9201200353065a6714f3e" ]
[ "social_dilemmas/envs/map_env.py" ]
[ "\"\"\"Base map class that defines the rendering process\n\"\"\"\n\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom gym.spaces import Box, Dict\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\nfrom ray.rllib.env import MultiAgentEnv\n\n_MAP_ENV_ACTIONS = {\n \"MOVE_LEFT\": [0, -1], # Move left\n \"MOVE_RIGHT\": [0, 1], # Move right\n \"MOVE_UP\": [-1, 0], # Move up\n \"MOVE_DOWN\": [1, 0], # Move down\n \"STAY\": [0, 0], # don't move\n \"TURN_CLOCKWISE\": [[0, 1], [-1, 0]], # Clockwise rotation matrix\n \"TURN_COUNTERCLOCKWISE\": [[0, -1], [1, 0]],\n} # Counter clockwise rotation matrix\n# Positive Theta is in the counterclockwise direction\n\nORIENTATIONS = {\"LEFT\": [0, -1], \"RIGHT\": [0, 1], \"UP\": [-1, 0], \"DOWN\": [1, 0]}\n\nDEFAULT_COLOURS = {\n b\" \": np.array([0, 0, 0], dtype=np.uint8), # Black background\n b\"0\": np.array([0, 0, 0], dtype=np.uint8), # Black background beyond map walls\n b\"\": np.array([180, 180, 180], dtype=np.uint8), # Grey board walls\n b\"@\": np.array([180, 180, 180], dtype=np.uint8), # Grey board walls\n b\"A\": np.array([0, 255, 0], dtype=np.uint8), # Green apples\n b\"F\": np.array([255, 255, 0], dtype=np.uint8), # Yellow firing beam\n b\"P\": np.array([159, 67, 255], dtype=np.uint8), # Generic agent (any player)\n # Colours for agents. R value is a unique identifier\n b\"1\": np.array([0, 0, 255], dtype=np.uint8), # Pure blue\n b\"2\": np.array([2, 81, 154], dtype=np.uint8), # Sky blue\n b\"3\": np.array([204, 0, 204], dtype=np.uint8), # Magenta\n b\"4\": np.array([216, 30, 54], dtype=np.uint8), # Red\n b\"5\": np.array([254, 151, 0], dtype=np.uint8), # Orange\n b\"6\": np.array([100, 255, 255], dtype=np.uint8), # Cyan\n b\"7\": np.array([99, 99, 255], dtype=np.uint8), # Lavender\n b\"8\": np.array([250, 204, 255], dtype=np.uint8), # Pink\n b\"9\": np.array([238, 223, 16], dtype=np.uint8), # Yellow\n}\n\n# the axes look like this when printed out\n# WARNING: increasing array position in the direction of down\n# so for example if you move_left when facing left\n# your y position decreases.\n# ^\n# |\n# U\n# P\n# <--LEFT * RIGHT---->\n# D\n# O\n# W\n# N\n# |\n\n\nclass MapEnv(MultiAgentEnv):\n def __init__(\n self,\n ascii_map,\n extra_actions,\n view_len,\n num_agents=1,\n color_map=None,\n return_agent_actions=False,\n use_collective_reward=False,\n ):\n \"\"\"\n\n Parameters\n ----------\n ascii_map: list of strings\n Specify what the map should look like. 
Look at constant.py for\n further explanation\n extra_actions: dict with action name-value pair\n Environment-specific actions that are not present in _MAP_ENV_ACTIONS\n num_agents: int\n Number of agents to have in the system.\n color_map: dict\n Specifies how to convert between ascii chars and colors\n return_agent_actions: bool\n If true, we the action space will include the actions of other agents\n \"\"\"\n self.num_agents = num_agents\n self.base_map = self.ascii_to_numpy(ascii_map)\n self.view_len = view_len\n self.map_padding = view_len\n self.return_agent_actions = return_agent_actions\n self.use_collective_reward = use_collective_reward\n self.all_actions = _MAP_ENV_ACTIONS.copy()\n self.all_actions.update(extra_actions)\n # Map without agents or beams\n self.world_map = np.full(\n (len(self.base_map), len(self.base_map[0])), fill_value=b\" \", dtype=\"c\"\n )\n # Color mapping\n self.color_map = color_map if color_map is not None else DEFAULT_COLOURS.copy()\n # World map image\n self.world_map_color = np.full(\n (len(self.base_map) + view_len * 2, len(self.base_map[0]) + view_len * 2, 3),\n fill_value=0,\n dtype=np.uint8,\n )\n self.beam_pos = []\n\n self.agents = {}\n\n # returns the agent at a desired position if there is one\n self.pos_dict = {}\n self.spawn_points = [] # where agents can appear\n\n self.wall_points = []\n for row in range(self.base_map.shape[0]):\n for col in range(self.base_map.shape[1]):\n if self.base_map[row, col] == b\"P\":\n self.spawn_points.append([row, col])\n elif self.base_map[row, col] == b\"@\":\n self.wall_points.append([row, col])\n self.setup_agents()\n\n @property\n def observation_space(self):\n obs_space = {\n \"curr_obs\": Box(\n low=0,\n high=255,\n shape=(2 * self.view_len + 1, 2 * self.view_len + 1, 3),\n dtype=np.uint8,\n )\n }\n if self.return_agent_actions:\n # Append the actions of other agents\n obs_space = {\n **obs_space,\n \"other_agent_actions\": Box(\n low=0, high=len(self.all_actions), shape=(self.num_agents - 1,), dtype=np.uint8,\n ),\n \"visible_agents\": Box(low=0, high=1, shape=(self.num_agents - 1,), dtype=np.uint8,),\n \"prev_visible_agents\": Box(\n low=0, high=1, shape=(self.num_agents - 1,), dtype=np.uint8,\n ),\n }\n obs_space = Dict(obs_space)\n # Change dtype so that ray can put all observations into one flat batch\n # with the correct dtype.\n # See DictFlatteningPreprocessor in ray/rllib/models/preprocessors.py.\n obs_space.dtype = np.uint8\n return obs_space\n\n def custom_reset(self):\n \"\"\"Reset custom elements of the map. 
For example, spawn apples and build walls\"\"\"\n pass\n\n def custom_action(self, agent, action):\n \"\"\"Execute any custom actions that may be defined, like fire or clean\n\n Parameters\n ----------\n agent: agent that is taking the action\n action: key of the action to be taken\n\n Returns\n -------\n updates: list(list(row, col, char))\n List of cells to place onto the map\n \"\"\"\n pass\n\n def custom_map_update(self):\n \"\"\"Custom map updates that don't have to do with agent actions\"\"\"\n pass\n\n def setup_agents(self):\n \"\"\"Construct all the agents for the environment\"\"\"\n raise NotImplementedError\n\n # FIXME(ev) move this to a utils eventually\n def ascii_to_numpy(self, ascii_list):\n \"\"\"converts a list of strings into a numpy array\n\n\n Parameters\n ----------\n ascii_list: list of strings\n List describing what the map should look like\n Returns\n -------\n arr: np.ndarray\n numpy array describing the map with ' ' indicating an empty space\n \"\"\"\n\n arr = np.full((len(ascii_list), len(ascii_list[0])), b\" \", dtype=\"c\")\n for row in range(arr.shape[0]):\n for col in range(arr.shape[1]):\n arr[row, col] = ascii_list[row][col]\n return arr\n\n def step(self, actions):\n \"\"\"Takes in a dict of actions and converts them to a map update\n\n Parameters\n ----------\n actions: dict {agent-id: int}\n dict of actions, keyed by agent-id that are passed to the agent. The agent\n interprets the int and converts it to a command\n\n Returns\n -------\n observations: dict of arrays representing agent observations\n rewards: dict of rewards for each agent\n dones: dict indicating whether each agent is done\n info: dict to pass extra info to gym\n \"\"\"\n\n self.beam_pos = []\n agent_actions = {}\n for agent_id, action in actions.items():\n agent_action = self.agents[agent_id].action_map(action)\n agent_actions[agent_id] = agent_action\n\n # Remove agents from color map\n for agent in self.agents.values():\n row, col = agent.pos[0], agent.pos[1]\n self.single_update_world_color_map(row, col, self.world_map[row, col])\n\n self.update_moves(agent_actions)\n\n for agent in self.agents.values():\n pos = agent.pos\n new_char = agent.consume(self.world_map[pos[0], pos[1]])\n self.single_update_map(pos[0], pos[1], new_char)\n\n # execute custom moves like firing\n self.update_custom_moves(agent_actions)\n\n # execute spawning events\n self.custom_map_update()\n\n map_with_agents = self.get_map_with_agents()\n # Add agents to color map\n for agent in self.agents.values():\n row, col = agent.pos[0], agent.pos[1]\n # Firing beams have priority over agents and should cover them\n if self.world_map[row, col] not in [b\"F\", b\"C\"]:\n self.single_update_world_color_map(row, col, agent.get_char_id())\n\n observations = {}\n rewards = {}\n dones = {}\n info = {}\n for agent in self.agents.values():\n agent.full_map = map_with_agents\n rgb_arr = self.color_view(agent)\n # concatenate on the prev_actions to the observations\n if self.return_agent_actions:\n prev_actions = np.array(\n [actions[key] for key in sorted(actions.keys()) if key != agent.agent_id]\n ).astype(np.uint8)\n visible_agents = self.find_visible_agents(agent.agent_id)\n observations[agent.agent_id] = {\n \"curr_obs\": rgb_arr,\n \"other_agent_actions\": prev_actions,\n \"visible_agents\": visible_agents,\n \"prev_visible_agents\": agent.prev_visible_agents,\n }\n agent.prev_visible_agents = visible_agents\n else:\n observations[agent.agent_id] = {\"curr_obs\": rgb_arr}\n rewards[agent.agent_id] = 
agent.compute_reward()\n dones[agent.agent_id] = agent.get_done()\n\n if self.use_collective_reward:\n collective_reward = sum(rewards.values())\n for agent in rewards.keys():\n rewards[agent] = collective_reward\n\n dones[\"__all__\"] = np.any(list(dones.values()))\n return observations, rewards, dones, info\n\n def reset(self):\n \"\"\"Reset the environment.\n\n This method is performed in between rollouts. It resets the state of\n the environment.\n\n Returns\n -------\n observation: dict of numpy ndarray\n the initial observation of the space. The initial reward is assumed\n to be zero.\n \"\"\"\n self.beam_pos = []\n self.agents = {}\n self.setup_agents()\n self.reset_map()\n self.custom_map_update()\n\n map_with_agents = self.get_map_with_agents()\n\n observations = {}\n for agent in self.agents.values():\n agent.full_map = map_with_agents\n rgb_arr = self.color_view(agent)\n # concatenate on the prev_actions to the observations\n if self.return_agent_actions:\n # No previous actions so just pass in \"wait\" action\n prev_actions = np.array([4 for _ in range(self.num_agents - 1)]).astype(np.uint8)\n visible_agents = self.find_visible_agents(agent.agent_id)\n observations[agent.agent_id] = {\n \"curr_obs\": rgb_arr,\n \"other_agent_actions\": prev_actions,\n \"visible_agents\": visible_agents,\n \"prev_visible_agents\": visible_agents,\n }\n agent.prev_visible_agents = visible_agents\n else:\n observations[agent.agent_id] = {\"curr_obs\": rgb_arr}\n return observations\n\n @property\n def agent_pos(self):\n return [agent.pos.tolist() for agent in self.agents.values()]\n\n def get_map_with_agents(self):\n \"\"\"Gets a version of the environment map where generic\n 'P' characters have been replaced with specific agent IDs.\n\n Returns:\n 2D array of strings representing the map.\n \"\"\"\n grid = np.copy(self.world_map)\n\n for agent in self.agents.values():\n char_id = agent.get_char_id()\n\n # If agent is not within map, skip.\n if not (0 <= agent.pos[0] < grid.shape[0] and 0 <= agent.pos[1] < grid.shape[1]):\n continue\n\n grid[agent.pos[0], agent.pos[1]] = char_id\n\n # beams should overlay agents\n for beam_pos in self.beam_pos:\n grid[beam_pos[0], beam_pos[1]] = beam_pos[2]\n\n return grid\n\n def check_agent_map(self, agent_map):\n \"\"\"Checks the map to make sure agents aren't duplicated\"\"\"\n unique, counts = np.unique(agent_map, return_counts=True)\n count_dict = dict(zip(unique, counts))\n\n # check for multiple agents\n for i in range(self.num_agents):\n if count_dict[chr(i + 1)] != 1:\n print(\"Error! 
Wrong number of agent\", i, \"in map!\")\n return False\n return True\n\n def full_map_to_colors(self):\n map_with_agents = self.get_map_with_agents()\n rgb_arr = np.zeros((map_with_agents.shape[0], map_with_agents.shape[1], 3), dtype=int)\n return self.map_to_colors(map_with_agents, self.color_map, rgb_arr)\n\n def color_view(self, agent):\n row, col = agent.pos[0], agent.pos[1]\n view_slice = self.world_map_color[\n row + self.map_padding - self.view_len : row + self.map_padding + self.view_len + 1,\n col + self.map_padding - self.view_len : col + self.map_padding + self.view_len + 1,\n ]\n if agent.orientation == \"UP\":\n rotated_view = view_slice\n elif agent.orientation == \"LEFT\":\n rotated_view = np.rot90(view_slice)\n elif agent.orientation == \"DOWN\":\n rotated_view = np.rot90(view_slice, k=2)\n elif agent.orientation == \"RIGHT\":\n rotated_view = np.rot90(view_slice, k=1, axes=(1, 0))\n return rotated_view\n\n def map_to_colors(self, mmap, color_map, rgb_arr, orientation=\"UP\"):\n \"\"\"Converts a map to an array of RGB values.\n Parameters\n ----------\n mmap: np.ndarray\n map to convert to colors\n Double m to avoid shadowing map.\n color_map: dict\n mapping between array elements and desired colors\n rgb_arr: np.array\n Variable to store the mapping in\n orientation:\n The way in which the output should be oriented.\n UP = no rotation.\n RIGHT = Clockwise 90 degree rotation.\n DOWN = Clockwise 180 degree rotation.\n LEFT = Clockwise 270 degree rotation.\n Returns\n -------\n arr: np.ndarray\n 3-dim numpy array consisting of color map\n \"\"\"\n x_len = mmap.shape[0]\n y_len = mmap.shape[1]\n if orientation == \"UP\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[mmap[row_elem, col_elem]]\n elif orientation == \"LEFT\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[mmap[col_elem, x_len - 1 - row_elem]]\n elif orientation == \"DOWN\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[\n mmap[x_len - 1 - row_elem, y_len - 1 - col_elem]\n ]\n elif orientation == \"RIGHT\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[mmap[y_len - 1 - col_elem, row_elem]]\n else:\n raise ValueError(\"Orientation {} is not valid\".format(orientation))\n\n return rgb_arr\n\n def render(self, filename=None):\n \"\"\" Creates an image of the map to plot or save.\n\n Args:\n filename: If a string is passed, will save the image\n to disk at this location.\n \"\"\"\n rgb_arr = self.full_map_to_colors()\n plt.cla()\n plt.imshow(rgb_arr, interpolation=\"nearest\")\n if filename is None:\n plt.show()\n else:\n plt.savefig(filename)\n\n def update_moves(self, agent_actions):\n \"\"\"Converts agent action tuples into a new map and new agent positions.\n Also resolves conflicts over multiple agents wanting a cell.\n\n This method works by finding all conflicts over a cell and randomly assigning them\n to one of the agents that desires the slot. It then sets all of the other agents\n that wanted the cell to have a move of staying. 
For moves that do not directly\n conflict with another agent for a cell, but may not be temporarily resolvable\n due to an agent currently being in the desired cell, we continually loop through\n the actions until all moves have been satisfied or deemed impossible.\n For example, agent 1 may want to move from [1,2] to [2,2] but agent 2 is in [2,2].\n Agent 2, however, is moving into [3,2]. Agent-1's action is first in the order so at the\n first pass it is skipped but agent-2 moves to [3,2]. In the second pass, agent-1 will\n then be able to move into [2,2].\n\n Parameters\n ----------\n agent_actions: dict\n dict with agent_id as key and action as value\n \"\"\"\n\n reserved_slots = []\n for agent_id, action in agent_actions.items():\n agent = self.agents[agent_id]\n selected_action = self.all_actions[action]\n # TODO(ev) these two parts of the actions\n if \"MOVE\" in action or \"STAY\" in action:\n # rotate the selected action appropriately\n rot_action = self.rotate_action(selected_action, agent.get_orientation())\n new_pos = agent.pos + rot_action\n # allow the agents to confirm what position they can move to\n new_pos = agent.return_valid_pos(new_pos)\n reserved_slots.append((*new_pos, b\"P\", agent_id))\n elif \"TURN\" in action:\n new_rot = self.update_rotation(action, agent.get_orientation())\n agent.update_agent_rot(new_rot)\n\n # now do the conflict resolution part of the process\n\n # helpful for finding the agent in the conflicting slot\n agent_by_pos = {tuple(agent.pos): agent.agent_id for agent in self.agents.values()}\n\n # agent moves keyed by ids\n agent_moves = {}\n\n # lists of moves and their corresponding agents\n move_slots = []\n agent_to_slot = []\n\n for slot in reserved_slots:\n row, col = slot[0], slot[1]\n if slot[2] == b\"P\":\n agent_id = slot[3]\n agent_moves[agent_id] = [row, col]\n move_slots.append([row, col])\n agent_to_slot.append(agent_id)\n\n # cut short the computation if there are no moves\n if len(agent_to_slot) > 0:\n\n # first we will resolve all slots over which multiple agents\n # want the slot\n\n # shuffle so that a random agent has slot priority\n shuffle_list = list(zip(agent_to_slot, move_slots))\n np.random.shuffle(shuffle_list)\n agent_to_slot, move_slots = zip(*shuffle_list)\n unique_move, indices, return_count = np.unique(\n move_slots, return_index=True, return_counts=True, axis=0\n )\n search_list = np.array(move_slots)\n\n # first go through and remove moves that can't possible happen. Three types\n # 1. Trying to move into an agent that has been issued a stay command\n # 2. Trying to move into the spot of an agent that doesn't have a move\n # 3. 
Two agents trying to walk through one another\n\n # Resolve all conflicts over a space\n if np.any(return_count > 1):\n for move, index, count in zip(unique_move, indices, return_count):\n if count > 1:\n # check that the cell you are fighting over doesn't currently\n # contain an agent that isn't going to move for one of the agents\n # If it does, all the agents commands should become STAY\n # since no moving will be possible\n conflict_indices = np.where((search_list == move).all(axis=1))[0]\n all_agents_id = [agent_to_slot[i] for i in conflict_indices]\n # all other agents now stay in place so update their moves\n # to reflect this\n conflict_cell_free = True\n for agent_id in all_agents_id:\n moves_copy = agent_moves.copy()\n # TODO(ev) code duplication, simplify\n if move.tolist() in self.agent_pos:\n # find the agent that is currently at that spot and make sure\n # that the move is possible. If it won't be, remove it.\n conflicting_agent_id = agent_by_pos[tuple(move)]\n curr_pos = self.agents[agent_id].pos.tolist()\n curr_conflict_pos = self.agents[conflicting_agent_id].pos.tolist()\n conflict_move = agent_moves.get(\n conflicting_agent_id, curr_conflict_pos\n )\n # Condition (1):\n # a STAY command has been issued\n if agent_id == conflicting_agent_id:\n conflict_cell_free = False\n # Condition (2)\n # its command is to stay\n # or you are trying to move into an agent that hasn't\n # received a command\n elif (\n conflicting_agent_id not in moves_copy.keys()\n or curr_conflict_pos == conflict_move\n ):\n conflict_cell_free = False\n\n # Condition (3)\n # It is trying to move into you and you are moving into it\n elif conflicting_agent_id in moves_copy.keys():\n if (\n agent_moves[conflicting_agent_id] == curr_pos\n and move.tolist()\n == self.agents[conflicting_agent_id].pos.tolist()\n ):\n conflict_cell_free = False\n\n # if the conflict cell is open, let one of the conflicting agents\n # move into it\n if conflict_cell_free:\n self.agents[agent_to_slot[index]].update_agent_pos(move)\n agent_by_pos = {\n tuple(agent.pos): agent.agent_id for agent in self.agents.values()\n }\n # ------------------------------------\n # remove all the other moves that would have conflicted\n remove_indices = np.where((search_list == move).all(axis=1))[0]\n all_agents_id = [agent_to_slot[i] for i in remove_indices]\n # all other agents now stay in place so update their moves\n # to stay in place\n for agent_id in all_agents_id:\n agent_moves[agent_id] = self.agents[agent_id].pos.tolist()\n\n # make the remaining un-conflicted moves\n while len(agent_moves.items()) > 0:\n agent_by_pos = {tuple(agent.pos): agent.agent_id for agent in self.agents.values()}\n num_moves = len(agent_moves.items())\n moves_copy = agent_moves.copy()\n del_keys = []\n for agent_id, move in moves_copy.items():\n if agent_id in del_keys:\n continue\n if move in self.agent_pos:\n # find the agent that is currently at that spot and make sure\n # that the move is possible. 
If it won't be, remove it.\n conflicting_agent_id = agent_by_pos[tuple(move)]\n curr_pos = self.agents[agent_id].pos.tolist()\n curr_conflict_pos = self.agents[conflicting_agent_id].pos.tolist()\n conflict_move = agent_moves.get(conflicting_agent_id, curr_conflict_pos)\n # Condition (1):\n # a STAY command has been issued\n if agent_id == conflicting_agent_id:\n del agent_moves[agent_id]\n del_keys.append(agent_id)\n # Condition (2)\n # its command is to stay\n # or you are trying to move into an agent that hasn't received a command\n elif (\n conflicting_agent_id not in moves_copy.keys()\n or curr_conflict_pos == conflict_move\n ):\n del agent_moves[agent_id]\n del_keys.append(agent_id)\n # Condition (3)\n # It is trying to move into you and you are moving into it\n elif conflicting_agent_id in moves_copy.keys():\n if (\n agent_moves[conflicting_agent_id] == curr_pos\n and move == self.agents[conflicting_agent_id].pos.tolist()\n ):\n del agent_moves[conflicting_agent_id]\n del agent_moves[agent_id]\n del_keys.append(agent_id)\n del_keys.append(conflicting_agent_id)\n # this move is unconflicted so go ahead and move\n else:\n self.agents[agent_id].update_agent_pos(move)\n del agent_moves[agent_id]\n del_keys.append(agent_id)\n\n # no agent is able to move freely, so just move them all\n # no updates to hidden cells are needed since all the\n # same cells will be covered\n if len(agent_moves) == num_moves:\n for agent_id, move in agent_moves.items():\n self.agents[agent_id].update_agent_pos(move)\n break\n\n def update_custom_moves(self, agent_actions):\n for agent_id, action in agent_actions.items():\n # check its not a move based action\n if \"MOVE\" not in action and \"STAY\" not in action and \"TURN\" not in action:\n agent = self.agents[agent_id]\n updates = self.custom_action(agent, action)\n if len(updates) > 0:\n self.update_map(updates)\n\n def update_map(self, new_points):\n \"\"\"For points in new_points, place desired char on the map\n Update the color map as well\"\"\"\n for point in new_points:\n self.single_update_map(*point)\n\n def single_update_map(self, row, col, char):\n self.world_map[row, col] = char\n self.world_map_color[row + self.map_padding, col + self.map_padding] = self.color_map[char]\n\n def single_update_world_color_map(self, row, col, char):\n \"\"\"Only update the color map. 
This is done separately when agents move, because their own\n position state is not contained in self.world_map, but in their own Agent objects\"\"\"\n self.world_map_color[row + self.map_padding, col + self.map_padding] = self.color_map[char]\n\n def reset_map(self):\n \"\"\"Resets the map to be empty as well as a custom reset set by subclasses\"\"\"\n self.world_map = np.full((len(self.base_map), len(self.base_map[0])), b\" \", dtype=\"c\")\n self.world_map_color = np.full(\n (len(self.base_map) + self.view_len * 2, len(self.base_map[0]) + self.view_len * 2, 3),\n fill_value=0,\n dtype=np.uint8,\n )\n self.build_walls()\n self.custom_reset()\n\n def update_map_fire(\n self,\n firing_pos,\n firing_orientation,\n fire_len,\n fire_char,\n cell_types=[],\n update_char=[],\n blocking_cells=b\"P\",\n beam_width=3,\n ):\n \"\"\"From a firing position, fire a beam that may clean or hit agents\n\n Notes:\n (1) Beams are blocked by agents\n (2) A beam travels along until it hits a blocking cell at which beam the beam\n covers that cell and stops\n (3) If a beam hits a cell whose character is in cell_types, it replaces it with\n the corresponding index in update_char\n (4) As per the rules, the beams fire from in front of the agent and on its\n sides so the beam that starts in front of the agent travels out one\n cell further than it does along the sides.\n (5) This method updates the beam_pos, an internal representation of how\n which cells need to be rendered with fire_char in the agent view\n\n Parameters\n ----------\n firing_pos: (list)\n the row, col from which the beam is fired\n firing_orientation: (string)\n the direction the beam is to be fired in\n fire_len: (int)\n the number of cells forward to fire\n fire_char: (bytes)\n the cell that should be placed where the beam goes\n cell_types: (list of bytes)\n the cells that are affected by the beam\n update_char: (list of bytes)\n the character that should replace the affected cells.\n blocking_cells: (list of bytes)\n cells that block the firing beam\n Returns\n -------\n updates: (tuple (row, col, char))\n the cells that have been hit by the beam and what char will be placed there\n \"\"\"\n agent_by_pos = {tuple(agent.pos): agent_id for agent_id, agent in self.agents.items()}\n start_pos = np.asarray(firing_pos)\n firing_direction = ORIENTATIONS[firing_orientation]\n # compute the other two starting positions\n right_shift = self.rotate_right(firing_direction)\n if beam_width == 1:\n firing_pos = [start_pos]\n elif beam_width == 3:\n firing_pos = [\n start_pos,\n start_pos + right_shift - firing_direction,\n start_pos - right_shift - firing_direction,\n ]\n else:\n raise NotImplementedError()\n firing_points = []\n updates = []\n for pos in firing_pos:\n next_cell = pos + firing_direction\n for i in range(fire_len):\n if (\n self.test_if_in_bounds(next_cell)\n and self.world_map[next_cell[0], next_cell[1]] != b\"@\"\n ):\n # Update the cell if needed\n firing_points.append((next_cell[0], next_cell[1], fire_char))\n for c in range(len(cell_types)):\n if self.world_map[next_cell[0], next_cell[1]] == cell_types[c]:\n updates.append((next_cell[0], next_cell[1], update_char[c]))\n break\n\n # agents absorb beams\n # activate the agents hit function if needed\n if [next_cell[0], next_cell[1]] in self.agent_pos:\n agent_id = agent_by_pos[(next_cell[0], next_cell[1])]\n self.agents[agent_id].hit(fire_char)\n break\n\n # check if the cell blocks beams. 
For example, waste blocks beams.\n if self.world_map[next_cell[0], next_cell[1]] in blocking_cells:\n break\n\n # increment the beam position\n next_cell += firing_direction\n\n else:\n break\n\n self.beam_pos += firing_points\n return updates\n\n def spawn_point(self):\n \"\"\"Returns a randomly selected spawn point.\"\"\"\n spawn_index = 0\n is_free_cell = False\n curr_agent_pos = [agent.pos.tolist() for agent in self.agents.values()]\n random.shuffle(self.spawn_points)\n for i, spawn_point in enumerate(self.spawn_points):\n if [spawn_point[0], spawn_point[1]] not in curr_agent_pos:\n spawn_index = i\n is_free_cell = True\n assert is_free_cell, \"There are not enough spawn points! Check your map?\"\n return np.array(self.spawn_points[spawn_index])\n\n def spawn_rotation(self):\n \"\"\"Return a randomly selected initial rotation for an agent\"\"\"\n rand_int = np.random.randint(len(ORIENTATIONS.keys()))\n return list(ORIENTATIONS.keys())[rand_int]\n\n def build_walls(self):\n for i in range(len(self.wall_points)):\n row, col = self.wall_points[i]\n self.single_update_map(row, col, b\"@\")\n\n ########################################\n # Utility methods, move these eventually\n ########################################\n\n # TODO(ev) this can be a general property of map_env or a util\n def rotate_action(self, action_vec, orientation):\n # WARNING: Note, we adopt the physics convention that \\theta=0 is in the +y direction\n if orientation == \"UP\":\n return action_vec\n elif orientation == \"LEFT\":\n return self.rotate_left(action_vec)\n elif orientation == \"RIGHT\":\n return self.rotate_right(action_vec)\n else:\n return self.rotate_left(self.rotate_left(action_vec))\n\n def rotate_left(self, action_vec):\n return np.dot(self.all_actions[\"TURN_COUNTERCLOCKWISE\"], action_vec)\n\n def rotate_right(self, action_vec):\n return np.dot(self.all_actions[\"TURN_CLOCKWISE\"], action_vec)\n\n # TODO(ev) this should be an agent property\n def update_rotation(self, action, curr_orientation):\n if action == \"TURN_COUNTERCLOCKWISE\":\n if curr_orientation == \"LEFT\":\n return \"DOWN\"\n elif curr_orientation == \"DOWN\":\n return \"RIGHT\"\n elif curr_orientation == \"RIGHT\":\n return \"UP\"\n else:\n return \"LEFT\"\n else:\n if curr_orientation == \"LEFT\":\n return \"UP\"\n elif curr_orientation == \"UP\":\n return \"RIGHT\"\n elif curr_orientation == \"RIGHT\":\n return \"DOWN\"\n else:\n return \"LEFT\"\n\n # TODO(ev) this definitely should go into utils or the general agent class\n def test_if_in_bounds(self, pos):\n \"\"\"Checks if a selected cell is outside the range of the map\"\"\"\n return 0 <= pos[0] < self.world_map.shape[0] and 0 <= pos[1] < self.world_map.shape[1]\n\n def find_visible_agents(self, agent_id):\n \"\"\"Returns all the agents that can be seen by agent with agent_id\n Args\n ----\n agent_id: str\n The id of the agent whose visible agents we are asking about\n Returns\n -------\n visible_agents: list\n which agents can be seen by the agent with id \"agent_id\"\n \"\"\"\n agent_pos = self.agents[agent_id].pos\n upper_lim = int(agent_pos[0] + self.agents[agent_id].row_size)\n lower_lim = int(agent_pos[0] - self.agents[agent_id].row_size)\n left_lim = int(agent_pos[1] - self.agents[agent_id].col_size)\n right_lim = int(agent_pos[1] + self.agents[agent_id].col_size)\n\n # keep this sorted so the visibility matrix is always in order\n other_agent_pos = [\n self.agents[other_agent_id].pos\n for other_agent_id in sorted(self.agents.keys())\n if other_agent_id != agent_id\n 
]\n return np.array(\n [\n 1\n if (lower_lim <= agent_tup[0] <= upper_lim and left_lim <= agent_tup[1] <= right_lim)\n else 0\n for agent_tup in other_agent_pos\n ],\n dtype=np.uint8,\n )\n\n @staticmethod\n def get_environment_callbacks():\n return DefaultCallbacks\n" ]
[ [ "numpy.rot90", "numpy.array", "numpy.dot", "numpy.asarray", "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.copy", "matplotlib.pyplot.cla", "numpy.random.shuffle", "numpy.any", "matplotlib.pyplot.show", "numpy.unique", "matplotlib.pyplot.imshow" ] ]
simonsobs/ocs
[ "24c6a617ea3038fccdb40bfd602ffd541415a476" ]
[ "tests/test_ocs_twisted.py" ]
[ "import time\nimport numpy as np\nimport pytest\nfrom ocs.ocs_twisted import Pacemaker\n\ndef test_quantized():\n \"\"\"\n Tests that Pacemaker forces regular sampling rate when quantize=True.\n\n \"\"\"\n sample_freq = 5\n pm = Pacemaker(sample_freq, quantize=True)\n times = []\n for i in range(10):\n pm.sleep()\n print(\"Sample time: {}\".format(time.time()))\n times.append(time.time())\n time.sleep(1/sample_freq/3)\n diffs = np.diff(np.array(times))\n # Checks if the diffs (minus the first point due to quantization) match\n tolerance = 1/sample_freq / 5\n assert np.all(np.abs(diffs - 1/sample_freq)[1:] < tolerance)\n\n\ndef test_nonquantized():\n \"\"\"\n Tests that pacemaker forces a regular sample rate when quantize=False.\n In this case, all diffs should be less than the tolerance because\n quantization doesn't mess up the first spacing.\n \"\"\"\n sample_freq = 5\n pm = Pacemaker(sample_freq, quantize=False)\n times = []\n for _ in range(10):\n pm.sleep()\n print(\"Sample time: {}\".format(time.time()))\n times.append(time.time())\n time.sleep(1/sample_freq/3)\n diffs = np.diff(np.array(times))\n # Checks if the diffs (minus the first point due to quantization) match\n tolerance = 1/sample_freq / 5\n assert np.all(np.abs(diffs - 1/sample_freq) < tolerance)\n\n\ndef test_non_integer_quantization():\n \"\"\"\n Trying to quantize with a non-integer sampling frequency should raise an\n error.\n \"\"\"\n with pytest.raises(ValueError):\n Pacemaker(5.5, quantize=True)\n" ]
[ [ "numpy.array", "numpy.abs" ] ]
arpastrana/coronary-mesh-convolution
[ "617e96face7f5686ef9772473c623b4dd3494047" ]
[ "transforms/geodesics.py" ]
[ "from utils.inlet import IndexFinder\nimport potpourri3d as pp3d\nimport torch\n\n\nclass InletGeodesics(object):\n \"\"\"Compute the shortest geodesic distances from each vertex to the vessel inlet.\n\n Args:\n -\n \"\"\"\n\n def __init__(self):\n self.inlet_indices = IndexFinder()\n\n def __call__(self, data):\n solver = pp3d.MeshHeatMethodDistanceSolver(data.pos.numpy(), data.face.t().numpy())\n\n # Compute the minimum geodesic distances to the inlet\n inlet, _ = self.inlet_indices(data.dir)\n geodesics = solver.compute_distance_multisource(inlet)\n\n # Append the features in single precision\n data.geo = torch.from_numpy(geodesics).float()\n\n return data\n\n def __repr__(self):\n return '{}()'.format(self.__class__.__name__)\n" ]
[ [ "torch.from_numpy" ] ]
bashish101/ir
[ "cc90e86827c19035f38d0d85154f073a86aa9796" ]
[ "tfidf_model.py" ]
[ "import numpy as np\nfrom collections import Counter\n\nclass TFIDF():\n def __init__(self,\n data):\n super(TFIDF, self).__init__()\n self.data = data\n self.N = len(data)\n \n self.tf_dict = {}\n self.df_dict = {}\n self.idf_dict = {}\n \n # Create data structures with tfidf for all documents\n self.create_tf_dict()\n self.create_df_dict()\n self.create_idf_dict()\n self.create_tfidf_dict()\n \n def create_df_dict(self):\n '''Creates dictionary of df values for each word'''\n self.df_dict = {}\n for doc_text in self.data:\n for word in set(doc_text):\n self.df_dict[word] = self.df_dict.get(word, 0) + 1\n return self.df_dict\n\n def create_idf_dict(self):\n '''Creates dictionary of idf values for each word'''\n self.idf_dict = {}\n maximum = 0\n delta = 0.5\n for word, df in self.df_dict.items():\n idf = np.log((self.N + delta) / (df + delta))\n self.idf_dict[word] = idf\n return self.idf_dict\n \n def create_tf_dict(self):\n self.tf_dict = {}\n for doc_num, doc_text in enumerate(self.data):\n self.tf_dict[doc_num] = {}\n if (len(doc_text) == 0):\n continue\n word_freq_list = Counter(doc_text).most_common()\n # max normalization\n max_freq = word_freq_list[0][1]\n for word, freq in word_freq_list:\n self.tf_dict[doc_num][word] = freq / max_freq \n\n return self.tf_dict\n \n def create_tfidf_dict(self):\n '''Creates dictionary of tfidf vectors for each document in data'''\n self.tfidf_dict = {}\n for doc_num, tf_per_doc in self.tf_dict.items():\n self.tfidf_dict[doc_num] = {}\n for word, tf in tf_per_doc.items():\n self.tfidf_dict[doc_num][word] = tf * self.idf_dict.get(word, 0)\n\n return self.tfidf_dict\n \n def compute_query_tfidf(self, query):\n '''Computes tfidf vector representation for a given (tokenized) query'''\n query_vec = {}\n word_freq_list = Counter(query).most_common()\n max_freq = word_freq_list[0][1]\n for word, freq in word_freq_list:\n # Get idf from pre-computed data structure\n idf = self.idf_dict.get(word, 0)\n # Compute tf for query (similar to that for a document)\n tf = (0.5 + 0.5 * freq / max_freq)\n query_vec[word] = tf * idf\n return query_vec\n \n def match(self, query_vec, doc_vec):\n '''Computes cosine similarity score (range [-1, 1]) between two vectors'''\n q_norm = np.linalg.norm([score for score in query_vec.values()])\n d_norm = np.linalg.norm([score for score in doc_vec.values()])\n score = 0\n for term, val in query_vec.items():\n score += val * doc_vec.get(term, 0)\n score /= q_norm * d_norm\n return score\n" ]
[ [ "numpy.log" ] ]
TomographicImaging/CCPi-Framework
[ "0a434c8b8086d2b759b4f3d66f7cc42e8099dfe9" ]
[ "Wrappers/Python/cil/framework/framework.py" ]
[ "# -*- coding: utf-8 -*-\n# This work is part of the Core Imaging Library (CIL) developed by CCPi \n# (Collaborative Computational Project in Tomographic Imaging), with \n# substantial contributions by UKRI-STFC and University of Manchester.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software \n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport copy\nimport numpy\nimport warnings\nfrom functools import reduce\nfrom numbers import Number\nimport ctypes, platform\nfrom ctypes import util\nimport math\nfrom cil.utilities.multiprocessing import NUM_THREADS\n# check for the extension\n\nif platform.system() == 'Linux':\n dll = 'libcilacc.so'\nelif platform.system() == 'Windows':\n dll_file = 'cilacc.dll'\n dll = util.find_library(dll_file)\nelif platform.system() == 'Darwin':\n dll = 'libcilacc.dylib'\nelse:\n raise ValueError('Not supported platform, ', platform.system())\n\ncilacc = ctypes.cdll.LoadLibrary(dll)\n\ndef find_key(dic, val):\n \"\"\"return the key of dictionary dic given the value\"\"\"\n return [k for k, v in dic.items() if v == val][0]\n\ndef message(cls, msg, *args):\n msg = \"{0}: \" + msg\n for i in range(len(args)):\n msg += \" {%d}\" %(i+1)\n args = list(args)\n args.insert(0, cls.__name__ )\n \n return msg.format(*args )\n\nclass ImageGeometry(object):\n RANDOM = 'random'\n RANDOM_INT = 'random_int'\n CHANNEL = 'channel'\n VERTICAL = 'vertical'\n HORIZONTAL_X = 'horizontal_x'\n HORIZONTAL_Y = 'horizontal_y'\n \n @property\n def shape(self):\n\n shape_dict = {ImageGeometry.CHANNEL: self.channels,\n ImageGeometry.VERTICAL: self.voxel_num_z,\n ImageGeometry.HORIZONTAL_Y: self.voxel_num_y, \n ImageGeometry.HORIZONTAL_X: self.voxel_num_x}\n\n shape = []\n for label in self.dimension_labels:\n shape.append(shape_dict[label])\n\n return tuple(shape)\n\n @shape.setter\n def shape(self, val):\n print(\"Deprecated - shape will be set automatically\")\n\n @property\n def spacing(self):\n\n spacing_dict = {ImageGeometry.CHANNEL: self.channel_spacing,\n ImageGeometry.VERTICAL: self.voxel_size_z,\n ImageGeometry.HORIZONTAL_Y: self.voxel_size_y, \n ImageGeometry.HORIZONTAL_X: self.voxel_size_x}\n\n spacing = []\n for label in self.dimension_labels:\n spacing.append(spacing_dict[label])\n\n return tuple(spacing)\n\n @property\n def length(self):\n return len(self.dimension_labels)\n\n @property\n def dimension_labels(self):\n \n labels_default = DataOrder.CIL_IG_LABELS\n\n shape_default = [ self.channels - 1, #channels default is 1\n self.voxel_num_z,\n self.voxel_num_y,\n self.voxel_num_x]\n\n try:\n labels = list(self.__dimension_labels)\n except AttributeError:\n labels = labels_default.copy()\n\n for i, x in enumerate(shape_default):\n if x == 0:\n try:\n labels.remove(labels_default[i])\n except ValueError:\n pass #if not in custom list carry on\n return tuple(labels)\n \n @dimension_labels.setter\n def dimension_labels(self, val):\n self.set_labels(val)\n \n def set_labels(self, labels):\n labels_default = DataOrder.CIL_IG_LABELS\n\n #check input and store. 
This value is not used directly\n if labels is not None:\n for x in labels:\n if x not in labels_default:\n raise ValueError('Requested axis are not possible. Accepted label names {},\\ngot {}'\\\n .format(labels_default,labels))\n \n self.__dimension_labels = tuple(labels)\n \n def __eq__(self, other):\n\n if not isinstance(other, self.__class__):\n return False\n \n if self.voxel_num_x == other.voxel_num_x \\\n and self.voxel_num_y == other.voxel_num_y \\\n and self.voxel_num_z == other.voxel_num_z \\\n and self.voxel_size_x == other.voxel_size_x \\\n and self.voxel_size_y == other.voxel_size_y \\\n and self.voxel_size_z == other.voxel_size_z \\\n and self.center_x == other.center_x \\\n and self.center_y == other.center_y \\\n and self.center_z == other.center_z \\\n and self.channels == other.channels \\\n and self.channel_spacing == other.channel_spacing \\\n and self.dimension_labels == other.dimension_labels:\n\n return True\n \n return False\n\n @property\n def dtype(self):\n return self.__dtype\n\n @dtype.setter\n def dtype(self, val):\n self.__dtype = val \n\n def __init__(self, \n voxel_num_x=0, \n voxel_num_y=0, \n voxel_num_z=0, \n voxel_size_x=1, \n voxel_size_y=1, \n voxel_size_z=1, \n center_x=0, \n center_y=0, \n center_z=0, \n channels=1, \n **kwargs):\n \n self.voxel_num_x = int(voxel_num_x)\n self.voxel_num_y = int(voxel_num_y)\n self.voxel_num_z = int(voxel_num_z)\n self.voxel_size_x = float(voxel_size_x)\n self.voxel_size_y = float(voxel_size_y)\n self.voxel_size_z = float(voxel_size_z)\n self.center_x = center_x\n self.center_y = center_y\n self.center_z = center_z \n self.channels = channels\n self.channel_labels = None\n self.channel_spacing = 1.0\n self.dimension_labels = kwargs.get('dimension_labels', None)\n self.dtype = kwargs.get('dtype', numpy.float32)\n\n def subset(self, dimensions=None, **kw):\n '''Returns a new sliced and/or reshaped ImageGeometry'''\n\n if not kw.get('suppress_warning', False):\n warnings.warn('Subset has been deprecated and will be removed in following version. 
Use reorder() and get_slice() instead',\n DeprecationWarning)\n\n if dimensions is None:\n return self.get_slice(**kw)\n else:\n if len(dimensions) != len(self.dimension_labels):\n raise ValueError('The axes list for subset must contain the dimension_labels {0} got {1}'.format(self.dimension_labels, dimensions))\n \n temp = self.copy()\n temp.set_labels(dimensions)\n return temp \n\n def get_slice(self,channel=None, vertical=None, horizontal_x=None, horizontal_y=None):\n '''\n Returns a new ImageGeometry of a single slice of in the requested direction.\n '''\n geometry_new = self.copy()\n if channel is not None:\n geometry_new.channels = 1\n \n try:\n geometry_new.channel_labels = [self.channel_labels[channel]]\n except:\n geometry_new.channel_labels = None\n\n if vertical is not None:\n geometry_new.voxel_num_z = 0\n \n if horizontal_y is not None:\n geometry_new.voxel_num_y = 0\n\n if horizontal_x is not None:\n geometry_new.voxel_num_x = 0\n\n return geometry_new\n\n def get_order_by_label(self, dimension_labels, default_dimension_labels):\n order = []\n for i, el in enumerate(default_dimension_labels):\n for j, ek in enumerate(dimension_labels):\n if el == ek:\n order.append(j)\n break\n return order\n\n def get_min_x(self):\n return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x\n \n def get_max_x(self):\n return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x\n \n def get_min_y(self):\n return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y\n \n def get_max_y(self):\n return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y\n \n def get_min_z(self):\n if not self.voxel_num_z == 0:\n return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z\n else:\n return 0\n \n def get_max_z(self):\n if not self.voxel_num_z == 0:\n return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z\n else:\n return 0\n \n def clone(self):\n '''returns a copy of the ImageGeometry'''\n return copy.deepcopy(self)\n\n def copy(self):\n '''alias of clone'''\n return self.clone()\n \n def __str__ (self):\n repres = \"\"\n repres += \"Number of channels: {0}\\n\".format(self.channels)\n repres += \"channel_spacing: {0}\\n\".format(self.channel_spacing)\n\n if self.voxel_num_z > 0:\n repres += \"voxel_num : x{0},y{1},z{2}\\n\".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z)\n repres += \"voxel_size : x{0},y{1},z{2}\\n\".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z)\n repres += \"center : x{0},y{1},z{2}\\n\".format(self.center_x, self.center_y, self.center_z)\n else:\n repres += \"voxel_num : x{0},y{1}\\n\".format(self.voxel_num_x, self.voxel_num_y)\n repres += \"voxel_size : x{0},y{1}\\n\".format(self.voxel_size_x, self.voxel_size_y)\n repres += \"center : x{0},y{1}\\n\".format(self.center_x, self.center_y)\n\n return repres\n def allocate(self, value=0, **kwargs):\n '''allocates an ImageData according to the size expressed in the instance\n \n :param value: accepts numbers to allocate an uniform array, or a string as 'random' or 'random_int' to create a random array or None.\n :type value: number or string, default None allocates empty memory block, default 0\n :param dtype: numerical type to allocate\n :type dtype: numpy type, default numpy.float32\n '''\n\n dtype = kwargs.get('dtype', self.dtype)\n\n if kwargs.get('dimension_labels', None) is not None:\n raise ValueError(\"Deprecated: 'dimension_labels' cannot be set with 'allocate()'. 
Use 'geometry.set_labels()' to modify the geometry before using allocate.\")\n\n out = ImageData(geometry=self.copy(), \n dtype=dtype, \n suppress_warning=True)\n\n if isinstance(value, Number):\n # it's created empty, so we make it 0\n out.array.fill(value)\n else:\n if value == ImageGeometry.RANDOM:\n seed = kwargs.get('seed', None)\n if seed is not None:\n numpy.random.seed(seed)\n if dtype in [ numpy.complex , numpy.complex64 , numpy.complex128 ] :\n r = numpy.random.random_sample(self.shape) + 1j * numpy.random.random_sample(self.shape)\n out.fill(r)\n else: \n out.fill(numpy.random.random_sample(self.shape))\n elif value == ImageGeometry.RANDOM_INT:\n seed = kwargs.get('seed', None)\n if seed is not None:\n numpy.random.seed(seed)\n max_value = kwargs.get('max_value', 100)\n r = numpy.random.randint(max_value,size=self.shape, dtype=numpy.int32)\n out.fill(numpy.asarray(r, dtype=self.dtype))\n elif value is None:\n pass\n else:\n raise ValueError('Value {} unknown'.format(value))\n\n return out\n \nclass ComponentDescription(object):\n r'''This class enables the creation of vectors and unit vectors used to describe the components of a tomography system\n '''\n def __init__ (self, dof):\n self.__dof = dof\n\n @staticmethod \n def CreateVector(val):\n try:\n vec = numpy.asarray(val, dtype=numpy.float32).reshape(len(val))\n except:\n raise ValueError(\"Can't convert to numpy array\")\n \n return vec\n\n @staticmethod \n def CreateUnitVector(val):\n vec = ComponentDescription.CreateVector(val)\n dot_product = vec.dot(vec)\n if abs(dot_product)>1e-8:\n vec = (vec/numpy.sqrt(dot_product))\n else:\n raise ValueError(\"Can't return a unit vector of a zero magnitude vector\")\n return vec\n\n def length_check(self, val):\n try:\n val_length = len(val)\n except:\n raise ValueError(\"Vectors for {0}D geometries must have length = {0}. Got {1}\".format(self.__dof,val))\n \n if val_length != self.__dof:\n raise ValueError(\"Vectors for {0}D geometries must have length = {0}. 
Got {1}\".format(self.__dof,val))\n\nclass PositionVector(ComponentDescription):\n r'''This class creates a component of a tomography system with a position attribute\n '''\n @property\n def position(self):\n try:\n return self.__position\n except:\n raise AttributeError\n\n @position.setter\n def position(self, val): \n self.length_check(val)\n self.__position = ComponentDescription.CreateVector(val)\n\n\nclass DirectionVector(ComponentDescription):\n r'''This class creates a component of a tomography system with a direction attribute\n '''\n @property\n def direction(self): \n try:\n return self.__direction\n except:\n raise AttributeError\n\n @direction.setter\n def direction(self, val):\n self.length_check(val) \n self.__direction = ComponentDescription.CreateUnitVector(val)\n\n \nclass PositionDirectionVector(PositionVector, DirectionVector):\n r'''This class creates a component of a tomography system with position and direction attributes\n '''\n pass\n\nclass Detector1D(PositionVector):\n r'''This class creates a component of a tomography system with position and direction_x attributes used for 1D panels\n '''\n @property\n def direction_x(self):\n try:\n return self.__direction_x\n except:\n raise AttributeError\n\n @direction_x.setter\n def direction_x(self, val):\n self.length_check(val)\n self.__direction_x = ComponentDescription.CreateUnitVector(val)\n\nclass Detector2D(PositionVector):\n r'''This class creates a component of a tomography system with position, direction_x and direction_y attributes used for 2D panels\n '''\n @property\n def direction_x(self):\n try:\n return self.__direction_x\n except:\n raise AttributeError\n\n @property\n def direction_y(self):\n try:\n return self.__direction_y\n except:\n raise AttributeError\n\n def set_direction(self, x, y):\n self.length_check(x)\n x = ComponentDescription.CreateUnitVector(x)\n\n self.length_check(y)\n y = ComponentDescription.CreateUnitVector(y)\n\n dot_product = x.dot(y)\n if not numpy.isclose(dot_product, 0):\n raise ValueError(\"vectors detector.direction_x and detector.direction_y must be orthogonal\")\n\n self.__direction_y = y \n self.__direction_x = x\n\nclass SystemConfiguration(object):\n r'''This is a generic class to hold the description of a tomography system\n '''\n\n SYSTEM_SIMPLE = 'simple' \n SYSTEM_OFFSET = 'offset' \n SYSTEM_ADVANCED = 'advanced' \n\n @property\n def dimension(self):\n if self._dimension == 2:\n return '2D'\n else:\n return '3D' \n\n @dimension.setter\n def dimension(self,val):\n if val != 2 and val != 3:\n raise ValueError('Can set up 2D and 3D systems only. 
got {0}D'.format(val))\n else:\n self._dimension = val\n\n @property\n def geometry(self):\n return self.__geometry\n\n @geometry.setter\n def geometry(self,val):\n if val != AcquisitionGeometry.CONE and val != AcquisitionGeometry.PARALLEL:\n raise ValueError('geom_type = {} not recognised please specify \\'cone\\' or \\'parallel\\''.format(val))\n else:\n self.__geometry = val\n\n def __init__(self, dof, geometry): \n \"\"\"Initialises the system component attributes for the acquisition type\n \"\"\" \n self.dimension = dof\n self.geometry = geometry\n \n if geometry == AcquisitionGeometry.PARALLEL:\n self.ray = DirectionVector(dof)\n else:\n self.source = PositionVector(dof)\n\n if dof == 2:\n self.detector = Detector1D(dof)\n self.rotation_axis = PositionVector(dof)\n else:\n self.detector = Detector2D(dof)\n self.rotation_axis = PositionDirectionVector(dof)\n \n def __str__(self):\n \"\"\"Implements the string representation of the system configuration\n \"\"\" \n raise NotImplementedError\n\n def __eq__(self, other):\n \"\"\"Implements the equality check of the system configuration\n \"\"\" \n raise NotImplementedError\n\n def update_reference_frame(self):\n \"\"\"Returns the components of the system in the reference frame of the rotation axis at position 0\n \"\"\" \n raise NotImplementedError\n\n def get_centre_slice(self):\n \"\"\"Returns the 2D system configuration corersponding to the centre slice\n \"\"\" \n raise NotImplementedError\n\n def calculate_magnification(self):\n r'''Calculates the magnification of the system using the source to rotate axis,\n and source to detector distance along the direction.\n\n :return: returns [dist_source_center, dist_center_detector, magnification], [0] distance from the source to the rotate axis, [1] distance from the rotate axis to the detector, [2] magnification of the system\n :rtype: list\n '''\n raise NotImplementedError\n \n def system_description(self):\n r'''Returns `simple` if the the geometry matches the default definitions with no offsets or rotations,\n \\nReturns `offset` if the the geometry matches the default definitions with centre-of-rotation or detector offsets\n \\nReturns `advanced` if the the geometry has rotated or tilted rotation axis or detector, can also have offsets\n ''' \n raise NotImplementedError\n\n def copy(self):\n '''returns a copy of SystemConfiguration'''\n return copy.deepcopy(self)\n\nclass Parallel2D(SystemConfiguration):\n r'''This class creates the SystemConfiguration of a parallel beam 2D tomographic system\n \n :param ray_direction: A 2D vector describing the x-ray direction (x,y)\n :type ray_direction: list, tuple, ndarray\n :param detector_pos: A 2D vector describing the position of the centre of the detector (x,y)\n :type detector_pos: list, tuple, ndarray\n :param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y)\n :type detector_direction_x: list, tuple, ndarray\n :param rotation_axis_pos: A 2D vector describing the position of the axis of rotation (x,y)\n :type rotation_axis_pos: list, tuple, ndarray \n '''\n def __init__ (self, ray_direction, detector_pos, detector_direction_x, rotation_axis_pos):\n \"\"\"Constructor method\n \"\"\"\n super(Parallel2D, self).__init__(dof=2, geometry = 'parallel')\n\n #source\n self.ray.direction = ray_direction\n\n #detector\n self.detector.position = detector_pos\n self.detector.direction_x = detector_direction_x\n \n #rotate axis\n self.rotation_axis.position = rotation_axis_pos\n\n def update_reference_frame(self):\n 
r'''Transforms the system origin to the rotate axis\n ''' \n self.detector.position -= self.rotation_axis.position\n self.rotation_axis.position = [0,0]\n\n\n def align_reference_frame(self):\n r'''Transforms the system origin to the rotate axis and aligns the ray along the positive Y direction\n ''' \n self.update_reference_frame()\n\n ray_vec = -self.ray.direction\n\n axis_rotation = numpy.eye(2)\n if numpy.allclose(ray_vec,[0,-1]):\n pass\n elif numpy.allclose(ray_vec,[0,1]):\n axis_rotation[0][0] = -1\n axis_rotation[1][1] = -1\n else:\n theta = math.atan2(ray_vec[0],-ray_vec[1])\n axis_rotation[0][0] = axis_rotation[1][1] = math.cos(theta)\n axis_rotation[0][1] = math.sin(theta)\n axis_rotation[1][0] = -math.sin(theta)\n\n rotation_matrix = numpy.matrix(axis_rotation)\n \n self.ray.direction = rotation_matrix.dot(self.ray.direction.reshape(2,1))\n self.detector.position = rotation_matrix.dot(self.detector.position.reshape(2,1))\n self.detector.direction_x = rotation_matrix.dot(self.detector.direction_x.reshape(2,1))\n\n def system_description(self):\n r'''Returns `simple` if the the geometry matches the default definitions with no offsets or rotations,\n \\nReturns `offset` if the the geometry matches the default definitions with centre-of-rotation or detector offsets\n \\nReturns `advanced` if the the geometry has rotated or tilted rotation axis or detector, can also have offsets\n ''' \n new = self.copy()\n new.align_reference_frame()\n\n try:\n det_unit = ComponentDescription.CreateUnitVector(new.detector.position)\n except ValueError: #pass test if detector is on origin\n det_unit = [0,1]\n\n if not numpy.allclose(new.ray.direction,[0,1]) or\\\n not numpy.allclose(new.detector.direction_x,[1,0]):\n return SystemConfiguration.SYSTEM_ADVANCED\n elif not numpy.allclose(det_unit,[0,1]):\n return SystemConfiguration.SYSTEM_OFFSET\n else:\n return SystemConfiguration.SYSTEM_SIMPLE\n\n\n def __str__(self):\n def csv(val):\n return numpy.array2string(val, separator=', ')\n \n repres = \"2D Parallel-beam tomography\\n\"\n repres += \"System configuration:\\n\"\n repres += \"\\tRay direction: {0}\\n\".format(csv(self.ray.direction))\n repres += \"\\tRotation axis position: {0}\\n\".format(csv(self.rotation_axis.position))\n repres += \"\\tDetector position: {0}\\n\".format(csv(self.detector.position))\n repres += \"\\tDetector direction x: {0}\\n\".format(csv(self.detector.direction_x))\n return repres\n\n def __eq__(self, other):\n\n if not isinstance(other, self.__class__):\n return False\n \n if numpy.allclose(self.ray.direction, other.ray.direction) \\\n and numpy.allclose(self.detector.position, other.detector.position)\\\n and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\\\n and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position):\n return True\n \n return False\n\n def get_centre_slice(self):\n return self\n\n def calculate_magnification(self):\n return [None, None, 1.0]\n\nclass Parallel3D(SystemConfiguration):\n r'''This class creates the SystemConfiguration of a parallel beam 3D tomographic system\n \n :param ray_direction: A 3D vector describing the x-ray direction (x,y,z)\n :type ray_direction: list, tuple, ndarray\n :param detector_pos: A 3D vector describing the position of the centre of the detector (x,y,z)\n :type detector_pos: list, tuple, ndarray\n :param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z)\n :type detector_direction_x: list, tuple, ndarray\n :param detector_direction_y: A 3D 
vector describing the direction of the detector_y (x,y,z)\n :type detector_direction_y: list, tuple, ndarray\n :param rotation_axis_pos: A 3D vector describing the position of the axis of rotation (x,y,z)\n :type rotation_axis_pos: list, tuple, ndarray\n :param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z)\n :type rotation_axis_direction: list, tuple, ndarray \n '''\n def __init__ (self, ray_direction, detector_pos, detector_direction_x, detector_direction_y, rotation_axis_pos, rotation_axis_direction):\n \"\"\"Constructor method\n \"\"\"\n super(Parallel3D, self).__init__(dof=3, geometry = 'parallel')\n \n #source\n self.ray.direction = ray_direction\n\n #detector\n self.detector.position = detector_pos\n self.detector.set_direction(detector_direction_x, detector_direction_y)\n\n #rotate axis\n self.rotation_axis.position = rotation_axis_pos\n self.rotation_axis.direction = rotation_axis_direction\n\n def update_reference_frame(self):\n r'''Transforms the system origin to the rotate axis with z direction aligned to the rotate axis direction\n ''' \n #shift detector\n self.detector.position = (self.detector.position - self.rotation_axis.position)\n self.rotation_axis.position = [0,0,0]\n\n #calculate rotation matrix to align rotation axis direction with z\n a = self.rotation_axis.direction\n\n if numpy.allclose(a,[0,0,1]):\n return\n elif numpy.allclose(a,[0,0,-1]):\n axis_rotation = numpy.eye(3)\n axis_rotation[1][1] = -1\n axis_rotation[2][2] = -1\n else:\n vx = numpy.array([[0, 0, -a[0]], [0, 0, -a[1]], [a[0], a[1], 0]])\n axis_rotation = numpy.eye(3) + vx + vx.dot(vx) * 1 / (1 + a[2])\n \n rotation_matrix = numpy.matrix(axis_rotation)\n\n #sanity check\n new_rotation_axis_direction = rotation_matrix.dot(self.rotation_axis.direction.reshape(3,1))\n\n if not numpy.allclose(new_rotation_axis_direction.flatten(), [0,0,1], atol=1e-7):\n raise ValueError(\"Failed to align reference frame\")\n\n #apply transform\n self.rotation_axis.direction = [0,0,1]\n self.ray.direction = rotation_matrix.dot(self.ray.direction.reshape(3,1))\n self.detector.position = rotation_matrix.dot(self.detector.position.reshape(3,1))\n new_x = rotation_matrix.dot(self.detector.direction_x.reshape(3,1))\n new_y = rotation_matrix.dot(self.detector.direction_y.reshape(3,1))\n self.detector.set_direction(new_x, new_y)\n\n def align_reference_frame(self):\n r'''Transforms the system origin to the rotate axis with z direction aligned to the rotate axis direction, and aligns the ray direction along the positive Y direction\n ''' \n self.update_reference_frame()\n\n ray_vec = -self.ray.direction\n axis_rotation = numpy.eye(3)\n\n if numpy.allclose(ray_vec,[0,-1,0]):\n pass\n elif numpy.allclose(ray_vec,[0,1,0]):\n axis_rotation[0][0] = -1\n axis_rotation[1][1] = -1\n else:\n theta = math.atan2(ray_vec[0],ray_vec[1])\n axis_rotation[0][0] = axis_rotation[1][1] = math.cos(theta)\n axis_rotation[0][1] = -math.sin(theta)\n axis_rotation[1][0] = math.sin(theta)\n\n rotation_matrix = numpy.matrix(axis_rotation)\n \n self.ray.direction = rotation_matrix.dot(self.ray.direction.reshape(3,1))\n self.detector.position = rotation_matrix.dot(self.detector.position.reshape(3,1))\n\n new_direction_x = rotation_matrix.dot(self.detector.direction_x.reshape(3,1))\n new_direction_y = rotation_matrix.dot(self.detector.direction_y.reshape(3,1))\n\n self.detector.set_direction(new_direction_x, new_direction_y)\n\n def system_description(self):\n r'''Returns `simple` if the the geometry matches the 
default definitions with no offsets or rotations,
        \nReturns `offset` if the geometry matches the default definitions with centre-of-rotation or detector offsets
        \nReturns `advanced` if the geometry has a rotated or tilted rotation axis or detector; it can also have offsets
        '''
        new = self.copy()
        new.align_reference_frame()

        try:
            det_unit = ComponentDescription.CreateUnitVector(new.detector.position)
        except ValueError: #pass test if detector is on origin
            det_unit = [0,1,0]

        if not numpy.allclose(new.ray.direction,[0,1,0]) or\
           not numpy.allclose(new.detector.direction_x,[1,0,0]) or\
           not numpy.allclose(new.detector.direction_y,[0,0,1]) or\
           not numpy.allclose(new.rotation_axis.direction,[0,0,1]):
            return SystemConfiguration.SYSTEM_ADVANCED
        elif not numpy.allclose(det_unit,[0,1,0]):
            return SystemConfiguration.SYSTEM_OFFSET
        else:
            return SystemConfiguration.SYSTEM_SIMPLE


    def __str__(self):
        def csv(val):
            return numpy.array2string(val, separator=', ')

        repres = "3D Parallel-beam tomography\n"
        repres += "System configuration:\n"
        repres += "\tRay direction: {0}\n".format(csv(self.ray.direction))
        repres += "\tRotation axis position: {0}\n".format(csv(self.rotation_axis.position))
        repres += "\tRotation axis direction: {0}\n".format(csv(self.rotation_axis.direction))
        repres += "\tDetector position: {0}\n".format(csv(self.detector.position))
        repres += "\tDetector direction x: {0}\n".format(csv(self.detector.direction_x))
        repres += "\tDetector direction y: {0}\n".format(csv(self.detector.direction_y))
        return repres

    def __eq__(self, other):

        if not isinstance(other, self.__class__):
            return False

        if numpy.allclose(self.ray.direction, other.ray.direction) \
        and numpy.allclose(self.detector.position, other.detector.position)\
        and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\
        and numpy.allclose(self.detector.direction_y, other.detector.direction_y)\
        and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position)\
        and numpy.allclose(self.rotation_axis.direction, other.rotation_axis.direction):

            return True

        return False

    def calculate_magnification(self):
        return [None, None, 1.0]

    def get_centre_slice(self):
        """Returns the 2D system configuration corresponding to the centre slice
        """
        dp1 = self.rotation_axis.direction.dot(self.ray.direction)
        dp2 = self.rotation_axis.direction.dot(self.detector.direction_x)

        if numpy.isclose(dp1, 0) and numpy.isclose(dp2, 0):
            temp = self.copy()

            #convert to rotation axis reference frame
            temp.update_reference_frame()

            ray_direction = temp.ray.direction[0:2]
            detector_position = temp.detector.position[0:2]
            detector_direction_x = temp.detector.direction_x[0:2]
            rotation_axis_position = temp.rotation_axis.position[0:2]

            return Parallel2D(ray_direction, detector_position, detector_direction_x, rotation_axis_position)

        else:
            raise ValueError('Cannot convert geometry to 2D. Requires axis of rotation to be perpendicular to ray direction and the detector direction x.')
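# Illustrative usage (not part of the library API): a minimal sketch showing
# how a default-aligned Parallel3D configuration is classified and reduced to
# its Parallel2D centre slice. Assumes the module-level `numpy` import and the
# component classes defined above; the function name `_example_...` is
# hypothetical.
def _example_parallel3d_centre_slice():
    # ray along +y, detector at the origin, rotation axis along +z
    system = Parallel3D(ray_direction=[0, 1, 0],
                        detector_pos=[0, 0, 0],
                        detector_direction_x=[1, 0, 0],
                        detector_direction_y=[0, 0, 1],
                        rotation_axis_pos=[0, 0, 0],
                        rotation_axis_direction=[0, 0, 1])
    print(system.system_description())        # expected: 'simple'
    centre_slice = system.get_centre_slice()  # a Parallel2D instance
    print(centre_slice)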
class Cone2D(SystemConfiguration):
    r'''This class creates the SystemConfiguration of a cone beam 2D tomographic system

    :param source_pos: A 2D vector describing the position of the source (x,y)
    :type source_pos: list, tuple, ndarray
    :param detector_pos: A 2D vector describing the position of the centre of the detector (x,y)
    :type detector_pos: list, tuple, ndarray
    :param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y)
    :type detector_direction_x: list, tuple, ndarray
    :param rotation_axis_pos: A 2D vector describing the position of the axis of rotation (x,y)
    :type rotation_axis_pos: list, tuple, ndarray
    '''

    def __init__ (self, source_pos, detector_pos, detector_direction_x, rotation_axis_pos):
        """Constructor method
        """
        super(Cone2D, self).__init__(dof=2, geometry = 'cone')

        #source
        self.source.position = source_pos

        #detector
        self.detector.position = detector_pos
        self.detector.direction_x = detector_direction_x

        #rotation axis
        self.rotation_axis.position = rotation_axis_pos

    def update_reference_frame(self):
        r'''Transforms the system origin to the rotation axis
        '''
        self.source.position -= self.rotation_axis.position
        self.detector.position -= self.rotation_axis.position
        self.rotation_axis.position = [0,0]

    def align_reference_frame(self):
        r'''Transforms the system origin to the rotation axis and aligns the source position along the negative Y direction
        '''
        self.update_reference_frame()

        src_dir = ComponentDescription.CreateUnitVector(self.source.position)

        axis_rotation = numpy.eye(2)
        if numpy.allclose(src_dir,[0,-1]):
            pass
        elif numpy.allclose(src_dir,[0,1]):
            axis_rotation[0][0] = -1
            axis_rotation[1][1] = -1
        else:
            #rotate the source onto the negative y axis (matches the 3D implementation)
            theta = math.atan2(src_dir[0],-src_dir[1])
            axis_rotation[0][0] = axis_rotation[1][1] = math.cos(theta)
            axis_rotation[0][1] = math.sin(theta)
            axis_rotation[1][0] = -math.sin(theta)

        rotation_matrix = numpy.matrix(axis_rotation)

        self.source.position = rotation_matrix.dot(self.source.position.reshape(2,1))
        self.detector.position = rotation_matrix.dot(self.detector.position.reshape(2,1))
        self.detector.direction_x = rotation_matrix.dot(self.detector.direction_x.reshape(2,1))


    def system_description(self):
        r'''Returns `simple` if the geometry matches the default definitions with no offsets or rotations,
        \nReturns `offset` if the geometry matches the default definitions with centre-of-rotation or detector offsets
        \nReturns `advanced` if the geometry has a rotated or tilted rotation axis or detector; it can also have offsets
        '''
        new = self.copy()
        new.align_reference_frame()
        dot_prod = (new.detector.position - new.source.position).dot(new.detector.direction_x)

        if abs(dot_prod)>1e-6:
            return SystemConfiguration.SYSTEM_ADVANCED
        elif abs(new.source.position[0])>1e-6 or\
             abs(new.detector.position[0])>1e-6:
            return SystemConfiguration.SYSTEM_OFFSET
        else:
            return SystemConfiguration.SYSTEM_SIMPLE

    def __str__(self):
        def csv(val):
            return numpy.array2string(val, separator=', ')

        repres = "2D Cone-beam tomography\n"
        repres += "System configuration:\n"
        repres += "\tSource position: {0}\n".format(csv(self.source.position))
        repres += "\tRotation axis position: {0}\n".format(csv(self.rotation_axis.position))
        repres += "\tDetector position: {0}\n".format(csv(self.detector.position))
repres += \"\\tDetector direction x: {0}\\n\".format(csv(self.detector.direction_x)) \n return repres \n\n def __eq__(self, other):\n\n if not isinstance(other, self.__class__):\n return False\n \n if numpy.allclose(self.source.position, other.source.position) \\\n and numpy.allclose(self.detector.position, other.detector.position)\\\n and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\\\n and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position):\n return True\n \n return False\n\n def get_centre_slice(self):\n return self\n\n def calculate_magnification(self):\n #64bit for maths\n rotation_axis_position = self.rotation_axis.position.astype(numpy.float64)\n source_position = self.source.position.astype(numpy.float64)\n detector_position = self.detector.position.astype(numpy.float64)\n direction_x = self.detector.direction_x.astype(numpy.float64)\n\n ab = (rotation_axis_position - source_position)\n dist_source_center = float(numpy.sqrt(ab.dot(ab)))\n\n ab_unit = ab / numpy.sqrt(ab.dot(ab))\n\n n = ComponentDescription.CreateVector([direction_x[1], -direction_x[0]]).astype(numpy.float64)\n\n #perpendicular distance between source and detector centre\n sd = float((detector_position - source_position).dot(n))\n ratio = float(ab_unit.dot(n))\n\n source_to_detector = sd / ratio\n dist_center_detector = source_to_detector - dist_source_center\n magnification = (dist_center_detector + dist_source_center) / dist_source_center\n\n return [dist_source_center, dist_center_detector, magnification]\n\nclass Cone3D(SystemConfiguration):\n r'''This class creates the SystemConfiguration of a cone beam 3D tomographic system\n \n :param source_pos: A 3D vector describing the position of the source (x,y,z)\n :type source_pos: list, tuple, ndarray\n :param detector_pos: A 3D vector describing the position of the centre of the detector (x,y,z)\n :type detector_pos: list, tuple, ndarray\n :param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z)\n :type detector_direction_x: list, tuple, ndarray\n :param detector_direction_y: A 3D vector describing the direction of the detector_y (x,y,z)\n :type detector_direction_y: list, tuple, ndarray \n :param rotation_axis_pos: A 3D vector describing the position of the axis of rotation (x,y,z)\n :type rotation_axis_pos: list, tuple, ndarray\n :param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z)\n :type rotation_axis_direction: list, tuple, ndarray \n '''\n\n def __init__ (self, source_pos, detector_pos, detector_direction_x, detector_direction_y, rotation_axis_pos, rotation_axis_direction):\n \"\"\"Constructor method\n \"\"\"\n super(Cone3D, self).__init__(dof=3, geometry = 'cone')\n\n #source\n self.source.position = source_pos\n\n #detector\n self.detector.position = detector_pos\n self.detector.set_direction(detector_direction_x, detector_direction_y)\n\n #rotate axis\n self.rotation_axis.position = rotation_axis_pos\n self.rotation_axis.direction = rotation_axis_direction\n\n def update_reference_frame(self):\n r'''Transforms the system origin to the rotate axis with z direction aligned to the rotate axis direction\n ''' \n #shift \n self.detector.position = (self.detector.position - self.rotation_axis.position)\n self.source.position = (self.source.position - self.rotation_axis.position)\n self.rotation_axis.position = [0,0,0]\n\n #calculate rotation matrix to align rotation axis direction with z\n a = self.rotation_axis.direction\n if 
numpy.allclose(a,[0,0,1]):
            return
        elif numpy.allclose(a,[0,0,-1]):
            axis_rotation = numpy.eye(3)
            axis_rotation[1][1] = -1
            axis_rotation[2][2] = -1
        else:
            vx = numpy.array([[0, 0, -a[0]], [0, 0, -a[1]], [a[0], a[1], 0]])
            axis_rotation = numpy.eye(3) + vx + vx.dot(vx) * 1 / (1 + a[2])

        rotation_matrix = numpy.matrix(axis_rotation)

        #sanity check
        new_rotation_axis_direction = rotation_matrix.dot(self.rotation_axis.direction.reshape(3,1))

        if not numpy.allclose(new_rotation_axis_direction.flatten(), [0,0,1], atol=1e-7):
            raise ValueError("Failed to align reference frame")

        #apply transform
        self.rotation_axis.direction = [0,0,1]
        self.source.position = rotation_matrix.dot(self.source.position.reshape(3,1))
        self.detector.position = rotation_matrix.dot(self.detector.position.reshape(3,1))
        new_x = rotation_matrix.dot(self.detector.direction_x.reshape(3,1))
        new_y = rotation_matrix.dot(self.detector.direction_y.reshape(3,1))
        self.detector.set_direction(new_x, new_y)

    def align_reference_frame(self):
        r'''Transforms the system origin to the rotation axis with the z direction aligned to the rotation axis direction, and aligns the source direction along the negative Y direction
        '''
        self.update_reference_frame()

        src_dir = ComponentDescription.CreateUnitVector(self.source.position)

        axis_rotation = numpy.eye(3)
        if numpy.allclose(src_dir,[0,-1,0]):
            pass
        elif numpy.allclose(src_dir,[0,1,0]):
            axis_rotation[0][0] = -1
            axis_rotation[1][1] = -1
        else:
            theta = math.atan2(src_dir[0],-src_dir[1])
            axis_rotation[0][0] = axis_rotation[1][1] = math.cos(theta)
            axis_rotation[0][1] = math.sin(theta)
            axis_rotation[1][0] = -math.sin(theta)

        rotation_matrix = numpy.matrix(axis_rotation)

        self.source.position = rotation_matrix.dot(self.source.position.reshape(3,1))
        self.detector.position = rotation_matrix.dot(self.detector.position.reshape(3,1))

        new_direction_x = rotation_matrix.dot(self.detector.direction_x.reshape(3,1))
        new_direction_y = rotation_matrix.dot(self.detector.direction_y.reshape(3,1))

        self.detector.set_direction(new_direction_x, new_direction_y)

    def system_description(self):
        r'''Returns `simple` if the geometry matches the default definitions with no offsets or rotations,
        \nReturns `offset` if the geometry matches the default definitions with centre-of-rotation or detector offsets
        \nReturns `advanced` if the geometry has a rotated or tilted rotation axis or detector; it can also have offsets
        '''
        new = self.copy()
        new.align_reference_frame()

        dot_prod_a = (new.detector.position - new.source.position).dot(new.detector.direction_x)
        dot_prod_b = (new.detector.position - new.source.position).dot(new.detector.direction_y)
        dot_prod_c = (new.detector.direction_x).dot(new.rotation_axis.direction)
        dot_prod_d = (new.detector.position - new.source.position).dot(new.rotation_axis.direction)

        if abs(dot_prod_a)>1e-6 or\
           abs(dot_prod_b)>1e-6 or\
           abs(dot_prod_c)>1e-6 or\
           abs(dot_prod_d)>1e-6:
            return SystemConfiguration.SYSTEM_ADVANCED

        elif abs(new.source.position[0])>1e-6 or\
             abs(new.source.position[2])>1e-6 or\
             abs(new.detector.position[0])>1e-6 or\
             abs(new.detector.position[2])>1e-6:
            return SystemConfiguration.SYSTEM_OFFSET
        else:
            return SystemConfiguration.SYSTEM_SIMPLE
    def get_centre_slice(self):
        """Returns the 2D system configuration corresponding to the centre slice
        """
        #requires the rotation axis to be perpendicular to the normal of the detector, and perpendicular to detector_direction_x
        vec1 = numpy.cross(self.detector.direction_x, self.detector.direction_y)
        dp1 = self.rotation_axis.direction.dot(vec1)
        dp2 = self.rotation_axis.direction.dot(self.detector.direction_x)

        if numpy.isclose(dp1, 0) and numpy.isclose(dp2, 0):
            temp = self.copy()
            temp.update_reference_frame()
            source_position = temp.source.position[0:2]
            detector_position = temp.detector.position[0:2]
            detector_direction_x = temp.detector.direction_x[0:2]
            rotation_axis_position = temp.rotation_axis.position[0:2]

            return Cone2D(source_position, detector_position, detector_direction_x, rotation_axis_position)
        else:
            raise ValueError('Cannot convert geometry to 2D. Requires axis of rotation to be perpendicular to the detector.')

    def __str__(self):
        def csv(val):
            return numpy.array2string(val, separator=', ')

        repres = "3D Cone-beam tomography\n"
        repres += "System configuration:\n"
        repres += "\tSource position: {0}\n".format(csv(self.source.position))
        repres += "\tRotation axis position: {0}\n".format(csv(self.rotation_axis.position))
        repres += "\tRotation axis direction: {0}\n".format(csv(self.rotation_axis.direction))
        repres += "\tDetector position: {0}\n".format(csv(self.detector.position))
        repres += "\tDetector direction x: {0}\n".format(csv(self.detector.direction_x))
        repres += "\tDetector direction y: {0}\n".format(csv(self.detector.direction_y))
        return repres

    def __eq__(self, other):

        if not isinstance(other, self.__class__):
            return False

        if numpy.allclose(self.source.position, other.source.position) \
        and numpy.allclose(self.detector.position, other.detector.position)\
        and numpy.allclose(self.detector.direction_x, other.detector.direction_x)\
        and numpy.allclose(self.detector.direction_y, other.detector.direction_y)\
        and numpy.allclose(self.rotation_axis.position, other.rotation_axis.position)\
        and numpy.allclose(self.rotation_axis.direction, other.rotation_axis.direction):

            return True

        return False

    def calculate_magnification(self):

        #64bit for maths
        rotation_axis_position = self.rotation_axis.position.astype(numpy.float64)
        source_position = self.source.position.astype(numpy.float64)
        detector_position = self.detector.position.astype(numpy.float64)
        direction_x = self.detector.direction_x.astype(numpy.float64)

        ab = (rotation_axis_position - source_position)
        dist_source_center = float(numpy.sqrt(ab.dot(ab)))

        ab_unit = ab / numpy.sqrt(ab.dot(ab))

        #det y and det x are perpendicular unit vectors so n is a unit vector
        #unit vector orthogonal to the detector
        direction_y = self.detector.direction_y.astype(numpy.float64)
        n = numpy.cross(direction_x,direction_y)

        #perpendicular distance between source and detector centre
        sd = float((detector_position - source_position).dot(n))
        ratio = float(ab_unit.dot(n))

        source_to_detector = sd / ratio
        dist_center_detector = source_to_detector - dist_source_center
        magnification = (dist_center_detector + dist_source_center) / dist_source_center

        return [dist_source_center, dist_center_detector, magnification]
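# Illustrative usage (not part of the library API): a minimal sketch of the
# magnification calculation and the description tiers above, assuming the
# module-level `numpy` import. With the source 500 units before the rotation
# axis and the detector 500 units behind it, the magnification is
# (500 + 500) / 500 = 2. The function name `_example_...` is hypothetical.
def _example_cone3d_description_and_magnification():
    aligned = Cone3D(source_pos=[0, -500, 0], detector_pos=[0, 500, 0],
                     detector_direction_x=[1, 0, 0], detector_direction_y=[0, 0, 1],
                     rotation_axis_pos=[0, 0, 0], rotation_axis_direction=[0, 0, 1])
    print(aligned.system_description())          # expected: 'simple'
    print(aligned.calculate_magnification())     # expected: [500.0, 500.0, 2.0]

    # a lateral centre-of-rotation offset downgrades the geometry to 'offset'
    shifted = Cone3D(source_pos=[0, -500, 0], detector_pos=[0, 500, 0],
                     detector_direction_x=[1, 0, 0], detector_direction_y=[0, 0, 1],
                     rotation_axis_pos=[10, 0, 0], rotation_axis_direction=[0, 0, 1])
    print(shifted.system_description())          # expected: 'offset'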
class Panel(object):
    r'''This is a class describing the panel of the system.

    :param num_pixels: num_pixels_h or (num_pixels_h, num_pixels_v) containing the number of pixels of the panel
    :type num_pixels: int, list, tuple
    :param pixel_size: pixel_size_h or (pixel_size_h, pixel_size_v) containing the size of the pixels of the panel
    :type pixel_size: int, list, tuple
    :param origin: the position of pixel 0 (the data origin) of the panel `top-left`, `top-right`, `bottom-left`, `bottom-right`
    :type origin: string
    '''

    @property
    def num_pixels(self):
        return self.__num_pixels

    @num_pixels.setter
    def num_pixels(self, val):

        if isinstance(val,int):
            num_pixels_temp = [val, 1]
        else:
            try:
                length_val = len(val)
            except:
                raise TypeError('num_pixels expected int x or [int x, int y]. Got {}'.format(type(val)))


            if length_val == 2:
                try:
                    val0 = int(val[0])
                    val1 = int(val[1])
                except:
                    raise TypeError('num_pixels expected int x or [int x, int y]. Got {0},{1}'.format(type(val[0]), type(val[1])))

                num_pixels_temp = [val0, val1]
            else:
                raise ValueError('num_pixels expected int x or [int x, int y]. Got {}'.format(val))

        if num_pixels_temp[1] > 1 and self._dimension == 2:
            raise ValueError('2D acquisitions expect a 1D panel. Expected num_pixels[1] = 1. Got {}'.format(num_pixels_temp[1]))
        if num_pixels_temp[0] < 1 or num_pixels_temp[1] < 1:
            raise ValueError('num_pixels (x,y) must be >= (1,1). Got {}'.format(num_pixels_temp))
        else:
            self.__num_pixels = numpy.array(num_pixels_temp, dtype=numpy.int16)

    @property
    def pixel_size(self):
        return self.__pixel_size

    @pixel_size.setter
    def pixel_size(self, val):

        if val is None:
            pixel_size_temp = [1.0,1.0]
        else:
            try:
                length_val = len(val)
            except:
                try:
                    temp = float(val)
                    pixel_size_temp = [temp, temp]

                except:
                    raise TypeError('pixel_size expected float xy or [float x, float y]. Got {}'.format(val))
            else:
                if length_val == 2:
                    try:
                        temp0 = float(val[0])
                        temp1 = float(val[1])
                        pixel_size_temp = [temp0, temp1]
                    except:
                        raise ValueError('pixel_size expected float xy or [float x, float y]. Got {}'.format(val))
                else:
                    raise ValueError('pixel_size expected float xy or [float x, float y]. Got {}'.format(val))

        if pixel_size_temp[0] <= 0 or pixel_size_temp[1] <= 0:
            raise ValueError('pixel_size (x,y) must be > (0.,0.). Got {}'.format(pixel_size_temp))

        self.__pixel_size = numpy.array(pixel_size_temp)

    @property
    def origin(self):
        return self.__origin

    @origin.setter
    def origin(self, val):
        allowed = ['top-left', 'top-right','bottom-left','bottom-right']
        if val in allowed:
            self.__origin=val
        else:
            raise ValueError('origin expected one of {0}. 
Got {1}'.format(allowed, val))\n\n def __str__(self):\n repres = \"Panel configuration:\\n\" \n repres += \"\\tNumber of pixels: {0}\\n\".format(self.num_pixels)\n repres += \"\\tPixel size: {0}\\n\".format(self.pixel_size)\n repres += \"\\tPixel origin: {0}\\n\".format(self.origin)\n return repres \n\n def __eq__(self, other):\n\n if not isinstance(other, self.__class__):\n return False\n \n if numpy.array_equal(self.num_pixels, other.num_pixels) \\\n and numpy.allclose(self.pixel_size, other.pixel_size) \\\n and self.origin == other.origin: \n return True\n \n return False\n\n def __init__ (self, num_pixels, pixel_size, origin, dimension): \n \"\"\"Constructor method\n \"\"\"\n self._dimension = dimension\n self.num_pixels = num_pixels\n self.pixel_size = pixel_size\n self.origin = origin\n\nclass Channels(object):\n r'''This is a class describing the channels of the data. \n This will be created on initialisation of AcquisitionGeometry.\n \n :param num_channels: The number of channels of data\n :type num_channels: int\n :param channel_labels: A list of channel labels\n :type channel_labels: list, optional\n '''\n\n @property\n def num_channels(self):\n return self.__num_channels\n\n @num_channels.setter\n def num_channels(self, val): \n try:\n val = int(val)\n except TypeError:\n raise ValueError('num_channels expected a positive integer. Got {}'.format(type(val)))\n\n if val > 0:\n self.__num_channels = val\n else:\n raise ValueError('num_channels expected a positive integer. Got {}'.format(val))\n\n @property\n def channel_labels(self):\n return self.__channel_labels\n\n @channel_labels.setter\n def channel_labels(self, val): \n if val is None or len(val) == self.__num_channels:\n self.__channel_labels = val \n else:\n raise ValueError('labels expected to have length {0}. Got {1}'.format(self.__num_channels, len(val)))\n\n def __str__(self):\n repres = \"Channel configuration:\\n\" \n repres += \"\\tNumber of channels: {0}\\n\".format(self.num_channels)\n \n num_print=min(10,self.num_channels) \n if hasattr(self, 'channel_labels'):\n repres += \"\\tChannel labels 0-{0}: {1}\\n\".format(num_print, self.channel_labels[0:num_print])\n \n return repres\n\n def __eq__(self, other):\n\n if not isinstance(other, self.__class__):\n return False\n \n if self.num_channels != other.num_channels:\n return False\n\n if hasattr(self,'channel_labels'):\n if self.channel_labels != other.channel_labels:\n return False\n \n return True\n\n def __init__ (self, num_channels, channel_labels): \n \"\"\"Constructor method\n \"\"\"\n self.num_channels = num_channels\n if channel_labels is not None:\n self.channel_labels = channel_labels\n\nclass Angles(object):\n r'''This is a class describing the angles of the data. 

    :param angles: The angular positions of the acquisition data
    :type angles: list, ndarray
    :param initial_angle: The angular offset of the object from the reference frame
    :type initial_angle: float, optional
    :param angle_unit: The units of the stored angles 'degree' or 'radian'
    :type angle_unit: string
    '''

    @property
    def angle_data(self):
        return self.__angle_data

    @angle_data.setter
    def angle_data(self, val):
        if val is None:
            raise ValueError('angle_data expected to be a list of floats')
        else:
            try:
                self.num_positions = len(val)

            except TypeError:
                self.num_positions = 1
                val = [val]

            finally:
                try:
                    self.__angle_data = numpy.asarray(val, dtype=numpy.float32)
                except:
                    raise ValueError('angle_data expected to be a list of floats')

    @property
    def initial_angle(self):
        return self.__initial_angle

    @initial_angle.setter
    def initial_angle(self, val):
        try:
            val = float(val)
        except:
            raise TypeError('initial_angle expected a float. Got {0}'.format(type(val)))

        self.__initial_angle = val

    @property
    def angle_unit(self):
        return self.__angle_unit

    @angle_unit.setter
    def angle_unit(self,val):
        if val != AcquisitionGeometry.DEGREE and val != AcquisitionGeometry.RADIAN:
            raise ValueError('angle_unit = {} not recognised please specify \'degree\' or \'radian\''.format(val))
        else:
            self.__angle_unit = val

    def __str__(self):
        repres = "Acquisition description:\n"
        repres += "\tNumber of positions: {0}\n".format(self.num_positions)
        num_print = min(20,self.num_positions)
        repres += "\tAngles 0-{0} in {1}s:\n{2}\n".format(num_print, self.angle_unit, numpy.array2string(self.angle_data[0:num_print], separator=', '))
        return repres

    def __eq__(self, other):

        if not isinstance(other, self.__class__):
            return False

        if self.angle_unit != other.angle_unit:
            return False

        if self.initial_angle != other.initial_angle:
            return False

        if not numpy.allclose(self.angle_data, other.angle_data):
            return False

        return True

    def __init__ (self, angles, initial_angle, angle_unit):
        """Constructor method
        """
        self.angle_data = angles
        self.initial_angle = initial_angle
        self.angle_unit = angle_unit
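# Illustrative usage (not part of the library API): a minimal sketch of the
# Angles container, assuming the module-level `numpy` import. A scalar is
# promoted to a single position; a sequence is stored as a float32 array.
# The function name `_example_...` is hypothetical.
def _example_angles_usage():
    sweep = Angles(numpy.linspace(0, 180, 91), initial_angle=0, angle_unit='degree')
    print(sweep.num_positions)   # expected: 91
    single = Angles(0.0, initial_angle=0, angle_unit='radian')
    print(single.num_positions)  # expected: 1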
class Configuration(object):
    r'''This class holds the description of the system components.
    '''

    def __init__(self):
        self.system = None
        self.angles = None
        self.panel = None
        self.channels = Channels(1, None)

    @property
    def configured(self):
        if self.system is None:
            print("Please configure AcquisitionGeometry using one of the following methods:\
                    \n\tAcquisitionGeometry.create_Parallel2D()\
                    \n\tAcquisitionGeometry.create_Parallel3D()\
                    \n\tAcquisitionGeometry.create_Cone2D()\
                    \n\tAcquisitionGeometry.create_Cone3D()")
            return False

        configured = True
        if self.angles is None:
            print("Please configure angular data using the set_angles() method")
            configured = False
        if self.panel is None:
            print("Please configure the panel using the set_panel() method")
            configured = False
        return configured

    def __str__(self):
        repres = ""
        if self.configured:
            repres += str(self.system)
            repres += str(self.panel)
            repres += str(self.channels)
            repres += str(self.angles)

        return repres

    def __eq__(self, other):

        if not isinstance(other, self.__class__):
            return False

        if self.system == other.system\
        and self.panel == other.panel\
        and self.channels == other.channels\
        and self.angles == other.angles:
            return True

        return False


class AcquisitionGeometry(object):
    r'''This class holds the AcquisitionGeometry of the system.

    Please initialise using one of the factory methods:
    AcquisitionGeometry.create_Parallel2D
    AcquisitionGeometry.create_Parallel3D
    AcquisitionGeometry.create_Cone2D
    AcquisitionGeometry.create_Cone3D


    These initialisation parameters will be deprecated in a future release.
    :param geom_type: A description of the system type 'cone' or 'parallel'
    :type geom_type: string
    :param pixel_num_h: Number of pixels in the horizontal direction
    :type pixel_num_h: int, optional
    :param pixel_num_v: Number of pixels in the vertical direction
    :type pixel_num_v: int, optional
    :param pixel_size_h: Size of pixels in the horizontal direction
    :type pixel_size_h: float, optional
    :param pixel_size_v: Size of pixels in the vertical direction
    :type pixel_size_v: float, optional
    :param channels: Number of channels
    :type channels: int, optional
    :param dist_source_center: Distance from the source to the origin
    :type dist_source_center: float, optional
    :param dist_center_detector: Distance from the origin to the centre of the detector
    :type dist_center_detector: float, optional

    '''

    RANDOM = 'random'
    RANDOM_INT = 'random_int'
    ANGLE_UNIT = 'angle_unit'
    DEGREE = 'degree'
    RADIAN = 'radian'
    CHANNEL = 'channel'
    ANGLE = 'angle'
    VERTICAL = 'vertical'
    HORIZONTAL = 'horizontal'
    PARALLEL = 'parallel'
    CONE = 'cone'
    DIM2 = '2D'
    DIM3 = '3D'

    #for backwards compatibility
    @property
    def geom_type(self):
        return self.config.system.geometry

    @property
    def num_projections(self):
        return len(self.angles)

    @property
    def pixel_num_h(self):
        return self.config.panel.num_pixels[0]

    @pixel_num_h.setter
    def pixel_num_h(self, val):
        self.config.panel.num_pixels[0] = val

    @property
    def pixel_num_v(self):
        return self.config.panel.num_pixels[1]

    @pixel_num_v.setter
    def pixel_num_v(self, val):
        self.config.panel.num_pixels[1] = val

    @property
    def pixel_size_h(self):
        return self.config.panel.pixel_size[0]

    @pixel_size_h.setter
    def pixel_size_h(self, val):
        self.config.panel.pixel_size[0] = val

    @property
    def pixel_size_v(self):
        return self.config.panel.pixel_size[1]

    @pixel_size_v.setter
    def pixel_size_v(self, val):
self.config.panel.pixel_size[1] = val\n\n @property\n def channels(self):\n return self.config.channels.num_channels\n\n @property\n def angles(self):\n return self.config.angles.angle_data\n\n @property\n def dist_source_center(self):\n out = self.config.system.calculate_magnification()\n return out[0]\n\n @property\n def dist_center_detector(self):\n out = self.config.system.calculate_magnification()\n return out[1]\n\n @property\n def magnification(self):\n out = self.config.system.calculate_magnification()\n return out[2]\n\n @property\n def dimension(self):\n return self.config.system.dimension\n\n @property\n def shape(self):\n\n shape_dict = {AcquisitionGeometry.CHANNEL: self.config.channels.num_channels,\n AcquisitionGeometry.ANGLE: self.config.angles.num_positions,\n AcquisitionGeometry.VERTICAL: self.config.panel.num_pixels[1], \n AcquisitionGeometry.HORIZONTAL: self.config.panel.num_pixels[0]}\n\n shape = []\n for label in self.dimension_labels:\n shape.append(shape_dict[label])\n\n return tuple(shape)\n\n @shape.setter\n def shape(self, val):\n print(\"Deprecated - shape will be set automatically\")\n\n @property\n def dimension_labels(self):\n labels_default = DataOrder.CIL_AG_LABELS\n\n shape_default = [self.config.channels.num_channels,\n self.config.angles.num_positions,\n self.config.panel.num_pixels[1],\n self.config.panel.num_pixels[0]\n ]\n\n try:\n labels = list(self.__dimension_labels)\n except AttributeError:\n labels = labels_default.copy()\n\n #remove from list labels where len == 1\n #\n for i, x in enumerate(shape_default):\n if x == 1:\n try:\n labels.remove(labels_default[i])\n except ValueError:\n pass #if not in custom list carry on\n\n return tuple(labels)\n \n @dimension_labels.setter\n def dimension_labels(self, val):\n\n labels_default = DataOrder.CIL_AG_LABELS\n\n #check input and store. This value is not used directly\n if val is not None:\n for x in val:\n if x not in labels_default:\n raise ValueError('Requested axis are not possible. Accepted label names {},\\ngot {}'.format(labels_default,val))\n \n self.__dimension_labels = tuple(val)\n\n\n @property\n def system_description(self):\n return self.config.system.system_description()\n\n @property\n def dtype(self):\n return self.__dtype\n\n @dtype.setter\n def dtype(self, val):\n self.__dtype = val \n\n\n def __init__(self,\n geom_type, \n dimension=None,\n angles=None, \n pixel_num_h=1, \t\n pixel_size_h=1,\n pixel_num_v=1,\n pixel_size_v=1,\n dist_source_center=None,\n dist_center_detector=None,\n channels=1,\n ** kwargs):\n\n \"\"\"Constructor method\n \"\"\"\n\n # default dtype for the acquisition geometry\n self.dtype = kwargs.get('dtype', numpy.float32)\n\n #backward compatibility\n new_setup = kwargs.get('new_setup', False)\n\n #set up old geometry \n if new_setup is False:\n self.config = Configuration()\n \n if angles is None:\n raise ValueError(\"AcquisitionGeometry not configured. Parameter 'angles' is required\")\n\n if geom_type == AcquisitionGeometry.CONE:\n if dist_source_center is None:\n raise ValueError(\"AcquisitionGeometry not configured. Parameter 'dist_source_center' is required\")\n if dist_center_detector is None:\n raise ValueError(\"AcquisitionGeometry not configured. 
Parameter 'dist_center_detector' is required\")\n\n if pixel_num_v > 1:\n dimension = 3\n num_pixels = [pixel_num_h, pixel_num_v]\n pixel_size = [pixel_size_h, pixel_size_v]\n if geom_type == AcquisitionGeometry.CONE:\n self.config.system = Cone3D(source_pos=[0,-dist_source_center,0], detector_pos=[0,dist_center_detector,0], detector_direction_x=[1,0,0], detector_direction_y=[0,0,1], rotation_axis_pos=[0,0,0], rotation_axis_direction=[0,0,1])\n else:\n self.config.system = Parallel3D(ray_direction=[0,1,0], detector_pos=[0,0,0], detector_direction_x=[1,0,0], detector_direction_y=[0,0,1], rotation_axis_pos=[0,0,0], rotation_axis_direction=[0,0,1])\n else:\n dimension = 2\n num_pixels = [pixel_num_h, 1]\n pixel_size = [pixel_size_h, pixel_size_h] \n if geom_type == AcquisitionGeometry.CONE:\n self.config.system = Cone2D(source_pos=[0,-dist_source_center], detector_pos=[0,dist_center_detector], detector_direction_x=[1,0], rotation_axis_pos=[0,0])\n else:\n self.config.system = Parallel2D(ray_direction=[0,1], detector_pos=[0,0], detector_direction_x=[1,0], rotation_axis_pos=[0,0])\n\n\n self.config.panel = Panel(num_pixels, pixel_size, 'bottom-left', dimension) \n self.config.channels = Channels(channels, channel_labels=None) \n self.config.angles = Angles(angles, 0, kwargs.get(AcquisitionGeometry.ANGLE_UNIT, AcquisitionGeometry.DEGREE))\n\n self.dimension_labels = kwargs.get('dimension_labels', None)\n if self.config.configured:\n print(\"AcquisitionGeometry configured using deprecated method\")\n else:\n raise ValueError(\"AcquisitionGeometry not configured\")\n\n def set_angles(self, angles, initial_angle=0, angle_unit='degree'):\n r'''This method configures the angular information of an AcquisitionGeometry object. \n\n :param angles: The angular positions of the acquisition data\n :type angles: list, ndarray\n :param initial_angle: The angular offset of the object from the reference frame\n :type initial_angle: float, optional\n :param angle_unit: The units of the stored angles 'degree' or 'radian'\n :type angle_unit: string\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n '''\n self.config.angles = Angles(angles, initial_angle, angle_unit)\n return self\n\n def set_panel(self, num_pixels, pixel_size=(1,1), origin='bottom-left'):\n\n r'''This method configures the panel information of an AcquisitionGeometry object. \n \n :param num_pixels: num_pixels_h or (num_pixels_h, num_pixels_v) containing the number of pixels of the panel\n :type num_pixels: int, list, tuple\n :param pixel_size: pixel_size_h or (pixel_size_h, pixel_size_v) containing the size of the pixels of the panel\n :type pixel_size: int, list, tuple, optional\n :param origin: the position of pixel 0 (the data origin) of the panel 'top-left', 'top-right', 'bottom-left', 'bottom-right'\n :type origin: string, default 'bottom-left'\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n ''' \n self.config.panel = Panel(num_pixels, pixel_size, origin, self.config.system._dimension)\n return self\n\n def set_channels(self, num_channels=1, channel_labels=None):\n r'''This method configures the channel information of an AcquisitionGeometry object. 
\n \n :param num_channels: The number of channels of data\n :type num_channels: int, optional\n :param channel_labels: A list of channel labels\n :type channel_labels: list, optional\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n ''' \n self.config.channels = Channels(num_channels, channel_labels)\n return self\n\n def set_labels(self, labels=None):\n r'''This method configures the dimension labels of an AcquisitionGeometry object. \n \n :param labels: The order of the dimensions describing the data.\\\n Expects a list containing at least one of the unique labels: 'channel' 'angle' 'vertical' 'horizontal'\n default = ['channel','angle','vertical','horizontal']\n :type labels: list, optional\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n ''' \n self.dimension_labels = labels\n return self\n \n @staticmethod\n def create_Parallel2D(ray_direction=[0, 1], detector_position=[0, 0], detector_direction_x=[1, 0], rotation_axis_position=[0, 0]):\n r'''This creates the AcquisitionGeometry for a parallel beam 2D tomographic system\n\n :param ray_direction: A 2D vector describing the x-ray direction (x,y)\n :type ray_direction: list, tuple, ndarray, optional\n :param detector_position: A 2D vector describing the position of the centre of the detector (x,y)\n :type detector_position: list, tuple, ndarray, optional\n :param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y)\n :type detector_direction_x: list, tuple, ndarray\n :param rotation_axis_position: A 2D vector describing the position of the axis of rotation (x,y)\n :type rotation_axis_position: list, tuple, ndarray, optional\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry\n '''\n AG = AcquisitionGeometry('', new_setup=True)\n AG.config = Configuration()\n AG.config.system = Parallel2D(ray_direction, detector_position, detector_direction_x, rotation_axis_position)\n return AG \n\n @staticmethod\n def create_Cone2D(source_position, detector_position, detector_direction_x=[1,0], rotation_axis_position=[0,0]):\n r'''This creates the AcquisitionGeometry for a cone beam 2D tomographic system \n\n :param source_position: A 2D vector describing the position of the source (x,y)\n :type source_position: list, tuple, ndarray\n :param detector_position: A 2D vector describing the position of the centre of the detector (x,y)\n :type detector_position: list, tuple, ndarray\n :param detector_direction_x: A 2D vector describing the direction of the detector_x (x,y)\n :type detector_direction_x: list, tuple, ndarray\n :param rotation_axis_position: A 2D vector describing the position of the axis of rotation (x,y)\n :type rotation_axis_position: list, tuple, ndarray, optional\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n ''' \n AG = AcquisitionGeometry('', new_setup=True)\n AG.config = Configuration()\n AG.config.system = Cone2D(source_position, detector_position, detector_direction_x, rotation_axis_position)\n return AG \n\n @staticmethod\n def create_Parallel3D(ray_direction=[0,1,0], detector_position=[0,0,0], detector_direction_x=[1,0,0], detector_direction_y=[0,0,1], rotation_axis_position=[0,0,0], rotation_axis_direction=[0,0,1]):\n r'''This creates the AcquisitionGeometry for a parallel beam 3D tomographic system\n \n :param ray_direction: A 3D vector describing the x-ray direction (x,y,z)\n :type ray_direction: list, tuple, ndarray, optional\n :param 
detector_position: A 3D vector describing the position of the centre of the detector (x,y,z)\n :type detector_position: list, tuple, ndarray, optional\n :param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z)\n :type detector_direction_x: list, tuple, ndarray\n :param detector_direction_y: A 3D vector describing the direction of the detector_y (x,y,z)\n :type detector_direction_y: list, tuple, ndarray\n :param rotation_axis_position: A 3D vector describing the position of the axis of rotation (x,y,z)\n :type rotation_axis_position: list, tuple, ndarray, optional\n :param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z)\n :type rotation_axis_direction: list, tuple, ndarray, optional \n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n '''\n AG = AcquisitionGeometry('', new_setup=True)\n AG.config = Configuration()\n AG.config.system = Parallel3D(ray_direction, detector_position, detector_direction_x, detector_direction_y, rotation_axis_position, rotation_axis_direction)\n return AG \n\n @staticmethod\n def create_Cone3D(source_position, detector_position, detector_direction_x=[1,0,0], detector_direction_y=[0,0,1], rotation_axis_position=[0,0,0], rotation_axis_direction=[0,0,1]):\n r'''This creates the AcquisitionGeometry for a cone beam 3D tomographic system\n \n :param source_position: A 3D vector describing the position of the source (x,y,z)\n :type source_position: list, tuple, ndarray, optional\n :param detector_position: A 3D vector describing the position of the centre of the detector (x,y,z)\n :type detector_position: list, tuple, ndarray, optional\n :param detector_direction_x: A 3D vector describing the direction of the detector_x (x,y,z)\n :type detector_direction_x: list, tuple, ndarray\n :param detector_direction_y: A 3D vector describing the direction of the detector_y (x,y,z)\n :type detector_direction_y: list, tuple, ndarray\n :param rotation_axis_position: A 3D vector describing the position of the axis of rotation (x,y,z)\n :type rotation_axis_position: list, tuple, ndarray, optional\n :param rotation_axis_direction: A 3D vector describing the direction of the axis of rotation (x,y,z)\n :type rotation_axis_direction: list, tuple, ndarray, optional\n :return: returns a configured AcquisitionGeometry object\n :rtype: AcquisitionGeometry \n '''\n AG = AcquisitionGeometry('', new_setup=True)\n AG.config = Configuration()\n AG.config.system = Cone3D(source_position, detector_position, detector_direction_x, detector_direction_y, rotation_axis_position, rotation_axis_direction)\n return AG \n\n def get_order_by_label(self, dimension_labels, default_dimension_labels):\n order = []\n for i, el in enumerate(default_dimension_labels):\n for j, ek in enumerate(dimension_labels):\n if el == ek:\n order.append(j)\n break\n return order\n\n def __eq__(self, other):\n\n if isinstance(other, self.__class__) and self.config == other.config :\n return True\n return False\n\n def clone(self):\n '''returns a copy of the AcquisitionGeometry'''\n return copy.deepcopy(self)\n\n def copy(self):\n '''alias of clone'''\n return self.clone()\n\n def get_centre_slice(self):\n '''returns a 2D AcquisitionGeometry that corresponds to the centre slice of the input'''\n\n if self.dimension == '2D':\n return self\n \n AG_2D = copy.deepcopy(self)\n AG_2D.config.system = self.config.system.get_centre_slice()\n AG_2D.config.panel.num_pixels[1] = 1\n AG_2D.config.panel.pixel_size[1] = 
abs(self.config.system.detector.direction_y[2]) * self.config.panel.pixel_size[1]
        return AG_2D

    def get_ImageGeometry(self, resolution=1.0):
        '''returns a default configured ImageGeometry object based on the AcquisitionGeometry'''

        num_voxel_xy = int(numpy.ceil(self.config.panel.num_pixels[0] * resolution))
        voxel_size_xy = self.config.panel.pixel_size[0] / (resolution * self.magnification)

        if self.dimension == '3D':
            num_voxel_z = int(numpy.ceil(self.config.panel.num_pixels[1] * resolution))
            voxel_size_z = self.config.panel.pixel_size[1] / (resolution * self.magnification)
        else:
            num_voxel_z = 0
            voxel_size_z = 1

        return ImageGeometry(num_voxel_xy, num_voxel_xy, num_voxel_z, voxel_size_xy, voxel_size_xy, voxel_size_z, channels=self.channels)

    def __str__ (self):
        return str(self.config)

    def subset(self, dimensions=None, **kw):
        '''Returns a new sliced and/or reshaped AcquisitionGeometry'''

        if not kw.get('suppress_warning', False):
            warnings.warn('Subset has been deprecated and will be removed in following version. Use reorder() and get_slice() instead',
                          DeprecationWarning)

        if dimensions is None:
            return self.get_slice(**kw)
        else:
            if len(dimensions) != len(self.dimension_labels):
                raise ValueError('The axes list for subset must contain the dimension_labels {0} got {1}'.format(self.dimension_labels, dimensions))

            temp = self.copy()
            temp.set_labels(dimensions)
            return temp

    def get_slice(self, channel=None, angle=None, vertical=None, horizontal=None):
        '''
        Returns a new AcquisitionGeometry of a single slice in the requested direction. Will only return reconstructable geometries.
        '''
        geometry_new = self.copy()

        if channel is not None:
            geometry_new.config.channels.num_channels = 1
            if hasattr(geometry_new.config.channels,'channel_labels'):
                geometry_new.config.channels.channel_labels = geometry_new.config.channels.channel_labels[channel]

        if angle is not None:
            geometry_new.config.angles.angle_data = geometry_new.config.angles.angle_data[angle]

        if vertical is not None:
            if geometry_new.geom_type == AcquisitionGeometry.PARALLEL or vertical == 'centre' or vertical == geometry_new.pixel_num_v//2:
                geometry_new = geometry_new.get_centre_slice()
            else:
                raise ValueError("Can only subset centre slice geometry on cone-beam data. Expected vertical = 'centre'. Got vertical = {0}".format(vertical))

        if horizontal is not None:
            raise ValueError("Cannot calculate system geometry for a horizontal slice")

        return geometry_new

    def allocate(self, value=0, **kwargs):
        '''allocates an AcquisitionData according to the size expressed in the instance

        :param value: accepts numbers to allocate a uniform array, or a string as 'random' or 'random_int' to create a random array, or None.
        :type value: number or string, default None allocates empty memory block
        :param dtype: numerical type to allocate
        :type dtype: numpy type, default numpy.float32
        '''
        dtype = kwargs.get('dtype', self.dtype)

        if kwargs.get('dimension_labels', None) is not None:
            raise ValueError("Deprecated: 'dimension_labels' cannot be set with 'allocate()'. Use 'geometry.set_labels()' to modify the geometry before using allocate.")

        out = AcquisitionData(geometry=self.copy(),
                              dtype=dtype,
                              suppress_warning=True)

        if isinstance(value, Number):
            # it's created empty, so we make it 0
            out.array.fill(value)
        else:
            if value == AcquisitionGeometry.RANDOM:
                seed = kwargs.get('seed', None)
                if seed is not None:
                    numpy.random.seed(seed)
                if dtype in [ numpy.complex , numpy.complex64 , numpy.complex128 ] :
                    r = numpy.random.random_sample(self.shape) + 1j * numpy.random.random_sample(self.shape)
                    out.fill(r)
                else:
                    out.fill(numpy.random.random_sample(self.shape))
            elif value == AcquisitionGeometry.RANDOM_INT:
                seed = kwargs.get('seed', None)
                if seed is not None:
                    numpy.random.seed(seed)
                max_value = kwargs.get('max_value', 100)
                r = numpy.random.randint(max_value,size=self.shape, dtype=numpy.int32)
                out.fill(numpy.asarray(r, dtype=self.dtype))
            elif value is None:
                pass
            else:
                raise ValueError('Value {} unknown'.format(value))

        return out
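# Illustrative usage (not part of the library API): a minimal end-to-end
# sketch of the factory/fluent interface documented above. Create a cone-beam
# 3D geometry, describe the panel, angles and channels, then allocate matching
# acquisition data. Values are illustrative; `AcquisitionData` must be
# importable at call time, and the function name `_example_...` is hypothetical.
def _example_acquisition_geometry():
    ag = AcquisitionGeometry.create_Cone3D(source_position=[0, -500, 0],
                                           detector_position=[0, 500, 0])\
            .set_panel(num_pixels=[128, 128], pixel_size=(0.1, 0.1))\
            .set_angles(angles=numpy.linspace(0, 360, 181), angle_unit='degree')\
            .set_channels(num_channels=1)
    print(ag.magnification)              # expected: 2.0
    data = ag.allocate('random', seed=42)
    print(data.shape)                    # single channel is dropped from the labels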
class DataContainer(object):
    '''Generic class to hold data

    Data is currently held in a numpy array'''

    @property
    def geometry(self):
        return None

    @geometry.setter
    def geometry(self, val):
        if val is not None:
            raise TypeError("DataContainers cannot hold a geometry, use ImageData or AcquisitionData instead")

    @property
    def dimension_labels(self):

        if self.__dimension_labels is None:
            default_labels = [0]*self.number_of_dimensions
            for i in range(self.number_of_dimensions):
                default_labels[i] = 'dimension_{0:02}'.format(i)
            return tuple(default_labels)
        else:
            return self.__dimension_labels

    @dimension_labels.setter
    def dimension_labels(self, val):
        if val is None:
            self.__dimension_labels = None
        elif len(list(val))==self.number_of_dimensions:
            self.__dimension_labels = tuple(val)
        else:
            raise ValueError("dimension_labels expected a list containing {0} strings got {1}".format(self.number_of_dimensions, val))

    @property
    def shape(self):
        '''Returns the shape of the DataContainer'''
        return self.array.shape

    @shape.setter
    def shape(self, val):
        print("Deprecated - shape will be set automatically")

    @property
    def number_of_dimensions(self):
        '''Returns the number of dimensions of the DataContainer'''
        return len(self.array.shape)

    @property
    def dtype(self):
        '''Returns the dtype of the data array. 
\n If geometry exists, the dtype of the geometry = dtype of the array''' \n self.geometry.dtype = self.array.dtype \n return self.array.dtype\n\n @property\n def size(self):\n '''Returns the number of elements of the DataContainer'''\n return self.array.size\n\n __container_priority__ = 1\n def __init__ (self, array, deep_copy=True, dimension_labels=None, \n **kwargs):\n '''Holds the data'''\n \n if type(array) == numpy.ndarray:\n if deep_copy:\n self.array = array.copy()\n else:\n self.array = array \n else:\n raise TypeError('Array must be NumpyArray, passed {0}'\\\n .format(type(array)))\n\n #Don't set for derived classes\n if type(self) is DataContainer:\n self.dimension_labels = dimension_labels\n\n # finally copy the geometry, and force dtype of the geometry of the data = the dype of the data\n if 'geometry' in kwargs.keys():\n self.geometry = kwargs['geometry']\n try:\n self.geometry.dtype = self.dtype \n except:\n pass \n \n def get_dimension_size(self, dimension_label):\n\n if dimension_label in self.dimension_labels:\n i = self.dimension_labels.index(dimension_label)\n return self.shape[i]\n else:\n raise ValueError('Unknown dimension {0}. Should be one of {1}'.format(dimension_label,\n self.dimension_labels))\n def get_dimension_axis(self, dimension_label):\n\n if dimension_label in self.dimension_labels:\n return self.dimension_labels.index(dimension_label)\n else:\n raise ValueError('Unknown dimension {0}. Should be one of {1}'.format(dimension_label,\n self.dimension_labels))\n \n def as_array(self):\n '''Returns the pointer to the array.\n '''\n return self.array\n\n def subset(self, dimensions=None, **kw):\n '''Creates a DataContainer containing a subset of self according to the \n labels in dimensions'''\n \n if not kw.get('suppress_warning', False):\n warnings.warn('Subset has been deprecated and will be removed in following version. Use reorder() and get_slice() instead',\n DeprecationWarning)\n\n if dimensions is None:\n return self.get_slice(**kw)\n else:\n temp = self.copy()\n temp.reorder(dimensions)\n return temp\n\n def get_slice(self,**kw):\n '''\n Returns a new DataContainer containing a single slice of in the requested direction. \\\n Pass keyword arguments <dimension label>=index\n '''\n new_array = None\n\n #get ordered list of current dimensions\n dimension_labels_list = list(self.dimension_labels)\n\n #remove axes from array and labels\n for key, value in kw.items():\n if value is not None:\n axis = dimension_labels_list.index(key)\n dimension_labels_list.remove(key)\n if new_array is None:\n new_array = self.as_array().take(indices=value, axis=axis)\n else:\n new_array = new_array.take(indices=value, axis=axis)\n\n if new_array.ndim > 1:\n return DataContainer(new_array, False, dimension_labels_list, suppress_warning=True)\n else:\n return VectorData(new_array, dimension_labels=dimension_labels_list)\n \n def reorder(self, order=None):\n '''\n reorders the data in memory as requested.\n\n :param order: ordered list of labels from self.dimension_labels, or order for engine 'astra' or 'tigre'\n :type order: list, sting \n '''\n\n if order == 'astra' or order == 'tigre':\n order = DataOrder.get_order_for_engine(order, self.geometry) \n\n try:\n if len(order) != len(self.shape):\n raise ValueError('The axes list for resorting must have {0} dimensions. Got {1}'.format(len(self.shape), len(order)))\n except TypeError as ae:\n raise ValueError('The order must be an iterable with __len__ implemented, like a list or a tuple. 
Got {}'.format(type(order)))\n \n correct = True\n for el in order:\n correct = correct and el in self.dimension_labels\n if not correct:\n raise ValueError('The axes list for resorting must contain the dimension_labels {0} got {1}'.format(self.dimension_labels, order))\n \n new_order = [0]*len(self.shape)\n dimension_labels_new = [0]*len(self.shape)\n\n for i, axis in enumerate(order):\n new_order[i] = self.dimension_labels.index(axis)\n dimension_labels_new[i] = axis\n\n self.array = numpy.ascontiguousarray(numpy.transpose(self.array, new_order))\n\n if self.geometry is None:\n self.dimension_labels = dimension_labels_new\n else:\n self.geometry.set_labels(dimension_labels_new)\n \n def fill(self, array, **dimension):\n '''fills the internal data array with the DataContainer, numpy array or number provided\n \n :param array: number, numpy array or DataContainer to copy into the DataContainer\n :type array: DataContainer or subclasses, numpy array or number\n :param dimension: dictionary, optional\n \n if the passed numpy array points to the same array that is contained in the DataContainer,\n it just returns\n\n In case a DataContainer or subclass is passed, there will be a check of the geometry, \n if present, and the array will be resorted if the data is not in the appropriate order.\n\n User may pass a named parameter to specify in which axis the fill should happen:\n\n dc.fill(some_data, vertical=1, horizontal_x=32)\n will copy the data in some_data into the data container.\n '''\n if id(array) == id(self.array):\n return\n if dimension == {}:\n if isinstance(array, numpy.ndarray):\n if array.shape != self.shape:\n raise ValueError('Cannot fill with the provided array.' + \\\n 'Expecting {0} got {1}'.format(\n self.shape,array.shape))\n numpy.copyto(self.array, array)\n elif isinstance(array, Number):\n self.array.fill(array) \n elif issubclass(array.__class__ , DataContainer):\n if hasattr(self, 'geometry') and hasattr(array, 'geometry'):\n if self.geometry != array.geometry:\n numpy.copyto(self.array, array.subset(dimensions=array.dimension_labels).as_array())\n return\n numpy.copyto(self.array, array.as_array())\n else:\n raise TypeError('Can fill only with number, numpy array or DataContainer and subclasses. Got {}'.format(type(array)))\n else:\n \n axis = [':']* self.number_of_dimensions\n dimension_labels = list(self.dimension_labels)\n for k,v in dimension.items():\n i = dimension_labels.index(k)\n axis[i] = v\n\n command = 'self.array['\n i = 0\n for el in axis:\n if i > 0:\n command += ','\n command += str(el)\n i+=1\n \n if isinstance(array, numpy.ndarray):\n command = command + \"] = array[:]\" \n elif issubclass(array.__class__, DataContainer):\n command = command + \"] = array.as_array()[:]\" \n elif isinstance (array, Number):\n command = command + \"] = array\"\n else:\n raise TypeError('Can fill only with number, numpy array or DataContainer and subclasses. 
Got {}'.format(type(array)))\n        exec(command)\n    \n    \n    def check_dimensions(self, other):\n        return self.shape == other.shape\n    \n    ## algebra \n    \n    def __add__(self, other):\n        return self.add(other)\n    def __mul__(self, other):\n        return self.multiply(other)\n    def __sub__(self, other):\n        return self.subtract(other)\n    def __div__(self, other):\n        return self.divide(other)\n    def __truediv__(self, other):\n        return self.divide(other)\n    def __pow__(self, other):\n        return self.power(other)\n    \n    \n    # reverse operand\n    def __radd__(self, other):\n        return self + other\n    # __radd__\n    \n    def __rsub__(self, other):\n        return (-1 * self) + other\n    # __rsub__\n    \n    def __rmul__(self, other):\n        return self * other\n    # __rmul__\n    \n    def __rdiv__(self, other):\n        tmp = self.power(-1)\n        tmp *= other\n        return tmp\n    # __rdiv__\n    def __rtruediv__(self, other):\n        return self.__rdiv__(other)\n    \n    def __rpow__(self, other):\n        if isinstance(other, Number) :\n            fother = numpy.ones(numpy.shape(self.array)) * other\n            return type(self)(fother ** self.array , \n                              dimension_labels=self.dimension_labels,\n                              geometry=self.geometry)\n    # __rpow__\n    \n    # in-place arithmetic operators:\n    # (+=, -=, *=, /=, //=)\n    # must return self\n    \n    def __iadd__(self, other):\n        kw = {'out':self}\n        return self.add(other, **kw)\n    \n    def __imul__(self, other):\n        kw = {'out':self}\n        return self.multiply(other, **kw)\n    \n    def __isub__(self, other):\n        kw = {'out':self}\n        return self.subtract(other, **kw)\n    \n    def __idiv__(self, other):\n        kw = {'out':self}\n        return self.divide(other, **kw)\n    \n    def __itruediv__(self, other):\n        kw = {'out':self}\n        return self.divide(other, **kw)\n    \n    def __neg__(self):\n        '''negation operator'''\n        return -1 * self \n    \n    def __str__ (self, representation=False):\n        repres = \"\"\n        repres += \"Number of dimensions: {0}\\n\".format(self.number_of_dimensions)\n        repres += \"Shape: {0}\\n\".format(self.shape)\n        repres += \"Axis labels: {0}\\n\".format(self.dimension_labels)\n        if representation:\n            repres += \"Representation: \\n{0}\\n\".format(self.array)\n        return repres\n    \n    def get_data_axes_order(self,new_order=None):\n        '''returns the axes label of self as a list\n        \n        if new_order is None returns the labels of the axes as a sorted-by-key list\n        if new_order is a list of length number_of_dimensions, returns a list\n        with the indices of the axes in new_order with respect to those in \n        self.dimension_labels: i.e.\n          self.dimension_labels = {0:'horizontal',1:'vertical'}\n          new_order = ['vertical','horizontal']\n          returns [1,0]\n        '''\n        if new_order is None:\n            return self.dimension_labels\n        else:\n            if len(new_order) == self.number_of_dimensions:\n\n                axes_order = [0]*len(self.shape)\n                for i, axis in enumerate(new_order):\n                    axes_order[i] = self.dimension_labels.index(axis)\n                return axes_order\n            else:\n                raise ValueError('Expecting {0} axes, got {1}'\\\n                    .format(len(self.shape),len(new_order)))\n    \n    def clone(self):\n        '''returns a copy of DataContainer'''\n        return copy.deepcopy(self)\n\n    def copy(self):\n        '''alias of clone'''\n        return self.clone()\n    \n    ## binary operations\n    \n    def pixel_wise_binary(self, pwop, x2, *args, **kwargs):    \n        out = kwargs.get('out', None)\n        \n        if out is None:\n            if isinstance(x2, (int, float, complex, \\\n                               numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\\\n                               numpy.float, numpy.float16, numpy.float32, numpy.float64, \\\n                               numpy.complex)):\n                out = pwop(self.as_array() , x2 , *args, **kwargs )\n            elif issubclass(x2.__class__ , DataContainer):\n                out = pwop(self.as_array() , x2.as_array() , *args, **kwargs )\n            
elif isinstance(x2, numpy.ndarray):\n out = pwop(self.as_array() , x2 , *args, **kwargs )\n else:\n raise TypeError('Expected x2 type as number or DataContainer, got {}'.format(type(x2)))\n geom = self.geometry\n if geom is not None:\n geom = self.geometry.copy()\n return type(self)(out,\n deep_copy=False, \n dimension_labels=self.dimension_labels,\n geometry= None if self.geometry is None else self.geometry.copy(), \n suppress_warning=True)\n \n \n elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer):\n if self.check_dimensions(out) and self.check_dimensions(x2):\n kwargs['out'] = out.as_array()\n pwop(self.as_array(), x2.as_array(), *args, **kwargs )\n #return type(self)(out.as_array(),\n # deep_copy=False, \n # dimension_labels=self.dimension_labels,\n # geometry=self.geometry)\n return out\n else:\n raise ValueError(message(type(self),\"Wrong size for data memory: out {} x2 {} expected {}\".format( out.shape,x2.shape ,self.shape)))\n elif issubclass(type(out), DataContainer) and \\\n isinstance(x2, (int,float,complex, numpy.int, numpy.int8, \\\n numpy.int16, numpy.int32, numpy.int64,\\\n numpy.float, numpy.float16, numpy.float32,\\\n numpy.float64, numpy.complex)):\n if self.check_dimensions(out):\n kwargs['out']=out.as_array()\n pwop(self.as_array(), x2, *args, **kwargs )\n return out\n else:\n raise ValueError(message(type(self),\"Wrong size for data memory: \", out.shape,self.shape))\n elif issubclass(type(out), numpy.ndarray):\n if self.array.shape == out.shape and self.array.dtype == out.dtype:\n kwargs['out'] = out\n pwop(self.as_array(), x2, *args, **kwargs)\n #return type(self)(out,\n # deep_copy=False, \n # dimension_labels=self.dimension_labels,\n # geometry=self.geometry)\n else:\n raise ValueError (message(type(self), \"incompatible class:\" , pwop.__name__, type(out)))\n \n def add(self, other, *args, **kwargs):\n if hasattr(other, '__container_priority__') and \\\n self.__class__.__container_priority__ < other.__class__.__container_priority__:\n return other.add(self, *args, **kwargs)\n return self.pixel_wise_binary(numpy.add, other, *args, **kwargs)\n \n def subtract(self, other, *args, **kwargs):\n if hasattr(other, '__container_priority__') and \\\n self.__class__.__container_priority__ < other.__class__.__container_priority__:\n return other.subtract(self, *args, **kwargs)\n return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs)\n\n def multiply(self, other, *args, **kwargs):\n if hasattr(other, '__container_priority__') and \\\n self.__class__.__container_priority__ < other.__class__.__container_priority__:\n return other.multiply(self, *args, **kwargs)\n return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs)\n \n def divide(self, other, *args, **kwargs):\n if hasattr(other, '__container_priority__') and \\\n self.__class__.__container_priority__ < other.__class__.__container_priority__:\n return other.divide(self, *args, **kwargs)\n return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs)\n \n def power(self, other, *args, **kwargs):\n return self.pixel_wise_binary(numpy.power, other, *args, **kwargs) \n \n def maximum(self, x2, *args, **kwargs):\n return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs)\n \n def minimum(self,x2, out=None, *args, **kwargs):\n return self.pixel_wise_binary(numpy.minimum, x2=x2, out=out, *args, **kwargs)\n\n\n def sapyb(self, a, y, b, out=None, num_threads=NUM_THREADS):\n '''performs a*self + b * y. 
Can be done in-place\n \n Parameters\n ----------\n a : multiplier for self, can be a number or a numpy array or a DataContainer\n y : DataContainer \n b : multiplier for y, can be a number or a numpy array or a DataContainer\n out : return DataContainer, if None a new DataContainer is returned, default None. \n out can be self or y.\n num_threads : number of threads to use during the calculation, using the CIL C library\n \n It will try to use the CIL C library and default to numpy operations, in case the C library does\n not handle the types.\n \n Example:\n -------\n\n a = 2\n b = 3\n ig = ImageGeometry(10,11)\n x = ig.allocate(1)\n y = ig.allocate(2)\n out = x.sapyb(a,y,b)\n '''\n ret_out = False\n \n if out is None:\n out = self * 0.\n ret_out = True\n\n if out.dtype in [ numpy.float32, numpy.float64 ]:\n # handle with C-lib _axpby\n try:\n self._axpby(a, b, y, out, out.dtype, num_threads)\n if ret_out:\n return out\n return\n except RuntimeError as rte:\n warnings.warn(\"sapyb defaulting to Python due to: {}\".format(rte))\n except TypeError as te:\n warnings.warn(\"sapyb defaulting to Python due to: {}\".format(te))\n finally:\n pass\n \n\n # cannot be handled by _axpby\n ax = self * a\n y.multiply(b, out=out)\n out.add(ax, out=out)\n \n if ret_out:\n return out\n\n\n def axpby(self, a, b, y, out, dtype=numpy.float32, num_threads=NUM_THREADS):\n '''Deprecated. Alias of _axpby'''\n warnings.warn('The use of axpby is deprecated and will be removed in following version. Use sapyb instead',\n DeprecationWarning)\n self._axpby(a,b,y,out, dtype, num_threads)\n\n\n def _axpby(self, a, b, y, out, dtype=numpy.float32, num_threads=NUM_THREADS):\n '''performs axpby with cilacc C library, can be done in-place.\n \n Does the operation .. math:: a*x+b*y and stores the result in out, where x is self\n\n :param a: scalar\n :type a: float\n :param b: scalar\n :type b: float\n :param y: DataContainer\n :param out: DataContainer instance to store the result\n :param dtype: data type of the DataContainers\n :type dtype: numpy type, optional, default numpy.float32\n :param num_threads: number of threads to run on\n :type num_threads: int, optional, default 1/2 CPU of the system\n '''\n\n c_float_p = ctypes.POINTER(ctypes.c_float)\n c_double_p = ctypes.POINTER(ctypes.c_double)\n\n #convert a and b to numpy arrays and get the reference to the data (length = 1 or ndx.size)\n try:\n nda = a.as_array()\n except:\n nda = numpy.asarray(a)\n\n try:\n ndb = b.as_array()\n except:\n ndb = numpy.asarray(b)\n\n a_vec = 0\n if nda.size > 1:\n a_vec = 1\n\n b_vec = 0\n if ndb.size > 1:\n b_vec = 1\n\n # get the reference to the data\n ndx = self.as_array()\n ndy = y.as_array()\n ndout = out.as_array()\n\n if ndout.dtype != dtype:\n raise Warning(\"out array of type {0} does not match requested dtype {1}. 
Using {0}\".format(ndout.dtype, dtype))\n dtype = ndout.dtype\n if ndx.dtype != dtype:\n ndx = ndx.astype(dtype, casting='safe')\n if ndy.dtype != dtype:\n ndy = ndy.astype(dtype, casting='safe')\n if nda.dtype != dtype:\n nda = nda.astype(dtype, casting='same_kind')\n if ndb.dtype != dtype:\n ndb = ndb.astype(dtype, casting='same_kind')\n\n if dtype == numpy.float32:\n x_p = ndx.ctypes.data_as(c_float_p)\n y_p = ndy.ctypes.data_as(c_float_p)\n out_p = ndout.ctypes.data_as(c_float_p)\n a_p = nda.ctypes.data_as(c_float_p)\n b_p = ndb.ctypes.data_as(c_float_p)\n f = cilacc.saxpby\n\n elif dtype == numpy.float64:\n x_p = ndx.ctypes.data_as(c_double_p)\n y_p = ndy.ctypes.data_as(c_double_p)\n out_p = ndout.ctypes.data_as(c_double_p)\n a_p = nda.ctypes.data_as(c_double_p)\n b_p = ndb.ctypes.data_as(c_double_p)\n f = cilacc.daxpby\n\n else:\n raise TypeError('Unsupported type {}. Expecting numpy.float32 or numpy.float64'.format(dtype))\n\n #out = numpy.empty_like(a)\n\n \n # int psaxpby(float * x, float * y, float * out, float a, float b, long size)\n cilacc.saxpby.argtypes = [ctypes.POINTER(ctypes.c_float), # pointer to the first array \n ctypes.POINTER(ctypes.c_float), # pointer to the second array \n ctypes.POINTER(ctypes.c_float), # pointer to the third array \n ctypes.POINTER(ctypes.c_float), # pointer to A\n ctypes.c_int, # type of type of A selector (int)\n ctypes.POINTER(ctypes.c_float), # pointer to B\n ctypes.c_int, # type of type of B selector (int)\n ctypes.c_longlong, # type of size of first array \n ctypes.c_int] # number of threads\n cilacc.daxpby.argtypes = [ctypes.POINTER(ctypes.c_double), # pointer to the first array \n ctypes.POINTER(ctypes.c_double), # pointer to the second array \n ctypes.POINTER(ctypes.c_double), # pointer to the third array \n ctypes.POINTER(ctypes.c_double), # type of A (c_double)\n ctypes.c_int, # type of type of A selector (int) \n ctypes.POINTER(ctypes.c_double), # type of B (c_double)\n ctypes.c_int, # type of type of B selector (int) \n ctypes.c_longlong, # type of size of first array \n ctypes.c_int] # number of threads\n\n if f(x_p, y_p, out_p, a_p, a_vec, b_p, b_vec, ndx.size, num_threads) != 0:\n raise RuntimeError('axpby execution failed')\n \n\n ## unary operations\n def pixel_wise_unary(self, pwop, *args, **kwargs):\n out = kwargs.get('out', None)\n if out is None:\n out = pwop(self.as_array() , *args, **kwargs )\n return type(self)(out,\n deep_copy=False, \n dimension_labels=self.dimension_labels,\n geometry=self.geometry, \n suppress_warning=True)\n elif issubclass(type(out), DataContainer):\n if self.check_dimensions(out):\n kwargs['out'] = out.as_array()\n pwop(self.as_array(), *args, **kwargs )\n else:\n raise ValueError(message(type(self),\"Wrong size for data memory: \", out.shape,self.shape))\n elif issubclass(type(out), numpy.ndarray):\n if self.array.shape == out.shape and self.array.dtype == out.dtype:\n kwargs['out'] = out\n pwop(self.as_array(), *args, **kwargs)\n else:\n raise ValueError (message(type(self), \"incompatible class:\" , pwop.__name__, type(out)))\n \n def abs(self, *args, **kwargs):\n return self.pixel_wise_unary(numpy.abs, *args, **kwargs)\n\n def sign(self, *args, **kwargs):\n return self.pixel_wise_unary(numpy.sign, *args, **kwargs)\n \n def sqrt(self, *args, **kwargs):\n return self.pixel_wise_unary(numpy.sqrt, *args, **kwargs)\n\n def conjugate(self, *args, **kwargs):\n return self.pixel_wise_unary(numpy.conjugate, *args, **kwargs)\n\n def exp(self, *args, **kwargs):\n '''Applies exp pixel-wise to the 
DataContainer'''\n return self.pixel_wise_unary(numpy.exp, *args, **kwargs)\n \n def log(self, *args, **kwargs):\n '''Applies log pixel-wise to the DataContainer'''\n return self.pixel_wise_unary(numpy.log, *args, **kwargs)\n \n ## reductions\n def sum(self, *args, **kwargs):\n return self.as_array().sum(*args, **kwargs)\n def squared_norm(self, **kwargs):\n '''return the squared euclidean norm of the DataContainer viewed as a vector'''\n #shape = self.shape\n #size = reduce(lambda x,y:x*y, shape, 1)\n #y = numpy.reshape(self.as_array(), (size, ))\n return self.dot(self)\n #return self.dot(self)\n def norm(self, **kwargs):\n '''return the euclidean norm of the DataContainer viewed as a vector'''\n return numpy.sqrt(self.squared_norm(**kwargs))\n \n def dot(self, other, *args, **kwargs):\n '''return the inner product of 2 DataContainers viewed as vectors\n \n applies to real and complex data. In such case the dot method returns\n\n a.dot(b.conjugate())\n '''\n method = kwargs.get('method', 'numpy')\n if method not in ['numpy','reduce']:\n raise ValueError('dot: specified method not valid. Expecting numpy or reduce got {} '.format(\n method))\n\n if self.shape == other.shape:\n if method == 'numpy':\n return numpy.dot(self.as_array().ravel(), other.as_array().ravel().conjugate())\n elif method == 'reduce':\n # see https://github.com/vais-ral/CCPi-Framework/pull/273\n # notice that Python seems to be smart enough to use\n # the appropriate type to hold the result of the reduction\n sf = reduce(lambda x,y: x + y[0]*y[1],\n zip(self.as_array().ravel(),\n other.as_array().ravel().conjugate()),\n 0)\n return sf\n else:\n raise ValueError('Shapes are not aligned: {} != {}'.format(self.shape, other.shape))\n \n def min(self, *args, **kwargs):\n '''Returns the min pixel value in the DataContainer'''\n return numpy.min(self.as_array(), *args, **kwargs)\n \n def max(self, *args, **kwargs):\n '''Returns the max pixel value in the DataContainer'''\n return numpy.max(self.as_array(), *args, **kwargs)\n \n def mean(self, *args, **kwargs):\n '''Returns the mean pixel value of the DataContainer'''\n if kwargs.get('dtype', None) is None:\n kwargs['dtype'] = numpy.float64\n return numpy.mean(self.as_array(), *args, **kwargs)\n\n\n # Logic operators between DataContainers and floats \n def __le__(self, other):\n '''Returns boolean array of DataContainer less or equal than DataContainer/float'''\n if isinstance(other, DataContainer):\n return self.as_array()<=other.as_array()\n return self.as_array()<=other\n \n def __lt__(self, other):\n '''Returns boolean array of DataContainer less than DataContainer/float'''\n if isinstance(other, DataContainer):\n return self.as_array()<other.as_array()\n return self.as_array()<other \n \n def __ge__(self, other):\n '''Returns boolean array of DataContainer greater or equal than DataContainer/float''' \n if isinstance(other, DataContainer):\n return self.as_array()>=other.as_array()\n return self.as_array()>=other \n \n def __gt__(self, other):\n '''Returns boolean array of DataContainer greater than DataContainer/float''' \n if isinstance(other, DataContainer):\n return self.as_array()>other.as_array()\n return self.as_array()>other \n \n def __eq__(self, other):\n '''Returns boolean array of DataContainer equal to DataContainer/float''' \n if isinstance(other, DataContainer):\n return self.as_array()==other.as_array()\n return self.as_array()==other \n\n def __ne__(self, other):\n '''Returns boolean array of DataContainer negative to DataContainer/float''' \n if 
isinstance(other, DataContainer):\n return self.as_array()!=other.as_array()\n return self.as_array()!=other \n \nclass ImageData(DataContainer):\n '''DataContainer for holding 2D or 3D DataContainer'''\n __container_priority__ = 1\n\n @property\n def geometry(self):\n return self.__geometry\n\n @geometry.setter\n def geometry(self, val):\n self.__geometry = val\n\n @property\n def dimension_labels(self):\n return self.geometry.dimension_labels\n \n @dimension_labels.setter\n def dimension_labels(self, val):\n if val is not None:\n raise ValueError(\"Unable to set the dimension_labels directly. Use geometry.set_labels() instead\")\n\n def __init__(self, \n array = None, \n deep_copy=False, \n geometry=None, \n **kwargs):\n\n if not kwargs.get('suppress_warning', False):\n warnings.warn('Direct invocation is deprecated and will be removed in following version. Use allocate from ImageGeometry instead',\n DeprecationWarning)\n\n dtype = kwargs.get('dtype', numpy.float32)\n \n\n if geometry is None:\n raise AttributeError(\"ImageData requires a geometry\")\n \n\n labels = kwargs.get('dimension_labels', None)\n if labels is not None and labels != geometry.dimension_labels:\n raise ValueError(\"Deprecated: 'dimension_labels' cannot be set with 'allocate()'. Use 'geometry.set_labels()' to modify the geometry before using allocate.\")\n\n if array is None: \n array = numpy.empty(geometry.shape, dtype=dtype)\n elif issubclass(type(array) , DataContainer):\n array = array.as_array()\n elif issubclass(type(array) , numpy.ndarray):\n pass\n else:\n raise TypeError('array must be a CIL type DataContainer or numpy.ndarray got {}'.format(type(array)))\n \n if array.shape != geometry.shape:\n raise ValueError('Shape mismatch {} {}'.format(array.shape, geometry.shape))\n\n if array.ndim not in [2,3,4]:\n raise ValueError('Number of dimensions are not 2 or 3 or 4 : {0}'.format(array.ndim))\n \n super(ImageData, self).__init__(array, deep_copy, geometry=geometry, **kwargs)\n \n def subset(self, dimensions=None, **kw):\n '''returns a subset of ImageData and regenerates the geometry'''\n \n if not kw.get('suppress_warning', False):\n warnings.warn('Subset has been deprecated and will be removed in following version. Use reorder() and get_slice() instead',\n DeprecationWarning)\n\n if dimensions is None:\n return self.get_slice(**kw)\n else:\n temp = self.copy()\n temp.reorder(dimensions)\n return temp\n\n def get_slice(self,channel=None, vertical=None, horizontal_x=None, horizontal_y=None, force=False):\n '''\n Returns a new ImageData of a single slice of in the requested direction.\n '''\n try:\n geometry_new = self.geometry.get_slice(channel=channel, vertical=vertical, horizontal_x=horizontal_x, horizontal_y=horizontal_y)\n except ValueError:\n if force:\n geometry_new = None\n else:\n raise ValueError (\"Unable to return slice of requested ImageData. 
Use 'force=True' to return DataContainer instead.\")\n\n #if vertical = 'centre' slice convert to index and subset, this will interpolate 2 rows to get the center slice value\n if vertical == 'centre':\n dim = self.geometry.dimension_labels.index('vertical') \n centre_slice_pos = (self.geometry.shape[dim]-1) / 2.\n ind0 = int(numpy.floor(centre_slice_pos))\n \n w2 = centre_slice_pos - ind0\n out = DataContainer.get_slice(self, channel=channel, vertical=ind0, horizontal_x=horizontal_x, horizontal_y=horizontal_y)\n \n if w2 > 0:\n out2 = DataContainer.get_slice(self, channel=channel, vertical=ind0 + 1, horizontal_x=horizontal_x, horizontal_y=horizontal_y)\n out = out * (1 - w2) + out2 * w2\n else:\n out = DataContainer.get_slice(self, channel=channel, vertical=vertical, horizontal_x=horizontal_x, horizontal_y=horizontal_y)\n\n if len(out.shape) == 1 or geometry_new is None:\n return out\n else:\n return ImageData(out.array, deep_copy=False, geometry=geometry_new, suppress_warning=True) \n\nclass AcquisitionData(DataContainer):\n '''DataContainer for holding 2D or 3D sinogram'''\n __container_priority__ = 1\n\n @property\n def geometry(self):\n return self.__geometry\n\n @geometry.setter\n def geometry(self, val):\n self.__geometry = val\n\n @property\n def dimension_labels(self):\n return self.geometry.dimension_labels\n\n @dimension_labels.setter\n def dimension_labels(self, val):\n if val is not None:\n raise ValueError(\"Unable to set the dimension_labels directly. Use geometry.set_labels() instead\")\n\n def __init__(self, \n array = None, \n deep_copy=True, \n geometry = None,\n **kwargs):\n if not kwargs.get('suppress_warning', False):\n warnings.warn('Direct invocation is deprecated and will be removed in following version. Use allocate from AcquisitionGeometry instead',\n DeprecationWarning)\n\n dtype = kwargs.get('dtype', numpy.float32)\n\n if geometry is None:\n raise AttributeError(\"AcquisitionData requires a geometry\")\n \n labels = kwargs.get('dimension_labels', None)\n if labels is not None and labels != geometry.dimension_labels:\n raise ValueError(\"Deprecated: 'dimension_labels' cannot be set with 'allocate()'. Use 'geometry.set_labels()' to modify the geometry before using allocate.\")\n\n if array is None: \n array = numpy.empty(geometry.shape, dtype=dtype)\n elif issubclass(type(array) , DataContainer):\n array = array.as_array()\n elif issubclass(type(array) , numpy.ndarray):\n pass\n else:\n raise TypeError('array must be a CIL type DataContainer or numpy.ndarray got {}'.format(type(array)))\n \n if array.shape != geometry.shape:\n raise ValueError('Shape mismatch got {} expected {}'.format(array.shape, geometry.shape))\n \n super(AcquisitionData, self).__init__(array, deep_copy, geometry=geometry,**kwargs)\n \n def subset(self, dimensions=None, **kw):\n '''returns a subset of the AcquisitionData and regenerates the geometry'''\n \n if not kw.get('suppress_warning', False):\n warnings.warn('Subset has been deprecated and will be removed in following version. Use reorder() and get_slice() instead',\n DeprecationWarning)\n\n if dimensions is None:\n return self.get_slice(**kw)\n else:\n temp = self.copy()\n temp.reorder(dimensions)\n return temp\n\n def get_slice(self,channel=None, angle=None, vertical=None, horizontal=None, force=False):\n '''\n Returns a new dataset of a single slice of in the requested direction. 
\\\n '''\n try:\n geometry_new = self.geometry.get_slice(channel=channel, angle=angle, vertical=vertical, horizontal=horizontal)\n except ValueError:\n if force:\n geometry_new = None\n else:\n raise ValueError (\"Unable to return slice of requested AcquisitionData. Use 'force=True' to return DataContainer instead.\")\n\n #get new data\n #if vertical = 'centre' slice convert to index and subset, this will interpolate 2 rows to get the center slice value\n if vertical == 'centre':\n dim = self.geometry.dimension_labels.index('vertical')\n \n centre_slice_pos = (self.geometry.shape[dim]-1) / 2.\n ind0 = int(numpy.floor(centre_slice_pos))\n w2 = centre_slice_pos - ind0\n out = DataContainer.get_slice(self, channel=channel, angle=angle, vertical=ind0, horizontal=horizontal)\n \n if w2 > 0:\n out2 = DataContainer.get_slice(self, channel=channel, angle=angle, vertical=ind0 + 1, horizontal=horizontal)\n out = out * (1 - w2) + out2 * w2\n else:\n out = DataContainer.get_slice(self, channel=channel, angle=angle, vertical=vertical, horizontal=horizontal)\n\n if len(out.shape) == 1 or geometry_new is None:\n return out\n else:\n return AcquisitionData(out.array, deep_copy=False, geometry=geometry_new, suppress_warning=True)\n\nclass Processor(object):\n\n '''Defines a generic DataContainer processor\n \n accepts a DataContainer as input\n returns a DataContainer\n `__setattr__` allows additional attributes to be defined\n\n `store_output` boolian defining whether a copy of the output is stored. Default is False.\n If no attributes are modified get_output will return this stored copy bypassing `process`\n '''\n def __init__(self, **attributes):\n if not 'store_output' in attributes.keys():\n attributes['store_output'] = False\n\n attributes['output'] = None\n attributes['shouldRun'] = True\n attributes['input'] = None\n\n for key, value in attributes.items():\n self.__dict__[key] = value\n \n def __setattr__(self, name, value):\n if name == 'input':\n self.set_input(value)\n elif name in self.__dict__.keys():\n\n self.__dict__[name] = value\n\n if name == 'shouldRun':\n pass\n elif name == 'output':\n self.__dict__['shouldRun'] = False\n else: \n self.__dict__['shouldRun'] = True\n else:\n raise KeyError('Attribute {0} not found'.format(name))\n \n def set_input(self, dataset):\n \"\"\"\n Set the input data to the processor\n\n Parameters\n ----------\n input : DataContainer\n The input DataContainer\n \"\"\"\n\n if issubclass(type(dataset), DataContainer):\n if self.check_input(dataset):\n self.__dict__['input'] = dataset\n self.__dict__['shouldRun'] = True\n else:\n raise ValueError('Input data not compatible')\n else:\n raise TypeError(\"Input type mismatch: got {0} expecting {1}\"\\\n .format(type(dataset), DataContainer))\n \n def clear_input(self):\n self.__dict__['input']= None\n\n def check_input(self, dataset):\n '''Checks parameters of the input DataContainer\n \n Should raise an Error if the DataContainer does not match expectation, e.g.\n if the expected input DataContainer is 3D and the Processor expects 2D.\n '''\n raise NotImplementedError('Implement basic checks for input DataContainer')\n \n def get_output(self, out=None):\n \"\"\"\n Runs the configured processor and returns the processed data\n\n Parameters\n ----------\n out : DataContainer, optional\n Fills the referenced DataContainer with the processed data and suppresses the return\n \n Returns\n -------\n DataContainer\n The processed data. 
Suppressed if `out` is passed\n \"\"\"\n if self.output is None or self.shouldRun:\n if out is None:\n out = self.process()\n else:\n self.process(out=out)\n\n if self.store_output: \n self.output = out.copy()\n \n return out\n\n else:\n return self.output.copy()\n \n \n def set_input_processor(self, processor):\n if issubclass(type(processor), DataProcessor):\n self.__dict__['input'] = processor\n else:\n raise TypeError(\"Input type mismatch: got {0} expecting {1}\"\\\n .format(type(processor), DataProcessor))\n \n def get_input(self):\n '''returns the input DataContainer\n \n It is useful in the case the user has provided a DataProcessor as\n input\n '''\n if issubclass(type(self.input), DataProcessor):\n dsi = self.input.get_output()\n else:\n dsi = self.input\n return dsi\n \n def process(self, out=None):\n raise NotImplementedError('process must be implemented')\n \n def __call__(self, x, out=None):\n \n self.set_input(x) \n\n if out is None:\n out = self.get_output() \n else:\n self.get_output(out=out)\n\n self.clear_input()\n \n return out\n\n\nclass DataProcessor(Processor):\n '''Basically an alias of Processor Class'''\n pass\n\nclass DataProcessor23D(DataProcessor):\n '''Regularizers DataProcessor\n '''\n \n def check_input(self, dataset):\n '''Checks number of dimensions input DataContainer\n \n Expected input is 2D or 3D\n '''\n if dataset.number_of_dimensions == 2 or \\\n dataset.number_of_dimensions == 3:\n return True\n else:\n raise ValueError(\"Expected input dimensions is 2 or 3, got {0}\"\\\n .format(dataset.number_of_dimensions))\n \n###### Example of DataProcessors\n\nclass AX(DataProcessor):\n '''Example DataProcessor\n The AXPY routines perform a vector multiplication operation defined as\n\n y := a*x\n where:\n\n a is a scalar\n\n x a DataContainer.\n '''\n \n def __init__(self):\n kwargs = {'scalar':None, \n 'input':None, \n }\n \n #DataProcessor.__init__(self, **kwargs)\n super(AX, self).__init__(**kwargs)\n \n def check_input(self, dataset):\n return True\n \n def process(self, out=None):\n \n dsi = self.get_input()\n a = self.scalar\n if out is None:\n y = DataContainer( a * dsi.as_array() , True, \n dimension_labels=dsi.dimension_labels )\n #self.setParameter(output_dataset=y)\n return y\n else:\n out.fill(a * dsi.as_array())\n \n\n###### Example of DataProcessors\n\nclass CastDataContainer(DataProcessor):\n '''Example DataProcessor\n Cast a DataContainer array to a different type.\n\n y := a*x\n where:\n\n a is a scalar\n\n x a DataContainer.\n '''\n \n def __init__(self, dtype=None):\n kwargs = {'dtype':dtype, \n 'input':None, \n }\n \n #DataProcessor.__init__(self, **kwargs)\n super(CastDataContainer, self).__init__(**kwargs)\n \n def check_input(self, dataset):\n return True\n \n def process(self, out=None):\n \n dsi = self.get_input()\n dtype = self.dtype\n if out is None:\n y = numpy.asarray(dsi.as_array(), dtype=dtype)\n \n return type(dsi)(numpy.asarray(dsi.as_array(), dtype=dtype),\n dimension_labels=dsi.dimension_labels )\n else:\n out.fill(numpy.asarray(dsi.as_array(), dtype=dtype))\n \nclass PixelByPixelDataProcessor(DataProcessor):\n '''Example DataProcessor\n \n This processor applies a python function to each pixel of the DataContainer\n \n f is a python function\n\n x a DataSet.\n '''\n \n def __init__(self):\n kwargs = {'pyfunc':None, \n 'input':None, \n }\n #DataProcessor.__init__(self, **kwargs)\n super(PixelByPixelDataProcessor, self).__init__(**kwargs)\n \n def check_input(self, dataset):\n return True\n \n def process(self, out=None):\n 
\n pyfunc = self.pyfunc\n dsi = self.get_input()\n \n eval_func = numpy.frompyfunc(pyfunc,1,1)\n\n \n y = DataContainer( eval_func( dsi.as_array() ) , True, \n dimension_labels=dsi.dimension_labels )\n return y\n \n\nclass VectorData(DataContainer):\n '''DataContainer to contain 1D array'''\n\n @property\n def geometry(self):\n return self.__geometry\n\n @geometry.setter\n def geometry(self, val):\n self.__geometry = val\n\n @property\n def dimension_labels(self):\n if hasattr(self,'geometry'):\n return self.geometry.dimension_labels\n else:\n return self.__dimension_labels\n\n @dimension_labels.setter\n def dimension_labels(self, val):\n if hasattr(self,'geometry'):\n self.geometry.dimension_labels = val\n \n self.__dimension_labels = val\n\n def __init__(self, array=None, **kwargs):\n self.geometry = kwargs.get('geometry', None)\n\n dtype = kwargs.get('dtype', numpy.float32)\n \n if self.geometry is None:\n if array is None:\n raise ValueError('Please specify either a geometry or an array')\n else:\n if len(array.shape) > 1:\n raise ValueError('Incompatible size: expected 1D got {}'.format(array.shape))\n out = array\n self.geometry = VectorGeometry(array.shape[0], **kwargs)\n self.length = self.geometry.length\n else:\n self.length = self.geometry.length\n \n if array is None:\n out = numpy.zeros((self.length,), dtype=dtype)\n else:\n if self.length == array.shape[0]:\n out = array\n else:\n raise ValueError('Incompatible size: expecting {} got {}'.format((self.length,), array.shape))\n deep_copy = True\n # need to pass the geometry, othewise None\n super(VectorData, self).__init__(out, deep_copy, self.geometry.dimension_labels, geometry = self.geometry)\n \n\nclass VectorGeometry(object):\n '''Geometry describing VectorData to contain 1D array'''\n RANDOM = 'random'\n RANDOM_INT = 'random_int'\n\n @property\n def dtype(self):\n return self.__dtype\n\n @dtype.setter\n def dtype(self, val):\n self.__dtype = val \n \n def __init__(self, \n length, **kwargs):\n \n self.length = length\n self.shape = (length, )\n self.dtype = kwargs.get('dtype', numpy.float32)\n self.dimension_labels = kwargs.get('dimension_labels', None)\n \n def clone(self):\n '''returns a copy of VectorGeometry'''\n return copy.deepcopy(self)\n\n def copy(self):\n '''alias of clone'''\n return self.clone()\n\n def __eq__(self, other):\n\n if not isinstance(other, self.__class__):\n return False\n \n if self.length == other.length \\\n and self.shape == other.shape \\\n and self.dimension_labels == other.dimension_labels:\n return True\n return False\n\n def allocate(self, value=0, **kwargs):\n '''allocates an VectorData according to the size expressed in the instance\n \n :param value: accepts numbers to allocate an uniform array, or a string as 'random' or 'random_int' to create a random array or None.\n :type value: number or string, default None allocates empty memory block\n :param dtype: numerical type to allocate\n :type dtype: numpy type, default numpy.float32\n :param seed: seed for the random number generator\n :type seed: int, default None\n :param max_value: max value of the random int array\n :type max_value: int, default 100'''\n\n dtype = kwargs.get('dtype', self.dtype)\n # self.dtype = kwargs.get('dtype', numpy.float32)\n out = VectorData(geometry=self.copy(), dtype=dtype)\n if isinstance(value, Number):\n if value != 0:\n out += value\n else:\n if value == VectorGeometry.RANDOM:\n seed = kwargs.get('seed', None)\n if seed is not None:\n numpy.random.seed(seed) \n 
out.fill(numpy.random.random_sample(self.shape))\n elif value == VectorGeometry.RANDOM_INT:\n seed = kwargs.get('seed', None)\n if seed is not None:\n numpy.random.seed(seed)\n max_value = kwargs.get('max_value', 100)\n r = numpy.random.randint(max_value,size=self.shape, dtype=numpy.int32)\n out.fill(numpy.asarray(r, dtype=self.dtype)) \n elif value is None:\n pass\n else:\n raise ValueError('Value {} unknown'.format(value))\n return out\n\nclass DataOrder():\n ASTRA_IG_LABELS = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]\n TIGRE_IG_LABELS = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]\n ASTRA_AG_LABELS = [AcquisitionGeometry.CHANNEL, AcquisitionGeometry.VERTICAL, AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL]\n TIGRE_AG_LABELS = [AcquisitionGeometry.CHANNEL, AcquisitionGeometry.ANGLE, AcquisitionGeometry.VERTICAL, AcquisitionGeometry.HORIZONTAL]\n CIL_IG_LABELS = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]\n CIL_AG_LABELS = [AcquisitionGeometry.CHANNEL, AcquisitionGeometry.ANGLE, AcquisitionGeometry.VERTICAL, AcquisitionGeometry.HORIZONTAL] \n TOMOPHANTOM_IG_LABELS = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X]\n \n @staticmethod\n def get_order_for_engine(engine, geometry):\n if engine == 'astra':\n if isinstance(geometry, AcquisitionGeometry):\n dim_order = DataOrder.ASTRA_AG_LABELS\n else:\n dim_order = DataOrder.ASTRA_IG_LABELS\n elif engine == 'tigre':\n if isinstance(geometry, AcquisitionGeometry):\n dim_order = DataOrder.TIGRE_AG_LABELS\n else:\n dim_order = DataOrder.TIGRE_IG_LABELS\n elif engine == 'cil':\n if isinstance(geometry, AcquisitionGeometry):\n dim_order = DataOrder.CIL_AG_LABELS\n else:\n dim_order = DataOrder.CIL_IG_LABELS\n else:\n raise ValueError(\"Unknown engine expected 'tigre' or 'astra' got {}\".format(engine))\n \n dimensions = []\n for label in dim_order:\n if label in geometry.dimension_labels:\n dimensions.append(label)\n\n return dimensions\n\n @staticmethod\n def check_order_for_engine(engine, geometry):\n order_requested = DataOrder.get_order_for_engine(engine, geometry)\n\n if order_requested == list(geometry.dimension_labels):\n return True\n else:\n raise ValueError(\"Expected dimension_label order {0}, got {1}.\\nTry using `data.reorder('{2}')` to permute for {2}\"\n .format(order_requested, list(geometry.dimension_labels), engine))\n" ]
[ [ "numpy.isclose", "numpy.array_equal", "numpy.empty", "numpy.eye", "numpy.transpose", "numpy.random.randint", "numpy.sqrt", "numpy.cross", "numpy.matrix", "numpy.array", "numpy.copyto", "numpy.zeros", "numpy.shape", "numpy.random.random_sample", "numpy.allclose", "numpy.frompyfunc", "numpy.floor", "numpy.array2string", "numpy.ceil", "numpy.asarray", "numpy.random.seed" ] ]
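A minimal usage sketch for the container algebra defined in the record above. The import path `framework` is a placeholder (the record's real module name is not shown here), and the snippet sticks to operators whose fallback path is pure numpy, so the cilacc C library is never called. It also assumes a numpy version that still provides the old scalar aliases (numpy.int, numpy.float) that the file itself relies on, i.e. numpy older than 1.24.

import numpy
from framework import VectorGeometry  # placeholder module name, not given in this record

# Allocate two length-4 vectors; allocate(value) fills them via the in-place add path.
vg = VectorGeometry(4, dtype=numpy.float32)
x = vg.allocate(2.0)
y = vg.allocate(3.0)

# Operator algebra routes through pixel_wise_binary(numpy.add / numpy.multiply, ...).
z = 1.0 * x + 2.0 * y
print(z.as_array())   # [8. 8. 8. 8.]
print(x.dot(y))       # numpy.dot of the raveled arrays -> 24.0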
Computational-Nonlinear-Optics-ORC/Compare-CNLSE
[ "9b56cedbca2a06af3baa9f64e46ebfd4263f86c2" ]
[ "unit_testing/test_BNLSE.py" ]
[ "import sys\nsys.path.append('src')\nfrom combined_functions import dF_sidebands, sim_parameters,\\\n overlap, pulse_propagation_adaptive, pulse_propagation_constant,\\\n float_range\nfrom BNLSE_specific import *\nimport GNLSE_specific as GN\nfrom copy import deepcopy\nfrom numpy.testing import assert_allclose, assert_raises\nfrom test_GNLSE import specific_variables\nfrom formulate import formulate_BNLSE, formulate_GNLSE\nfrom test_GNLSE import pulse as pulse_GNLSE\n\n\n\ndef specific_variables():\n n2 = 2.5e-20\n alphadB = 0\n maxerr = 1e-13\n ss = 1\n gama = 10e-3\n lamda_c = 1051.85e-9\n lamp = 1046.85e-9\n betas = np.array([0, 0, 0, 6.756e-2,\n -1.002e-4, 3.671e-7])*1e-3\n T0 = 10\n P_p = 5\n P_s = 1\n P_i = 2\n fr = 0.\n fwhm = 1\n N_b = 10\n N_g = 17\n z = 18\n dz_less = 100\n df = 0.010\n M = overlap(n2, lamda_c, gama)\n F, fp = dF_sidebands(betas, lamp, lamda_c, n2, M, P_p)\n\n int_fwm = sim_parameters(n2, 1, alphadB, betas, M, fr, T0)\n int_fwm.general_options(maxerr, ss)\n\n\n int_fwm_g = deepcopy(int_fwm)\n int_fwm_g.propagation_parameters(N_g, z, dz_less) \n #print(int_fwm.dz)\n #sys.exit()\n #f_centrals = [fp + i * F for i in range(-1,2)]\n\n fv_g, where_g,f_centrals = GN.fv_creator(fp, df, F, int_fwm_g)\n\n int_fwm = deepcopy(int_fwm)\n int_fwm.propagation_parameters(N_b, z, dz_less) \n fv,where,f_centrals,band_grid_pos = fv_creator(fp, F, int_fwm,df, fv_g)\n #sim_wind = sim_window(fv, lamp, f_centrals, lamda_c, int_fwm_b, where_g)\n sim_wind = sim_window(fv, lamp, f_centrals, lamda_c, int_fwm, where_g)\n return M, int_fwm, sim_wind, n2, alphadB, maxerr, ss, N_b, lamda_c, lamp, \\\n betas, fv, f_centrals\n\n\n\nM, int_fwm, sim_wind, n2, alphadB, maxerr, ss, N, lamda_c, lamda, \\\n betas, fv, f_centrals = specific_variables()\n\n\n#fv, where_g = fv_creator(f_centrals[1], fv[1] - fv[0], f_centrals[1] - f_centrals[0], int_fwm, fv)\n#sim_wind = sim_window(fv, lamp,F, lamda_c, int_fwm, where_g)\ndef test_dF_sidebands():\n \"\"\"\n Tests of the ability of dF_sidebands to find the\n sidebands expected for predetermined conditions.\n \"\"\"\n lamp = 1048.17e-9\n lamda0 = 1051.85e-9\n betas = 0, 0, 0, 6.756e-2 * 1e-3, -1.002e-4 * 1e-3, 3.671*1e-7 * 1e-3\n\n F, f_p = dF_sidebands(betas, lamp, lamda0, n2, M, 5)\n\n f_s, f_i = f_p - F, f_p + F\n lams, lami = (1e-3*c/i for i in (f_s, f_i))\n assert lams, lami == (1200.2167948665879, 930.31510086250455)\n\n\ndef test_noise():\n noise = Noise(int_fwm, sim_wind)\n n1 = noise.noise_func(int_fwm)\n n2 = noise.noise_func(int_fwm)\n print(n1, n2)\n assert_raises(AssertionError, assert_allclose, n1, n2)\n\n\nclass Test_loss:\n def test_loss1(a):\n loss = Loss(int_fwm, sim_wind, amax=alphadB)\n alpha_func = loss.atten_func_full(sim_wind.fv)\n assert_allclose(alpha_func, np.ones_like(alpha_func)*alphadB/4.343)\n\n def test_loss2(a):\n\n loss = Loss(int_fwm, sim_wind, amax=2*alphadB)\n alpha_func = loss.atten_func_full(sim_wind.fv)\n maxim = np.max(alpha_func)\n assert_allclose(maxim, 2*alphadB/4.343)\n\n def test_loss3(a):\n loss = Loss(int_fwm, sim_wind, amax=2*alphadB)\n alpha_func = loss.atten_func_full(sim_wind.fv)\n minim = np.min(alpha_func)\n assert minim == alphadB/4.343\n\nclass Test_energy_conserve_adaptive():\n\n \"---------------constants---------------\"\n ss = 1\n n2 = 2.5e-20\n gama = 10e-3\n lamda_c = 1051.85e-9\n dz_less = 100\n betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]\n -1.002e-4, 3.671e-7]) * 1e-3\n maxerr = 1e-13\n fr = 0.18\n \"----------Combined parameters-----------\"\n alphadB = 0\n z = 18\n 
df = 0.01 # frequency step in [Thz]\n lamp = 1048e-9\n P_p = 20\n P_s = 1\n P_i = 1\n T0 = 10 # [ps]\n \"----------------------------------------\"\n \"------------BNLSE parameters------------\"\n N_b = 10\n Df_band = df * 2**N_b\n \"------------GNLSE parameters------------\"\n N_g = 14\n \"----------------------------------------\"\n\n M = overlap(n2, lamda_c, gama)\n F, fp = dF_sidebands(betas, lamp, lamda_c, n2, M, P_p)\n\n\n\n sim_wind_g = \\\n formulate_GNLSE(int_fwm, N_g, z, dz_less, fp, df,\n F, lamp, lamda_c, P_p, P_s,P_i, pulse_GNLSE)[3]\n\n def test_energy_conserve_s0_pulse(self):\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, pulse, self.sim_wind_g.fv,\n self.sim_wind_g)\n \n ss = 0\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_adaptive(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n def test_energy_conserve_s1_pulse(self):\n\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, pulse, self.sim_wind_g.fv,\n self.sim_wind_g)\n ss = 1\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_adaptive(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n def test_energy_conserve_s0_cw(self):\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, cw, self.sim_wind_g.fv,\n self.sim_wind_g)\n ss = 0\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_adaptive(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n def test_energy_conserve_s1_cw(self):\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, cw, self.sim_wind_g.fv,\n self.sim_wind_g)\n ss = 1\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_adaptive(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n\nclass Test_energy_conserve_constant():\n\n \"---------------constants---------------\"\n ss = 1\n n2 = 2.5e-20\n gama = 10e-3\n lamda_c = 1051.85e-9\n dz_less = 100\n betas = np.array([0, 0, 0, 6.756e-2, # propagation constants [ps^n/m]\n -1.002e-4, 3.671e-7]) * 1e-3\n maxerr = 1e-13\n fr = 0.18\n \"----------Combined parameters-----------\"\n alphadB = 0\n z = 1\n dz = 0.01\n z = float_range(0, z, dz)\n df = 0.01 # frequency step in [Thz]\n lamp = 1048e-9\n P_p = 20\n P_s = 1\n P_i = 1\n T0 = 10 # [ps]\n \"----------------------------------------\"\n \"------------BNLSE parameters------------\"\n N_b = 10\n Df_band = df * 2**N_b\n \"------------GNLSE 
parameters------------\"\n N_g = 14\n \"----------------------------------------\"\n\n M = overlap(n2, lamda_c, gama)\n F, fp = dF_sidebands(betas, lamp, lamda_c, n2, M, P_p)\n\n\n\n sim_wind_g = \\\n formulate_GNLSE(int_fwm, N_g, z.end, dz_less, fp, df,\n F, lamp, lamda_c, P_p, P_s,P_i, pulse_GNLSE)[3]\n\n def test_energy_conserve_s0_pulse(self):\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z.end, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, pulse, self.sim_wind_g.fv,\n self.sim_wind_g)\n \n ss = 0\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_constant(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm, self.z)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n def test_energy_conserve_s1_pulse(self):\n\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z.end, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, pulse, self.sim_wind_g.fv,\n self.sim_wind_g)\n ss = 1\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_constant(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm, self.z)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n def test_energy_conserve_s0_cw(self):\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z.end, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, cw, self.sim_wind_g.fv,\n self.sim_wind_g)\n ss = 0\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_constant(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm, self.z)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n def test_energy_conserve_s1_cw(self):\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b = \\\n formulate_BNLSE(int_fwm, self.N_b, self.z.end, self.dz_less,\n self.fp, self.df, self.F, self.Df_band, self.lamp,\n self.lamda_c, self.P_p,self.P_s, self.P_i, cw, self.sim_wind_g.fv,\n self.sim_wind_g)\n ss = 1\n u_out_b, U_out_b, temp1,temp2,temp3 = pulse_propagation_constant(\n u_b, U_b, int_fwm_b, sim_wind_b, Dop_b, non_integrand_b.dAdzmm, self.z)\n E1 = np.sum(np.linalg.norm(u_b, 2, axis=-1)**2)\n E2 = np.sum(np.linalg.norm(u_out_b, 2, axis=-1)**2)\n\n assert_allclose(E1, E2)\n\n\ndef pulse(P, sim_wind, int_fwm):\n return np.array([p ** 0.5 * np.exp(-0.5 * (t_band / int_fwm.T0)**2)\\\n for p, t_band in zip(P, sim_wind.t_band)])\n\n\ndef cw(P, sim_wind, int_fwm):\n woff1 = (sim_wind.p_pos[1] + (int_fwm.nt) // 2) * 2 * pi * sim_wind.df[sim_wind.p_pos[0]]\n\n return np.array([p ** 0.5 * np.exp(1j * (woff1) * sim_wind.t[sim_wind.p_pos[0]])\\\n for p, t_band in zip(P, sim_wind.t_band)])\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.testing.assert_raises" ] ]
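Every test method above asserts the same invariant. The following self-contained sketch distills that check, with synthetic data standing in for the BNLSE solver output, so no fibre-simulation imports are needed:

import numpy as np
from numpy.testing import assert_allclose

rng = np.random.default_rng(0)
u_in = rng.standard_normal((3, 128)) + 1j * rng.standard_normal((3, 128))
u_out = u_in * np.exp(1j * 0.3)  # hypothetical lossless (phase-only) propagation step

# Total energy = sum over bands of the squared L2 norm, exactly as in the tests above.
E1 = np.sum(np.linalg.norm(u_in, 2, axis=-1) ** 2)
E2 = np.sum(np.linalg.norm(u_out, 2, axis=-1) ** 2)
assert_allclose(E1, E2)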
jeeva2812/ashoka-tech-for-change
[ "e61de845166f4473d40c577ebbe3ebf4964f61a7" ]
[ "inter_iit_backend/final.py" ]
[ "#!./osmnx/bin/python3\n\n\n\"\"\"Capacited Vehicles Routing Problem (CVRP).\"\"\"\n\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nimport numpy as np\nimport sys\nimport osmnx as ox\nimport networkx as nx\nimport json\n\nnum_waste_types = 4\nprice = [10, 10, 10, 10]\ncost = 0.01\nnum_vehicles = 1\ndepots = 0\nprop_const = 100 # have to tweak\n\n# G = ox.graph_from_address('IIT Madras, India')\n\ndef print_solution(G, wt_lat_long_amount, num_vehicles, distance_matrix, manager, routing, assignment):\n \"\"\"Prints assignment on console.\"\"\"\n # Display dropped nodes.\n dropped_nodes = 'Dropped nodes:'\n for node in range(routing.Size()):\n if routing.IsStart(node) or routing.IsEnd(node):\n continue\n if assignment.Value(routing.NextVar(node)) == node:\n dropped_nodes += ' {}'.format(manager.IndexToNode(node))\n # print(dropped_nodes)\n # Display routes\n total_distance = 0\n total_load = 0\n total_income = 0\n for vehicle_id in range(num_vehicles):\n index = routing.Start(vehicle_id)\n # plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n route_income = 0\n path = []\n lat_lng_path = []\n\n node_index = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n curr_load = 0; curr_inc = 0;\n for i in range(num_waste_types):\n curr_load += wt_lat_long_amount[node_index][i]\n curr_inc += wt_lat_long_amount[node_index][i] * price[i]\n route_income += curr_inc\n route_load += curr_load\n # plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n # print('prev_index: {0} index: {1} d: {2}'.format(previous_index, index, routing.GetArcCostForVehicle(previous_index, index, vehicle_id)))\n # print('index', manager.IndexToNode(index))\n path.append(distance_matrix[node_index][manager.IndexToNode(index)])\n lat_lng_path.append({\n 'a0': wt_lat_long_amount[node_index][0],\n 'a1': wt_lat_long_amount[node_index][1],\n 'a2': wt_lat_long_amount[node_index][2],\n 'a3': wt_lat_long_amount[node_index][3],\n 'lat': wt_lat_long_amount[node_index][4],\n 'lng': wt_lat_long_amount[node_index][5],\n })\n # plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),route_load)\n # plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n # plan_output += 'Load of the route: {}m\\n'.format(route_load)\n # plan_output += 'Profit of the route: {}\\n'.format(route_income - cost*route_distance) # need to chan\n\n # path.append(nx.shortest_path(G,vertex[node_index],vertex[manager.IndexToNode(index)]))\n # ox.plot_graph_routes(G, path)\n\n # print(plan_output)\n total_distance += route_distance\n total_load += route_load\n total_income += route_income\n result = {\n 'route': lat_lng_path,\n 'total_distance': total_distance,\n 'est_profit': total_income - cost*route_distance\n }\n print(json.dumps(result))\n # print('Total Distance of all vehicle routes: {}m'.format(total_distance))\n # print('Total Load of all vehicle routes: {}'.format(total_load))\n\n\ndef main():\n\n vehicle_cap = float(sys.argv[1])\n query_distance = int(sys.argv[2])\n csv = np.genfromtxt('temp.csv', delimiter=',')\n # print(csv)\n \n r, c = csv.shape\n # print(r,c)\n num_nodes = r-1\n wt_lat_long_amount = csv[1:] # is a dict\n # print(wt_lat_long_amount)\n lat_index = 4; long_index = 5\n\n # print(query_distance)\n G = 
ox.graph_from_point((wt_lat_long_amount[0][lat_index], wt_lat_long_amount[0][long_index]), distance=query_distance)\n distance_matrix = -1 * np.ones((num_nodes,num_nodes))\n\n \"\"\"Solve the CVRP problem.\"\"\"\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(num_nodes, #one less than the number of rows\n num_vehicles, depots)\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n\n if distance_matrix[from_node][to_node] == -1:\n node1 = ox.get_nearest_node(G, (wt_lat_long_amount[from_node][lat_index], wt_lat_long_amount[from_node][long_index]))\n node2 = ox.get_nearest_node(G, (wt_lat_long_amount[to_node][lat_index], wt_lat_long_amount[to_node][long_index]))\n distance_matrix[from_node][to_node] = nx.shortest_path_length(G, node1, node2, weight='length')\n # print(from_node,'->',to_node, ': ', distance_matrix[from_node][to_node])\n\n total_price = 0\n for i in range(num_waste_types):\n total_price += wt_lat_long_amount[to_node][i] * price[i]\n print\n\n # need to minimise the loss \n # print('Cost: ',(cost*distance_matrix[from_node][to_node] - total_price) )\n return prop_const*(cost*distance_matrix[from_node][to_node]) # multiplying by 10 because everything is int \n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n\n # Add Capacity constraint.\n def demand_callback(from_index):\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index)\n amount = 0\n for i in range(num_waste_types):\n amount += wt_lat_long_amount[from_index][i]\n return amount\n\n demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)\n\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n [vehicle_cap], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n \n # Allow to drop nodes.\n # for total_price\n for node in range(1, num_nodes):\n total_price = 0\n for i in range(num_waste_types):\n total_price += price[i] * wt_lat_long_amount[node][i]\n routing.AddDisjunction([manager.NodeToIndex(node)], int(prop_const*total_price))\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n if assignment:\n print_solution(G, wt_lat_long_amount, num_vehicles, distance_matrix, manager, routing, assignment)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n" ]
[ [ "numpy.genfromtxt", "numpy.ones" ] ]
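The profit logic in final.py rests on routing.AddDisjunction: a pickup may be dropped, but only at a penalty equal to its forgone revenue (scaled by prop_const). A toy, self-contained version of that pattern, with hard-coded integer distances in place of the OSMnx shortest-path lookups:

from ortools.constraint_solver import pywrapcp, routing_enums_pb2

# Node 0 is the depot; node 3 is far away and worth less than the detour to reach it.
dist = [[0, 2, 3, 90],
        [2, 0, 2, 90],
        [3, 2, 0, 90],
        [90, 90, 90, 0]]
drop_penalty = [0, 50, 50, 50]  # revenue lost if a node is skipped

manager = pywrapcp.RoutingIndexManager(4, 1, 0)
routing = pywrapcp.RoutingModel(manager)

def distance_cb(i, j):
    return dist[manager.IndexToNode(i)][manager.IndexToNode(j)]

routing.SetArcCostEvaluatorOfAllVehicles(routing.RegisterTransitCallback(distance_cb))
for node in range(1, 4):
    routing.AddDisjunction([manager.NodeToIndex(node)], drop_penalty[node])

params = pywrapcp.DefaultRoutingSearchParameters()
params.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
assignment = routing.SolveWithParameters(params)
# Expected: node 3 is dropped (pays penalty 50) rather than visited (~180 extra distance).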
arizonat/softroboticfish7
[ "f8e170f661620e66c1cd49b45a28162c2ff2d9ae" ]
[ "fish/pi/ros/catkin_ws/src/fishstatecontroller/src/ManualController.py" ]
[ "#!/usr/bin/env python\n\nimport cv2\nimport rospy\nimport roslib\nimport numpy as np\nfrom std_msgs.msg import String, Float64\n\nclass ManualController():\n \"\"\"\n Control SoFi by pressing buttons on a keyboard\n Key mappings:\n - 'a': hard left\n - 's': soft left\n - 'd': go forward\n - 'f': soft right\n - 'g': full right\n - (else): do nothing\n \"\"\"\n def __init__(self):\n self.heading_pub = rospy.Publisher('heading_cmd', Float64, queue_size=10)\n self.pitch_pub = rospy.Publisher('pitch_cmd', Float64, queue_size=10)\n self.thrust_pub = rospy.Publisher('thrust_cmd', Float64, queue_size=10)\n\n #Values between -1 and 1 inclusive\n self.heading = 0.0 #-1 for full left, +1 for full right\n self.pitch = 0.0 \n self.thrust = 0.34\n\n self.image = np.zeros((350,450,3), np.uint8)\n instructions = [\"CLICK HERE\", \"Press and hold the\", \"following buttons to move SoFi\",\"\", \"a: HARD LEFT\", \"s: SOFT LEFT\", \"d: GO FORWARD\", \"f: SOFT RIGHT\", \"g: HARD RIGHT\", \"else: DO NOTHING\"]\n org = (20, 25)\n font = cv2.FONT_HERSHEY_SIMPLEX\n color = (255, 255, 255)\n for line in instructions:\n cv2.putText(self.image, line, org, font, 1, color, 2)\n org = (org[0], org[1] + 35)\n \n def run(self):\n rate = rospy.Rate(24)\n cv2.imshow(\"blank\", self.image)\n while not rospy.is_shutdown():\n #read for keyboard presses and set appropriate values for heading, pitch, and thrust \n key = cv2.waitKey(0)\n if key == ord('a'): #HARD LEFT\n self.heading = -1.0\n self.thrust = -1.0\n elif key == ord('s'): #SOFT LEFT\n self.heading = -0.72\n self.thrust = -1.0\n elif key == ord('d'): #GO FORWARD\n self.heading = 0.0\n self.thrust = -1.0\n elif key == ord('f'): #SOFT RIGHT\n self.heading = 0.72\n self.thrust = -1.0\n elif key == ord('g'): #HARD RIGHT\n self.heading = 1.0\n self.thrust = -1.0\n else: #DO NOTHING\n self.heading = 0.0\n self.thrust = 1.0\n\n #publish the appropriate commands to the *_cmd topics\n self.heading_pub.publish(self.heading)\n self.pitch_pub.publish(self.pitch)\n self.thrust_pub.publish(self.thrust)\n rate.sleep()\n\nif __name__ == '__main__':\n rospy.init_node('manual_controller', anonymous=True) \n controller = ManualController()\n print(\"\\nManual Controller: Beginning at 24hz\")\n controller.run()\n print(\"\\nManual Controller: done\")\n" ]
[ [ "numpy.zeros" ] ]
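A companion sketch, not part of the repo: the receiving side of the three topics ManualController publishes. The topic names match the publisher above; the node name and logging are illustrative.

#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64

def make_cb(name):
    def cb(msg):
        rospy.loginfo("%s = %.2f", name, msg.data)  # echo each received command
    return cb

if __name__ == '__main__':
    rospy.init_node('cmd_echo', anonymous=True)
    for topic in ('heading_cmd', 'pitch_cmd', 'thrust_cmd'):
        rospy.Subscriber(topic, Float64, make_cb(topic))
    rospy.spin()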
hsaafan/SlowFeatureAnalysis
[ "3290881dcc4d0c12e4b7720ca8e9971595bccd9e" ]
[ "src/sfafd/examples/disturbances.py" ]
[ "\"\"\" Plots the disturbances for some common test sets of the Tennessee Eastman\nProcess\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tepimport as imp\n\n\ndef plot_disturbances(show: bool = True, save: bool = False,\n w_in: float = 8, h_in: float = 6) -> None:\n training_sets = imp.import_sets([4, 5, 10], skip_training=True)\n _, T4 = training_sets[0]\n _, T5 = training_sets[1]\n _, T10 = training_sets[2]\n\n ignored_var = list(range(22, 41))\n T4 = np.delete(T4, ignored_var, axis=0)\n T5 = np.delete(T5, ignored_var, axis=0)\n T10 = np.delete(T10, ignored_var, axis=0)\n\n \"\"\" IDV(4) \"\"\"\n T4_reactor_temp = T4[8, :]\n reactor_temp_setpoint = 120.4 * np.ones_like(T4_reactor_temp)\n T4_rector_cooling_water_flow = T4[31, :]\n\n plt.rcParams.update({'font.size': 16})\n fig4, ax4 = plt.subplots(nrows=2, sharex=True)\n ax4[0].set_title(\"Reactor Cooling Water Flow\")\n ax4[0].set_ylabel(\"Flow $(m^3h^{-1})$\")\n ax4[0].plot(T4_rector_cooling_water_flow)\n\n ax4[1].set_title(\"Reactor Temperature\")\n ax4[1].set_ylabel(\"Temperature ($\\degree C$)\")\n ax4[1].set_xlabel(\"Sample Index\")\n ax4[1].plot(T4_reactor_temp, label=\"Reactor Temperature\")\n ax4[1].plot(reactor_temp_setpoint, label=\"Setpoint\")\n ax4[1].legend()\n\n fig4.set_size_inches(w_in, h_in)\n fig4.tight_layout()\n \"\"\" IDV(5) \"\"\"\n T5_condenser_cooling_water_flow = T5[32, :]\n T5_reactor_pressure = T5[6, :]\n reactor_pressure_setpoint = 2705*np.ones_like(T5_reactor_pressure)\n\n fig5, ax5 = plt.subplots(nrows=2, sharex=True)\n ax5[0].set_title(\"Condenser Cooling Water Flow\")\n ax5[0].set_ylabel(\"Flow $(m^3h^{-1})$\")\n ax5[0].plot(T5_condenser_cooling_water_flow)\n\n ax5[1].set_title(\"Reactor Pressure\")\n ax5[1].set_ylabel(\"Pressure (kPag)\")\n ax5[1].set_xlabel(\"Sample Index\")\n ax5[1].plot(T5_reactor_pressure, label=\"Reactor Pressure\")\n ax5[1].plot(reactor_pressure_setpoint, label=\"Setpoint\")\n ax5[1].legend()\n\n fig5.set_size_inches(w_in, h_in)\n fig5.tight_layout()\n \"\"\" IDV(10) \"\"\"\n T10_c_flow = T10[3, :]\n T10_stripper_temp = T10[17, :]\n stripper_temp_setpoint = 65.731 * np.ones_like(T10_stripper_temp)\n\n fig10, ax10 = plt.subplots(nrows=2, sharex=True)\n ax10[0].set_title(\"A and C Feed Flow\")\n ax10[0].set_ylabel(\"Flow (KSCMH)\")\n ax10[0].plot(T10_c_flow)\n\n ax10[1].set_title(\"Stripper Temperature\")\n ax10[1].set_ylabel(\"Temperature ($\\degree C$)\")\n ax10[1].set_xlabel(\"Sample Index\")\n ax10[1].plot(T10_stripper_temp, label=\"Stripper Temperature\")\n ax10[1].plot(stripper_temp_setpoint, label=\"Setpoint\")\n ax10[1].legend()\n\n fig10.set_size_inches(w_in, h_in)\n fig10.tight_layout()\n if show:\n plt.show()\n if save:\n fig4.savefig(\"IDV(4)\")\n fig5.savefig(\"IDV(5)\")\n fig10.savefig(\"IDV(10)\")\n\n\nif __name__ == \"__main__\":\n plot_disturbances(show=True, save=True)\n" ]
[ [ "matplotlib.pyplot.rcParams.update", "numpy.delete", "numpy.ones_like", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
wangke1935/euler
[ "e2785eca70e7e4f37d73ac4ce64a3059b0385dc7" ]
[ "tf_euler/python/euler_ops/neighbor_ops.py" ]
[ "# Copyright 2018 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tf_euler.python.euler_ops import base\n\nsample_neighbor = base._LIB_OP.sample_neighbor\nget_top_k_neighbor = base._LIB_OP.get_top_k_neighbor\n\n\ndef get_full_neighbor(nodes, edge_types):\n \"\"\"\n Args:\n nodes: A `Tensor` of `int64`.\n edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing\n edges.\n\n Return:\n A tuple of `SparseTensor` (neibors, weights).\n neighbors: A `SparseTensor` of `int64`.\n weights: A `SparseTensor` of `float`.\n types: A `SparseTensor` of `int32`\n \"\"\"\n sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)\n return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \\\n tf.SparseTensor(*sp_returns[6:])\n\n\ndef get_sorted_full_neighbor(nodes, edge_types):\n \"\"\"\n Args:\n nodes: A `Tensor` of `int64`.\n edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing\n edges.\n\n Return:\n A tuple of `SparseTensor` (neibors, weights).\n neighbors: A `SparseTensor` of `int64`.\n weights: A `SparseTensor` of `float`.\n types: A `SparseTensor` of `int32`\n \"\"\"\n sp_returns = base._LIB_OP.get_sorted_full_neighbor(nodes, edge_types)\n return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \\\n tf.SparseTensor(*sp_returns[6:])\n\n\ndef sample_fanout(nodes, edge_types, counts, default_node=-1):\n \"\"\"\n Sample multi-hop neighbors of nodes according to weight in graph.\n\n Args:\n nodes: A 1-D `Tensor` of `int64`.\n edge_types: A list of 1-D `Tensor` of int32. Specify edge types to filter\n outgoing edges in each hop.\n counts: A list of `int`. Specify the number of sampling for each node in\n each hop.\n default_node: A `int`. 
Specify the node id to fill when there is no neighbor\n for specific nodes.\n\n Return:\n A tuple of list: (samples, weights)\n samples: A list of `Tensor` of `int64`, with the same length as\n `edge_types` and `counts`, with shapes `[num_nodes]`,\n `[num_nodes * count1]`, `[num_nodes * count1 * count2]`, ...\n weights: A list of `Tensor` of `float`, with shapes\n `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...\n types: A list of `Tensor` of `int32`, with shapes\n `[num_nodes * count1]`, `[num_nodes * count1 * count2]` ...\n \"\"\"\n neighbors_list = [tf.reshape(nodes, [-1])]\n weights_list = []\n type_list = []\n for hop_edge_types, count in zip(edge_types, counts):\n neighbors, weights, types = sample_neighbor(\n neighbors_list[-1], hop_edge_types, count, default_node=default_node)\n neighbors_list.append(tf.reshape(neighbors, [-1]))\n weights_list.append(tf.reshape(weights, [-1]))\n type_list.append(tf.reshape(weights, [-1]))\n return neighbors_list, weights_list, type_list\n\n\ndef get_multi_hop_neighbor(nodes, edge_types):\n \"\"\"\n Get multi-hop neighbors with adjacent matrix.\n\n Args:\n nodes: A 1-D `tf.Tensor` of `int64`.\n edge_types: A list of 1-D `tf.Tensor` of `int32`. Specify edge types to\n filter outgoing edges in each hop.\n\n Return:\n A tuple of list: (nodes, adjcents)\n nodes: A list of N + 1 `tf.Tensor` of `int64`, N is the number of\n hops. Specify node set of each hop, including the root.\n adjcents: A list of N `tf.SparseTensor` of `int64`. Specify adjacent\n matrix between hops.\n \"\"\"\n nodes = tf.reshape(nodes, [-1])\n nodes_list = [nodes]\n adj_list = []\n for hop_edge_types in edge_types:\n neighbor, weight, _ = get_full_neighbor(nodes, hop_edge_types)\n next_nodes, next_idx = tf.unique(neighbor.values, out_idx=tf.int64)\n next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)\n next_values = weight.values\n next_shape = [tf.size(nodes), tf.size(next_nodes)]\n next_adj = tf.sparse.SparseTensor(next_indices, next_values, next_shape)\n next_adj = tf.sparse.reorder(next_adj)\n nodes_list.append(next_nodes)\n adj_list.append(next_adj)\n nodes = next_nodes\n return nodes_list, adj_list\n" ]
[ [ "tensorflow.sparse.reorder", "tensorflow.SparseTensor", "tensorflow.size", "tensorflow.sparse.SparseTensor", "tensorflow.unique", "tensorflow.reshape", "tensorflow.stack" ] ]
SMT-Dev/exercise_model
[ "b049279dbc38658c69eaa9e0149355c227e46a0b" ]
[ "report-data.py" ]
[ "# -*- coding: utf-8 -*-\n# @Time : 2021/4/21 3:05 PM\n# @Author : Hopen\n\nimport pymysql\nimport re\nfrom pprint import pprint\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom config import *\n\ndb = pymysql.connect(host=HOST, user=USER, password=PASS, db=DB, port=PORT, use_unicode=True, charset=\"utf8\")\n\n\ndef mysql_select(sql_str):\n cursor = db.cursor()\n cursor.execute(sql_str)\n data = cursor.fetchall()\n cursor.close()\n return data\n\n\nclass ProblemEvaluation(object):\n def __init__(self):\n self.id = 0\n self.prob_id = 0\n self.user_id = 0\n self.prob_text = ''\n self.prob_level = 0\n self.lesson = 0\n self.analysis = ''\n self.answer_text = ''\n self.type = ''\n self.point = ''\n self.choice_text = ''\n self.eval_time = ''\n self.res = 0\n\n\nclass EvaluationSet(object):\n def __init__(self):\n self.exer_eval_li = []\n self.user_li = []\n self.prob_li = []\n\n def high_freq_all(self, n):\n \"\"\"\n 返回全体学生的高频错题集\n :param n: 返回的错题数量\n :return: list[prob]\n \"\"\"\n\n def freq_point_all(self, n=None):\n \"\"\"\n 返回全体学生的高频易错考点\n :param n: 返回的考点数量\n :return: list[{point: string, count: int}]\n \"\"\"\n\n def get_avg_rank(self, level=None, time_span=None):\n \"\"\"\n 返回全体学生的平均分排名\n :param level: 可选, 若提供则在 level 内进行筛选\n :param time_span: 可选, 若提供则在指定时间段内进行进行筛选\n :return: list['user_id', 'rank']\n \"\"\"\n\n\nclass ExerciseEvaluation(object):\n def __init__(self):\n self.id = 0\n self.end_time = ''\n self.duration = 0 # 单位: 秒\n self.score = 0\n self.user_id = 0\n self.level = 0\n self.lesson = 0\n\n\nclass User(object):\n def __init__(self):\n self.id = 0\n self.level = 0\n self.name = ''\n self.eval_li = [] # 用户做过的所有题\n self.rev_li = [] # 用户做错的所有题(及其次数) {'prob_id', 'counts'}\n\n def count_avg(self, level=None, time_span=None):\n \"\"\"\n 计算用户个人做题平均分\n :param level: 可选, 若给出则计算 level 内平均分\n :param time_span: 可选, 若给出则计算指定时间段内平均分\n :return: float\n \"\"\"\n if not level:\n return round(sum([eva['score'] for eva in self.eval_li]) / len(self.eval_li), 2)\n else:\n return round(sum([eva['score'] for eva in self.eval_li if eva['level'] == level]) / len(self.eval_li), 2)\n\n def high_freq_user(self, n=None):\n \"\"\"\n 返回用户个人高频错题集\n :param n: 返回的错题个数\n :return: list[{'prob': {}, count: int}]\n \"\"\"\n if n:\n return self.rev_li.sort(key=lambda x: x['counts'])[: n]\n else:\n return self.rev_li.sort(key=lambda x: x['counts'])\n\n def freq_point_user(self):\n \"\"\"\n 返回用户个人的错题考点分布\n :return: list[str]\n \"\"\"\n\n def get_recent(self, n):\n \"\"\"\n 返回用户最近几次做题的得分, 时间顺序\n :param n: 练习的个数\n :return: list[int]\n \"\"\"\n return [eva['score'] for eva in sorted(self.eval_li, key=lambda x: x['end_time'])][:n]\n\n\ndef draw_plot(title, x_list, y_list, xlabel=None, ylabel=None):\n \"\"\" 绘制折线图 \"\"\"\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.plot(x_list, y_list)\n plt.show()\n\n\ndef draw_bar(title, label_list, num_list, xlabel=None, ylabel=None):\n \"\"\" 绘制水平条形图 \"\"\"\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xticks(label_list)\n plt.barh(range(len(label_list)), num_list)\n plt.show()\n\n\ndef draw_pie(title, label_list, num_list, xlabel=None, ylabel=None):\n \"\"\" 绘制饼图 \"\"\"\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.pie(num_list, labels=label_list)\n plt.axis(\"equal\")\n plt.show()\n\n\n# 从 MySQL 中装填数据\nsql = 'select * from t_exer_eval'\nexer_eval_li = mysql_select(sql)\nsql = 'select * from t_user'\nuser_li = mysql_select(sql)\n\nuser = User()\neval_set = EvaluationSet()\nN = 10\n\n# 
绘图\ndraw_plot('学生得分情况趋势', user.name, user.get_recent(N))\ndraw_plot('个人平均分趋势', user.name, user.count_avg())\nerr_set = eval_set.freq_point_all()\ndraw_pie('全体错题考点分布', [x['count'] for x in err_set], [x['point'] for x in err_set])\ndraw_bar('高频错题 TOP'+str(N), range(0, N), eval_set.high_freq_all(N))\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.pie", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks" ] ]
mapazarr/segmentation
[ "3f6ca88378935a7b82a8b7da18d6ce054b870c55" ]
[ "segmentation/utils/plot_utils.py" ]
[ "import os\nimport copy\nimport math\n\nimport numpy as np\nimport pandas as pd\nimport skimage.io as io\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom skimage.segmentation import find_boundaries\nfrom skimage.exposure import rescale_intensity\n\n\n# plotting functions\n\ndef plot_overlay(predicted_contour, plotting_tif, alternate_contour=None, path=None):\n \"\"\"Take in labeled contour data, along with optional mibi tif and second contour,\n and overlay them for comparison\"\n\n Args:\n predicted_contour: 2D numpy array of labeled cell objects\n plotting_tif: 2D or 3D numpy array of imaging signal\n alternate_contour: 2D numpy array of labeled cell objects\n path: path to save the resulting image\n\n outputs:\n plot viewer: plots the outline(s) of the mask(s) as well as intensity from plotting tif\n predicted_contour in red\n alternate_contour in white\n overlay: saves as TIF in file path if specified\n \"\"\"\n\n if plotting_tif is None:\n # will just plot the outlines\n pass\n else:\n if len(plotting_tif.shape) == 2:\n if plotting_tif.shape != predicted_contour.shape:\n raise ValueError(\"plotting_tif and predicted_contour array dimensions not equal.\")\n else:\n # convert RGB image with same data across all three channels\n plotting_tif = np.stack((plotting_tif, plotting_tif, plotting_tif), axis=2)\n elif len(plotting_tif.shape) == 3:\n blank_channel = np.zeros(plotting_tif.shape[:2] + (1,), dtype=plotting_tif.dtype)\n if plotting_tif.shape[2] == 1:\n # pad two empty channels\n plotting_tif = np.concatenate((plotting_tif, blank_channel, blank_channel), axis=2)\n elif plotting_tif.shape[2] == 2:\n # pad one empty channel\n plotting_tif = np.concatenate((plotting_tif, blank_channel), axis=2)\n elif plotting_tif.shape[2] == 3:\n # don't need to do anything\n pass\n else:\n raise ValueError(\"only 3 channels of overlay supported, got {}\".\n format(plotting_tif.shape))\n else:\n raise ValueError(\"plotting tif must be 2D or 3D array, got {}\".\n format(plotting_tif.shape))\n\n if path is not None:\n if os.path.exists(os.path.split(path)[0]) is False:\n raise ValueError(\"File path does not exist.\")\n\n # define borders of cells in mask\n predicted_contour_mask = find_boundaries(predicted_contour,\n connectivity=1, mode='inner').astype(np.uint8)\n predicted_contour_mask[predicted_contour_mask > 0] = 255\n\n if plotting_tif is None:\n # will just save the contour mask\n io.imsave(path, predicted_contour_mask)\n else:\n # rescale each channel to go from 0 to 255\n rescaled = np.zeros(plotting_tif.shape, dtype='uint8')\n print(\"rescaled: {}\".format(rescaled.shape))\n\n for idx in range(plotting_tif.shape[2]):\n if np.max(plotting_tif[:, :, idx]) == 0:\n # don't need to rescale this channel\n pass\n else:\n percentiles = np.percentile(plotting_tif[:, :, idx][plotting_tif[:, :, idx] > 0],\n [5, 95])\n rescaled_intensity = rescale_intensity(plotting_tif[:, :, idx],\n in_range=(percentiles[0], percentiles[1]),\n out_range='uint8')\n rescaled[:, :, idx] = rescaled_intensity\n\n # overlay first contour on all three RGB, to have it show up as white border\n rescaled[predicted_contour_mask > 0, :] = 255\n\n # overlay second contour as red outline if present\n if alternate_contour is not None:\n\n if predicted_contour.shape != alternate_contour.shape:\n raise ValueError(\"predicted_contour and alternate_\"\n \"contour array dimensions not equal.\")\n\n # define borders of cell in mask\n alternate_contour_mask = find_boundaries(alternate_contour, connectivity=1,\n 
mode='inner').astype(np.uint8)\n            rescaled[alternate_contour_mask > 0, 0] = 255\n            rescaled[alternate_contour_mask > 0, 1:] = 0\n\n        # save as TIF if path supplied, otherwise display on screen\n        if path is not None:\n            io.imsave(path, rescaled)\n        else:\n            io.imshow(rescaled)\n\n\ndef randomize_labels(label_map):\n    \"\"\"Takes in a labeled matrix and swaps the integers around\n    so that color gradient has better contrast\n\n    Inputs:\n        label_map(2D numpy array): labeled TIF with each object assigned a unique value\n\n    Outputs:\n        label_map(2D numpy array): labeled TIF with object labels permuted in place\"\"\"\n\n    unique_vals = np.unique(label_map)[1:]\n    pos_1 = np.random.choice(unique_vals, size=len(unique_vals))\n    pos_2 = np.random.choice(unique_vals, size=len(unique_vals))\n\n    for i in range(len(pos_1)):\n        swap_1 = pos_1[i]\n        swap_2 = pos_2[i]\n        swap_1_mask = label_map == swap_1\n        swap_2_mask = label_map == swap_2\n        label_map[swap_1_mask] = swap_2\n        label_map[swap_2_mask] = swap_1\n\n    label_map = label_map.astype('int16')\n\n    return label_map\n\n\ndef outline_objects(L_matrix, list_of_lists):\n    \"\"\"takes in an L matrix generated by skimage.label, along with a\n    list of lists, and returns a mask that has the\n    pixels for all cells from each list represented as integer values for easy plotting\"\"\"\n\n    L_plot = copy.deepcopy(L_matrix).astype(float)\n\n    for idx, val in enumerate(list_of_lists):\n        mask = np.isin(L_plot, val)\n\n        # use a negative value to not interfere with cell labels\n        L_plot[mask] = -(idx + 2)\n\n    L_plot[L_plot > 1] = 1\n    L_plot = np.absolute(L_plot)\n    L_plot = L_plot.astype('int16')\n    return L_plot\n\n\ndef plot_color_map(outline_matrix, names, plotting_colors=None, ground_truth=None, save_path=None):\n    \"\"\"Plot label map with cells of specified category colored the same\n\n    Args\n        outline_matrix: output of outline_objects function which assigns same value to cells\n            of same class\n        names: list of names for each category to use for plotting\n        plotting_colors: list of colors to use for plotting cell categories\n        ground_truth: optional argument to supply label map of true segmentation to\n            be plotted alongside\n        save_path: optional argument to save plot as TIF\n\n    Returns\n        Displays plot in window\"\"\"\n\n    if plotting_colors is None:\n        plotting_colors = ['Black', 'Grey', 'Blue', 'Green',\n                           'Pink', 'moccasin', 'tan', 'sienna', 'firebrick']\n\n    num_categories = np.max(outline_matrix)\n    plotting_colors = plotting_colors[:num_categories + 1]\n    cmap = mpl.colors.ListedColormap(plotting_colors)\n\n    if ground_truth is not None:\n        fig, ax = plt.subplots(nrows=1, ncols=2)\n        mat = ax[0].imshow(outline_matrix, cmap=cmap, vmin=np.min(outline_matrix) - .5,\n                           vmax=np.max(outline_matrix) + .5)\n        swapped = randomize_labels(ground_truth)\n        ax[1].imshow(swapped)\n    else:\n        fig, ax = plt.subplots(nrows=1, ncols=1)\n        mat = ax.imshow(outline_matrix, cmap=cmap, vmin=np.min(outline_matrix) - .5,\n                        vmax=np.max(outline_matrix) + .5)\n\n    # tell the colorbar to tick at integers\n    cbar = fig.colorbar(mat, ticks=np.arange(np.min(outline_matrix), np.max(outline_matrix) + 1))\n\n    cbar.ax.set_yticklabels(names)\n\n    fig.tight_layout()\n    if save_path is not None:\n        fig.savefig(save_path, dpi=200)\n\n\ndef plot_barchart_errors(pd_array, contour_errors, predicted_errors, save_path=None):\n    \"\"\"Plot different error types in a barchart, along with cell-size correlation in a scatter plot\n    Args\n        pd_array: pandas cell array representing error types for each class of cell\n        contour_errors: list of contour-based error columns to count\n        predicted_errors: list of prediction-based error columns to count\n        save_path: optional file path to save generated TIF\n\n    Returns\n        Display plot on viewer\"\"\"\n\n    # make sure all supplied categories are column names\n    if np.any(~np.isin(contour_errors + predicted_errors, pd_array.columns)):\n        raise ValueError(\"Invalid column name\")\n\n    fig, ax = plt.subplots(2, 1, figsize=(10, 10))\n\n    ax[0].scatter(pd_array[\"contour_cell_size\"], pd_array[\"predicted_cell_size\"])\n    ax[0].set_xlabel(\"Contoured Cell\")\n    ax[0].set_ylabel(\"Predicted Cell\")\n\n    # compute percentage of different error types\n    errors = np.zeros(len(predicted_errors) + len(contour_errors))\n    for i in range(len(contour_errors)):\n        errors[i] = len(set(pd_array.loc[pd_array[contour_errors[i]], \"contour_cell\"]))\n\n    for i in range(len(predicted_errors)):\n        errors[i + len(contour_errors)] = len(set(pd_array.loc[pd_array[predicted_errors[i]],\n                                                               \"predicted_cell\"]))\n\n    errors = errors / len(set(pd_array[\"predicted_cell\"]))\n    position = range(len(errors))\n    ax[1].bar(position, errors)\n\n    ax[1].set_xticks(position)\n    ax[1].set_xticklabels(contour_errors + predicted_errors)\n    ax[1].set_title(\"Fraction of cells misclassified\")\n\n    if save_path is not None:\n        fig.savefig(save_path, dpi=200)\n\n\ndef plot_mod_ap(mod_ap_list, thresholds, labels):\n    df = pd.DataFrame({'iou': thresholds})\n\n    for idx, label in enumerate(labels):\n        df[label] = mod_ap_list[idx]['scores']\n\n    fig, ax = plt.subplots()\n    for label in labels:\n        ax.plot('iou', label, data=df, linestyle='-', marker='o')\n\n    ax.set_xlabel('IOU Threshold')\n    ax.set_ylabel('mAP')\n    ax.legend()\n    fig.show()\n\n\ndef plot_error_types(errors, labels, error_plotting):\n    data_dict = pd.DataFrame(pd.Series(errors[0])).transpose()\n\n    for i in range(1, len(labels)):\n        data_dict = data_dict.append(errors[i], ignore_index=True)\n\n    data_dict['algos'] = labels\n\n    fig, axes = plt.subplots(len(error_plotting))\n    for i in range(len(error_plotting)):\n        barchart_helper(ax=axes[i], values=data_dict[error_plotting[i]], labels=labels,\n                        title='{} Errors'.format(error_plotting[i]))\n\n    fig.show()\n    fig.tight_layout()\n\n\ndef barchart_helper(ax, values, labels, title):\n    positions = range(len(values))\n    ax.bar(positions, values)\n    ax.set_xticks(positions)\n    ax.set_xticklabels(labels)\n    ax.set_title(title)\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.zeros", "pandas.DataFrame", "numpy.percentile", "numpy.min", "matplotlib.pyplot.subplots", "numpy.stack", "numpy.absolute", "matplotlib.colors.ListedColormap", "pandas.Series", "numpy.unique", "numpy.isin" ] ]
pepebonet/DeepMP
[ "2fe5493872806fd3556c327c2cacbddc5662d590" ]
[ "deepmp/merge_h5s.py" ]
[ "#!/usr/bin/env python3\nimport os\nimport h5py\nimport click\nimport random\nimport numpy as np\nfrom collections import OrderedDict\n\ndef check_shapes(data1, data2):\n for key in data1.keys():\n if data1[key].shape[1:] != data2[key].shape[1:]:\n raise ValueError(\"Different shapes for dataset: %s. \" % key)\n\n\ndef check_keys(data1, data2):\n if data1.keys() != data2.keys():\n raise ValueError(\"Files have different datasets.\")\n\n\ndef get_size(data):\n\n sizes = [d.shape[0] for d in data.values()]\n\n if max(sizes) != min(sizes):\n raise ValueError(\"Each dataset within a file must have the \"\n \"same number of entries!\")\n\n return sizes[0]\n\n\ndef merge_data(data_list):\n\n data = None\n\n for f in data_list:\n size = get_size(data_list[f])\n if not data:\n data = data_list[f]\n else:\n check_keys(data, data_list[f])\n check_shapes(data, data_list[f])\n for key in data_list[f]:\n data[key] = np.append(data[key], data_list[f][key], axis=0)\n\n return data\n\n\ndef load(filename):\n f = h5py.File(filename, 'r')\n\n data = {}\n for key in f:\n data[key] = f[key][...]\n f.close()\n return data\n\n\ndef save(filename, data):\n f = h5py.File(filename, 'w')\n for key in data:\n f.create_dataset(key, data[key].shape, dtype=data[key].dtype,\n compression='gzip')[...] = data[key]\n f.close()\n\n\ndef get_set(folder, output, label):\n filelist = [os.path.join(folder, el) for el in os.listdir(folder)]\n random.shuffle(filelist)\n data = OrderedDict()\n\n for f in filelist:\n data[f] = load(f)\n\n out_file = os.path.join(output, '{}_file.h5'.format(label))\n save(out_file, merge_data(data))\n\n\n# ------------------------------------------------------------------------------\n# Click\n# ------------------------------------------------------------------------------\n\[email protected](short_help='SVM accuracy output')\[email protected](\n '-f', '--filelist', required=True, multiple=True,\n help='h5 files to concat'\n)\[email protected](\n '-o', '--out_file', required=True, \n help='Output file extension'\n)\ndef main(filelist, out_file):\n data = OrderedDict()\n for f in filelist:\n data[f] = load(f)\n \n save(out_file, merge_data(data))\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.append" ] ]
jamesliu/ray
[ "c18caa4db36d466718bdbcb2229aa0b2dc03da1f" ]
[ "python/ray/tests/test_failure_3.py" ]
[ "import os\nimport sys\nimport signal\n\nimport ray\n\nimport numpy as np\nimport pytest\nimport time\n\nfrom ray._private.test_utils import SignalActor, wait_for_pid_to_exit\n\nSIGKILL = signal.SIGKILL if sys.platform != \"win32\" else signal.SIGTERM\n\n\ndef test_worker_exit_after_parent_raylet_dies(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0)\n cluster.add_node(num_cpus=8, resources={\"foo\": 1})\n cluster.wait_for_nodes()\n\n ray.init(address=cluster.address)\n\n @ray.remote(resources={\"foo\": 1})\n class Actor():\n def get_worker_pid(self):\n return os.getpid()\n\n def get_raylet_pid(self):\n return int(os.environ[\"RAY_RAYLET_PID\"])\n\n actor = Actor.remote()\n worker_pid = ray.get(actor.get_worker_pid.remote())\n raylet_pid = ray.get(actor.get_raylet_pid.remote())\n # Kill the parent raylet.\n os.kill(raylet_pid, SIGKILL)\n os.waitpid(raylet_pid, 0)\n wait_for_pid_to_exit(raylet_pid)\n # Make sure the worker process exits as well.\n wait_for_pid_to_exit(worker_pid)\n\n\[email protected](\n \"ray_start_cluster_head\", [{\n \"num_cpus\": 5,\n \"object_store_memory\": 10**8,\n }],\n indirect=True)\ndef test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):\n @ray.remote\n class LargeMemoryActor:\n def some_expensive_task(self):\n return np.zeros(10**8 // 2, dtype=np.uint8)\n\n actors = [LargeMemoryActor.remote() for _ in range(5)]\n for _ in range(5):\n pending = [a.some_expensive_task.remote() for a in actors]\n while pending:\n [done], pending = ray.wait(pending, num_returns=1)\n\n\[email protected](\n \"ray_start_regular\", [{\n \"_system_config\": {\n \"task_retry_delay_ms\": 500\n }\n }],\n indirect=True)\ndef test_async_actor_task_retries(ray_start_regular):\n # https://github.com/ray-project/ray/issues/11683\n\n signal = SignalActor.remote()\n\n @ray.remote\n class DyingActor:\n def __init__(self):\n print(\"DyingActor init called\")\n self.should_exit = False\n\n def set_should_exit(self):\n print(\"DyingActor.set_should_exit called\")\n self.should_exit = True\n\n async def get(self, x, wait=False):\n print(f\"DyingActor.get called with x={x}, wait={wait}\")\n if self.should_exit:\n os._exit(0)\n if wait:\n await signal.wait.remote()\n return x\n\n # Normal in order actor task retries should work\n dying = DyingActor.options(\n max_restarts=-1,\n max_task_retries=-1,\n ).remote()\n\n assert ray.get(dying.get.remote(1)) == 1\n ray.get(dying.set_should_exit.remote())\n assert ray.get(dying.get.remote(42)) == 42\n\n # Now let's try out of order retries:\n # Task seqno 0 will return\n # Task seqno 1 will be pending and retried later\n # Task seqno 2 will return\n # Task seqno 3 will crash the actor and retried later\n dying = DyingActor.options(\n max_restarts=-1,\n max_task_retries=-1,\n ).remote()\n\n # seqno 0\n ref_0 = dying.get.remote(0)\n assert ray.get(ref_0) == 0\n # seqno 1\n ref_1 = dying.get.remote(1, wait=True)\n # Need a barrier here to ensure ordering between the async and sync call.\n # Otherwise ref2 could be executed prior to ref1.\n for i in range(100):\n if ray.get(signal.cur_num_waiters.remote()) > 0:\n break\n time.sleep(.1)\n assert ray.get(signal.cur_num_waiters.remote()) > 0\n # seqno 2\n ref_2 = dying.set_should_exit.remote()\n assert ray.get(ref_2) is None\n # seqno 3, this will crash the actor because previous task set should exit\n # to true.\n ref_3 = dying.get.remote(3)\n\n # At this point the actor should be restarted. 
The two pending tasks\n # [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,\n # ref_2]. Critically, if ref_2 was retried, ref_3 can never return.\n ray.get(signal.send.remote())\n assert ray.get(ref_1) == 1\n assert ray.get(ref_3) == 3\n\n\nif __name__ == \"__main__\":\n import pytest\n sys.exit(pytest.main([\"-v\", __file__]))\n" ]
[ [ "numpy.zeros" ] ]
CQUlearningsystemgroup/YIcongPeng
[ "f0e500cbb3c54da64db8930e5b2208e7d52186f0" ]
[ "include/tensorvision/core.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Core functions of TV.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nimport tensorvision.utils as utils\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\ntf.app.flags.DEFINE_boolean(\n 'summary', True, ('Whether or not to save summaries to tensorboard.'))\n\n\ndef load_weights(checkpoint_dir, sess, saver):\n \"\"\"\n Load the weights of a model stored in saver.\n\n Parameters\n ----------\n checkpoint_dir : str\n The directory of checkpoints.\n sess : tf.Session\n A Session to use to restore the parameters.\n saver : tf.train.Saver\n\n Returns\n -----------\n int\n training step of checkpoint\n \"\"\"\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n logging.info(ckpt.model_checkpoint_path)\n file = os.path.basename(ckpt.model_checkpoint_path)\n checkpoint_path = os.path.join(checkpoint_dir, file)\n saver.restore(sess, checkpoint_path)\n return int(file.split('-')[1])\n\n\ndef build_training_graph(hypes, queue, modules):\n \"\"\"\n Build the tensorflow graph out of the model files.\n\n Parameters\n ----------\n hypes : dict\n Hyperparameters\n queue: tf.queue\n Data Queue\n modules : tuple\n The modules load in utils.\n\n Returns\n -------\n tuple\n (q, train_op, loss, eval_lists) where\n q is a dict with keys 'train' and 'val' which includes queues,\n train_op is a tensorflow op,\n loss is a float,\n eval_lists is a dict with keys 'train' and 'val'\n \"\"\"\n\n data_input = modules['input']\n encoder = modules['arch']\n objective = modules['objective']\n optimizer = modules['solver']\n\n\n # Add Input Producers to the Graph\n with tf.name_scope(\"Inputs\"):\n image, labels = data_input.inputs(hypes, queue, phase='train')\n\n # Run inference on the encoder network\n logits = encoder.inference(hypes, image, train=True)\n \n # Build decoder on top of the logits\n decoded_logits = objective.decoder(hypes, logits, labels, train=True)\n\n with tf.name_scope(\"Evaluation\"):\n summary_op = tf.summary.merge_all()\n\n graph = {}\n graph['summary_op'] = summary_op\n\n return graph\n\n\ndef build_inference_graph(hypes, modules, image, calib_pl, xy_scale_pl):\n \"\"\"Run one evaluation against the full epoch of data.\n\n Parameters\n ----------\n hypes : dict\n Hyperparameters\n modules : tuble\n the modules load in utils\n image : placeholder\n\n return:\n graph_ops\n \"\"\"\n with tf.name_scope(\"Validation\"):\n\n logits = modules['arch'].inference(hypes, image, train=False)\n labels = (0, 0, 0, calib_pl, 0, xy_scale_pl)\n decoded_logits = modules['objective'].decoder(hypes, logits, labels, \n train=False)\n return decoded_logits\n\n\ndef start_tv_session(hypes):\n \"\"\"\n Run one evaluation against the full epoch of data.\n\n Parameters\n ----------\n hypes : dict\n Hyperparameters\n\n Returns\n -------\n tuple\n (sess, saver, summary_op, summary_writer, threads)\n \"\"\"\n # Build the summary operation based on the TF collection of Summaries.\n if FLAGS.summary:\n tf.contrib.layers.summarize_collection(tf.GraphKeys.WEIGHTS)\n tf.contrib.layers.summarize_collection(tf.GraphKeys.BIASES)\n summary_op = tf.summary.merge_all()\n else:\n summary_op = None\n\n # Create a saver for writing training checkpoints.\n if 'keep_checkpoint_every_n_hours' in hypes['solver']:\n kc = hypes['solver']['keep_checkpoint_every_n_hours']\n else:\n kc = 10000.0\n\n 
saver = tf.train.Saver(max_to_keep=int(hypes['logging']['max_to_keep']))\n\n sess = tf.get_default_session()\n\n # Run the Op to initialize the variables.\n if 'init_function' in hypes:\n _initalize_variables = hypes['init_function']\n _initalize_variables(hypes)\n else:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.summary.FileWriter(hypes['dirs']['output_dir'],\n graph=sess.graph)\n\n tv_session = {}\n tv_session['sess'] = sess\n tv_session['saver'] = saver\n tv_session['summary_op'] = summary_op\n tv_session['writer'] = summary_writer\n tv_session['coord'] = coord\n tv_session['threads'] = threads\n\n return tv_session\n" ]
[ [ "tensorflow.get_default_session", "tensorflow.train.start_queue_runners", "tensorflow.train.Coordinator", "tensorflow.train.get_checkpoint_state", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.name_scope", "tensorflow.contrib.layers.summarize_collection", "tensorflow.summary.merge_all", "tensorflow.summary.FileWriter", "tensorflow.global_variables_initializer" ] ]
MayukhBagchiTrento/Ax
[ "7c925ba8365af714d9671208de490ba48814bfaa" ]
[ "ax/models/torch/botorch_defaults.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport torch\nfrom ax.core.types import TConfig\nfrom ax.models.model_utils import best_observed_point, get_observed\nfrom ax.models.torch.utils import (\n HYPERSPHERE,\n SIMPLEX,\n _to_inequality_constraints,\n sample_hypersphere_positive_quadrant,\n sample_simplex,\n)\nfrom ax.models.torch_base import TorchModel\nfrom botorch.acquisition.acquisition import AcquisitionFunction\nfrom botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction\nfrom botorch.acquisition.objective import ConstrainedMCObjective, LinearMCObjective\nfrom botorch.acquisition.utils import get_acquisition_function, get_infeasible_cost\nfrom botorch.exceptions.errors import UnsupportedError\nfrom botorch.fit import fit_gpytorch_model\nfrom botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP\nfrom botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP\nfrom botorch.models.gpytorch import GPyTorchModel\nfrom botorch.models.model import Model\nfrom botorch.models.model_list_gp_regression import ModelListGP\nfrom botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP\nfrom botorch.optim.optimize import optimize_acqf\nfrom botorch.utils import (\n get_objective_weights_transform,\n get_outcome_constraint_transforms,\n)\nfrom gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\nfrom gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood\nfrom torch import Tensor\n\n\nMIN_OBSERVED_NOISE_LEVEL = 1e-7\n\n\ndef get_and_fit_model(\n Xs: List[Tensor],\n Ys: List[Tensor],\n Yvars: List[Tensor],\n task_features: List[int],\n fidelity_features: List[int],\n metric_names: List[str],\n state_dict: Optional[Dict[str, Tensor]] = None,\n refit_model: bool = True,\n **kwargs: Any,\n) -> GPyTorchModel:\n r\"\"\"Instantiates and fits a botorch ModelListGP using the given data.\n\n Args:\n Xs: List of X data, one tensor per outcome.\n Ys: List of Y data, one tensor per outcome.\n Yvars: List of observed variance of Ys.\n task_features: List of columns of X that are tasks.\n fidelity_features: List of columns of X that are fidelity parameters.\n metric_names: Names of each outcome Y in Ys.\n state_dict: If provided, will set model parameters to this state\n dictionary. 
Otherwise, will fit the model.\n refit_model: Flag for refitting model.\n\n Returns:\n A fitted GPyTorchModel.\n \"\"\"\n if len(fidelity_features) > 0 and len(task_features) > 0:\n raise NotImplementedError(\n \"Currently do not support MF-GP models with task_features!\"\n )\n if len(fidelity_features) > 1:\n raise NotImplementedError(\n \"Fidelity MF-GP models currently support only a single fidelity parameter!\"\n )\n if len(task_features) > 1:\n raise NotImplementedError(\n f\"This model only supports 1 task feature (got {task_features})\"\n )\n elif len(task_features) == 1:\n task_feature = task_features[0]\n else:\n task_feature = None\n model = None\n if task_feature is None:\n if len(Xs) == 1:\n # Use single output, single task GP\n model = _get_model(\n X=Xs[0],\n Y=Ys[0],\n Yvar=Yvars[0],\n task_feature=task_feature,\n fidelity_features=fidelity_features,\n **kwargs,\n )\n elif all(torch.equal(Xs[0], X) for X in Xs[1:]):\n # Use batched multioutput, single task GP\n Y = torch.cat(Ys, dim=-1)\n Yvar = torch.cat(Yvars, dim=-1)\n model = _get_model(\n X=Xs[0],\n Y=Y,\n Yvar=Yvar,\n task_feature=task_feature,\n fidelity_features=fidelity_features,\n **kwargs,\n )\n # TODO: Is this equivalent an \"else:\" here?\n if model is None:\n # Use a ModelListGP\n models = [\n _get_model(X=X, Y=Y, Yvar=Yvar, task_feature=task_feature, **kwargs)\n for X, Y, Yvar in zip(Xs, Ys, Yvars)\n ]\n model = ModelListGP(*models)\n model.to(Xs[0])\n if state_dict is not None:\n model.load_state_dict(state_dict)\n if state_dict is None or refit_model:\n # TODO: Add bounds for optimization stability - requires revamp upstream\n bounds = {}\n if isinstance(model, ModelListGP):\n mll = SumMarginalLogLikelihood(model.likelihood, model)\n else:\n # pyre-ignore: [16]\n mll = ExactMarginalLogLikelihood(model.likelihood, model)\n mll = fit_gpytorch_model(mll, bounds=bounds)\n return model\n\n\ndef predict_from_model(model: Model, X: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"Predicts outcomes given a model and input tensor.\n\n Args:\n model: A botorch Model.\n X: A `n x d` tensor of input parameters.\n\n Returns:\n Tensor: The predicted posterior mean as an `n x o`-dim tensor.\n Tensor: The predicted posterior covariance as a `n x o x o`-dim tensor.\n \"\"\"\n with torch.no_grad():\n posterior = model.posterior(X)\n mean = posterior.mean.cpu().detach()\n # TODO: Allow Posterior to (optionally) return the full covariance matrix\n variance = posterior.variance.cpu().detach().clamp_min(0) # pyre-ignore\n cov = torch.diag_embed(variance)\n return mean, cov\n\n\ndef get_NEI(\n model: Model,\n objective_weights: Tensor,\n outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n X_observed: Optional[Tensor] = None,\n X_pending: Optional[Tensor] = None,\n **kwargs: Any,\n) -> AcquisitionFunction:\n r\"\"\"Instantiates a qNoisyExpectedImprovement acquisition function.\n\n Args:\n objective_weights: The objective is to maximize a weighted sum of\n the columns of f(x). These are the weights.\n outcome_constraints: A tuple of (A, b). For k outcome constraints\n and m outputs at f(x), A is (k x m) and b is (k x 1) such that\n A f(x) <= b. 
(Not used by single task models)\n X_observed: A tensor containing points observed for all objective\n outcomes and outcomes that appear in the outcome constraints (if\n there are any).\n X_pending: A tensor containing points whose evaluation is pending (i.e.\n that have been submitted for evaluation) present for all objective\n outcomes and outcomes that appear in the outcome constraints (if\n there are any).\n mc_samples: The number of MC samples to use (default: 512).\n qmc: If True, use qMC instead of MC (default: True).\n prune_baseline: If True, prune the baseline points for NEI (default: True).\n\n Returns:\n qNoisyExpectedImprovement: The instantiated acquisition function.\n \"\"\"\n if X_observed is None:\n raise ValueError(\"There are no feasible observed points.\")\n # Parse random_scalarization params\n objective_weights = _extract_random_scalarization_settings(\n objective_weights, outcome_constraints, **kwargs\n )\n # construct Objective module\n if outcome_constraints is None:\n objective = LinearMCObjective(weights=objective_weights)\n else:\n obj_tf = get_objective_weights_transform(objective_weights)\n con_tfs = get_outcome_constraint_transforms(outcome_constraints)\n X_observed = torch.as_tensor(X_observed)\n inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)\n objective = ConstrainedMCObjective(\n objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost\n )\n return get_acquisition_function(\n acquisition_function_name=\"qNEI\",\n model=model,\n objective=objective,\n X_observed=X_observed,\n X_pending=X_pending,\n prune_baseline=kwargs.get(\"prune_baseline\", True),\n mc_samples=kwargs.get(\"mc_samples\", 512),\n qmc=kwargs.get(\"qmc\", True),\n # pyre-fixme[6]: Expected `Optional[int]` for 9th param but got\n # `Union[float, int]`.\n seed=torch.randint(1, 10000, (1,)).item(),\n )\n\n\ndef scipy_optimizer(\n acq_function: AcquisitionFunction,\n bounds: Tensor,\n n: int,\n inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,\n fixed_features: Optional[Dict[int, float]] = None,\n rounding_func: Optional[Callable[[Tensor], Tensor]] = None,\n **kwargs: Any,\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Optimizer using scipy's minimize module on a numpy adaptor.\n\n Args:\n acq_function: A botorch AcquisitionFunction.\n bounds: A `2 x d`-dim tensor, where `bounds[0]` (`bounds[1]`) are the\n lower (upper) bounds of the feasible hyperrectangle.\n n: The number of candidates to generate.\n inequality_constraints: A list of tuples (indices, coefficients, rhs),\n with each tuple encoding an inequality constraint of the form\n `\\sum_i (X[indices[i]] * coefficients[i]) >= rhs`\n fixed_features: A map {feature_index: value} for features that should\n be fixed to a particular value during generation.\n rounding_func: A function that rounds an optimization result\n appropriately (i.e., according to `round-trip` transformations).\n\n Returns:\n 2-element tuple containing\n\n - A `n x d`-dim tensor of generated candidates.\n - In the case of joint optimization, a scalar tensor containing\n the joint acquisition value of the `n` points.
In the case of\n sequential optimization, a `n`-dim tensor of conditional acquisition\n values, where `i`-th element is the expected acquisition value\n conditional on having observed candidates `0,1,...,i-1`.\n \"\"\"\n\n num_restarts: int = kwargs.get(\"num_restarts\", 20)\n raw_samples: int = kwargs.get(\"num_raw_samples\", 50 * num_restarts)\n\n if kwargs.get(\"joint_optimization\", False):\n sequential = False\n else:\n sequential = True\n # use SLSQP by default for small problems since it yields faster wall times\n if \"method\" not in kwargs:\n kwargs[\"method\"] = \"SLSQP\"\n X, expected_acquisition_value = optimize_acqf(\n acq_function=acq_function,\n bounds=bounds,\n q=n,\n num_restarts=num_restarts,\n raw_samples=raw_samples,\n options=kwargs,\n inequality_constraints=inequality_constraints,\n fixed_features=fixed_features,\n sequential=sequential,\n post_processing_func=rounding_func,\n )\n return X, expected_acquisition_value\n\n\ndef recommend_best_observed_point(\n model: TorchModel,\n bounds: List[Tuple[float, float]],\n objective_weights: Tensor,\n outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n fixed_features: Optional[Dict[int, float]] = None,\n model_gen_options: Optional[TConfig] = None,\n target_fidelities: Optional[Dict[int, float]] = None,\n) -> Optional[Tensor]:\n \"\"\"\n A wrapper around `ax.models.model_utils.best_observed_point` for TorchModel\n that recommends a best point from previously observed points using either a\n \"max_utility\" or \"feasible_threshold\" strategy.\n\n Args:\n model: A TorchModel.\n bounds: A list of (lower, upper) tuples for each column of X.\n objective_weights: The objective is to maximize a weighted sum of\n the columns of f(x). These are the weights.\n outcome_constraints: A tuple of (A, b). For k outcome constraints\n and m outputs at f(x), A is (k x m) and b is (k x 1) such that\n A f(x) <= b.\n linear_constraints: A tuple of (A, b). For k linear constraints on\n d-dimensional x, A is (k x d) and b is (k x 1) such that\n A x <= b.\n fixed_features: A map {feature_index: value} for features that\n should be fixed to a particular value in the best point.\n model_gen_options: A config dictionary that can contain\n model-specific options.\n target_fidelities: A map {feature_index: value} of fidelity feature\n column indices to their respective target fidelities. 
Used for\n multi-fidelity optimization.\n\n Returns:\n A d-array of the best point, or None if no feasible point was observed.\n \"\"\"\n if target_fidelities:\n raise NotImplementedError(\n \"target_fidelities not implemented for base BotorchModel\"\n )\n\n x_best = best_observed_point(\n model=model,\n bounds=bounds,\n objective_weights=objective_weights,\n outcome_constraints=outcome_constraints,\n linear_constraints=linear_constraints,\n fixed_features=fixed_features,\n options=model_gen_options,\n )\n if x_best is None:\n return None\n return x_best.to(dtype=model.dtype, device=torch.device(\"cpu\"))\n\n\ndef recommend_best_out_of_sample_point(\n model: TorchModel,\n bounds: List[Tuple[float, float]],\n objective_weights: Tensor,\n outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n fixed_features: Optional[Dict[int, float]] = None,\n model_gen_options: Optional[TConfig] = None,\n target_fidelities: Optional[Dict[int, float]] = None,\n) -> Optional[Tensor]:\n \"\"\"\n Identify the current best point by optimizing the posterior mean of the model.\n This is \"out-of-sample\" because it considers un-observed designs as well.\n\n Return None if no such point can be identified.\n\n Args:\n model: A TorchModel.\n bounds: A list of (lower, upper) tuples for each column of X.\n objective_weights: The objective is to maximize a weighted sum of\n the columns of f(x). These are the weights.\n outcome_constraints: A tuple of (A, b). For k outcome constraints\n and m outputs at f(x), A is (k x m) and b is (k x 1) such that\n A f(x) <= b.\n linear_constraints: A tuple of (A, b). For k linear constraints on\n d-dimensional x, A is (k x d) and b is (k x 1) such that\n A x <= b.\n fixed_features: A map {feature_index: value} for features that\n should be fixed to a particular value in the best point.\n model_gen_options: A config dictionary that can contain\n model-specific options.\n target_fidelities: A map {feature_index: value} of fidelity feature\n column indices to their respective target fidelities. 
Used for\n multi-fidelity optimization.\n\n Returns:\n A d-array of the best point, or None if no feasible point exists.\n \"\"\"\n options = model_gen_options or {}\n fixed_features = fixed_features or {}\n acf_options = options.get(\"acquisition_function_kwargs\", {})\n optimizer_options = options.get(\"optimizer_kwargs\", {})\n\n X_observed = get_observed(\n Xs=model.Xs, # pyre-ignore: [16]\n objective_weights=objective_weights,\n outcome_constraints=outcome_constraints,\n )\n\n if hasattr(model, \"_get_best_point_acqf\"):\n acq_function, non_fixed_idcs = model._get_best_point_acqf( # pyre-ignore: [16]\n X_observed=X_observed,\n objective_weights=objective_weights,\n mc_samples=acf_options.get(\"mc_samples\", 512),\n fixed_features=fixed_features,\n target_fidelities=target_fidelities,\n outcome_constraints=outcome_constraints,\n seed_inner=acf_options.get(\"seed_inner\", None),\n qmc=acf_options.get(\"qmc\", True),\n )\n else:\n raise RuntimeError(\"The model should implement _get_best_point_acqf.\")\n\n inequality_constraints = _to_inequality_constraints(linear_constraints)\n # TODO: update optimizers to handle inequality_constraints\n # (including transforming constraints b/c of fixed features)\n if inequality_constraints is not None:\n raise UnsupportedError(\"Inequality constraints are not supported!\")\n\n return_best_only = optimizer_options.get(\"return_best_only\", True)\n bounds_ = torch.tensor(bounds, dtype=model.dtype, device=model.device)\n bounds_ = bounds_.transpose(-1, -2)\n if non_fixed_idcs is not None:\n bounds_ = bounds_[..., non_fixed_idcs]\n\n candidates, _ = optimize_acqf(\n acq_function=acq_function,\n bounds=bounds_,\n q=1,\n num_restarts=optimizer_options.get(\"num_restarts\", 60),\n raw_samples=optimizer_options.get(\"raw_samples\", 1024),\n inequality_constraints=inequality_constraints,\n fixed_features=None, # handled inside the acquisition function\n options={\n \"batch_limit\": optimizer_options.get(\"batch_limit\", 8),\n \"maxiter\": optimizer_options.get(\"maxiter\", 200),\n \"nonnegative\": optimizer_options.get(\"nonnegative\", False),\n \"method\": \"L-BFGS-B\",\n },\n return_best_only=return_best_only,\n )\n rec_point = candidates.detach().cpu()\n if isinstance(acq_function, FixedFeatureAcquisitionFunction):\n rec_point = acq_function._construct_X_full(rec_point)\n if return_best_only:\n rec_point = rec_point.view(-1)\n return rec_point\n\n\ndef _get_model(\n X: Tensor,\n Y: Tensor,\n Yvar: Tensor,\n task_feature: Optional[int] = None,\n fidelity_features: Optional[List[int]] = None,\n **kwargs: Any,\n) -> GPyTorchModel:\n \"\"\"Instantiate a model whose type depends on the input data.\n\n Args:\n X: A `n x d` tensor of input features.\n Y: A `n x m` tensor of input observations.\n Yvar: A `n x m` tensor of input variances (NaN if unobserved).\n task_feature: The index of the column pertaining to the task feature\n (if present).\n fidelity_features: List of columns of X that are fidelity parameters.\n\n Returns:\n A GPyTorchModel (unfitted).\n \"\"\"\n Yvar = Yvar.clamp_min_(MIN_OBSERVED_NOISE_LEVEL)\n is_nan = torch.isnan(Yvar)\n any_nan_Yvar = torch.any(is_nan)\n all_nan_Yvar = torch.all(is_nan)\n if any_nan_Yvar and not all_nan_Yvar:\n # check against None so that a task feature at column index 0 is not treated as absent\n if task_feature is not None:\n # TODO (jej): Replace with inferred noise before making perf judgements.\n Yvar[Yvar != Yvar] = MIN_OBSERVED_NOISE_LEVEL\n else:\n raise ValueError(\n \"Mix of known and unknown variances indicates valuation function \"\n \"errors. Variances should all be specified, or none should be.\"\n )\n if fidelity_features is None:\n fidelity_features = []\n if len(fidelity_features) == 0:\n # only pass linear_truncated arg if there are fidelities\n kwargs = {k: v for k, v in kwargs.items() if k != \"linear_truncated\"}\n if len(fidelity_features) > 0:\n if task_feature is not None:\n raise NotImplementedError( # pragma: no cover\n \"multi-task multi-fidelity models not yet available\"\n )\n # at this point we can assume that there is only a single fidelity parameter\n gp = SingleTaskMultiFidelityGP(\n train_X=X, train_Y=Y, data_fidelity=fidelity_features[0], **kwargs\n )\n elif task_feature is None and all_nan_Yvar:\n gp = SingleTaskGP(train_X=X, train_Y=Y, **kwargs)\n elif task_feature is None:\n gp = FixedNoiseGP(train_X=X, train_Y=Y, train_Yvar=Yvar, **kwargs)\n elif all_nan_Yvar:\n gp = MultiTaskGP(train_X=X, train_Y=Y, task_feature=task_feature, **kwargs)\n else:\n gp = FixedNoiseMultiTaskGP(\n train_X=X, train_Y=Y, train_Yvar=Yvar, task_feature=task_feature, **kwargs\n )\n return gp\n\n\ndef _extract_random_scalarization_settings(\n objective_weights: Tensor,\n outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,\n **kwargs: Any,\n) -> Tensor:\n \"\"\"Generate a random weighting based on scalarization settings.\"\"\"\n use_random_scalarization = kwargs.get(\"random_scalarization\", False)\n random_weights = None\n if use_random_scalarization:\n # Pareto Optimization incompatible with outcome constraints.\n if outcome_constraints is not None:\n raise ValueError(\n \"Random scalarization for pareto frontier exploration \"\n \"is incompatible with outcome constraints. Remove one.\"\n )\n # Set distribution and sample weights.\n distribution = kwargs.get(\"random_scalarization_distribution\", SIMPLEX)\n if distribution == SIMPLEX:\n random_weights = sample_simplex(len(objective_weights))\n elif distribution == HYPERSPHERE:\n random_weights = sample_hypersphere_positive_quadrant(\n len(objective_weights)\n )\n\n if random_weights is not None:\n objective_weights = torch.mul(objective_weights, random_weights)\n return objective_weights\n" ]
[ [ "torch.diag_embed", "torch.device", "torch.cat", "torch.mul", "torch.isnan", "torch.any", "torch.no_grad", "torch.all", "torch.randint", "torch.tensor", "torch.as_tensor", "torch.equal" ] ]
bsvh/umdkb
[ "3cfe36650df26760d1296070aaf6f167271f8e3e" ]
[ "raspberry_pi/main.py" ]
[ "#!/usr/bin/env python3\nimport numpy as np\nimport core\nimport os\nimport cv2\nimport time\nimport dac8552\n\n# file save locations\ntare_current_filename = './calibration_values/tare_current.npy'\nbl_factor_filename = './calibration_values/bl_factor.npy'\ng_filename = './calibration_values/g_value.npy'\ntarget_step_filename = './calibration_values/target_step.npy'\ncalibration_filename = './calibration_values/calibration_file.npy'\n\ndef main_menu():\n print('Operations:')\n print('-'*50)\n print('1. Position calibration')\n print('2. Motor adjustment')\n print('3. View Camera')\n print('4. Gravity input')\n print('5. B/L constant calibration (velocity mode)')\n print('6. 0/Tare balance')\n print('7. Mass measurement (force mode)')\n print('-'*50)\n return input('Selection: ')\n\ntry:\n # start the DAC\n dac = dac8552.DAC8552()\n while False:\n core.set_DAC_voltage(dac,3.3)\n time.sleep(1)\n core.set_DAC_voltage(dac,0)\n time.sleep(1)\n # open camera and set properties\n cap = cv2.VideoCapture(0)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH,1280);\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT,720);\n\n # load initial values\n tare_current = np.load(tare_current_filename)\n bl_factor = np.load(bl_factor_filename)\n g = np.load(g_filename)\n target_step = np.load(target_step_filename)\n calibration_array = np.load(calibration_filename)\n lower_step = calibration_array[0,0]\n upper_step = calibration_array[0,-1]\n lower_limit = int(calibration_array[1,0])\n upper_limit = int(calibration_array[1,-1])\n current_pixel = core.get_camera_position(cap)\n current_step = int(core.pixel_to_step(current_pixel, calibration_filename))\n \n #run watt balance\n while(True):\n os.system('clear')\n print('INTRO\\n')\n user_input = main_menu()\n\n if user_input == '1':\n # postion calibration\n # align camera\n os.system('clear')\n print(\"First, align the rod with black tape with the red box shown on the screen,\")\n print('ensuring that the tape is within the red box.')\n core.display_tracker_box(cap)\n\n #set upper and lower pixel limits\n limits_set = 0\n\n while limits_set == 0:\n os.system('clear')\n print('Now, enter values for the upper pixel limit (shown in black)')\n print('and the lower pixel limit (shown in blue). Limits must fall')\n print('between 1 and 719. 719 is the BOTTOM of the frame.\\n')\n upper_limit = input('Upper pixel limit (minimum 1, default 230): ')\n if not upper_limit:\n upper_limit = '230'\n upper_limit = int(upper_limit)\n lower_limit = input('Lower pixel limit (maximum 720, default 480): ')\n if not lower_limit:\n lower_limit = '480'\n lower_limit = int(lower_limit)\n core.display_tracker_box(cap, limits = [lower_limit,upper_limit])\n user_input = input('Continue adjusting limits? 
(y/N)')\n if user_input.lower() == 'n' or not user_input.lower():\n limits_set = 1\n\n # jogs motor to upper position and prompts for height\n os.system('clear')\n print('Jogging motor to upper limit pixel...')\n current_step = core.jog_to_pixel(cap, current_step, upper_limit, show_image = True)\n upper_step = current_step\n os.system('clear')\n upper_limit_height = int(input('Please enter mass pan height above base in mm: '))\n upper_limit_height_err = int(input('and the uncertainty in mm: '))\n\n # jogs motor to lower position and prompts for height\n os.system('clear')\n print('Jogging motor to lower limit pixel...')\n current_step = core.jog_to_pixel(cap, current_step, lower_limit, show_image = True)\n lower_step = current_step\n os.system('clear')\n lower_limit_height = int(input('Please enter mass pan height above base in mm: '))\n lower_limit_height_err = int(input('and the uncertainty in mm: '))\n\n # creates calibration file\n os.system('clear')\n print('Creating calibration file, please wait...')\n core.create_calibration_file(cap, current_step, calibration_filename,\n [lower_step,upper_step],\n [lower_limit,upper_limit],\n [lower_limit_height,upper_limit_height])\n calib_array = np.load(calibration_filename)\n print(calib_array)\n\n\n elif user_input == '2':\n # motor adjustment\n # ask for step number and direction\n os.system('clear')\n steps = int(input('Steps (3200 steps per revolution, SIGNED): '))\n\n # sets acceleration and velocities for motor\n velocity = 3200 # INPUT GOOD VALUE\n acceleration = 2000 # INPUT GOOD VALUE\n\n # moves motor\n jog_steps, jog_times, current_step = core.motor_jog(current_step, velocity, acceleration, steps)\n\n elif user_input == '3':\n # display camera\n core.display_tracker_box(cap, limits = [lower_limit,upper_limit])\n \n elif user_input == '4':\n # gravity input\n g[0] = float(input('Total gravitational acceleration (m/s^2): '))\n g[1] = float(input('Uncertainty in total gravitational acceleration (m/s^2): '))\n np.save(g_filename,g)\n\n elif user_input == '5':\n # b/l constant calibration\n acceleration = 4000\n target_velocity = 1000\n step_limits = [lower_step, upper_step]\n bl_factor, current_step, target_step = \\\n core.velocity_mode(cap, current_step, step_limits, acceleration, target_velocity,\n calibration_filename, buffer = 1)\n np.save(bl_factor_filename, bl_factor)\n np.save(target_step_filename, target_step)\n\n elif user_input == '6':\n # tare balance\n tare_current, current_step = core.force_mode(dac, cap, current_step, target_step, calibration_filename)\n np.save(tare_current_filename, tare_current)\n\n elif user_input == '7':\n current, current_step = core.force_mode(dac, cap, current_step, target_step, calibration_filename)\n mass, mass_err = core.mass_calc(current, bl_factor, g, tare_current)\n\n print('I = {:f} +/- {:f}\\n'.format(current[0], current[1]))\n print('B/L Factor = {:f} +/- {:f}\\n'.format(bl_factor[0], bl_factor[1]))\n print('g = {:f} +/- {:f}\\n'.format(g[0], g[1]))\n print('m = {:f} +/- {:f}\\n'.format(mass, mass_err))\n\n else:\n print('make a valid selection\\n')\n\n input(\"Press the <ENTER> key to continue...\")\n\nexcept KeyboardInterrupt:\n pass\nfinally:\n # release the camera and put both DAC channels into power-down exactly once\n # (the constants live in the dac8552 module, which is imported above)\n cap.release()\n dac.power_down(dac8552.DAC_A, dac8552.MODE_POWER_DOWN_100K)\n dac.power_down(dac8552.DAC_B, dac8552.MODE_POWER_DOWN_100K)\n" ]
[ [ "numpy.load", "numpy.save" ] ]
AleksiKnuutila/clip-retrieval
[ "1ad24a3ef7c848af7fffcc797cce01c019d3f6e5" ]
[ "clip_retrieval/clip_back.py" ]
[ "from flask import Flask, request, make_response\nfrom flask_restful import Resource, Api\nfrom flask_cors import CORS\nimport clip\nimport faiss\nimport torch\nimport json\nfrom PIL import Image\nfrom io import BytesIO\nfrom PIL import Image\nimport base64\nimport os\nimport fire\nfrom pathlib import Path\nimport pandas as pd\nimport urllib\nimport io\nimport numpy as np\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\nfrom prometheus_client import make_wsgi_app\n\nimport h5py\nfrom tqdm import tqdm\nfrom prometheus_client import start_http_server, Summary\nimport random\nimport time\nfrom prometheus_client import Histogram\nfrom prometheus_client import REGISTRY\n\nfor coll in list(REGISTRY._collector_to_names.keys()):\n REGISTRY.unregister(coll)\n\nFULL_KNN_REQUEST_TIME = Histogram('full_knn_request_time', 'Time spent processing knn request')\nDOWNLOAD_TIME = Histogram('download_time', 'Time spent downloading an url')\nTEXT_CLIP_INFERENCE_TIME = Histogram('text_clip_inference_time', 'Time spent doing a text clip inference')\nIMAGE_CLIP_INFERENCE_TIME = Histogram('image_clip_inference_time', 'Time spent doing a image clip inference')\nMETADATA_GET_TIME = Histogram('metadata_get_time', 'Time spent retrieving metadata')\nKNN_INDEX_TIME = Histogram('knn_index_time', 'Time spent doing a knn on the index')\nIMAGE_PREPRO_TIME = Histogram('image_prepro_time', 'Time spent doing the image preprocessing')\nTEXT_PREPRO_TIME = Histogram('text_prepro_time', 'Time spent doing the text preprocessing')\n\ndef metric_to_average(metric):\n metric_data = metric.collect()[0]\n metric_name = metric_data.name\n metric_description = metric_data.documentation\n samples = metric_data.samples\n metric_sum = [sample.value for sample in samples if sample.name == metric_name+\"_sum\"][0]\n metric_count = [sample.value for sample in samples if sample.name == metric_name+\"_count\"][0]\n if metric_count == 0:\n return metric_name, metric_description, 0, 0.0\n return metric_name, metric_description, metric_count, 1.0*metric_sum/metric_count\n\nclass Health(Resource):\n def get(self):\n return \"ok\"\n\nclass MetricsSummary(Resource):\n def __init__(self, **kwargs):\n super().__init__()\n\n def get(self):\n full_knn_name, full_description, full_knn_count, full_knn_avg = metric_to_average(FULL_KNN_REQUEST_TIME)\n if full_knn_count == 0:\n s = \"No request yet, go do some\"\n else:\n sub_metrics = sorted([(name, description, metric_count, avg, avg/full_knn_avg)\n for (name, description, metric_count, avg) in [metric_to_average(metric) for metric in [\n DOWNLOAD_TIME,\n TEXT_CLIP_INFERENCE_TIME,\n IMAGE_CLIP_INFERENCE_TIME,\n METADATA_GET_TIME,\n KNN_INDEX_TIME,\n IMAGE_PREPRO_TIME,\n TEXT_PREPRO_TIME,\n ]]], key=lambda e:-e[3])\n\n sub_metrics_strings = [(name, description, int(metric_count), f\"{avg:0.4f}s\", f\"{proportion*100:0.1f}%\") \\\n for name, description, metric_count, avg, proportion in sub_metrics]\n \n s=\"\"\n s+=f\"Among {full_knn_count} calls to the knn end point with an average latency of {full_knn_avg:0.4f}s \"+\\\n \"per request, the step costs are (in order): \\n\\n\"\n df = pd.DataFrame(data=sub_metrics_strings, columns=(\"name\", \"description\", \"calls\", \"average\", \"proportion\"))\n s+=df.to_string()\n\n response = make_response(s, 200)\n response.mimetype = \"text/plain\"\n return response\n\nclass IndicesList(Resource):\n def __init__(self, **kwargs):\n super().__init__()\n self.indices = kwargs['indices']\n\n\n def get(self):\n return 
list(self.indices.keys())\n\n@DOWNLOAD_TIME.time()\ndef download_image(url):\n request = urllib.request.Request(\n url,\n data=None,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0\"\n },\n )\n with urllib.request.urlopen(request, timeout=10) as r:\n img_stream = io.BytesIO(r.read())\n return img_stream\n\nclass MetadataService(Resource):\n def __init__(self, **kwargs):\n super().__init__()\n self.indices_loaded = kwargs['indices_loaded']\n self.columns_to_return = kwargs['columns_to_return']\n\n def post(self):\n json_data = request.get_json(force=True)\n ids = json_data['ids']\n indice_name = json_data[\"indice_name\"]\n metadata_provider = self.indices_loaded[indice_name][\"metadata_provider\"]\n metas = metadata_provider.get(ids, self.columns_to_return)\n metas_with_ids = [{\"id\": item_id, \"metadata\": meta_to_dict(meta)} for item_id, meta in zip(ids, metas)]\n return metas_with_ids\n \n\nclass KnnService(Resource):\n def __init__(self, **kwargs):\n super().__init__()\n self.indices_loaded = kwargs['indices_loaded']\n self.device = kwargs['device']\n self.model = kwargs['model']\n self.preprocess = kwargs['preprocess']\n self.columns_to_return = kwargs['columns_to_return']\n\n\n def query(self, text_input=None, image_input=None, image_url_input=None, modality=\"image\", num_images=100, num_result_ids=100, indice_name=None):\n if text_input is None and image_input is None and image_url_input is None:\n raise ValueError(\"must fill one of text, image and image url input\")\n if indice_name is None:\n indice_name = next(iter(self.indices_loaded.keys()))\n image_index = self.indices_loaded[indice_name][\"image_index\"]\n text_index = self.indices_loaded[indice_name][\"text_index\"]\n metadata_provider = self.indices_loaded[indice_name][\"metadata_provider\"]\n\n if text_input is not None:\n with TEXT_PREPRO_TIME.time():\n text = clip.tokenize([text_input]).to(self.device)\n with TEXT_CLIP_INFERENCE_TIME.time():\n text_features = self.model.encode_text(text)\n text_features /= text_features.norm(dim=-1, keepdim=True)\n query = text_features.cpu().detach().numpy().astype(\"float32\")\n if image_input is not None or image_url_input is not None:\n if image_input is not None:\n binary_data = base64.b64decode(image_input)\n img_data = BytesIO(binary_data)\n elif image_url_input is not None:\n img_data = download_image(image_url_input)\n with IMAGE_PREPRO_TIME.time():\n img = Image.open(img_data)\n prepro = self.preprocess(img).unsqueeze(0).to(self.device)\n with IMAGE_CLIP_INFERENCE_TIME.time():\n image_features = self.model.encode_image(prepro)\n image_features /= image_features.norm(dim=-1, keepdim=True)\n query = image_features.cpu().detach().numpy().astype(\"float32\")\n \n index = image_index if modality == \"image\" else text_index\n\n with KNN_INDEX_TIME.time():\n D, I = index.search(query, num_result_ids)\n nb_results = np.where(I[0] == -1)[0]\n if len(nb_results) > 0:\n nb_results = nb_results[0]\n else:\n nb_results = len(I[0])\n result_indices = I[0][:nb_results]\n result_distances = D[0][:nb_results]\n results = []\n with METADATA_GET_TIME.time():\n metas = metadata_provider.get(result_indices[:num_images], self.columns_to_return)\n for key, (d, i) in enumerate(zip(result_distances, result_indices)):\n output = {}\n meta = None if key+1 > len(metas) else metas[key]\n if meta is not None and \"image_path\" in meta:\n path = meta[\"image_path\"]\n if os.path.exists(path):\n img = Image.open(path)\n buffered = BytesIO()\n 
img.save(buffered, format=\"JPEG\")\n img_str = base64.b64encode(buffered.getvalue()).decode(\"utf-8\") \n output[\"image\"] = img_str\n if meta is not None:\n output.update(meta_to_dict(meta))\n output[\"id\"] = i.item()\n output[\"similarity\"] = d.item()\n results.append(output)\n return results\n\n @FULL_KNN_REQUEST_TIME.time()\n def post(self):\n json_data = request.get_json(force=True)\n text_input = json_data.get(\"text\", None)\n image_input = json_data.get(\"image\", None)\n image_url_input = json_data.get(\"image_url\", None)\n modality = json_data[\"modality\"]\n num_images = json_data[\"num_images\"]\n num_result_ids = json_data.get(\"num_result_ids\", num_images)\n indice_name = json_data[\"indice_name\"]\n return self.query(text_input, image_input, image_url_input, modality, num_images, num_result_ids, indice_name)\n \n\n\ndef meta_to_dict(meta):\n output = {}\n for k, v in meta.items():\n if isinstance(v, bytes):\n v = v.decode()\n elif type(v).__module__ == np.__name__:\n v = v.item()\n output[k] = v\n return output\n\nclass ParquetMetadataProvider:\n def __init__(self, parquet_folder):\n data_dir = Path(parquet_folder)\n self.metadata_df = pd.concat(\n pd.read_parquet(parquet_file)\n for parquet_file in sorted(data_dir.glob('*.parquet'))\n )\n\n def get(self, ids, cols=None):\n if cols is None:\n cols = self.metadata_df.columns.tolist()\n else:\n cols = list(set(self.metadata_df.columns.tolist()) & set(cols))\n\n return [self.metadata_df[i:(i+1)][cols].to_dict(orient='records')[0] for i in ids]\n\n\ndef parquet_to_hdf5(parquet_folder, output_hdf5_file, columns_to_return):\n f = h5py.File(output_hdf5_file, 'w')\n data_dir = Path(parquet_folder)\n ds = f.create_group('dataset') \n for parquet_files in tqdm(sorted(data_dir.glob('*.parquet'))):\n df = pd.read_parquet(parquet_files)\n for k in df.keys():\n if k not in columns_to_return:\n continue\n col = df[k]\n if col.dtype == 'float64' or col.dtype=='float32':\n col=col.fillna(0.0)\n if col.dtype == 'int64' or col.dtype=='int32':\n col=col.fillna(0)\n if col.dtype == 'object':\n col=col.fillna('')\n z = col.to_numpy()\n if k not in ds:\n ds.create_dataset(k, data=z, maxshape=(None,), compression=\"gzip\")\n else:\n prevlen = len(ds[k])\n ds[k].resize((prevlen+len(z),))\n ds[k][prevlen:] = z\n \n del ds\n f.close()\n\nclass Hdf5MetadataProvider:\n def __init__(self, hdf5_file):\n f = h5py.File(hdf5_file, 'r')\n self.ds = f['dataset']\n def get(self, ids, cols=None):\n items = [{} for _ in range(len(ids))]\n if cols is None:\n cols = self.ds.keys()\n else:\n cols = list(self.ds.keys() & set(cols))\n for k in cols:\n sorted_ids = sorted([(k, i) for i, k in list(enumerate(ids))])\n for_hdf5 = [k for k,_ in sorted_ids]\n for_np = [i for _,i in sorted_ids]\n g = self.ds[k][for_hdf5]\n gg = g[for_np]\n for i, e in enumerate(gg):\n items[i][k] = e\n return items\n\ndef load_clip_indices(indices_paths, enable_hdf5, enable_faiss_memory_mapping, columns_to_return):\n print('loading clip...')\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n model, preprocess = clip.load(\"ViT-B/32\", device=device, jit=False)\n\n indices = json.load(open(indices_paths))\n\n indices_loaded = {}\n\n for name, indice_folder in indices.items():\n image_present = os.path.exists(indice_folder+\"/image.index\")\n text_present = os.path.exists(indice_folder+\"/text.index\")\n hdf5_path = indice_folder+\"/metadata.hdf5\"\n parquet_folder = indice_folder+\"/metadata\"\n print('loading metadata...')\n if enable_hdf5:\n if not 
os.path.exists(hdf5_path):\n parquet_to_hdf5(parquet_folder, hdf5_path, columns_to_return)\n metadata_provider = Hdf5MetadataProvider(hdf5_path)\n else:\n metadata_provider = ParquetMetadataProvider(parquet_folder)\n\n print('loading indices...')\n if image_present:\n if enable_faiss_memory_mapping:\n image_index = faiss.read_index(indice_folder+\"/image.index\", faiss.IO_FLAG_MMAP|faiss.IO_FLAG_READ_ONLY)\n else:\n image_index = faiss.read_index(indice_folder+\"/image.index\")\n else:\n image_index = None\n if text_present:\n if enable_faiss_memory_mapping:\n text_index = faiss.read_index(indice_folder+\"/text.index\", faiss.IO_FLAG_MMAP|faiss.IO_FLAG_READ_ONLY)\n else:\n text_index = faiss.read_index(indice_folder+\"/text.index\")\n else:\n text_index = None\n indices_loaded[name]={\n 'metadata_provider': metadata_provider,\n 'image_index': image_index,\n 'text_index': text_index\n }\n \n return indices_loaded, indices, device, model, preprocess\n\n\n\ndef clip_back(indices_paths=\"indices_paths.json\", port=1234, enable_hdf5=False, enable_faiss_memory_mapping=False, columns_to_return=None):\n if columns_to_return is None:\n columns_to_return = [\"url\", \"image_path\", \"caption\", \"NSFW\"]\n indices_loaded, indices, device, model, preprocess = load_clip_indices(indices_paths, enable_hdf5, enable_faiss_memory_mapping, columns_to_return)\n\n app = Flask(__name__)\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {\n '/metrics': make_wsgi_app()\n })\n api = Api(app)\n api.add_resource(MetricsSummary, '/metrics-summary')\n api.add_resource(IndicesList, '/indices-list', resource_class_kwargs={'indices': indices})\n api.add_resource(MetadataService, '/metadata', resource_class_kwargs={'indices_loaded': indices_loaded,\\\n 'columns_to_return': columns_to_return})\n api.add_resource(KnnService, '/knn-service', resource_class_kwargs={'indices_loaded': indices_loaded, 'device': device, \\\n 'model': model, 'preprocess': preprocess, 'columns_to_return': columns_to_return})\n api.add_resource(Health, '/')\n CORS(app)\n app.run(host=\"0.0.0.0\", port=port, debug=False)\n\n\nif __name__ == '__main__':\n fire.Fire(clip_back)\n" ]
[ [ "pandas.DataFrame", "pandas.read_parquet", "torch.cuda.is_available", "numpy.where" ] ]
larry-fuy/tensorflow_xeonphi
[ "787ab22d490e79ea8c06511d60d6cddf1b2dd2c2" ]
[ "tensorflow/contrib/session_bundle/session_bundle_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for session_bundle.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.session_bundle import constants\n\nfrom tensorflow.contrib.session_bundle import manifest_pb2\nfrom tensorflow.contrib.session_bundle import session_bundle\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.util import compat\n\n\nclass SessionBundleLoadTest(tf.test.TestCase):\n\n def testBasic(self):\n base_path = tf.test.test_src_dir_path(\n \"contrib/session_bundle/example/half_plus_two/00000123\")\n tf.reset_default_graph()\n sess, meta_graph_def = session_bundle.load_session_bundle_from_path(\n base_path, target=\"\", config=tf.ConfigProto(device_count={\"CPU\": 2}))\n\n self.assertTrue(sess)\n asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)\n with sess.as_default():\n path1, path2 = sess.run([\"filename1:0\", \"filename2:0\"])\n self.assertEqual(\n compat.as_bytes(os.path.join(asset_path, \"hello1.txt\")), path1)\n self.assertEqual(\n compat.as_bytes(os.path.join(asset_path, \"hello2.txt\")), path2)\n\n collection_def = meta_graph_def.collection_def\n\n signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value\n self.assertEquals(len(signatures_any), 1)\n\n signatures = manifest_pb2.Signatures()\n signatures_any[0].Unpack(signatures)\n default_signature = signatures.default_signature\n input_name = default_signature.regression_signature.input.tensor_name\n output_name = default_signature.regression_signature.output.tensor_name\n y = sess.run([output_name], {input_name: np.array([[0], [1], [2], [3]])})\n # The operation is y = 0.5 * x + 2\n self.assertEqual(y[0][0], 2)\n self.assertEqual(y[0][1], 2.5)\n self.assertEqual(y[0][2], 3)\n self.assertEqual(y[0][3], 3.5)\n\n def testBadPath(self):\n base_path = tf.test.test_src_dir_path(\"/no/such/a/dir\")\n tf.reset_default_graph()\n with self.assertRaises(RuntimeError) as cm:\n _, _ = session_bundle.load_session_bundle_from_path(\n base_path, target=\"local\",\n config=tf.ConfigProto(device_count={\"CPU\": 2}))\n self.assertTrue(\"Expected meta graph file missing\" in str(cm.exception))\n\n\nclass SessionBundleLoadNoVarsTest(tf.test.TestCase):\n \"\"\"Test the case where there are no variables in the graph.\"\"\"\n\n def setUp(self):\n self.base_path = os.path.join(tf.test.get_temp_dir(), \"no_vars\")\n if not os.path.exists(self.base_path):\n os.mkdir(self.base_path)\n\n # Create a simple graph with a variable, then convert variables to\n # constants and export the graph.\n with tf.Graph().as_default() as g:\n x = tf.placeholder(tf.float32, name=\"x\")\n w = tf.Variable(3.0)\n y = tf.sub(w * x, 7.0, name=\"y\") # pylint: 
disable=unused-variable\n tf.add_to_collection(\"meta\", \"this is meta\")\n\n with self.test_session(graph=g) as session:\n tf.initialize_all_variables().run()\n new_graph_def = graph_util.convert_variables_to_constants(\n session, g.as_graph_def(), [\"y\"])\n\n filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)\n tf.train.export_meta_graph(\n filename, graph_def=new_graph_def, collection_list=[\"meta\"])\n\n def tearDown(self):\n shutil.rmtree(self.base_path)\n\n def testGraphWithoutVarsLoadsCorrectly(self):\n session, _ = session_bundle.load_session_bundle_from_path(self.base_path)\n got = session.run([\"y:0\"], {\"x:0\": 5.0})[0]\n self.assertEquals(got, 5.0 * 3.0 - 7.0)\n self.assertEquals(tf.get_collection(\"meta\"), [b\"this is meta\"])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.contrib.session_bundle.session_bundle.load_session_bundle_from_path", "tensorflow.train.export_meta_graph", "numpy.array", "tensorflow.initialize_all_variables", "tensorflow.test.test_src_dir_path", "tensorflow.Graph", "tensorflow.reset_default_graph", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.ConfigProto", "tensorflow.placeholder", "tensorflow.test.get_temp_dir", "tensorflow.test.main", "tensorflow.sub", "tensorflow.add_to_collection", "tensorflow.contrib.session_bundle.manifest_pb2.Signatures" ] ]
adws2/Spin_model
[ "cc977b5244ba2989f1f48e89264b4da4486503f6" ]
[ "Spin_calculator.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nclass calculator():\n def __init__(self, samples):\n self.samples = samples\n self.N = samples.shape[0]\n self.M = samples.shape[1]\n self.len_T = samples.shape[2]\n \n \n def magnetization(self):\n ## sample shape : N X M X len_T\n ## M ~ <S>\n \n M_array = np.zeros(self.len_T)\n \n for temp_idx in range(self.len_T):\n M = abs(np.average(self.samples[:,:,temp_idx], axis = 0))\n ave_M = np.average(M)\n M_array[temp_idx] = ave_M\n \n return M_array\n \n \n def susceptibility(self,Temperature_array):\n ## sample shape : N X M X len_T\n ## X ~ <M^2> - <M>^2\n \n sucep_array = np.zeros(self.len_T)\n \n for temp_idx,Temperature in enumerate(Temperature_array):\n M = abs(np.average(self.samples[:,:,temp_idx], axis = 0))\n ave_M = np.average(M)\n\n M2 = M**2\n ave_M2 = np.average(M2)\n\n suscep = np.sqrt(self.N) * (ave_M2 - ave_M**2) / Temperature\n sucep_array[temp_idx] = suscep\n \n return sucep_array\n \n \n def energy(self,J):\n ## sample shape : N X M X len_T\n ## E ~ -Sum Jij * Si * Sj\n \n E_array = np.zeros(self.len_T)\n \n for temp_idx in range(self.len_T):\n E = np.diag((-self.samples[:,:,temp_idx].T@[email protected][:,:,temp_idx])/(2*np.sqrt(self.N)))\n ave_E = np.average(E)\n E_array[temp_idx] = ave_E\n \n return E_array\n \n \n def Specific_heat(self,Temperature_array,J):\n ## sample shape : N X M X len_T\n ## X ~ <E^2> - <E>^2\n \n C_array = np.zeros(self.len_T)\n \n for temp_idx,Temperature in enumerate(Temperature_array):\n E = np.diag((-self.samples[:,:,temp_idx].T@[email protected][:,:,temp_idx])\n /(2*np.sqrt(self.N)))\n ave_E = np.average(E)\n \n E2 = E**2\n ave_E2 = np.average(E2)\n \n C = self.N * (ave_E2 - ave_E**2) / (Temperature**2)\n C_array[temp_idx] = C\n \n return C_array\n \n \n \n def Edward_Anderson(self):\n ## sample shape : N X M X len_T\n ## q_EA ~ <Si * Sj> -- i,j 는 다른 샘플\n \n q_array = np.zeros(self.len_T)\n \n for temp_idx in range(self.len_T):\n \n overlap = self.samples[:,:,temp_idx][email protected][:,:,temp_idx] / self.N - np.eye(self.M)\n q = abs(np.average(overlap))\n q_array[temp_idx] = q\n \n return q_array\n\n\n def SG_susceptibility(self):\n ## sample shape : N X M X len_T\n ## X_SG ~ < (<SiSj> - <Si><Sj>)^2 > \n \n X_array = np.zeros(self.len_T)\n \n for temp_idx in range(self.len_T):\n m_i = np.average(self.samples[:,:,temp_idx] ,axis = 1)\n chi = 0.\n\n for i in range(self.N):\n for j in range(self.N):\n SiSj = self.samples[i,:,temp_idx]@self.samples[j,:,temp_idx]/self.M\n MiMj = m_i[i]*m_i[j]\n chi += ((SiSj-MiMj)**2)/self.N\n\n X_array[temp_idx] = chi\n \n return X_array\n" ]
[ [ "numpy.average", "numpy.sqrt", "numpy.zeros", "numpy.eye" ] ]
AlexanderSing/TrackEval
[ "373e643f8989445f0253af6748e9e247d6ae6322" ]
[ "trackeval/metrics/track_map.py" ]
[ "import numpy as np\nfrom ._base_metric import _BaseMetric\nfrom .. import _timing\nfrom functools import partial\nfrom .. import utils\nfrom ..utils import TrackEvalException\n\n\nclass TrackMAP(_BaseMetric):\n \"\"\"Class which implements the TrackMAP metrics\"\"\"\n\n @staticmethod\n def get_default_metric_config():\n \"\"\"Default class config values\"\"\"\n default_config = {\n 'USE_AREA_RANGES': True, # whether to evaluate for certain area ranges\n 'AREA_RANGES': [[0 ** 2, 32 ** 2], # additional area range sets for which TrackMAP is evaluated\n [32 ** 2, 96 ** 2], # (all area range always included), default values for TAO\n [96 ** 2, 1e5 ** 2]], # evaluation\n 'AREA_RANGE_LABELS': [\"area_s\", \"area_m\", \"area_l\"], # the labels for the area ranges\n 'USE_TIME_RANGES': True, # whether to evaluate for certain time ranges (length of tracks)\n 'TIME_RANGES': [[0, 3], [3, 10], [10, 1e5]], # additional time range sets for which TrackMAP is evaluated\n # (all time range always included) , default values for TAO evaluation\n 'TIME_RANGE_LABELS': [\"time_s\", \"time_m\", \"time_l\"], # the labels for the time ranges\n 'IOU_THRESHOLDS': np.arange(0.5, 0.96, 0.05), # the IoU thresholds\n 'RECALL_THRESHOLDS': np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01) + 1), endpoint=True),\n # recall thresholds at which precision is evaluated\n 'MAX_DETECTIONS': 0, # limit the maximum number of considered tracks per sequence (0 for unlimited)\n 'PRINT_CONFIG': True\n }\n return default_config\n\n def __init__(self, config=None):\n super().__init__()\n self.config = utils.init_config(config, self.get_default_metric_config(), self.get_name())\n\n self.num_ig_masks = 1\n self.lbls = ['all']\n self.use_area_rngs = self.config['USE_AREA_RANGES']\n if self.use_area_rngs:\n self.area_rngs = self.config['AREA_RANGES']\n self.area_rng_lbls = self.config['AREA_RANGE_LABELS']\n self.num_ig_masks += len(self.area_rng_lbls)\n self.lbls += self.area_rng_lbls\n\n self.use_time_rngs = self.config['USE_TIME_RANGES']\n if self.use_time_rngs:\n self.time_rngs = self.config['TIME_RANGES']\n self.time_rng_lbls = self.config['TIME_RANGE_LABELS']\n self.num_ig_masks += len(self.time_rng_lbls)\n self.lbls += self.time_rng_lbls\n\n self.array_labels = self.config['IOU_THRESHOLDS']\n self.rec_thrs = self.config['RECALL_THRESHOLDS']\n\n self.maxDet = self.config['MAX_DETECTIONS']\n self.float_array_fields = ['AP_' + lbl for lbl in self.lbls] + ['AR_' + lbl for lbl in self.lbls]\n self.fields = self.float_array_fields\n self.summary_fields = self.float_array_fields\n\n @_timing.time\n def eval_sequence(self, data):\n \"\"\"Calculates GT and Tracker matches for one sequence for TrackMAP metrics. 
Adapted from\n https://github.com/TAO-Dataset/\"\"\"\n\n # Initialise results to zero for each sequence as the fields are only defined over the set of all sequences\n res = {}\n for field in self.fields:\n res[field] = [0 for _ in self.array_labels]\n\n gt_ids, dt_ids = data['gt_track_ids'], data['dt_track_ids']\n\n if len(gt_ids) == 0 and len(dt_ids) == 0:\n for idx in range(self.num_ig_masks):\n res[idx] = None\n return res\n\n # get track data\n gt_tr_areas = data.get('gt_track_areas', None) if self.use_area_rngs else None\n gt_tr_lengths = data.get('gt_track_lengths', None) if self.use_time_rngs else None\n gt_tr_iscrowd = data.get('gt_track_iscrowd', None)\n dt_tr_areas = data.get('dt_track_areas', None) if self.use_area_rngs else None\n dt_tr_lengths = data.get('dt_track_lengths', None) if self.use_time_rngs else None\n is_nel = data.get('not_exhaustively_labeled', False)\n\n # compute ignore masks for different track sets to eval\n gt_ig_masks = self._compute_track_ig_masks(len(gt_ids), track_lengths=gt_tr_lengths, track_areas=gt_tr_areas,\n iscrowd=gt_tr_iscrowd)\n dt_ig_masks = self._compute_track_ig_masks(len(dt_ids), track_lengths=dt_tr_lengths, track_areas=dt_tr_areas,\n is_not_exhaustively_labeled=is_nel, is_gt=False)\n\n boxformat = data.get('boxformat', 'xywh')\n ious = self._compute_track_ious(data['dt_tracks'], data['gt_tracks'], iou_function=data['iou_type'],\n boxformat=boxformat)\n\n for mask_idx in range(self.num_ig_masks):\n gt_ig_mask = gt_ig_masks[mask_idx]\n\n # Sort gt ignore last\n gt_idx = np.argsort([g for g in gt_ig_mask], kind=\"mergesort\")\n gt_ids = [gt_ids[i] for i in gt_idx]\n\n ious_sorted = ious[:, gt_idx] if len(ious) > 0 else ious\n\n num_thrs = len(self.array_labels)\n num_gt = len(gt_ids)\n num_dt = len(dt_ids)\n\n # Array to store the \"id\" of the matched dt/gt\n gt_m = np.zeros((num_thrs, num_gt)) - 1\n dt_m = np.zeros((num_thrs, num_dt)) - 1\n\n gt_ig = np.array([gt_ig_mask[idx] for idx in gt_idx])\n dt_ig = np.zeros((num_thrs, num_dt))\n\n for iou_thr_idx, iou_thr in enumerate(self.array_labels):\n if len(ious_sorted) == 0:\n break\n\n for dt_idx, _dt in enumerate(dt_ids):\n iou = min([iou_thr, 1 - 1e-10])\n # information about best match so far (m=-1 -> unmatched)\n # store the gt_idx which matched for _dt\n m = -1\n for gt_idx, _ in enumerate(gt_ids):\n # if this gt already matched continue\n if gt_m[iou_thr_idx, gt_idx] > 0:\n continue\n # if _dt matched to reg gt, and on ignore gt, stop\n if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1:\n break\n # continue to next gt unless better match made\n if ious_sorted[dt_idx, gt_idx] < iou - np.finfo('float').eps:\n continue\n # if match successful and best so far, store appropriately\n iou = ious_sorted[dt_idx, gt_idx]\n m = gt_idx\n\n # No match found for _dt, go to next _dt\n if m == -1:\n continue\n\n # if gt to ignore for some reason update dt_ig.\n # Should not be used in evaluation.\n dt_ig[iou_thr_idx, dt_idx] = gt_ig[m]\n # _dt match found, update gt_m, and dt_m with \"id\"\n dt_m[iou_thr_idx, dt_idx] = gt_ids[m]\n gt_m[iou_thr_idx, m] = _dt\n\n dt_ig_mask = dt_ig_masks[mask_idx]\n\n dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt)) # 1 X num_dt\n dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0) # num_thrs X num_dt\n\n # Based on dt_ig_mask ignore any unmatched detection by updating dt_ig\n dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == -1, dt_ig_mask))\n # store results for given video and category\n res[mask_idx] = {\n \"dt_ids\": dt_ids,\n \"gt_ids\": gt_ids,\n 
\"dt_matches\": dt_m,\n \"gt_matches\": gt_m,\n \"dt_scores\": data['dt_track_scores'],\n \"gt_ignore\": gt_ig,\n \"dt_ignore\": dt_ig,\n }\n\n return res\n\n def combine_sequences(self, all_res):\n \"\"\"Combines metrics across all sequences. Computes precision and recall values based on track matches.\n Adapted from https://github.com/TAO-Dataset/\n \"\"\"\n num_thrs = len(self.array_labels)\n num_recalls = len(self.rec_thrs)\n\n # -1 for absent categories\n precision = -np.ones(\n (num_thrs, num_recalls, self.num_ig_masks)\n )\n recall = -np.ones((num_thrs, self.num_ig_masks))\n\n for ig_idx in range(self.num_ig_masks):\n ig_idx_results = [res[ig_idx] for res in all_res.values() if res[ig_idx] is not None]\n\n # Remove elements which are None\n if len(ig_idx_results) == 0:\n continue\n\n # Append all scores: shape (N,)\n # limit considered tracks for each sequence if maxDet > 0\n if self.maxDet == 0:\n dt_scores = np.concatenate([res[\"dt_scores\"] for res in ig_idx_results], axis=0)\n\n dt_idx = np.argsort(-dt_scores, kind=\"mergesort\")\n\n dt_m = np.concatenate([e[\"dt_matches\"] for e in ig_idx_results],\n axis=1)[:, dt_idx]\n dt_ig = np.concatenate([e[\"dt_ignore\"] for e in ig_idx_results],\n axis=1)[:, dt_idx]\n elif self.maxDet > 0:\n dt_scores = np.concatenate([res[\"dt_scores\"][0:self.maxDet] for res in ig_idx_results], axis=0)\n\n dt_idx = np.argsort(-dt_scores, kind=\"mergesort\")\n\n dt_m = np.concatenate([e[\"dt_matches\"][:, 0:self.maxDet] for e in ig_idx_results],\n axis=1)[:, dt_idx]\n dt_ig = np.concatenate([e[\"dt_ignore\"][:, 0:self.maxDet] for e in ig_idx_results],\n axis=1)[:, dt_idx]\n else:\n raise Exception(\"Number of maximum detections must be >= 0, but is set to %i\" % self.maxDet)\n\n gt_ig = np.concatenate([res[\"gt_ignore\"] for res in ig_idx_results])\n # num gt anns to consider\n num_gt = np.count_nonzero(gt_ig == 0)\n\n if num_gt == 0:\n continue\n\n tps = np.logical_and(dt_m != -1, np.logical_not(dt_ig))\n fps = np.logical_and(dt_m == -1, np.logical_not(dt_ig))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n\n for iou_thr_idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n num_tp = len(tp)\n rc = tp / num_gt\n if num_tp:\n recall[iou_thr_idx, ig_idx] = rc[-1]\n else:\n recall[iou_thr_idx, ig_idx] = 0\n\n # np.spacing(1) ~= eps\n pr = tp / (fp + tp + np.spacing(1))\n pr = pr.tolist()\n\n # Ensure precision values are monotonically decreasing\n for i in range(num_tp - 1, 0, -1):\n if pr[i] > pr[i - 1]:\n pr[i - 1] = pr[i]\n\n # find indices at the predefined recall values\n rec_thrs_insert_idx = np.searchsorted(rc, self.rec_thrs, side=\"left\")\n\n pr_at_recall = [0.0] * num_recalls\n\n try:\n for _idx, pr_idx in enumerate(rec_thrs_insert_idx):\n pr_at_recall[_idx] = pr[pr_idx]\n except IndexError:\n pass\n\n precision[iou_thr_idx, :, ig_idx] = (np.array(pr_at_recall))\n\n res = {'precision': precision, 'recall': recall}\n\n # compute the precision and recall averages for the respective alpha thresholds and ignore masks\n for lbl in self.lbls:\n res['AP_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float)\n res['AR_' + lbl] = np.zeros((len(self.array_labels)), dtype=np.float)\n\n for a_id, alpha in enumerate(self.array_labels):\n for lbl_idx, lbl in enumerate(self.lbls):\n p = precision[a_id, :, lbl_idx]\n if len(p[p > -1]) == 0:\n mean_p = -1\n else:\n mean_p = np.mean(p[p > -1])\n res['AP_' + lbl][a_id] = mean_p\n res['AR_' + 
lbl][a_id] = recall[a_id, lbl_idx]\n\n return res\n\n def combine_classes_class_averaged(self, all_res, ignore_empty_classes=True):\n \"\"\"Combines metrics across all classes by averaging over the class values\n Note mAP is not well defined for 'empty classes' so 'ignore empty classes' is always true here.\n \"\"\"\n res = {}\n for field in self.fields:\n res[field] = np.zeros((len(self.array_labels)), dtype=np.float)\n field_stacked = np.array([res[field] for res in all_res.values()])\n\n for a_id, alpha in enumerate(self.array_labels):\n values = field_stacked[:, a_id]\n if len(values[values > -1]) == 0:\n mean = -1\n else:\n mean = np.mean(values[values > -1])\n res[field][a_id] = mean\n return res\n\n def combine_classes_det_averaged(self, all_res):\n \"\"\"Combines metrics across all classes by averaging over the detection values\"\"\"\n\n res = {}\n for field in self.fields:\n res[field] = np.zeros((len(self.array_labels)), dtype=np.float)\n field_stacked = np.array([res[field] for res in all_res.values()])\n\n for a_id, alpha in enumerate(self.array_labels):\n values = field_stacked[:, a_id]\n if len(values[values > -1]) == 0:\n mean = -1\n else:\n mean = np.mean(values[values > -1])\n res[field][a_id] = mean\n return res\n\n def _compute_track_ig_masks(self, num_ids, track_lengths=None, track_areas=None, iscrowd=None,\n is_not_exhaustively_labeled=False, is_gt=True):\n \"\"\"\n Computes ignore masks for different track sets to evaluate\n :param num_ids: the number of track IDs\n :param track_lengths: the lengths of the tracks (number of timesteps)\n :param track_areas: the average area of a track\n :param iscrowd: whether a track is marked as crowd\n :param is_not_exhaustively_labeled: whether the track category is not exhaustively labeled\n :param is_gt: whether it is gt\n :return: the track ignore masks\n \"\"\"\n # for TAO tracks for classes which are not exhaustively labeled are not evaluated\n if not is_gt and is_not_exhaustively_labeled:\n track_ig_masks = [[1 for _ in range(num_ids)] for i in range(self.num_ig_masks)]\n else:\n # consider all tracks\n track_ig_masks = [[0 for _ in range(num_ids)]]\n\n # consider tracks with certain area\n if self.use_area_rngs:\n for rng in self.area_rngs:\n track_ig_masks.append([0 if rng[0] - np.finfo('float').eps <= area <= rng[1] + np.finfo('float').eps\n else 1 for area in track_areas])\n\n # consider tracks with certain duration\n if self.use_time_rngs:\n for rng in self.time_rngs:\n track_ig_masks.append([0 if rng[0] - np.finfo('float').eps <= length\n <= rng[1] + np.finfo('float').eps else 1 for length in track_lengths])\n\n # for YouTubeVIS evaluation tracks with crowd tag are not evaluated\n if is_gt and iscrowd:\n track_ig_masks = [np.logical_or(mask, iscrowd) for mask in track_ig_masks]\n\n return track_ig_masks\n\n @staticmethod\n def _compute_bb_track_iou(dt_track, gt_track, boxformat='xywh'):\n \"\"\"\n Calculates the track IoU for one detected track and one ground truth track for bounding boxes\n :param dt_track: the detected track (format: dictionary with frame index as keys and\n numpy arrays as values)\n :param gt_track: the ground truth track (format: dictionary with frame index as keys and\n numpy array as values)\n :param boxformat: the format of the boxes\n :return: the track IoU\n \"\"\"\n intersect = 0\n union = 0\n image_ids = set(gt_track.keys()) | set(dt_track.keys())\n for image in image_ids:\n g = gt_track.get(image, None)\n d = dt_track.get(image, None)\n if boxformat == 'xywh':\n if d is not None and g is not 
None:\n dx, dy, dw, dh = d\n gx, gy, gw, gh = g\n w = max(min(dx + dw, gx + gw) - max(dx, gx), 0)\n h = max(min(dy + dh, gy + gh) - max(dy, gy), 0)\n i = w * h\n u = dw * dh + gw * gh - i\n intersect += i\n union += u\n elif d is None and g is not None:\n union += g[2] * g[3]\n elif d is not None and g is None:\n union += d[2] * d[3]\n elif boxformat == 'x0y0x1y1':\n if d is not None and g is not None:\n dx0, dy0, dx1, dy1 = d\n gx0, gy0, gx1, gy1 = g\n w = max(min(dx1, gx1) - max(dx0, gx0), 0)\n h = max(min(dy1, gy1) - max(dy0, gy0), 0)\n i = w * h\n u = (dx1 - dx0) * (dy1 - dy0) + (gx1 - gx0) * (gy1 - gy0) - i\n intersect += i\n union += u\n elif d is None and g is not None:\n union += (g[2] - g[0]) * (g[3] - g[1])\n elif d is not None and g is None:\n union += (d[2] - d[0]) * (d[3] - d[1])\n else:\n raise TrackEvalException('BoxFormat not implemented')\n if intersect > union:\n raise TrackEvalException(\"Intersection value > union value. Are the box values corrupted?\")\n return intersect / union if union > 0 else 0\n\n @staticmethod\n def _compute_mask_track_iou(dt_track, gt_track):\n \"\"\"\n Calculates the track IoU for one detected track and one ground truth track for segmentation masks\n :param dt_track: the detected track (format: dictionary with frame index as keys and\n pycocotools rle encoded masks as values)\n :param gt_track: the ground truth track (format: dictionary with frame index as keys and\n pycocotools rle encoded masks as values)\n :return: the track IoU\n \"\"\"\n # only loaded when needed to reduce minimum requirements\n from pycocotools import mask as mask_utils\n\n intersect = .0\n union = .0\n image_ids = set(gt_track.keys()) | set(dt_track.keys())\n for image in image_ids:\n g = gt_track.get(image, None)\n d = dt_track.get(image, None)\n if d and g:\n intersect += mask_utils.area(mask_utils.merge([d, g], True))\n union += mask_utils.area(mask_utils.merge([d, g], False))\n elif not d and g:\n union += mask_utils.area(g)\n elif d and not g:\n union += mask_utils.area(d)\n if union < 0.0 - np.finfo('float').eps:\n raise TrackEvalException(\"Union value < 0. Are the segmentaions corrupted?\")\n if intersect > union:\n raise TrackEvalException(\"Intersection value > union value. Are the segmentations corrupted?\")\n iou = intersect / union if union > 0.0 + np.finfo('float').eps else 0.0\n return iou\n\n @staticmethod\n def _compute_track_ious(dt, gt, iou_function='bbox', boxformat='xywh'):\n \"\"\"\n Calculate track IoUs for a set of ground truth tracks and a set of detected tracks\n \"\"\"\n\n if len(gt) == 0 and len(dt) == 0:\n return []\n\n if iou_function == 'bbox':\n track_iou_function = partial(TrackMAP._compute_bb_track_iou, boxformat=boxformat)\n elif iou_function == 'mask':\n track_iou_function = partial(TrackMAP._compute_mask_track_iou)\n else:\n raise Exception('IoU function not implemented')\n\n ious = np.zeros([len(dt), len(gt)])\n for i, j in np.ndindex(ious.shape):\n ious[i, j] = track_iou_function(dt[i], gt[j])\n return ious\n\n @staticmethod\n def _row_print(*argv):\n \"\"\"Prints results in an evenly spaced rows, with more space in first row\"\"\"\n if len(argv) == 1:\n argv = argv[0]\n to_print = '%-40s' % argv[0]\n for v in argv[1:]:\n to_print += '%-12s' % str(v)\n print(to_print)\n" ]
[ [ "numpy.concatenate", "numpy.ndindex", "numpy.array", "numpy.count_nonzero", "numpy.logical_not", "numpy.logical_or", "numpy.zeros", "numpy.round", "numpy.ones", "numpy.logical_and", "numpy.mean", "numpy.finfo", "numpy.arange", "numpy.argsort", "numpy.cumsum", "numpy.repeat", "numpy.searchsorted", "numpy.spacing" ] ]
andma1/remind-eora
[ "3006c1ddecd1766ea2aef65115c53b08358be7ea" ]
[ "app/AI/script.py" ]
[ "\nimport os\nimport cv2\nimport time\nimport argparse\nimport requests\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\n\n\n################################################\n################################################\n\n\ndef rleToMask(rleString,height,width):\n rows,cols = height,width\n rleNumbers = [int(numstring) for numstring in rleString.split(' ')]\n rlePairs = np.array(rleNumbers).reshape(-1,2)\n img = np.zeros(rows*cols,dtype=np.uint8)\n for index,length in rlePairs:\n index -= 1\n img[index:index+length] = 255\n img = img.reshape(cols,rows)\n img = img.T\n return img\n\ndef auth():\n res = requests.get('https://www.visionhub.ru/api/v2/auth/generate_token/')\n if res.ok:\n token = res.json()['token']\n else:\n raise Exception(f'Failed to auth, reason : {res.reason}')\n return token\n\n\ndef push_task(image_path, token):\n print('image_path :', image_path)\n print('token :', token)\n \n res = requests.post('https://www.visionhub.ru/api/v2/process/img2img/', \n headers={'Authorization': f'Bearer {token}'},\n files={'image': open(image_path, 'rb')},\n data={'model': 'people_segmentator'})\n if res.ok:\n task_id = res.json()['task_id']\n return task_id\n else:\n raise Exception(f'Failed to process, reason : {res.reason}')\n\n\ndef get_status(task_id):\n res = requests.get(f'https://www.visionhub.ru/api/v2/task_result/{task_id}/',\n headers={'Authorization': f'Bearer {token}'})\n if res.ok:\n res_json = res.json()\n return res_json\n else:\n raise Exception(f'Failed to get task_result, reason : {res.reason}')\n\n\ndef overlay_transparent(background, overlay, x, y):\n background_width = background.shape[1]\n background_height = background.shape[0]\n if x >= background_width or y >= background_height:\n return background\n \n h, w = overlay.shape[0], overlay.shape[1]\n if x + w > background_width:\n w = background_width - x\n overlay = overlay[:, :w]\n if y + h > background_height:\n h = background_height - y\n overlay = overlay[:h]\n if overlay.shape[2] < 4:\n overlay = np.concatenate([overlay,np.ones((overlay.shape[0], overlay.shape[1], 1), \n dtype=overlay.dtype) * 255], axis = 2)\n overlay_image = overlay[..., :3]\n mask = overlay[..., 3:] / 255.0\n background[y:y+h, x:x+w] = (1.0 - mask) * background[y:y+h, x:x+w] + mask * overlay_image\n \n return background\n\n\n################################################\n################################################\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-d', help='input directory')\nparser.add_argument('-r', help='save directory')\nargs = parser.parse_args()\n\ninput_path = '/home/centos/remind-eora/public/files'\nsave_path = '/home/centos/remind-eora/public/results'\n\ninput_dir_name = args.d\nsave_name = args.r\n\nfolder_path = os.path.join(input_path, input_dir_name)\n\nif not os.path.exists(os.path.join(save_path, input_dir_name)):\n os.makedirs(os.path.join(save_path, input_dir_name))\nsave_path = os.path.join(save_path, input_dir_name, save_name)\n\nif folder_path is None or save_path is None:\n raise Exception('Folder path or Save path not given !')\n\n\n################################################\n################################################\n\n\n##########################\n## Main processes ##\n##########################\n\nstart = time.time()\n\nimgs_path = None\nbackground = None\nfor file_name in os.listdir(folder_path):\n path = os.path.join(folder_path, file_name)\n if os.path.isdir(path):\n imgs_path = 
path\n elif os.path.isfile(path):\n background = cv2.imread(path)\n\nif imgs_path is None or background is None:\n raise Exception('Given folder is invalid !')\n\noriginal_size = (background.shape[1], background.shape[0])\n\nprint('\\nGetting token...')\ntoken = auth()\n\nprint('\\nPushing tasks...')\nimages = []\ntask_ids = []\nfor img_name in tqdm(os.listdir(imgs_path)):\n images.append(cv2.imread(os.path.join(imgs_path, img_name)))\n task_ids.append(push_task(os.path.join(imgs_path, img_name), token))\n\nprint('\\nWaiting for results...')\npngs = [None for _ in task_ids]\ndone = [False for _ in task_ids]\nwhile sum(done) != len(done):\n for i, task_id in enumerate(task_ids):\n if not done[i]:\n res_json = get_status(task_id)\n if res_json['status'] == 'DONE':\n rle = eval(res_json['prediction'])\n mask = rleToMask(rle['counts'], *rle['size'])\n png = np.concatenate((images[i], mask[:,:,np.newaxis]), axis=2)\n \n mask = png[:,:,-1].astype(bool)\n ys, xs = np.meshgrid(np.arange(mask.shape[1]), \n np.arange(mask.shape[0]))\n x_min, x_max = np.min(xs[mask]), np.max(xs[mask])\n y_min, y_max = np.min(ys[mask]), np.max(ys[mask])\n png = png[x_min:x_max, y_min:y_max]\n \n pngs[i] = png\n done[i] = True\n print(f'{sum(done)} done...')\n\nprint('\\nDone !')\n\npngs.sort(key=lambda x: x.shape[0])\nnew_order_start = []\nnew_order_finish = []\nfor i in range(len(pngs)):\n if i%2 == 0: \n new_order_start.append(pngs[i])\n else: \n new_order_finish.append(pngs[i])\npngs = new_order_start + new_order_finish[::-1]\n\n####################\n## Settings ##\n####################\nx_padding_ratio = 0.1\ny_padding_ratio = 0.1\nspace = 0\n\n#################################\n## Background validation ##\n#################################\nshape_0 = max([png.shape[0] for png in pngs])\nshape_1 = sum([png.shape[1] for png in pngs]) + (len(pngs) - 1) * space\nnew_x, new_y = (2 * x_padding_ratio * background.shape[0] + shape_0, \n 2 * y_padding_ratio * background.shape[1] + shape_1)\nratio = max(new_x/background.shape[0], new_y/background.shape[1])\nresize_shape = (int(ratio*background.shape[1]), \n int(ratio*background.shape[0]))\nbackground = cv2.resize(background, resize_shape)\n\n######################\n## Overlaying ##\n######################\n\ncurrent_x = background.shape[0] - (background.shape[0] - shape_0) // 2\ncurrent_y = (background.shape[1] - shape_1) // 2\nfor png in pngs:\n overlay_transparent(background, png, current_y, current_x-png.shape[0])\n current_y += png.shape[1]\n current_y += space\n\nbackground = cv2.resize(background, original_size)\ncv2.imwrite(save_path, background)\n\nfinish = time.time()\nprint('Execution finished :', finish-start, 'sec')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.concatenate", "numpy.max", "numpy.array", "numpy.zeros", "numpy.ones", "numpy.min", "numpy.arange" ] ]
vdye/astropy
[ "2865905fa520c540a025e34fd52ee5a3b28d75d9" ]
[ "astropy/io/fits/tests/test_header.py" ]
[ "# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see PYFITS.rst\n\nimport copy\nimport collections\n\nfrom io import StringIO, BytesIO\n\nimport pytest\nimport numpy as np\n\nfrom astropy.io import fits\nfrom astropy.io.fits.verify import VerifyWarning\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom . import FitsTestCase\nfrom astropy.io.fits.card import _pad\nfrom astropy.io.fits.header import _pad_length\nfrom astropy.io.fits.util import encode_ascii\n\n\ndef test_shallow_copy():\n \"\"\"Make sure that operations on a shallow copy do not alter the original.\n #4990.\"\"\"\n original_header = fits.Header([('a', 1), ('b', 1)])\n copied_header = copy.copy(original_header)\n\n # Modifying the original dict should not alter the copy\n original_header['c'] = 100\n assert 'c' not in copied_header\n\n # and changing the copy should not change the original.\n copied_header['a'] = 0\n assert original_header['a'] == 1\n\n\ndef test_init_with_header():\n \"\"\"Make sure that creating a Header from another Header makes a copy if\n copy is True.\"\"\"\n\n original_header = fits.Header([('a', 10)])\n new_header = fits.Header(original_header, copy=True)\n original_header['a'] = 20\n assert new_header['a'] == 10\n\n new_header['a'] = 0\n assert original_header['a'] == 20\n\n\ndef test_init_with_dict():\n dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15}\n h1 = fits.Header(dict1)\n for i in dict1:\n assert dict1[i] == h1[i]\n\n\ndef test_init_with_ordereddict():\n # Create a list of tuples. Each tuple consisting of a letter and the number\n list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')]\n # Create an ordered dictionary and a header from this dictionary\n dict1 = collections.OrderedDict(list1)\n h1 = fits.Header(dict1)\n # Check that the order is preserved of the initial list\n assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))\n\n\nclass TestHeaderFunctions(FitsTestCase):\n \"\"\"Test Header and Card objects.\"\"\"\n\n def test_rename_keyword(self):\n \"\"\"Test renaming keyword with rename_keyword.\"\"\"\n header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])\n header.rename_keyword('A', 'B')\n assert 'A' not in header\n assert 'B' in header\n assert header[0] == 'B'\n assert header['B'] == 'B'\n assert header.comments['B'] == 'C'\n\n @pytest.mark.parametrize('key', ['A', 'a'])\n def test_indexing_case(self, key):\n \"\"\"Check that indexing is case insensitive\"\"\"\n header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])\n assert key in header\n assert header[key] == 'B'\n assert header.get(key) == 'B'\n assert header.index(key) == 0\n assert header.comments[key] == 'C'\n assert header.count(key) == 1\n header.remove(key, ignore_missing=False)\n\n def test_card_constructor_default_args(self):\n \"\"\"Test Card constructor with default argument values.\"\"\"\n\n c = fits.Card()\n assert '' == c.keyword\n\n def test_card_from_bytes(self):\n \"\"\"\n Test loading a Card from a `bytes` object (assuming latin-1 encoding).\n \"\"\"\n\n c = fits.Card.fromstring(b\"ABC = 'abc'\")\n assert c.keyword == 'ABC'\n assert c.value == 'abc'\n\n def test_string_value_card(self):\n \"\"\"Test Card constructor with string value\"\"\"\n\n c = fits.Card('abc', '<8 ch')\n assert str(c) == _pad(\"ABC = '<8 ch '\")\n c = fits.Card('nullstr', '')\n assert str(c) == _pad(\"NULLSTR = ''\")\n\n def test_boolean_value_card(self):\n \"\"\"Test Card constructor with boolean value\"\"\"\n\n c = fits.Card(\"abc\", True)\n assert str(c) == _pad(\"ABC 
= T\")\n\n c = fits.Card.fromstring('ABC = F')\n assert c.value is False\n\n def test_long_integer_value_card(self):\n \"\"\"Test Card constructor with long integer value\"\"\"\n\n c = fits.Card('long_int', -467374636747637647347374734737437)\n assert str(c) == _pad(\"LONG_INT= -467374636747637647347374734737437\")\n\n def test_floating_point_value_card(self):\n \"\"\"Test Card constructor with floating point value\"\"\"\n\n c = fits.Card('floatnum', -467374636747637647347374734737437.)\n\n if (str(c) != _pad(\"FLOATNUM= -4.6737463674763E+32\") and\n str(c) != _pad(\"FLOATNUM= -4.6737463674763E+032\")):\n assert str(c) == _pad(\"FLOATNUM= -4.6737463674763E+32\")\n\n def test_complex_value_card(self):\n \"\"\"Test Card constructor with complex value\"\"\"\n\n c = fits.Card('abc',\n (1.2345377437887837487e88 + 6324767364763746367e-33j))\n f1 = _pad(\"ABC = (1.23453774378878E+88, 6.32476736476374E-15)\")\n f2 = _pad(\"ABC = (1.2345377437887E+088, 6.3247673647637E-015)\")\n f3 = _pad(\"ABC = (1.23453774378878E+88, 6.32476736476374E-15)\")\n if str(c) != f1 and str(c) != f2:\n assert str(c) == f3\n\n def test_card_image_constructed_too_long(self):\n \"\"\"Test that over-long cards truncate the comment\"\"\"\n\n # card image constructed from key/value/comment is too long\n # (non-string value)\n c = fits.Card('abc', 9, 'abcde' * 20)\n with pytest.warns(fits.verify.VerifyWarning):\n assert (str(c) ==\n \"ABC = 9 \"\n \"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab\")\n c = fits.Card('abc', 'a' * 68, 'abcdefg')\n with pytest.warns(fits.verify.VerifyWarning):\n assert str(c) == f\"ABC = '{'a' * 68}'\"\n\n def test_constructor_filter_illegal_data_structures(self):\n \"\"\"Test that Card constructor raises exceptions on bad arguments\"\"\"\n\n pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})\n pytest.raises(ValueError, fits.Card, 'key', [], 'comment')\n\n def test_keyword_too_long(self):\n \"\"\"Test that long Card keywords are allowed, but with a warning\"\"\"\n\n pytest.warns(UserWarning, fits.Card, 'abcdefghi', 'long')\n\n def test_illegal_characters_in_key(self):\n \"\"\"\n Test that Card constructor allows illegal characters in the keyword,\n but creates a HIERARCH card.\n \"\"\"\n\n # This test used to check that a ValueError was raised, because a\n # keyword like 'abc+' was simply not allowed. Now it should create a\n # HIERARCH card.\n\n with pytest.warns(AstropyUserWarning) as w:\n c = fits.Card('abc+', 9)\n assert len(w) == 1\n assert c.image == _pad('HIERARCH abc+ = 9')\n\n def test_add_history(self):\n header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),\n ('HISTORY', 2), ('HISTORY', 3), ('', '', ''),\n ('', '', '')])\n header.add_history(4)\n # One of the blanks should get used, so the length shouldn't change\n assert len(header) == 6\n assert header.cards[4].value == 4\n assert header['HISTORY'] == [1, 2, 3, 4]\n assert repr(header['HISTORY']) == '1\\n2\\n3\\n4'\n\n header.add_history(0, after='A')\n assert len(header) == 6\n assert header.cards[1].value == 0\n assert header['HISTORY'] == [0, 1, 2, 3, 4]\n\n def test_add_blank(self):\n header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),\n ('', '', ''), ('', '', '')])\n header.add_blank(4)\n # This time a new blank should be added, and the existing blanks don't\n # get used... 
(though this is really kinda sketchy--there's a\n # distinction between truly blank cards, and cards with blank keywords\n # that isn't currently made int he code)\n assert len(header) == 7\n assert header.cards[6].value == 4\n assert header[''] == [1, 2, 3, '', '', 4]\n assert repr(header['']) == '1\\n2\\n3\\n\\n\\n4'\n\n header.add_blank(0, after='A')\n assert len(header) == 8\n assert header.cards[1].value == 0\n assert header[''] == [0, 1, 2, 3, '', '', 4]\n\n header[''] = 5\n header[' '] = 6\n assert header[''] == [0, 1, 2, 3, '', '', 4, 5, 6]\n assert header[' '] == [0, 1, 2, 3, '', '', 4, 5, 6]\n\n def test_update(self):\n class FakeHeader(list):\n def keys(self):\n return [l[0] for l in self]\n\n def __getitem__(self, key):\n return next(l[1:] for l in self if l[0] == key)\n\n header = fits.Header()\n header.update({'FOO': ('BAR', 'BAZ')})\n header.update(FakeHeader([('A', 1), ('B', 2, 'comment')]))\n\n assert set(header.keys()) == {'FOO', 'A', 'B'}\n assert header.comments['B'] == 'comment'\n\n # test that comments are preserved\n tmphdr = fits.Header()\n tmphdr['HELLO'] = (1, 'this is a comment')\n header.update(tmphdr)\n assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO'}\n assert header.comments['HELLO'] == 'this is a comment'\n\n header.update(NAXIS1=100, NAXIS2=100)\n assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO', 'NAXIS1', 'NAXIS2'}\n assert set(header.values()) == {'BAR', 1, 2, 100, 100}\n\n def test_update_comment(self):\n hdul = fits.open(self.data('arange.fits'))\n hdul[0].header.update({'FOO': ('BAR', 'BAZ')})\n assert hdul[0].header['FOO'] == 'BAR'\n assert hdul[0].header.comments['FOO'] == 'BAZ'\n\n with pytest.raises(ValueError):\n hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')})\n\n hdul.writeto(self.temp('test.fits'))\n hdul.close()\n\n hdul = fits.open(self.temp('test.fits'), mode='update')\n hdul[0].header.comments['FOO'] = 'QUX'\n hdul.close()\n\n hdul = fits.open(self.temp('test.fits'))\n assert hdul[0].header.comments['FOO'] == 'QUX'\n\n hdul[0].header.add_comment(0, after='FOO')\n assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0'\n hdul.close()\n\n def test_commentary_cards(self):\n # commentary cards\n val = \"A commentary card's value has no quotes around it.\"\n c = fits.Card(\"HISTORY\", val)\n assert str(c) == _pad('HISTORY ' + val)\n val = \"A commentary card has no comment.\"\n c = fits.Card(\"COMMENT\", val, \"comment\")\n assert str(c) == _pad('COMMENT ' + val)\n\n def test_commentary_card_created_by_fromstring(self):\n # commentary card created by fromstring()\n c = fits.Card.fromstring(\n \"COMMENT card has no comments. \"\n \"/ text after slash is still part of the value.\")\n assert (c.value == 'card has no comments. 
'\n '/ text after slash is still part of the value.')\n assert c.comment == ''\n\n def test_commentary_card_will_not_parse_numerical_value(self):\n # commentary card will not parse the numerical value\n c = fits.Card.fromstring(\"HISTORY (1, 2)\")\n assert str(c) == _pad(\"HISTORY (1, 2)\")\n\n def test_equal_sign_after_column8(self):\n # equal sign after column 8 of a commentary card will be part ofthe\n # string value\n c = fits.Card.fromstring(\"HISTORY = (1, 2)\")\n assert str(c) == _pad(\"HISTORY = (1, 2)\")\n\n def test_blank_keyword(self):\n c = fits.Card('', ' / EXPOSURE INFORMATION')\n assert str(c) == _pad(' / EXPOSURE INFORMATION')\n c = fits.Card.fromstring(str(c))\n assert c.keyword == ''\n assert c.value == ' / EXPOSURE INFORMATION'\n\n def test_specify_undefined_value(self):\n # this is how to specify an undefined value\n c = fits.Card(\"undef\", fits.card.UNDEFINED)\n assert str(c) == _pad(\"UNDEF =\")\n\n def test_complex_number_using_string_input(self):\n # complex number using string input\n c = fits.Card.fromstring('ABC = (8, 9)')\n assert str(c) == _pad(\"ABC = (8, 9)\")\n\n def test_fixable_non_standard_fits_card(self, capsys):\n # fixable non-standard FITS card will keep the original format\n c = fits.Card.fromstring('abc = + 2.1 e + 12')\n assert c.value == 2100000000000.0\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'Verification reported errors'):\n assert str(c) == _pad(\"ABC = +2.1E+12\")\n\n def test_fixable_non_fsc(self):\n # fixable non-FSC: if the card is not parsable, it's value will be\n # assumed\n # to be a string and everything after the first slash will be comment\n c = fits.Card.fromstring(\n \"no_quote= this card's value has no quotes \"\n \"/ let's also try the comment\")\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'Verification reported errors'):\n assert (str(c) == \"NO_QUOTE= 'this card''s value has no quotes' \"\n \"/ let's also try the comment \")\n\n def test_undefined_value_using_string_input(self):\n # undefined value using string input\n c = fits.Card.fromstring('ABC = ')\n assert str(c) == _pad(\"ABC =\")\n\n def test_mislocated_equal_sign(self, capsys):\n # test mislocated \"=\" sign\n c = fits.Card.fromstring('XYZ= 100')\n assert c.keyword == 'XYZ'\n assert c.value == 100\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'Verification reported errors'):\n assert str(c) == _pad(\"XYZ = 100\")\n\n def test_equal_only_up_to_column_10(self, capsys):\n # the test of \"=\" location is only up to column 10\n\n # This test used to check if Astropy rewrote this card to a new format,\n # something like \"HISTO = '= (1, 2)\". 
But since ticket #109 if the\n # format is completely wrong we don't make any assumptions and the card\n # should be left alone\n c = fits.Card.fromstring(\"HISTO = (1, 2)\")\n with pytest.warns(AstropyUserWarning,\n match=r'header keyword is invalid'):\n assert str(c) == _pad(\"HISTO = (1, 2)\")\n\n # Likewise this card should just be left in its original form and\n # we shouldn't guess how to parse it or rewrite it.\n c = fits.Card.fromstring(\" HISTORY (1, 2)\")\n with pytest.warns(AstropyUserWarning,\n match=r'header keyword is invalid'):\n assert str(c) == _pad(\" HISTORY (1, 2)\")\n\n def test_verify_invalid_equal_sign(self):\n # verification\n c = fits.Card.fromstring('ABC= a6')\n with pytest.warns(AstropyUserWarning) as w:\n c.verify()\n err_text1 = (\"Card 'ABC' is not FITS standard (equal sign not at \"\n \"column 8)\")\n err_text2 = (\"Card 'ABC' is not FITS standard (invalid value \"\n \"string: 'a6'\")\n assert len(w) == 4\n assert err_text1 in str(w[1].message)\n assert err_text2 in str(w[2].message)\n\n def test_fix_invalid_equal_sign(self):\n fix_text = \"Fixed 'ABC' card to meet the FITS standard.\"\n c = fits.Card.fromstring('ABC= a6')\n with pytest.warns(AstropyUserWarning, match=fix_text) as w:\n c.verify('fix')\n assert len(w) == 4\n assert str(c) == _pad(\"ABC = 'a6 '\")\n\n def test_long_string_value(self):\n # test long string value\n c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)\n assert (str(c) ==\n \"ABC = 'long string value long string value long string value long string &' \"\n \"CONTINUE 'value long string value long string value long string value long &' \"\n \"CONTINUE 'string value long string value long string value &' \"\n \"CONTINUE '&' / long comment long comment long comment long comment long \"\n \"CONTINUE '&' / comment long comment long comment long comment long comment \"\n \"CONTINUE '' / long comment \")\n\n def test_long_string_value_with_multiple_long_words(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/11298\n \"\"\"\n c = fits.Card('WHATEVER',\n 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_'\n '03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY'\n '_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml')\n assert (str(c) ==\n \"WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'\"\n \"CONTINUE '.h5 &' \"\n \"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'\"\n \"CONTINUE 'xml' \")\n\n def test_long_unicode_string(self):\n \"\"\"Regression test for\n https://github.com/spacetelescope/PyFITS/issues/1\n\n So long as a unicode string can be converted to ASCII it should have no\n different behavior in this regard from a byte string.\n \"\"\"\n\n h1 = fits.Header()\n h1['TEST'] = 'abcdefg' * 30\n\n h2 = fits.Header()\n h2['TEST'] = 'abcdefg' * 30\n\n assert str(h1) == str(h2)\n\n def test_long_string_repr(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193\n\n Ensure that the __repr__() for cards represented with CONTINUE cards is\n split across multiple lines (broken at each *physical* card).\n \"\"\"\n\n header = fits.Header()\n header['TEST1'] = ('Regular value', 'Regular comment')\n header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)\n header['TEST3'] = ('Regular value', 'Regular comment')\n\n assert (repr(header).splitlines() ==\n [str(fits.Card('TEST1', 'Regular value', 'Regular comment')),\n \"TEST2 = 'long string value long string value long string value long string &' \",\n 
\"CONTINUE 'value long string value long string value long string value long &' \",\n \"CONTINUE 'string value long string value long string value &' \",\n \"CONTINUE '&' / long comment long comment long comment long comment long \",\n \"CONTINUE '&' / comment long comment long comment long comment long comment \",\n \"CONTINUE '' / long comment \",\n str(fits.Card('TEST3', 'Regular value', 'Regular comment'))])\n\n def test_blank_keyword_long_value(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194\n\n Test that a blank keyword ('') can be assigned a too-long value that is\n continued across multiple cards with blank keywords, just like COMMENT\n and HISTORY cards.\n \"\"\"\n\n value = 'long string value ' * 10\n header = fits.Header()\n header[''] = value\n\n assert len(header) == 3\n assert ' '.join(header['']) == value.rstrip()\n\n # Ensure that this works like other commentary keywords\n header['COMMENT'] = value\n header['HISTORY'] = value\n assert header['COMMENT'] == header['HISTORY']\n assert header['COMMENT'] == header['']\n\n def test_long_string_from_file(self):\n c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)\n hdu = fits.PrimaryHDU()\n hdu.header.append(c)\n hdu.writeto(self.temp('test_new.fits'))\n\n hdul = fits.open(self.temp('test_new.fits'))\n c = hdul[0].header.cards['abc']\n hdul.close()\n assert (str(c) ==\n \"ABC = 'long string value long string value long string value long string &' \"\n \"CONTINUE 'value long string value long string value long string value long &' \"\n \"CONTINUE 'string value long string value long string value &' \"\n \"CONTINUE '&' / long comment long comment long comment long comment long \"\n \"CONTINUE '&' / comment long comment long comment long comment long comment \"\n \"CONTINUE '' / long comment \")\n\n def test_word_in_long_string_too_long(self):\n # if a word in a long string is too long, it will be cut in the middle\n c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)\n assert (str(c) ==\n \"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'\"\n \"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'\"\n \"CONTINUE 'elongstringvalue&' \"\n \"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme\"\n \"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment \")\n\n def test_long_string_value_via_fromstring(self, capsys):\n # long string value via fromstring() method\n c = fits.Card.fromstring(\n _pad(\"abc = 'longstring''s testing & ' \"\n \"/ comments in line 1\") +\n _pad(\"continue 'continue with long string but without the \"\n \"ampersand at the end' /\") +\n _pad(\"continue 'continue must have string value (with quotes)' \"\n \"/ comments with ''. \"))\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'Verification reported errors'):\n assert (str(c) ==\n \"ABC = 'longstring''s testing continue with long string but without the &' \"\n \"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' \"\n \"CONTINUE '' / comments in line 1 comments with ''. 
\")\n\n def test_continue_card_with_equals_in_value(self):\n \"\"\"\n Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117\n \"\"\"\n\n c = fits.Card.fromstring(\n _pad(\"EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'\") +\n _pad(\"CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'\") +\n _pad(\"CONTINUE '&' / pysyn expression\"))\n\n assert c.keyword == 'EXPR'\n assert (c.value ==\n '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '\n '* 5.87359e-12 * MWAvg(Av=0.12)')\n assert c.comment == 'pysyn expression'\n\n def test_final_continue_card_lacks_ampersand(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/3282\n \"\"\"\n\n h = fits.Header()\n h['SVALUE'] = 'A' * 69\n assert repr(h).splitlines()[-1] == _pad(\"CONTINUE 'AA'\")\n\n def test_final_continue_card_ampersand_removal_on_long_comments(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/3282\n \"\"\"\n\n c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10)\n assert (str(c) ==\n \"TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' \"\n \"CONTINUE 'valuelong valuelong valuelong value&' \"\n \"CONTINUE '&' / long comment &long comment &long comment &long comment &long \"\n \"CONTINUE '&' / comment &long comment &long comment &long comment &long comment \"\n \"CONTINUE '' / &long comment & \")\n\n def test_hierarch_card_creation(self):\n # Test automatic upgrade to hierarch card\n with pytest.warns(AstropyUserWarning, match='HIERARCH card will be created') as w:\n c = fits.Card('ESO INS SLIT2 Y1FRML',\n 'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')\n assert len(w) == 1\n assert (str(c) ==\n \"HIERARCH ESO INS SLIT2 Y1FRML= \"\n \"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'\")\n\n # Test manual creation of hierarch card\n c = fits.Card('hierarch abcdefghi', 10)\n assert str(c) == _pad(\"HIERARCH abcdefghi = 10\")\n c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',\n 'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')\n assert (str(c) ==\n \"HIERARCH ESO INS SLIT2 Y1FRML= \"\n \"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'\")\n\n def test_hierarch_with_abbrev_value_indicator(self):\n \"\"\"Regression test for\n https://github.com/spacetelescope/PyFITS/issues/5\n \"\"\"\n\n c = fits.Card.fromstring(\"HIERARCH key.META_4='calFileVersion'\")\n assert c.keyword == 'key.META_4'\n assert c.value == 'calFileVersion'\n assert c.comment == ''\n\n def test_hierarch_not_warn(self):\n \"\"\"Check that compressed image headers do not issue HIERARCH warnings.\n \"\"\"\n filename = fits.util.get_testdata_filepath('compressed_image.fits')\n with fits.open(filename) as hdul:\n header = hdul[1].header\n with pytest.warns(None) as warning_list:\n header[\"HIERARCH LONG KEYWORD\"] = 42\n assert len(warning_list) == 0\n assert header[\"LONG KEYWORD\"] == 42\n assert header[\"HIERARCH LONG KEYWORD\"] == 42\n\n # Check that it still warns if we do not use HIERARCH\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'greater than 8 characters'):\n header[\"LONG KEYWORD2\"] = 1\n assert header[\"LONG KEYWORD2\"] == 1\n\n def test_hierarch_keyword_whitespace(self):\n \"\"\"\n Regression test for\n https://github.com/spacetelescope/PyFITS/issues/6\n\n Make sure any leading or trailing whitespace around HIERARCH\n keywords is stripped from the actual keyword value.\n \"\"\"\n\n c = fits.Card.fromstring(\n \"HIERARCH key.META_4 = 'calFileVersion'\")\n assert c.keyword == 'key.META_4'\n assert c.value == 'calFileVersion'\n 

    def test_hierarch_keyword_whitespace(self):
        """
        Regression test for
        https://github.com/spacetelescope/PyFITS/issues/6

        Make sure any leading or trailing whitespace around HIERARCH
        keywords is stripped from the actual keyword value.
        """

        c = fits.Card.fromstring(
            "HIERARCH key.META_4 = 'calFileVersion'")
        assert c.keyword == 'key.META_4'
        assert c.value == 'calFileVersion'
        assert c.comment == ''

        # Test also with creation via the Card constructor
        c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
        assert c.keyword == 'key.META_4'
        assert c.value == 'calFileVersion'
        assert c.comment == ''

    def test_verify_mixed_case_hierarch(self):
        """Regression test for
        https://github.com/spacetelescope/PyFITS/issues/7

        Assures that HIERARCH keywords with lower-case characters and other
        normally invalid keyword characters are not considered invalid.
        """

        c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment')
        # This should not raise any exceptions
        c.verify('exception')
        assert c.keyword == 'WeirdCard.~!@#_^$%&'
        assert c.value == 'The value'
        assert c.comment == 'a comment'

        # Test also the specific case from the original bug report
        header = fits.Header([
            ('simple', True),
            ('BITPIX', 8),
            ('NAXIS', 0),
            ('EXTEND', True, 'May contain datasets'),
            ('HIERARCH key.META_0', 'detRow')
        ])
        hdu = fits.PrimaryHDU(header=header)
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits')) as hdul:
            header2 = hdul[0].header
            assert (str(header.cards[header.index('key.META_0')]) ==
                    str(header2.cards[header2.index('key.META_0')]))

    def test_missing_keyword(self):
        """Test that accessing a non-existent keyword raises a KeyError."""

        header = fits.Header()
        # De-referencing header through the inline function should behave
        # identically to accessing it in the pytest.raises context below.
        pytest.raises(KeyError, lambda k: header[k], 'NAXIS')
        # Test exception with message
        with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
            header['NAXIS']

    def test_hierarch_card_lookup(self):
        header = fits.Header()
        header['hierarch abcdefghi'] = 10
        assert 'abcdefghi' in header
        assert header['abcdefghi'] == 10
        # This used to be assert_false, but per ticket
        # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
        # should be treated case-insensitively when performing lookups
        assert 'ABCDEFGHI' in header

    def test_hierarch_card_delete(self):
        header = fits.Header()
        header['hierarch abcdefghi'] = 10
        del header['hierarch abcdefghi']

    def test_hierarch_card_insert_delete(self):
        header = fits.Header()
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'greater than 8 characters'):
            header['abcdefghi'] = 10
        header['abcdefgh'] = 10
        header['abcdefg'] = 10
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'greater than 8 characters'):
            header.insert(2, ('abcdefghij', 10))
        del header['abcdefghij']
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'greater than 8 characters'):
            header.insert(2, ('abcdefghij', 10))
        del header[2]
        assert list(header.keys())[2] == 'abcdefg'.upper()
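
    # Illustrative sketch (not part of the original suite): Header.update()
    # follows the same convention -- writing through an explicit 'HIERARCH '
    # prefix is silent, while omitting the prefix draws a VerifyWarning,
    # which is exactly what the warning counters in the next two tests keep
    # track of:
    #
    #     hdr.update({'HIERARCH MY LONG KEY': 1})  # quiet
    #     hdr.update({'MY LONG KEY': 2})           # warns, updates same card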

    def test_hierarch_create_and_update(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158

        Tests several additional use cases for working with HIERARCH cards.
        """

        msg = 'a HIERARCH card will be created'

        header = fits.Header()
        with pytest.warns(VerifyWarning) as w:
            header.update({'HIERARCH BLAH BLAH': 'TESTA'})
            assert len(w) == 0
            assert 'BLAH BLAH' in header
            assert header['BLAH BLAH'] == 'TESTA'

            header.update({'HIERARCH BLAH BLAH': 'TESTB'})
            assert len(w) == 0
            assert header['BLAH BLAH'] == 'TESTB'

            # Update without explicitly stating 'HIERARCH':
            header.update({'BLAH BLAH': 'TESTC'})
            assert len(w) == 1
            assert len(header) == 1
            assert header['BLAH BLAH'] == 'TESTC'

            # Test case-insensitivity
            header.update({'HIERARCH blah blah': 'TESTD'})
            assert len(w) == 1
            assert len(header) == 1
            assert header['blah blah'] == 'TESTD'

            header.update({'blah blah': 'TESTE'})
            assert len(w) == 2
            assert len(header) == 1
            assert header['blah blah'] == 'TESTE'

            # Create a HIERARCH card > 8 characters without explicitly stating
            # 'HIERARCH'
            header.update({'BLAH BLAH BLAH': 'TESTA'})
            assert len(w) == 3
            assert msg in str(w[0].message)

            header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'})
            assert len(w) == 3
            assert header['BLAH BLAH BLAH'] == 'TESTB'

            # Update without explicitly stating 'HIERARCH':
            header.update({'BLAH BLAH BLAH': 'TESTC'})
            assert len(w) == 4
            assert header['BLAH BLAH BLAH'] == 'TESTC'

            # Test case-insensitivity
            header.update({'HIERARCH blah blah blah': 'TESTD'})
            assert len(w) == 4
            assert header['blah blah blah'] == 'TESTD'

            header.update({'blah blah blah': 'TESTE'})
            assert len(w) == 5
            assert header['blah blah blah'] == 'TESTE'

    def test_short_hierarch_create_and_update(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158

        Tests several additional use cases for working with HIERARCH cards,
        specifically where the keyword is fewer than 8 characters, but contains
        invalid characters such that it can only be created as a HIERARCH card.
        """

        msg = 'a HIERARCH card will be created'

        header = fits.Header()
        with pytest.warns(VerifyWarning) as w:
            header.update({'HIERARCH BLA BLA': 'TESTA'})
            assert len(w) == 0
            assert 'BLA BLA' in header
            assert header['BLA BLA'] == 'TESTA'

            header.update({'HIERARCH BLA BLA': 'TESTB'})
            assert len(w) == 0
            assert header['BLA BLA'] == 'TESTB'

            # Update without explicitly stating 'HIERARCH':
            header.update({'BLA BLA': 'TESTC'})
            assert len(w) == 1
            assert header['BLA BLA'] == 'TESTC'

            # Test case-insensitivity
            header.update({'HIERARCH bla bla': 'TESTD'})
            assert len(w) == 1
            assert len(header) == 1
            assert header['bla bla'] == 'TESTD'

            header.update({'bla bla': 'TESTE'})
            assert len(w) == 2
            assert len(header) == 1
            assert header['bla bla'] == 'TESTE'

        header = fits.Header()
        with pytest.warns(VerifyWarning) as w:
            # Create a HIERARCH card containing invalid characters without
            # explicitly stating 'HIERARCH'
            header.update({'BLA BLA': 'TESTA'})
            print([x.category for x in w])
            assert len(w) == 1
            assert msg in str(w[0].message)

            header.update({'HIERARCH BLA BLA': 'TESTB'})
            assert len(w) == 1
            assert header['BLA BLA'] == 'TESTB'

            # Update without explicitly stating 'HIERARCH':
            header.update({'BLA BLA': 'TESTC'})
            assert len(w) == 2
            assert header['BLA BLA'] == 'TESTC'

            # Test case-insensitivity
            header.update({'HIERARCH bla bla': 'TESTD'})
            assert len(w) == 2
            assert len(header) == 1
            assert header['bla bla'] == 'TESTD'

            header.update({'bla bla': 'TESTE'})
            assert len(w) == 3
            assert len(header) == 1
            assert header['bla bla'] == 'TESTE'
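
    # Illustrative sketch (not part of the original suite): __setitem__
    # accepts a bare value, a 1-tuple (value,), or a 2-tuple
    # (value, comment); anything longer raises ValueError, as the next
    # three tests pin down:
    #
    #     hdr = fits.Header()
    #     hdr['KEY'] = ('value', 'comment')
    #     assert hdr.comments['KEY'] == 'comment'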

    def test_header_setitem_invalid(self):
        header = fits.Header()

        def test():
            header['FOO'] = ('bar', 'baz', 'qux')

        pytest.raises(ValueError, test)

    def test_header_setitem_1tuple(self):
        header = fits.Header()
        header['FOO'] = ('BAR',)
        header['FOO2'] = (None,)
        assert header['FOO'] == 'BAR'
        assert header['FOO2'] is None
        assert header[0] == 'BAR'
        assert header.comments[0] == ''
        assert header.comments['FOO'] == ''

    def test_header_setitem_2tuple(self):
        header = fits.Header()
        header['FOO'] = ('BAR', 'BAZ')
        header['FOO2'] = (None, None)
        assert header['FOO'] == 'BAR'
        assert header['FOO2'] is None
        assert header[0] == 'BAR'
        assert header.comments[0] == 'BAZ'
        assert header.comments['FOO'] == 'BAZ'
        assert header.comments['FOO2'] == ''

    def test_header_set_value_to_none(self):
        """
        Setting the value of a card to None should simply give that card an
        undefined value. Undefined value should map to None.
        """

        header = fits.Header()
        header['FOO'] = 'BAR'
        assert header['FOO'] == 'BAR'
        header['FOO'] = None
        assert header['FOO'] is None

        # Create a header that contains an undefined value and a defined
        # value.
        hstr = "UNDEF = \nDEFINED = 42"
        header = fits.Header.fromstring(hstr, sep='\n')

        # Explicitly add a card with an UNDEFINED value
        c = fits.Card("UNDEF2", fits.card.UNDEFINED)
        header.extend([c])

        # And now assign an undefined value to the header through setitem
        header['UNDEF3'] = fits.card.UNDEFINED

        # Tuple assignment
        header.append(("UNDEF5", None, "Undefined value"), end=True)
        header.append("UNDEF6")

        assert header['DEFINED'] == 42
        assert header['UNDEF'] is None
        assert header['UNDEF2'] is None
        assert header['UNDEF3'] is None
        assert header['UNDEF5'] is None
        assert header['UNDEF6'] is None

        # Assign an undefined value to a new card
        header['UNDEF4'] = None

        # Overwrite an existing value with None
        header["DEFINED"] = None

        # All headers now should be undefined
        for c in header.cards:
            assert c.value == fits.card.UNDEFINED

    def test_set_comment_only(self):
        header = fits.Header([('A', 'B', 'C')])
        header.set('A', comment='D')
        assert header['A'] == 'B'
        assert header.comments['A'] == 'D'

    def test_header_iter(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])
        assert list(header) == ['A', 'C']

    def test_header_slice(self):
        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
        newheader = header[1:]
        assert len(newheader) == 2
        assert 'A' not in newheader
        assert 'C' in newheader
        assert 'E' in newheader

        newheader = header[::-1]
        assert len(newheader) == 3
        assert newheader[0] == 'F'
        assert newheader[1] == 'D'
        assert newheader[2] == 'B'

        newheader = header[::2]
        assert len(newheader) == 2
        assert 'A' in newheader
        assert 'C' not in newheader
        assert 'E' in newheader

    def test_header_slice_assignment(self):
        """
        Assigning to a slice should just assign new values to the cards
        included in the slice.
        """

        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])

        # Test assigning slice to the same value; this works similarly to numpy
        # arrays
        header[1:] = 1
        assert header[1] == 1
        assert header[2] == 1

        # Though strings are iterable they should be treated as a scalar value
        header[1:] = 'GH'
        assert header[1] == 'GH'
        assert header[2] == 'GH'

        # Now assign via an iterable
        header[1:] = ['H', 'I']
        assert header[1] == 'H'
        assert header[2] == 'I'

    def test_header_slice_delete(self):
        """Test deleting a slice of cards from the header."""

        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
        del header[1:]
        assert len(header) == 1
        assert header[0] == 'B'
        del header[:]
        assert len(header) == 0

    def test_wildcard_slice(self):
        """Test selecting a subsection of a header via wildcard matching."""

        header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
        newheader = header['AB*']
        assert len(newheader) == 2
        assert newheader[0] == 0
        assert newheader[1] == 2
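
    # Illustrative sketch (not part of the original suite): in wildcard
    # lookups '*' matches any run of characters and '?' matches exactly
    # one, so a pattern like 'DATE?*' matches DATE-OBS but not the bare
    # DATE keyword:
    #
    #     hdr = fits.Header([('DATE', 1), ('DATE-OBS', 2)])
    #     assert list(hdr['DATE?*']) == ['DATE-OBS']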

    def test_wildcard_with_hyphen(self):
        """
        Regression test for issue where wildcards did not work on keywords
        containing hyphens.
        """

        header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
        assert len(header['DATE*']) == 3
        assert len(header['DATE?*']) == 2
        assert len(header['DATE-*']) == 2

    def test_wildcard_slice_assignment(self):
        """Test assigning to a header slice selected via wildcard matching."""

        header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])

        # Test assigning slice to the same value; this works similarly to numpy
        # arrays
        header['AB*'] = 1
        assert header[0] == 1
        assert header[2] == 1

        # Though strings are iterable they should be treated as a scalar value
        header['AB*'] = 'GH'
        assert header[0] == 'GH'
        assert header[2] == 'GH'

        # Now assign via an iterable
        header['AB*'] = ['H', 'I']
        assert header[0] == 'H'
        assert header[2] == 'I'

    def test_wildcard_slice_deletion(self):
        """Test deleting cards from a header that match a wildcard pattern."""

        header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
        del header['AB*']
        assert len(header) == 1
        assert header[0] == 1

    def test_header_history(self):
        header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
                              ('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
        assert header['HISTORY'] == [1, 2, 4, 5]

    def test_header_clear(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])
        header.clear()
        assert 'A' not in header
        assert 'C' not in header
        assert len(header) == 0

    def test_header_fromkeys(self):
        header = fits.Header.fromkeys(['A', 'B'])
        assert 'A' in header
        assert header['A'] is None
        assert header.comments['A'] == ''
        assert 'B' in header
        assert header['B'] is None
        assert header.comments['B'] == ''

    def test_header_fromkeys_with_value(self):
        header = fits.Header.fromkeys(['A', 'B'], 'C')
        assert 'A' in header
        assert header['A'] == 'C'
        assert header.comments['A'] == ''
        assert 'B' in header
        assert header['B'] == 'C'
        assert header.comments['B'] == ''

    def test_header_fromkeys_with_value_and_comment(self):
        header = fits.Header.fromkeys(['A'], ('B', 'C'))
        assert 'A' in header
        assert header['A'] == 'B'
        assert header.comments['A'] == 'C'

    def test_header_fromkeys_with_duplicates(self):
        header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
        assert 'A' in header
        assert ('A', 0) in header
        assert ('A', 1) in header
        assert ('A', 2) not in header
        assert header[0] == 'C'
        assert header['A'] == 'C'
        assert header[('A', 0)] == 'C'
        assert header[2] == 'C'
        assert header[('A', 1)] == 'C'

    def test_header_items(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])
        assert list(header.items()) == [('A', 'B'), ('C', 'D')]

    def test_header_iterkeys(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])
        for a, b in zip(header.keys(), header):
            assert a == b

    def test_header_itervalues(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])
        for a, b in zip(header.values(), ['B', 'D']):
            assert a == b

    def test_header_keys(self):
        with fits.open(self.data('arange.fits')) as hdul:
            assert (list(hdul[0].header) ==
                    ['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
                     'EXTEND'])
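
    # Illustrative sketch (not part of the original suite): Header.pop() is
    # deliberately both list-like and dict-like -- no argument pops the last
    # card, an integer pops by position, and a string pops by keyword
    # (optionally with a default):
    #
    #     hdr = fits.Header([('A', 1), ('B', 2)])
    #     assert hdr.pop() == 2      # list-like, last card
    #     assert hdr.pop('A') == 1   # dict-like, by keyword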

    def test_header_list_like_pop(self):
        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
                              ('G', 'H')])

        last = header.pop()
        assert last == 'H'
        assert len(header) == 3
        assert list(header) == ['A', 'C', 'E']

        mid = header.pop(1)
        assert mid == 'D'
        assert len(header) == 2
        assert list(header) == ['A', 'E']

        first = header.pop(0)
        assert first == 'B'
        assert len(header) == 1
        assert list(header) == ['E']

        pytest.raises(IndexError, header.pop, 42)

    def test_header_dict_like_pop(self):
        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
                              ('G', 'H')])
        pytest.raises(TypeError, header.pop, 'A', 'B', 'C')

        last = header.pop('G')
        assert last == 'H'
        assert len(header) == 3
        assert list(header) == ['A', 'C', 'E']

        mid = header.pop('C')
        assert mid == 'D'
        assert len(header) == 2
        assert list(header) == ['A', 'E']

        first = header.pop('A')
        assert first == 'B'
        assert len(header) == 1
        assert list(header) == ['E']

        default = header.pop('X', 'Y')
        assert default == 'Y'
        assert len(header) == 1

        pytest.raises(KeyError, header.pop, 'X')

    def test_popitem(self):
        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
        keyword, value = header.popitem()
        assert keyword not in header
        assert len(header) == 2
        keyword, value = header.popitem()
        assert keyword not in header
        assert len(header) == 1
        keyword, value = header.popitem()
        assert keyword not in header
        assert len(header) == 0
        pytest.raises(KeyError, header.popitem)

    def test_setdefault(self):
        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
        assert header.setdefault('A') == 'B'
        assert header.setdefault('C') == 'D'
        assert header.setdefault('E') == 'F'
        assert len(header) == 3
        assert header.setdefault('G', 'H') == 'H'
        assert len(header) == 4
        assert 'G' in header
        assert header.setdefault('G', 'H') == 'H'
        assert len(header) == 4

    def test_update_from_dict(self):
        """
        Test adding new cards and updating existing cards from a dict using
        Header.update()
        """

        header = fits.Header([('A', 'B'), ('C', 'D')])
        header.update({'A': 'E', 'F': 'G'})
        assert header['A'] == 'E'
        assert header[0] == 'E'
        assert 'F' in header
        assert header['F'] == 'G'
        assert header[-1] == 'G'

        # Same as above but this time pass the update dict as keyword arguments
        header = fits.Header([('A', 'B'), ('C', 'D')])
        header.update(A='E', F='G')
        assert header['A'] == 'E'
        assert header[0] == 'E'
        assert 'F' in header
        assert header['F'] == 'G'
        assert header[-1] == 'G'

    def test_update_from_iterable(self):
        """
        Test adding new cards and updating existing cards from an iterable of
        cards and card tuples.
        """

        header = fits.Header([('A', 'B'), ('C', 'D')])
        header.update([('A', 'E'), fits.Card('F', 'G')])
        assert header['A'] == 'E'
        assert header[0] == 'E'
        assert 'F' in header
        assert header['F'] == 'G'
        assert header[-1] == 'G'

    def test_header_extend(self):
        """
        Test extending a header both with and without stripping cards from the
        extension header.
        """

        hdu = fits.PrimaryHDU()
        hdu2 = fits.ImageHDU()
        hdu2.header['MYKEY'] = ('some val', 'some comment')
        hdu.header += hdu2.header
        assert len(hdu.header) == 5
        assert hdu.header[-1] == 'some val'

        # Same thing, but using + instead of +=
        hdu = fits.PrimaryHDU()
        hdu.header = hdu.header + hdu2.header
        assert len(hdu.header) == 5
        assert hdu.header[-1] == 'some val'

        # Directly append the other header in full--not usually a desirable
        # operation when the header is coming from another HDU
        hdu.header.extend(hdu2.header, strip=False)
        assert len(hdu.header) == 11
        assert list(hdu.header)[5] == 'XTENSION'
        assert hdu.header[-1] == 'some val'
        assert ('MYKEY', 1) in hdu.header
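
    # Illustrative sketch (not part of the original suite): Header.extend()
    # mirrors list.extend() but is aware of FITS structure; with the default
    # strip=True, structural cards such as XTENSION and BITPIX are stripped
    # from the incoming header first, while the unique and update flags
    # (tested below) control how duplicate keywords are merged:
    #
    #     hdu.header.extend(hdu2.header, unique=True)  # skip duplicates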

    def test_header_extend_unique(self):
        """
        Test extending the header with and without unique=True.
        """
        hdu = fits.PrimaryHDU()
        hdu2 = fits.ImageHDU()
        hdu.header['MYKEY'] = ('some val', 'some comment')
        hdu2.header['MYKEY'] = ('some other val', 'some other comment')
        hdu.header.extend(hdu2.header)
        assert len(hdu.header) == 6
        assert hdu.header[-2] == 'some val'
        assert hdu.header[-1] == 'some other val'

        hdu = fits.PrimaryHDU()
        hdu2 = fits.ImageHDU()
        hdu.header['MYKEY'] = ('some val', 'some comment')
        hdu2.header['MYKEY'] = ('some other val', 'some other comment')
        hdu.header.extend(hdu2.header, unique=True)
        assert len(hdu.header) == 5
        assert hdu.header[-1] == 'some val'

    def test_header_extend_unique_commentary(self):
        """
        Test extending header with and without unique=True and commentary
        cards in the header being added. Issue astropy/astropy#3967
        """
        for commentary_card in ['', 'COMMENT', 'HISTORY']:
            for is_unique in [True, False]:
                hdu = fits.PrimaryHDU()
                # Make sure we are testing the case we want.
                assert commentary_card not in hdu.header
                hdu2 = fits.ImageHDU()
                hdu2.header[commentary_card] = 'My text'
                hdu.header.extend(hdu2.header, unique=is_unique)
                assert len(hdu.header) == 5
                assert hdu.header[commentary_card][0] == 'My text'

    def test_header_extend_update(self):
        """
        Test extending the header with and without update=True.
        """

        hdu = fits.PrimaryHDU()
        hdu2 = fits.ImageHDU()
        hdu.header['MYKEY'] = ('some val', 'some comment')
        hdu.header['HISTORY'] = 'history 1'
        hdu2.header['MYKEY'] = ('some other val', 'some other comment')
        hdu2.header['HISTORY'] = 'history 1'
        hdu2.header['HISTORY'] = 'history 2'
        hdu.header.extend(hdu2.header)
        assert len(hdu.header) == 9
        assert ('MYKEY', 0) in hdu.header
        assert ('MYKEY', 1) in hdu.header
        assert hdu.header[('MYKEY', 1)] == 'some other val'
        assert len(hdu.header['HISTORY']) == 3
        assert hdu.header[-1] == 'history 2'

        hdu = fits.PrimaryHDU()
        hdu.header['MYKEY'] = ('some val', 'some comment')
        hdu.header['HISTORY'] = 'history 1'
        hdu.header.extend(hdu2.header, update=True)
        assert len(hdu.header) == 7
        assert ('MYKEY', 0) in hdu.header
        assert ('MYKEY', 1) not in hdu.header
        assert hdu.header['MYKEY'] == 'some other val'
        assert len(hdu.header['HISTORY']) == 2
        assert hdu.header[-1] == 'history 2'

    def test_header_extend_update_commentary(self):
        """
        Test extending header with and without update=True and commentary
        cards in the header being added.

        Though not quite the same as astropy/astropy#3967, update=True hits
        the same if statement as that issue.
        """
        for commentary_card in ['', 'COMMENT', 'HISTORY']:
            for is_update in [True, False]:
                hdu = fits.PrimaryHDU()
                # Make sure we are testing the case we want.
                assert commentary_card not in hdu.header
                hdu2 = fits.ImageHDU()
                hdu2.header[commentary_card] = 'My text'
                hdu.header.extend(hdu2.header, update=is_update)
                assert len(hdu.header) == 5
                assert hdu.header[commentary_card][0] == 'My text'

    def test_header_extend_exact(self):
        """
        Test that extending an empty header with the contents of an existing
        header can exactly duplicate that header, given strip=False and
        end=True.
        """

        header = fits.getheader(self.data('test0.fits'))
        header2 = fits.Header()
        header2.extend(header, strip=False, end=True)
        assert header == header2
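
    # Illustrative sketch (not part of the original suite): count() and
    # index() round out the list-like API; note that count() raises
    # KeyError for an absent keyword rather than returning 0, which the
    # next test asserts explicitly:
    #
    #     hdr = fits.Header([('A', 1), ('A', 2)])
    #     assert hdr.count('A') == 2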

    def test_header_count(self):
        header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
        assert header.count('A') == 1
        assert header.count('C') == 1
        assert header.count('E') == 1
        header['HISTORY'] = 'a'
        header['HISTORY'] = 'b'
        assert header.count('HISTORY') == 2
        pytest.raises(KeyError, header.count, 'G')

    def test_header_append_use_blanks(self):
        """
        Tests that blank cards can be appended, and that future appends will
        use blank cards when available (unless useblanks=False)
        """

        header = fits.Header([('A', 'B'), ('C', 'D')])

        # Append a couple blanks
        header.append()
        header.append()
        assert len(header) == 4
        assert header[-1] == ''
        assert header[-2] == ''

        # New card should fill the first blank by default
        header.append(('E', 'F'))
        assert len(header) == 4
        assert header[-2] == 'F'
        assert header[-1] == ''

        # This card should not use up a blank spot
        header.append(('G', 'H'), useblanks=False)
        assert len(header) == 5
        assert header[-1] == ''
        assert header[-2] == 'H'

    def test_header_append_keyword_only(self):
        """
        Test appending a new card with just the keyword, and no value or
        comment given.
        """

        header = fits.Header([('A', 'B'), ('C', 'D')])
        header.append('E')
        assert len(header) == 3
        assert list(header)[-1] == 'E'
        assert header[-1] is None
        assert header.comments['E'] == ''

        # Try appending a blank--normally this can be accomplished with just
        # header.append(), but header.append('') should also work (and is maybe
        # a little more clear)
        header.append('')
        assert len(header) == 4

        assert list(header)[-1] == ''
        assert header[''] == ''
        assert header.comments[''] == ''

    def test_header_insert_use_blanks(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])

        # Append a couple blanks
        header.append()
        header.append()

        # Insert a new card; should use up one of the blanks
        header.insert(1, ('E', 'F'))
        assert len(header) == 4
        assert header[1] == 'F'
        assert header[-1] == ''
        assert header[-2] == 'D'

        # Insert a new card without using blanks
        header.insert(1, ('G', 'H'), useblanks=False)
        assert len(header) == 5
        assert header[1] == 'H'
        assert header[-1] == ''

    def test_header_insert_before_keyword(self):
        """
        Test that a keyword name or tuple can be used to insert new keywords.

        Also tests the ``after`` keyword argument.

        Regression test for https://github.com/spacetelescope/PyFITS/issues/12
        """

        header = fits.Header([
            ('NAXIS1', 10), ('COMMENT', 'Comment 1'),
            ('COMMENT', 'Comment 3')])

        header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
        assert list(header.keys())[0] == 'NAXIS'
        assert header[0] == 2
        assert header.comments[0] == 'Number of axes'

        header.insert('NAXIS1', ('NAXIS2', 20), after=True)
        assert list(header.keys())[1] == 'NAXIS1'
        assert list(header.keys())[2] == 'NAXIS2'
        assert header[2] == 20

        header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
        assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']

        header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
        assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
                                     'Comment 4']

        header.insert(-1, ('TEST1', True))
        assert list(header.keys())[-2] == 'TEST1'

        header.insert(-1, ('TEST2', True), after=True)
        assert list(header.keys())[-1] == 'TEST2'
        assert list(header.keys())[-3] == 'TEST1'
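
    # Illustrative sketch (not part of the original suite): insert() is
    # equally flexible about the reference point -- an integer index, a
    # keyword name, or a (keyword, n) tuple addressing the n-th duplicate
    # all work, and after=True places the new card after the reference
    # instead of before it:
    #
    #     hdr.insert(('COMMENT', 1), ('COMMENT', 'before the second one'))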

    def test_remove(self):
        header = fits.Header([('A', 'B'), ('C', 'D')])

        # When keyword is present in the header it should be removed.
        header.remove('C')
        assert len(header) == 1
        assert list(header) == ['A']
        assert 'C' not in header

        # When keyword is not present in the header and ignore_missing is
        # False, KeyError should be raised
        with pytest.raises(KeyError):
            header.remove('F')

        # When keyword is not present and ignore_missing is True, KeyError
        # will be ignored
        header.remove('F', ignore_missing=True)
        assert len(header) == 1

        # Test for removing all instances of a keyword
        header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')])
        header.remove('A', remove_all=True)
        assert 'A' not in header
        assert len(header) == 1
        assert list(header) == ['C']
        assert header[0] == 'D'

    def test_header_comments(self):
        header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
        assert (repr(header.comments) ==
                ' A C\n'
                ' DEF H')

    def test_comment_slices_and_filters(self):
        header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
                              ('AI', 'J', 'K')])
        s = header.comments[1:]
        assert list(s) == ['H', 'K']
        s = header.comments[::-1]
        assert list(s) == ['K', 'H', 'D']
        s = header.comments['A*']
        assert list(s) == ['D', 'K']

    def test_comment_slice_filter_assign(self):
        header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
                              ('AI', 'J', 'K')])
        header.comments[1:] = 'L'
        assert list(header.comments) == ['D', 'L', 'L']
        assert header.cards[header.index('AB')].comment == 'D'
        assert header.cards[header.index('EF')].comment == 'L'
        assert header.cards[header.index('AI')].comment == 'L'

        header.comments[::-1] = header.comments[:]
        assert list(header.comments) == ['L', 'L', 'D']

        header.comments['A*'] = ['M', 'N']
        assert list(header.comments) == ['M', 'L', 'N']

    def test_commentary_slicing(self):
        header = fits.Header()

        indices = list(range(5))

        for idx in indices:
            header['HISTORY'] = idx

        # Just a few sample slice types; this won't get all corner cases but if
        # these all work we should be in good shape
        assert header['HISTORY'][1:] == indices[1:]
        assert header['HISTORY'][:3] == indices[:3]
        assert header['HISTORY'][:6] == indices[:6]
        assert header['HISTORY'][:-2] == indices[:-2]
        assert header['HISTORY'][::-1] == indices[::-1]
        assert header['HISTORY'][1::-1] == indices[1::-1]
        assert header['HISTORY'][1:5:2] == indices[1:5:2]

        # Same tests, but copy the values first; as it turns out this is
        # different from just directly doing an __eq__ as in the first set of
        # assertions
        header.insert(0, ('A', 'B', 'C'))
        header.append(('D', 'E', 'F'), end=True)
        assert list(header['HISTORY'][1:]) == indices[1:]
        assert list(header['HISTORY'][:3]) == indices[:3]
        assert list(header['HISTORY'][:6]) == indices[:6]
        assert list(header['HISTORY'][:-2]) == indices[:-2]
        assert list(header['HISTORY'][::-1]) == indices[::-1]
        assert list(header['HISTORY'][1::-1]) == indices[1::-1]
        assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]

    def test_update_commentary(self):
        header = fits.Header()
        header['FOO'] = 'BAR'
        header['HISTORY'] = 'ABC'
        header['FRED'] = 'BARNEY'
        header['HISTORY'] = 'DEF'
        header['HISTORY'] = 'GHI'

        assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']

        # Single value update
        header['HISTORY'][0] = 'FOO'
        assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']

        # Single value partial slice update
        header['HISTORY'][1:] = 'BAR'
        assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']

        # Multi-value update
        header['HISTORY'][:] = ['BAZ', 'QUX']
        assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']

    def test_commentary_comparison(self):
        """
        Regression test for an issue found in *writing* the regression test for
        https://github.com/astropy/astropy/issues/2363, where comparison of
        the list of values for a commentary keyword did not always compare
        correctly with other iterables.
        """

        header = fits.Header()
        header['HISTORY'] = 'hello world'
        header['HISTORY'] = 'hello world'
        header['COMMENT'] = 'hello world'
        assert header['HISTORY'] != header['COMMENT']
        header['COMMENT'] = 'hello world'
        assert header['HISTORY'] == header['COMMENT']

    def test_long_commentary_card(self):
        header = fits.Header()
        header['FOO'] = 'BAR'
        header['BAZ'] = 'QUX'
        longval = 'ABC' * 30
        header['HISTORY'] = longval
        header['FRED'] = 'BARNEY'
        header['HISTORY'] = longval

        assert len(header) == 7
        assert list(header)[2] == 'FRED'
        assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
        assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]

        header.set('HISTORY', longval, after='FOO')
        assert len(header) == 9
        assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
        assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]

        header = fits.Header()
        header.update({'FOO': 'BAR'})
        header.update({'BAZ': 'QUX'})
        longval = 'ABC' * 30
        header.add_history(longval)
        header.update({'FRED': 'BARNEY'})
        header.add_history(longval)

        assert len(header.cards) == 7
        assert header.cards[2].keyword == 'FRED'
        assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
        assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]

        header.add_history(longval, after='FOO')
        assert len(header.cards) == 9
        assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
        assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]

    def test_totxtfile(self):
        with fits.open(self.data('test0.fits')) as hdul:
            hdul[0].header.totextfile(self.temp('header.txt'))

        hdu = fits.ImageHDU()
        hdu.header.update({'MYKEY': 'FOO'})
        hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
                          update=True, update_first=True)

        # Write the hdu out and read it back in again--it should be recognized
        # as a PrimaryHDU
        hdu.writeto(self.temp('test.fits'), output_verify='ignore')

        with fits.open(self.temp('test.fits')) as hdul:
            assert isinstance(hdul[0], fits.PrimaryHDU)

        hdu = fits.ImageHDU()
        hdu.header.update({'MYKEY': 'FOO'})
        hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
                          update=True, update_first=True, strip=False)
        assert 'MYKEY' in hdu.header
        assert 'EXTENSION' not in hdu.header
        assert 'SIMPLE' in hdu.header

        hdu.writeto(self.temp('test.fits'), output_verify='ignore',
                    overwrite=True)

        with fits.open(self.temp('test.fits')) as hdul2:
            assert len(hdul2) == 2
            assert 'MYKEY' in hdul2[1].header

    def test_fromfile(self):
        """Regression test for https://github.com/astropy/astropy/issues/8711
        """
        filename = self.data('scale.fits')
        hdr = fits.Header.fromfile(filename)
        assert hdr['DATASET'] == '2MASS'

    def test_header_fromtextfile(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122

        Manually write a text file containing some header cards ending with
        newlines and ensure that fromtextfile can read them back in.
        """

        header = fits.Header()
        header['A'] = ('B', 'C')
        header['B'] = ('C', 'D')
        header['C'] = ('D', 'E')

        with open(self.temp('test.hdr'), 'w') as f:
            f.write('\n'.join(str(c).strip() for c in header.cards))

        header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
        assert header == header2
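
    # Illustrative sketch (not part of the original suite): totextfile()
    # and fromtextfile() round-trip a header through a plain-text form, one
    # 80-character card per line; the next two tests probe corner cases of
    # that format (stripped trailing spaces and a textual END card):
    #
    #     hdr.totextfile(path)
    #     assert fits.Header.fromtextfile(path) == hdr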

    def test_header_fromtextfile_with_end_card(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154

        Make sure that when a Header is read from a text file that the END card
        is ignored.
        """

        header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])

        # We don't use header.totextfile here because it writes each card with
        # trailing spaces to pad them out to 80 characters. But this bug only
        # presents itself when each card ends immediately with a newline, and
        # no trailing spaces
        with open(self.temp('test.hdr'), 'w') as f:
            f.write('\n'.join(str(c).strip() for c in header.cards))
            f.write('\nEND')

        new_header = fits.Header.fromtextfile(self.temp('test.hdr'))

        assert 'END' not in new_header
        assert header == new_header

    def test_append_end_card(self):
        """
        Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154

        Manually adding an END card to a header should simply result in a
        ValueError (as was the case in PyFITS 3.0 and earlier).
        """

        header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])

        def setitem(k, v):
            header[k] = v

        pytest.raises(ValueError, setitem, 'END', '')
        pytest.raises(ValueError, header.append, 'END')
        pytest.raises(ValueError, header.append, 'END', end=True)
        pytest.raises(ValueError, header.insert, len(header), 'END')
        pytest.raises(ValueError, header.set, 'END')
match=\"Missing padding to end of \"\n \"the FITS block\") as w:\n # Don't raise an exception on missing padding, but still produce a\n # warning that the END card is incomplete\n h = fits.Header.fromfile(s, padding=False)\n assert h == horig\n assert len(w) == 1\n\n def test_invalid_characters(self):\n \"\"\"\n Test header with invalid characters\n \"\"\"\n\n # Generate invalid file with non-ASCII character\n h = fits.Header()\n h['FOO'] = 'BAR'\n h['COMMENT'] = 'hello'\n hdul = fits.PrimaryHDU(header=h, data=np.arange(5))\n hdul.writeto(self.temp('test.fits'))\n\n with open(self.temp('test.fits'), 'rb') as f:\n out = f.read()\n out = out.replace(b'hello', 'héllo'.encode('latin1'))\n out = out.replace(b'BAR', 'BÀR'.encode('latin1'))\n with open(self.temp('test2.fits'), 'wb') as f2:\n f2.write(out)\n\n with pytest.warns(AstropyUserWarning, match=\"non-ASCII characters are \"\n \"present in the FITS file\") as w:\n h = fits.getheader(self.temp('test2.fits'))\n assert h['FOO'] == 'B?R'\n assert h['COMMENT'] == 'h?llo'\n assert len(w) == 1\n\n def test_unnecessary_move(self):\n \"\"\"\n Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125\n\n Ensures that a header is not modified when setting the position of a\n keyword that's already in its correct position.\n \"\"\"\n\n header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])\n\n header.set('B', before=2)\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n header.set('B', after=0)\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n header.set('B', before='C')\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n header.set('B', after='A')\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n header.set('B', before=2)\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n # 123 is well past the end, and C is already at the end, so it's in the\n # right place already\n header.set('C', before=123)\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n header.set('C', after=123)\n assert list(header) == ['A', 'B', 'C']\n assert not header._modified\n\n def test_invalid_float_cards(self):\n \"\"\"Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137\"\"\"\n\n # Create a header containing two of the problematic cards in the test\n # case where this came up:\n hstr = \"FOCALLEN= +1.550000000000e+002\\nAPERTURE= +0.000000000000e+000\"\n h = fits.Header.fromstring(hstr, sep='\\n')\n\n # First the case that *does* work prior to fixing this issue\n assert h['FOCALLEN'] == 155.0\n assert h['APERTURE'] == 0.0\n\n # Now if this were reserialized, would new values for these cards be\n # written with repaired exponent signs?\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'Verification reported errors'):\n assert (str(h.cards['FOCALLEN']) ==\n _pad(\"FOCALLEN= +1.550000000000E+002\"))\n assert h.cards['FOCALLEN']._modified\n with pytest.warns(fits.verify.VerifyWarning,\n match=r'Verification reported errors'):\n assert (str(h.cards['APERTURE']) ==\n _pad(\"APERTURE= +0.000000000000E+000\"))\n assert h.cards['APERTURE']._modified\n assert h._modified\n\n # This is the case that was specifically causing problems; generating\n # the card strings *before* parsing the values. 

    def test_invalid_float_cards(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""

        # Create a header containing two of the problematic cards in the test
        # case where this came up:
        hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
        h = fits.Header.fromstring(hstr, sep='\n')

        # First the case that *does* work prior to fixing this issue
        assert h['FOCALLEN'] == 155.0
        assert h['APERTURE'] == 0.0

        # Now if this were reserialized, would new values for these cards be
        # written with repaired exponent signs?
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'Verification reported errors'):
            assert (str(h.cards['FOCALLEN']) ==
                    _pad("FOCALLEN= +1.550000000000E+002"))
        assert h.cards['FOCALLEN']._modified
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'Verification reported errors'):
            assert (str(h.cards['APERTURE']) ==
                    _pad("APERTURE= +0.000000000000E+000"))
        assert h.cards['APERTURE']._modified
        assert h._modified

        # This is the case that was specifically causing problems; generating
        # the card strings *before* parsing the values. Also, the card strings
        # really should be "fixed" before being returned to the user
        h = fits.Header.fromstring(hstr, sep='\n')
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'Verification reported errors'):
            assert (str(h.cards['FOCALLEN']) ==
                    _pad("FOCALLEN= +1.550000000000E+002"))
        assert h.cards['FOCALLEN']._modified
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'Verification reported errors'):
            assert (str(h.cards['APERTURE']) ==
                    _pad("APERTURE= +0.000000000000E+000"))
        assert h.cards['APERTURE']._modified

        assert h['FOCALLEN'] == 155.0
        assert h['APERTURE'] == 0.0
        assert h._modified

        # For the heck of it, try assigning the identical values and ensure
        # that the newly fixed value strings are left intact
        h['FOCALLEN'] = 155.0
        h['APERTURE'] = 0.0
        assert (str(h.cards['FOCALLEN']) ==
                _pad("FOCALLEN= +1.550000000000E+002"))
        assert (str(h.cards['APERTURE']) ==
                _pad("APERTURE= +0.000000000000E+000"))

    def test_invalid_float_cards2(self, capsys):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
        """

        # The example for this test requires creating a FITS file containing a
        # slightly misformatted float value. I can't actually even find a way
        # to do that directly through Astropy--it won't let me.
        hdu = fits.PrimaryHDU()
        hdu.header['TEST'] = 5.0022221e-07
        hdu.writeto(self.temp('test.fits'))

        # Here we manually make the file invalid
        with open(self.temp('test.fits'), 'rb+') as f:
            f.seek(346)  # Location of the exponent 'E' symbol
            f.write(encode_ascii('e'))

        with fits.open(self.temp('test.fits')) as hdul, \
                pytest.warns(AstropyUserWarning) as w:
            hdul.writeto(self.temp('temp.fits'), output_verify='warn')
            assert len(w) == 5
            # The first two warnings are just the headers to the actual warning
            # message (HDU 0, Card 4). I'm still not sure things like that
            # should be output as separate warning messages, but that's
            # something to think about...
            msg = str(w[3].message)
            assert "(invalid value string: '5.0022221e-07')" in msg
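
    # Illustrative sketch (not part of the original suite): beyond
    # reporting, the verification machinery can repair cards in place;
    # verify('fix') rewrites a nonstandard value string (for instance the
    # lower-case exponent above) and flags the card as modified:
    #
    #     c = fits.Card.fromstring('FOCALLEN= +1.55e+002')
    #     c.verify('fix')  # warns, then str(c) carries an upper-case 'E'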

    def test_leading_zeros(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2

        Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
        float values like 0.001 the leading zero was unnecessarily being
        stripped off when rewriting the header. Leading zeros should, however,
        be removed from integer values to prevent misinterpretation as octal
        by Python (for now Astropy will still maintain the leading zeros if no
        changes are made to the value, but will drop them if changes are made).
        """

        c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
        assert str(c) == _pad("APERTURE= +0.000000000000E+000")
        assert c.value == 0.0
        c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
        assert str(c) == _pad("APERTURE= 0.000000000000E+000")
        assert c.value == 0.0
        c = fits.Card.fromstring("APERTURE= 017")
        assert str(c) == _pad("APERTURE= 017")
        assert c.value == 17

    def test_assign_boolean(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123

        Tests assigning Python and Numpy boolean values to keyword values.
        """

        fooimg = _pad('FOO = T')
        barimg = _pad('BAR = F')
        h = fits.Header()
        h['FOO'] = True
        h['BAR'] = False
        assert h['FOO'] is True
        assert h['BAR'] is False
        assert str(h.cards['FOO']) == fooimg
        assert str(h.cards['BAR']) == barimg

        h = fits.Header()
        h['FOO'] = np.bool_(True)
        h['BAR'] = np.bool_(False)
        assert h['FOO'] is True
        assert h['BAR'] is False
        assert str(h.cards['FOO']) == fooimg
        assert str(h.cards['BAR']) == barimg

        h = fits.Header()
        h.append(fits.Card.fromstring(fooimg))
        h.append(fits.Card.fromstring(barimg))
        assert h['FOO'] is True
        assert h['BAR'] is False
        assert str(h.cards['FOO']) == fooimg
        assert str(h.cards['BAR']) == barimg

    def test_header_method_keyword_normalization(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149

        Basically ensures that all public Header methods are case-insensitive
        w.r.t. keywords.

        Provides a reasonably comprehensive test of several methods at once.
        """

        h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
        assert list(h) == ['ABC', 'DEF', 'GEH']
        assert 'abc' in h
        assert 'dEf' in h

        assert h['geh'] == 3

        # Case insensitivity of wildcards
        assert len(h['g*']) == 1

        h['aBc'] = 2
        assert h['abc'] == 2
        # ABC already existed so assigning to aBc should not have added any new
        # cards
        assert len(h) == 3

        del h['gEh']
        assert list(h) == ['ABC', 'DEF']
        assert len(h) == 2
        assert h.get('def') == 2

        h.set('Abc', 3)
        assert h['ABC'] == 3
        h.set('gEh', 3, before='Abc')
        assert list(h) == ['GEH', 'ABC', 'DEF']

        assert h.pop('abC') == 3
        assert len(h) == 2

        assert h.setdefault('def', 3) == 2
        assert len(h) == 2
        assert h.setdefault('aBc', 1) == 1
        assert len(h) == 3
        assert list(h) == ['GEH', 'DEF', 'ABC']

        h.update({'GeH': 1, 'iJk': 4})
        assert len(h) == 4
        assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
        assert h['GEH'] == 1

        assert h.count('ijk') == 1
        assert h.index('ijk') == 3

        h.remove('Def')
        assert len(h) == 3
        assert list(h) == ['GEH', 'ABC', 'IJK']

    def test_end_in_comment(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142

        Tests a case where the comment of a card ends with END, and is followed
        by several blank cards.
        """

        data = np.arange(100).reshape(10, 10)
        hdu = fits.PrimaryHDU(data=data)
        hdu.header['TESTKW'] = ('Test val', 'This is the END')
        # Add a couple blanks after the END string
        hdu.header.append()
        hdu.header.append()
        hdu.writeto(self.temp('test.fits'))

        with fits.open(self.temp('test.fits'), memmap=False) as hdul:
            # memmap = False to avoid leaving open a mmap to the file when we
            # access the data--this causes problems on Windows when we try to
            # overwrite the file later
            assert 'TESTKW' in hdul[0].header
            assert hdul[0].header == hdu.header
            assert (hdul[0].data == data).all()

        # Add blanks until the header is extended to two block sizes
        while len(hdu.header) < 36:
            hdu.header.append()
        hdu.writeto(self.temp('test.fits'), overwrite=True)

        with fits.open(self.temp('test.fits')) as hdul:
            assert 'TESTKW' in hdul[0].header
            assert hdul[0].header == hdu.header
            assert (hdul[0].data == data).all()

        # Test parsing the same header when it's written to a text file
        hdu.header.totextfile(self.temp('test.hdr'))
        header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
        assert hdu.header == header2

    def test_assign_unicode(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134

        Assigning a unicode literal as a header value should not fail silently.
        If the value can be converted to ASCII then it should just work.
        Otherwise it should fail with an appropriate value error.

        Also tests unicode for keywords and comments.
        """

        erikku = '\u30a8\u30ea\u30c3\u30af'

        def assign(keyword, val):
            h[keyword] = val

        h = fits.Header()
        h['FOO'] = 'BAR'
        assert 'FOO' in h
        assert h['FOO'] == 'BAR'
        assert repr(h) == _pad("FOO = 'BAR '")
        pytest.raises(ValueError, assign, erikku, 'BAR')

        h['FOO'] = 'BAZ'
        assert h['FOO'] == 'BAZ'
        assert repr(h) == _pad("FOO = 'BAZ '")
        pytest.raises(ValueError, assign, 'FOO', erikku)

        h['FOO'] = ('BAR', 'BAZ')
        assert h['FOO'] == 'BAR'
        assert h.comments['FOO'] == 'BAZ'
        assert repr(h) == _pad("FOO = 'BAR ' / BAZ")

        pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku))
        pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
        pytest.raises(ValueError, assign, 'FOO', (erikku, erikku))

    def test_assign_non_ascii(self):
        """
        First regression test for
        https://github.com/spacetelescope/PyFITS/issues/37

        While test_assign_unicode ensures that `str` objects containing
        non-ASCII characters cannot be assigned to headers, it should not be
        possible to assign bytes to a header at all.
        """

        h = fits.Header()
        with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
            h.set('TEST', b'Hello')

    def test_header_strip_whitespace(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
        for the solution that is optional stripping of whitespace from the end
        of a header value.

        By default extra whitespace is stripped off, but if
        `fits.conf.strip_header_whitespace` = False it should not be
        stripped.
        """

        h = fits.Header()
        h['FOO'] = 'Bar '
        assert h['FOO'] == 'Bar'
        c = fits.Card.fromstring("QUX = 'Bar '")
        h.append(c)
        assert h['QUX'] == 'Bar'
        assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
        assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"

        with fits.conf.set_temp('strip_header_whitespace', False):
            assert h['FOO'] == 'Bar '
            assert h['QUX'] == 'Bar '
            assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
            assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"

        assert h['FOO'] == 'Bar'
        assert h['QUX'] == 'Bar'
        assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
        assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"

    def test_keep_duplicate_history_in_orig_header(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156

        When creating a new HDU from an existing Header read from an existing
        FITS file, if the original header contains duplicate HISTORY values
        those duplicates should be preserved just as in the original header.

        This bug occurred due to naivete in Header.extend.
        """

        history = ['CCD parameters table ...',
                   ' reference table oref$n951041ko_ccd.fits',
                   ' INFLIGHT 12/07/2001 25/02/2002',
                   ' all bias frames'] * 3

        hdu = fits.PrimaryHDU()
        # Add the history entries twice
        for item in history:
            hdu.header['HISTORY'] = item

        hdu.writeto(self.temp('test.fits'))

        with fits.open(self.temp('test.fits')) as hdul:
            assert hdul[0].header['HISTORY'] == history

        new_hdu = fits.PrimaryHDU(header=hdu.header)
        assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
        new_hdu.writeto(self.temp('test2.fits'))

        with fits.open(self.temp('test2.fits')) as hdul:
            assert hdul[0].header['HISTORY'] == history

    def test_invalid_keyword_cards(self):
        """
        Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109

        Allow opening files with headers containing invalid keywords.
        """

        # Create a header containing a few different types of BAD headers.
        c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
        c2 = fits.Card.fromstring('Just some random text.')
        c3 = fits.Card.fromstring('A' * 80)

        hdu = fits.PrimaryHDU()
        # This should work with some warnings
        with pytest.warns(AstropyUserWarning) as w:
            hdu.header.append(c1)
            hdu.header.append(c2)
            hdu.header.append(c3)
        assert len(w) == 3

        hdu.writeto(self.temp('test.fits'))

        with pytest.warns(AstropyUserWarning) as w:
            with fits.open(self.temp('test.fits')) as hdul:
                # Merely opening the file should blast some warnings about the
                # invalid keywords
                assert len(w) == 3

                header = hdul[0].header
                assert 'CLFIND2D' in header
                assert 'Just som' in header
                assert 'AAAAAAAA' in header

                assert header['CLFIND2D'] == ': contour = 0.30'
                assert header['Just som'] == 'e random text.'
                assert header['AAAAAAAA'] == 'A' * 72

                # It should not be possible to assign to the invalid keywords
                pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo')
                pytest.raises(ValueError, header.set, 'Just som', 'foo')
                pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo')

    def test_fix_hierarch_with_invalid_value(self, capsys):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172

        Ensures that when fixing a hierarch card it remains a hierarch card.
        """

        c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
        with pytest.warns(fits.verify.VerifyWarning,
                          match=r'Verification reported errors'):
            c.verify('fix')
        assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')

    def test_assign_inf_nan(self):
        """
        Regression test for https://github.com/spacetelescope/PyFITS/issues/11

        For the time being it should not be possible to assign the floating
        point values inf or nan to a header value, since this is not defined by
        the FITS standard.
        """

        h = fits.Header()
        pytest.raises(ValueError, h.set, 'TEST', float('nan'))
        pytest.raises(ValueError, h.set, 'TEST', np.nan)
        pytest.raises(ValueError, h.set, 'TEST', float('inf'))
        pytest.raises(ValueError, h.set, 'TEST', np.inf)

    def test_update_bool(self):
        """
        Regression test for an issue where a value of True in a header
        cannot be updated to a value of 1, and likewise for False/0.
        """

        h = fits.Header([('TEST', True)])
        h['TEST'] = 1
        assert h['TEST'] is not True
        assert isinstance(h['TEST'], int)
        assert h['TEST'] == 1

        h['TEST'] = np.bool_(True)
        assert h['TEST'] is True

        h['TEST'] = False
        assert h['TEST'] is False
        h['TEST'] = np.bool_(False)
        assert h['TEST'] is False

        h['TEST'] = 0
        assert h['TEST'] is not False
        assert isinstance(h['TEST'], int)
        assert h['TEST'] == 0

        h['TEST'] = np.bool_(False)
        assert h['TEST'] is False

    def test_update_numeric(self):
        """
        Regression test for https://github.com/spacetelescope/PyFITS/issues/49

        Ensure that numeric values can be upcast/downcast between int, float,
        and complex by assigning values that compare equal to the existing
        value but are a different type.
        """

        h = fits.Header()
        h['TEST'] = 1

        # int -> float
        h['TEST'] = 1.0
        assert isinstance(h['TEST'], float)
        assert str(h).startswith('TEST = 1.0')

        # float -> int
        h['TEST'] = 1
        assert isinstance(h['TEST'], int)
        assert str(h).startswith('TEST = 1')

        # int -> complex
        h['TEST'] = 1.0+0.0j
        assert isinstance(h['TEST'], complex)
        assert str(h).startswith('TEST = (1.0, 0.0)')

        # complex -> float
        h['TEST'] = 1.0
        assert isinstance(h['TEST'], float)
        assert str(h).startswith('TEST = 1.0')

        # float -> complex
        h['TEST'] = 1.0+0.0j
        assert isinstance(h['TEST'], complex)
        assert str(h).startswith('TEST = (1.0, 0.0)')

        # complex -> int
        h['TEST'] = 1
        assert isinstance(h['TEST'], int)
        assert str(h).startswith('TEST = 1')

        # Now the same tests but with zeros
        h['TEST'] = 0

        # int -> float
        h['TEST'] = 0.0
        assert isinstance(h['TEST'], float)
        assert str(h).startswith('TEST = 0.0')

        # float -> int
        h['TEST'] = 0
        assert isinstance(h['TEST'], int)
        assert str(h).startswith('TEST = 0')

        # int -> complex
        h['TEST'] = 0.0+0.0j
        assert isinstance(h['TEST'], complex)
        assert str(h).startswith('TEST = (0.0, 0.0)')

        # complex -> float
        h['TEST'] = 0.0
        assert isinstance(h['TEST'], float)
        assert str(h).startswith('TEST = 0.0')

        # float -> complex
        h['TEST'] = 0.0+0.0j
        assert isinstance(h['TEST'], complex)
        assert str(h).startswith('TEST = (0.0, 0.0)')

        # complex -> int
        h['TEST'] = 0
        assert isinstance(h['TEST'], int)
        assert str(h).startswith('TEST = 0')
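
    # Illustrative sketch (not part of the original suite): the coercions
    # above mean a value's Python type simply tracks the most recent
    # assignment; comparing equal to the old value does not preserve the
    # old type:
    #
    #     h = fits.Header([('TEST', 1)])
    #     h['TEST'] = 1.0
    #     assert isinstance(h['TEST'], float)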
Names have been changed to protect the innocent.\n \"\"\"\n\n # First ensure that we can't assign new keyword values with newlines in\n # them\n h = fits.Header()\n pytest.raises(ValueError, h.set, 'HISTORY', '\\n')\n pytest.raises(ValueError, h.set, 'HISTORY', '\\nabc')\n pytest.raises(ValueError, h.set, 'HISTORY', 'abc\\n')\n pytest.raises(ValueError, h.set, 'HISTORY', 'abc\\ndef')\n\n test_cards = [\n \"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 \"\n \"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 \"\n \"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 \"\n \"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\\nFile modif\"\n \"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\\nFile modified by use\"\n \"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 \"\n \"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\\nFile modif\"\n \"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\\nFile modified by use\"\n \"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\\nFile modified by user 'wilma' \"\n \"HISTORY with fv on 2013-04-22T21:42:18\\nFile modif\\nied by user 'wilma' with fv \"\n \"HISTORY on 2013-04-23T11:16:29\\nFile modified by use\\nr ' fred' with fv on 2013-1\"\n \"HISTORY 1-04T16:59:14 \"\n ]\n\n for card_image in test_cards:\n c = fits.Card.fromstring(card_image)\n\n if '\\n' in card_image:\n pytest.raises(fits.VerifyError, c.verify, 'exception')\n else:\n c.verify('exception')\n\n def test_long_commentary_card_appended_to_header(self):\n \"\"\"\n If a HISTORY or COMMENT card with a too-long value is appended to a\n header with Header.append (as opposed to assigning to hdr['HISTORY']\n it fails verification.\n\n Regression test for https://github.com/astropy/astropy/issues/11486\n \"\"\"\n\n header = fits.Header()\n value = 'abc' * 90\n # this is what Table does when saving its history metadata key to a\n # FITS file\n header.append(('history', value))\n assert len(header.cards) == 1\n\n # Test Card._split() directly since this was the main problem area\n key, val = header.cards[0]._split()\n assert key == 'HISTORY' and val == value\n\n # Try writing adding this header to an HDU and writing it to a file\n hdu = fits.PrimaryHDU(header=header)\n hdu.writeto(self.temp('test.fits'), overwrite=True)\n\n def test_header_fromstring_bytes(self):\n \"\"\"\n Test reading a Header from a `bytes` string.\n\n See https://github.com/astropy/astropy/issues/8706\n \"\"\"\n\n with open(self.data('test0.fits'), 'rb') as fobj:\n pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())\n\n pri_hdr = fits.getheader(self.data('test0.fits'))\n assert pri_hdr['NAXIS'] == pri_hdr_from_bytes['NAXIS']\n assert pri_hdr == pri_hdr_from_bytes\n assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()\n\n def test_set_keyword_with_space(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/10479\n \"\"\"\n hdr = fits.Header()\n hdr['KEY2 '] = 2\n hdr['KEY2 '] = 4\n assert len(hdr) == 1\n assert hdr['KEY2'] == 4\n assert hdr['KEY2 '] == 4\n\n def test_strip(self):\n hdr = fits.getheader(self.data('tb.fits'), ext=1)\n hdr['FOO'] = 'bar'\n hdr.strip()\n assert set(hdr) == {'HISTORY', 'FOO'}\n\n hdr = fits.getheader(self.data('tb.fits'), ext=1)\n hdr['FOO'] = 'bar'\n hdr = hdr.copy(strip=True)\n assert set(hdr) == {'HISTORY', 'FOO'}\n\n def test_update_invalid_card(self):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/5408\n\n Tests updating 
the value of a card that is malformatted (with an\n invalid value literal).\n\n This tests two ways of reproducing the problem, one working with a\n Card object directly, and one when reading/writing a header containing\n such an invalid card.\n \"\"\"\n\n card = fits.Card.fromstring('KW = INF / Comment')\n card.value = 'FIXED'\n assert tuple(card) == ('KW', 'FIXED', 'Comment')\n card.verify('fix')\n assert tuple(card) == ('KW', 'FIXED', 'Comment')\n\n card = fits.Card.fromstring('KW = INF')\n hdu = fits.PrimaryHDU()\n # This is a loophole to write a header containing a malformatted card\n card._verified = True\n hdu.header.append(card)\n hdu.header.tofile(self.temp('bogus.fits'))\n\n with fits.open(self.temp('bogus.fits')) as hdul:\n hdul[0].header['KW'] = -1\n hdul.writeto(self.temp('bogus_fixed.fits'))\n\n with fits.open(self.temp('bogus_fixed.fits')) as hdul:\n assert hdul[0].header['KW'] == -1\n\n def test_index_numpy_int(self):\n header = fits.Header([('A', 'FOO'), ('B', 2), ('C', 'BAR')])\n idx = np.int8(2)\n assert header[idx] == 'BAR'\n\n header[idx] = 'BAZ'\n assert header[idx] == 'BAZ'\n\n header.insert(idx, ('D', 42))\n assert header[idx] == 42\n\n header.add_comment('HELLO')\n header.add_comment('WORLD')\n assert header['COMMENT'][np.int64(1)] == 'WORLD'\n\n header.append(('C', 'BAZBAZ'))\n assert header[('C', np.int16(0))] == 'BAZ'\n assert header[('C', np.uint32(1))] == 'BAZBAZ'\n\n\nclass TestRecordValuedKeywordCards(FitsTestCase):\n \"\"\"\n Tests for handling of record-valued keyword cards as used by the\n `FITS WCS distortion paper\n <https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.\n\n These tests are derived primarily from the release notes for PyFITS 1.4 (in\n which this feature was first introduced.\n Note that extra leading spaces in the `value` fields should be parsed on input,\n but will be stripped in the cards.\n \"\"\"\n\n def setup(self):\n super().setup()\n self._test_header = fits.Header()\n self._test_header.set('DP1', 'NAXIS: 2')\n self._test_header.set('DP1', 'AXIS.1: 1')\n self._test_header.set('DP1', 'AXIS.2: 2')\n self._test_header.set('DP1', 'NAUX: 2')\n self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')\n self._test_header.set('DP1', 'AUX.1.POWER.0: 1')\n self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')\n self._test_header.set('DP1', 'AUX.1.POWER.1: 1')\n\n def test_initialize_rvkc(self):\n \"\"\"\n Test different methods for initializing a card that should be\n recognized as a RVKC\n \"\"\"\n\n c = fits.Card.fromstring(\"DP1 = 'NAXIS: 2' / A comment\")\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.0\n assert c.field_specifier == 'NAXIS'\n assert c.comment == 'A comment'\n\n c = fits.Card.fromstring(\"DP1 = 'NAXIS: 2.1'\")\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.1\n assert c.field_specifier == 'NAXIS'\n\n c = fits.Card.fromstring(\"DP1 = 'NAXIS: a'\")\n assert c.keyword == 'DP1'\n assert c.value == 'NAXIS: a'\n assert c.field_specifier is None\n\n c = fits.Card('DP1', 'NAXIS: 2')\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.0\n assert c.field_specifier == 'NAXIS'\n\n c = fits.Card('DP1', 'NAXIS: 2.0')\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.0\n assert c.field_specifier == 'NAXIS'\n\n c = fits.Card('DP1', 'NAXIS: a')\n assert c.keyword == 'DP1'\n assert c.value == 'NAXIS: a'\n assert c.field_specifier is None\n\n c = fits.Card('DP1.NAXIS', 2)\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.0\n assert c.field_specifier == 'NAXIS'\n\n c = fits.Card('DP1.NAXIS', 
2.0)\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.0\n assert c.field_specifier == 'NAXIS'\n\n with pytest.warns(fits.verify.VerifyWarning):\n c = fits.Card('DP1.NAXIS', 'a')\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 'a'\n assert c.field_specifier is None\n\n def test_parse_field_specifier(self):\n \"\"\"\n Tests that the field_specifier can accessed from a card read from a\n string before any other attributes are accessed.\n \"\"\"\n\n c = fits.Card.fromstring(\"DP1 = 'NAXIS: 2' / A comment\")\n assert c.field_specifier == 'NAXIS'\n assert c.keyword == 'DP1.NAXIS'\n assert c.value == 2.0\n assert c.comment == 'A comment'\n\n def test_update_field_specifier(self):\n \"\"\"\n Test setting the field_specifier attribute and updating the card image\n to reflect the new value.\n \"\"\"\n\n c = fits.Card.fromstring(\"DP1 = 'NAXIS: 2' / A comment\")\n assert c.field_specifier == 'NAXIS'\n c.field_specifier = 'NAXIS1'\n assert c.field_specifier == 'NAXIS1'\n assert c.keyword == 'DP1.NAXIS1'\n assert c.value == 2.0\n assert c.comment == 'A comment'\n assert str(c).rstrip() == \"DP1 = 'NAXIS1: 2' / A comment\"\n\n def test_field_specifier_case_senstivity(self):\n \"\"\"\n The keyword portion of an RVKC should still be case-insensitive, but\n the field-specifier portion should be case-sensitive.\n \"\"\"\n\n header = fits.Header()\n header.set('abc.def', 1)\n header.set('abc.DEF', 2)\n assert header['abc.def'] == 1\n assert header['ABC.def'] == 1\n assert header['aBc.def'] == 1\n assert header['ABC.DEF'] == 2\n assert 'ABC.dEf' not in header\n\n def test_get_rvkc_by_index(self):\n \"\"\"\n Returning a RVKC from a header via index lookup should return the\n float value of the card.\n \"\"\"\n\n assert self._test_header[0] == 2.0\n assert isinstance(self._test_header[0], float)\n assert self._test_header[1] == 1.0\n assert isinstance(self._test_header[1], float)\n\n def test_get_rvkc_by_keyword(self):\n \"\"\"\n Returning a RVKC just via the keyword name should return the full value\n string of the first card with that keyword.\n\n This test was changed to reflect the requirement in ticket\n https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required\n _test_header['DP1'] to return the parsed float value.\n \"\"\"\n\n assert self._test_header['DP1'] == 'NAXIS: 2'\n\n def test_get_rvkc_by_keyword_and_field_specifier(self):\n \"\"\"\n Returning a RVKC via the full keyword/field-specifier combination\n should return the floating point value associated with the RVKC.\n \"\"\"\n\n assert self._test_header['DP1.NAXIS'] == 2.0\n assert isinstance(self._test_header['DP1.NAXIS'], float)\n assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125\n\n def test_access_nonexistent_rvkc(self):\n \"\"\"\n Accessing a nonexistent RVKC should raise an IndexError for\n index-based lookup, or a KeyError for keyword lookup (like a normal\n card).\n \"\"\"\n\n pytest.raises(IndexError, lambda x: self._test_header[x], 8)\n # Test exception with message\n with pytest.raises(KeyError, match=r\"Keyword 'DP1\\.AXIS\\.3' not found.\"):\n self._test_header['DP1.AXIS.3']\n\n def test_update_rvkc(self):\n \"\"\"A RVKC can be updated either via index or keyword access.\"\"\"\n\n self._test_header[0] = 3\n assert self._test_header['DP1.NAXIS'] == 3.0\n assert isinstance(self._test_header['DP1.NAXIS'], float)\n\n self._test_header['DP1.AXIS.1'] = 1.1\n assert self._test_header['DP1.AXIS.1'] == 1.1\n\n def test_update_rvkc_2(self):\n \"\"\"Regression test for an issue that appeared after SVN 
r2412.\"\"\"\n\n h = fits.Header()\n h['D2IM1.EXTVER'] = 1\n assert h['D2IM1.EXTVER'] == 1.0\n h['D2IM1.EXTVER'] = 2\n assert h['D2IM1.EXTVER'] == 2.0\n\n def test_raw_keyword_value(self):\n c = fits.Card.fromstring(\"DP1 = 'NAXIS: 2' / A comment\")\n assert c.rawkeyword == 'DP1'\n assert c.rawvalue == 'NAXIS: 2'\n\n c = fits.Card('DP1.NAXIS', 2)\n assert c.rawkeyword == 'DP1'\n assert c.rawvalue == 'NAXIS: 2.0'\n\n c = fits.Card('DP1.NAXIS', 2.0)\n assert c.rawkeyword == 'DP1'\n assert c.rawvalue == 'NAXIS: 2.0'\n\n def test_rvkc_insert_after(self):\n \"\"\"\n It should be possible to insert a new RVKC after an existing one\n specified by the full keyword/field-specifier combination.\"\"\"\n\n self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',\n after='DP1.AXIS.2')\n assert self._test_header[3] == 1\n assert self._test_header['DP1.AXIS.3'] == 1\n\n def test_rvkc_delete(self):\n \"\"\"\n Deleting a RVKC should work as with a normal card by using the full\n keyword/field-spcifier combination.\n \"\"\"\n\n del self._test_header['DP1.AXIS.1']\n assert len(self._test_header) == 7\n assert list(self._test_header)[0] == 'DP1.NAXIS'\n assert self._test_header[0] == 2\n assert list(self._test_header)[1] == 'DP1.AXIS.2'\n\n # Perform a subsequent delete to make sure all the index mappings were\n # updated\n del self._test_header['DP1.AXIS.2']\n assert len(self._test_header) == 6\n assert list(self._test_header)[0] == 'DP1.NAXIS'\n assert self._test_header[0] == 2\n assert list(self._test_header)[1] == 'DP1.NAUX'\n assert self._test_header[1] == 2\n\n def test_pattern_matching_keys(self):\n \"\"\"Test the keyword filter strings with RVKCs.\"\"\"\n\n cl = self._test_header['DP1.AXIS.*']\n assert isinstance(cl, fits.Header)\n assert ([str(c).strip() for c in cl.cards] ==\n [\"DP1 = 'AXIS.1: 1'\",\n \"DP1 = 'AXIS.2: 2'\"])\n\n cl = self._test_header['DP1.N*']\n assert ([str(c).strip() for c in cl.cards] ==\n [\"DP1 = 'NAXIS: 2'\",\n \"DP1 = 'NAUX: 2'\"])\n\n cl = self._test_header['DP1.AUX...']\n assert ([str(c).strip() for c in cl.cards] ==\n [\"DP1 = 'AUX.1.COEFF.0: 0'\",\n \"DP1 = 'AUX.1.POWER.0: 1'\",\n \"DP1 = 'AUX.1.COEFF.1: 0.00048828125'\",\n \"DP1 = 'AUX.1.POWER.1: 1'\"])\n\n cl = self._test_header['DP?.NAXIS']\n assert ([str(c).strip() for c in cl.cards] ==\n [\"DP1 = 'NAXIS: 2'\"])\n\n cl = self._test_header['DP1.A*S.*']\n assert ([str(c).strip() for c in cl.cards] ==\n [\"DP1 = 'AXIS.1: 1'\",\n \"DP1 = 'AXIS.2: 2'\"])\n\n def test_pattern_matching_key_deletion(self):\n \"\"\"Deletion by filter strings should work.\"\"\"\n\n del self._test_header['DP1.A*...']\n assert len(self._test_header) == 2\n assert list(self._test_header)[0] == 'DP1.NAXIS'\n assert self._test_header[0] == 2\n assert list(self._test_header)[1] == 'DP1.NAUX'\n assert self._test_header[1] == 2\n\n def test_successive_pattern_matching(self):\n \"\"\"\n A card list returned via a filter string should be further filterable.\n \"\"\"\n\n cl = self._test_header['DP1.A*...']\n assert ([str(c).strip() for c in cl.cards] ==\n [\"DP1 = 'AXIS.1: 1'\",\n \"DP1 = 'AXIS.2: 2'\",\n \"DP1 = 'AUX.1.COEFF.0: 0'\",\n \"DP1 = 'AUX.1.POWER.0: 1'\",\n \"DP1 = 'AUX.1.COEFF.1: 0.00048828125'\",\n \"DP1 = 'AUX.1.POWER.1: 1'\"])\n\n cl2 = cl['*.*AUX...']\n assert ([str(c).strip() for c in cl2.cards] ==\n [\"DP1 = 'AUX.1.COEFF.0: 0'\",\n \"DP1 = 'AUX.1.POWER.0: 1'\",\n \"DP1 = 'AUX.1.COEFF.1: 0.00048828125'\",\n \"DP1 = 'AUX.1.POWER.1: 1'\"])\n\n def test_rvkc_in_cardlist_keys(self):\n \"\"\"\n The CardList.keys() method should return full 
keyword/field-spec values\n for RVKCs.\n \"\"\"\n\n cl = self._test_header['DP1.AXIS.*']\n assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2']\n\n def test_rvkc_in_cardlist_values(self):\n \"\"\"\n The CardList.values() method should return the values of all RVKCs as\n floating point values.\n \"\"\"\n\n cl = self._test_header['DP1.AXIS.*']\n assert list(cl.values()) == [1.0, 2.0]\n\n def test_rvkc_value_attribute(self):\n \"\"\"\n Individual card values should be accessible by the .value attribute\n (which should return a float).\n \"\"\"\n\n cl = self._test_header['DP1.AXIS.*']\n assert cl.cards[0].value == 1.0\n assert isinstance(cl.cards[0].value, float)\n\n def test_overly_permissive_parsing(self):\n \"\"\"\n Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183\n\n Ensures that cards with standard commentary keywords are never treated\n as RVKCs. Also ensures that cards not stricly matching the RVKC\n pattern are not treated as such.\n \"\"\"\n\n h = fits.Header()\n h['HISTORY'] = 'AXIS.1: 2'\n h['HISTORY'] = 'AXIS.2: 2'\n assert 'HISTORY.AXIS' not in h\n assert 'HISTORY.AXIS.1' not in h\n assert 'HISTORY.AXIS.2' not in h\n assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']\n\n # This is an example straight out of the ticket where everything after\n # the '2012' in the date value was being ignored, allowing the value to\n # successfully be parsed as a \"float\"\n h = fits.Header()\n h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'\n assert 'HISTORY.Date' not in h\n assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')\n\n c = fits.Card.fromstring(\n \" 'Date: 2012-09-19T13:58:53.756061'\")\n assert c.keyword == ''\n assert c.value == \"'Date: 2012-09-19T13:58:53.756061'\"\n assert c.field_specifier is None\n\n h = fits.Header()\n h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'\n assert 'FOO.Date' not in h\n assert (str(h.cards[0]) ==\n _pad(\"FOO = 'Date: 2012-09-19T13:58:53.756061'\"))\n\n def test_overly_aggressive_rvkc_lookup(self):\n \"\"\"\n Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184\n\n Ensures that looking up a RVKC by keyword only (without the\n field-specifier) in a header returns the full string value of that card\n without parsing it as a RVKC. Also ensures that a full field-specifier\n is required to match a RVKC--a partial field-specifier that doesn't\n explicitly match any record-valued keyword should result in a KeyError.\n \"\"\"\n\n c1 = fits.Card.fromstring(\"FOO = 'AXIS.1: 2'\")\n c2 = fits.Card.fromstring(\"FOO = 'AXIS.2: 4'\")\n h = fits.Header([c1, c2])\n assert h['FOO'] == 'AXIS.1: 2'\n assert h[('FOO', 1)] == 'AXIS.2: 4'\n assert h['FOO.AXIS.1'] == 2.0\n assert h['FOO.AXIS.2'] == 4.0\n assert 'FOO.AXIS' not in h\n assert 'FOO.AXIS.' not in h\n assert 'FOO.' 
not in h\n pytest.raises(KeyError, lambda: h['FOO.AXIS'])\n pytest.raises(KeyError, lambda: h['FOO.AXIS.'])\n pytest.raises(KeyError, lambda: h['FOO.'])\n\n def test_fitsheader_script(self):\n \"\"\"Tests the basic functionality of the `fitsheader` script.\"\"\"\n from astropy.io.fits.scripts import fitsheader\n\n # Can an extension by specified by the EXTNAME keyword?\n hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits'))\n output = hf.parse(extensions=['AIPS FQ'])\n assert \"EXTNAME = 'AIPS FQ\" in output\n assert \"BITPIX\" in output\n\n # Can we limit the display to one specific keyword?\n output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME'])\n assert \"EXTNAME = 'AIPS FQ\" in output\n assert \"BITPIX =\" not in output\n assert len(output.split('\\n')) == 3\n\n # Can we limit the display to two specific keywords?\n output = hf.parse(extensions=[1],\n keywords=['EXTNAME', 'BITPIX'])\n assert \"EXTNAME =\" in output\n assert \"BITPIX =\" in output\n assert len(output.split('\\n')) == 4\n\n # Can we use wildcards for keywords?\n output = hf.parse(extensions=[1], keywords=['NAXIS*'])\n assert \"NAXIS =\" in output\n assert \"NAXIS1 =\" in output\n assert \"NAXIS2 =\" in output\n hf.close()\n\n # Can an extension by specified by the EXTNAME+EXTVER keywords?\n hf = fitsheader.HeaderFormatter(self.data('test0.fits'))\n assert \"EXTNAME = 'SCI\" in hf.parse(extensions=['SCI,2'])\n hf.close()\n\n # Can we print the original header before decompression?\n hf = fitsheader.HeaderFormatter(self.data('comp.fits'))\n assert \"XTENSION= 'IMAGE\" in hf.parse(extensions=[1],\n compressed=False)\n assert \"XTENSION= 'BINTABLE\" in hf.parse(extensions=[1],\n compressed=True)\n hf.close()\n\n def test_fitsheader_table_feature(self):\n \"\"\"Tests the `--table` feature of the `fitsheader` script.\"\"\"\n from astropy.io import fits\n from astropy.io.fits.scripts import fitsheader\n test_filename = self.data('zerowidth.fits')\n\n formatter = fitsheader.TableHeaderFormatter(test_filename)\n\n with fits.open(test_filename) as fitsobj:\n # Does the table contain the expected number of rows?\n mytable = formatter.parse([0])\n assert len(mytable) == len(fitsobj[0].header)\n # Repeat the above test when multiple HDUs are requested\n mytable = formatter.parse(extensions=['AIPS FQ', 2, \"4\"])\n assert len(mytable) == (len(fitsobj['AIPS FQ'].header)\n + len(fitsobj[2].header)\n + len(fitsobj[4].header))\n\n # Can we recover the filename and extension name from the table?\n mytable = formatter.parse(extensions=['AIPS FQ'])\n assert np.all(mytable['filename'] == test_filename)\n assert np.all(mytable['hdu'] == 'AIPS FQ')\n assert mytable['value'][mytable['keyword'] == \"EXTNAME\"] == \"AIPS FQ\"\n\n # Can we specify a single extension/keyword?\n mytable = formatter.parse(extensions=['AIPS FQ'],\n keywords=['EXTNAME'])\n assert len(mytable) == 1\n assert mytable['hdu'][0] == \"AIPS FQ\"\n assert mytable['keyword'][0] == \"EXTNAME\"\n assert mytable['value'][0] == \"AIPS FQ\"\n\n # Is an incorrect extension dealt with gracefully?\n mytable = formatter.parse(extensions=['DOES_NOT_EXIST'])\n assert mytable is None\n # Is an incorrect keyword dealt with gracefully?\n mytable = formatter.parse(extensions=['AIPS FQ'],\n keywords=['DOES_NOT_EXIST'])\n assert mytable is None\n formatter.close()\n\n @pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])\n def test_hdu_writeto_mode(self, mode):\n\n with open(self.temp('mode.fits'), mode=mode) as ff:\n hdu = fits.ImageHDU(data=np.ones(5))\n 
hdu.writeto(ff)\n\n\ndef test_subclass():\n \"\"\"Check that subclasses don't get ignored on slicing and copying.\"\"\"\n class MyHeader(fits.Header):\n def append(self, card, *args, **kwargs):\n if isinstance(card, tuple) and len(card) == 2:\n # Just for our checks we add a comment if there is none.\n card += ('no comment',)\n\n return super().append(card, *args, **kwargs)\n\n my_header = MyHeader((('a', 1., 'first'),\n ('b', 2., 'second'),\n ('c', 3.,)))\n\n assert my_header.comments['a'] == 'first'\n assert my_header.comments['b'] == 'second'\n assert my_header.comments['c'] == 'no comment'\n\n slice_ = my_header[1:]\n assert type(slice_) is MyHeader\n assert slice_.comments['b'] == 'second'\n assert slice_.comments['c'] == 'no comment'\n selection = my_header['c*']\n assert type(selection) is MyHeader\n assert selection.comments['c'] == 'no comment'\n copy_ = my_header.copy()\n assert type(copy_) is MyHeader\n assert copy_.comments['b'] == 'second'\n assert copy_.comments['c'] == 'no comment'\n my_header.extend((('d', 4.),))\n assert my_header.comments['d'] == 'no comment'\n" ]
[ [ "numpy.int8", "numpy.uint32", "numpy.ones", "numpy.bool_", "numpy.arange", "numpy.all", "numpy.int64", "numpy.int16" ] ]
isoundy000/FGJumperMaster
[ "10063f167fbba7d9e16375965f7320a3966169f6" ]
[ "FGVisonUtil.py" ]
[ "# -*- coding: utf-8 -*- \nimport cv2\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport math\n\nclass FGVisionUtil:\n '''\n 凡哥的机器视觉工具箱\n\n '''\n\n\n @staticmethod\n def cal_rgb_margin(img):\n '''\n 计算采样色块区域RGB 三通道的阈值\n '''\n # 计算色块的 \n\n (minB, maxB) = FGVisionUtil.cal_single_margin(img, 0)\n (minG, maxG) = FGVisionUtil.cal_single_margin(img, 1)\n (minR, maxR) = FGVisionUtil.cal_single_margin(img, 2)\n\n threshold_lower = np.int32([minB, minG, minR])\n threshold_upper = np.int32([maxB, maxG, maxR])\n \n return (threshold_lower, threshold_upper)\n \n @staticmethod\n def cal_single_margin(img, channel):\n '''\n 计算采样色块区域单个通道的阈值边界\n '''\n # 柱形统计\n hist = cv2.calcHist([img], [channel], None, [256], [0, 256])\n hist = hist.reshape((len(hist)))\n # 概率分布\n prob = hist / hist.sum()\n # 计算颜色累计分布\n prob_accum = np.zeros_like(prob)\n prob_accum[0] = prob[0]\n\n\n # 阈值下界确定状态\n lower_status = False\n # 阈值上界确定状态\n upper_status = False\n\n # 概率累计分布最小值\n\n lower_prob = 0.05\n # 概率累计分布最大值\n\n upper_prob = 0.95\n\n # 阈值下界值\n lower_value = 0\n # 阈值上界值\n upper_value = 0\n\n for i in range(1, len(prob)):\n prob_accum[i] = prob[i] + prob_accum[i-1]\n \n # 确定阈值下界\n if not lower_status and prob_accum[i] > lower_prob:\n lower_value = i\n lower_status = True\n # 确定阈值上界\n if not upper_status and prob_accum[i] > upper_prob:\n upper_value = i\n upper_status = True\n\n\n return (lower_value, upper_value)\n \n @staticmethod\n def draw_gray_hist(img):\n '''\n 绘制灰度图的直方图\n '''\n # 样例图片\n plt.hist(img.ravel(), bins=256, range=[0, 256])\n plt.show()\n\n @staticmethod\n def draw_rgb_hist(img):\n '''\n 绘制RGB彩图的直方图\n '''\n # 获取顶部长条(只有背景图片)\n color = ('b', 'g', 'r') \n for i, col in enumerate(color): \n histr = cv2.calcHist([img], [i], None, [256], [0, 256])\n plt.plot(histr, color=col) \n plt.xlim([0, 256]) \n plt.show() \n\n @staticmethod\n def justify_rgb_value(color):\n '''\n 根据RGB的取值范围重新调整 RGB的值\n '''\n MIN_RGB_VALUE = 0\n MAX_RGB_VALUE = 255\n \n ncolor = np.copy(color)\n for channel in range(3):\n if ncolor[channel] < MIN_RGB_VALUE:\n ncolor[channel] = MIN_RGB_VALUE\n elif ncolor[channel] > MAX_RGB_VALUE:\n ncolor[channel] = MAX_RGB_VALUE\n return ncolor\n \n @staticmethod\n def contours_filter(contours, minWidth=None, minHeight=None, minArea=None):\n '''\n contours筛选器\n ''' \n newCntList = []\n\n for cnt in contours:\n \n rect = cv2.minAreaRect(cnt) # 获取最小矩形区域\n area = cv2.contourArea(cnt) # 获取contour的面积\n\n width = rect[1][0]\n height = rect[1][1]\n\n if minWidth and width < minWidth:\n continue\n if minHeight and height < minHeight:\n continue\n if minArea and area < minArea:\n continue\n\n newCntList.append(cnt)\n return newCntList \n \n \n @staticmethod\n def isPointInRectangle(rect, pt):\n (px, py) = pt\n (x, y, w, h) = rect\n\n if px < x or px > x + w:\n return False\n elif py < y or px > y + h:\n return False\n\n return True\n \n @staticmethod\n def printImgInfo(img):\n print(\"================打印一下图像的属性================\")\n print(\"图像对象的类型 {}\".format(type(img)))\n print(img.shape)\n print(\"图像宽度: {} pixels\".format(img.shape[1]))\n print(\"图像高度: {} pixels\".format(img.shape[0]))\n print(\"通道: {}\".format(img.shape[2]))\n print(\"图像分辨率: {}\".format(img.size))\n print(\"数据类型: {}\".format(img.dtype))\n \n @staticmethod\n def cal_distance(pt1, pt2):\n '''\n 获取棋子与下一跳盒子的距离\n '''\n (x1, y1) = pt1\n (x2, y2) = pt2\n\n return math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2))" ]
[ [ "numpy.zeros_like", "matplotlib.pyplot.xlim", "numpy.copy", "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "numpy.int32" ] ]
dimitar-petrov/manim
[ "a595f8cb8da242ae01f12d80327d98af6dcbd2c5" ]
[ "manim/mobject/types/image_mobject.py" ]
[ "\"\"\"Mobjects representing raster images.\"\"\"\n\n__all__ = [\"AbstractImageMobject\", \"ImageMobject\", \"ImageMobjectFromCamera\"]\n\nimport pathlib\n\nimport numpy as np\n\nfrom PIL import Image\n\nfrom ... import config\nfrom ...constants import *\nfrom ...mobject.mobject import Mobject\nfrom ...mobject.shape_matchers import SurroundingRectangle\nfrom ...utils.bezier import interpolate\nfrom ...utils.color import color_to_int_rgb, WHITE\nfrom ...utils.images import get_full_raster_image_path\nfrom manim.constants import QUALITIES, DEFAULT_QUALITY\n\n\nclass AbstractImageMobject(Mobject):\n \"\"\"\n Automatically filters out black pixels\n\n Parameters\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.\n \"\"\"\n\n def __init__(self, scale_to_resolution, pixel_array_dtype=\"uint8\", **kwargs):\n self.pixel_array_dtype = pixel_array_dtype\n self.scale_to_resolution = scale_to_resolution\n Mobject.__init__(self, **kwargs)\n\n def get_pixel_array(self):\n raise NotImplementedError()\n\n def set_color(self):\n # Likely to be implemented in subclasses, but no obgligation\n pass\n\n def reset_points(self):\n # Corresponding corners of image are fixed to these 3 points\n self.points = np.array(\n [\n UP + LEFT,\n UP + RIGHT,\n DOWN + LEFT,\n ]\n )\n self.center()\n h, w = self.get_pixel_array().shape[:2]\n if self.scale_to_resolution:\n self.height = h / self.scale_to_resolution * config[\"frame_height\"]\n else:\n self.height = 3 ## this is the case for ImageMobjectFromCamera\n self.stretch_to_fit_height(self.height)\n self.stretch_to_fit_width(self.height * w / h)\n\n\nclass ImageMobject(AbstractImageMobject):\n \"\"\"Displays an Image from a numpy array or a file.\n\n Parameters\n ----------\n scale_to_resolution : :class:`int`\n At this resolution the image is placed pixel by pixel onto the screen, so it will look the sharpest and best.\n This is a custom parameter of ImageMobject so that rendering a scene with e.g. the ``--quality low`` or ``--quality medium`` flag for faster rendering won't effect the position of the image on the screen.\n\n\n Example\n -------\n .. 
manim:: ImageFromArray\n :save_last_frame:\n\n class ImageFromArray(Scene):\n def construct(self):\n image = ImageMobject(np.uint8([[0, 100, 30, 200],\n [255, 0, 5, 33]]))\n image.set_height(7)\n self.add(image)\n\n \"\"\"\n\n def __init__(\n self,\n filename_or_array,\n scale_to_resolution=QUALITIES[DEFAULT_QUALITY][\"pixel_height\"],\n invert=False,\n image_mode=\"RGBA\",\n **kwargs,\n ):\n self.invert = invert\n self.image_mode = image_mode\n if isinstance(filename_or_array, (str, pathlib.PurePath)):\n path = get_full_raster_image_path(filename_or_array)\n image = Image.open(path).convert(self.image_mode)\n self.pixel_array = np.array(image)\n else:\n self.pixel_array = np.array(filename_or_array)\n self.pixel_array_dtype = kwargs.get(\"pixel_array_dtype\", \"uint8\")\n self.change_to_rgba_array()\n if self.invert:\n self.pixel_array[:, :, :3] = 255 - self.pixel_array[:, :, :3]\n AbstractImageMobject.__init__(self, scale_to_resolution, **kwargs)\n\n def change_to_rgba_array(self):\n \"\"\"Converts an RGB array into RGBA with the alpha value opacity maxed.\"\"\"\n pa = self.pixel_array\n if len(pa.shape) == 2:\n pa = pa.reshape(list(pa.shape) + [1])\n if pa.shape[2] == 1:\n pa = pa.repeat(3, axis=2)\n if pa.shape[2] == 3:\n alphas = 255 * np.ones(\n list(pa.shape[:2]) + [1], dtype=self.pixel_array_dtype\n )\n pa = np.append(pa, alphas, axis=2)\n self.pixel_array = pa\n\n def get_pixel_array(self):\n \"\"\"A simple getter method.\"\"\"\n return self.pixel_array\n\n def set_color(self, color, alpha=None, family=True):\n rgb = color_to_int_rgb(color)\n self.pixel_array[:, :, :3] = rgb\n if alpha is not None:\n self.pixel_array[:, :, 3] = int(255 * alpha)\n for submob in self.submobjects:\n submob.set_color(color, alpha, family)\n self.color = color\n return self\n\n def set_opacity(self, alpha):\n \"\"\"Sets the image's opacity.\n\n Parameters\n ----------\n alpha : float\n The alpha value of the object, 1 being opaque and 0 being\n transparent.\n \"\"\"\n self.pixel_array[:, :, 3] = int(255 * alpha)\n return self\n\n def fade(self, darkness=0.5, family=True):\n \"\"\"Sets the image's opacity using a 1 - alpha relationship.\n\n Parameters\n ----------\n darkness : float\n The alpha value of the object, 1 being transparent and 0 being\n opaque.\n family : Boolean\n Whether the submobjects of the ImageMobject should be affected.\n \"\"\"\n self.set_opacity(1 - darkness)\n super().fade(darkness, family)\n return self\n\n def interpolate_color(self, mobject1, mobject2, alpha):\n \"\"\"Interpolates an array of pixel color values into another array of\n equal size.\n\n Parameters\n ----------\n mobject1 : ImageMobject\n The ImageMobject to tranform from.\n\n mobject1 : ImageMobject\n\n The ImageMobject to tranform into.\n alpha : float\n Used to track the lerp relationship. 
Not opacity related.\n \"\"\"\n assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, (\n f\"Mobject pixel array shapes incompatible for interpolation.\\n\"\n f\"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\\n\"\n f\"Mobject 2 ({mobject2}) : {mobject2.pixel_array.shape}\"\n )\n self.pixel_array = interpolate(\n mobject1.pixel_array, mobject2.pixel_array, alpha\n ).astype(self.pixel_array_dtype)\n\n\n# TODO, add the ability to have the dimensions/orientation of this\n# mobject more strongly tied to the frame of the camera it contains,\n# in the case where that's a MovingCamera\n\n\nclass ImageMobjectFromCamera(AbstractImageMobject):\n def __init__(self, camera, default_display_frame_config=None, **kwargs):\n self.camera = camera\n if default_display_frame_config is None:\n default_display_frame_config = {\n \"stroke_width\": 3,\n \"stroke_color\": WHITE,\n \"buff\": 0,\n }\n self.default_display_frame_config = default_display_frame_config\n self.pixel_array = self.camera.pixel_array\n AbstractImageMobject.__init__(self, scale_to_resolution=False, **kwargs)\n\n # TODO: Get rid of this.\n def get_pixel_array(self):\n self.pixel_array = self.camera.pixel_array\n return self.pixel_array\n\n def add_display_frame(self, **kwargs):\n config = dict(self.default_display_frame_config)\n config.update(kwargs)\n self.display_frame = SurroundingRectangle(self, **config)\n self.add(self.display_frame)\n return self\n\n def interpolate_color(self, mobject1, mobject2, alpha):\n assert mobject1.pixel_array.shape == mobject2.pixel_array.shape, (\n f\"Mobject pixel array shapes incompatible for interpolation.\\n\"\n f\"Mobject 1 ({mobject1}) : {mobject1.pixel_array.shape}\\n\"\n f\"Mobject 2 ({mobject2}) : {mobject2.pixel_array.shape}\"\n )\n self.pixel_array = interpolate(\n mobject1.pixel_array, mobject2.pixel_array, alpha\n ).astype(self.pixel_array_dtype)\n" ]
[ [ "numpy.array", "numpy.append" ] ]
xaviervasques/face_classification
[ "7d947e233b82f16f0b8d0d33df5a0dac207623ce" ]
[ "src/image_gradcam_demo.py" ]
[ "import sys\n\nimport cv2\nimport numpy as np\nfrom keras.models import load_model\n\nfrom utils.grad_cam import compile_gradient_function\nfrom utils.grad_cam import compile_saliency_function\nfrom utils.grad_cam import register_gradient\nfrom utils.grad_cam import modify_backprop\nfrom utils.grad_cam import calculate_guided_gradient_CAM\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.preprocessor import preprocess_input\nfrom utils.inference import draw_bounding_box\nfrom utils.inference import load_image\n\n\n# parameters\nimage_path = \"/Users/xaviervasques/Desktop/Solvay.jpg\"\n# task = sys.argv[2]\ntask = 'emotion'\nif task == 'emotion':\n labels = get_labels('fer2013')\n offsets = (0, 0)\n # model_filename = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'\n model_filename = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'\nelif task == 'gender':\n labels = get_labels('imdb')\n offsets = (30, 60)\n model_filename = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'\n\ncolor = (0, 255, 0)\n\n# loading models\ndetection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'\nmodel = load_model(model_filename, compile=False)\ntarget_size = model.input_shape[1:3]\nface_detection = load_detection_model(detection_model_path)\n\n# loading images\nrgb_image = load_image(image_path, grayscale=False)\ngray_image = load_image(image_path, grayscale=True)\ngray_image = np.squeeze(gray_image)\ngray_image = gray_image.astype('uint8')\nfaces = detect_faces(face_detection, gray_image)\n\n# start prediction for every image\nfor face_coordinates in faces:\n\n x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)\n rgb_face = rgb_image[y1:y2, x1:x2]\n\n x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n\n # processing input\n try:\n gray_face = cv2.resize(gray_face, (target_size))\n except:\n continue\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n\n # prediction\n predicted_class = np.argmax(model.predict(gray_face))\n label_text = labels[predicted_class]\n\n gradient_function = compile_gradient_function(model,\n predicted_class, 'conv2d_7')\n register_gradient()\n guided_model = modify_backprop(model, 'GuidedBackProp', task)\n saliency_function = compile_saliency_function(guided_model, 'conv2d_7')\n\n guided_gradCAM = calculate_guided_gradient_CAM(gray_face,\n gradient_function, saliency_function)\n guided_gradCAM = cv2.resize(guided_gradCAM, (x2-x1, y2-y1))\n rgb_guided_gradCAM = np.repeat(guided_gradCAM[:, :, np.newaxis], 3, axis=2)\n rgb_image[y1:y2, x1:x2, :] = rgb_guided_gradCAM\n draw_bounding_box((x1, y1, x2 - x1, y2 - y1), rgb_image, color)\nbgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\ncv2.imwrite('../images/guided_gradCAM.png', bgr_image)\n" ]
[ [ "numpy.repeat", "numpy.squeeze", "numpy.expand_dims" ] ]
silverbulletmdc/reid-strong-baseline
[ "09cbd204242194077f4fa4c2803c4694940a1c8e" ]
[ "tools/heatmap.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: sherlock\n@contact: [email protected]\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nfrom os import mkdir\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.backends import cudnn\nfrom torch import nn\nimport torch.nn.functional as F\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom config import cfg\nfrom data import make_data_loader\nfrom modeling import build_model\nfrom utils.logger import setup_logger\n\n\ndef get_heatmap(model, query, gallerys, upsample_shape):\n \"\"\"\n get heatmap\n\n :param model:\n :param torch.Tensor query: [3, H, W]\n :param torch.Tensor gallerys: [B, 3, H, W]\n :return:\n \"\"\"\n all_img = torch.cat([query.unsqueeze(0), gallerys])\n featuremaps = model.base(all_img) # [B+1, C, H, W]\n B, C, H, W = featuremaps.shape\n B = B - 1\n\n global_featuremaps = model.gap(featuremaps) # [B, C, 1, 1]\n query_featuermap = global_featuremaps[0] # [C, 1, 1]\n gallery_featuremaps = global_featuremaps[1:] # [B, C, 1, 1]\n distances = (gallery_featuremaps - query_featuermap) ** 2 # [B, C, 1, 1]\n query_heatmaps = torch.sum(featuremaps[0] * distances, dim=1) # [B, H, W]\n gallery_heatmaps = torch.sum(featuremaps[1:] * distances, dim=1) # [B, H, W]\n\n output_heatmaps = []\n for heatmaps in (query_heatmaps, gallery_heatmaps):\n for heatmap in heatmaps:\n heatmap = heatmap - torch.min(heatmap)\n heatmap = heatmap / torch.max(heatmap)\n heatmap = (255 * heatmap).type(torch.uint8)\n heatmap_np = heatmap.detach().cpu().numpy()\n output_heatmaps.append(cv2.resize(heatmap_np, upsample_shape))\n\n return output_heatmaps, torch.sum(distances, dim=1)\n\n\ndef denormalize(img, cfg):\n \"\"\"\n 从tensor恢复图片\n\n :param torch.Tensor img: [C, H, W]\n :param cfg:\n :return: np uint8 rgb图像\n \"\"\"\n std = img.new_tensor(cfg.INPUT.PIXEL_STD)\n mean = img.new_tensor(cfg.INPUT.PIXEL_MEAN)\n img = img * std.view(-1, 1, 1) + mean.view(-1, 1, 1)\n img *= 255\n img = img.permute(1, 2, 0)\n img = img.detach().cpu().numpy().astype(np.uint8)\n return img\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"ReID Baseline Inference\")\n parser.add_argument(\n \"--config_file\", default=\"\", help=\"path to config file\", type=str\n )\n parser.add_argument(\n \"--id\", required=True, type=int\n )\n parser.add_argument(\"opts\", help=\"Modify config options using the command-line\", default=None,\n nargs=argparse.REMAINDER)\n\n args = parser.parse_args()\n\n if args.config_file != \"\":\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n\n if output_dir and not os.path.exists(output_dir):\n mkdir(output_dir)\n\n logger = setup_logger(\"reid_baseline\", output_dir, 0)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n if cfg.MODEL.DEVICE == \"cuda\":\n os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID\n cudnn.benchmark = True\n\n query_loader: DataLoader\n gallery_loader: DataLoader\n query_loader, gallery_loader, num_classes = make_data_loader(cfg, get_demo_dataset=True)\n query_data = query_loader.dataset[args.id]\n\n model = build_model(cfg, num_classes)\n model.load_param(cfg.TEST.WEIGHT)\n\n if not os.path.exists('heatmaps/{}'.format(args.id)):\n os.makedirs('heatmaps/{}'.format(args.id))\n\n device = cfg.MODEL.DEVICE\n\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model.to(cfg.MODEL.DEVICE)\n\n model.eval()\n with torch.no_grad():\n data, pid, camid, path = query_data\n query = 
data.to(device) if torch.cuda.device_count() >= 1 else data\n\n for batch in gallery_loader:\n data, pids, camids, paths = batch\n B = data.shape[0]\n gallerys = data.to(device) if torch.cuda.device_count() >= 1 else data\n\n heatmaps, distances = get_heatmap(model.module, query, gallerys, (224, 224))\n\n query_img = denormalize(query, cfg)\n for i in range(B):\n query_heatmap = cv2.applyColorMap(heatmaps[i], cv2.COLORMAP_JET)\n query_heatmap = (query_heatmap * 0.3 + query_img * 0.5).astype(np.uint8)\n\n gallery_heatmap = cv2.applyColorMap(heatmaps[B + i], cv2.COLORMAP_JET)\n\n gallery_img = denormalize(gallerys[i], cfg)\n gallery_heatmap = (gallery_heatmap * 0.3 + gallery_img * 0.5).astype(np.uint8)\n\n heatmap = np.concatenate((query_heatmap, gallery_heatmap), axis=1)\n\n plt.imshow(heatmap)\n plt.savefig('heatmaps/{}/{}_{}_{}_{}.png'.format(args.id, distances[i].item(), i, camids[i], pids[i]==pid))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.concatenate", "torch.min", "torch.max", "torch.no_grad", "torch.cuda.device_count", "matplotlib.pyplot.imshow", "torch.nn.DataParallel", "torch.sum" ] ]
BUPTJinZhang/TCTS_CNN
[ "945f728654367d460800971fd1bbb9c1d49db734" ]
[ "nets/TrajectoryletNet_3dpw.py" ]
[ "import tensorflow as tf\nfrom layers.Trajectoryblock import TrajBlock as TB\nfrom layers.Trajectoryblock import TrajBlock_2str as TB_str\nimport pdb\nimport numpy as np\n\n\ndef TrajNet(images, keep_prob, seq_length, input_length, stacklength, num_hidden, filter_size):\n with tf.variable_scope('TrajNet', reuse=False):\n print('TrajectoryletNet_final')\n # print 'is_training', is_training\n h = images[:, 0:seq_length, :, :]\n gt_images = images[:, seq_length:]\n dims = gt_images.shape[-1]\n inputs = h\n inputs = tf.transpose(h, [0, 2, 3, 1])\n\n out = []\n loss = 0\n inputs = tf.layers.conv2d(inputs, num_hidden[0], 1, padding='same', activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='h0')\n # p = inputs.numpy().copy()\n for i in range(stacklength):\n inputs = TB('TrajBlock' + str(i), filter_size, num_hidden, keep_prob)(inputs)\n\n out = tf.layers.conv2d(inputs, seq_length - input_length, filter_size, padding='same',\n activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='trajout_conv')\n\n out = tf.layers.conv2d(out, seq_length - input_length, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='trajout_conv1')\n\n # pdb.set_tracie()\n out = tf.transpose(out, [0, 3, 1, 2])\n\n # loss\n gen_images = out\n loss += tf.reduce_mean(tf.norm(gen_images - gt_images, axis=3, keep_dims=True, name='normal'))\n return [gen_images, loss]\n\n\ndef TrajNet_2str(images, images_v, keep_prob, seq_length, input_length, stacklength, num_hidden, filter_size):\n with tf.variable_scope('TrajNet', reuse=False):\n print('TrajectoryletNet_final')\n # print 'is_training', is_training\n # =================POSITION PROCESSING======================\n h = images[:, 0:seq_length, :, :]\n gt_images = images[:, seq_length:]\n dims = gt_images.shape[-1]\n inputs = h\n inputs = tf.transpose(h, [0, 2, 3, 1])\n # ==========================================================\n # =================VELOCITY PROCESSING======================\n h_v = images_v[:, 0:seq_length, :, :]\n # gt_images_v = images_v[:, seq_length:]\n # dims = gt_images.shape[-1]\n inputs_v = h_v\n inputs_v = tf.transpose(h_v, [0, 2, 3, 1])\n # ==========================================================\n\n out = []\n loss = 0\n inputs = tf.layers.conv2d(inputs, num_hidden[0], 1, padding='same', activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='h0')\n\n inputs_v = tf.layers.conv2d(inputs_v, num_hidden[0], 1, padding='same', activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='h0_v')\n\n # p = inputs.numpy().copy()\n\n inputs = TB('TrajBlock1', filter_size, num_hidden, keep_prob)(inputs)\n inputs = tf.layers.conv2d(inputs, num_hidden[0], filter_size, padding='same', activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='h1')\n inputs = TB('TrajBlock2', filter_size, num_hidden, keep_prob)(inputs)\n inputs_v = TB_str('TrajBlock1', filter_size, num_hidden, keep_prob)(inputs_v)\n inputs_v = tf.layers.conv2d(inputs_v, num_hidden[0], filter_size, padding='same', activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='h1_v')\n inputs_v = TB_str('TrajBlock2', filter_size, num_hidden, keep_prob)(inputs_v)\n\n out = tf.layers.conv2d(inputs, seq_length - input_length, filter_size, padding='same',\n activation=tf.nn.leaky_relu,\n 
kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='trajout_conv')\n\n out = tf.layers.conv2d(out, seq_length - input_length, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='trajout_conv1')\n # ====================================================================================\n out_v = tf.layers.conv2d(inputs_v, seq_length - input_length, filter_size, padding='same',\n activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='trajout_conv_v')\n\n out_v = tf.layers.conv2d(out_v, seq_length - input_length, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='trajout_conv1_v')\n # ==============================================================================\n out_fromv = tf.zeros(out_v.shape)\n tem_0 = inputs[:, :, :, 9] + out_v[:, :, :, 0]\n tem_1 = tem_0 + out_v[:, :, :, 1]\n tem_2 = tem_1 + out_v[:, :, :, 2]\n tem_3 = tem_2 + out_v[:, :, :, 3]\n tem_4 = tem_3 + out_v[:, :, :, 4]\n tem_5 = tem_4 + out_v[:, :, :, 5]\n tem_6 = tem_5 + out_v[:, :, :, 6]\n tem_7 = tem_6 + out_v[:, :, :, 7]\n tem_8 = tem_7 + out_v[:, :, :, 8]\n tem_9 = tem_8 + out_v[:, :, :, 9]\n tem_10 = tem_9 + out_v[:, :, :, 10]\n tem_11 = tem_10 + out_v[:, :, :, 11]\n tem_12 = tem_11 + out_v[:, :, :, 12]\n tem_13 = tem_12 + out_v[:, :, :, 13]\n tem_14 = tem_13 + out_v[:, :, :, 14]\n tem_15 = tem_14 + out_v[:, :, :, 15]\n tem_16 = tem_15 + out_v[:, :, :, 16]\n tem_17 = tem_16 + out_v[:, :, :, 17]\n tem_18 = tem_17 + out_v[:, :, :, 18]\n tem_19 = tem_18 + out_v[:, :, :, 19]\n tem_20 = tem_19 + out_v[:, :, :, 20]\n tem_21 = tem_20 + out_v[:, :, :, 21]\n tem_22 = tem_21 + out_v[:, :, :, 22]\n tem_23 = tem_22 + out_v[:, :, :, 23]\n tem_24 = tem_23 + out_v[:, :, :, 24]\n tem_25 = tem_24 + out_v[:, :, :, 25]\n tem_26 = tem_25 + out_v[:, :, :, 26]\n tem_27 = tem_26 + out_v[:, :, :, 27]\n tem_28 = tem_27 + out_v[:, :, :, 28]\n tem_29 = tem_28 + out_v[:, :, :, 19]\n out_fromv = tf.stack([tem_0, tem_1, tem_2, tem_3, tem_4, tem_5, tem_6, tem_7, tem_8, tem_9, tem_10,\n tem_11, tem_12, tem_13, tem_14, tem_15, tem_16, tem_17, tem_18, tem_19,\n tem_20, tem_21, tem_22, tem_23, tem_24, tem_25, tem_26, tem_27, tem_28, tem_29], axis=3)\n # =====================================================\n tem_p0 = out[:, :, :, 0]\n tem_v0 = out_fromv[:, :, :, 0]\n tem_p0 = tf.expand_dims(tem_p0, axis=3)\n tem_v0 = tf.expand_dims(tem_v0, axis=3)\n y_0 = tf.concat([tem_p0, tem_v0], axis=3)\n y_0 = tf.layers.conv2d(y_0, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat0')\n\n tem_p1 = out[:, :, :, 1]\n tem_v1 = out_fromv[:, :, :, 1]\n tem_p1 = tf.expand_dims(tem_p1, axis=3)\n tem_v1 = tf.expand_dims(tem_v1, axis=3)\n y_1 = tf.concat([tem_p1, tem_v1], axis=3)\n y_1 = tf.layers.conv2d(y_1, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat1')\n\n tem_p2 = out[:, :, :, 2]\n tem_v2 = out_fromv[:, :, :, 2]\n tem_p2 = tf.expand_dims(tem_p2, axis=3)\n tem_v2 = tf.expand_dims(tem_v2, axis=3)\n y_2 = tf.concat([tem_p2, tem_v2], axis=3)\n y_2 = tf.layers.conv2d(y_2, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat2')\n\n tem_p3 = out[:, :, :, 3]\n tem_v3 = out_fromv[:, :, :, 3]\n tem_p3 = tf.expand_dims(tem_p3, axis=3)\n tem_v3 = tf.expand_dims(tem_v3, axis=3)\n y_3 = tf.concat([tem_p3, tem_v3], axis=3)\n y_3 = 
tf.layers.conv2d(y_3, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat3')\n\n tem_p4 = out[:, :, :, 4]\n tem_v4 = out_fromv[:, :, :, 4]\n tem_p4 = tf.expand_dims(tem_p4, axis=3)\n tem_v4 = tf.expand_dims(tem_v4, axis=3)\n y_4 = tf.concat([tem_p4, tem_v4], axis=3)\n y_4 = tf.layers.conv2d(y_4, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat4')\n\n tem_p5 = out[:, :, :, 5]\n tem_v5 = out_fromv[:, :, :, 5]\n tem_p5 = tf.expand_dims(tem_p5, axis=3)\n tem_v5 = tf.expand_dims(tem_v5, axis=3)\n y_5 = tf.concat([tem_p5, tem_v5], axis=3)\n y_5 = tf.layers.conv2d(y_5, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat5')\n\n tem_p6 = out[:, :, :, 6]\n tem_v6 = out_fromv[:, :, :, 6]\n tem_p6 = tf.expand_dims(tem_p6, axis=3)\n tem_v6 = tf.expand_dims(tem_v6, axis=3)\n y_6 = tf.concat([tem_p6, tem_v6], axis=3)\n y_6 = tf.layers.conv2d(y_6, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat6')\n\n tem_p7 = out[:, :, :, 7]\n tem_v7 = out_fromv[:, :, :, 7]\n tem_p7 = tf.expand_dims(tem_p7, axis=3)\n tem_v7 = tf.expand_dims(tem_v7, axis=3)\n y_7 = tf.concat([tem_p7, tem_v7], axis=3)\n y_7 = tf.layers.conv2d(y_7, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat7')\n\n tem_p8 = out[:, :, :, 8]\n tem_v8 = out_fromv[:, :, :, 8]\n tem_p8 = tf.expand_dims(tem_p8, axis=3)\n tem_v8 = tf.expand_dims(tem_v8, axis=3)\n y_8 = tf.concat([tem_p8, tem_v8], axis=3)\n y_8 = tf.layers.conv2d(y_8, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat8')\n\n tem_p9 = out[:, :, :, 9]\n tem_v9 = out_fromv[:, :, :, 9]\n tem_p9 = tf.expand_dims(tem_p9, axis=3)\n tem_v9 = tf.expand_dims(tem_v9, axis=3)\n y_9 = tf.concat([tem_p9, tem_v9], axis=3)\n y_9 = tf.layers.conv2d(y_9, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat9')\n\n tem_p10 = out[:, :, :, 10]\n tem_v10 = out_fromv[:, :, :, 10]\n tem_p10 = tf.expand_dims(tem_p10, axis=3)\n tem_v10 = tf.expand_dims(tem_v10, axis=3)\n y_10 = tf.concat([tem_p10, tem_v10], axis=3)\n y_10 = tf.layers.conv2d(y_10, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat10')\n\n tem_p11 = out[:, :, :, 11]\n tem_v11 = out_fromv[:, :, :, 11]\n tem_p11 = tf.expand_dims(tem_p11, axis=3)\n tem_v11 = tf.expand_dims(tem_v11, axis=3)\n y_11 = tf.concat([tem_p11, tem_v11], axis=3)\n y_11 = tf.layers.conv2d(y_11, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat11')\n\n tem_p12 = out[:, :, :, 12]\n tem_v12 = out_fromv[:, :, :, 12]\n tem_p12 = tf.expand_dims(tem_p12, axis=3)\n tem_v12 = tf.expand_dims(tem_v12, axis=3)\n y_12 = tf.concat([tem_p12, tem_v12], axis=3)\n y_12 = tf.layers.conv2d(y_12, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat12')\n\n tem_p13 = out[:, :, :, 13]\n tem_v13 = out_fromv[:, :, :, 13]\n tem_p13 = tf.expand_dims(tem_p13, axis=3)\n tem_v13 = tf.expand_dims(tem_v13, axis=3)\n y_13 = tf.concat([tem_p13, tem_v13], axis=3)\n y_13 = tf.layers.conv2d(y_13, 1, 1, padding='same', activation=None,\n 
kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat13')\n\n tem_p14 = out[:, :, :, 14]\n tem_v14 = out_fromv[:, :, :, 14]\n tem_p14 = tf.expand_dims(tem_p14, axis=3)\n tem_v14 = tf.expand_dims(tem_v14, axis=3)\n y_14 = tf.concat([tem_p14, tem_v14], axis=3)\n y_14 = tf.layers.conv2d(y_14, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat14')\n\n tem_p15 = out[:, :, :, 15]\n tem_v15 = out_fromv[:, :, :, 15]\n tem_p15 = tf.expand_dims(tem_p15, axis=3)\n tem_v15 = tf.expand_dims(tem_v15, axis=3)\n y_15 = tf.concat([tem_p15, tem_v15], axis=3)\n y_15 = tf.layers.conv2d(y_15, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat15')\n\n tem_p16 = out[:, :, :, 16]\n tem_v16 = out_fromv[:, :, :, 16]\n tem_p16 = tf.expand_dims(tem_p16, axis=3)\n tem_v16 = tf.expand_dims(tem_v16, axis=3)\n y_16 = tf.concat([tem_p16, tem_v16], axis=3)\n y_16 = tf.layers.conv2d(y_16, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat16')\n\n tem_p17 = out[:, :, :, 17]\n tem_v17 = out_fromv[:, :, :, 17]\n tem_p17 = tf.expand_dims(tem_p17, axis=3)\n tem_v17 = tf.expand_dims(tem_v17, axis=3)\n y_17 = tf.concat([tem_p17, tem_v17], axis=3)\n y_17 = tf.layers.conv2d(y_17, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat17')\n\n tem_p18 = out[:, :, :, 18]\n tem_v18 = out_fromv[:, :, :, 18]\n tem_p18 = tf.expand_dims(tem_p18, axis=3)\n tem_v18 = tf.expand_dims(tem_v18, axis=3)\n y_18 = tf.concat([tem_p18, tem_v18], axis=3)\n y_18 = tf.layers.conv2d(y_18, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat18')\n\n tem_p19 = out[:, :, :, 19]\n tem_v19 = out_fromv[:, :, :, 19]\n tem_p19 = tf.expand_dims(tem_p19, axis=3)\n tem_v19 = tf.expand_dims(tem_v19, axis=3)\n y_19 = tf.concat([tem_p17, tem_v19], axis=3)\n y_19 = tf.layers.conv2d(y_19, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat19')\n\n tem_p20 = out[:, :, :, 20]\n tem_v20 = out_fromv[:, :, :, 20]\n tem_p20 = tf.expand_dims(tem_p20, axis=3)\n tem_v20 = tf.expand_dims(tem_v20, axis=3)\n y_20 = tf.concat([tem_p20, tem_v20], axis=3)\n y_20 = tf.layers.conv2d(y_20, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat20')\n\n tem_p21 = out[:, :, :, 21]\n tem_v21 = out_fromv[:, :, :, 21]\n tem_p21 = tf.expand_dims(tem_p21, axis=3)\n tem_v21 = tf.expand_dims(tem_v21, axis=3)\n y_21 = tf.concat([tem_p21, tem_v21], axis=3)\n y_21 = tf.layers.conv2d(y_21, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat21')\n\n tem_p22 = out[:, :, :, 22]\n tem_v22 = out_fromv[:, :, :, 22]\n tem_p22 = tf.expand_dims(tem_p22, axis=3)\n tem_v22 = tf.expand_dims(tem_v22, axis=3)\n y_22 = tf.concat([tem_p22, tem_v22], axis=3)\n y_22 = tf.layers.conv2d(y_22, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat22')\n\n tem_p23 = out[:, :, :, 23]\n tem_v23 = out_fromv[:, :, :, 23]\n tem_p23 = tf.expand_dims(tem_p23, axis=3)\n tem_v23 = tf.expand_dims(tem_v23, axis=3)\n y_23 = tf.concat([tem_p23, tem_v23], axis=3)\n y_23 = tf.layers.conv2d(y_23, 1, 1, padding='same', activation=None,\n 
kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat23')\n\n tem_p24 = out[:, :, :, 24]\n tem_v24 = out_fromv[:, :, :, 24]\n tem_p24 = tf.expand_dims(tem_p24, axis=3)\n tem_v24 = tf.expand_dims(tem_v24, axis=3)\n y_24 = tf.concat([tem_p24, tem_v24], axis=3)\n y_24 = tf.layers.conv2d(y_24, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat24')\n\n tem_p25 = out[:, :, :, 25]\n tem_v25 = out_fromv[:, :, :, 25]\n tem_p25 = tf.expand_dims(tem_p25, axis=3)\n tem_v25 = tf.expand_dims(tem_v25, axis=3)\n y_25 = tf.concat([tem_p25, tem_v25], axis=3)\n y_25 = tf.layers.conv2d(y_25, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat25')\n\n tem_p26 = out[:, :, :, 26]\n tem_v26 = out_fromv[:, :, :, 26]\n tem_p26 = tf.expand_dims(tem_p26, axis=3)\n tem_v26 = tf.expand_dims(tem_v26, axis=3)\n y_26 = tf.concat([tem_p26, tem_v26], axis=3)\n y_26 = tf.layers.conv2d(y_26, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat26')\n\n tem_p27 = out[:, :, :, 27]\n tem_v27 = out_fromv[:, :, :, 27]\n tem_p27 = tf.expand_dims(tem_p27, axis=3)\n tem_v27 = tf.expand_dims(tem_v27, axis=3)\n y_27 = tf.concat([tem_p27, tem_v27], axis=3)\n y_27 = tf.layers.conv2d(y_27, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat27')\n\n tem_p28 = out[:, :, :, 28]\n tem_v28 = out_fromv[:, :, :, 28]\n tem_p28 = tf.expand_dims(tem_p28, axis=3)\n tem_v28 = tf.expand_dims(tem_v28, axis=3)\n y_28 = tf.concat([tem_p28, tem_v28], axis=3)\n y_28 = tf.layers.conv2d(y_28, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat28')\n\n tem_p29 = out[:, :, :, 29]\n tem_v29 = out_fromv[:, :, :, 29]\n tem_p29 = tf.expand_dims(tem_p29, axis=3)\n tem_v29 = tf.expand_dims(tem_v29, axis=3)\n y_29 = tf.concat([tem_p29, tem_v29], axis=3)\n y_29 = tf.layers.conv2d(y_29, 1, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='concat29')\n\n # for j in range(seq_length - input_length):\n # tem = out[:, :, :, j]\n # tem_v = out_fromv[:, :, :, j]\n # tem = tf.expand_dims(tem, axis=3)\n # tem_v = tf.expand_dims(tem_v, axis=3)\n # y_0 = tf.concat([tem, tem_v], axis=3)\n # if j == 0:\n # z = y_0\n # else:\n z = tf.concat([y_0, y_1, y_2, y_3, y_4, y_5, y_6, y_7, y_8, y_9,\n y_10, y_11, y_12, y_13, y_14, y_15, y_16, y_17, y_18, y_19, y_20, y_21,\n y_22, y_23, y_24, y_25, y_26, y_27, y_28, y_29], axis=3)\n\n z = TB('TrajBlockout', filter_size, num_hidden, keep_prob)(z)\n z = tf.layers.conv2d(z, num_hidden[0], filter_size, padding='same', activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='2str_out_conv0')\n z = TB('TrajBlockout_1', filter_size, num_hidden, keep_prob)(z)\n # z = TB('TrajBlockout_2', filter_size, num_hidden, keep_prob)(z)\n z = tf.layers.conv2d(z, seq_length - input_length, filter_size, padding='same',\n activation=tf.nn.leaky_relu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='2strout_conv')\n\n z = tf.layers.conv2d(z, seq_length - input_length, 1, padding='same', activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name='2strout_conv1')\n\n\n\n\n # pdb.set_tracie()\n z = tf.transpose(z, [0, 3, 1, 2])\n # out = tf.transpose(out, [0, 3, 1, 2])\n\n # loss\n gen_images = z\n 
loss += tf.reduce_mean(tf.norm(gen_images - gt_images, axis=3, keep_dims=True, name='normal'))\n return [gen_images, loss]\n" ]
[ [ "tensorflow.zeros", "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.transpose", "tensorflow.norm", "tensorflow.variable_scope", "tensorflow.stack" ] ]
kaikai581/petals-to-the-metal
[ "ec629c954bc46fcc4641d005415485ddf3c04498" ]
[ "utilities/helpers.py" ]
[ "#!/usr/bin/env python\n\nfrom torchvision import datasets\nimport copy\nimport git\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport time\nimport torch\nimport torchvision\n\n# redirect output\nmatplotlib.use('Agg')\n\ndef get_git_root(path):\n '''\n Get the root directory of this project.\n '''\n git_repo = git.Repo(path, search_parent_directories=True)\n git_root = git_repo.git.rev_parse('--show-toplevel')\n return git_root\n\n# file scope variables\ndata_dir = os.path.join(get_git_root(__file__), 'data/imagefolder-jpeg-224x224')\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ndef easy_savedataframe(dataframe, outfpn):\n '''\n A helper function for saving dataframes easily.\n Simple specify the full pathname of the output, and folders will be created if not exist.\n '''\n outfp = os.path.dirname(outfpn)\n if not os.path.exists(outfp):\n os.makedirs(outfp)\n dataframe.to_csv(outfpn, index=False)\n\ndef easy_savefig(outfpn):\n '''\n A helper function for saving figures easily.\n Simple specify the full pathname of the output, and folders will be created if not exist.\n '''\n outfp = os.path.dirname(outfpn)\n if not os.path.exists(outfp):\n os.makedirs(outfp)\n plt.savefig(outfpn)\n plt.close()\n\ndef easy_savemodel(model, outfpn):\n '''\n A helper function for saving model weights easily.\n Simple specify the full pathname of the output, and folders will be created if not exist.\n '''\n outfp = os.path.dirname(outfpn)\n if not os.path.exists(outfp):\n os.makedirs(outfp)\n torch.save(model.state_dict(), outfpn)\n\ndef get_image_size(data_transforms):\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x]) for x in ['train', 'val']}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']}\n # Get a batch of training data\n inputs, _ = next(iter(dataloaders['train']))\n return inputs.cpu().data[0].size()[2]\n\ndef get_nclasses(data_transforms):\n '''\n Return the number of classes given the PyTorch data transform.\n '''\n # get information from data_transforms\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n for x in ['train', 'val']}\n class_names = image_datasets['train'].classes\n\n return len(class_names)\n\ndef imshow(inp, title=None):\n '''Imshow for Tensor.'''\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n # since I am using non-interactive backend, the line below causes warnings\n # ref: https://stackoverflow.com/questions/13336823/matplotlib-python-error/13336944\n # plt.pause(0.001) # pause a bit so that plots are updated\n\ndef model_size_all(model):\n '''\n Print the number of all parameters of a model.\n '''\n return sum(p.numel() for p in model.parameters())\n\ndef model_size_trainable(model):\n '''\n Print the number of trainable parameters of a model.\n '''\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef train_model(model, criterion, optimizer, scheduler, data_transforms, num_epochs=25, start_epoch=0):\n '''A helper function to train a model.'''\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n # containers for training information\n img_size = 
get_image_size(data_transforms)\n df_fpns = {'train': 'epoch_info/{}x{}/train_epoch_info.csv'.format(img_size, img_size), 'val': 'epoch_info/{}x{}/validation_epoch_info.csv'.format(img_size, img_size)}\n column_names = ['epoch', 'start_epoch', 'loss', 'accuracy', 'learning_rate', 'train_time', 'optimizer', 'scheduler_step_size']\n epoch_losses = dict()\n for cat in ['train', 'val']:\n outfpn = df_fpns[cat]\n if os.path.exists(outfpn):\n epoch_losses[cat] = pd.read_csv(outfpn)\n else:\n epoch_losses[cat] = pd.DataFrame(columns=column_names)\n # epoch_losses[cat] = epoch_losses[cat].set_index(column_names[:2])\n\n # get information from data_transforms\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n for x in ['train', 'val']}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4)\n for x in ['train', 'val']}\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n for epoch in range(num_epochs):\n epoch_start_time = time.time()\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n # For an understanding of the following line, refer to\n # https://stackoverflow.com/questions/61092523/what-is-running-loss-in-pytorch-and-how-is-it-calculated\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n if phase == 'train' and scheduler:\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n # store the training information\n row = [epoch+start_epoch+1, start_epoch, epoch_loss, epoch_acc.item(), optimizer.param_groups[0]['lr'], time.time()-epoch_start_time, 'SGD' if scheduler else 'ADAM', scheduler.step_size if scheduler else -1]\n epoch_losses[phase].loc[len(epoch_losses[phase])] = row\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # save training information\n easy_savedataframe(epoch_losses['train'], df_fpns['train'])\n easy_savedataframe(epoch_losses['val'], df_fpns['val'])\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model\n\ndef visualize_images(data_transforms):\n '''\n Visualize a few images.\n '''\n # get information from data_transforms\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n for x in ['train', 'val']}\n dataloaders = 
{x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4)\n for x in ['train', 'val']}\n class_names = image_datasets['train'].classes\n \n # Get a batch of training data\n train_imgs, classes = next(iter(dataloaders['train']))\n\n # Make a grid from batch\n out = torchvision.utils.make_grid(train_imgs)\n\n # Number of images shown is determined by the batch_size argument\n # in the construction of Dataloader.\n imshow(out, title=[class_names[x] for x in classes])\n easy_savefig(outfpn='plots/preview_data.png')\n\ndef visualize_model(model, data_transforms, num_images=6):\n '''\n Apply trained model to several figures in the validation sample.\n '''\n was_training = model.training\n model.eval()\n images_so_far = 0\n plt.figure()\n\n # get information from data_transforms\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])\n for x in ['train', 'val']}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4)\n for x in ['train', 'val']}\n class_names = image_datasets['train'].classes\n\n with torch.no_grad():\n for _, (inputs, labels) in enumerate(dataloaders['val']):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n\n for j in range(inputs.size()[0]):\n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: {}'.format(class_names[preds[j]]))\n imshow(inputs.cpu().data[j])\n\n if images_so_far == num_images:\n model.train(mode=was_training)\n return\n model.train(mode=was_training)" ]
[ [ "matplotlib.use", "matplotlib.pyplot.subplot", "numpy.array", "torch.max", "matplotlib.pyplot.savefig", "torch.no_grad", "matplotlib.pyplot.title", "pandas.DataFrame", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "torch.sum", "torch.cuda.is_available", "torch.utils.data.DataLoader", "numpy.clip", "pandas.read_csv", "torch.set_grad_enabled", "matplotlib.pyplot.imshow" ] ]
slamajakub/visnav-py
[ "872363a8f115ae2dc8966f7c890891a41cb60b16" ]
[ "src/api-server.py" ]
[ "import math\r\nimport socket\r\nimport json\r\nfrom datetime import datetime\r\nfrom json import JSONDecodeError\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport quaternion\r\nimport sys\r\nimport pytz\r\n\r\nfrom algo import tools\r\nfrom algo.base import AlgorithmBase\r\nfrom algo.keypoint import KeypointAlgo\r\nfrom algo.model import SystemModel\r\nfrom algo.tools import PositioningException\r\nfrom batch1 import get_system_model\r\nfrom missions.didymos import DidymosPrimary, DidymosSystemModel, DidymosSecondary\r\nfrom render.render import RenderEngine\r\n\r\nfrom settings import *\r\nfrom testloop import TestLoop\r\n\r\n\r\nclass ApiServer:\r\n class QuitException(Exception):\r\n pass\r\n\r\n def __init__(self, mission, hires=True, addr='127.0.0.1', port=50007):\r\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._addr = addr\r\n self._port = port\r\n self._sock.bind(('', port))\r\n self._sock.listen(1)\r\n self._sock.settimeout(5)\r\n\r\n self._mission = mission\r\n self._sm = sm = get_system_model(mission, hires)\r\n self._target_d2 = '2' in mission\r\n self._renderer = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=16 if hires else 0)\r\n self._renderer.set_frustum(sm.cam.x_fov, sm.cam.y_fov, sm.min_altitude*.1, sm.max_distance)\r\n if isinstance(sm, DidymosSystemModel):\r\n self.asteroids = [\r\n DidymosPrimary(hi_res_shape_model=hires),\r\n DidymosSecondary(hi_res_shape_model=hires),\r\n ]\r\n else:\r\n self.asteroids = [sm.asteroid]\r\n\r\n self._obj_idxs = [\r\n self._renderer.load_object(\r\n ast.hires_target_model_file if hires else ast.target_model_file,\r\n smooth=ast.render_smooth_faces)\r\n for ast in self.asteroids]\r\n\r\n self._wireframe_obj_idxs = [\r\n self._renderer.load_object(os.path.join(BASE_DIR, 'data/ryugu+tex-%s-100.obj'%ast), wireframe=True)\r\n for ast in ('d1', 'd2')]\r\n\r\n self._logpath = os.path.join(LOG_DIR, 'api-server', self._mission)\r\n os.makedirs(self._logpath, exist_ok=True)\r\n\r\n re = RenderEngine(sm.view_width, sm.view_height, antialias_samples=0)\r\n oi = re.load_object(sm.asteroid.target_model_file, smooth=sm.asteroid.render_smooth_faces)\r\n self._keypoint = KeypointAlgo(sm, re, oi)\r\n\r\n\r\n def _render(self, params):\r\n time = params[0]\r\n sun_ast_v = tools.normalize_v(np.array(params[1][:3]))\r\n d1_v = np.array(params[2][:3])\r\n d1_q = np.quaternion(*(params[2][3:7]))\r\n d2_v = np.array(params[3][:3])\r\n d2_q = np.quaternion(*(params[3][3:7]))\r\n sc_v = np.array(params[4][:3])\r\n sc_q = np.quaternion(*(params[4][3:7]))\r\n\r\n d1, d2 = self.asteroids\r\n q = SystemModel.sc2gl_q.conj() * sc_q.conj()\r\n rel_rot_q = np.array([q * d1_q * d1.ast2sc_q.conj(), q * d2_q * d2.ast2sc_q.conj()])\r\n rel_pos_v = np.array([tools.q_times_v(q, d1_v - sc_v), tools.q_times_v(q, d2_v - sc_v)]) * 0.001\r\n light_v = tools.q_times_v(q, sun_ast_v)\r\n\r\n img = TestLoop.render_navcam_image_static(self._sm, self._renderer, self._obj_idxs,\r\n rel_pos_v, rel_rot_q, light_v)\r\n\r\n date = datetime.fromtimestamp(time, pytz.utc) # datetime.now()\r\n fname = os.path.join(self._logpath, date.isoformat()[:-6].replace(':', '')) + '.png'\r\n cv2.imwrite(fname, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n return fname\r\n\r\n def _get_pose(self, params):\r\n # load target navcam image\r\n fname = params[0]\r\n img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)\r\n\r\n # asteroid position relative to the sun\r\n self._sm.asteroid.real_position = np.array(params[1][:3])\r\n\r\n # set asteroid orientation\r\n d1, d2 = 
self.asteroids\r\n ast = d2 if self._target_d2 else d1\r\n init_ast_q = np.quaternion(*(params[3 if self._target_d2 else 2][3:7])).normalized()\r\n self._sm.asteroid_q = init_ast_q * ast.ast2sc_q.conj()\r\n\r\n # set spacecraft orientation\r\n init_sc_q = np.quaternion(*(params[4][3:7])).normalized()\r\n self._sm.spacecraft_q = init_sc_q\r\n\r\n # sc-asteroid relative location\r\n ast_v = np.array(params[3 if self._target_d2 else 2][:3])*0.001 # relative to barycenter\r\n sc_v = np.array(params[4][:3])*0.001 # relative to barycenter\r\n init_sc_pos = tools.q_times_v(SystemModel.sc2gl_q.conj() * init_sc_q.conj(), ast_v - sc_v)\r\n self._sm.spacecraft_pos = init_sc_pos\r\n\r\n # run keypoint algo\r\n err = None\r\n try:\r\n self._keypoint.solve_pnp(img, fname[:-4], KeypointAlgo.AKAZE)\r\n except PositioningException as e:\r\n err = e\r\n\r\n if err is None:\r\n # resulting sc-ast relative orientation\r\n sc_q = self._sm.spacecraft_q\r\n rel_q = sc_q.conj() * self._sm.asteroid_q * ast.ast2sc_q\r\n\r\n # sc-ast vector in meters\r\n rel_v = tools.q_times_v(sc_q * SystemModel.sc2gl_q, np.array(self._sm.spacecraft_pos)*1000)\r\n\r\n # collect to one result list\r\n result = [list(rel_v), list(quaternion.as_float_array(rel_q))]\r\n\r\n # render a result image\r\n self._render_result([fname]\r\n + [list(np.array(self._sm.spacecraft_pos)*1000) + (result[1] if err is None else [float('nan')]*4)]\r\n + [list(np.array(init_sc_pos)*1000) + list(quaternion.as_float_array(init_sc_q.conj() * init_ast_q))])\r\n\r\n if err is not None:\r\n raise err\r\n\r\n # send back in json format\r\n return json.dumps(result)\r\n\r\n def _render_result(self, params):\r\n fname = params[0]\r\n img = cv2.imread(fname, cv2.IMREAD_COLOR)\r\n\r\n if np.all(np.logical_not(np.isnan(params[1]))):\r\n rel_pos_v = np.array(params[1][:3]) * 0.001\r\n rel_rot_q = np.quaternion(*(params[1][3:7]))\r\n color = np.array((0, 1, 0))*0.6\r\n else:\r\n rel_pos_v = np.array(params[2][:3]) * 0.001\r\n rel_rot_q = np.quaternion(*(params[2][3:7]))\r\n color = np.array((0, 0, 1))*0.6\r\n\r\n # ast_v = np.array(params[1][:3])\r\n # ast_q = np.quaternion(*(params[1][3:7]))\r\n # sc_v = np.array(params[2][:3])\r\n # sc_q = np.quaternion(*(params[2][3:7]))\r\n #\r\n ast_idx = 1 if self._target_d2 else 0\r\n ast = self.asteroids[ast_idx]\r\n # q = SystemModel.sc2gl_q.conj() * sc_q.conj()\r\n # rel_rot_q = q * ast_q * ast.ast2sc_q.conj()\r\n # rel_pos_v = tools.q_times_v(q, ast_v - sc_v) * 0.001\r\n\r\n rel_rot_q = SystemModel.sc2gl_q.conj() * rel_rot_q * ast.ast2sc_q.conj()\r\n #rel_pos_v = tools.q_times_v(SystemModel.sc2gl_q.conj(), rel_pos_v) * 0.001\r\n\r\n overlay = self._renderer.render_wireframe(self._wireframe_obj_idxs[ast_idx], rel_pos_v, rel_rot_q, color)\r\n overlay = cv2.resize(overlay, (img.shape[1], img.shape[0]))\r\n\r\n blend_coef = 0.6\r\n alpha = np.zeros(list(img.shape[:2]) + [1])\r\n alpha[np.any(overlay > 0, axis=2)] = blend_coef\r\n result = (overlay * alpha + img * (1 - alpha)).astype('uint8')\r\n\r\n fout = fname[:-4] + '-res.png'\r\n cv2.imwrite(fout, result, [cv2.IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n return fout\r\n\r\n def _handle(self, call):\r\n if len(call) == 0:\r\n return None\r\n\r\n error = False\r\n rval = False\r\n\r\n idx = call.find(' ')\r\n command = call[:idx] if idx >= 0 else call\r\n params = []\r\n try:\r\n params = json.loads(call[idx + 1:]) if idx >= 0 else []\r\n except JSONDecodeError as e:\r\n error = 'Invalid parameters: ' + str(e) + ' \"' + call[idx + 1:] + '\"'\r\n\r\n if command == 'quit':\r\n raise 
self.QuitException()\r\n\r\n ok = False\r\n last_exception = None\r\n\r\n if not error:\r\n for i in range(3):\r\n try:\r\n if command == 'render':\r\n rval = self._render(params)\r\n ok = True\r\n elif command == 'get_pose':\r\n try:\r\n rval = self._get_pose(params)\r\n ok = True\r\n except PositioningException as e:\r\n error = 'algo failed: ' + str(e)\r\n else:\r\n error = 'invalid command: ' + command\r\n break\r\n except (ValueError, TypeError) as e:\r\n error = 'invalid args: ' + str(e)\r\n break\r\n except Exception as e:\r\n print('Trying to open compute engine again because of: %s' % e)\r\n last_exception = e\r\n self._reset()\r\n if ok:\r\n break\r\n\r\n if not ok and last_exception is not None:\r\n error = 'Exception encountered: %s' % last_exception\r\n\r\n out = ' '.join((('0' if error else '1'),) + ((error,) if error else (rval,) if rval else tuple()))\r\n return out\r\n\r\n def listen(self):\r\n # main loop here\r\n print('server started, waiting for connections')\r\n since_reset = 0\r\n while True:\r\n # outer loop accepting connections (max 1)\r\n try:\r\n conn, addr = self._sock.accept()\r\n try:\r\n with conn:\r\n while True:\r\n # inner loop accepting multiple requests on same connection\r\n req = self._receive(conn)\r\n for call in req.strip(' \\n\\r\\t').split('\\n'):\r\n # in case multiple calls in one request\r\n out = self._handle(call.strip(' \\n\\r\\t'))\r\n if out is not None:\r\n out = out.strip(' \\n\\r\\t') + '\\n'\r\n conn.sendall(out.encode('utf-8'))\r\n since_reset += 1\r\n if since_reset >= 1000:\r\n self._reset()\r\n since_reset = 0\r\n except ConnectionAbortedError:\r\n print('client closed the connection')\r\n except socket.timeout:\r\n pass\r\n\r\n\r\n def _receive(self, conn):\r\n chunks = []\r\n buffer_size = 1024\r\n while True:\r\n try:\r\n chunk = conn.recv(buffer_size)\r\n chunks.append(chunk)\r\n if chunk == b'':\r\n if len(chunks) > 1:\r\n break\r\n else:\r\n raise ConnectionAbortedError()\r\n elif len(chunk) < buffer_size:\r\n break\r\n except socket.timeout:\r\n pass\r\n\r\n return (b''.join(chunks)).decode('utf-8')\r\n\r\n def close(self):\r\n self._sock.close()\r\n\r\n def _reset(self):\r\n print('Restarting computing engine to avoid memory leak [NOT IMPLEMENTED]')\r\n # CloseComputeEngine(\"localhost\", \"\")\r\n # OpenComputeEngine(\"localhost\", (\"-l\", \"srun\", \"-np\", \"1\"))\r\n # RestoreSession(\"data/default-visit.session\", 0)\r\n\r\n\r\nif __name__ == '__main__':\r\n server = ApiServer(sys.argv[1], hires=False)\r\n try:\r\n server.listen()\r\n finally:\r\n server.close()\r\n quit()\r\n" ]
[ [ "numpy.any", "numpy.quaternion", "numpy.array", "numpy.isnan" ] ]
drammock/python-quantities
[ "6ee561e918c00fec773481fa9a2c73e8e106f061" ]
[ "quantities/tests/test_methods.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom .. import units as pq\nfrom .common import TestCase\nimport numpy as np\n\nclass TestQuantityMethods(TestCase):\n\n def setUp(self):\n self.q = [[1, 2], [3, 4]] * pq.m\n\n def test_tolist(self):\n self.assertEqual(self.q.tolist(), [[1*pq.m, 2*pq.m], [3*pq.m, 4*pq.m]])\n\n def test_sum(self):\n self.assertQuantityEqual(self.q.sum(), 10*pq.m)\n self.assertQuantityEqual(self.q.sum(0), [4, 6]*pq.m)\n self.assertQuantityEqual(self.q.sum(1), [3, 7]*pq.m)\n\n def test_nansum(self):\n import numpy as np\n qnan = [[1,2], [3,4], [np.nan,np.nan]] * pq.m\n self.assertQuantityEqual(qnan.nansum(), 10*pq.m )\n self.assertQuantityEqual(qnan.nansum(0), [4,6]*pq.m )\n\n def test_fill(self):\n self.q.fill(6 * pq.ft)\n self.assertQuantityEqual(self.q, [[6, 6], [6, 6]] * pq.ft)\n self.q.fill(5)\n self.assertQuantityEqual(self.q, [[5, 5], [5, 5]] * pq.ft)\n\n def test_reshape(self):\n self.assertQuantityEqual(self.q.reshape([1,4]), [[1, 2, 3, 4]] * pq.m)\n\n def test_transpose(self):\n self.assertQuantityEqual(self.q.transpose(), [[1, 3], [2, 4]] * pq.m)\n\n def test_flatten(self):\n self.assertQuantityEqual(self.q.flatten(), [1, 2, 3, 4] * pq.m)\n\n def test_ravel(self):\n self.assertQuantityEqual(self.q.ravel(), [1, 2, 3, 4] * pq.m)\n\n def test_squeeze(self):\n self.assertQuantityEqual(\n self.q.reshape([1,4]).squeeze(),\n [1, 2, 3, 4] * pq.m\n )\n\n def test_take(self):\n self.assertQuantityEqual(self.q.take([0,1,2,3]), self.q.flatten())\n\n def test_put(self):\n q = self.q.flatten()\n q.put([0,2], [10,20]*pq.m)\n self.assertQuantityEqual(q, [10, 2, 20, 4]*pq.m)\n\n q = self.q.flatten()\n q.put([0, 2], [1, 2]*pq.mm)\n self.assertQuantityEqual(q, [0.001, 2, 0.002, 4]*pq.m)\n\n q = self.q.flatten()/pq.mm\n q.put([0, 2], [1, 2])\n self.assertQuantityEqual(q.simplified, [1, 2000, 2, 4000])\n self.assertQuantityEqual(q, [0.001, 2, 0.002, 4]*pq.m/pq.mm)\n\n q = self.q.flatten()\n self.assertRaises(ValueError, q.put, [0, 2], [4, 6] * pq.J)\n self.assertRaises(ValueError, q.put, [0, 2], [4, 6])\n\n def test_repeat(self):\n self.assertQuantityEqual(self.q.repeat(2), [1,1,2,2,3,3,4,4]*pq.m)\n\n def test_sort(self):\n q = [4, 5, 2, 3, 1, 6] * pq.m\n q.sort()\n self.assertQuantityEqual(q, [1, 2, 3, 4, 5, 6] * pq.m)\n\n def test_argsort(self):\n q = [1, 4, 5, 6, 2, 9] * pq.MeV\n self.assertQuantityEqual(q.argsort(), [0, 4, 1, 2, 3, 5])\n\n def test_diagonal(self):\n q = [[1, 2, 3], [1, 2, 3], [1, 2, 3]] * pq.m\n self.assertQuantityEqual(q.diagonal(offset=1), [2, 3] * pq.m)\n\n def test_compress(self):\n self.assertQuantityEqual(\n self.q.compress([False, True], axis=0),\n [[3, 4]] * pq.m\n )\n self.assertQuantityEqual(\n self.q.compress([False, True], axis=1),\n [[2], [4]] * pq.m\n )\n\n def test_searchsorted(self):\n self.assertQuantityEqual(\n self.q.flatten().searchsorted([1.5, 2.5] * pq.m),\n [1, 2]\n )\n\n self.assertRaises(ValueError, self.q.flatten().searchsorted, [1.5, 2.5])\n\n def test_nonzero(self):\n q = [1, 0, 5, 6, 0, 9] * pq.m\n self.assertQuantityEqual(q.nonzero()[0], [0, 2, 3, 5])\n\n def methodWithOut(self, name, result, q=None, *args, **kw):\n import numpy as np\n from .. 
import Quantity\n\n        if q is None:\n            q = self.q\n\n        self.assertQuantityEqual(\n            getattr(q.copy(), name)(*args, **kw),\n            result\n        )\n        if isinstance(result, Quantity):\n            # deliberately using an incompatible unit\n            out = Quantity(np.empty_like(result.magnitude), pq.s, copy=False)\n        else:\n            out = np.empty_like(result)\n        ret = getattr(q.copy(), name)(*args, out=out, **kw)\n        self.assertQuantityEqual(\n            ret,\n            result\n        )\n        # returned array should be the same as out\n        self.assertEqual(id(ret), id(out))\n        # but the units had to be adjusted\n        if isinstance(result, Quantity):\n            self.assertEqual(ret.units, result.units)\n        else:\n            self.assertEqual(\n                getattr(ret, 'units', pq.dimensionless),\n                pq.dimensionless\n            )\n\n\n    def test_max(self):\n        self.methodWithOut('max', 4 * pq.m)\n        self.methodWithOut('max', [3, 4] * pq.m, axis=0)\n        self.methodWithOut('max', [2, 4] * pq.m, axis=1)\n\n    def test_nanmax(self):\n        q = np.append(self.q, np.nan) * self.q.units\n        self.assertQuantityEqual(q.nanmax(), 4*pq.m)\n\n    def test_argmax(self):\n        self.assertQuantityEqual(self.q.argmax(), 3)\n        self.assertQuantityEqual(self.q.argmax(axis=0), [1, 1])\n        self.assertQuantityEqual(self.q.argmax(axis=1), [1, 1])\n        # apparently, numpy's argmax does not return the same object when out is specified.\n        # instead, we test here for shared data\n        out = np.r_[0, 0]\n        ret = self.q.argmax(axis=0, out=out)\n        self.assertQuantityEqual(ret, [1, 1])\n        self.assertEqual(ret.ctypes.data, out.ctypes.data)\n\n    def test_nanargmax(self):\n        # assert on the nan-padded copy so the nan handling is actually exercised\n        q = np.append(self.q, np.nan) * self.q.units\n        self.assertEqual(q.nanargmax(), 3)\n\n    def test_min(self):\n        self.methodWithOut('min', 1 * pq.m)\n        self.methodWithOut('min', [1, 2] * pq.m, axis=0)\n        self.methodWithOut('min', [1, 3] * pq.m, axis=1)\n\n    def test_nanmin(self):\n        q = np.append(self.q, np.nan) * self.q.units\n        self.assertQuantityEqual(q.nanmin(), 1*pq.m)\n\n    def test_argmin(self):\n        self.assertQuantityEqual(self.q.argmin(), 0)\n        self.assertQuantityEqual(self.q.argmin(axis=0), [0, 0])\n        self.assertQuantityEqual(self.q.argmin(axis=1), [0, 0])\n        # apparently, numpy's argmin does not return the same object when out is specified.\n        # instead, we test here for shared data\n        out = np.r_[2, 2]\n        ret = self.q.argmin(axis=0, out=out)\n        self.assertQuantityEqual(ret, [0, 0])\n        self.assertEqual(ret.ctypes.data, out.ctypes.data)\n\n    def test_nanargmin(self):\n        # assert on the nan-padded copy so the nan handling is actually exercised\n        q = np.append(self.q, np.nan) * self.q.units\n        self.assertEqual(q.nanargmin(), 0)\n\n    def test_ptp(self):\n        self.methodWithOut('ptp', 3 * pq.m)\n        self.methodWithOut('ptp', [2, 2] * pq.m, axis=0)\n        self.methodWithOut('ptp', [1, 1] * pq.m, axis=1)\n\n    def test_clip(self):\n        self.methodWithOut(\n            'clip',\n            [[1, 2], [2, 2]] * pq.m,\n            max=2*pq.m,\n        )\n        self.methodWithOut(\n            'clip',\n            [[3, 3], [3, 4]] * pq.m,\n            min=3*pq.m,\n        )\n        self.methodWithOut(\n            'clip',\n            [[2, 2], [3, 3]] * pq.m,\n            min=2*pq.m, max=3*pq.m\n        )\n        self.assertRaises(ValueError, self.q.clip, pq.J)\n        self.assertRaises(ValueError, self.q.clip, 1)\n\n    def test_round(self):\n        q = [1, 1.33, 5.67, 22] * pq.m\n        self.methodWithOut(\n            'round',\n            [1, 1, 6, 22] * pq.m,\n            q=q,\n            decimals=0,\n        )\n        self.methodWithOut(\n            'round',\n            [0, 0, 10, 20] * pq.m,\n            q=q,\n            decimals=-1,\n        )\n        self.methodWithOut(\n            'round',\n            [1, 1.3, 5.7, 22] * pq.m,\n            q=q,\n            decimals=1,\n        )\n\n    def test_trace(self):\n        self.methodWithOut('trace', (1+4) * pq.m)\n\n    def test_cumsum(self):\n        self.methodWithOut('cumsum', [1, 3, 6, 10] * pq.m)\n        self.methodWithOut('cumsum', [[1, 2], [4, 6]] * pq.m, axis=0)\n        
self.methodWithOut('cumsum', [[1, 3], [3, 7]] * pq.m, axis=1)\n\n def test_mean(self):\n self.methodWithOut('mean', 2.5 * pq.m)\n self.methodWithOut('mean', [2, 3] * pq.m, axis=0)\n self.methodWithOut('mean', [1.5, 3.5] * pq.m, axis=1)\n\n def test_nanmean(self):\n import numpy as np \n q = [[1,2], [3,4], [np.nan,np.nan]] * pq.m\n self.assertQuantityEqual(q.nanmean(), self.q.mean())\n\n def test_var(self):\n self.methodWithOut('var', 1.25 * pq.m**2)\n self.methodWithOut('var', [1, 1] * pq.m**2, axis=0)\n self.methodWithOut('var', [0.25, 0.25] * pq.m**2, axis=1)\n\n def test_std(self):\n self.methodWithOut('std', 1.1180339887498949 * pq.m)\n self.methodWithOut('std', [1, 1] * pq.m, axis=0)\n self.methodWithOut('std', [0.5, 0.5] * pq.m, axis=1)\n\n def test_nanstd(self):\n import numpy as np \n q0 = [[1,2], [3,4]] * pq.m\n q1 = [[1,2], [3,4], [np.nan,np.nan]] * pq.m\n self.assertQuantityEqual(q0.std(), q1.nanstd())\n\n def test_prod(self):\n self.methodWithOut('prod', 24 * pq.m**4)\n self.methodWithOut('prod', [3, 8] * pq.m**2, axis=0)\n self.methodWithOut('prod', [2, 12] * pq.m**2, axis=1)\n\n def test_cumprod(self):\n self.assertRaises(ValueError, self.q.cumprod)\n self.assertQuantityEqual((self.q/pq.m).cumprod(), [1, 2, 6, 24])\n q = self.q/pq.m\n self.methodWithOut(\n 'cumprod',\n [1, 2, 6, 24],\n q=q,\n )\n self.methodWithOut(\n 'cumprod',\n [[1, 2], [3, 8]],\n q=q,\n axis=0,\n )\n self.methodWithOut(\n 'cumprod',\n [[1, 2], [3, 12]],\n q=q,\n axis=1,\n )\n\n def test_conj(self):\n self.assertQuantityEqual((self.q*(1+1j)).conj(), self.q*(1-1j))\n self.assertQuantityEqual((self.q*(1+1j)).conjugate(), self.q*(1-1j))\n\n def test_real(self):\n test_q = self.q * (1 + 1j)\n test_q.real = [[39.3701, 39.3701], [39.3701, 39.3701]] * pq.inch\n self.assertQuantityEqual(test_q.real, [[1., 1.], [1., 1.]] * pq.m)\n\n def test_imag(self):\n test_q = self.q * (1 + 1j)\n test_q.imag = [[39.3701, 39.3701], [39.3701, 39.3701]] * pq.inch\n self.assertQuantityEqual(test_q.imag, [[1., 1.], [1., 1.]] * pq.m)\n\n def test_getitem(self):\n self.assertRaises(IndexError, self.q.__getitem__, (0,10))\n self.assertQuantityEqual(self.q[0], [1,2]*pq.m)\n self.assertQuantityEqual(self.q[1,1], 4*pq.m)\n\n def test_setitem (self):\n self.assertRaises(ValueError, self.q.__setitem__, (0,0), 1)\n self.assertRaises(ValueError, self.q.__setitem__, (0,0), 1*pq.J)\n self.assertRaises(ValueError, self.q.__setitem__, 0, 1)\n self.assertRaises(ValueError, self.q.__setitem__, 0, [1, 2])\n self.assertRaises(ValueError, self.q.__setitem__, 0, 1*pq.J)\n\n q = self.q.copy()\n q[0] = 1*pq.m\n self.assertQuantityEqual(q, [[1,1],[3,4]]*pq.m)\n\n q[0] = (1,2)*pq.m\n self.assertQuantityEqual(q, self.q)\n\n q[:] = 1*pq.m\n self.assertQuantityEqual(q, [[1,1],[1,1]]*pq.m)\n\n # check and see that dimensionless numbers work correctly\n q = [0,1,2,3]*pq.dimensionless\n q[0] = 1\n self.assertQuantityEqual(q, [1,1,2,3])\n q[0] = pq.m/pq.mm\n self.assertQuantityEqual(q, [1000, 1,2,3])\n\n q = [0,1,2,3] * pq.m/pq.mm\n q[0] = 1\n self.assertQuantityEqual(q, [0.001,1,2,3]*pq.m/pq.mm)\n\n def test_iterator(self):\n for q in self.q.flatten():\n self.assertQuantityEqual(q.units, pq.m)\n" ]
[ [ "numpy.empty_like", "numpy.append" ] ]
dp-cont/dp-cont
[ "193bea06c1861645f16bd080e45eb0f282e8469a" ]
[ "percentile_estimator/smooth.py" ]
[ "import math\n\nimport pickle\nfrom os import path\nimport numpy as np\nfrom scipy.stats import laplace\nfrom scipy.stats import norm\n\nimport primitive\nfrom percentile_estimator.estimator import Estimator\n\n\nclass Smooth(Estimator):\n\n def __init__(self, users, args):\n super(Smooth, self).__init__(users, args)\n\n def obtain_ss(self, m, p, b):\n user_file_name = 'data/ss/%s-%d-%f-%f' % (self.args.user_type, m, p, b)\n if not path.exists(user_file_name):\n sorted_data = np.sort(np.copy(self.users.data[:m]))\n p_rank = int(self.args.m * p)\n p_percentile = sorted_data[p_rank]\n ss = primitive.smooth_ell(p_rank, b, self.users.max_ell, sorted_data)\n pickle.dump([ss, p_percentile], open(user_file_name, 'wb'))\n\n [ss, p_percentile] = pickle.load(open(user_file_name, 'rb'))\n return ss, p_percentile\n\n def obtain_ell(self, p=0):\n # 0.9499 is the default p for other methods,\n if p == 0.99499:\n p = 0.995 * 0.85\n p = 0.995\n noise = 'lap'\n m = self.args.m\n n = self.args.n\n eps = self.args.epsilon\n delta = 1 / n ** 2\n # delta = 2e-20\n b = eps / (2 * math.log(1 / delta))\n\n ss, p_percentile = self.obtain_ss(m, p, b)\n\n if noise == 'lap':\n a = eps / 2\n else:\n a = eps / math.sqrt(-math.log(delta))\n\n if noise == 'lap':\n ns = laplace.rvs()\n else:\n ns = norm.rvs()\n\n return max(0, p_percentile + ss / a * ns)\n" ]
[ [ "scipy.stats.laplace.rvs", "numpy.copy", "scipy.stats.norm.rvs" ] ]
traveller59/torchplus
[ "726df6b9f31522c22be02cd29a5608f597042355" ]
[ "train/optim.py" ]
[ "from collections import defaultdict, Iterable\n\nimport torch\nfrom copy import deepcopy\nfrom itertools import chain\nfrom torch.autograd import Variable\n\nrequired = object()\n\n\ndef param_fp32_copy(params):\n param_copy = [\n param.clone().type(torch.cuda.FloatTensor).detach() for param in params\n ]\n for param in param_copy:\n param.requires_grad = True\n return param_copy\n\ndef isinf(tensor):\n return tensor == torch.FloatTensor([float('inf')]).type_as(tensor)\n\ndef set_grad(params, params_with_grad, scale=1.0):\n for param, param_w_grad in zip(params, params_with_grad):\n if param.grad is None:\n param.grad = torch.nn.Parameter(\n param.data.new().resize_(*param.data.size()))\n grad = param_w_grad.grad.data\n if scale is not None:\n grad /= scale\n if torch.isnan(grad).any() or isinf(grad).any():\n return True # invalid grad\n param.grad.data.copy_(grad)\n return False\n\nclass MixedPrecisionWrapper(object):\n \"\"\"mixed precision optimizer wrapper.\n Arguments:\n optimizer (torch.optim.Optimizer): an instance of \n :class:`torch.optim.Optimizer`\n scale: (float): a scalar for grad scale.\n auto_scale: (bool): whether enable auto scale.\n The algorihm of auto scale is discribled in \n http://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html\n \"\"\"\n\n def __init__(self,\n optimizer,\n scale=None,\n auto_scale=True,\n inc_factor=2.0,\n dec_factor=0.5,\n num_iters_be_stable=500):\n if not isinstance(optimizer, torch.optim.Optimizer):\n raise ValueError(\"must provide a torch.optim.Optimizer\")\n self.optimizer = optimizer\n if hasattr(self.optimizer, 'name'):\n self.name = self.optimizer.name # for ckpt system\n param_groups_copy = []\n for i, group in enumerate(optimizer.param_groups):\n group_copy = {n: v for n, v in group.items() if n != 'params'}\n group_copy['params'] = param_fp32_copy(group['params'])\n param_groups_copy.append(group_copy)\n\n # switch param_groups, may be dangerous\n self.param_groups = optimizer.param_groups\n optimizer.param_groups = param_groups_copy\n self.grad_scale = scale\n self.auto_scale = auto_scale\n self.inc_factor = inc_factor\n self.dec_factor = dec_factor\n self.stable_iter_count = 0\n self.num_iters_be_stable = num_iters_be_stable\n\n def __getstate__(self):\n return self.optimizer.__getstate__()\n\n def __setstate__(self, state):\n return self.optimizer.__setstate__(state)\n\n def __repr__(self):\n return self.optimizer.__repr__()\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def load_state_dict(self, state_dict):\n return self.optimizer.load_state_dict(state_dict)\n\n def zero_grad(self):\n return self.optimizer.zero_grad()\n\n def step(self, closure=None):\n for g, g_copy in zip(self.param_groups, self.optimizer.param_groups):\n invalid = set_grad(g_copy['params'], g['params'], self.grad_scale)\n if invalid:\n if self.grad_scale is None or self.auto_scale is False:\n raise ValueError(\"nan/inf detected but auto_scale disabled.\")\n self.grad_scale *= self.dec_factor\n print('scale decay to {}'.format(self.grad_scale))\n return\n if self.auto_scale is True:\n self.stable_iter_count += 1\n if self.stable_iter_count > self.num_iters_be_stable:\n if self.grad_scale is not None:\n self.grad_scale *= self.inc_factor\n self.stable_iter_count = 0\n\n if closure is None:\n self.optimizer.step()\n else:\n self.optimizer.step(closure)\n for g, g_copy in zip(self.param_groups, self.optimizer.param_groups):\n for p_copy, p in zip(g_copy['params'], g['params']):\n p.data.copy_(p_copy.data)\n\n" ]
[ [ "torch.isnan" ] ]
mia-jingyi/simglucose
[ "a90bd8750fce362be91668ed839b3b252bc0d58d" ]
[ "simglucose/patient/t1dpatient.py" ]
[ "from .base import Patient\nimport numpy as np\nfrom scipy.integrate import ode, solve_ivp\nimport pandas as pd\nfrom collections import namedtuple\nfrom typing import NamedTuple\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Action(NamedTuple): # This plays nicer with serialization\n CHO: float\n insulin: float\n\n\nObservation = namedtuple(\"observation\", ['Gsub'])\n\n\nclass T1DPatient(Patient):\n SAMPLE_TIME = 1 # min\n EAT_RATE = 5 # g/min CHO\n\n def __init__(self,\n params,\n init_state=None,\n random_init_bg=False,\n seed=None,\n t0=0):\n '''\n T1DPatient constructor.\n Inputs:\n - params: a pandas sequence\n - init_state: customized initial state.\n If not specified, load the default initial state in\n params.iloc[2:15]\n - t0: simulation start time, it is 0 by default\n '''\n self._params = params\n self._init_state = init_state\n self.random_init_bg = random_init_bg\n self._seed = seed\n self.t0 = t0\n self.reset()\n\n @classmethod\n def withID(cls, patient_id, patient_para_file, **kwargs):\n '''\n Construct patient by patient_id\n id are integers from 1 to 30.\n 1 - 10: adolescent#001 - adolescent#010\n 11 - 20: adult#001 - adult#001\n 21 - 30: child#001 - child#010\n '''\n patient_params = pd.read_csv(patient_para_file)\n params = patient_params.iloc[patient_id - 1, :]\n return cls(params, **kwargs)\n\n @classmethod\n def withName(cls, name, patient_para_file, **kwargs):\n '''\n Construct patient by name.\n Names can be\n adolescent#001 - adolescent#010\n adult#001 - adult#001\n child#001 - child#010\n '''\n patient_params = pd.read_csv(patient_para_file)\n params = patient_params.loc[patient_params.Name == name].squeeze()\n return cls(params, **kwargs)\n\n @property\n def state(self):\n return self._odesolver.y\n\n @property\n def t(self):\n return self._odesolver.t\n\n @property\n def sample_time(self):\n return self.SAMPLE_TIME\n\n def step(self, action):\n # Convert announcing meal to the meal amount to eat at the moment\n to_eat = self._announce_meal(action.CHO)\n action = action._replace(CHO=to_eat)\n\n # Detect eating or not and update last digestion amount\n if action.CHO > 0 and self._last_action.CHO <= 0:\n logger.info('t = {}, patient starts eating ...'.format(self.t))\n self._last_Qsto = self.state[0] + self.state[1]\n self._last_foodtaken = 0\n self.is_eating = True\n\n if to_eat > 0:\n # print(action.CHO)\n logger.debug('t = {}, patient eats {} g'.format(\n self.t, action.CHO))\n\n if self.is_eating:\n self._last_foodtaken += action.CHO # g\n\n # Detect eating ended\n if action.CHO <= 0 and self._last_action.CHO > 0:\n logger.info('t = {}, Patient finishes eating!'.format(self.t))\n self.is_eating = False\n\n # Update last input\n self._last_action = action\n\n # ODE solver\n # print('Current simulation time: {}'.format(self.t))\n # print(self._last_Qsto)\n self._odesolver.set_f_params(action, self._params, self._last_Qsto,\n self._last_foodtaken)\n if self._odesolver.successful():\n self._odesolver.integrate(self._odesolver.t + self.sample_time)\n else:\n logger.error('ODE solver failed!!')\n raise\n\n @staticmethod\n def model(t, x, action, params, last_Qsto, last_foodtaken):\n # finding state labels\n # x_0: stomach solid\n # x_1: stomach liquid\n # x_2: gut\n # x_3: plasma glucose\n # x_4: tissue glucose\n # x_5: plasma insulin\n # x_6: insulin action on glucose utilization, X(t)\n # x_7: insulin action on glucose production, I'(t)\n # x_8: delayed insulin action on liver, X^L\n # x_9: liver insulin\n # x_10: subcutaneous insulin 
compartment 1, I_sc1\n # x_11: subcutaneous insulin compartment 2, I_sc2\n # x_12: subcutaneous glucose\n\n dxdt = np.zeros(13)\n d = action.CHO * 1000 # g -> mg\n insulin = action.insulin * 6000 / params.BW # U/min -> pmol/kg/min\n basal = params.u2ss * params.BW / 6000 # U/min\n\n # Glucose in the stomach\n qsto = x[0] + x[1]\n Dbar = last_Qsto + last_foodtaken\n\n # Stomach solid\n dxdt[0] = -params.kmax * x[0] + d\n\n if Dbar > 0:\n aa = 5 / 2 / (1 - params.b) / Dbar\n cc = 5 / 2 / params.d / Dbar\n kgut = params.kmin + (params.kmax - params.kmin) / 2 * (\n np.tanh(aa * (qsto - params.b * Dbar)) -\n np.tanh(cc * (qsto - params.d * Dbar)) + 2)\n else:\n kgut = params.kmax\n\n # stomach liquid\n dxdt[1] = params.kmax * x[0] - x[1] * kgut\n\n # intestine\n dxdt[2] = kgut * x[1] - params.kabs * x[2]\n\n # Rate of appearance\n Rat = params.f * params.kabs * x[2] / params.BW\n # Glucose Production\n EGPt = params.kp1 - params.kp2 * x[3] - params.kp3 * x[8]\n # Glucose Utilization\n Uiit = params.Fsnc\n\n # renal excretion\n if x[3] > params.ke2:\n Et = params.ke1 * (x[3] - params.ke2)\n else:\n Et = 0\n\n # glucose kinetics\n # plus dextrose IV injection input u[2] if needed\n dxdt[3] = max(EGPt, 0) + Rat - Uiit - Et - \\\n params.k1 * x[3] + params.k2 * x[4]\n dxdt[3] = (x[3] >= 0) * dxdt[3]\n\n Vmt = params.Vm0 + params.Vmx * x[6]\n Kmt = params.Km0\n Uidt = Vmt * x[4] / (Kmt + x[4])\n dxdt[4] = -Uidt + params.k1 * x[3] - params.k2 * x[4]\n dxdt[4] = (x[4] >= 0) * dxdt[4]\n\n # insulin kinetics\n dxdt[5] = -(params.m2 + params.m4) * x[5] + params.m1 * x[9] + params.ka1 * \\\n x[10] + params.ka2 * x[11] # plus insulin IV injection u[3] if needed\n It = x[5] / params.Vi\n dxdt[5] = (x[5] >= 0) * dxdt[5]\n\n # insulin action on glucose utilization\n dxdt[6] = -params.p2u * x[6] + params.p2u * (It - params.Ib)\n\n # insulin action on production\n dxdt[7] = -params.ki * (x[7] - It)\n\n dxdt[8] = -params.ki * (x[8] - x[7])\n\n # insulin in the liver (pmol/kg)\n dxdt[9] = -(params.m1 + params.m30) * x[9] + params.m2 * x[5]\n dxdt[9] = (x[9] >= 0) * dxdt[9]\n\n # subcutaneous insulin kinetics\n dxdt[10] = insulin - (params.ka1 + params.kd) * x[10]\n dxdt[10] = (x[10] >= 0) * dxdt[10]\n\n dxdt[11] = params.kd * x[10] - params.ka2 * x[11]\n dxdt[11] = (x[11] >= 0) * dxdt[11]\n\n # subcutaneous glucose\n dxdt[12] = (-params.ksc * x[12] + params.ksc * x[3])\n dxdt[12] = (x[12] >= 0) * dxdt[12]\n\n if action.insulin > basal:\n logger.debug('t = {}, injecting insulin: {}'.format(\n t, action.insulin))\n\n return dxdt\n\n @property\n def observation(self):\n '''\n return the observation from patient\n for now, only the subcutaneous glucose level is returned\n TODO: add heart rate as an observation\n '''\n GM = self.state[12] # subcutaneous glucose (mg/kg)\n Gsub = GM / self._params.Vg\n observation = Observation(Gsub=Gsub)\n return observation\n\n def _announce_meal(self, meal):\n '''\n patient announces meal.\n The announced meal will be added to self.planned_meal\n The meal is consumed in self.EAT_RATE\n The function will return the amount to eat at current time\n '''\n self.planned_meal += meal\n if self.planned_meal > 0:\n to_eat = min(self.EAT_RATE, self.planned_meal)\n self.planned_meal -= to_eat\n self.planned_meal = max(0, self.planned_meal)\n else:\n to_eat = 0\n return to_eat\n\n @property\n def seed(self):\n return self._seed\n\n @seed.setter\n def seed(self, seed):\n self._seed = seed\n self.reset()\n\n def reset(self):\n '''\n Reset the patient state to default intial state\n '''\n if 
self._init_state is None:\n            self.init_state = self._params.iloc[2:15]\n        else:\n            self.init_state = self._init_state\n\n        self.random_state = np.random.RandomState(self.seed)\n        if self.random_init_bg:\n            # Only randomize glucose related states, x4, x5, and x13\n            mean = [\n                1.0 * self.init_state[3], 1.0 * self.init_state[4],\n                1.0 * self.init_state[12]\n            ]\n            cov = np.diag([\n                0.1 * self.init_state[3], 0.1 * self.init_state[4],\n                0.1 * self.init_state[12]\n            ])\n            bg_init = self.random_state.multivariate_normal(mean, cov)\n            self.init_state[3] = 1.0 * bg_init[0]\n            self.init_state[4] = 1.0 * bg_init[1]\n            self.init_state[12] = 1.0 * bg_init[2]\n\n        self._last_Qsto = self.init_state[0] + self.init_state[1]\n        self._last_foodtaken = 0\n        self.name = self._params.Name\n\n        self._odesolver = ode(self.model).set_integrator('dopri5')\n        self._odesolver.set_initial_value(self.init_state, self.t0)\n\n        self._last_action = Action(CHO=0, insulin=0)\n        self.is_eating = False\n        self.planned_meal = 0\n\n\nclass T1DPatientNew(Patient):\n    SAMPLE_TIME = 1  # min\n    EAT_RATE = 5  # g/min CHO\n\n    def __init__(self,\n                 params,\n                 init_state=None,\n                 random_init_bg=False,\n                 seed=None,\n                 t0=0):\n        '''\n        T1DPatient constructor.\n        Inputs:\n            - params: a pandas sequence\n            - init_state: customized initial state.\n              If not specified, load the default initial state in\n              params.iloc[2:15]\n            - t0: simulation start time, it is 0 by default\n        '''\n        self._params = params\n        self._init_state = init_state\n        self.random_init_bg = random_init_bg\n        self._seed = seed\n        self.t0 = t0\n        self._t = self.t0\n        self.reset()\n\n    @classmethod\n    def withID(cls, patient_id, patient_para_file, **kwargs):\n        '''\n        Construct patient by patient_id\n        id are integers from 1 to 30.\n        1 - 10: adolescent#001 - adolescent#010\n        11 - 20: adult#001 - adult#010\n        21 - 30: child#001 - child#010\n        '''\n        patient_params = pd.read_csv(patient_para_file)\n        params = patient_params.iloc[patient_id - 1, :]\n        return cls(params, **kwargs)\n\n    @classmethod\n    def withName(cls, name, patient_para_file, **kwargs):\n        '''\n        Construct patient by name.\n        Names can be\n        adolescent#001 - adolescent#010\n        adult#001 - adult#010\n        child#001 - child#010\n        '''\n        patient_params = pd.read_csv(patient_para_file)\n        params = patient_params.loc[patient_params.Name == name].squeeze()\n        return cls(params, **kwargs)\n\n    @property\n    def state(self):\n        return self._state\n\n    @property\n    def t(self):\n        return self._t\n\n    @property\n    def sample_time(self):\n        return self.SAMPLE_TIME\n\n    def step(self, action):\n        # Convert announcing meal to the meal amount to eat at the moment\n        to_eat = self._announce_meal(action.CHO)\n        action = action._replace(CHO=to_eat)\n\n        # Detect eating or not and update last digestion amount\n        if action.CHO > 0 and self._last_action.CHO <= 0:\n            logger.info('t = {}, patient starts eating ...'.format(self.t))\n            self._last_Qsto = self.state[0] + self.state[1]\n            self._last_foodtaken = 0\n            self.is_eating = True\n\n        if to_eat > 0:\n            # print(action.CHO)\n            logger.debug('t = {}, patient eats {} g'.format(\n                self.t, action.CHO))\n\n        if self.is_eating:\n            self._last_foodtaken += action.CHO  # g\n\n        # Detect eating ended\n        if action.CHO <= 0 and self._last_action.CHO > 0:\n            logger.info('t = {}, Patient finishes eating!'.format(self.t))\n            self.is_eating = False\n\n        # Update last input\n        self._last_action = action\n\n        # ODE solver\n        # print('Current simulation time: {}'.format(self.t))\n        # print(self._last_Qsto)\n        sol = solve_ivp(fun=lambda time, state: self.model(time, state, action, self._params,\n                                                           
self._last_Qsto, self._last_foodtaken),\n t_span=(self.t, self.t+self.sample_time),\n y0=self.state,\n method='LSODA',\n rtol=10)\n self._state = np.array(sol.y[:, -1], dtype=float)\n self._t = sol.t[-1]\n self.sol = sol\n if not sol.success:\n raise ValueError('Integrator Failed')\n\n @staticmethod\n def model(t, x, action, params, last_Qsto, last_foodtaken):\n # finding state labels\n # x_0: stomach solid\n # x_1: stomach liquid\n # x_2: gut\n # x_3: plasma glucose\n # x_4: tissue glucose\n # x_5: plasma insulin\n # x_6: insulin action on glucose utilization, X(t)\n # x_7: insulin action on glucose production, I'(t)\n # x_8: delayed insulin action on liver, X^L\n # x_9: liver insulin\n # x_10: subcutaneous insulin compartment 1, I_sc1\n # x_11: subcutaneous insulin compartment 2, I_sc2\n # x_12: subcutaneous glucose\n\n dxdt = np.zeros(13)\n d = action.CHO * 1000 # g -> mg\n insulin = action.insulin * 6000 / params.BW # U/min -> pmol/kg/min\n basal = params.u2ss * params.BW / 6000 # U/min\n\n # Glucose in the stomach\n qsto = x[0] + x[1]\n Dbar = last_Qsto + last_foodtaken\n\n # Stomach solid\n dxdt[0] = -params.kmax * x[0] + d\n\n if Dbar > 0:\n aa = 5 / 2 / (1 - params.b) / Dbar\n cc = 5 / 2 / params.d / Dbar\n kgut = params.kmin + (params.kmax - params.kmin) / 2 * (\n np.tanh(aa * (qsto - params.b * Dbar)) -\n np.tanh(cc * (qsto - params.d * Dbar)) + 2)\n else:\n kgut = params.kmax\n\n # stomach liquid\n dxdt[1] = params.kmax * x[0] - x[1] * kgut\n\n # intestine\n dxdt[2] = kgut * x[1] - params.kabs * x[2]\n\n # Rate of appearance\n Rat = params.f * params.kabs * x[2] / params.BW\n # Glucose Production\n EGPt = params.kp1 - params.kp2 * x[3] - params.kp3 * x[8]\n # Glucose Utilization\n Uiit = params.Fsnc\n\n # renal excretion\n if x[3] > params.ke2:\n Et = params.ke1 * (x[3] - params.ke2)\n else:\n Et = 0\n\n # glucose kinetics\n # plus dextrose IV injection input u[2] if needed\n dxdt[3] = max(EGPt, 0) + Rat - Uiit - Et - \\\n params.k1 * x[3] + params.k2 * x[4]\n dxdt[3] = (x[3] >= 0) * dxdt[3]\n\n Vmt = params.Vm0 + params.Vmx * x[6]\n Kmt = params.Km0\n Uidt = Vmt * x[4] / (Kmt + x[4])\n dxdt[4] = -Uidt + params.k1 * x[3] - params.k2 * x[4]\n dxdt[4] = (x[4] >= 0) * dxdt[4]\n\n # insulin kinetics\n dxdt[5] = -(params.m2 + params.m4) * x[5] + params.m1 * x[9] + params.ka1 * \\\n x[10] + params.ka2 * x[11] # plus insulin IV injection u[3] if needed\n It = x[5] / params.Vi\n dxdt[5] = (x[5] >= 0) * dxdt[5]\n\n # insulin action on glucose utilization\n dxdt[6] = -params.p2u * x[6] + params.p2u * (It - params.Ib)\n\n # insulin action on production\n dxdt[7] = -params.ki * (x[7] - It)\n\n dxdt[8] = -params.ki * (x[8] - x[7])\n\n # insulin in the liver (pmol/kg)\n dxdt[9] = -(params.m1 + params.m30) * x[9] + params.m2 * x[5]\n dxdt[9] = (x[9] >= 0) * dxdt[9]\n\n # subcutaneous insulin kinetics\n dxdt[10] = insulin - (params.ka1 + params.kd) * x[10]\n dxdt[10] = (x[10] >= 0) * dxdt[10]\n\n dxdt[11] = params.kd * x[10] - params.ka2 * x[11]\n dxdt[11] = (x[11] >= 0) * dxdt[11]\n\n # subcutaneous glucose\n dxdt[12] = (-params.ksc * x[12] + params.ksc * x[3])\n dxdt[12] = (x[12] >= 0) * dxdt[12]\n\n if action.insulin > basal:\n logger.debug('t = {}, injecting insulin: {}'.format(\n t, action.insulin))\n\n return dxdt\n\n @property\n def observation(self):\n '''\n return the observation from patient\n for now, only the subcutaneous glucose level is returned\n TODO: add heart rate as an observation\n '''\n GM = self.state[12] # subcutaneous glucose (mg/kg)\n Gsub = GM / self._params.Vg\n 
observation = Observation(Gsub=Gsub)\n        return observation\n\n    def _announce_meal(self, meal):\n        '''\n        patient announces meal.\n        The announced meal will be added to self.planned_meal\n        The meal is consumed in self.EAT_RATE\n        The function will return the amount to eat at current time\n        '''\n        self.planned_meal += meal\n        if self.planned_meal > 0:\n            to_eat = min(self.EAT_RATE, self.planned_meal)\n            self.planned_meal -= to_eat\n            self.planned_meal = max(0, self.planned_meal)\n        else:\n            to_eat = 0\n        return to_eat\n\n    @property\n    def seed(self):\n        return self._seed\n\n    @seed.setter\n    def seed(self, seed):\n        self._seed = seed\n        self.reset()\n\n    def reset(self):\n        '''\n        Reset the patient state to default initial state\n        '''\n        if self._init_state is None:\n            self.init_state = self._params.iloc[2:15]\n        else:\n            self.init_state = self._init_state\n\n        self.random_state = np.random.RandomState(self.seed)\n        if self.random_init_bg:\n            # Only randomize glucose related states, x4, x5, and x13\n            mean = [\n                1.0 * self.init_state[3], 1.0 * self.init_state[4],\n                1.0 * self.init_state[12]\n            ]\n            cov = np.diag([\n                0.1 * self.init_state[3], 0.1 * self.init_state[4],\n                0.1 * self.init_state[12]\n            ])\n            bg_init = self.random_state.multivariate_normal(mean, cov)\n            self.init_state[3] = 1.0 * bg_init[0]\n            self.init_state[4] = 1.0 * bg_init[1]\n            self.init_state[12] = 1.0 * bg_init[2]\n\n        self._last_Qsto = self.init_state[0] + self.init_state[1]\n        self._last_foodtaken = 0\n        self.name = self._params.Name\n\n        self._t = self.t0\n        self._state = self.init_state\n\n        self._last_action = Action(CHO=0, insulin=0)\n        self.is_eating = False\n        self.planned_meal = 0\n\n\nif __name__ == '__main__':\n    logger.setLevel(logging.INFO)\n    # create console handler and set level to debug\n    ch = logging.StreamHandler()\n    # ch.setLevel(logging.DEBUG)\n    ch.setLevel(logging.INFO)\n    # create formatter\n    formatter = logging.Formatter('%(name)s: %(levelname)s: %(message)s')\n    # add formatter to ch\n    ch.setFormatter(formatter)\n    # add ch to logger\n    logger.addHandler(ch)\n\n    # NOTE: withName also expects a patient_para_file argument (the patient\n    # parameter csv); this demo call predates that signature\n    p = T1DPatient.withName('adolescent#001')\n    basal = p._params.u2ss * p._params.BW / 6000  # U/min\n    t = []\n    CHO = []\n    insulin = []\n    BG = []\n    while p.t < 1000:\n        ins = basal\n        carb = 0\n        if p.t == 100:\n            carb = 80\n            ins = 80.0 / 6.0 + basal\n        # if p.t == 150:\n        #     ins = 80.0 / 12.0 + basal\n        act = Action(insulin=ins, CHO=carb)\n        t.append(p.t)\n        CHO.append(act.CHO)\n        insulin.append(act.insulin)\n        BG.append(p.observation.Gsub)\n        p.step(act)\n\n    import matplotlib.pyplot as plt\n    fig, ax = plt.subplots(3, sharex=True)\n    ax[0].plot(t, BG)\n    ax[1].plot(t, CHO)\n    ax[2].plot(t, insulin)\n    plt.show()\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.random.RandomState", "matplotlib.pyplot.subplots", "numpy.tanh", "matplotlib.pyplot.show", "pandas.read_csv", "scipy.integrate.ode", "numpy.diag" ] ]
SAMMiCA/robot_home_service
[ "fa1a49346b38e6f82e7cf0c7f9c09349d956c972" ]
[ "scene_graph/test_visualize.py" ]
[ "import vispy\nfrom vispy.scene import visuals, SceneCanvas\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\nimport os\nimport time\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom torch.multiprocessing import Process, Queue\nimport torch.multiprocessing as mp\n\n\nclass ScannetVis(QWidget):\n \"\"\"Class that creates and handles a visualizer for a pointcloud\"\"\"\n\n def __init__(self, scan, task, offset=0, skip_im=10, mesh_plot=True, parent=None):\n super(ScannetVis, self).__init__(parent=parent)\n\n self.scan = scan\n self.task = task\n self.offset = offset\n self.offset_prev = offset\n self.skip_im = skip_im\n self.mesh_plot = mesh_plot\n self.num_imgs = 0\n\n self.keyboard_inputs = None\n\n self.view_point_cloud = True\n\n # self.total = len(self.rgb_names)\n\n self.checkBox_list = []\n self.checkBox_with_3D = []\n\n self.reset()\n self.initUI()\n self.update_scan()\n\n\n def initUI(self):\n self.setStyleSheet(\"background-color: white;\")\n self.principalLayout = QHBoxLayout(self)\n\n ''' left left Frame : RGB with yolact & depth frame '''\n self.left2Frame = QFrame(self)\n self.left2Frame.setFrameShape(QFrame.StyledPanel)\n self.left2Frame.setFrameShadow(QFrame.Raised)\n self.vertical2Layout = QVBoxLayout(self.left2Frame)\n # self.vertical2Layout.setSpacing(0)\n self.principalLayout.addWidget(self.left2Frame)\n\n # self.vertical2_1Layout = QVBoxLayout(self.left2Frame)\n # self.vertical2Layout.addWidget(self.left2Frame)\n # add rgb depth\n self.img_canvas.create_native()\n self.img_canvas.native.setMinimumSize(320, 480)\n self.vertical2Layout.addWidget(self.img_canvas.native)\n\n ''' left Frame : 3D reconstructed Scene '''\n self.leftFrame = QFrame(self)\n self.leftFrame.setFrameShape(QFrame.StyledPanel)\n self.leftFrame.setFrameShadow(QFrame.Raised)\n self.verticalLayout = QVBoxLayout(self.leftFrame)\n # self.verticalLayout.setSpacing(0)\n self.principalLayout.addWidget(self.leftFrame)\n\n self.canvas.create_native()\n self.canvas.native.setMinimumSize(640, 480)\n self.verticalLayout.addWidget(self.canvas.native)\n\n ''' left center Frame : 3D Scene graph'''\n self.SGFrame = QFrame(self)\n self.SGFrame.setFrameShape(QFrame.StyledPanel)\n self.SGFrame.setFrameShadow(QFrame.Raised)\n # self.verticalSGLayout = QVBoxLayout(self.SGFrame)\n # self.verticalLayout.setSpacing(0)\n self.principalLayout.addWidget(self.SGFrame)\n\n self.scene_graph_canvas.create_native()\n self.scene_graph_canvas.native.setMinimumSize(640, 480)\n self.verticalLayout.addWidget(self.scene_graph_canvas.native)\n\n ''' center Frame : control pannel '''\n self.keyFrame = QFrame(self)\n self.keyFrame.setFrameShape(QFrame.StyledPanel)\n self.keyFrame.setFrameShadow(QFrame.Raised)\n self.keysverticalLayout = QVBoxLayout(self.keyFrame)\n\n self.label1 = QLabel(\"To navigate: \"\n \"\\n n: next (next scan) \"\n \"\\n s: start (start processing sequential rgb-d images)\"\n \"\\n p: pause (pause processing)\"\n \"\\n q: quit (exit program)\"\n\n \"\\n\\n To control 3D view: \"\n \"\\n LMB: orbits the view around its center point\"\n \"\\n RMB or scroll: change scale_factor (i.e. 
zoom level)\"\n \"\\n SHIFT + LMB: translate the center point\"\n \"\\n SHIFT + RMB: change FOV\")\n self.label2 = QLabel(\"To find specific objects in 3D Space : \")\n # self.keysverticalLayout.addWidget(self.label1)\n # self.keysverticalLayout.addWidget(self.label2)\n self.vertical2Layout.addWidget(self.label1)\n self.vertical2Layout.addWidget(self.label2)\n\n self.le = QLineEdit(self)\n self.vertical2Layout.addWidget(self.le)\n\n self.spb = QPushButton('search', self)\n self.vertical2Layout.addWidget(self.spb)\n self.spb.clicked.connect(self.search_button_click)\n\n self.cpb = QPushButton('clear', self)\n self.vertical2Layout.addWidget(self.cpb)\n self.cpb.clicked.connect(self.clear_button_click)\n\n self.verticalLayoutR = QVBoxLayout()\n self.verticalLayoutR.addWidget(self.keyFrame)\n self.verticalLayoutR.setContentsMargins(0, 0, 0, 0)\n self.verticalLayoutR.setSpacing(0)\n self.principalLayout.addLayout(self.verticalLayoutR)\n\n ''' Right Frame : result images of searched objects '''\n self.rightFrame = QFrame(self)\n self.rightFrame.setFrameShape(QFrame.StyledPanel)\n self.rightFrame.setFrameShadow(QFrame.Raised)\n self.verticalLayoutRight = QVBoxLayout(self.rightFrame)\n self.verticalLayoutRight.setContentsMargins(0, 0, 0, 0)\n self.verticalLayoutRight.setSpacing(0)\n self.principalLayout.addWidget(self.rightFrame)\n\n self.setLayout(self.principalLayout)\n self.setWindowTitle('Searching objects')\n self.setGeometry(300, 300, 300, 200)\n self.show()\n\n def reset(self):\n \"\"\" Reset. \"\"\"\n # last key press (it should have a mutex, but visualization is not\n # safety critical, so let's do things wrong)\n self.action = \"no\" # no, next, back, quit are the possibilities\n\n ''' 3D points cloud or mesh SceneCanvas '''\n if (self.view_point_cloud):\n # new canvas prepared for visualizing data\n self.canvas = SceneCanvas(keys='interactive', show=True)\n # interface (n next, b back, q quit, very simple)\n self.canvas.events.key_press.connect(self.key_press)\n self.canvas.events.draw.connect(self.draw)\n # grid\n self.grid = self.canvas.central_widget.add_grid()\n\n # add point cloud views\n self.scan_view = vispy.scene.widgets.ViewBox(\n border_color='white', parent=self.canvas.scene)\n self.grid.add_widget(self.scan_view, 0, 0)\n\n # Camera location settings\n self.scene_cam = vispy.scene.cameras.BaseCamera()\n # self.scene_cam.center = (-10, -10, 10)\n # self.scan_view.add(self.scene_cam)\n # self.scene_cam.pre_transform.set_range()\n\n canvas2 = vispy.app.Canvas()\n w = QMainWindow()\n widget = QWidget()\n w.setCentralWidget(widget)\n widget.setLayout(QVBoxLayout())\n widget.layout().addWidget(canvas2.native)\n widget.layout().addWidget(QPushButton())\n w.show()\n\n if self.mesh_plot:\n self.scan_vis = visuals.Mesh()\n self.scan_vis_mean = visuals.Line()\n self.scan_vis_cam = visuals.Line()\n self.scan_bbox_3d = visuals.Line()\n self.label_vis = visuals.Text()\n\n self.scan_view.add(self.scan_vis)\n self.scan_view.add(self.scan_vis_mean)\n self.scan_view.add(self.scan_vis_cam)\n self.scan_view.add(self.scan_bbox_3d)\n self.scan_view.add(self.label_vis)\n else:\n self.scan_vis = visuals.Markers()\n self.scan_view.add(self.scan_vis)\n\n self.scan_view.camera = 'arcball'\n self.tr = self.scan_vis.transforms.get_transform(map_from='visual', map_to='canvas')\n # self.scan_view.camera = self.scene_cam\n # self.scan_view.camera = 'arcball' , 'turntable'\n # self.scan_view.camera.transform.rotate(90, (0,1,0))\n\n\n visuals.XYZAxis(parent=self.scan_view.scene)\n\n ''' 2D images SceneCanvas 
'''\n # img canvas size\n self.canvas_W = 320\n self.canvas_H = 280\n self.multiplier = 2\n\n ''' new canvas for RGB & Depth img '''\n self.img_canvas = SceneCanvas(keys='interactive', show=True,\n size=(self.canvas_W, self.canvas_H * self.multiplier))\n self.img_grid = self.img_canvas.central_widget.add_grid()\n # interface (n next, s start, p pause, q quit, )\n self.img_canvas.events.key_press.connect(self.key_press)\n self.img_canvas.events.draw.connect(self.draw)\n\n # add rgb views\n self.rgb_img_raw_view = vispy.scene.widgets.ViewBox(\n border_color='white', parent=self.img_canvas.scene)\n self.img_grid.add_widget(self.rgb_img_raw_view, 0, 0)\n self.rgb_img_raw_vis = visuals.Image(cmap='viridis')\n self.rgb_img_raw_view.add(self.rgb_img_raw_vis)\n\n # add a view for the depth\n self.depth_img_view = vispy.scene.widgets.ViewBox(\n border_color='white', parent=self.img_canvas.scene)\n self.img_grid.add_widget(self.depth_img_view, 1, 0)\n self.depth_img_vis = visuals.Image(cmap='viridis')\n self.depth_img_view.add(self.depth_img_vis)\n\n ''' new canvas for 3D scene graph img '''\n self.scene_graph_canvas = SceneCanvas(keys='interactive', show=True,\n size=(640, 480))\n self.scene_graph_grid = self.scene_graph_canvas.central_widget.add_grid()\n self.scene_graph_canvas.events.key_press.connect(self.key_press)\n self.scene_graph_canvas.events.draw.connect(self.draw)\n\n # add a view for 3D scene graphs\n self.scene_graph_view = vispy.scene.widgets.ViewBox(\n border_color='white', parent=self.scene_graph_canvas.scene)\n self.scene_graph_grid.add_widget(self.scene_graph_view, 0, 0)\n self.scene_graph_vis = visuals.Image(cmap='viridis')\n self.scene_graph_view.add(self.scene_graph_vis)\n\n\n if (not self.scan.use_gpu):\n # add a depth clustered mask views\n self.depth_mask_view = vispy.scene.widgets.ViewBox(\n border_color='white', parent=self.img_canvas.scene)\n self.img_grid.add_widget(self.depth_mask_view, 0, 1)\n self.depth_mask_vis = visuals.Image(cmap='viridis')\n self.depth_mask_view.add(self.depth_mask_vis)\n\n # add a scene graph results\n self.scene_graph_view = vispy.scene.widgets.ViewBox(\n border_color='white', parent=self.img_canvas.scene)\n self.img_grid.add_widget(self.scene_graph_view, 1, 1)\n self.scene_graph_vis = visuals.Image(cmap='viridis')\n self.scene_graph_view.add(self.scene_graph_vis)\n\n\n def get_mpl_colormap(self, cmap_name):\n cmap = plt.get_cmap(cmap_name)\n\n # Initialize the matplotlib color map\n sm = plt.cm.ScalarMappable(cmap=cmap)\n\n # Obtain linear color range\n color_range = sm.to_rgba(np.linspace(0, 1, 256), bytes=True)[:, 2::-1]\n\n return color_range.reshape(256, 3).astype(np.float32) / 255.0\n\n\n def center_view_point(self, input, R, t, R_90_x):\n input = input - t\n input = np.matmul(input, R)\n # input = np.matmul(input, R_90_x)\n\n return input\n\n def update_yolact(self):\n title = \"scan \" + str(self.task.num_steps_taken())\n obs = self.task.get_observations()\n # rgb_image = obs['rgb']\n # depth_image = obs['depth']\n rgb_image = self.task.env.last_event.frame\n depth_image = self.task.env.last_event.depth_frame\n agent_y = self.task.env.last_event.metadata['agent']['position']['y']\n camera_locs = np.array(\n [\n obs['rel_position_change']['agent_locs'][0], \n # agent_y + 0.675 * obs['rel_position_change']['agent_locs'][-2], \n -0.675 * (1 - obs['rel_position_change']['agent_locs'][-2]), \n obs['rel_position_change']['agent_locs'][1],\n obs['rel_position_change']['agent_locs'][2],\n obs['rel_position_change']['agent_locs'][-1]\n ]\n )\n\n 
# draw color & depth image\n self.img_canvas.title = title\n\n _, _, _, _ = self.scan.open_scan(\n rgb_image=rgb_image,\n depth_image=depth_image,\n camera_locs=camera_locs,\n frame_num=self.task.num_steps_taken(),\n recon=False\n )\n\n text_str = 'Frame %d ' % (self.task.num_steps_taken())\n font_face = cv2.FONT_HERSHEY_DUPLEX\n font_scale = 0.6\n font_thickness = 1\n text_w, text_h = cv2.getTextSize(text_str, font_face, font_scale, font_thickness)[0]\n masked_img = self.scan.masked_img.copy()\n masked_img = cv2.resize(masked_img, (320, 240), interpolation=cv2.INTER_AREA)\n\n x1, y1 = 0, 0\n text_pt = (x1, y1 + 15)\n text_color = [255, 255, 255]\n color = [0, 0, 0]\n cv2.rectangle(masked_img, (x1, y1), (x1 + text_w, y1 + text_h + 4), color, -1)\n cv2.putText(masked_img, text_str, text_pt, font_face, font_scale, text_color, font_thickness, cv2.LINE_AA)\n\n self.rgb_img_raw_vis.set_data(masked_img)\n self.rgb_img_raw_vis.update()\n\n depth_img = cv2.resize(self.scan.depth_im.copy(), (320, 240), interpolation=cv2.INTER_AREA)\n self.depth_img_vis.set_data(depth_img)\n self.depth_img_vis.update()\n\n\n def update_3d_recon(self):\n title = \"scan \" + str(self.task.num_steps_taken())\n if (self.task.num_steps_taken() % self.skip_im == 0):\n start_time = time.time()\n obs = self.task.get_observations()\n # rgb_image = obs['rgb']\n # depth_image = obs['depth']\n rgb_image = self.task.env.last_event.frame\n depth_image = self.task.env.last_event.depth_frame\n agent_y = self.task.env.last_event.metadata['agent']['position']['y']\n camera_locs = np.array(\n [\n obs['rel_position_change']['agent_locs'][0], \n # agent_y + 0.675 * obs['rel_position_change']['agent_locs'][-2], \n -0.675 * (1 - obs['rel_position_change']['agent_locs'][-2]), \n obs['rel_position_change']['agent_locs'][1],\n obs['rel_position_change']['agent_locs'][2],\n obs['rel_position_change']['agent_locs'][-1]\n ]\n )\n verts, faces, norms, colors = self.scan.open_scan(\n rgb_image=rgb_image,\n depth_image=depth_image,\n camera_locs=camera_locs,\n frame_num=self.task.num_steps_taken(),\n recon=True\n )\n self.verts, self.faces, self.norms, self.colors = verts, faces, norms, colors\n\n # cam_pose_test = np.loadtxt(self.pose_names[self.offset])\n camera_matrix = self.scan.tsdf_vol.cam_pose\n # rot_90_x = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])\n rot_90_x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n R_ = camera_matrix[:3, :3]\n t_ = camera_matrix[:3, -1:].transpose()\n self.R_ = R_\n self.rot_90_x = rot_90_x\n if (camera_matrix.size != 0):\n if (self.view_point_cloud):\n self.canvas.title = title\n\n verts_ = self.center_view_point(verts, R_, self.scan.tsdf_vol.camera_3D_pose, rot_90_x)\n self.scan_vis.set_data(vertices=verts_,\n faces=faces,\n vertex_colors=colors/255.)\n self.scan_vis.update()\n\n self.prev_R = R_\n self.prev_t = self.scan.tsdf_vol.camera_3D_pose\n if self.scan.num_dets_to_consider > 0 and self.scan.tsdf_vol.debug_same_node_detector:\n self.mean_pose = np.array(self.scan.tsdf_vol.mask_centers)\n if len(self.mean_pose) > 0:\n mean_pose = self.center_view_point(self.mean_pose, R_, self.scan.tsdf_vol.camera_3D_pose, rot_90_x)\n\n # find object's position and visualize\n self.label_vis.text = self.scan.tsdf_vol.class_label\n self.label_vis.pos = mean_pose\n self.label_vis.font_size = int(40)\n\n else:\n if (len(self.label_vis.text) != 0):\n pos = self.label_vis.pos\n pos = np.matmul(pos, np.linalg.inv(rot_90_x))\n pos = np.matmul(pos, np.linalg.inv(self.prev_R)) - self.prev_t\n\n # self.label_vis.pos = 
np.matmul(pos, R_) + t_\n\n self.label_vis.pos = self.center_view_point(pos, R_,\n self.scan.tsdf_vol.camera_3D_pose, rot_90_x)\n self.label_vis.update()\n\n\n self.cam_frustum = np.array(self.scan.tsdf_vol.cam_frustum)\n self.cam_frustum = self.cam_frustum - self.scan.tsdf_vol.camera_3D_pose\n self.cam_frustum = np.matmul(self.cam_frustum, R_)\n self.cam_frustum = np.matmul(self.cam_frustum, rot_90_x)\n\n self.scan_vis_cam.set_data(\n self.cam_frustum,\n color='blue',\n width=3,\n connect=self.scan.tsdf_vol.cam_connect\n )\n self.scan_vis_cam.update()\n\n #if self.scan.num_dets_to_consider > 0 and not self.scan.use_gpu:\n\n if ('camera' in self.label_vis.text):\n self.label_vis.text.pop()\n pose_ = self.label_vis.pos[:-1, :]\n self.label_vis.pos = pose_\n\n self.label_vis.text += self.scan.tsdf_vol.cam_label\n self.label_vis.pos = np.append(self.label_vis.pos, self.cam_frustum[:1, :], axis=0)\n\n\n # Draw Scene graph images\n generated_scene_graph_file = os.path.join(self.scan.tsdf_vol.scene_graph_path,\n 'scene_graph' + str(self.task.num_steps_taken())+'.png')\n if os.path.exists(generated_scene_graph_file):\n print('Draw scene graph{}'.format(self.task.num_steps_taken()))\n sg_img = cv2.cvtColor(cv2.imread(generated_scene_graph_file),\n cv2.COLOR_BGR2RGB)\n self.sg_img = cv2.resize(sg_img, (640, 480), interpolation=cv2.INTER_AREA)\n self.scene_graph_vis.set_data(self.sg_img)\n self.scene_graph_vis.update()\n\n print(\"--- %s seconds of %d to %d images---\" % (time.time() - start_time, self.task.num_steps_taken()-self.skip_im+1, self.task.num_steps_taken()))\n print(\"--- fps : {} ---\".format(self.skip_im / (time.time() - start_time)))\n\n\n def update_scan(self):\n # update_yolact images\n self.update_yolact()\n\n # Reconstruct 3D Scene and detect same nodes or not\n self.update_3d_recon()\n\n def update_seq_scan(self):\n if (self.view_point_cloud):\n if self.canvas.events.key_press.blocked():\n self.canvas.events.key_press.unblock()\n if self.img_canvas.events.key_press.blocked():\n self.img_canvas.events.key_press.unblock()\n if self.scene_graph_canvas.events.key_press.blocked():\n self.scene_graph_canvas.events.key_press.unblock()\n\n if(self.start):\n self.offset += 1\n\n self.update_yolact()\n self.update_3d_recon()\n\n self.canvas.scene.update()\n self.img_canvas.scene.update()\n self.scene_graph_canvas.update()\n self.canvas.on_draw(None)\n self.img_canvas.on_draw(None)\n self.scene_graph_canvas.on_draw(None)\n\n # interface\n def key_press(self, event):\n # if (self.view_point_cloud):\n # self.canvas.events.key_press.block()\n # self.img_canvas.events.key_press.block()\n self.keyboard_inputs = event.key\n if event.key == 'N':\n self.offset += 1\n # if self.offset >= self.total:\n # self.offset = 0\n self.update_scan()\n elif event.key == 'S':\n # Start to process RGB-D sequences\n self.start = True\n self.timer1 = vispy.app.Timer(0.033, connect=self.on_timer1, start=True)\n self.timer2 = vispy.app.Timer(0.033, connect=self.on_timer2, start=True)\n\n elif event.key == 'P':\n # Pause to process RGB sequences\n self.start = False\n\n elif event.key == 'U':\n # test when updated draw function\n self.canvas.scene.update()\n self.img_canvas.scene.update()\n self.scene_graph_canvas.update()\n\n elif event.key == 'Q' or event.key == 'Escape':\n self.destroy()\n\n\n def on_timer1(self, event):\n # self.update_seq_scan()\n if(self.start):\n self.offset += 1\n self.update_yolact()\n\n def on_timer2(self, event):\n if (self.start):\n # self.offset += 1\n self.update_3d_recon()\n\n def 
search_button_click(self):\n print('searching object : {}'.format(self.le.text()))\n objects_dict = self.scan.tsdf_vol.node_data\n\n is_obj_exist = []\n\n self.clear_searched_items(self.verticalLayoutRight)\n\n for key, val in objects_dict.items():\n if (val['class'] == self.le.text()):\n print('find {}'.format(self.le.text()))\n\n thumbnail_path = os.path.join(self.scan.tsdf_vol.bbox_path, 'thumbnail_' + str(key) +\n '_' + str(int(objects_dict[str(key)]['detection_cnt'] / 2)) + '.png')\n cv2_img = cv2.cvtColor(cv2.imread(thumbnail_path), cv2.COLOR_BGR2RGB)\n image = QImage(cv2_img.data, cv2_img.shape[1], cv2_img.shape[0], cv2_img.strides[0], QImage.Format_RGB888)\n image_frame = QLabel()\n image_frame.setPixmap(QPixmap.fromImage(image))\n self.verticalLayoutRight.addWidget(image_frame)\n\n checkBox = QCheckBox(val['class'] + str(key))\n self.checkBox_list += [[checkBox, val['class'], str(key)]]\n\n scan_bbox_3d = visuals.Line()\n self.checkBox_with_3D += [scan_bbox_3d]\n self.scan_view.add(scan_bbox_3d)\n\n checkBox.stateChanged.connect(self.checkBoxState)\n\n # searched_obj = QLabel(val['class'] + str(key))\n self.verticalLayoutRight.addWidget(checkBox)\n is_obj_exist += [True]\n\n if(not is_obj_exist):\n searched_obj = QLabel(\"Nothing was found!\")\n self.verticalLayoutRight.addWidget(searched_obj)\n else:\n searched_obj = QLabel(\"Check box if you want to find objects in 3D Scene.\")\n self.verticalLayoutRight.addWidget(searched_obj)\n\n\n def clear_button_click(self):\n print('clear previous searched object')\n self.clear_searched_items(self.verticalLayoutRight)\n\n\n def clear_searched_items(self, layout):\n # reset searching results widget\n while layout.count() > 0:\n item = layout.takeAt(0)\n if not item:\n continue\n\n w = item.widget()\n if w:\n w.deleteLater()\n\n # reset visuals.Line for 3D BBox of searched objects\n for i, check in enumerate(self.checkBox_list):\n self.checkBox_with_3D[i].parent = None\n self.checkBox_with_3D[i] = visuals.Line()\n self.scan_view.add(self.checkBox_with_3D[i])\n\n self.checkBox_list = []\n self.checkBox_with_3D = []\n\n\n def checkBoxState(self):\n # checkBox_list is composed of [QcheckBox, class_name, class_3D_ID]\n for i, check in enumerate(self.checkBox_list):\n if check[0].isChecked():\n print('checked!!!')\n # Find 3D BBox in 3D Scene Canvas\n bbox_3d = np.array(self.scan.tsdf_vol.bbox_3ds[check[2]])\n bbox_3d = self.center_view_point(bbox_3d, self.R_, self.scan.tsdf_vol.camera_3D_pose, self.rot_90_x)\n\n bbox_connect = np.array([[0,1], [1,2], [2,3], [3,0],\n [4,5], [5,6], [6,7], [7,4],\n [0,4], [1,5], [2,6], [3,7]])\n self.checkBox_with_3D[i].set_data(bbox_3d,\n color='green',\n width=1000.0,\n connect=bbox_connect)\n else:\n self.checkBox_with_3D[i].parent = None\n self.checkBox_with_3D[i] = visuals.Line()\n self.scan_view.add(self.checkBox_with_3D[i])\n\n\n def draw(self, event):\n # print('draw states!!')\n # print('event key: {}'.format(self.keyboard_inputs))\n if (self.view_point_cloud):\n if self.canvas.events.key_press.blocked():\n self.canvas.events.key_press.unblock()\n if self.img_canvas.events.key_press.blocked():\n self.img_canvas.events.key_press.unblock()\n if self.scene_graph_canvas.events.key_press.blocked():\n self.scene_graph_canvas.events.key_press.unblock()\n\n if self.keyboard_inputs == 'P':\n # Pause to process RGB sequences\n self.start = False\n # if self.keyboard_inputs == 'S':\n # self.update_seq_scan()\n\n\n def destroy(self):\n # destroy the visualization\n if (self.view_point_cloud):\n 
self.canvas.close()\n self.img_canvas.close()\n self.scene_graph_canvas.close()\n vispy.app.quit()\n\n def run(self):\n vispy.app.use_app(backend_name=\"PyQt5\", call_reuse=True)\n vispy.app.run()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.array", "numpy.matmul", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.cm.ScalarMappable", "numpy.append", "numpy.linspace", "numpy.linalg.inv" ] ]
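The reset() method in the record above wires up the standard vispy scaffolding: a SceneCanvas, a grid, a ViewBox with an arcball camera, and Markers/Mesh/Line visuals attached to the view. Below is a minimal, self-contained sketch of that same pattern; random points stand in for the reconstructed scan, and every name here is illustrative rather than taken from the repo.

import numpy as np
import vispy.app
import vispy.scene
from vispy.scene import visuals

# canvas -> grid -> ViewBox -> visual: the same scaffolding reset() builds
canvas = vispy.scene.SceneCanvas(keys='interactive', show=True)
grid = canvas.central_widget.add_grid()
view = vispy.scene.widgets.ViewBox(border_color='white', parent=canvas.scene)
grid.add_widget(view, 0, 0)

markers = visuals.Markers()                 # point-cloud visual
points = np.random.normal(size=(1000, 3))   # stand-in for the scan vertices
markers.set_data(points, face_color='white', size=3)
view.add(markers)

view.camera = 'arcball'                     # mouse orbit/zoom, as in the record
visuals.XYZAxis(parent=view.scene)          # world-axis gizmo

if __name__ == '__main__':
    vispy.app.run()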
KexinFeng/5523-data-mining
[ "0d812ba8d67c5533ed874e3e66148a8b87b427c1" ]
[ "csci5523-decision-tree-and-random-forest/dtinduce.py" ]
[ "import numpy as np\nimport pandas as pd\nimport time\n# from TreeNode import TreeNode\n# from Serialize import Serializer, DeSerializer\nimport csv\nimport sys\n\ndef Serializer(Node, fp):\n\n if Node.isleaf:\n output = Node.label\n writer = csv.writer(fp)\n writer.writerow([output])\n return\n else:\n assert not Node is None\n\n\n output = [Node.att_id, Node.split_val]\n writer = csv.writer(fp, delimiter=',')\n writer.writerow(output)\n\n Serializer(Node.left, fp)\n Serializer(Node.right, fp)\n\n\nclass TreeNode():\n\n def __init__(self, att_id=None, split_val=None, label=None, isleaf=None):\n self.att_id = att_id\n self.split_val = split_val\n\n self.isleaf = isleaf\n self.label = label\n\n self.left = None\n self.right = None\n\n\n def set_init(self, att_id, split_val):\n self.att_id = att_id\n self.split_val = split_val\n\n\nclass DecisionTree():\n # minfreq = 20\n image_db = None\n label_db = None\n N_db = 0\n image_unreduced = None\n\n image = None\n label = None\n N = 0 # the actually used size\n\n minfreq = None\n\n col_idx = None\n # boost_att_id = None\n num_att = 0\n\n\n def readData(self, filepath, minfreq_input=20):\n\n file = pd.read_csv(filepath, header=None)\n print('train_file: ', filepath)\n\n self.image_db = file.iloc[:, 1:].values\n self.label_db = file.iloc[:, 0].values\n self.N_db, self.num_att = self.image_db.shape\n\n # initialize the current training data\n self.minfreq = minfreq_input\n\n self.image = self.image_db\n self.label = self.label_db\n self.N = self.image.shape[0]\n\n self.col_idx = np.arange(self.num_att)\n self.image_unreduced = self.image\n\n def reduction(self, col_idx=None):\n print('before reduction:', self.image.shape)\n\n if col_idx is None:\n # rowstd = np.std(X, axis = 0)\n nonzero_row = np.sum(np.where(self.image > 0, 1, 0), axis=0)\n self.col_idx = np.argwhere(nonzero_row > 0)[:, 0]\n # self.col_idx = np.argwhere(nonzero_row > self.minfreq)[:, 0]\n # self.boost_att_id = np.argwhere(nonzero_row < self.minfreq)[:, 0]\n\n else:\n self.col_idx = col_idx\n\n self.image = self.image_unreduced[:, self.col_idx]\n\n self.N, self.num_att = self.image.shape\n\n print('after reduction:', self.image.shape)\n\n\n def find_best_split(self, E_imid, F_att):\n best_impurity = None\n best_split = None\n best_att = None\n best_att_ord = None\n # n_labl = len(E_imid)\n\n class_table_init = np.zeros((10, 2), dtype=np.int) # left and right\n for lab in self.label[E_imid]:\n class_table_init[int(lab), 1] += 1\n\n for ord, att in enumerate(F_att):\n cand_splits = self.image[E_imid, att] # 1-1 -> E_imid\n sorted_idx = np.argsort(cand_splits)\n\n # impurity, split = self.scan_split(E_imid[sorted_idx], cand_splits[sorted_idx])\n att_impurity = None\n att_split = None\n\n class_table = np.array(class_table_init, copy=True) # pointer vs copy\n\n last_label = self.label[E_imid[sorted_idx[0]]]\n last_split = cand_splits[sorted_idx[0]]\n # last_label = None\n # last_split = None\n\n # buffer_labels = {}\n # last_buffer_labels = {}\n has_split_degeneracy = False\n\n # print('label', 'value' )\n # print('class_table_ CHECK!')\n # print(class_table)\n\n for sidx in sorted_idx:\n\n # query!\n imid = E_imid[sidx]\n label = self.label[imid]\n # class_table[label, 0] += 1\n # class_table[label, 1] -= 1\n\n # do impurity calculation\n if not cand_splits[sidx] == last_split and att_impurity is None:\n impurity = self.Gini(class_table)\n att_impurity = impurity\n att_split = cand_splits[sidx]\n\n\n if not cand_splits[sidx] == last_split and not label == last_label \\\n or not 
cand_splits[sidx] == last_split and label == last_label and has_split_degeneracy:\n\n impurity = self.Gini(class_table)\n\n if att_impurity is None or att_impurity > impurity:\n att_impurity = impurity\n att_split = cand_splits[sidx]\n\n # update the lasts\n if not cand_splits[sidx] == last_split:\n last_split = cand_splits[sidx]\n has_split_degeneracy = False\n else:\n if not label == last_label:\n # if split remains the same && label changes, it has split degeneracy. i.e split corresponds to 2 labels\n has_split_degeneracy = True\n if not label == last_label:\n last_label = label\n\n class_table[label, 0] += 1\n class_table[label, 1] -= 1\n\n\n\n # print(ord, att)\n # print('cand_split:', cand_splits, 'label:', E_imid[sorted_idx])\n # print('att_impurity:', att_impurity)\n # print('att_split:', att_split)\n\n if att_impurity is None: # e.g. [1569: 0.0, 4968: 0.0]\n continue\n elif best_impurity is None or best_impurity > att_impurity:\n best_impurity = att_impurity\n best_split = att_split\n best_att = att\n best_att_ord = ord\n\n # print('best_impurity:', best_impurity)\n # print('best_split:', best_split)\n # print('best_att ', best_att)\n # print(ord)\n\n\n # for pair in zip(cand_splits[sorted_idx], E_imid[sorted_idx]):\n # print(pair[0], pair[1])\n assert not best_att is None\n\n return best_att, best_att_ord, best_split\n\n\n def majority_vote(self, E_imid):\n labels = self.label[E_imid]\n hist = np.zeros((10,), dtype=np.int)\n for lb in labels:\n hist[lb] += 1\n return np.argmax(hist)\n\n\n def Gini(self, class_table):\n nlnr = np.sum(class_table, 0)\n assert nlnr[0] > 0\n assert nlnr[1] > 0\n\n class_table_norm = class_table/nlnr\n IlIr = 1 - np.sum(np.power(class_table_norm, 2), 0)\n return np.sum(nlnr * IlIr) / np.sum(nlnr)\n\n\n def branching(self, att_id, split_val, E_imid):\n right = np.where(self.image[E_imid, att_id] >= split_val, True, False)\n left = ~right\n\n return [E_imid[left], E_imid[right]]\n\n\n def tree_induction(self, E_imid, F_att):\n # E_imid: a list of img id\n # F_att: a list of att id\n # node: parent tree node\n\n stopping = False\n pure = False\n if len(E_imid) < self.minfreq:\n # below min_sup in this node\n stopping = True\n # print('below minsup!')\n elif np.all(self.label[E_imid] == self.label[E_imid[0]]):\n # pure node\n stopping = True\n pure = True\n # print('node pure')\n\n if len(F_att) == 0:\n # out_of_attribute\n stopping = True\n # print('out_of_attrib')\n\n\n if stopping:\n # Stopping criterion\n node = TreeNode()\n node.isleaf = True\n if pure:\n node.label = self.label[E_imid[0]]\n else:\n node.label = self.majority_vote(E_imid)\n return node\n\n else:\n att_id, att_ord, split_val = self.find_best_split(E_imid, F_att)\n if att_id is None:\n node = TreeNode()\n node.label = self.majority_vote(E_imid)\n return node\n\n F_att = np.delete(F_att, att_ord)\n\n # print('att_id:{}'.format(att_id), 'split_val {}'.format(split_val))\n # print('support {}'.format(len(E_imid)), '# att {}'.format(len(E_imid)))\n # print('')\n\n\n node = TreeNode(att_id, split_val)\n\n # branching:\n branch_cand = self.branching(att_id, split_val, E_imid)\n\n # for b in branch_cand:\n # assert len(b)>0\n # sorted_id = np.argsort(self.image[E_imid, att_id])\n # print(self.label[E_imid[sorted_id]])\n # print(self.image[E_imid[sorted_id], att_id])\n\n\n # for E_i in branch_cand:\n node.left = self.tree_induction(branch_cand[0], F_att)\n node.right = self.tree_induction(branch_cand[1], F_att)\n return node\n\n\n # def build_tree(self):\n # E_init = np.arange(self.N)\n # 
F_att = np.arange(self.num_att)\n #\n # att_id, att_ord, split_val = self.find_best_split(E_init, F_att)\n # np.delete(F_att, att_ord)\n # # att_id : split_att\n # # split_val: float32\n #\n # root = TreeNode(att_id, split_val)\n #\n # # branching:\n # branch_cand = self.branching(att_id, split_val, E_init)\n # # a list of [E_imid]\n #\n # start_time = time.time()\n #\n # # for E_i in branch_cand:\n # self.tree_induction(branch_cand[0], F_att, root.left)\n # print('left finished, time: {}'.format(time.time() - start_time))\n # self.tree_induction(branch_cand[1], F_att, root.right)\n # print('right finished, time: {}'.format(time.time() - start_time))\n #\n # return root\n\n def validation(self, root, filename):\n print('Write preds to file: ', filename)\n\n with open(filename, 'w', newline='\\n') as fw:\n writer = csv.writer(fw, delimiter=',')\n for ind in range(self.N):\n label = self.label[ind]\n pred = self.go_to_node(root, ind)\n writer.writerow([label, pred])\n # print([label, pred])\n\n\n\n def go_to_node(self, root, ind):\n node = root\n while node.isleaf is None:\n att = node.att_id\n if self.image[ind, att] < node.split_val:\n node = node.left\n else:\n node = node.right\n\n return node.label\n\n\ndef main(train_file=None, minfreq=20, model_file=None, argv=sys.argv):\n if not len(argv) == 1:\n\n if len(argv) == 4:\n train_file = argv[1]\n minfreq = int(argv[2])\n model_file = argv[3]\n\n else:\n print(\"Usage:\" + '$ python3 dtinduce.py <trainFile> <minfreq> <model_file>')\n print(\"Eg:\" + \"$ python3 dtinduce.py ./data/rep2/test.csv 20 demo.csv\")\n sys.exit(1)\n elif train_file is None:\n print(\"Usage:\" + '$ python3 dtinduce.py <trainFile> <minfreq> <model_file>')\n print(\"Eg:\" + \"$ python3 dtinduce.py ./data/rep2/test.csv 20 demo.csv\")\n sys.exit(1)\n\n start_time = time.time()\n # print(train_file)\n # print(minfreq)\n # print(model_file)\n\n dt = DecisionTree()\n # train_file = './data/rep2/train.csv'\n\n dt.readData(train_file, minfreq)\n dt.reduction()\n\n # dt.find_best_split(np.arange(dt.N), np.arange(dt.num_att))\n # np.random.seed(0)\n # classtable = np.random.randn(3, 2).astype(int)+10\n\n # classtable = np.ones((2, 2))\n # print(classtable)\n # print(dt.Gini(classtable))\n\n # E = np.arange(dt.N)\n # F = np.arange(dt.num_att)\n # result = dt.find_best_split(E, F[10:200])\n # print(result)\n\n # print(E)\n # result = dt.branching(0, dt.image[0, 0], E)\n # print(result[0])\n # print(result[1])\n##############################################\n# Train and write the tree\n print('training ...')\n E_init = np.arange(dt.N)\n F_att = np.arange(dt.num_att)\n root = dt.tree_induction(E_init, F_att)\n\n # tree_file = 'demo.csv'\n tree_file = model_file\n with open(tree_file, 'w', newline='\\n') as fw:\n writer = csv.writer(fw, delimiter=',')\n writer.writerow(dt.col_idx)\n writer.writerow([dt.minfreq])\n\n Serializer(root, fw)\n\n\n\n# below is in dtclassify.py\n##############################################\n# Rebuild the tree\n\n # filename = 'demo.csv'\n # iterator = read_file(filename)\n #\n # eff_col = next(iterator)\n # col_idx = list(map(int, eff_col))\n # root_rebuilt = DeSerializer(iterator)\n # assert next(iterator, None) is None\n\n# ##############################################\n# # Test\n# testfile = './data/rep2/train.csv'\n# dtt = DecisionTree()\n# dtt.readData(testfile)\n# dtt.reduction(dt.col_idx)\n#\n# dtt.validation(root, 'prediction.csv')\n#\n# ##############################################\n\n\n print('')\n print('tot_time:', time.time() - 
start_time)\n\n\nif __name__ == '__main__':\n\n train_file = './data/rep2/test.csv'\n minfreq = 20\n model_file = 'demo.csv'\n\n main(train_file, minfreq, model_file)\n\n# tree traverse\n# diff to boost find_best_split" ]
[ [ "numpy.array", "numpy.delete", "numpy.zeros", "numpy.sum", "numpy.where", "numpy.arange", "numpy.argmax", "numpy.argsort", "numpy.argwhere", "numpy.all", "numpy.power", "pandas.read_csv" ] ]
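find_best_split in the record above scores each candidate threshold with the weighted Gini impurity of a left/right class-count table (the Gini method). The same computation as a standalone function, with hypothetical names, purely for illustration:

import numpy as np

def weighted_gini(class_table):
    # class_table[c, 0] = count of class c sent left, class_table[c, 1] = sent right
    n_side = class_table.sum(axis=0)            # samples on each side of the split
    p = class_table / n_side                    # per-side class proportions
    impurity = 1.0 - np.sum(p ** 2, axis=0)     # Gini impurity of each side
    return np.sum(n_side * impurity) / n_side.sum()

print(weighted_gini(np.array([[5, 0], [0, 5]])))  # perfectly pure split -> 0.0
print(weighted_gini(np.array([[3, 2], [2, 3]])))  # mixed split -> 0.48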
nghoanglong/text2sql-lgesql
[ "7526d8a771bea8eb87f936cfb21050badb4c290f" ]
[ "preprocess/common_utils.py" ]
[ "#coding=utf8\nimport os, sqlite3\nimport numpy as np\nfrom resources import vncorenlp\nfrom nltk.corpus import stopwords\nfrom itertools import product, combinations\nfrom utils.constants import MAX_RELATIVE_DIST\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef quote_normalization(question):\n \"\"\" Normalize all usage of quotation marks into a separate \\\" \"\"\"\n new_question, quotation_marks = [], [\"'\", '\"', '`', '‘', '’', '“', '”', '``', \"''\", \"‘‘\", \"’’\"]\n for idx, tok in enumerate(question):\n if len(tok) > 2 and tok[0] in quotation_marks and tok[-1] in quotation_marks:\n new_question += [\"\\\"\", tok[1:-1], \"\\\"\"]\n elif len(tok) > 2 and tok[0] in quotation_marks:\n new_question += [\"\\\"\", tok[1:]]\n elif len(tok) > 2 and tok[-1] in quotation_marks:\n new_question += [tok[:-1], \"\\\"\" ]\n elif tok in quotation_marks:\n new_question.append(\"\\\"\")\n elif len(tok) == 2 and tok[0] in quotation_marks:\n # special case: the length of entity value is 1\n if idx + 1 < len(question) and question[idx + 1] in quotation_marks:\n new_question += [\"\\\"\", tok[1]]\n else:\n new_question.append(tok)\n else:\n new_question.append(tok)\n return new_question\n\nclass Preprocessor():\n\n def __init__(self, db_dir='data/database', db_content=True):\n super(Preprocessor, self).__init__()\n self.db_dir = db_dir\n self.db_content = db_content\n # self.nlp = stanza.Pipeline('en', processors='tokenize,pos,lemma')#, use_gpu=False)\n self.vnnlp = vncorenlp.VNCoreNLP()\n # self.stopwords = stopwords.words(\"english\")\n self.stopwords = []\n\n def pipeline(self, entry: dict, db: dict, verbose: bool = False):\n \"\"\" db should be preprocessed \"\"\"\n entry = self.preprocess_question(entry, db, verbose=verbose)\n entry = self.schema_linking(entry, db, verbose=verbose)\n entry = self.extract_subgraph(entry, db, verbose=verbose)\n return entry\n\n def preprocess_database(self, db: dict, verbose: bool = False):\n \"\"\" Tokenize, lemmatize, lowercase table and column names for each database \"\"\"\n table_toks, table_names = [], []\n for tab in db['table_names']:\n doc = self.vnnlp.tokenize(tab)\n tab = [tok.lower() for sent in doc for tok in sent]\n table_toks.append(tab)\n table_names.append(\" \".join(tab))\n db['processed_table_toks'], db['processed_table_names'] = table_toks, table_names\n column_toks, column_names = [], []\n for _, c in db['column_names']:\n doc = self.vnnlp.tokenize(c)\n c = [tok.lower() for sent in doc for tok in sent]\n column_toks.append(c)\n column_names.append(\" \".join(c))\n db['processed_column_toks'], db['processed_column_names'] = column_toks, column_names\n column2table = list(map(lambda x: x[0], db['column_names'])) # from column id to table id\n table2columns = [[] for _ in range(len(table_names))] # from table id to column ids list\n for col_id, col in enumerate(db['column_names']):\n if col_id == 0: continue\n table2columns[col[0]].append(col_id)\n db['column2table'], db['table2columns'] = column2table, table2columns\n\n t_num, c_num, dtype = len(db['table_names']), len(db['column_names']), '<U100'\n\n # relations in tables, tab_num * tab_num\n tab_mat = np.array([['table-table-generic'] * t_num for _ in range(t_num)], dtype=dtype)\n table_fks = set(map(lambda pair: (column2table[pair[0]], column2table[pair[1]]), db['foreign_keys']))\n for (tab1, tab2) in table_fks:\n if (tab2, tab1) in table_fks:\n tab_mat[tab1, tab2], tab_mat[tab2, tab1] = 'table-table-fkb', 'table-table-fkb'\n else:\n tab_mat[tab1, 
tab2], tab_mat[tab2, tab1] = 'table-table-fk', 'table-table-fkr'\n tab_mat[list(range(t_num)), list(range(t_num))] = 'table-table-identity'\n\n # relations in columns, c_num * c_num\n col_mat = np.array([['column-column-generic'] * c_num for _ in range(c_num)], dtype=dtype)\n for i in range(t_num):\n col_ids = [idx for idx, t in enumerate(column2table) if t == i]\n col1, col2 = list(zip(*list(product(col_ids, col_ids))))\n col_mat[col1, col2] = 'column-column-sametable'\n col_mat[list(range(c_num)), list(range(c_num))] = 'column-column-identity'\n if len(db['foreign_keys']) > 0:\n col1, col2 = list(zip(*db['foreign_keys']))\n col_mat[col1, col2], col_mat[col2, col1] = 'column-column-fk', 'column-column-fkr'\n col_mat[0, list(range(c_num))] = '*-column-generic'\n col_mat[list(range(c_num)), 0] = 'column-*-generic'\n col_mat[0, 0] = '*-*-identity'\n\n # relations between tables and columns, t_num*c_num and c_num*t_num\n tab_col_mat = np.array([['table-column-generic'] * c_num for _ in range(t_num)], dtype=dtype)\n col_tab_mat = np.array([['column-table-generic'] * t_num for _ in range(c_num)], dtype=dtype)\n cols, tabs = list(zip(*list(map(lambda x: (x, column2table[x]), range(1, c_num))))) # ignore *\n col_tab_mat[cols, tabs], tab_col_mat[tabs, cols] = 'column-table-has', 'table-column-has'\n if len(db['primary_keys']) > 0:\n cols, tabs = list(zip(*list(map(lambda x: (x, column2table[x]), db['primary_keys']))))\n col_tab_mat[cols, tabs], tab_col_mat[tabs, cols] = 'column-table-pk', 'table-column-pk'\n col_tab_mat[0, list(range(t_num))] = '*-table-generic'\n tab_col_mat[list(range(t_num)), 0] = 'table-*-generic'\n\n relations = np.concatenate([\n np.concatenate([tab_mat, tab_col_mat], axis=1),\n np.concatenate([col_tab_mat, col_mat], axis=1)\n ], axis=0)\n db['relations'] = relations.tolist()\n\n if verbose:\n print('Tables:', ', '.join(db['table_names']))\n print('Lemmatized:', ', '.join(table_names))\n print('Columns:', ', '.join(list(map(lambda x: x[1], db['column_names']))))\n print('Lemmatized:', ', '.join(column_names), '\\n')\n return db\n\n def preprocess_question(self, entry: dict, db: dict, verbose: bool = False):\n \"\"\" Tokenize, lemmatize, lowercase question\"\"\"\n # stanza tokenize, lemmatize and POS tag\n question = ' '.join(quote_normalization(entry['question_toks']))\n doc_toks = self.vnnlp.tokenize(question)\n raw_toks = [tok.lower() for sent in doc_toks for tok in sent]\n toks = raw_toks\n doc_pos = self.vnnlp.pos_tag(question)\n pos_tags = [tok[1] for sent in doc_pos for tok in sent]\n\n entry['raw_question_toks'] = raw_toks\n entry['processed_question_toks'] = toks\n entry['pos_tags'] = pos_tags\n\n # relations in questions, q_num * q_num\n q_num, dtype = len(toks), '<U100'\n if q_num <= MAX_RELATIVE_DIST + 1:\n dist_vec = ['question-question-dist' + str(i) if i != 0 else 'question-question-identity'\n for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1, 1)]\n starting = MAX_RELATIVE_DIST\n else:\n dist_vec = ['question-question-generic'] * (q_num - MAX_RELATIVE_DIST - 1) + \\\n ['question-question-dist' + str(i) if i != 0 else 'question-question-identity' \\\n for i in range(- MAX_RELATIVE_DIST, MAX_RELATIVE_DIST + 1, 1)] + \\\n ['question-question-generic'] * (q_num - MAX_RELATIVE_DIST - 1)\n starting = q_num - 1\n q_mat = np.array([dist_vec[starting - i: starting - i + q_num] for i in range(q_num)], dtype=dtype)\n entry['relations'] = q_mat.tolist()\n\n if verbose:\n print('Question:', entry['question'])\n print('Tokenized:', ' 
'.join(entry['raw_question_toks']))\n print('Lemmatized:', ' '.join(entry['processed_question_toks']))\n print('Pos tags:', ' '.join(entry['pos_tags']), '\\n')\n return entry\n\n def extract_subgraph(self, entry: dict, db: dict, verbose: bool = False):\n sql = entry['sql']\n used_schema = {'table': set(), 'column': set()}\n used_schema = self.extract_subgraph_from_sql(sql, used_schema)\n entry['used_tables'] = sorted(list(used_schema['table']))\n entry['used_columns'] = sorted(list(used_schema['column']))\n\n if verbose:\n print('Used tables:', entry['used_tables'])\n print('Used columns:', entry['used_columns'], '\\n')\n return entry\n\n def extract_subgraph_from_sql(self, sql: dict, used_schema: dict):\n select_items = sql['select'][1]\n # select clause\n for _, val_unit in select_items:\n if val_unit[0] == 0:\n col_unit = val_unit[1]\n used_schema['column'].add(col_unit[1])\n else:\n col_unit1, col_unit2 = val_unit[1:]\n used_schema['column'].add(col_unit1[1])\n used_schema['column'].add(col_unit2[1])\n # from clause conds\n table_units = sql['from']['table_units']\n for _, t in table_units:\n if type(t) == dict:\n used_schema = self.extract_subgraph_from_sql(t, used_schema)\n else:\n used_schema['table'].add(t)\n # from, where and having conds\n used_schema = self.extract_subgraph_from_conds(sql['from']['conds'], used_schema)\n used_schema = self.extract_subgraph_from_conds(sql['where'], used_schema)\n used_schema = self.extract_subgraph_from_conds(sql['having'], used_schema)\n # groupBy and orderBy clause\n groupBy = sql['groupBy']\n for col_unit in groupBy:\n used_schema['column'].add(col_unit[1])\n orderBy = sql['orderBy']\n if len(orderBy) > 0:\n orderBy = orderBy[1]\n for val_unit in orderBy:\n if val_unit[0] == 0:\n col_unit = val_unit[1]\n used_schema['column'].add(col_unit[1])\n else:\n col_unit1, col_unit2 = val_unit[1:]\n used_schema['column'].add(col_unit1[1])\n used_schema['column'].add(col_unit2[1])\n # union, intersect and except clause\n if sql['intersect']:\n used_schema = self.extract_subgraph_from_sql(sql['intersect'], used_schema)\n if sql['union']:\n used_schema = self.extract_subgraph_from_sql(sql['union'], used_schema)\n if sql['except']:\n used_schema = self.extract_subgraph_from_sql(sql['except'], used_schema)\n return used_schema\n\n def extract_subgraph_from_conds(self, conds: list, used_schema: dict):\n if len(conds) == 0:\n return used_schema\n for cond in conds:\n if cond in ['and', 'or']:\n continue\n val_unit, val1, val2 = cond[2:]\n if val_unit[0] == 0:\n col_unit = val_unit[1]\n used_schema['column'].add(col_unit[1])\n else:\n col_unit1, col_unit2 = val_unit[1:]\n used_schema['column'].add(col_unit1[1])\n used_schema['column'].add(col_unit2[1])\n if type(val1) == list:\n used_schema['column'].add(val1[1])\n elif type(val1) == dict:\n used_schema = self.extract_subgraph_from_sql(val1, used_schema)\n if type(val2) == list:\n used_schema['column'].add(val2[1])\n elif type(val2) == dict:\n used_schema = self.extract_subgraph_from_sql(val2, used_schema)\n return used_schema\n\n def schema_linking(self, entry: dict, db: dict, verbose: bool = False):\n \"\"\" Perform schema linking: both question and database need to be preprocessed \"\"\"\n raw_question_toks, question_toks = entry['raw_question_toks'], entry['processed_question_toks']\n table_toks, column_toks = db['processed_table_toks'], db['processed_column_toks']\n table_names, column_names = db['processed_table_names'], db['processed_column_names']\n q_num, t_num, c_num, dtype = len(question_toks), 
len(table_toks), len(column_toks), '<U100'\n\n # relations between questions and tables, q_num*t_num and t_num*q_num\n table_matched_pairs = {'partial': [], 'exact': []}\n q_tab_mat = np.array([['question-table-nomatch'] * t_num for _ in range(q_num)], dtype=dtype)\n tab_q_mat = np.array([['table-question-nomatch'] * q_num for _ in range(t_num)], dtype=dtype)\n max_len = max([len(t) for t in table_toks])\n index_pairs = list(filter(lambda x: x[1] - x[0] <= max_len, combinations(range(q_num + 1), 2)))\n index_pairs = sorted(index_pairs, key=lambda x: x[1] - x[0])\n for i, j in index_pairs:\n phrase = ' '.join(question_toks[i: j])\n if phrase in self.stopwords: continue\n for idx, name in enumerate(table_names):\n if phrase == name: # fully match will overwrite partial match due to sort\n q_tab_mat[range(i, j), idx] = 'question-table-exactmatch'\n tab_q_mat[idx, range(i, j)] = 'table-question-exactmatch'\n if verbose:\n table_matched_pairs['exact'].append(str((name, idx, phrase, i, j)))\n elif (j - i == 1 and phrase in name.split()) or (j - i > 1 and phrase in name):\n q_tab_mat[range(i, j), idx] = 'question-table-partialmatch'\n tab_q_mat[idx, range(i, j)] = 'table-question-partialmatch'\n if verbose:\n table_matched_pairs['partial'].append(str((name, idx, phrase, i, j)))\n\n # relations between questions and columns\n column_matched_pairs = {'partial': [], 'exact': [], 'value': []}\n q_col_mat = np.array([['question-column-nomatch'] * c_num for _ in range(q_num)], dtype=dtype)\n col_q_mat = np.array([['column-question-nomatch'] * q_num for _ in range(c_num)], dtype=dtype)\n max_len = max([len(c) for c in column_toks])\n index_pairs = list(filter(lambda x: x[1] - x[0] <= max_len, combinations(range(q_num + 1), 2)))\n index_pairs = sorted(index_pairs, key=lambda x: x[1] - x[0])\n for i, j in index_pairs:\n phrase = ' '.join(question_toks[i: j])\n if phrase in self.stopwords: continue\n for idx, name in enumerate(column_names):\n if phrase == name: # fully match will overwrite partial match due to sort\n q_col_mat[range(i, j), idx] = 'question-column-exactmatch'\n col_q_mat[idx, range(i, j)] = 'column-question-exactmatch'\n if verbose:\n column_matched_pairs['exact'].append(str((name, idx, phrase, i, j)))\n elif (j - i == 1 and phrase in name.split()) or (j - i > 1 and phrase in name):\n q_col_mat[range(i, j), idx] = 'question-column-partialmatch'\n col_q_mat[idx, range(i, j)] = 'column-question-partialmatch'\n if verbose:\n column_matched_pairs['partial'].append(str((name, idx, phrase, i, j)))\n if self.db_content:\n db_file = os.path.join(self.db_dir, db['db_id'], db['db_id'] + '.sqlite')\n if not os.path.exists(db_file):\n raise ValueError('[ERROR]: database file %s not found ...' 
% (db_file))\n conn = sqlite3.connect(db_file)\n conn.text_factory = lambda b: b.decode(errors='ignore')\n conn.execute('pragma foreign_keys=ON')\n for i, (tab_id, col_name) in enumerate(db['column_names_original']):\n if i == 0 or 'id' in column_toks[i]: # ignore * and special token 'id'\n continue\n tab_name = db['table_names_original'][tab_id]\n try:\n cursor = conn.execute(\"SELECT DISTINCT \\\"%s\\\" FROM \\\"%s\\\";\" % (col_name, tab_name))\n cell_values = cursor.fetchall()\n cell_values = [str(each[0]) for each in cell_values]\n cell_values = [[str(float(each))] if is_number(each) else each.lower().split() for each in cell_values]\n except Exception as e:\n print(e)\n for j, word in enumerate(raw_question_toks):\n word = str(float(word)) if is_number(word) else word\n for c in cell_values:\n if word in c and 'nomatch' in q_col_mat[j, i] and word not in self.stopwords:\n q_col_mat[j, i] = 'question-column-valuematch'\n col_q_mat[i, j] = 'column-question-valuematch'\n if verbose:\n column_matched_pairs['value'].append(str((column_names[i], i, word, j, j + 1)))\n break\n conn.close()\n\n # two symmetric schema linking matrix: q_num x (t_num + c_num), (t_num + c_num) x q_num\n q_col_mat[:, 0] = 'question-*-generic'\n col_q_mat[0] = '*-question-generic'\n q_schema = np.concatenate([q_tab_mat, q_col_mat], axis=1)\n schema_q = np.concatenate([tab_q_mat, col_q_mat], axis=0)\n entry['schema_linking'] = (q_schema.tolist(), schema_q.tolist())\n\n if verbose:\n print('Question:', ' '.join(question_toks))\n print('Table matched: (table name, column id, question span, start id, end id)')\n print('Exact match:', ', '.join(table_matched_pairs['exact']) if table_matched_pairs['exact'] else 'empty')\n print('Partial match:', ', '.join(table_matched_pairs['partial']) if table_matched_pairs['partial'] else 'empty')\n print('Column matched: (column name, column id, question span, start id, end id)')\n print('Exact match:', ', '.join(column_matched_pairs['exact']) if column_matched_pairs['exact'] else 'empty')\n print('Partial match:', ', '.join(column_matched_pairs['partial']) if column_matched_pairs['partial'] else 'empty')\n print('Value match:', ', '.join(column_matched_pairs['value']) if column_matched_pairs['value'] else 'empty', '\\n')\n return entry\n" ]
[ [ "numpy.concatenate" ] ]
hanneshapke/similarity
[ "632c743ba817e7a49870e5d242eace9b2d248496" ]
[ "tests/visualization/test_neighbors_viz.py" ]
[ "from unittest import mock\nimport pytest\n\nimport tensorflow as tf\nfrom tensorflow_similarity.visualization import viz_neigbors_imgs\nfrom tensorflow_similarity.visualization.neighbors_viz import _get_class_label\nfrom tensorflow_similarity.types import Lookup\n\n\nclass TestGetClassLabel():\n\n @pytest.fixture\n def class_mapping(self):\n return {0: 'foo', 1: 'bar'}\n\n def test_example_class_is_none(self, class_mapping):\n c_lbl = _get_class_label(None, class_mapping)\n assert c_lbl == 'No Label'\n\n def test_class_mapping_is_none(self):\n c_lbl = _get_class_label(0, None)\n assert c_lbl == '0'\n\n def test_get_class_label(self, class_mapping):\n c_lbl = _get_class_label(0, class_mapping)\n assert c_lbl == 'foo'\n\n c_lbl = _get_class_label(1, class_mapping)\n assert c_lbl == 'bar'\n\n def test_example_class_not_in_mapping(self, class_mapping):\n c_lbl = _get_class_label(2, class_mapping)\n assert c_lbl == '2'\n\n def test_class_mapping_must_implement_get(self):\n msg = \"'list' object has no attribute 'get'\"\n with pytest.raises(AttributeError, match=msg):\n _ = _get_class_label(0, ['foo', 'bar'])\n\n\[email protected]('tensorflow_similarity.visualization.neighbors_viz.plt.show')\[email protected]('tensorflow_similarity.visualization.neighbors_viz.plt.subplots',\n autospec=True)\ndef test_viz_neighbors_imgs(mock_subplots, mock_show):\n ax_0 = mock.Mock()\n ax_1 = mock.Mock()\n ax_2 = mock.Mock()\n ax_3 = mock.Mock()\n mock_subplots.return_value = (None, [ax_0, ax_1, ax_2, ax_3])\n\n query_img = tf.constant([1.0])\n nn = [\n Lookup(\n rank=0, # Incorrect class but class id in class mapping\n distance=0.2,\n label=2,\n data=tf.constant([2.0])),\n Lookup(\n rank=0, # Incorrect class and class if not in mapping\n distance=0.3,\n label=3,\n data=tf.constant([3.0])),\n Lookup(\n rank=0, # Correct class and class in mapping\n distance=0.1,\n label=1,\n data=tf.constant([4.0]))\n ]\n\n viz_neigbors_imgs(example=query_img,\n example_class=1,\n neighbors=nn,\n class_mapping={\n 1: 'foo',\n 2: 'bar'\n },\n fig_size=(10, 10),\n cmap='Blues')\n\n mock_subplots.assert_called_with(nrows=1, ncols=4, figsize=(10, 10))\n\n ax_0.imshow.assert_called_with(query_img, cmap='Blues')\n ax_0.set_xticks.assert_called_with([])\n ax_0.set_yticks.assert_called_with([])\n ax_0.set_title.assert_called_with('foo')\n\n ax_1.imshow.assert_called_with(tf.constant([2.0]), cmap='Reds')\n ax_1.set_xticks.assert_called_with([])\n ax_1.set_yticks.assert_called_with([])\n ax_1.set_title.assert_called_with('bar - 0.20000')\n\n ax_2.imshow.assert_called_with(tf.constant([3.0]), cmap='Reds')\n ax_2.set_xticks.assert_called_with([])\n ax_2.set_yticks.assert_called_with([])\n ax_2.set_title.assert_called_with('3 - 0.30000')\n\n ax_3.imshow.assert_called_with(tf.constant([4.0]), cmap='Blues')\n ax_3.set_xticks.assert_called_with([])\n ax_3.set_yticks.assert_called_with([])\n ax_3.set_title.assert_called_with('foo - 0.10000')\n" ]
[ [ "tensorflow.constant" ] ]
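The test above never draws anything: it patches plt.subplots so the code under test receives mock axes, then asserts on the calls each axis recorded. The same pattern in miniature; plot_pair is a made-up function, purely for illustration:

from unittest import mock
import matplotlib.pyplot as plt

def plot_pair(a, b):
    _, (ax0, ax1) = plt.subplots(nrows=1, ncols=2)
    ax0.imshow(a)
    ax1.imshow(b)
    plt.show()

@mock.patch('matplotlib.pyplot.show')
@mock.patch('matplotlib.pyplot.subplots')
def test_plot_pair(mock_subplots, mock_show):        # bottom patch arrives first
    ax0, ax1 = mock.Mock(), mock.Mock()
    mock_subplots.return_value = (None, (ax0, ax1))  # fake (fig, axes)
    plot_pair('img_a', 'img_b')
    mock_subplots.assert_called_with(nrows=1, ncols=2)
    ax0.imshow.assert_called_with('img_a')
    ax1.imshow.assert_called_with('img_b')

test_plot_pair()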
hooloong/My_TensorFlow
[ "ef115989035b9ae14938dca47c0814b0d16dd6ba" ]
[ "Test05_DQN_Game/Test05_DQN_train.py" ]
[ "import pygame\nimport random\nfrom pygame.locals import *\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf # http://blog.topspeedsnail.com/archives/10116\nimport cv2 # http://blog.topspeedsnail.com/archives/4755\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nSCREEN_SIZE = [320, 400]\nBAR_SIZE = [50, 5]\nBALL_SIZE = [15, 15]\n\n# neural network outputs\nMOVE_STAY = [1, 0, 0]\nMOVE_LEFT = [0, 1, 0]\nMOVE_RIGHT = [0, 0, 1]\n\n\nclass Game(object):\n def __init__(self):\n pygame.init()\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode(SCREEN_SIZE)\n pygame.display.set_caption('Simple Game')\n\n self.ball_pos_x = SCREEN_SIZE[0] // 2 - BALL_SIZE[0] / 2\n self.ball_pos_y = SCREEN_SIZE[1] // 2 - BALL_SIZE[1] / 2\n\n self.ball_dir_x = -1 # -1 = left 1 = right\n self.ball_dir_y = -1 # -1 = up 1 = down\n self.ball_pos = pygame.Rect(self.ball_pos_x, self.ball_pos_y, BALL_SIZE[0], BALL_SIZE[1])\n\n self.bar_pos_x = SCREEN_SIZE[0] // 2 - BAR_SIZE[0] // 2\n self.bar_pos = pygame.Rect(self.bar_pos_x, SCREEN_SIZE[1] - BAR_SIZE[1], BAR_SIZE[0], BAR_SIZE[1])\n\n # action is MOVE_STAY, MOVE_LEFT or MOVE_RIGHT\n # the AI moves the bar left or right; returns the game screen pixels and the corresponding reward (pixels -> reward -> reinforce the bar to move toward higher reward)\n def step(self, action):\n\n if action == MOVE_LEFT:\n self.bar_pos_x = self.bar_pos_x - 2\n elif action == MOVE_RIGHT:\n self.bar_pos_x = self.bar_pos_x + 2\n else:\n pass\n if self.bar_pos_x < 0:\n self.bar_pos_x = 0\n if self.bar_pos_x > SCREEN_SIZE[0] - BAR_SIZE[0]:\n self.bar_pos_x = SCREEN_SIZE[0] - BAR_SIZE[0]\n\n self.screen.fill(BLACK)\n self.bar_pos.left = self.bar_pos_x\n pygame.draw.rect(self.screen, WHITE, self.bar_pos)\n\n self.ball_pos.left += self.ball_dir_x * 2\n self.ball_pos.bottom += self.ball_dir_y * 3\n pygame.draw.rect(self.screen, WHITE, self.ball_pos)\n\n if self.ball_pos.top <= 0 or self.ball_pos.bottom >= (SCREEN_SIZE[1] - BAR_SIZE[1] + 1):\n self.ball_dir_y = self.ball_dir_y * -1\n if self.ball_pos.left <= 0 or self.ball_pos.right >= (SCREEN_SIZE[0]):\n self.ball_dir_x = self.ball_dir_x * -1\n\n reward = 0\n if self.bar_pos.top <= self.ball_pos.bottom and (\n self.bar_pos.left < self.ball_pos.right and self.bar_pos.right > self.ball_pos.left):\n reward = 1 # reward for hitting the ball\n elif self.bar_pos.top <= self.ball_pos.bottom and (\n self.bar_pos.left > self.ball_pos.right or self.bar_pos.right < self.ball_pos.left):\n reward = -1 # penalty for missing the ball\n\n # grab the game screen pixels\n screen_image = pygame.surfarray.array3d(pygame.display.get_surface())\n pygame.display.update()\n # return the game screen pixels and the corresponding reward\n return reward, screen_image\n\n\n# reward discount factor (gamma)\nLEARNING_RATE = 0.99\n# epsilon-greedy exploration schedule\nINITIAL_EPSILON = 1.0\nFINAL_EPSILON = 0.05\n# numbers of exploration and observation steps\nEXPLORE = 500000\nOBSERVE = 50000\n# size of the replay memory storing past experience\nREPLAY_MEMORY = 500000\n\nBATCH = 100\n\noutput = 3 # number of output-layer neurons, one per action - MOVE_STAY:[1, 0, 0] MOVE_LEFT:[0, 1, 0] MOVE_RIGHT:[0, 0, 1]\ninput_image = tf.placeholder(\"float\", [None, 80, 100, 4]) # game screen pixels\naction = tf.placeholder(\"float\", [None, output]) # action\n\n\n# define the CNN (convolutional neural network); reference: http://blog.topspeedsnail.com/archives/10451\ndef convolutional_neural_network(input_image):\n weights = {'w_conv1': tf.Variable(tf.zeros([8, 8, 4, 32])),\n 'w_conv2': tf.Variable(tf.zeros([4, 4, 32, 64])),\n 'w_conv3': tf.Variable(tf.zeros([3, 3, 64, 64])),\n 'w_fc4': tf.Variable(tf.zeros([3456, 784])),\n 'w_out': tf.Variable(tf.zeros([784, output]))}\n\n biases = {'b_conv1': tf.Variable(tf.zeros([32])),\n 'b_conv2': tf.Variable(tf.zeros([64])),\n 'b_conv3': tf.Variable(tf.zeros([64])),\n 'b_fc4': tf.Variable(tf.zeros([784])),\n 'b_out': tf.Variable(tf.zeros([output]))}\n\n conv1 = tf.nn.relu(\n tf.nn.conv2d(input_image, weights['w_conv1'], strides=[1, 4, 4, 1], padding=\"VALID\") + biases['b_conv1'])\n conv2 = tf.nn.relu(\n tf.nn.conv2d(conv1, weights['w_conv2'], strides=[1, 2, 2, 1], padding=\"VALID\") + biases['b_conv2'])\n conv3 = tf.nn.relu(\n tf.nn.conv2d(conv2, weights['w_conv3'], strides=[1, 1, 1, 1], padding=\"VALID\") + biases['b_conv3'])\n conv3_flat = tf.reshape(conv3, [-1, 3456])\n fc4 = tf.nn.relu(tf.matmul(conv3_flat, weights['w_fc4']) + biases['b_fc4'])\n\n output_layer = tf.matmul(fc4, weights['w_out']) + biases['b_out']\n return output_layer\n\n\n# introduction to deep reinforcement learning: https://www.nervanasys.com/demystifying-deep-reinforcement-learning/\n# train the neural network\ndef train_neural_network(input_image):\n predict_action = convolutional_neural_network(input_image)\n\n argmax = tf.placeholder(\"float\", [None, output])\n gt = tf.placeholder(\"float\", [None])\n\n action = tf.reduce_sum(tf.multiply(predict_action, argmax), reduction_indices=1)\n cost = tf.reduce_mean(tf.square(action - gt))\n optimizer = tf.train.AdamOptimizer(1e-6).minimize(cost)\n\n game = Game()\n D = deque()\n\n _, image = game.step(MOVE_STAY)\n # convert to grayscale\n image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)\n # threshold to a binary image\n ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)\n input_image_data = np.stack((image, image, image, image), axis=2)\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n\n saver = tf.train.Saver()\n\n n = 0\n epsilon = INITIAL_EPSILON\n while True:\n action_t = predict_action.eval(feed_dict={input_image: [input_image_data]})[0]\n\n argmax_t = np.zeros([output], dtype=np.int)\n if (random.random() <= epsilon):\n maxIndex = random.randrange(output)\n else:\n maxIndex = np.argmax(action_t)\n argmax_t[maxIndex] = 1\n if epsilon > FINAL_EPSILON:\n epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE\n\n # for event in pygame.event.get(): macOS needs an event loop here, otherwise the window stays blank\n #\tif event.type == QUIT:\n #\t\tpygame.quit()\n #\t\tsys.exit()\n reward, image = game.step(list(argmax_t))\n\n image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY)\n ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY)\n image = np.reshape(image, (80, 100, 1))\n input_image_data1 = np.append(image, input_image_data[:, :, 0:3], axis=2)\n\n D.append((input_image_data, argmax_t, reward, input_image_data1))\n\n if len(D) > REPLAY_MEMORY:\n D.popleft()\n\n if n > OBSERVE:\n minibatch = random.sample(D, BATCH)\n input_image_data_batch = [d[0] for d in minibatch]\n argmax_batch = [d[1] for d in minibatch]\n reward_batch = [d[2] for d in minibatch]\n input_image_data1_batch = [d[3] for d in minibatch]\n\n gt_batch = []\n\n out_batch = predict_action.eval(feed_dict={input_image: input_image_data1_batch})\n\n for i in range(0, len(minibatch)):\n gt_batch.append(reward_batch[i] + LEARNING_RATE * np.max(out_batch[i]))\n\n optimizer.run(feed_dict={gt: gt_batch, argmax: argmax_batch, input_image: input_image_data_batch})\n\n input_image_data = input_image_data1\n n = n + 1\n\n if n % 10000 == 0:\n saver.save(sess, './game.cpk', global_step=n) # save the model\n\n print(n, \"epsilon:\", epsilon, \" \", \"action:\", maxIndex, \" \", \"reward:\", reward)\n\n\ntrain_neural_network(input_image)" ]
[ [ "numpy.max", "tensorflow.multiply", "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "tensorflow.initialize_all_variables", "numpy.zeros", "numpy.reshape", "tensorflow.matmul", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.reshape", "numpy.stack", "tensorflow.placeholder", "numpy.argmax", "numpy.append", "tensorflow.square" ] ]
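train_neural_network above keeps a bounded deque of (state, action, reward, next_state) tuples and, once past the OBSERVE phase, samples random minibatches and regresses Q toward reward + gamma * max Q(next state). That replay/target skeleton, framework-free and with illustrative names (note that the constant named LEARNING_RATE in the record is actually used as the discount factor):

import random
from collections import deque

GAMMA = 0.99                  # the record calls this LEARNING_RATE
D = deque(maxlen=500000)      # maxlen replaces the manual popleft() bookkeeping

def remember(state, action, reward, next_state):
    D.append((state, action, reward, next_state))

def bellman_targets(minibatch, q_next_batch):
    # target: r + gamma * max_a' Q(s', a'), as in the gt_batch loop above
    return [r + GAMMA * max(q_next)
            for (_, _, r, _), q_next in zip(minibatch, q_next_batch)]

remember('s0', 'left', 1.0, 's1')
batch = random.sample(D, 1)
print(bellman_targets(batch, [[0.2, 0.5, 0.1]]))  # [1.0 + 0.99 * 0.5] -> [1.495]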
Pandinosaurus/CutMix-PyTorch
[ "1bc006b5f40c284a6bf4ef881906e2109a27ef6c" ]
[ "utils.py" ]
[ "# original code: https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py\n\nimport torch\nimport random\n\n__all__ = [\"Compose\", \"Lighting\", \"ColorJitter\"]\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass Lighting(object):\n \"\"\"Lighting noise(AlexNet - style PCA - based noise)\"\"\"\n\n def __init__(self, alphastd, eigval, eigvec):\n self.alphastd = alphastd\n self.eigval = torch.Tensor(eigval)\n self.eigvec = torch.Tensor(eigvec)\n\n def __call__(self, img):\n if self.alphastd == 0:\n return img\n\n alpha = img.new().resize_(3).normal_(0, self.alphastd)\n rgb = self.eigvec.type_as(img).clone() \\\n .mul(alpha.view(1, 3).expand(3, 3)) \\\n .mul(self.eigval.view(1, 3).expand(3, 3)) \\\n .sum(1).squeeze()\n\n return img.add(rgb.view(3, 1, 1).expand_as(img))\n\n\nclass Grayscale(object):\n\n def __call__(self, img):\n gs = img.clone()\n gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2])\n gs[1].copy_(gs[0])\n gs[2].copy_(gs[0])\n return gs\n\n\nclass Saturation(object):\n\n def __init__(self, var):\n self.var = var\n\n def __call__(self, img):\n gs = Grayscale()(img)\n alpha = random.uniform(-self.var, self.var)\n return img.lerp(gs, alpha)\n\n\nclass Brightness(object):\n\n def __init__(self, var):\n self.var = var\n\n def __call__(self, img):\n gs = img.new().resize_as_(img).zero_()\n alpha = random.uniform(-self.var, self.var)\n return img.lerp(gs, alpha)\n\n\nclass Contrast(object):\n\n def __init__(self, var):\n self.var = var\n\n def __call__(self, img):\n gs = Grayscale()(img)\n gs.fill_(gs.mean())\n alpha = random.uniform(-self.var, self.var)\n return img.lerp(gs, alpha)\n\n\nclass ColorJitter(object):\n\n def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):\n self.brightness = brightness\n self.contrast = contrast\n self.saturation = saturation\n\n def __call__(self, img):\n self.transforms = []\n if self.brightness != 0:\n self.transforms.append(Brightness(self.brightness))\n if self.contrast != 0:\n self.transforms.append(Contrast(self.contrast))\n if self.saturation != 0:\n self.transforms.append(Saturation(self.saturation))\n\n random.shuffle(self.transforms)\n transform = Compose(self.transforms)\n # print(transform)\n return transform(img)\n" ]
[ [ "torch.Tensor" ] ]
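The Lighting class above implements AlexNet-style PCA colour noise: one alpha is drawn per principal component and the image is shifted along the RGB eigenvectors scaled by the eigenvalues. A usage sketch of the same math with the PCA statistics commonly quoted for ImageNet; the numeric constants are an assumption, not something this file defines:

import torch

# commonly quoted ImageNet RGB PCA statistics (assumed values, not from this file)
EIGVAL = torch.tensor([0.2175, 0.0188, 0.0045])
EIGVEC = torch.tensor([[-0.5675, 0.7192, 0.4009],
                       [-0.5808, -0.0045, -0.8140],
                       [-0.5836, -0.6948, 0.4203]])

def pca_lighting(img, alphastd=0.1):
    # img: float tensor of shape (3, H, W); mirrors Lighting.__call__
    alpha = torch.randn(3) * alphastd            # one draw per principal component
    rgb = (EIGVEC * alpha * EIGVAL).sum(dim=1)   # per-channel colour shift
    return img + rgb.view(3, 1, 1)

out = pca_lighting(torch.rand(3, 4, 4))
print(out.shape)  # torch.Size([3, 4, 4])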
FieldMrFive/ray
[ "a22d6ef95594a3b95fac5b2eb17f7f21be2888e8" ]
[ "python/ray/rllib/optimizers/async_samples_optimizer.py" ]
[ "\"\"\"Implements the IMPALA architecture.\n\nhttps://arxiv.org/abs/1802.01561\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport numpy as np\nimport random\nimport time\nimport threading\n\nfrom six.moves import queue\n\nimport ray\nfrom ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer\nfrom ray.rllib.optimizers.policy_optimizer import PolicyOptimizer\nfrom ray.rllib.utils.actors import TaskPool\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.timer import TimerStat\nfrom ray.rllib.utils.window_stat import WindowStat\n\nlogger = logging.getLogger(__name__)\n\nNUM_DATA_LOAD_THREADS = 16\n\n\nclass AsyncSamplesOptimizer(PolicyOptimizer):\n \"\"\"Main event loop of the IMPALA architecture.\n\n This class coordinates the data transfers between the learner thread\n and remote evaluators (IMPALA actors).\n \"\"\"\n\n @override(PolicyOptimizer)\n def _init(self,\n train_batch_size=500,\n sample_batch_size=50,\n num_envs_per_worker=1,\n num_gpus=0,\n lr=0.0005,\n replay_buffer_num_slots=0,\n replay_proportion=0.0,\n num_data_loader_buffers=1,\n max_sample_requests_in_flight_per_worker=2,\n broadcast_interval=1,\n num_sgd_iter=1,\n minibatch_buffer_size=1,\n learner_queue_size=16,\n _fake_gpus=False):\n self.train_batch_size = train_batch_size\n self.sample_batch_size = sample_batch_size\n self.broadcast_interval = broadcast_interval\n\n self._stats_start_time = time.time()\n self._last_stats_time = {}\n self._last_stats_sum = {}\n\n if num_gpus > 1 or num_data_loader_buffers > 1:\n logger.info(\n \"Enabling multi-GPU mode, {} GPUs, {} parallel loaders\".format(\n num_gpus, num_data_loader_buffers))\n if num_data_loader_buffers < minibatch_buffer_size:\n raise ValueError(\n \"In multi-gpu mode you must have at least as many \"\n \"parallel data loader buffers as minibatch buffers: \"\n \"{} vs {}\".format(num_data_loader_buffers,\n minibatch_buffer_size))\n self.learner = TFMultiGPULearner(\n self.local_evaluator,\n lr=lr,\n num_gpus=num_gpus,\n train_batch_size=train_batch_size,\n num_data_loader_buffers=num_data_loader_buffers,\n minibatch_buffer_size=minibatch_buffer_size,\n num_sgd_iter=num_sgd_iter,\n learner_queue_size=learner_queue_size,\n _fake_gpus=_fake_gpus)\n else:\n self.learner = LearnerThread(self.local_evaluator,\n minibatch_buffer_size, num_sgd_iter,\n learner_queue_size)\n self.learner.start()\n\n if len(self.remote_evaluators) == 0:\n logger.warning(\"Config num_workers=0 means training will hang!\")\n\n # Stats\n self._optimizer_step_timer = TimerStat()\n self.num_weight_syncs = 0\n self.num_replayed = 0\n self._stats_start_time = time.time()\n self._last_stats_time = {}\n self._last_stats_val = {}\n\n # Kick off async background sampling\n self.sample_tasks = TaskPool()\n weights = self.local_evaluator.get_weights()\n for ev in self.remote_evaluators:\n ev.set_weights.remote(weights)\n for _ in range(max_sample_requests_in_flight_per_worker):\n self.sample_tasks.add(ev, ev.sample.remote())\n\n self.batch_buffer = []\n\n if replay_proportion:\n if replay_buffer_num_slots * sample_batch_size <= train_batch_size:\n raise ValueError(\n \"Replay buffer size is too small to produce train, \"\n \"please increase replay_buffer_num_slots.\",\n replay_buffer_num_slots, sample_batch_size,\n train_batch_size)\n self.replay_proportion = replay_proportion\n self.replay_buffer_num_slots = replay_buffer_num_slots\n self.replay_batches = []\n\n def 
add_stat_val(self, key, val):\n if key not in self._last_stats_sum:\n self._last_stats_sum[key] = 0\n self._last_stats_time[key] = self._stats_start_time\n self._last_stats_sum[key] += val\n\n def get_mean_stats_and_reset(self):\n now = time.time()\n mean_stats = {\n key: round(val / (now - self._last_stats_time[key]), 3)\n for key, val in self._last_stats_sum.items()\n }\n\n for key in self._last_stats_sum.keys():\n self._last_stats_sum[key] = 0\n self._last_stats_time[key] = time.time()\n\n return mean_stats\n\n @override(PolicyOptimizer)\n def step(self):\n assert self.learner.is_alive()\n with self._optimizer_step_timer:\n sample_timesteps, train_timesteps = self._step()\n\n if sample_timesteps > 0:\n self.add_stat_val(\"sample_throughput\", sample_timesteps)\n if train_timesteps > 0:\n self.add_stat_val(\"train_throughput\", train_timesteps)\n\n self.num_steps_sampled += sample_timesteps\n self.num_steps_trained += train_timesteps\n\n @override(PolicyOptimizer)\n def stop(self):\n self.learner.stopped = True\n\n @override(PolicyOptimizer)\n def stats(self):\n def timer_to_ms(timer):\n return round(1000 * timer.mean, 3)\n\n timing = {\n \"optimizer_step_time_ms\": timer_to_ms(self._optimizer_step_timer),\n \"learner_grad_time_ms\": timer_to_ms(self.learner.grad_timer),\n \"learner_load_time_ms\": timer_to_ms(self.learner.load_timer),\n \"learner_load_wait_time_ms\": timer_to_ms(\n self.learner.load_wait_timer),\n \"learner_dequeue_time_ms\": timer_to_ms(self.learner.queue_timer),\n }\n stats = dict({\n \"num_weight_syncs\": self.num_weight_syncs,\n \"num_steps_replayed\": self.num_replayed,\n \"timing_breakdown\": timing,\n \"learner_queue\": self.learner.learner_queue_size.stats(),\n }, **self.get_mean_stats_and_reset())\n self._last_stats_val.clear()\n if self.learner.stats:\n stats[\"learner\"] = self.learner.stats\n return dict(PolicyOptimizer.stats(self), **stats)\n\n def _step(self):\n sample_timesteps, train_timesteps = 0, 0\n num_sent = 0\n weights = None\n\n for ev, sample_batch in self._augment_with_replay(\n self.sample_tasks.completed_prefetch()):\n self.batch_buffer.append(sample_batch)\n if sum(b.count\n for b in self.batch_buffer) >= self.train_batch_size:\n train_batch = self.batch_buffer[0].concat_samples(\n self.batch_buffer)\n self.learner.inqueue.put(train_batch)\n self.batch_buffer = []\n\n # If the batch was replayed, skip the update below.\n if ev is None:\n continue\n\n sample_timesteps += sample_batch.count\n\n # Put in replay buffer if enabled\n if self.replay_buffer_num_slots > 0:\n self.replay_batches.append(sample_batch)\n if len(self.replay_batches) > self.replay_buffer_num_slots:\n self.replay_batches.pop(0)\n\n # Note that it's important to pull new weights once\n # updated to avoid excessive correlation between actors\n if weights is None or (self.learner.weights_updated\n and num_sent >= self.broadcast_interval):\n self.learner.weights_updated = False\n weights = ray.put(self.local_evaluator.get_weights())\n num_sent = 0\n ev.set_weights.remote(weights)\n self.num_weight_syncs += 1\n num_sent += 1\n\n # Kick off another sample request\n self.sample_tasks.add(ev, ev.sample.remote())\n\n while not self.learner.outqueue.empty():\n count = self.learner.outqueue.get()\n train_timesteps += count\n\n return sample_timesteps, train_timesteps\n\n def _augment_with_replay(self, sample_futures):\n def can_replay():\n num_needed = int(\n np.ceil(self.train_batch_size / self.sample_batch_size))\n return len(self.replay_batches) > num_needed\n\n for ev, sample_batch 
in sample_futures:\n sample_batch = ray.get(sample_batch)\n yield ev, sample_batch\n\n if can_replay():\n f = self.replay_proportion\n while random.random() < f:\n f -= 1\n replay_batch = random.choice(self.replay_batches)\n self.num_replayed += replay_batch.count\n yield None, replay_batch\n\n\nclass LearnerThread(threading.Thread):\n \"\"\"Background thread that updates the local model from sample trajectories.\n\n The learner thread communicates with the main thread through Queues. This\n is needed since Ray operations can only be run on the main thread. In\n addition, moving heavyweight gradient ops session runs off the main thread\n improves overall throughput.\n \"\"\"\n\n def __init__(self, local_evaluator, minibatch_buffer_size, num_sgd_iter,\n learner_queue_size):\n threading.Thread.__init__(self)\n self.learner_queue_size = WindowStat(\"size\", 50)\n self.local_evaluator = local_evaluator\n self.inqueue = queue.Queue(maxsize=learner_queue_size)\n self.outqueue = queue.Queue()\n self.minibatch_buffer = MinibatchBuffer(\n self.inqueue, minibatch_buffer_size, num_sgd_iter)\n self.queue_timer = TimerStat()\n self.grad_timer = TimerStat()\n self.load_timer = TimerStat()\n self.load_wait_timer = TimerStat()\n self.daemon = True\n self.weights_updated = False\n self.stats = {}\n self.stopped = False\n\n def run(self):\n while not self.stopped:\n self.step()\n\n def step(self):\n with self.queue_timer:\n batch, _ = self.minibatch_buffer.get()\n\n with self.grad_timer:\n fetches = self.local_evaluator.learn_on_batch(batch)\n self.weights_updated = True\n self.stats = fetches.get(\"stats\", {})\n\n self.outqueue.put(batch.count)\n self.learner_queue_size.push(self.inqueue.qsize())\n\n\nclass TFMultiGPULearner(LearnerThread):\n \"\"\"Learner that can use multiple GPUs and parallel loading.\"\"\"\n\n def __init__(self,\n local_evaluator,\n num_gpus=1,\n lr=0.0005,\n train_batch_size=500,\n num_data_loader_buffers=1,\n minibatch_buffer_size=1,\n num_sgd_iter=1,\n learner_queue_size=16,\n _fake_gpus=False):\n # Multi-GPU requires TensorFlow to function.\n import tensorflow as tf\n\n LearnerThread.__init__(self, local_evaluator, minibatch_buffer_size,\n num_sgd_iter, learner_queue_size)\n self.lr = lr\n self.train_batch_size = train_batch_size\n if not num_gpus:\n self.devices = [\"/cpu:0\"]\n elif _fake_gpus:\n self.devices = [\"/cpu:{}\".format(i) for i in range(num_gpus)]\n else:\n self.devices = [\"/gpu:{}\".format(i) for i in range(num_gpus)]\n logger.info(\"TFMultiGPULearner devices {}\".format(self.devices))\n assert self.train_batch_size % len(self.devices) == 0\n assert self.train_batch_size >= len(self.devices), \"batch too small\"\n\n if set(self.local_evaluator.policy_map.keys()) != {\"default\"}:\n raise NotImplementedError(\"Multi-gpu mode for multi-agent\")\n self.policy = self.local_evaluator.policy_map[\"default\"]\n\n # per-GPU graph copies created below must share vars with the policy\n # reuse is set to AUTO_REUSE because Adam nodes are created after\n # all of the device copies are created.\n self.par_opt = []\n with self.local_evaluator.tf_sess.graph.as_default():\n with self.local_evaluator.tf_sess.as_default():\n with tf.variable_scope(\"default\", reuse=tf.AUTO_REUSE):\n if self.policy._state_inputs:\n rnn_inputs = self.policy._state_inputs + [\n self.policy._seq_lens\n ]\n else:\n rnn_inputs = []\n adam = tf.train.AdamOptimizer(self.lr)\n for _ in range(num_data_loader_buffers):\n self.par_opt.append(\n LocalSyncParallelOptimizer(\n adam,\n self.devices,\n [v for _, v 
in self.policy._loss_inputs],\n rnn_inputs,\n 999999, # it will get rounded down\n self.policy.copy))\n\n self.sess = self.local_evaluator.tf_sess\n self.sess.run(tf.global_variables_initializer())\n\n self.idle_optimizers = queue.Queue()\n self.ready_optimizers = queue.Queue()\n for opt in self.par_opt:\n self.idle_optimizers.put(opt)\n for i in range(NUM_DATA_LOAD_THREADS):\n self.loader_thread = _LoaderThread(self, share_stats=(i == 0))\n self.loader_thread.start()\n\n self.minibatch_buffer = MinibatchBuffer(\n self.ready_optimizers, minibatch_buffer_size, num_sgd_iter)\n\n @override(LearnerThread)\n def step(self):\n assert self.loader_thread.is_alive()\n with self.load_wait_timer:\n opt, released = self.minibatch_buffer.get()\n if released:\n self.idle_optimizers.put(opt)\n\n with self.grad_timer:\n fetches = opt.optimize(self.sess, 0)\n self.weights_updated = True\n self.stats = fetches.get(\"stats\", {})\n\n self.outqueue.put(self.train_batch_size)\n self.learner_queue_size.push(self.inqueue.qsize())\n\n\nclass _LoaderThread(threading.Thread):\n def __init__(self, learner, share_stats):\n threading.Thread.__init__(self)\n self.learner = learner\n self.daemon = True\n if share_stats:\n self.queue_timer = learner.queue_timer\n self.load_timer = learner.load_timer\n else:\n self.queue_timer = TimerStat()\n self.load_timer = TimerStat()\n\n def run(self):\n while True:\n self._step()\n\n def _step(self):\n s = self.learner\n with self.queue_timer:\n batch = s.inqueue.get()\n\n opt = s.idle_optimizers.get()\n\n with self.load_timer:\n tuples = s.policy._get_loss_inputs_dict(batch)\n data_keys = [ph for _, ph in s.policy._loss_inputs]\n if s.policy._state_inputs:\n state_keys = s.policy._state_inputs + [s.policy._seq_lens]\n else:\n state_keys = []\n opt.load_data(s.sess, [tuples[k] for k in data_keys],\n [tuples[k] for k in state_keys])\n\n s.ready_optimizers.put(opt)\n\n\nclass MinibatchBuffer(object):\n \"\"\"Ring buffer of recent data batches for minibatch SGD.\"\"\"\n\n def __init__(self, inqueue, size, num_passes):\n \"\"\"Initialize a minibatch buffer.\n\n Arguments:\n inqueue: Queue to populate the internal ring buffer from.\n size: Max number of data items to buffer.\n num_passes: Max num times each data item should be emitted.\n \"\"\"\n self.inqueue = inqueue\n self.size = size\n self.max_ttl = num_passes\n self.cur_max_ttl = 1 # ramp up slowly to better mix the input data\n self.buffers = [None] * size\n self.ttl = [0] * size\n self.idx = 0\n\n def get(self):\n \"\"\"Get a new batch from the internal ring buffer.\n\n Returns:\n buf: Data item saved from inqueue.\n released: True if the item is now removed from the ring buffer.\n \"\"\"\n if self.ttl[self.idx] <= 0:\n self.buffers[self.idx] = self.inqueue.get()\n self.ttl[self.idx] = self.cur_max_ttl\n if self.cur_max_ttl < self.max_ttl:\n self.cur_max_ttl += 1\n buf = self.buffers[self.idx]\n self.ttl[self.idx] -= 1\n released = self.ttl[self.idx] <= 0\n if released:\n self.buffers[self.idx] = None\n self.idx = (self.idx + 1) % len(self.buffers)\n return buf, released\n" ]
[ [ "numpy.ceil", "tensorflow.global_variables_initializer", "tensorflow.variable_scope", "tensorflow.train.AdamOptimizer" ] ]
vsewall/aphantasia
[ "f1e6297dd3412c4798531766b966c104775db62e" ]
[ "depth/adabins/infer.py" ]
[ "import os\nimport sys\nimport glob\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torchvision import transforms\nfrom tqdm import tqdm\n\nfrom . import model_io\nfrom . import utils\nfrom .models import UnetAdaptiveBins\n\ndef _is_pil_image(img):\n    return isinstance(img, Image.Image)\n\ndef _is_numpy_image(img):\n    return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\nclass ToTensor(object):\n    def __init__(self):\n        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n    def __call__(self, image, target_size=(640, 480)):\n        # image = image.resize(target_size)\n        image = self.to_tensor(image)\n        image = self.normalize(image)\n        return image\n\n    def to_tensor(self, pic):\n        if not (_is_pil_image(pic) or _is_numpy_image(pic)):\n            raise TypeError(\n                'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n\n        if isinstance(pic, np.ndarray):\n            img = torch.from_numpy(pic.transpose((2, 0, 1)))\n            return img\n\n        # handle PIL Image\n        if pic.mode == 'I':\n            img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n        elif pic.mode == 'I;16':\n            img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n        else:\n            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK\n        if pic.mode == 'YCbCr':\n            nchannel = 3\n        elif pic.mode == 'I;16':\n            nchannel = 1\n        else:\n            nchannel = len(pic.mode)\n        img = img.view(pic.size[1], pic.size[0], nchannel)\n\n        img = img.transpose(0, 1).transpose(0, 2).contiguous()\n        if isinstance(img, torch.ByteTensor):\n            return img.float()\n        else:\n            return img\n\n\nclass InferenceHelper:\n    def __init__(self, model_path='models/AdaBins_nyu.pt', device='cuda:0'):\n        self.toTensor = ToTensor()\n        self.device = device\n        if 'nyu' in model_path:\n            self.min_depth = 1e-3\n            self.max_depth = 10\n            self.saving_factor = 1000 # used to save in 16 bit\n            model = UnetAdaptiveBins.build(n_bins=256, min_val=self.min_depth, max_val=self.max_depth)\n        elif 'kitti' in model_path:\n            self.min_depth = 1e-3\n            self.max_depth = 80\n            self.saving_factor = 256\n            model = UnetAdaptiveBins.build(n_bins=256, min_val=self.min_depth, max_val=self.max_depth)\n        else:\n            # fixed NameError: the message referenced an undefined 'dataset' variable\n            raise ValueError(\"dataset can be either 'nyu' or 'kitti' but got {}\".format(model_path))\n\n        model, _, _ = model_io.load_checkpoint(model_path, model)\n        model.eval()\n        self.model = model.to(self.device)\n\n    @torch.no_grad()\n    def predict_pil(self, pil_image, visualized=False):\n        # pil_image = pil_image.resize((640, 480))\n        img = np.asarray(pil_image) / 255.\n\n        img = self.toTensor(img).unsqueeze(0).float().to(self.device)\n        bin_centers, pred = self.predict(img)\n\n        if visualized:\n            viz = utils.colorize(torch.from_numpy(pred).unsqueeze(0), vmin=None, vmax=None, cmap='magma')\n            # pred = np.asarray(pred*1000, dtype='uint16')\n            viz = Image.fromarray(viz)\n            return bin_centers, pred, viz\n        return bin_centers, pred\n\n    @torch.no_grad()\n    def predict(self, image):\n        bins, pred = self.model(image)\n        pred = np.clip(pred.cpu().numpy(), self.min_depth, self.max_depth)\n\n        # Flip\n        image = torch.Tensor(np.array(image.cpu().numpy())[..., ::-1].copy()).to(self.device)\n        pred_lr = self.model(image)[-1]\n        pred_lr = np.clip(pred_lr.cpu().numpy()[..., ::-1], self.min_depth, self.max_depth)\n\n        # Take average of original and mirror\n        final = 0.5 * (pred + pred_lr)\n        final = nn.functional.interpolate(torch.Tensor(final), image.shape[-2:],\n                                          mode='bilinear', align_corners=True).cpu().numpy()\n\n        final[final < self.min_depth] = self.min_depth\n        final[final > self.max_depth] = self.max_depth\n        final[np.isinf(final)] = self.max_depth\n        final[np.isnan(final)] = self.min_depth\n\n        centers = 0.5 * (bins[:, 1:] + bins[:, :-1])\n        centers = centers.cpu().squeeze().numpy()\n        centers = centers[centers > self.min_depth]\n        centers = centers[centers < self.max_depth]\n\n        return centers, final\n\n    @torch.no_grad()\n    def predict_dir(self, test_dir, out_dir):\n        os.makedirs(out_dir, exist_ok=True)\n        transform = ToTensor()\n        all_files = glob.glob(os.path.join(test_dir, \"*\"))\n        self.model.eval()\n        for f in tqdm(all_files):\n            image = np.asarray(Image.open(f), dtype='float32') / 255.\n            image = transform(image).unsqueeze(0).to(self.device)\n\n            centers, final = self.predict(image)\n            # final = final.squeeze().cpu().numpy()\n\n            final = (final * self.saving_factor).astype('uint16')\n            basename = os.path.basename(f).split('.')[0]\n            save_path = os.path.join(out_dir, basename + \".png\")\n\n            Image.fromarray(final).save(save_path)\n\n\nif __name__ == '__main__':\n    import matplotlib.pyplot as plt\n    from time import time\n\n    img = Image.open(\"test_imgs/classroom__rgb_00283.jpg\")\n    start = time()\n    inferHelper = InferenceHelper()\n    centers, pred = inferHelper.predict_pil(img)\n    print(f\"took :{time() - start}s\")\n    plt.imshow(pred.squeeze(), cmap='magma_r')\n    plt.show()\n" ]
[ [ "numpy.isinf", "numpy.array", "numpy.isnan", "numpy.asarray", "torch.no_grad", "torch.from_numpy", "matplotlib.pyplot.show", "torch.Tensor" ] ]
idaholab/SR2ML
[ "2aa5e0be02786523cdeaf898d42411a7068d30b7" ]
[ "src/FTModel.py" ]
[ "# Copyright 2020, Battelle Energy Alliance, LLC\n# ALL RIGHTS RESERVED\n\"\"\"\nCreated on April 30, 2018\n\n@author: mandd\n\"\"\"\n\n#External Modules---------------------------------------------------------------\nimport numpy as np\n#External Modules End-----------------------------------------------------------\n\n#Internal Modules---------------------------------------------------------------\nfrom PluginBaseClasses.ExternalModelPluginBase import ExternalModelPluginBase\nfrom .PostProcessors.FTStructure import FTStructure\n#Internal Modules End-----------------------------------------------------------\n\n\nclass FTModel(ExternalModelPluginBase):\n \"\"\"\n This class is designed to create a Fault-Tree model\n \"\"\"\n def __init__(self):\n \"\"\"\n Constructor\n @ In, None\n @ Out, None\n \"\"\"\n ExternalModelPluginBase.__init__(self)\n\n def _readMoreXML(self, container, xmlNode):\n \"\"\"\n Method to read the portion of the XML that belongs to the Fault-Tree model\n @ In, container, object, self-like object where all the variables can be stored\n @ In, xmlNode, xml.etree.ElementTree.Element, XML node that needs to be read\n @ Out, None\n \"\"\"\n container.mapping = {}\n container.InvMapping = {}\n\n for child in xmlNode:\n if child.tag == 'topEvents':\n container.topEventID = child.text.strip()\n elif child.tag == 'map':\n container.mapping[child.get('var')] = child.text.strip()\n container.InvMapping[child.text.strip()] = child.get('var')\n elif child.tag == 'variables':\n variables = [str(var.strip()) for var in child.text.split(\",\")]\n else:\n raise IOError(\"FTModel: xml node \" + str (child.tag) + \" is not allowed\")\n\n def initialize(self, container, runInfoDict, inputFiles):\n \"\"\"\n Method to initialize this plugin\n @ In, container, object, self-like object where all the variables can be stored\n @ In, runInfoDict, dict, dictionary containing all the RunInfo parameters (XML node <RunInfo>)\n @ In, inputFiles, list, list of input files (if any)\n @ Out, None\n \"\"\"\n pass\n\n def createNewInput(self, container, inputs, samplerType, **Kwargs):\n \"\"\"\n This function has been added for this model in order to be able to create a FTstructure from multiple files\n @ In, myInput, list, the inputs (list) to start from to generate the new one\n @ In, samplerType, string, is the type of sampler that is calling to generate a new input\n @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler,\n a mandatory key is the sampledVars'that contains a dictionary {'name variable':value}\n @ Out, ([(inputDict)],copy.deepcopy(kwargs)), tuple, return the new input in a tuple form\n \"\"\"\n container.faultTreeModel = FTStructure(inputs, container.topEventID)\n container.faultTreeModel.FTsolver()\n return Kwargs\n\n def run(self, container, Inputs):\n \"\"\"\n This method determines the status of the TopEvent of the FT provided the status of its Basic Events\n @ In, container, object, self-like object where all the variables can be stored\n @ In, Inputs, dict, dictionary of inputs from RAVEN\n \"\"\"\n if self.checkTypeOfAnalysis(container,Inputs):\n value = self.runTimeDep(container, Inputs)\n else:\n value = self.runStatic(container, Inputs)\n\n container.__dict__[container.topEventID]= value[container.topEventID]\n\n def checkTypeOfAnalysis(self,container,Inputs):\n \"\"\"\n This method checks which type of analysis to be performed:\n - True: dynamic (time dependent)\n - False: static\n @ In, container, object, self-like object where all the 
variables can be stored\n @ In, Inputs, dict, dictionary of inputs from RAVEN\n @ Out, analysisType, bool, type of analysis to be performed\n\n \"\"\"\n arrayValues=set()\n for key in Inputs.keys():\n if key in container.mapping.keys():\n arrayValues.add(Inputs[key])\n analysisType = None\n if arrayValues.difference({0.,1.}):\n analysisType = True\n else:\n analysisType = False\n return analysisType\n\n def runStatic(self, container, Inputs):\n \"\"\"\n This method performs a static analysis of the FT model\n @ In, container, object, self-like object where all the variables can be stored\n @ In, Inputs, dict, dictionary of inputs from RAVEN\n @ Out, value, float, value of the Tope Event of the FT\n \"\"\"\n\n inputForFT = {}\n for key in container.InvMapping.keys():\n inputForFT[key] = Inputs[container.InvMapping[key]]\n value = container.faultTreeModel.evaluateFT(inputForFT)\n return value\n\n def runTimeDep(self, container, Inputs):\n \"\"\"\n This method performs a dynamic analysis of the FT model\n @ In, container, object, self-like object where all the variables can be stored\n @ In, Inputs, dict, dictionary of inputs from RAVEN\n @ Out, outcome, dict, time depedendnt value of the Tope Event of the FT\n \"\"\"\n times = []\n times.append(0.)\n for key in Inputs.keys():\n if key in container.mapping.keys() and Inputs[key]!=1.:\n times.append(Inputs[key])\n times = sorted(times, key=float)\n\n outcome={}\n outcome[container.topEventID] = np.asarray([0.])\n\n for time in times:\n inputToPass=self.inputToBePassed(container,time,Inputs)\n tempOut = self.runStatic(container, inputToPass)\n for var in outcome.keys():\n if tempOut[var] == 1.:\n if time == 0.:\n outcome[var] = np.asarray([1.])\n else:\n if outcome[var][0] <= 0:\n outcome[var] = np.asarray([time])\n return outcome\n\n def inputToBePassed(self,container,time,Inputs):\n \"\"\"\n This method return the status of the input variables at time t=time\n @ In, container, object, self-like object where all the variables can be stored\n @ In, Inputs, dict, dictionary of inputs from RAVEN\n @ In, time, float, time at which the input variables need to be evaluated\n @ Out, inputToBePassed, dict, value of the FT basic events at t=time\n \"\"\"\n inputToBePassed = {}\n for key in Inputs.keys():\n if key in container.mapping.keys():\n if Inputs[key] == 0. or Inputs[key] == 1.:\n inputToBePassed[key] = Inputs[key]\n else:\n if Inputs[key] > time:\n inputToBePassed[key] = np.asarray([0.])\n else:\n inputToBePassed[key] = np.asarray([1.])\n return inputToBePassed\n" ]
[ [ "numpy.asarray" ] ]
ppizarror/grafica
[ "a5e0c6455d390ffbb2e88d56bf8941aafaf013d1" ]
[ "examples/ex_camera.py" ]
[ "# coding=utf-8\n\"\"\"\nCamera example.\n\"\"\"\n\nimport glfw\nfrom OpenGL.GL import *\nimport numpy as np\nimport sys\nimport os.path\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport grafica.transformations as tr\nimport grafica.basic_shapes as bs\nimport grafica.easy_shaders as es\nimport grafica.lighting_shaders as ls\nimport grafica.performance_monitor as pm\nfrom grafica.assets_path import getAssetPath\n\n__author__ = \"Ivan Sipiran\"\n__license__ = \"MIT\"\n\n\n# A class to store the application control\nclass Controller:\n def __init__(self):\n self.fillPolygon = True\n self.camPosition = np.array([0.0, 0.05, 2.0])\n self.pitch = 0.0\n self.yaw = -np.pi / 2\n self.camUp = np.array([0.0, 1.0, 1.0])\n self.camRight = np.array([0, 0, 0])\n self.front = np.array([0, 0, 0])\n\n\n# We will use the global controller as communication with the callback function\ncontroller = Controller()\n\n\ndef on_key(window, key, scancode, action, mods):\n if action != glfw.PRESS:\n return\n\n global controller\n print(action)\n if key == glfw.KEY_W:\n controller.camPosition = controller.camPosition + controller.front * 0.0175\n elif key == glfw.KEY_S:\n controller.camPosition = controller.camPosition - controller.front * 0.0175\n elif key == glfw.KEY_D:\n if controller.yaw >= np.pi:\n controller.yaw = -np.pi + 0.061\n else:\n controller.yaw = controller.yaw + 0.061\n\n elif key == glfw.KEY_A:\n if controller.yaw <= -np.pi:\n controller.yaw = np.pi - 0.061\n else:\n controller.yaw = controller.yaw - 0.061\n\n elif key == glfw.KEY_SPACE:\n controller.fillPolygon = not controller.fillPolygon\n\n elif key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(window, True)\n\n\ndef processCamera():\n global controller\n\n yaw = controller.yaw\n pitch = controller.pitch\n\n frontx = np.cos(yaw) * np.cos(pitch)\n fronty = np.sin(pitch)\n frontz = np.sin(yaw) * np.cos(pitch)\n controller.front = np.array([frontx, fronty, frontz])\n controller.front = controller.front / np.linalg.norm(controller.front)\n\n controller.camRight = np.cross(controller.front, controller.camUp)\n controller.camRight = controller.camRight / np.linalg.norm(controller.camRight)\n\n controller.camUp = np.cross(controller.camRight, controller.front)\n controller.camUp = controller.camUp / np.linalg.norm(controller.camUp)\n\n\ndef readOFF(filename, color):\n vertices = []\n normals = []\n faces = []\n\n with open(filename, 'r') as file:\n line = file.readline().strip()\n assert line == \"OFF\"\n\n line = file.readline().strip()\n aux = line.split(' ')\n\n numVertices = int(aux[0])\n numFaces = int(aux[1])\n\n for i in range(numVertices):\n aux = file.readline().strip().split(' ')\n vertices += [float(coord) for coord in aux[0:]]\n\n vertices = np.asarray(vertices)\n vertices = np.reshape(vertices, (numVertices, 3))\n print(f'Vertices shape: {vertices.shape}')\n\n normals = np.zeros((numVertices, 3), dtype=np.float32)\n print(f'Normals shape: {normals.shape}')\n\n for i in range(numFaces):\n aux = file.readline().strip().split(' ')\n aux = [int(index) for index in aux[0:]]\n faces += [aux[1:]]\n\n vecA = [vertices[aux[2]][0] - vertices[aux[1]][0], vertices[aux[2]][1] - vertices[aux[1]][1],\n vertices[aux[2]][2] - vertices[aux[1]][2]]\n vecB = [vertices[aux[3]][0] - vertices[aux[2]][0], vertices[aux[3]][1] - vertices[aux[2]][1],\n vertices[aux[3]][2] - vertices[aux[2]][2]]\n\n res = np.cross(vecA, vecB)\n normals[aux[1]][0] += res[0]\n normals[aux[1]][1] += res[1]\n normals[aux[1]][2] += res[2]\n\n 
normals[aux[2]][0] += res[0]\n normals[aux[2]][1] += res[1]\n normals[aux[2]][2] += res[2]\n\n normals[aux[3]][0] += res[0]\n normals[aux[3]][1] += res[1]\n normals[aux[3]][2] += res[2]\n # print(faces)\n norms = np.linalg.norm(normals, axis=1)\n normals = normals / norms[:, None]\n\n color = np.asarray(color)\n color = np.tile(color, (numVertices, 1))\n\n vertexData = np.concatenate((vertices, color), axis=1)\n vertexData = np.concatenate((vertexData, normals), axis=1)\n\n print(vertexData.shape)\n\n indices = []\n vertexDataF = []\n index = 0\n\n for face in faces:\n vertex = vertexData[face[0], :]\n vertexDataF += vertex.tolist()\n vertex = vertexData[face[1], :]\n vertexDataF += vertex.tolist()\n vertex = vertexData[face[2], :]\n vertexDataF += vertex.tolist()\n\n indices += [index, index + 1, index + 2]\n index += 3\n\n return bs.Shape(vertexDataF, indices)\n\n\nif __name__ == \"__main__\":\n\n # Initialize glfw\n if not glfw.init():\n glfw.set_window_should_close(window, True)\n\n width = 1000\n height = 1000\n title = \"Camera demo\"\n window = glfw.create_window(width, height, title, None, None)\n\n if not window:\n glfw.terminate()\n glfw.set_window_should_close(window, True)\n\n glfw.make_context_current(window)\n\n # Connecting the callback function 'on_key' to handle keyboard events\n glfw.set_key_callback(window, on_key)\n\n # Defining shader programs\n pipeline = ls.SimpleFlatShaderProgram()\n mvpPipeline = es.SimpleModelViewProjectionShaderProgram()\n\n # Telling OpenGL to use our shader program\n glUseProgram(pipeline.shaderProgram)\n\n # Setting up the clear screen color\n glClearColor(0.85, 0.85, 0.85, 1.0)\n\n # As we work in 3D, we need to check which part is in front,\n # and which one is at the back\n glEnable(GL_DEPTH_TEST)\n\n\n # Convenience function to ease initialization\n def createGPUShape(pipeline, shape):\n gpuShape = es.GPUShape().initBuffers()\n pipeline.setupVAO(gpuShape)\n gpuShape.fillBuffers(shape.vertices, shape.indices, GL_STATIC_DRAW)\n return gpuShape\n\n\n # Creating shapes on GPU memory\n gpuAxis = createGPUShape(mvpPipeline, bs.createAxis(7))\n\n shape = readOFF(getAssetPath('Maze.off'), (0.9, 0.6, 0.2))\n gpuShape = createGPUShape(pipeline, shape)\n\n # Setting uniforms that will NOT change on each iteration\n glUseProgram(pipeline.shaderProgram)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"La\"), 1.0, 1.0, 1.0)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ld\"), 1.0, 1.0, 1.0)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ls\"), 1.0, 1.0, 1.0)\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ka\"), 0.2, 0.2, 0.2)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Kd\"), 0.9, 0.9, 0.9)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"Ks\"), 1.0, 1.0, 1.0)\n\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"lightPosition\"), 3, 3, 3)\n\n glUniform1ui(glGetUniformLocation(pipeline.shaderProgram, \"shininess\"), 100)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"constantAttenuation\"), 0.001)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"linearAttenuation\"), 0.1)\n glUniform1f(glGetUniformLocation(pipeline.shaderProgram, \"quadraticAttenuation\"), 0.01)\n\n # Setting up the projection transform\n projection = tr.perspective(45, float(width) / float(height), 0.1, 100)\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n\n glUseProgram(mvpPipeline.shaderProgram)\n 
glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n modelTransform = tr.uniformScale(0.1)\n glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, \"model\"), 1, GL_TRUE, modelTransform)\n\n t0 = glfw.get_time()\n\n perfMonitor = pm.PerformanceMonitor(glfw.get_time(), 0.5)\n\n # glfw will swap buffers as soon as possible\n glfw.swap_interval(0)\n dx = 0\n\n while not glfw.window_should_close(window):\n\n # Measuring performance\n perfMonitor.update(glfw.get_time())\n glfw.set_window_title(window, title + str(perfMonitor))\n\n # Using GLFW to check for input events\n glfw.poll_events()\n\n processCamera()\n if glfw.get_key(window, glfw.KEY_W) == glfw.PRESS:\n controller.camPosition = controller.camPosition + controller.front * 0.000175\n\n if glfw.get_key(window, glfw.KEY_S) == glfw.PRESS:\n controller.camPosition = controller.camPosition - controller.front * 0.000175\n\n view = tr.lookAt(\n controller.camPosition,\n controller.camPosition + controller.front,\n controller.camUp)\n\n # Clearing the screen in both, color and depth\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Filling or not the shapes depending on the controller state\n if (controller.fillPolygon):\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n\n # Drawing shapes\n glUseProgram(pipeline.shaderProgram)\n glUniform3f(glGetUniformLocation(pipeline.shaderProgram, \"viewPosition\"), controller.camPosition[0],\n controller.camPosition[1], controller.camPosition[2])\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"view\"), 1, GL_TRUE, view)\n\n modelTransformPlane = tr.uniformScale(0.1)\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, \"model\"), 1, GL_TRUE, modelTransformPlane)\n pipeline.drawCall(gpuShape)\n\n glUseProgram(mvpPipeline.shaderProgram)\n glUniformMatrix4fv(glGetUniformLocation(mvpPipeline.shaderProgram, \"view\"), 1, GL_TRUE, view)\n mvpPipeline.drawCall(gpuAxis, GL_LINES)\n\n # Once the drawing is rendered, buffers are swap so an uncomplete drawing is never seen.\n glfw.swap_buffers(window)\n dx += 0.001\n\n # freeing GPU memory\n gpuAxis.clear()\n gpuShape.clear()\n # gpuHelix.clear()\n\n glfw.terminate()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.sin", "numpy.linalg.norm", "numpy.asarray", "numpy.reshape", "numpy.zeros", "numpy.tile", "numpy.cos", "numpy.cross" ] ]
grburgess/pyspi
[ "084884c3fd06a09ef3a850cd19e7b751d7929e59" ]
[ "pyspi/test/test_spipointing.py" ]
[ "# Test spi pointing\n\ndef test_spipointing():\n from pyspi.utils.response.spi_pointing import SPIPointing\n from pyspi.io.package_data import get_path_of_external_data_dir\n from pyspi.io.get_files import get_files\n import os\n import numpy as np\n \n pointing_id = 169600130010\n get_files(pointing_id)\n\n geom_save_path = os.path.join(get_path_of_external_data_dir(),\n 'pointing_data',\n str(pointing_id),\n 'sc_orbit_param.fits.gz')\n\n point = SPIPointing(geom_save_path)\n\n assert np.sum(point.sc_matrix) == 1222.1378651210798, \\\n 'SPI pointing test failed'\n" ]
[ [ "numpy.sum" ] ]
elitonfilho/segsr
[ "74ee5a4794f0894cffbb08f696f601b7d914513c" ]
[ "utils/utils.py" ]
[ "from pathlib import Path\nfrom shutil import copy\nimport torch\nimport pandas as pd\nfrom datetime import datetime\n\n\ndef create_pretrain_folder(args, cfg):\n    path_save_model = None  # guard: only set when a save path is configured\n    if cfg.TRAIN.model_save_path:\n        path_save_model = Path(cfg.TRAIN.model_save_path).resolve()\n        if path_save_model.exists():\n            time = datetime.now().strftime('%Y%m%d-%H%M')\n            path_save_model.rename(\n                path_save_model.parent / f'{path_save_model.stem}_{time}')\n        path_save_model.mkdir()\n        copy(args.cfg, path_save_model / 'config.yaml')\n    return path_save_model\n\n\ndef save_model(cfg, best_results, netG, netD):\n    save_path = Path(cfg.TRAIN.model_save_path).resolve()\n    torch.save(netG, save_path / f'{cfg.TRAIN.model_name}_g_{best_results[\"epoch\"]}.pth')\n    torch.save(netD, save_path / f'{cfg.TRAIN.model_name}_d_{best_results[\"epoch\"]}.pth')\n\n\ndef save_train_stats(cfg, epoch, stats):\n    out_path = Path(cfg.TRAIN.model_save_path, 'train_stats.csv').resolve()\n    len_ds = stats['batch_sizes']\n    data_frame = pd.DataFrame(\n        data={\n            'Epoch': epoch,\n            'Loss_D': stats['d_loss']/len_ds,\n            'Loss_G': stats['g_loss']/len_ds,\n            'Score_D': stats['d_score']/len_ds,\n            'Score_G': stats['g_score']/len_ds,\n            'Loss_adv': stats['adv']/len_ds,\n            'Loss_img': stats['img']/len_ds,\n            'Loss_tv': stats['tv']/len_ds,\n            'Loss_per': stats['per']/len_ds,\n            'Loss_seg': stats['seg']/len_ds,\n\n        }, index=[0])\n    # index=False: writing the default index labeled 'Epoch' would duplicate the 'Epoch' column\n    data_frame.to_csv(out_path, index=False, mode='a', header=not out_path.exists())\n\n\ndef save_val_stats(cfg, epoch, stats):\n    out_path = Path(cfg.TRAIN.model_save_path, 'val_stats.csv').resolve()\n    if cfg.TRAIN.use_seg:\n        data_frame = pd.DataFrame(\n            data={\n                'Epoch': epoch,\n                'PSNR': stats['psnr'],\n                'SSIM': stats['ssim'],\n                'IoU': stats['iou'],\n                'Acc': stats['acc']\n            }, index=[0])\n    else:\n        data_frame = pd.DataFrame(\n            data={\n                'Epoch': epoch,\n                'PSNR': stats['psnr'],\n                'SSIM': stats['ssim'],\n            }, index=[0])\n    data_frame.to_csv(out_path, index=False, mode='a', header=not out_path.exists())" ]
[ [ "pandas.DataFrame", "torch.save" ] ]
ThatGuyKev/facebook-chat-analysis
[ "74f1ce3f3dc83c9dd587e8df0b7e77f9580cd2d0" ]
[ "start-analysis.py" ]
[ "from jsonutilities import handleFile\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport nltk\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nsentiment_analyzer = SentimentIntensityAnalyzer()\r\n\r\n\r\ndef analyze(chat):\r\n # Use jsonutilites for clean up\r\n contents, hours = handleFile(chat+'.json')\r\n\r\n neutral, negative, positive = 0, 0, 0\r\n\r\n for index, sentence in enumerate(contents):\r\n\r\n scores = sentiment_analyzer.polarity_scores(sentence)\r\n scores.pop('compound', None)\r\n\r\n maxAttribute = max(scores, key=lambda k: scores[k])\r\n\r\n if maxAttribute == \"neu\":\r\n neutral += 1\r\n elif maxAttribute == \"neg\":\r\n negative += 1\r\n else:\r\n positive += 1\r\n\r\n total = neutral + negative + positive\r\n print(\"Negative: {0}% | Neutral: {1}% | Positive: {2}%\".format(\r\n negative*100/total, neutral*100/total, positive*100/total))\r\n\r\n labels = 'Neutral', 'Negative', 'Positive'\r\n sizes = [neutral, negative, positive]\r\n colors = ['#00bcd7', '#D22500', '#41CB00']\r\n\r\n # Plot\r\n figs, (ax1, ax2) = plt.subplots(2, figsize=(8, 8))\r\n\r\n def func(pct, allvals):\r\n absolute = int(pct/100.*np.sum(allvals))\r\n return \"{:.1f}%\\n({:d})\".format(pct, absolute)\r\n\r\n\r\n wedges, texts, autotexts = ax1.pie(sizes, autopct=lambda pct: func(pct, sizes),\r\n colors=colors,\r\n textprops=dict(color=\"w\"))\r\n\r\n ax1.legend(wedges, labels,\r\n title=\"labels\",\r\n loc=\"center left\",\r\n bbox_to_anchor=(1, 0, 0.5, 1))\r\n\r\n plt.setp(autotexts, size=8, weight=\"bold\")\r\n\r\n ax1.set_title(\"Chat Analysis - Chat with {0}\".format(chat.capitalize()))\r\n\r\n time_groups = {}\r\n\r\n for i in range(24):\r\n time_groups[str(i)] = 0\r\n\r\n for hour in hours:\r\n time_groups[str(int(hour))] += 1\r\n\r\n\r\n\r\n ax2.bar(range(len(time_groups)), time_groups.values(), align='center')\r\n\r\n ax2.set_xticks(np.arange(len(time_groups)))\r\n ax2.set_xticklabels(time_groups.keys())\r\n\r\n ax2.set_xlabel('Time groups with 1 hour interval')\r\n ax2.set_ylabel('Frequency')\r\n\r\n ax2.set_title(\"Timing Analysis - Chat with {0}\".format(chat.capitalize()))\r\n \r\n plt.show()\r\n\r\nanalyze(sys.argv[1])" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.setp", "numpy.sum", "matplotlib.pyplot.subplots" ] ]
Arjitg450/Machine-Learning-Case-Studies
[ "497d2e7e5cd66750d2539a8e2b9a07f27df2aea3" ]
[ "22. Neural Networks from Scratch/perceptron_and.py" ]
[ "from perceptron.perceptron import Perceptron\nimport numpy as np\n\nX = np.array([[0,0],[0,1],[1,0],[1,1]])\ny = np.array([[0],[0],[0],[1]])\n\nprint(\"[INFO] training perceptron...\")\n\np = Perceptron(X.shape[1], alpha=0.1)\np.fit(X, y, epochs=20)\nprint(\"[INFO] testing perceptron...\")\nfor (x, target) in zip(X, y):\n    pred = p.predict(x)\n    print(\"[INFO] data = {}. ground truth={}, pred = {}\".format(x, target[0], pred))\n" ]
[ [ "numpy.array" ] ]
lichao312214129/easylearn
[ "102ff264a7672b246244a489e0fbde8e3897c52f" ]
[ "eslearn/machine_learning/clustering/_base_clustering.py" ]
[ "#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\"\"\"\nThis class is the base class for classification\n\"\"\"\n\nimport numpy as np\nimport time\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn.metrics import make_scorer, accuracy_score, auc, f1_score\nfrom sklearn.pipeline import Pipeline\nfrom joblib import Memory\nfrom shutil import rmtree\nimport warnings\nfrom sklearn.exceptions import ConvergenceWarning\n\nfrom eslearn.base import AbstractMachineLearningBase\nfrom eslearn.utils.timer import timer\n\n\nwarnings.filterwarnings(\"ignore\", category=ConvergenceWarning, module=\"sklearn\")\n\n\nclass BaseClustering(AbstractMachineLearningBase):\n \"\"\"Base class for classification\n\n Parameters\n ----------\n None\n\n Attributes\n ----------\n model_: Fited model object, default None\n\n weights_: ndarray of shape(n_class, n_features) if the model is linear model, else shape(1,n_features), default None\n Feature weights of the fited model\n\n weights_norm_: ndarray of shape(n_class, n_features) if the model is linear model, else shape(1,n_features), default None\n Normalized feature weights. Using StandardScaler (z-score) to get the normalized feature weights.\n\n \"\"\"\n\n def __init__(self,\n search_strategy='grid', \n k=2, \n metric=accuracy_score, \n n_iter_of_randomedsearch=10, \n n_jobs=2, \n location='cachedir',\n verbose=False):\n\n self.search_strategy = search_strategy\n self.k = k\n self.metric = metric\n self.n_iter_of_randomedsearch = n_iter_of_randomedsearch\n self.n_jobs = n_jobs\n self.location = location\n self.verbose = verbose\n \n self.model_ = None\n self.weights_ = None\n self.weights_norm_ = None\n\n @timer\n def fit_(self, x=None, y=None):\n \"\"\"Fit the pipeline_\"\"\"\n \n # TODO: Extending to other cross-validation methods\n # TODO: when no param's length greater than 1, do not use GridSearchCV or RandomizedSearchCV for speeding up\n \n cv = StratifiedKFold(n_splits=self.k) # Default is StratifiedKFold\n if self.is_search:\n if self.search_strategy == 'grid':\n self.model_ = GridSearchCV(\n self.pipeline_, n_jobs=self.n_jobs, param_grid=self.param_search_, cv=cv, \n scoring = make_scorer(self.metric), refit=True\n )\n elif self.search_strategy == 'random':\n self.model_ = RandomizedSearchCV(\n self.pipeline_, n_jobs=self.n_jobs, param_distributions=self.param_search_, cv=cv, \n scoring = make_scorer(self.metric), refit=True, n_iter=self.n_iter_of_randomedsearch,\n )\n else:\n print(\"Please specify which search strategy!\\n\")\n return\n else:\n self.model_ = self.pipeline_\n \n # start = time.time()\n self.model_.fit(x, y)\n # end = time.time()\n # print(end - start)\n\n # Delete the temporary cache before exiting\n # self.memory.clear(warn=False)\n return self\n \n def predict(self, x):\n y_hat = self.model_.predict(x)\n \n # TODO?\n if hasattr(self.model_, 'decision_function'):\n y_prob = self.model_.decision_function(x)\n elif hasattr(self.model_, 'predict_proba'):\n y_prob = self.model_.predict_proba(x)[:,1]\n else:\n y_prob = y_hat\n \n return y_hat, y_prob\n \n def get_weights_(self, x=None, y=None):\n \"\"\"\n If the model is linear model, the weights are coefficients.\n If the model is not the linear model, the weights are calculated by occlusion test <Transfer learning improves resting-state functional\n connectivity pattern analysis using convolutional neural networks>.\n \"\"\"\n \n 
if self.is_search:\n best_model = self.model_.best_estimator_\n else:\n best_model = self.model_\n \n feature_preprocessing = best_model['feature_preprocessing']\n dim_reduction = best_model.get_params().get('dim_reduction',None)\n feature_selection = best_model.get_params().get('feature_selection', None)\n estimator = best_model['estimator']\n\n # Get weight according to model type: linear model or nonlinear model\n if hasattr(estimator, \"coef_\"): # Linear model\n coef = estimator.coef_\n if feature_selection and (feature_selection != \"passthrough\"):\n self.weights_ = feature_selection.inverse_transform(coef)\n else:\n self.weights_ = coef\n \n if dim_reduction and (dim_reduction != \"passthrough\"):\n self.weights_ = dim_reduction.inverse_transform(self.weights_)\n \n else: # Nonlinear model\n # TODO: Consider the problem of slow speed caused by a large number of features\n x_reduced_selected = x.copy()\n if feature_preprocessing and (feature_preprocessing != \"passthrough\"):\n x_reduced_selected = feature_preprocessing.fit_transform(x_reduced_selected)\n if dim_reduction and (dim_reduction != \"passthrough\"):\n x_reduced_selected = dim_reduction.fit_transform(x_reduced_selected)\n if feature_selection and (feature_selection != \"passthrough\"):\n x_reduced_selected = feature_selection.fit_transform(x_reduced_selected, y)\n \n y_hat = self.model_.predict(x)\n score_true = self.metric(y, y_hat)\n len_feature = x_reduced_selected.shape[1]\n self.weights_ = np.zeros([1,len_feature])\n \n if len_feature > 1000:\n print(f\"***There are {len_feature} features, it may take a long time to get the weight!***\\n\")\n print(\"***I suggest that you reduce the dimension of features***\\n\")\n \n for ifeature in range(len_feature):\n print(f\"Getting weight for the {ifeature+1}th feature...\\n\")\n x_ = x_reduced_selected.copy()\n x_[:,ifeature] = 0\n y_hat = estimator.predict(x_)\n self.weights_[0, ifeature] = score_true - self.metric(y, y_hat)\n \n # Back to original space\n if feature_selection and (feature_selection != \"passthrough\"):\n self.weights_ = feature_selection.inverse_transform(self.weights_)\n if dim_reduction and (dim_reduction != \"passthrough\"):\n self.weights_ = dim_reduction.inverse_transform(self.weights_) \n \n # Normalize weights\n self.weights_norm_ = StandardScaler().fit_transform(self.weights_.T).T\n \n \n\nif __name__==\"__main__\":\n baseclf = BaseClustering()" ]
[ [ "sklearn.metrics.make_scorer", "sklearn.model_selection.StratifiedKFold", "numpy.zeros", "sklearn.preprocessing.StandardScaler" ] ]
guyk1971/stable-baselines
[ "ac7a1f3c32851577d5a4fc76e2c42760b9379634" ]
[ "my_zoo/train_zoo.py" ]
[ "import os\nimport time\nimport difflib\nimport argparse\nimport importlib\nfrom pprint import pprint\nfrom collections import OrderedDict\n\n\nimport gym\nimport numpy as np\nimport yaml\n# Optional dependencies\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\ntry:\n import highway_env\nexcept ImportError:\n highway_env = None\n\ntry:\n import mpi4py\n from mpi4py import MPI\nexcept ImportError:\n mpi4py = None\n\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.common.cmd_util import make_atari_env\nfrom stable_baselines.common.vec_env import VecFrameStack, SubprocVecEnv, VecNormalize, DummyVecEnv\nfrom stable_baselines.common.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise\nfrom stable_baselines.ppo2.ppo2 import constfn\n\nfrom zoo.utils import make_env, ALGOS, linear_schedule, get_latest_run_id, get_wrapper_class, find_saved_model\nfrom zoo.utils.hyperparams_opt import hyperparam_optimization\nfrom zoo.utils.noise import LinearNormalActionNoise\n\n\nif __name__ == '__main__':\n #----------- Supress Tensorflow version warnings----------------------\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\n import warnings\n\n # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning\n warnings.simplefilter(action='ignore', category=FutureWarning)\n warnings.simplefilter(action='ignore', category=Warning)\n import tensorflow as tf\n\n tf.get_logger().setLevel('INFO')\n tf.autograph.set_verbosity(0)\n import logging\n\n tf.get_logger().setLevel(logging.ERROR)\n #-----------------------------------------------------------------------\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, nargs='+', default=[\"CartPole-v1\"], help='environment ID(s)')\n parser.add_argument('-tb', '--tensorboard-log', help='Tensorboard log dir', default='', type=str)\n parser.add_argument('-i', '--trained-agent', help='Path to a pretrained agent to continue training',\n default='', type=str)\n parser.add_argument('--algo', help='RL Algorithm', default='ppo2',\n type=str, required=False, choices=list(ALGOS.keys()))\n parser.add_argument('-n', '--n-timesteps', help='Overwrite the number of timesteps', default=-1,\n type=int)\n parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=-1,\n type=int)\n parser.add_argument('-f', '--log-folder', help='Log folder', type=str, default='logs')\n parser.add_argument('--seed', help='Random generator seed', type=int, default=0)\n parser.add_argument('--n-trials', help='Number of trials for optimizing hyperparameters', type=int, default=10)\n parser.add_argument('-optimize', '--optimize-hyperparameters', action='store_true', default=False,\n help='Run hyperparameters search')\n parser.add_argument('--n-jobs', help='Number of parallel jobs when optimizing hyperparameters', type=int, default=1)\n parser.add_argument('--sampler', help='Sampler to use when optimizing hyperparameters', type=str,\n default='tpe', choices=['random', 'tpe', 'skopt'])\n parser.add_argument('--pruner', help='Pruner to use when optimizing hyperparameters', type=str,\n default='median', choices=['halving', 'median', 'none'])\n parser.add_argument('--verbose', help='Verbose mode (0: no output, 1: INFO)', default=1,\n type=int)\n parser.add_argument('--gym-packages', type=str, nargs='+', default=[], help='Additional external Gym environemnt package modules to import (e.g. 
gym_minigrid)')\n args = parser.parse_args()\n\n # Going through custom gym packages to let them register in the global registory\n for env_module in args.gym_packages:\n importlib.import_module(env_module)\n\n env_ids = args.env\n registered_envs = set(gym.envs.registry.env_specs.keys())\n\n for env_id in env_ids:\n # If the environment is not found, suggest the closest match\n if env_id not in registered_envs:\n try:\n closest_match = difflib.get_close_matches(env_id, registered_envs, n=1)[0]\n except IndexError:\n closest_match = \"'no close match found...'\"\n raise ValueError('{} not found in gym registry, you maybe meant {}?'.format(env_id, closest_match))\n\n set_global_seeds(args.seed)\n\n if args.trained_agent != \"\":\n valid_extension = args.trained_agent.endswith('.pkl') or args.trained_agent.endswith('.zip')\n assert valid_extension and os.path.isfile(args.trained_agent), \\\n \"The trained_agent must be a valid path to a .zip/.pkl file\"\n\n # if we run in multi process, the code runs for each worker and the worker can get its rank as the below\n rank = 0\n if mpi4py is not None and MPI.COMM_WORLD.Get_size() > 1:\n print(\"Using MPI for multiprocessing with {} workers\".format(MPI.COMM_WORLD.Get_size()))\n rank = MPI.COMM_WORLD.Get_rank()\n print(\"Worker rank: {}\".format(rank))\n # make sure that each worker has its own seed\n args.seed += rank\n # we allow only one worker to \"speak\"\n if rank != 0:\n args.verbose = 0\n args.tensorboard_log = ''\n\n # now we start train a model for each of the environment in the list\n for env_id in env_ids:\n tensorboard_log = None if args.tensorboard_log == '' else os.path.join(args.tensorboard_log, env_id)\n\n is_atari = False\n if 'NoFrameskip' in env_id:\n is_atari = True\n\n print(\"=\" * 10, env_id, \"=\" * 10)\n\n # Load hyperparameters from yaml file\n with open('hyperparams/{}.yml'.format(args.algo), 'r') as f:\n hyperparams_dict = yaml.load(f)\n if env_id in list(hyperparams_dict.keys()):\n hyperparams = hyperparams_dict[env_id]\n elif is_atari:\n hyperparams = hyperparams_dict['atari']\n else:\n raise ValueError(\"Hyperparameters not found for {}-{}\".format(args.algo, env_id))\n\n # Sort hyperparams that will be saved\n saved_hyperparams = OrderedDict([(key, hyperparams[key]) for key in sorted(hyperparams.keys())])\n algo_ = args.algo\n # HER is only a wrapper around an algo\n if args.algo == 'her':\n algo_ = saved_hyperparams['model_class']\n assert algo_ in {'sac', 'ddpg', 'dqn', 'td3'}, \"{} is not compatible with HER\".format(algo_)\n # Retrieve the model class\n hyperparams['model_class'] = ALGOS[saved_hyperparams['model_class']]\n if hyperparams['model_class'] is None:\n raise ValueError('{} requires MPI to be installed'.format(algo_))\n\n if args.verbose > 0:\n pprint(saved_hyperparams)\n\n ############################\n # Build the Env\n n_envs = hyperparams.get('n_envs', 1)\n if args.verbose > 0:\n print(\"Using {} environments\".format(n_envs))\n\n # Delete keys so the dict can be pass to the model constructor\n if 'n_envs' in hyperparams.keys():\n del hyperparams['n_envs']\n\n normalize = False\n normalize_kwargs = {}\n if 'normalize' in hyperparams.keys():\n normalize = hyperparams['normalize']\n if isinstance(normalize, str):\n normalize_kwargs = eval(normalize)\n normalize = True\n del hyperparams['normalize']\n\n # obtain a class object from a wrapper name string in hyperparams\n # and delete the entry\n env_wrapper = get_wrapper_class(hyperparams)\n if 'env_wrapper' in hyperparams.keys():\n del 
hyperparams['env_wrapper']\n\n def create_env(n_envs):\n \"\"\"\n Create the environment and wrap it if necessary\n :param n_envs: (int)\n :return: (gym.Env)\n \"\"\"\n global hyperparams\n\n if is_atari:\n if args.verbose > 0:\n print(\"Using Atari wrapper\")\n env = make_atari_env(env_id, num_env=n_envs, seed=args.seed)\n # Frame-stacking with 4 frames\n env = VecFrameStack(env, n_stack=4)\n elif algo_ in ['dqn', 'ddpg']:\n if hyperparams.get('normalize', False):\n print(\"WARNING: normalization not supported yet for DDPG/DQN\")\n env = gym.make(env_id)\n env.seed(args.seed)\n if env_wrapper is not None:\n env = env_wrapper(env)\n else:\n if n_envs == 1:\n env = DummyVecEnv([make_env(env_id, 0, args.seed, wrapper_class=env_wrapper)])\n else:\n # env = SubprocVecEnv([make_env(env_id, i, args.seed) for i in range(n_envs)])\n # On most env, SubprocVecEnv does not help and is quite memory hungry\n env = DummyVecEnv([make_env(env_id, i, args.seed, wrapper_class=env_wrapper) for i in range(n_envs)])\n if normalize:\n if args.verbose > 0:\n if len(normalize_kwargs) > 0:\n print(\"Normalization activated: {}\".format(normalize_kwargs))\n else:\n print(\"Normalizing input and reward\")\n env = VecNormalize(env, **normalize_kwargs)\n # Optional Frame-stacking\n if hyperparams.get('frame_stack', False):\n n_stack = hyperparams['frame_stack']\n env = VecFrameStack(env, n_stack)\n print(\"Stacking {} frames\".format(n_stack))\n del hyperparams['frame_stack']\n return env\n\n env = create_env(n_envs)\n # Stop env processes to free memory\n if args.optimize_hyperparameters and n_envs > 1:\n env.close()\n\n\n ############################\n # Build the Agent\n # Create learning rate schedules for ppo2 and sac\n if algo_ in [\"ppo2\", \"sac\", \"td3\"]:\n for key in ['learning_rate', 'cliprange', 'cliprange_vf']:\n if key not in hyperparams:\n continue\n if isinstance(hyperparams[key], str):\n schedule, initial_value = hyperparams[key].split('_')\n initial_value = float(initial_value)\n hyperparams[key] = linear_schedule(initial_value)\n elif isinstance(hyperparams[key], (float, int)):\n # Negative value: ignore (ex: for clipping)\n if hyperparams[key] < 0:\n continue\n hyperparams[key] = constfn(float(hyperparams[key]))\n else:\n raise ValueError('Invalid value for {}: {}'.format(key, hyperparams[key]))\n\n # Should we overwrite the number of timesteps?\n if args.n_timesteps > 0:\n if args.verbose:\n print(\"Overwriting n_timesteps with n={}\".format(args.n_timesteps))\n n_timesteps = args.n_timesteps\n else:\n n_timesteps = int(hyperparams['n_timesteps'])\n del hyperparams['n_timesteps']\n\n if 'policy_kwargs' in hyperparams.keys():\n hyperparams['policy_kwargs'] = eval(hyperparams['policy_kwargs'])\n\n # Parse noise string for DDPG and SAC\n if algo_ in ['ddpg', 'sac', 'td3'] and hyperparams.get('noise_type') is not None:\n noise_type = hyperparams['noise_type'].strip()\n noise_std = hyperparams['noise_std']\n n_actions = env.action_space.shape[0]\n if 'adaptive-param' in noise_type:\n assert algo_ == 'ddpg', 'Parameter is not supported by SAC'\n hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std,\n desired_action_stddev=noise_std)\n elif 'normal' in noise_type:\n if 'lin' in noise_type:\n hyperparams['action_noise'] = LinearNormalActionNoise(mean=np.zeros(n_actions),\n sigma=noise_std * np.ones(n_actions),\n final_sigma=hyperparams.get('noise_std_final', 0.0) * np.ones(n_actions),\n max_steps=n_timesteps)\n else:\n hyperparams['action_noise'] = 
NormalActionNoise(mean=np.zeros(n_actions),\n sigma=noise_std * np.ones(n_actions))\n elif 'ornstein-uhlenbeck' in noise_type:\n hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),\n sigma=noise_std * np.ones(n_actions))\n else:\n raise RuntimeError('Unknown noise type \"{}\"'.format(noise_type))\n print(\"Applying {} noise with std {}\".format(noise_type, noise_std))\n del hyperparams['noise_type']\n del hyperparams['noise_std']\n if 'noise_std_final' in hyperparams:\n del hyperparams['noise_std_final']\n\n if ALGOS[args.algo] is None:\n raise ValueError('{} requires MPI to be installed'.format(args.algo))\n\n if os.path.isfile(args.trained_agent):\n # Continue training\n print(\"Loading pretrained agent\")\n # Policy should not be changed\n del hyperparams['policy']\n model = ALGOS[args.algo].load(args.trained_agent, env=env,\n tensorboard_log=tensorboard_log, verbose=args.verbose, **hyperparams)\n\n exp_folder = args.trained_agent[:-4]\n if normalize:\n print(\"Loading saved running average\")\n env.load_running_average(exp_folder)\n\n elif args.optimize_hyperparameters:\n\n if args.verbose > 0:\n print(\"Optimizing hyperparameters\")\n\n\n def create_model(*_args, **kwargs):\n \"\"\"\n Helper to create a model with different hyperparameters\n \"\"\"\n return ALGOS[args.algo](env=create_env(n_envs), tensorboard_log=tensorboard_log,\n verbose=0, **kwargs)\n\n\n data_frame = hyperparam_optimization(args.algo, create_model, create_env, n_trials=args.n_trials,\n n_timesteps=n_timesteps, hyperparams=hyperparams,\n n_jobs=args.n_jobs, seed=args.seed,\n sampler_method=args.sampler, pruner_method=args.pruner,\n verbose=args.verbose)\n\n report_name = \"report_{}_{}-trials-{}-{}-{}_{}.csv\".format(env_id, args.n_trials, n_timesteps,\n args.sampler, args.pruner, int(time.time()))\n\n log_path = os.path.join(args.log_folder, args.algo, report_name)\n\n if args.verbose:\n print(\"Writing report to {}\".format(log_path))\n\n os.makedirs(os.path.dirname(log_path), exist_ok=True)\n data_frame.to_csv(log_path)\n exit()\n else:\n # Train an agent from scratch\n model = ALGOS[args.algo](env=env, tensorboard_log=tensorboard_log, verbose=args.verbose, **hyperparams)\n\n kwargs = {}\n if args.log_interval > -1:\n kwargs = {'log_interval': args.log_interval}\n\n model.learn(n_timesteps, **kwargs)\n\n\n\n # Save trained model\n log_path = \"{}/{}/\".format(args.log_folder, args.algo)\n save_path = os.path.join(log_path, \"{}_{}\".format(env_id, get_latest_run_id(log_path, env_id) + 1))\n params_path = \"{}/{}\".format(save_path, env_id)\n os.makedirs(params_path, exist_ok=True)\n\n # Only save worker of rank 0 when using mpi\n if rank == 0:\n print(\"Saving to {}\".format(save_path))\n model.save(\"{}/{}\".format(save_path, env_id))\n # Save hyperparams\n with open(os.path.join(params_path, 'config.yml'), 'w') as f:\n yaml.dump(saved_hyperparams, f)\n\n if normalize:\n # Unwrap\n if isinstance(env, VecFrameStack):\n env = env.venv\n # Important: save the running average, for testing the agent we need that normalization\n env.save_running_average(params_path)\n" ]
[ [ "tensorflow.autograph.set_verbosity", "numpy.ones", "numpy.zeros", "tensorflow.get_logger" ] ]
andreaeverett/birth-weights
[ "84ba6f1e5c6481e5a41d10f556ef10f45fc3d22a" ]
[ "1_Import_Data.py" ]
[ "#This file imports National Vital Statistics births data for 2014 and 2015 and\n# transforms from fixed-width into CSV format\n\nimport pandas as pd\n\n#Positions corresponding to variables in the fixed-width text files\n\npositions = [(8,12), (13,14), (18,22), (22,23), (31,32), (32,33), (49,50),\n (72, 73), (73,74), (74,76), (76,78), (78,79), (83,84), (103,104),\n (104,106), (106,107), (107,109), (109,110), (110,111), (114,115),\n (115,116), (116,117), (118,119), (119,120), (120,121), (122, 123),\n (123, 124), (125,126), (141,142), (146,148), (148,150), (150, 152),\n (152,153), (153,155), (155,156), (159,160), (160,161), (161,162), (162,163),\n (164,165), (170,172), (172,174), (174,176), (178,179), (181,182), (197,200),\n (200,202), (205,208), (208,210), (213,216), (216,218), (223,225), (225,226),\n (226,227), (237,239), (241,243), (243,244), (250,251), (251,252), (252,254),\n (254,256), (256,258), (258,260), (260,261), (261,262), (262,263), (263,264),\n (264,265), (265,266), (266,267), (267,268), (268,269), (269,270), (279,281),\n (281, 282), (282, 286), (286,287), (291,294), (294,295), (298,301), (302,303),\n (303,305), (305,306), (306,307), (312,313), (313,314), (314,315), (315,316),\n (316,317), (317,318), (318,319), (319, 320), (320,321), (321,322), (322,323),\n (323,324), (324,325), (325,326), (326,327), (328, 329), (329,330), (330,331),\n (331,333), (334,335), (335,336), (336,337), (342,343), (343,344), (344,345),\n (345, 346), (346,347), (347,348), (348,349), (349,350), (350,351), (351,352),\n (352,353), (359,360), (360,361), (362,363), (363,364), (382,383), (383,384), (384,385),\n (385,386), (386,387), (387,388), (388,389), (389,390), (390,391), (391,392),\n (392,393), (393,394), (394,395), (400,401), (401,402), (402,403), (403,404),\n (404,405), (405,406), (406,407), (407,408), (408,409), (414,415), (415,416), (416,417),\n (417,418), (418,419), (420, 421), (421,422), (422,423), (423,424), (424,425),\n (426,427), (432,433), (433,434), (434,435), (435,436), (436,437), (437,438),\n (443,445), (445,446), (446,447), (447,449), (449,450), (453,454), (455,456),\n (458,459), (474,475), (475,476), (476,478), (480,484), (487,488), (488,489),\n (489,491), (491,493), (493,494), (497,498), (498,500), (500,502), (502,503),\n (503,507), (508,510), (510,511), (516,517), (517,518), (518,519), (519,520),\n (520,521), (521,522), (523,524), (524,525), (525,526), (526,527), (527,528),\n (528,529), (530,531), (536,537), (537,538), (538,539), (539,540), (540,541),\n (541,542), (542,543), (543,544), (544,545), (545,546), (546,547), (547,548),\n (548,549), (549,550), (550,551), (551,552), (552,553), (553,554), (554,555),\n (555,556), (556,557), (557,558), (558,559), (559,560), (560,561), (566,567),\n (567,568), (568,569), (569,570), (1329,1330), (1330,1331), (1331,1332), (1332,1333),\n (1333,1334), (1334,1335), (1335,1336), (1336,1337), (1337,1338), (1339,1340),\n (1340,1341), (1341,1342), (1342,1343), (1343,1344), (1344,1345)]\n\n#Variable names corresponding to the fixed-width positions\n\nheaders = [\"DOB_YY\", \"DOB_MM\", \"DOB_TT\", \"DOB_WK\", \"BFACIL\", \"F_FACILITY\", \"BFACIL3\", \"MAGE_IMPFLG\", \"MAGE_REPFLG\",\n \"MAGER\", \"MAGER14\", \"MAGER9\", \"MBSTATE_REC\", \"RESTATUS\", \"MRACE31\", \"MRACE6\", \"MRACE15\", \"MBRACE\", \"MRACEIMP\",\n \"MHISP_R\", \"F_MHISP\", \"MRACEHISP\", \"MAR_P\", \"DMAR\", \"MAR_IMP\", \"F_MAR_P\", \"MEDUC\", \"F_MEDUC\", \"FAGERPT_FLG\",\n \"FAGECOMB\", \"FAGEREC11\", \"FRACE31\", \"FRACE6\", \"FRACE15\", \"FBRACE\", \"FHISP_R\", \"F_FHISP\", 
\"FRACEHISP\", \"FEDUC\",\n \"FILLER_F\", \"PRIORLIVE\", \"PRIORDEAD\", \"PRIORTERM\", \"LBO_REC\", \"TBO_REC\", \"ILLB_R\", \"ILLB_R11\", \"ILOP_R\", \"ILOP_R11\",\n \"ILP_R\", \"ILP_R11\", \"PRECARE\", \"F_MPCB\", \"PRECARE5\", \"PREVIS\", \"PREVIS_REC\", \"F_TPCV\", \"WIC\", \"F_WIC\", \"CIG_0\",\n \"CIG_1\", \"CIG_2\", \"CIG_3\", \"CIG0_R\", \"CIG1_R\", \"CIG2_R\", \"CIG3_R\", \"F_CIGS_0\", \"F_CIGS_1\", \"F_CIGS_2\", \"F_CIGS_3\",\n \"CIG_REC\", \"F_TOBACO\", \"M_Ht_In\", \"F_M_HT\", \"BMI\", \"BMI_R\", \"PWgt_R\", \"F_PWGT\", \"DWgt_R\", \"F_DWGT\", \"WTGAIN\",\n \"WTGAIN_REC\", \"F_WTGAIN\", \"RF_PDIAB\", \"RF_GDIAB\", \"RF_PHYPE\", \"RF_GHYPE\", \"RF_EHYPE\", \"RF_PPTERM\", \"F_RF_PDIAB\",\n \"F_RF_GDIAB\", \"F_RF_PHYPER\", \"F_RF_GHYPER\", \"F_RF_ECLAMP\", \"F_RF_PPB\", \"RF_INFTR\", \"RF_FEDRG\", \"RF_ARTEC\",\n \"F_RF_INF_DRG\", \"F_RF_INF_ART\", \"RF_CESAR\", \"RF_CESARN\", \"F_RF_CESAR\", \"F_RF_NCESAR\", \"NO_RISKS\", \"IP_GON\",\n \"IP_SYPH\", \"IP_CHLAM\", \"IP_HEPB\", \"IP_HEPC\", \"F_IP_GONOR\", \"F_IP_SYPH\", \"F_IP_CHLAM\", \"F_IP_HEPATB\", \"F_IP_HEPATC\",\n \"NO_INFEC\", \"OB_ECVS\", \"OB_ECVF\", \"F_OB_SUCC\", \"F_OB_FAIL\", \"LD_INDL\", \"LD_AUGM\", \"LD_STER\", \"LD_ANTB\", \"LD_CHOR\",\n \"LD_ANES\", \"F_LD_INDL\", \"F_LD_AUGM\", \"F_LD_STER\", \"F_LD_ANTB\", \"F_LD_CHOR\", \"F_LD_ANES\", \"NO_LBRDLV\", \"ME_PRES\",\n \"ME_ROUT\", \"ME_TRIAL\", \"F_ME_PRES\", \"F_ME_ROUT\", \"F_ME_TRIAL\", \"RDMETH_REC\", \"DMETH_REC\", \"F_DMETH_REC\", \"MM_MTR\",\n \"MM_PLAC\", \"MM_RUPT\", \"MM_UHYST\", \"MM_AICU\", \"F_MM_MTR\", \"F_MM_ PLAC\", \"F_MM_RUPT\", \"F_MM_UHYST\", \"F_MM_AICU\",\n \"NO_MMORB\", \"ATTEND\", \"MTRAN\", \"PAY\", \"PAY_REC\", \"F_PAY\", \"F_PAY_REC\", \"APGAR5\", \"APGAR5R\", \"F_APGAR5\", \"APGAR10\",\n \"APGAR10R\", \"DPLURAL\", \"IMP_PLUR\", \"SETORDER_R\", \"SEX\", \"IMP_SEX\", \"DLMP_MM\", \"DLMP_YY\", \"COMPGST_IMP\", \"OBGEST_FLG\",\n \"COMBGEST\", \"GESTREC10\", \"GESTREC3\", \"LMPUSED\", \"OEGest_Comb\", \"OEGest_R10\", \"OEGest_R3\", \"DBWT\", \"BWTR12\", \"BWTR4\",\n \"AB_AVEN1\", \"AB_AVEN6\", \"AB_NICU\", \"AB_SURF\", \"AB_ANTI\", \"AB_SEIZ\", \"F_AB_VENT\", \"F_AB_VENT6\", \"F_AB_NIUC\",\n \"F_AB_SURFAC\", \"F_AB_ANTIBIO\", \"F_AB_SEIZ\", \"NO_ABNORM\", \"CA_ANEN\", \"CA_MNSB\", \"CA_CCHD\", \"CA_CDH\", \"CA_OMPH\",\n \"CA_GAST\", \"F_CA_ANEN\", \"F_CA_MENIN\", \"F_CA_HEART\", \"F_CA_HERNIA\", \"F_CA_OMPHA\", \"F_CA_GASTRO\", \"CA_LIMB\", \"CA_CLEFT\",\n \"CA_CLPAL\", \"CA_DOWN\", \"CA_DISOR\", \"CA_HYPO\", \"F_CA_LIMB\", \"F_CA_CLEFTLP\", \"F_CA_CLEFT\", \"F_CA_DOWNS\", \"F_CA_CHROM\",\n \"F_CA_HYPOS\", \"NO_CONGEN\", \"ITRAN\", \"ILIVE\", \"BFED\", \"F_BFED\", \"UBFACIL\", \"URF_DIAB\", \"URF_CHYPER\", \"URF_PHYPER\", \"URF_ECLAM\",\n \"UME_FORCP\", \"UME_VAC\", \"UOP_INDUC\", \"ULD_BREECH\", \"UCA_ANEN\", \"UCA_SPINA\", \"UCA_OMPHA\", \"UCA_CELFTLP\", \"UCA_HERNIA\",\n \"UCA_DOWNS\"]\n\n#The original files were large and needed to be chunked into 8 text files each\n\nfiles2014 = [\"births2014.txtaa\", \"births2014.txtac\", \"births2014.txtae\", \"births2014.txtag\", \"births2014.txtab\",\n \"births2014.txtad\", \"births2014.txtaf\", \"births2014.txtah\"]\n\nfiles2015 = [\"births2015.txtaa\", \"births2015.txtac\", \"births2015.txtae\", \"births2015.txtag\", \"births2015.txtab\",\n \"births2015.txtad\", \"births2015.txtaf\", \"births2015.txtah\"]\n\n#Read in the chunked fixed-width files & convert to CSVs\n\nfor file in files2014:\n pd.read_fwf(str(\"data_files/chunks/\" + file), header=None, names=headers, 
colspecs=positions).to_csv(str(\"data_files/chunks_csv2014/\" + file))\n\nfor file in files2015:\n pd.read_fwf(str(\"data_files/chunks2015/\" + file), header=None, names=headers, colspecs=positions).to_csv(str(\"data_files/chunks_csv2015/\" + file))\n\n\n#Next create one CSV per year\nimport os\npath = \"./data_files/chunks_csv2014/\"\nfull_fnames = [os.path.join(path, fn) for fn in files2014]\n\ndf_from_chunk = (pd.read_csv(f) for f in full_fnames)\nbirths2014 = pd.concat(df_from_chunk, ignore_index=True)\n\nbirths2014.to_csv(\"data_files/allbirths2014.csv\")\n\npath2 = \"./data_files/chunks_csv2015/\"\nfull_fnames2015 = [os.path.join(path2, fn) for fn in files2015]\n\ndf_from_chunk = (pd.read_csv(f) for f in full_fnames2015)\nbirths2015 = pd.concat(df_from_chunk, ignore_index=True)\n\nbirths2015.to_csv(\"data_files/allbirths2015.csv\")\n" ]
[ [ "pandas.read_csv", "pandas.concat" ] ]
williamlidberg/Remnants-of-charcoal-kilns
[ "690b2aa0ef31904d34613db6e5824f5e4c78b3cb" ]
[ "train.py" ]
[ "import utils.generator\nimport utils.unet\nimport utils.loss\nimport utils.metric\nimport tensorflow as tf\nimport numpy as np\nimport random\n\nimport os\n\n\ndef write_dataset(selected, log_path, name):\n with open(os.path.join(log_path, name), 'w') as f:\n for path in selected:\n f.write('{}\\n'.format(path))\n\n\ndef parse_weighting_mode(weighting):\n '''Convert given string into weighting mode representation\n Supported modes are: \"auto\", \"none\" and comma separated weights\n Parameters\n ----------\n weighting : String indicating weighting mode\n Returns\n -------\n Weighting mode representation (\"auto\"|None|list)\n '''\n if weighting.lower() == 'auto':\n return 'auto'\n elif weighting.lower() == 'none':\n return None\n else:\n return [float(f) for f in weighting.split(',')]\n\n\ndef main(img_path, gt_path, log_path, seed, epochs, depth, batch_size,\n steps_per_epoch, unet_mode, classes, weighting):\n\n # convert string representations of class labels and weights into lists\n classes = [int(f) for f in classes.split(',')]\n weighting = parse_weighting_mode(weighting)\n\n # set seed for tensorflow (and everything else except data generators,\n # which use their own random number gnerators, which are seeded separately)\n # does not work for some cuDNN operations - so possibly not totally\n # deterministic\n if seed is not None:\n random.seed(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n train_gen = utils.generator.DataGenerator(img_path, gt_path, classes,\n seed=seed, size=0.8,\n steps_per_epoch=steps_per_epoch,\n augment=True,\n class_weights=weighting,\n batch_size=batch_size)\n valid_gen = utils.generator.DataGenerator(img_path, gt_path, classes,\n seed=seed,\n exclude=train_gen.selected,\n steps_per_epoch=steps_per_epoch,\n augment=False)\n\n write_dataset(train_gen.selected, log_path, 'train_imgs.txt')\n write_dataset(valid_gen.selected, log_path, 'valid_imgs.txt')\n\n unet = utils.unet.XceptionUNet(train_gen.input_shape, depth=depth,\n classes=train_gen.class_num,\n mode=unet_mode)\n metrics = ['accuracy', tf.keras.metrics.Recall()]\n # record IoU for each class separately\n# for i in range(train_gen.class_num):\n# metrics.append(tf.keras.metrics.OneHotIoU(num_classes=train_gen.class_num,\n# target_class_ids=[i,],\n# name='{}_iou'.format(i)))\n unet.model.compile(\n # optimizer=\"rmsprop\",\n optimizer=\"adam\",\n # optimizer=tf.keras.optimizers.SGD(momentum=0.9),\n # loss=jaccard_distance_loss,\n # loss='binary_crossentropy',\n loss='categorical_crossentropy',\n sample_weight_mode=\"temporal\",\n # loss=utils.loss.cross_entropy,\n metrics=metrics)\n # utils.metric.f1_m, utils.metric.recall_m])\n # \"categorical_crossentropy\")\n\n callbacks = [\n # tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10,\n # mode='min'),\n tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=10,\n min_lr=0.00001, mode='min'),\n tf.keras.callbacks.ModelCheckpoint(\n os.path.join(log_path, 'trained.h5'),\n monitor='val_loss',\n save_weights_only=True,\n verbose=0, save_best_only=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_path, histogram_freq=5,\n write_grads=True, batch_size=2,\n write_images=True),\n tf.keras.callbacks.CSVLogger(os.path.join(log_path, 'log.csv'),\n append=True, separator=';')\n ]\n unet.model.fit_generator(train_gen, epochs=epochs, verbose=0,\n callbacks=callbacks,\n validation_data=valid_gen)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Train Model',\n 
formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-I', '--img_path', action='append', help='Add path '\n 'to input images')\n parser.add_argument('gt_path', help='Path to groundtruth image folder')\n parser.add_argument('log_path', help='Path to folder where logging data '\n 'will be stored')\n parser.add_argument('--seed', help='Random seed', default=None, type=int)\n parser.add_argument('--depth', help='Depth of the used network',\n default=None, type=int)\n parser.add_argument('--epochs', default=100, type=int)\n parser.add_argument('--steps_per_epoch', default=None, type=int)\n parser.add_argument('--unet_mode', choices=utils.unet.XceptionUNet.UNET_MODES,\n default=utils.unet.XceptionUNet.UNET_MODES[0], \n help='Choose UNet architecture configuration')\n parser.add_argument('--batch_size', help='Number of patches per batch',\n type=int, default=4)\n parser.add_argument('--classes', help='List of class labels in ground '\n 'truth - order needs to correspond to weighting order',\n default='0,1')\n parser.add_argument('--weighting', help='Configure class weights - can be '\n '\"auto\", \"none\" or defined weight string, e.g., '\n '\"0.1,1\"',\n default='0.1,1')\n\n args = vars(parser.parse_args())\n main(**args)" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "numpy.random.seed", "tensorflow.random.set_seed", "tensorflow.keras.metrics.Recall", "tensorflow.keras.callbacks.ReduceLROnPlateau" ] ]
AlchemicalChest/gaussian-process-svi
[ "56111e9a125cdd322473685b1b7ef875d584653e" ]
[ "GPSVI/test/testquad.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 21 11:09:51 2015\n\n@author: Ziang\n\"\"\"\n#from numpy.polynomial.hermite import hermval\n#from numpy.polynomial.hermite import hermgauss\n\nfrom numpy import inf\n#from math import factorial\nfrom math import sqrt\nfrom math import pi\nfrom sympy import mpmath\nfrom scipy.integrate import quad\nfrom GaussHermiteQuadrature import GaussHermiteQuadrature as gaussquad\n#def H(n, x):\n# return hermval(x, [0]*n+[1])\n#def hermite_weight(n, x):\n# return pow(2,n-1)*factorial(n)*sqrt(pi)/(pow(n,2)*pow(H(n-1, x), 2))\nmu = 1\nsigma = 2\n\nsigmoid = lambda x : 1/(1+mpmath.exp(-x))\npolynomial = lambda x : 2*x**6+4*x**9+x+2\nexponential = lambda x : mpmath.exp(x)\nnormalpdf = lambda x : 1 / (sqrt(2*pi)*sigma) * mpmath.exp(-(x-mu)**2/(2*sigma**2))\n\n#N = 10\n#x,u = hermgauss(N)\n#z = x\n#\n#\n#hquad = 0\n#for i in range(N):\n# w = hermite_weight(N, x[i])\n# hquad += w * polynomial(sqrt(2)*sigma*x[i]+mu)\n#hquad /= sqrt(pi)\n#\n#print('hquad=', hquad)\n\nhquad = gaussquad.hermite_quad(mu, sigma, func=exponential, N=20)\nprint('hquad=', hquad)\n\n#test = gaussquad.hermite_quad_test(mu, sigma, func=polynomial, N=10)\n#print('test=', test)\n\nfunc = lambda x: exponential(x) * normalpdf(x)\nprint('quad =', quad(func, -inf, inf))\n" ]
[ [ "scipy.integrate.quad" ] ]
JimCurryWang/Deep-Learning-Jot
[ "b72e36b54089f7a8b92409b69b7187e84103f76e" ]
[ "RNN/Embedding_RNN.py" ]
[ "import torch\nimport torchvision\nimport torch.nn.functional as F\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms \nfrom torch import optim \nfrom torch import nn \nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport numpy as np\n\n# Set device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyperparameters\n# ---- embedding part ---\nvocab_size = 100\nembedding_dim = 50 \n\n# --- LSTM part ----\nsequence_length = 28\n# input_size = 28\nhidden_size = 256\nnum_layers = 2\nnum_classes = 10\n\nlearning_rate = 0.005\nbatch_size = 64\nnum_epochs = 3\n\n\nclass EmbeddingLSTM(nn.Module):\n '''Embedding vector with LSTM (many-to-one)\n '''\n def __init__(self, vocab_size, embedding_dim, hidden_size, num_layers, num_classes, pretrained_weights=None):\n super(EmbeddingLSTM, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.pretrained_weights = pretrained_weights\n\n if self.pretrained_weights is not None:\n self.embedding = nn.Embedding.from_pretrained(self.pretrained_weights) \n self.embedding.weight.requires_grad = False\n else:\n # Embedding(vocab_size=100, embedding_dim=50)\n self.embedding = nn.Embedding(\n num_embeddings=vocab_size, embedding_dim=embedding_dim\n ) \n self.embedding.weight.requires_grad = True\n\n \n self.lstm = nn.LSTM(\n input_size=embedding_dim, hidden_size=hidden_size,\n num_layers=num_layers, batch_first=True\n )\n\n # Using the last rnn output with fc to obtain the final classificaiton result\n self.fc = nn.Linear(\n in_features=hidden_size, out_features=num_classes\n )\n\n def forward(self, x):\n '''\n '''\n embed = self.embedding(x) # in=[64, 28], out=[64, 28, 50]\n\n out, _ = self.lstm(embed) # x=[64, 28, 50], out=[64, 28, 256]=(batch, seq_len, 1 * hidden_size)\n\n # Decode the hidden state of the last time step\n # only take the last hidden state and send it into fc\n out = out[:, -1, :] # out=[64, 256]\n out = self.fc(out) # out=[64, 10]\n\n return out\n\nif __name__ == '__main__':\n\n # Initialize network\n \n pre = False\n if pre:\n # torch.FloatTensor()\n # \"FloatTensor\" containing pretrained weights\n pretrained_weights = torch.rand(size=(100,50)) \n model = EmbeddingLSTM(\n vocab_size, embedding_dim, hidden_size, num_layers, num_classes, \n pretrained_weights=pretrained_weights\n ).to(device)\n else:\n model = EmbeddingLSTM(\n vocab_size, embedding_dim, hidden_size, num_layers, num_classes, \n pretrained_weights=None\n ).to(device)\n\n # Loss and optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n # Generate test data \n FakeData = torch.randint(low=0, high=10, size=[batch_size, sequence_length+1]) # [64, 29]\n data = FakeData[:,1:] # [64, 28]\n targets = FakeData[:,0] # [64], torch.LongTensor([0,1,2,4...7,8,9...]])\n\n # Send data to device\n data = data.to(device=device)\n targets = targets.to(device=device)\n\n # Forward\n scores = model(data) # [64, 10]\n loss = criterion(scores, targets)\n\n # Backward\n optimizer.zero_grad()\n loss.backward()\n\n # Gradient descent update step/adam step\n optimizer.step()\n\n" ]
[ [ "torch.nn.Linear", "torch.rand", "torch.nn.Embedding.from_pretrained", "torch.nn.Embedding", "torch.nn.LSTM", "torch.randint", "torch.cuda.is_available", "torch.nn.CrossEntropyLoss" ] ]