repo_name | hexsha | file_path | code | apis
---|---|---|---|---
christophe-rannou/imbalanced-learn
|
[
"c3f3b0fd9815e206ea63f3f11728f097608bf580"
] |
[
"examples/ensemble/plot_balance_cascade.py"
] |
[
"\"\"\"\n===============\nBalance cascade\n===============\n\nAn illustration of the balance cascade ensemble method.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom sklearn.datasets import make_classification\nfrom sklearn.decomposition import PCA\n\nfrom imblearn.ensemble import BalanceCascade\n\nprint(__doc__)\n\nsns.set()\n\n# Define some color for the plotting\nalmost_black = '#262626'\npalette = sns.color_palette()\n\n\n# Generate the dataset\nX, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],\n n_informative=3, n_redundant=1, flip_y=0,\n n_features=20, n_clusters_per_class=1,\n n_samples=5000, random_state=10)\n\n# Instanciate a PCA object for the sake of easy visualisation\npca = PCA(n_components=2)\n# Fit and transform x to visualise inside a 2D feature space\nX_vis = pca.fit_transform(X)\n\n# Apply Balance Cascade method\nbc = BalanceCascade()\nX_resampled, y_resampled = bc.fit_sample(X, y)\nX_res_vis = []\nfor X_res in X_resampled:\n X_res_vis.append(pca.transform(X_res))\n\n# Two subplots, unpack the axes array immediately\nf, (ax1, ax2) = plt.subplots(1, 2)\n\nax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label=\"Class #0\", alpha=0.5,\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)\nax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label=\"Class #1\", alpha=0.5,\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)\nax1.set_title('Original set')\n\nax2.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label=\"Class #0\", alpha=0.5,\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)\nfor iy, e in enumerate(X_res_vis):\n ax2.scatter(e[y_resampled[iy] == 1, 0], e[y_resampled[iy] == 1, 1],\n label=\"Class #1\", alpha=0.5, edgecolor=almost_black,\n facecolor=np.random.rand(3,), linewidth=0.15)\nax2.set_title('Balance cascade')\n\nplt.show()\n"
] |
[
[
"numpy.random.rand",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"sklearn.decomposition.PCA",
"sklearn.datasets.make_classification"
]
] |
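A minimal sketch of the resampling call at the heart of the example above, for quick reference. It assumes an imbalanced-learn version old enough to still ship `BalanceCascade` (the class was removed in release 0.6); there, `fit_sample` returns one balanced subset per cascade iteration.

```python
# Sketch only: assumes imbalanced-learn < 0.6, where BalanceCascade exists.
from sklearn.datasets import make_classification
from imblearn.ensemble import BalanceCascade

X, y = make_classification(n_classes=2, weights=[0.1, 0.9],
                           n_samples=5000, random_state=10)

bc = BalanceCascade()
# fit_sample returns one balanced subset per cascade iteration, so
# X_resampled stacks several (n_subset_samples, n_features) arrays.
X_resampled, y_resampled = bc.fit_sample(X, y)
print(len(X_resampled), X_resampled[0].shape)
```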
ann0218/codehome
|
[
"a5eb10c849743f42f4bcf440b6a62a351b38e8a9"
] |
[
"mean shift.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nnp.set_printoptions(threshold=np.inf)\nfrom sklearn import datasets\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.spatial import distance\n\n\ndef neigbourhood_points(X, x_centroid, dist=3):\n eligible_X = []\n for x in X:\n distance_between = distance.euclidean(x, x_centroid)\n if distance_between <= dist:\n eligible_X.append(x)\n\n eligible_X = np.array(eligible_X)\n mean = np.mean(eligible_X, axis=0)\n return eligible_X, mean\n\n\ndata_num = 100\ndata_dim = 2\ndata = 0 + 2 * np.random.randn(data_num, data_dim)\ntemp = 10 + 3 * np.random.randn(data_num, data_dim)\ndata = np.concatenate((data, temp), axis=0)\ntemp = 0 + 2 * np.random.randn(data_num, data_dim)\ntemp[:, 0] = temp[:, 0] + 20\ndata = np.concatenate((data, temp), axis=0)\ntemp = 0 + 1.5 * np.random.randn(data_num, data_dim)\ntemp[:, 0] = temp[:, 0] + 30\ntemp[:, 1] = temp[:, 1] + 20\ndata = np.concatenate((data, temp), axis=0)\ntemp = 0 + 1 * np.random.randn(data_num, data_dim)\ntemp[:, 1] = temp[:, 1] + 30\n\n\ndata_num = data_num * 4\nx = np.copy(data)\ncolor = ['red', 'blue', 'pink', 'gold', 'black']\n\n\n\nbindist = 4\nfor iteration in range(15):\n mean = np.zeros((data_num, data_dim))\n\n for i in range(data_num):\n eligible_X, mean[i] = neigbourhood_points(data, x[i], dist=4.31415926)\n plt.clf()\n plt.scatter(data[:, 0], data[:, 1], color='blue', s=50, alpha=0.3)\n\n plt.scatter(x[:, 0], x[:, 1], color='red', s=50, alpha=0.1)\n\n x = mean\n plt.title('Iteration' + str(iteration))\n plt.grid()\n plt.show()\n plt.pause(0.2)\n\nthreshold = 1.0\ncenter = x[0, :].reshape((1, 2))\nfor i in range(data_num):\n\n found = False\n for j in range(center.shape[0]):\n dst = distance.euclidean(x[i], center[j])\n\n if dst < threshold:\n found = True\n break\n if not found:\n center = np.concatenate((center, x[i].reshape((1, 2))), axis=0)\n\nprint(center)\n\nK = center.shape[0]\n\nfor i in range(data_num):\n dst_array = []\n for k in range(K):\n dst_1 = distance.euclidean(center[k, :], data[i, :])\n dst_array.append(dst_1)\n\n cluster = np.argmin(dst_array)\n plt.scatter(data[i, 0], data[i, 1], color=color[cluster], s=50, alpha=0.5, )\nfor k in range(K):\n plt.scatter(center[k, 0], center[k, 1], color=color[k], s=150, alpha=1, marker='*')\n\n# plt.xlim(-6,6)\n# plt.ylim(-6,6)\n\n\nplt.grid()\nplt.show()\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"scipy.spatial.distance.euclidean",
"numpy.zeros",
"numpy.argmin",
"matplotlib.pyplot.clf",
"numpy.set_printoptions",
"numpy.copy",
"matplotlib.pyplot.grid",
"numpy.random.randn",
"numpy.mean",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show"
]
] |
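The script above implements the mean-shift update by hand: every point repeatedly moves to the mean of all data points within a fixed bandwidth of it. A compact, self-contained restatement of that update (the bandwidth and the toy data here are illustrative, not taken from the script):

```python
import numpy as np
from scipy.spatial import distance


def mean_shift_step(data, points, bandwidth=4.0):
    # Pairwise Euclidean distances, shape (len(points), len(data)).
    d = distance.cdist(points, data, metric='euclidean')
    shifted = np.empty_like(points)
    for i, row in enumerate(d):
        # Each point moves to the mean of its neighbourhood.
        shifted[i] = data[row <= bandwidth].mean(axis=0)
    return shifted


rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(0, 2, (100, 2)),
                       rng.normal(10, 3, (100, 2))])
x = data.copy()
for _ in range(15):
    x = mean_shift_step(data, x)
```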
zenithfang/supervised_dispnet
|
[
"f81dfccfdc944e015d8fae17e24b3e664bec14d6"
] |
[
"models/Disp_vgg_feature.py"
] |
[
"import torch.cuda\nimport torch.nn as nn\nimport torch\nimport torch.utils.model_zoo as model_zoo\nimport torch.nn.functional as F\nimport torchvision.models as models\n#from .model_utils import * #use . represent relative address\n#from utils.util_functions import unsqueeze_dim0_tensor\n\ndef upsample_nn_nearest(x):\n# return F.upsample(x, scale_factor=2, mode='nearest')\n return F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)\ndef initilize_modules(modules):\n for m in modules:\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n\ndef Conv2dBlock2(c_in, c_out, k_size, stride, padding, leaky=False):\n if leaky:\n return nn.Sequential(\n nn.Conv2d(c_in, c_out, k_size, stride, padding),\n nn.LeakyReLU(0.1),\n nn.Conv2d(c_out, c_out, k_size, 1, padding),\n nn.LeakyReLU(0.1)\n )\n else:\n return nn.Sequential(\n \tnn.Conv2d(c_in, c_out, k_size, stride, padding),\n\t nn.ReLU(inplace=True),\n\t nn.Conv2d(c_out, c_out, k_size, 1, padding),\n\t nn.ReLU(inplace=True)\n\t )\n\n\ndef Conv2dBlock1(c_in, c_out, k_size, stride, padding, leaky=False):\n if leaky:\n \treturn nn.Sequential(\n nn.Conv2d(c_in, c_out, k_size, stride, padding),\n nn.LeakyReLU(0.1)\n )\n else:\n \treturn nn.Sequential(\n nn.Conv2d(c_in, c_out, k_size, stride, padding),\n nn.ReLU(inplace=True)\n )\n\n\ndef ConvTranspose2dBlock1(c_in, c_out, k_size, stride, padding, output_padding, leaky=False):\n\n if leaky:\n \treturn nn.Sequential(\n nn.ConvTranspose2d(c_in, c_out, k_size, stride, padding, output_padding),\n nn.LeakyReLU(0.1)\n )\n else:\n \treturn nn.Sequential(\n nn.ConvTranspose2d(c_in, c_out, k_size, stride, padding, output_padding),\n nn.ReLU(inplace=True)\n )\n\ndef predict_disp(in_planes):\n return nn.Sequential(\n nn.Conv2d(in_planes, 1, kernel_size=3, padding=1),\n nn.Sigmoid()\n )\n\nclass Disp_vgg_feature(nn.Module):\n def __init__(self, datasets='kitti', use_pretrained_weights=False):\n super(Disp_vgg_feature, self).__init__()\n self.use_pretrained_weights = use_pretrained_weights\n self.only_train_dec = False\n\n if datasets == 'kitti':\n self.alpha = 10\n self.beta = 0.01\n elif datasets == 'nyu':\n self.alpha = 10#not sure about this number choice(I just think nyu should be more detailed)\n self.beta = 0.1\n\n self.features = models.vgg16(pretrained=False)\n # self.vgg16_model = models.vgg16(pretrained=True)\n # self.conv1 = self.vgg16_model._modules['features'][0:5]\n # self.conv2 = self.vgg16_model._modules['features'][5:10]\n # self.conv3 = self.vgg16_model._modules['features'][10:17]\n # self.conv4 = self.vgg16_model._modules['features'][17:24]\n # self.conv5 = self.vgg16_model._modules['features'][24:31]\n\n self.upconv4 = ConvTranspose2dBlock1(512, 256, 4, 2, 1, 0)\n self.iconv4 = Conv2dBlock1(256 + 512, 256, 3, 1, 1)\n\n self.upconv3 = ConvTranspose2dBlock1(256, 128, 4, 2, 1, 0)\n self.iconv3 = Conv2dBlock1(128 + 256, 128, 3, 1, 1)\n\n self.upconv2 = ConvTranspose2dBlock1(128, 64, 4, 2, 1, 0)\n self.iconv2 = Conv2dBlock1(64 + 128 + 1, 64, 3, 1, 1)\n\n self.upconv1 = ConvTranspose2dBlock1(64, 32, 4, 2, 1, 0)\n self.iconv1 = Conv2dBlock1(32 + 64 + 1, 32, 3, 1, 1)\n\n self.upconv0 = ConvTranspose2dBlock1(32, 16, 4, 2, 1, 0)\n self.iconv0 = Conv2dBlock1(16 + 1, 16, 3, 1, 1)\n\n self.disp3 = predict_disp(128)\n self.disp2 = 
predict_disp(64)\n self.disp1 = predict_disp(32)\n self.disp0 = predict_disp(16)\n\n def init_weights(self, use_pretrained_weights=False):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n torch.nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n torch.nn.init.constant_(m.weight, 1)\n torch.nn.init.constant_(m.bias, 0)\n if use_pretrained_weights:\n print(\"loading pretrained weights downloaded from pytorch.org\")\n self.load_vgg_params(model_zoo.load_url('https://download.pytorch.org/models/vgg16-397923af.pth'))\n else:\n print(\"do not load pretrained weights for the monocular model\")\n\n def load_vgg_params(self, params):\n model_dict = self._modules['features'].state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in params.items() if k in model_dict}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict) \n # 3. load the new state dict\n self._modules['features'].load_state_dict(model_dict)\n\n def forward(self, x):\n conv1 = self.features._modules['features'][0:5](x)\n conv2 = self.features._modules['features'][5:10](conv1)\n conv3 = self.features._modules['features'][10:17](conv2)\n conv4 = self.features._modules['features'][17:24](conv3)\n conv5 = self.features._modules['features'][24:31](conv4)\n\n # conv1 = self.conv1(x)\n # conv2 = self.conv2(conv1)\n # conv3 = self.conv3(conv2)\n # conv4 = self.conv4(conv3)\n # conv5 = self.conv5(conv4)\n\n if self.use_pretrained_weights and self.only_train_dec:\n conv1 = conv1.detach()\n conv2 = conv2.detach()\n conv3 = conv3.detach()\n conv4 = conv4.detach()\n conv5 = conv5.detach()\n\n skip1 = conv1\n skip2 = conv2\n skip3 = conv3\n skip4 = conv4\n\n upconv4 = self.upconv4(conv5) # H/16\n concat4 = torch.cat((upconv4, skip4), 1)\n iconv4 = self.iconv4(concat4)\n\n upconv3 = self.upconv3(iconv4) # H/8\n concat3 = torch.cat((upconv3, skip3), 1)\n iconv3 = self.iconv3(concat3)\n disp3 = self.alpha * self.disp3(iconv3)+self.beta\n disp3up = upsample_nn_nearest(disp3)\n\n upconv2 = self.upconv2(iconv3) # H/4\n concat2 = torch.cat((upconv2, skip2, disp3up), 1)\n iconv2 = self.iconv2(concat2)\n disp2 = self.alpha * self.disp2(iconv2)+self.beta\n disp2up = upsample_nn_nearest(disp2)\n\n upconv1 = self.upconv1(iconv2) # H/2\n concat1 = torch.cat((upconv1, skip1, disp2up), 1)\n iconv1 = self.iconv1(concat1)\n disp1 = self.alpha * self.disp1(iconv1)+self.beta\n disp1up = upsample_nn_nearest(disp1)\n\n upconv0 = self.upconv0(iconv1)\n concat0 = torch.cat((upconv0, disp1up), 1)\n iconv0 = self.iconv0(concat0)\n disp0 = self.alpha * self.disp0(iconv0)+self.beta\n\n if self.training:\n return disp0, disp1, disp2, disp3\n else:\n return disp0\n"
] |
[
[
"torch.cat",
"torch.nn.Sigmoid",
"torch.nn.init.constant_",
"torch.nn.functional.interpolate",
"torch.nn.LeakyReLU",
"torch.nn.init.xavier_uniform_",
"torch.nn.ConvTranspose2d",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
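Two details of this decoder are easy to miss: each `predict_disp` head is a sigmoid-bounded 3x3 convolution, and its raw output is rescaled as `alpha * disp + beta` so predicted disparities stay in a positive, dataset-dependent range. A stand-alone sketch of just that head (tensor shapes here are arbitrary):

```python
import torch
import torch.nn as nn


def predict_disp(in_planes):
    # 3x3 conv to a single channel, squashed into (0, 1) by a sigmoid.
    return nn.Sequential(
        nn.Conv2d(in_planes, 1, kernel_size=3, padding=1),
        nn.Sigmoid())


alpha, beta = 10, 0.01            # the 'kitti' settings from the model above
head = predict_disp(16)
feat = torch.randn(1, 16, 32, 32)
disp = alpha * head(feat) + beta  # values bounded to (0.01, 10.01)
print(disp.min().item(), disp.max().item())
```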
yuexy/mmocr
|
[
"82488024db159266e66ea6b0d6f84a5a18e87362"
] |
[
"mmocr/models/ner/losses/masked_cross_entropy_loss.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom mmocr.models.builder import LOSSES\n\n\[email protected]_module()\nclass MaskedCrossEntropyLoss(nn.Module):\n \"\"\"The implementation of masked cross entropy loss.\n\n The mask has 1 for real tokens and 0 for padding tokens,\n which only keep active parts of the cross entropy loss.\n Args:\n num_labels (int): Number of classes in labels.\n ignore_index (int): Specifies a target value that is ignored\n and does not contribute to the input gradient.\n \"\"\"\n\n def __init__(self, num_labels=None, ignore_index=0):\n super().__init__()\n self.num_labels = num_labels\n self.criterion = CrossEntropyLoss(ignore_index=ignore_index)\n\n def forward(self, logits, img_metas):\n '''Loss forword.\n Args:\n logits: Model output with shape [N, C].\n img_metas (dict): A dict containing the following keys:\n - img (list]): This parameter is reserved.\n - labels (list[int]): The labels for each word\n of the sequence.\n - texts (list): The words of the sequence.\n - input_ids (list): The ids for each word of\n the sequence.\n - attention_mask (list): The mask for each word\n of the sequence. The mask has 1 for real tokens\n and 0 for padding tokens. Only real tokens are\n attended to.\n - token_type_ids (list): The tokens for each word\n of the sequence.\n '''\n\n labels = img_metas['labels']\n attention_masks = img_metas['attention_masks']\n\n # Only keep active parts of the loss\n if attention_masks is not None:\n active_loss = attention_masks.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = self.criterion(active_logits, active_labels)\n else:\n loss = self.criterion(\n logits.view(-1, self.num_labels), labels.view(-1))\n return {'loss_cls': loss}\n"
] |
[
[
"torch.nn.CrossEntropyLoss"
]
] |
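The masking trick is the whole of the class above: flatten logits and labels, keep only the positions whose attention mask is 1, and hand those to a plain `CrossEntropyLoss`. A stand-alone illustration with made-up toy tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 5
logits = torch.randn(2, 4, num_labels)          # [batch, seq_len, classes]
labels = torch.tensor([[1, 2, 3, 0],
                       [4, 1, 0, 0]])
attention_masks = torch.tensor([[1, 1, 1, 0],
                                [1, 1, 0, 0]])  # 0 marks padding tokens

criterion = CrossEntropyLoss(ignore_index=0)
active = attention_masks.view(-1) == 1          # drop padded positions
loss = criterion(logits.view(-1, num_labels)[active],
                 labels.view(-1)[active])
print(loss)
```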
yozoon/TrenchDepositionAutomation
|
[
"4eb1dd9fbabe7a782aa2070de144240616c00472"
] |
[
"trench_automation/util.py"
] |
[
"from os import listdir, path\n\nimport numpy as np\nimport sklearn.neighbors as neighbors\nimport vtk\nfrom vtk.util.numpy_support import vtk_to_numpy\n\n\ndef extract_line(filename):\n # Read the VTP file\n reader = vtk.vtkXMLPolyDataReader()\n reader.SetFileName(filename)\n reader.Update()\n\n # Extract the polygon data\n polydata = reader.GetOutput()\n\n # Apply a filter to connect contiguous line segments\n # (This step is necessary since otherwise we would have many small line elements)\n strip = vtk.vtkStripper()\n strip.SetInputData(polydata)\n strip.SetJoinContiguousSegments(True)\n strip.Update()\n\n # Retrieve the filter output\n filtered = strip.GetOutput()\n\n # Extract Points\n point_coordinates = vtk_to_numpy(filtered.GetPoints().GetData())\n\n # Extract Line data\n lines = filtered.GetLines()\n lines_array = vtk_to_numpy(lines.GetData())\n\n # Extract the surface line (as separate x, y and z array)\n return [np.array(d) for d in point_coordinates[lines_array[1:]].T]\n\n\ndef line_to_distance(tx, ty, x, y):\n nbrs = neighbors.NearestNeighbors(\n n_neighbors=1, metric=\"euclidean\").fit(np.vstack([x, y]).T)\n dist, _ = nbrs.kneighbors(np.vstack([tx, ty]).T)\n return dist\n"
] |
[
[
"numpy.array",
"sklearn.neighbors.NearestNeighbors",
"numpy.vstack"
]
] |
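`line_to_distance` above is a one-nearest-neighbour lookup: it indexes the reference curve `(x, y)` and returns, for each query point `(tx, ty)`, the Euclidean distance to the closest curve point. A small usage sketch with a synthetic straight line (the data is made up; `extract_line` needs a real VTP file and is omitted):

```python
import numpy as np
import sklearn.neighbors as neighbors


def line_to_distance(tx, ty, x, y):
    nbrs = neighbors.NearestNeighbors(
        n_neighbors=1, metric="euclidean").fit(np.vstack([x, y]).T)
    dist, _ = nbrs.kneighbors(np.vstack([tx, ty]).T)
    return dist


x = np.linspace(0.0, 1.0, 100)   # reference curve: the segment y = 0
y = np.zeros_like(x)
print(line_to_distance([0.5], [0.3], x, y))  # -> approx. [[0.3]]
```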
NSLS-II/pyCHX
|
[
"e82e343903e477c4359b03c4d079eb1e5202c25f"
] |
[
"pyCHX/chx_generic_functions.py"
] |
[
"from pyCHX.chx_libs import *\n#from tqdm import *\nfrom pyCHX.chx_libs import ( colors, markers )\nfrom scipy.special import erf\n\nfrom skimage.filters import prewitt\nfrom skimage.draw import line_aa, line, polygon, ellipse, circle\n\nfrom modest_image import imshow\nimport matplotlib.cm as mcm\nfrom matplotlib import cm\nimport copy, scipy \nimport PIL \nfrom shutil import copyfile\nimport datetime, pytz\nfrom skbeam.core.utils import radial_grid, angle_grid, radius_to_twotheta, twotheta_to_q\nfrom os import listdir\nimport numpy as np\n\n\nmarkers = ['o', 'D', 'v', '^', '<', '>', 'p', 's', 'H',\n 'h', '*', 'd', \n '8', '1', '3', '2', '4', '+', 'x', '_', '|', ',', '1',] \nmarkers = np.array( markers *100 )\n\n\n\n\nflatten_nestlist = lambda l: [item for sublist in l for item in sublist]\n\"\"\"a function to flatten a nest list\ne.g., flatten( [ ['sg','tt'],'ll' ] )\ngives ['sg', 'tt', 'l', 'l']\n\"\"\"\n\n\ndef get_frames_from_dscan( uid, detector = 'eiger4m_single_image' ):\n '''Get frames from a dscan by giving uid and detector '''\n hdr = db[uid]\n return db.get_images(hdr, detector )\n\n\ndef get_roi_intensity( img, roi_mask):\n qind, pixelist = roi.extract_label_indices(roi_mask)\n noqs = len(np.unique(qind))\n avgs = np.zeros(noqs)\n for i in tqdm( range(1,1+noqs)):\n avgs[i-1] = ( np.average( img[roi_mask==i] ) ) \n return avgs \n \n \ndef generate_h5_list(inDir, filename):\n '''YG DEV at 9/19/2019@CHX generate a lst file containing all h5 fiels in inDir\n Input:\n inDir: the input direction\n filename: the filename for output (have to lst as extension)\n Output:\n Save the all h5 filenames in a lst file \n '''\n fp_list = listdir( inDir )\n if filename[-4:] !='.lst':\n filename += '.lst'\n for FP in fp_list:\n FP_ = inDir+FP\n if os.path.isdir(FP_):\n fp = listdir( FP_ )\n for fp_ in fp:\n if '.h5' in fp_: \n append_txtfile( filename = filename, \n data = np.array( [ FP_+'/'+fp_ ]))\n print('The full path of all the .h5 in %s has been saved in %s.'%(inDir, filename))\n print( 'You can use ./analysis/run_gui to visualize all the h5 file.') \n \n \ndef fit_one_peak_curve( x,y, fit_range=None ):\n '''YG Dev@Aug 10, 2019 fit a curve with a single Lorentzian shape\n Parameters:\n x: one-d array, x-axis data\n y: one-d array, y-axis data\n fit_range: [x1, x2], a list of index, to define the x-range for fit\n Return:\n center: float, center of the peak\n center_std: float, error bar of center in the fitting\n fwhm: float, full width at half max intensity of the peak, 2*sigma\n fwhm_std:float, error bar of the full width at half max intensity of the peak\n xf: the x in the fit\n out: the fitting class resutled from lmfit\n \n '''\n from lmfit.models import LinearModel, LorentzianModel\n peak = LorentzianModel()\n background = LinearModel()\n model = peak + background\n if fit_range is not None: \n x1,x2=fit_range\n xf= x[x1:x2]\n yf = y[x1:x2]\n else:\n xf = x\n yf = y\n model.set_param_hint('slope', value=5 )\n model.set_param_hint('intercept', value=0 )\n model.set_param_hint('center', value=0.005 )\n model.set_param_hint('amplitude', value= 0.1 )\n model.set_param_hint('sigma', value=0.003 )\n #out=model.fit(yf, x=xf)#, method='nelder')\n out=model.fit(yf, x=xf, method= 'leastsq' ) \n cen = out.params['center'].value\n cen_std = out.params['center'].stderr\n wid = out.params['sigma'].value *2\n wid_std = out.params['sigma'].stderr *2\n return cen, cen_std, wid, wid_std , xf, out\n\n\ndef plot_xy_with_fit( x, y, xf, out, \n cen, cen_std,wid, wid_std,\n xlim=[1e-3,0.01],xlabel= 'q 
('r'$\\AA^{-1}$)',\n ylabel='I(q)', filename=None):\n '''YG Dev@Aug 10, 2019 to plot x,y with fit, \n currently this code is dedicated to plot q-Iq with fit and show the fittign parameter, peak pos, peak wid '''\n \n yf2=out.model.eval(params=out.params, x=xf)\n fig, ax = plt.subplots( )\n plot1D(x=x,y=y,ax=ax,m='o', ls='',c='k', legend='data')\n plot1D(x=xf,y=yf2,ax=ax,m='', ls='-',c='r', legend='fit',logy=True)\n ax.set_xlim( xlim ) \n #ax.set_ylim( 0.1, 4)\n #ax.set_title(uid+'--t=%.2f'%tt)\n ax.set_xlabel( xlabel ) \n ax.set_ylabel(ylabel )\n txts = r'peak' + r' = %.5f +/- %.5f '%( cen, cen_std ) \n ax.text(x =0.02, y=.2, s=txts, fontsize=14, transform=ax.transAxes)\n txts = r'wid' + r' = %.4f +/- %.4f'%( wid, wid_std) \n #txts = r'$\\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'\n ax.text(x =0.02, y=.1, s=txts, fontsize=14, transform=ax.transAxes)\n plt.tight_layout() \n if filename is not None:\n plt.savefig( filename )\n return ax\n \n \n\n \n \ndef get_touched_qwidth( qcenters ):\n '''YG Dev@CHX April 2019, get touched qwidth by giving qcenters \n '''\n qwX = np.zeros_like(qcenters)\n qW= qcenters[1:] - qcenters[:-1]\n qwX[0] = qW[0]\n for i in range(1,len(qcenters)-1):\n #print(i)\n qwX[i] = min( qW[i-1], qW[i] )\n qwX[-1] = qW[-1]\n qwX *=0.9999\n return qwX\n\n\n\ndef append_txtfile( filename, data, fmt='%s', *argv,**kwargs ):\n '''YG. Dev May 10, 2109 append data to a file\n Create an empty file if the file dose not exist, otherwise, will append the data to it\n Input:\n fp: filename\n data: the data to be append\n fmt: the parameter defined in np.savetxt\n \n '''\n from numpy import savetxt\n exists = os.path.isfile( filename)\n if not exists:\n np.savetxt( filename, [ ] , fmt='%s', )\n print('create new file')\n \n f=open( filename, 'a') \n savetxt( f, data, fmt = fmt , *argv,**kwargs )\n f.close() \n\ndef get_roi_mask_qval_qwid_by_shift( new_cen, new_mask, old_cen,old_roi_mask, \n setup_pargs, geometry,\n limit_qnum= None):\n '''YG Dev April 22, 2019 Get roi_mask, qval_dict, qwid_dict by shift the pre-defined big roi_mask'''\n center=setup_pargs['center']\n roi_mask1 = shift_mask( new_cen=center, new_mask=new_mask, old_cen=old_cen,\n old_roi_mask=old_roi_mask, limit_qnum= limit_qnum) \n qval_dict_, qwid_dict_ = get_masked_qval_qwid_dict_using_Rmax( \n new_mask=new_mask, setup_pargs=setup_pargs, \n old_roi_mask=old_roi_mask, old_cen=old_cen, geometry = geometry ) \n w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask1,new_mask) \n #print(w,w1)\n qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k in w1 }\n qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k in w1 }\n qval_dict={}\n qwid_dict={}\n for i, k in enumerate( list(qval_dictx.keys())):\n qval_dict[i] = qval_dictx[k]\n qwid_dict[i] = qwid_dictx[k] \n return roi_mask1, qval_dict, qwid_dict\n\n\ndef get_zero_nozero_qind_from_roi_mask(roi_mask,mask):\n '''YG Dev April 22, 2019 Get unique qind of roi_mask with zero and non-zero pixel number'''\n qind, pixelist = roi.extract_label_indices(roi_mask*mask)\n noqs = len(np.unique(qind))\n nopr = np.bincount(qind, minlength=(noqs+1))[1:]\n w=np.where(nopr==0)[0]\n w1=np.where(nopr!=0)[0]\n return w, w1\n\n\n\ndef get_masked_qval_qwid_dict_using_Rmax( new_mask, setup_pargs, old_roi_mask, old_cen, geometry ): \n '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask using a Rmax method '''\n cy,cx= setup_pargs['center']\n my,mx=new_mask.shape\n Rmax = int(np.ceil(max( 
np.hypot(cx,cy),np.hypot(cx-mx,cy-my),np.hypot(cx,cy-my),np.hypot(cx-mx,cy) )))\n Fmask = np.zeros([Rmax*2,Rmax*2],dtype=int)\n Fmask[ Rmax-cy : Rmax-cy+my, Rmax-cx: Rmax-cx + mx]=new_mask\n roi_mask1 = shift_mask( new_cen=[Rmax,Rmax], new_mask=np.ones_like(Fmask), old_cen=old_cen,\n old_roi_mask=old_roi_mask, limit_qnum= None) \n setup_pargs_={ 'center':[Rmax,Rmax], 'dpix': setup_pargs['dpix'], 'Ldet': setup_pargs['Ldet'],\n 'lambda_': setup_pargs['lambda_'], }\n qval_dict1, qwid_dict1 = get_masked_qval_qwid_dict( roi_mask1, Fmask, setup_pargs_, geometry ) \n #w = get_zero_qind_from_roi_mask(roi_mask1,Fmask)\n return qval_dict1, qwid_dict1#,w\n\n\n\ndef get_masked_qval_qwid_dict( roi_mask, mask, setup_pargs, geometry ):\n '''YG Dev April 22, 2019 Get qval_dict, qwid_dict by applying mask to roi_mask '''\n \n qval_dict_, qwid_dict_ = get_qval_qwid_dict( roi_mask, setup_pargs, geometry= geometry) \n w,w1 = get_zero_nozero_qind_from_roi_mask(roi_mask,mask)\n qval_dictx = { k:v for (k,v) in list(qval_dict_.items()) if k not in w }\n qwid_dictx = { k:v for (k,v) in list(qwid_dict_.items()) if k not in w }\n qval_dict={}\n qwid_dict={}\n for i, k in enumerate( list(qval_dictx.keys())):\n qval_dict[i] = qval_dictx[k]\n qwid_dict[i] = qwid_dictx[k] \n return qval_dict, qwid_dict\n\n\ndef get_qval_qwid_dict( roi_mask, setup_pargs, geometry='saxs'):\n '''YG Dev April 6, 2019\n Get qval_dict and qwid_dict by giving roi_mask, setup_pargs\n Input: \n roi_mask: integer type 2D array\n setup_pargs: dict, should at least contains, center (direct beam center), dpix (in mm),\n lamda_: in A-1, Ldet: in mm\n e.g., \n {'Ldet': 1495.0, abs #essential\n 'center': [-4469, 363], #essential\n 'dpix': 0.075000003562308848, #essential\n 'exposuretime': 0.99999702,\n 'lambda_': 0.9686265, #essential\n 'path': '/XF11ID/analysis/2018_1/jianheng/Results/b85dad/',\n 'timeperframe': 1.0,\n 'uid': 'uid=b85dad'}\n geometry: support saxs for isotropic transmission SAXS \n ang_saxs for anisotropic transmission SAXS\n flow_saxs for anisotropic transmission SAXS under flow (center symetric)\n \n Return: \n qval_dict: dict, key as q-number, val: q val\n qwid_dict: dict, key as q-number, val: q width (qmax - qmin)\n \n TODOLIST: to make GiSAXS work \n \n '''\n \n origin = setup_pargs['center']#[::-1]\n shape = roi_mask.shape\n qp_map = radial_grid(origin, shape)\n phi_map = np.degrees( angle_grid(origin, shape) ) \n two_theta = radius_to_twotheta( setup_pargs['Ldet'], setup_pargs['dpix'] * qp_map )\n q_map = utils.twotheta_to_q(two_theta, setup_pargs['lambda_']) \n qind, pixelist = roi.extract_label_indices(roi_mask)\n Qval = np.unique(qind)\n qval_dict_ = {} \n qwid_dict_ = {}\n for j, i in enumerate( Qval):\n qval = q_map[ roi_mask == i ]\n #print( qval )\n if geometry=='saxs':\n qval_dict_[j] = [( qval.max() + qval.min() )/2] # np.mean(qval)\n qwid_dict_[j] = [( qval.max() - qval.min() ) ]\n \n elif geometry=='ang_saxs':\n aval = phi_map[ roi_mask == i ]\n #print(j,i,qval, aval)\n qval_dict_[j] = np.zeros(2)\n qwid_dict_[j] = np.zeros(2) \n \n qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval)\n qwid_dict_[j][0] = ( qval.max() - qval.min() ) \n \n if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ):\n qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval)\n qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) \n #print('here -- %s'%j)\n else: \n qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval)\n qwid_dict_[j][1] = abs( aval.max() - aval.min() )\n \n elif 
geometry=='flow_saxs':\n sx,sy = roi_mask.shape \n cx,cy = origin\n aval = (phi_map[cx:])[ roi_mask[cx:] == i ]\n if len(aval)==0:\n aval = (phi_map[:cx])[ roi_mask[:cx] == i ] + 180 \n \n qval_dict_[j] = np.zeros(2)\n qwid_dict_[j] = np.zeros(2) \n qval_dict_[j][0] = ( qval.max() + qval.min() )/2 # np.mean(qval)\n qwid_dict_[j][0] = ( qval.max() - qval.min() ) \n #print(aval)\n if ( (aval.max() * aval.min())<0 ) & ( aval.max() > 90 ):\n qval_dict_[j][1] = ( aval.max() + aval.min() )/2 -180 # np.mean(qval)\n qwid_dict_[j][1] = abs( aval.max() - aval.min() -360 ) \n #print('here -- %s'%j)\n else: \n qval_dict_[j][1] = ( aval.max() + aval.min() )/2 # np.mean(qval)\n qwid_dict_[j][1] = abs( aval.max() - aval.min() ) \n \n return qval_dict_, qwid_dict_ \n\n\n\ndef get_SG_norm( FD, pixelist, bins=1, mask=None, window_size= 11, order= 5 ):\n '''Get normalization of a time series by SavitzkyGolay filter\n Input:\n FD: file handler for a compressed data\n pixelist: pixel list for a roi_mask\n bins: the bin number for the time series, if number = total number of the time frame, \n it means SG of the time averaged image\n mask: the additional mask\n window_size, order, for the control of SG filter, see chx_generic_functions.py/sgolay2d for details\n Return:\n norm: shape as ( length of FD, length of pixelist ) \n '''\n if mask is None:\n mask = 1\n beg = FD.beg\n end = FD.end\n N = end-beg\n BEG = beg\n if bins==1:\n END = end\n NB = N\n MOD=0\n else: \n END = N//bins \n MOD = N%bins\n NB = END\n norm = np.zeros( [ end, len(pixelist) ] ) \n for i in tqdm( range( NB ) ): \n if bins == 1:\n img = FD.rdframe(i + BEG) \n else: \n for j in range( bins):\n ct = i * bins + j + BEG\n #print(ct)\n if j==0: \n img = FD.rdframe( ct )\n n = 1.0\n else:\n (p,v) = FD.rdrawframe(ct) \n np.ravel( img )[p] += v\n #img += FD.rdframe( ct )\n n += 1\n img /= n \n avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask\n normi = np.ravel(avg_imgf)[pixelist] \n if bins==1:\n norm[i+beg] = normi\n else:\n norm[ i*bins+beg: (i+1)*bins+beg ] = normi\n if MOD: \n for j in range(MOD):\n ct = (1+i) * bins + j + BEG\n if j==0: \n img = FD.rdframe( ct )\n n = 1.0\n else:\n (p,v) = FD.rdrawframe(ct) \n np.ravel( img )[p] += v\n n += 1\n img /= n \n #print(ct,n) \n img = FD.rdframe( ct )\n avg_imgf = sgolay2d( img, window_size= window_size, order= order) * mask\n normi = np.ravel(avg_imgf)[pixelist] \n norm[ (i+1)*bins + beg: (i+2)*bins + beg ] = normi\n return norm\n\ndef shift_mask( new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None ):\n '''Y.G. 
Dev April 2019@CHX to make a new roi_mask by shift and crop the old roi_mask, which is much bigger than the new mask\n Input:\n new_cen: [x,y] in uint of pixel\n new_mask: provide the shape of the new roi_mask and also multiply this mask to the shifted mask\n old_cen: [x,y] in uint of pixel \n old_roi_mask: the roi_mask to be shifted\n limit_qnum: integer, if not None, defines the max number of unique values of nroi_mask \n \n Output:\n the shifted/croped roi_mask \n '''\n nsx,nsy = new_mask.shape\n down, up, left, right = new_cen[0], nsx - new_cen[0], new_cen[1], nsy - new_cen[1]\n x1,x2,y1,y2 = [ old_cen[0] - down, old_cen[0] + up , old_cen[1] - left, old_cen[1] + right ]\n nroi_mask_ = old_roi_mask[ x1:x2, y1:y2 ] * new_mask \n nroi_mask = np.zeros_like( nroi_mask_ ) \n qind, pixelist = roi.extract_label_indices(nroi_mask_)\n qu = np.unique(qind)\n #noqs = len( qu )\n #nopr = np.bincount(qind, minlength=(noqs+1))[1:]\n #qm = nopr>0\n for j, qv in enumerate(qu):\n nroi_mask[nroi_mask_ == qv] = j +1 \n if limit_qnum is not None:\n nroi_mask[ nroi_mask > limit_qnum ]=0 \n return nroi_mask\n\n\ndef plot_q_g2fitpara_general( g2_dict, g2_fitpara, geometry ='saxs', ylim = None,\n plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False,\n show_fit=True, ylabel='g2', qth_interest = None, max_plotnum_fig=1600,qphi_analysis=False,\n *argv,**kwargs): \n '''\n Mar 29,2019, Y.G.@CHX\n \n plot q~fit parameters\n \n Parameters\n ---------- \n qval_dict, dict, with key as roi number,\n format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs\n format as {1: [qr1], 2: [qr2] ...} for saxs\n format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs\n rate: relaxation_rate\n plot_index_range: \n Option:\n if power_variable = False, power =2 to fit q^2~rate, \n Otherwise, power is variable.\n show_fit:, bool, if False, not show the fit\n \n ''' \n \n if 'uid' in kwargs.keys():\n uid_ = kwargs['uid'] \n else:\n uid_ = 'uid' \n if 'path' in kwargs.keys():\n path = kwargs['path'] \n else:\n path = '' \n data_dir = path \n if ylabel=='g2':\n ylabel='g_2'\n if ylabel=='g4':\n ylabel='g_4' \n \n if geometry =='saxs':\n if qphi_analysis:\n geometry = 'ang_saxs' \n \n \n qval_dict_, fit_res_ = g2_dict, g2_fitpara \n \n (qr_label, qz_label, num_qz, num_qr, num_short,\n num_long, short_label, long_label,short_ulabel,\n long_ulabel,ind_long, master_plot,\n mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) \n fps = [] \n \n #print(qr_label, qz_label, short_ulabel, long_ulabel) \n #$print( num_short, num_long )\n beta, relaxation_rate, baseline, alpha = ( g2_fitpara['beta'], \n g2_fitpara['relaxation_rate'],\n g2_fitpara['baseline'],\n g2_fitpara['alpha'] )\n \n fps=[]\n for s_ind in range( num_short ):\n ind_long_i = ind_long[ s_ind ]\n num_long_i = len( ind_long_i ) \n betai, relaxation_ratei, baselinei, alphai = (beta[ind_long_i], relaxation_rate[ind_long_i],\n baseline[ind_long_i], alpha[ind_long_i] )\n qi = long_ulabel\n #print(s_ind, qi, np.array( betai) )\n \n if RUN_GUI:\n fig = Figure(figsize=(10, 12)) \n else:\n #fig = plt.figure( )\n if num_long_i <=4:\n if master_plot != 'qz':\n fig = plt.figure(figsize=(8, 6)) \n else:\n if num_short>1:\n fig = plt.figure(figsize=(8, 4))\n else:\n fig = plt.figure(figsize=(10, 6))\n #print('Here')\n elif num_long_i > max_plotnum_fig:\n num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16\n fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ]\n #print( figsize )\n else:\n #print('Here')\n if master_plot 
!= 'qz':\n fig = plt.figure(figsize=figsize)\n else:\n fig = plt.figure(figsize=(10, 10))\n \n if master_plot == 'qz':\n if geometry=='ang_saxs':\n title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\\circ$' \n elif geometry=='gi_saxs':\n title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\\AA^{-1}$'\n else:\n title_short = '' \n else: #qr\n if geometry=='ang_saxs' or geometry=='gi_saxs':\n title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\\AA^{-1}$' \n else:\n title_short='' \n #print(geometry) \n #filename =''\n til = '%s:--->%s'%(uid_, title_short )\n if num_long_i <=4: \n plt.title( til,fontsize= 14, y =1.15)\n else:\n plt.title( til,fontsize=20, y =1.06) \n #print( num_long ) \n if num_long!=1: \n #print( 'here')\n plt.axis('off') \n #sy = min(num_long_i,4) \n sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) \n\n else: \n sy =1\n sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) )) \n temp = sy\n sy = sx\n sx = temp\n if sx==1:\n if sy==1:\n plt.axis('on') \n ax1 = fig.add_subplot( 4,1,1 )\n ax2 = fig.add_subplot( 4,1,2 )\n ax3 = fig.add_subplot( 4,1,3 )\n ax4 = fig.add_subplot( 4,1,4 ) \n plot1D(x=qi, y=betai, m='o', ls='--', c='k', ax=ax1, legend=r'$\\beta$', title='')\n plot1D(x=qi, y=alphai, m='o', ls='--',c='r', ax=ax2, legend=r'$\\alpha$', title='')\n plot1D(x=qi, y=baselinei, m='o', ls='--', c='g', ax=ax3, legend=r'$baseline$', title='')\n plot1D(x=qi, y=relaxation_ratei, m='o', c='b', ls='--', ax=ax4, legend= r'$\\gamma$ $(s^{-1})$' , title='')\n \n ax4.set_ylabel( r'$\\gamma$ $(s^{-1})$' ) \n ax4.set_xlabel(r\"$q $ $(\\AA)$\", fontsize=16) \n ax3.set_ylabel( r'$baseline' ) \n ax2.set_ylabel( r'$\\alpha$' ) \n ax1.set_ylabel( r'$\\beta$' ) \n fig.tight_layout()\n fp = data_dir + uid_ + 'g2_q_fit_para_%s.png'%short_ulabel[s_ind] \n fig.savefig( fp , dpi=fig.dpi) \n fps.append(fp)\n outputfile = data_dir + '%s_g2_q_fitpara_plot'%uid_ + '.png'\n #print(uid)\n combine_images( fps, outputfile, outsize= [ 2000,2400 ] ) \n \n \n \n\n\ndef plot_q_rate_general( qval_dict, rate, geometry ='saxs', ylim = None, logq=True, lograte=True,\n plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False,\n show_fit=True,\n *argv,**kwargs): \n '''\n Mar 29,2019, Y.G.@CHX\n \n plot q~rate in log-log scale \n \n Parameters\n ---------- \n qval_dict, dict, with key as roi number,\n format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs\n format as {1: [qr1], 2: [qr2] ...} for saxs\n format as {1: [qr1, qa1], 2: [qr2,qa2], ...] 
for ang-saxs\n rate: relaxation_rate\n plot_index_range: \n Option:\n if power_variable = False, power =2 to fit q^2~rate, \n Otherwise, power is variable.\n show_fit:, bool, if False, not show the fit\n \n ''' \n \n if 'uid' in kwargs.keys():\n uid = kwargs['uid'] \n else:\n uid = 'uid' \n if 'path' in kwargs.keys():\n path = kwargs['path'] \n else:\n path = '' \n (qr_label, qz_label, num_qz, num_qr, num_short,\n num_long, short_label, long_label,short_ulabel,\n long_ulabel,ind_long, master_plot,\n mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)\n \n fig,ax = plt.subplots()\n plt.title(r'$Q$''-Rate-%s'%(uid),fontsize=20, y =1.06)\n Nqz = num_short \n if Nqz!=1:\n ls = '--'\n else:\n ls='' \n #print(Nqz)\n for i in range(Nqz):\n ind_long_i = ind_long[ i ] \n y = np.array( rate )[ind_long_i] \n x = long_label[ind_long_i] \n #print(i, x, y, D0 ) \n if Nqz!=1:\n label=r'$q_z=%.5f$'%short_ulabel[i]\n else:\n label=''\n ax.loglog(x, y, marker = 'o', ls =ls, label=label) \n if Nqz!=1:legend = ax.legend(loc='best')\n\n if plot_index_range is not None:\n d1,d2 = plot_index_range\n d2 = min( len(x)-1, d2 ) \n ax.set_xlim( (x**power)[d1], (x**power)[d2] )\n ax.set_ylim( y[d1],y[d2])\n \n if ylim is not None:\n ax.set_ylim( ylim )\n \n ax.set_ylabel('Relaxation rate 'r'$\\gamma$'\"($s^{-1}$) (log)\")\n ax.set_xlabel(\"$q$\"r'($\\AA$) (log)')\n fp = path + '%s_Q_Rate_loglog'%(uid) + '.png'\n fig.savefig( fp, dpi=fig.dpi)\n fig.tight_layout()\n if return_fig:\n return fig,ax\n \n \n \ndef plot_xy_x2( x, y, x2=None, pargs=None, loglog=False, logy=True, fig_ax=None, \n xlabel= 'q ('r'$\\AA^{-1}$)', xlabel2='q (pixel)', title= '_q_Iq',\n ylabel = 'I(q)',save=True, *argv,**kwargs):\n '''YG.@CHX 2019/10/ Plot x, y, x2, if have, will plot as twiny( same y, different x)\n This funciton is primary for plot q-Iq\n \n Input:\n x: one-d array, x in one unit\n y: one-d array, \n x2:one-d array, x in anoter unit\n pargs: dict, could include 'uid', 'path'\n loglog: if True, if plot x and y in log, by default plot in y-log\n save: if True, save the plot in the path defined in pargs\n kwargs: could include xlim (in unit of index), ylim (in unit of real value)\n \n '''\n if fig_ax is None:\n fig, ax1 = plt.subplots()\n else:\n fig,ax1=fig_ax\n if pargs is not None:\n uid = pargs['uid']\n path = pargs['path']\n else:\n uid='XXX'\n path='' \n if loglog: \n ax1.loglog( x,y, '-o') \n elif logy: \n ax1.semilogy( x,y, '-o') \n else: \n ax1.plot( x,y, '-o') \n ax1.set_xlabel( xlabel ) \n ax1.set_ylabel( ylabel )\n title = ax1.set_title( '%s--'%uid + title) \n Nx= len(x)\n if 'xlim' in kwargs.keys():\n xlim = kwargs['xlim']\n if xlim[1]>Nx:\n xlim[1]=Nx-1\n else:\n xlim=[ 0, Nx]\n if 'ylim' in kwargs.keys():\n ylim = kwargs['ylim']\n else:\n ylim=[y.min(), y.max()] \n lx1,lx2=xlim \n ax1.set_xlim( [ x[lx1], x[lx2] ] ) \n ax1.set_ylim( ylim ) \n if x2 is not None:\n ax2 = ax1.twiny() \n ax2.set_xlabel( xlabel2 ) \n ax2.set_ylabel( ylabel )\n ax2.set_xlim( [ x2[lx1], x2[lx2] ] ) \n title.set_y(1.1)\n fig.subplots_adjust(top=0.85)\n if save:\n path = pargs['path']\n fp = path + '%s_q_Iq'%uid + '.png' \n fig.savefig( fp, dpi=fig.dpi)\n\n \n \n \ndef save_oavs_tifs( uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1,threshold = 0 ):\n '''save oavs as png'''\n tifs = list( db[uid].data( 'OAV_image') )[0] \n try:\n pixel_scalebar=np.ceil(scalebar_size/md['OAV resolution um_pixel'])\n except:\n pixel_scalebar=None\n print('No OAVS resolution is available.')\n \n text_string='%s 
$\\mu$m'%scalebar_size\n h = db[uid]\n oavs=tifs\n\n oav_period=h['descriptors'][0]['configuration']['OAV']['data']['OAV_cam_acquire_period']\n oav_expt=h['descriptors'][0]['configuration']['OAV']['data']['OAV_cam_acquire_time']\n oav_times=[]\n for i in range(len(oavs)):\n oav_times.append(oav_expt+i*oav_period)\n fig=plt.subplots(int(np.ceil(len(oavs)/3)),3,figsize=(3*5.08,int(np.ceil(len(oavs)/3))*4))\n for m in range(len(oavs)):\n plt.subplot(int(np.ceil(len(oavs)/3)),3,m+1)\n #plt.subplots(figsize=(5.2,4))\n img = oavs[m]\n try:\n ind = np.flipud(img*scale)[:,:,2] < threshold\n except:\n ind = np.flipud(img*scale) < threshold\n rgb_cont_img=np.copy(np.flipud(img))\n #rgb_cont_img[ind,0]=1000\n if brightness_scale !=1:\n rgb_cont_img=scale_rgb(rgb_cont_img,scale=brightness_scale)\n\n plt.imshow(rgb_cont_img,interpolation='none',resample=True, cmap = 'gray')\n plt.axis('equal')\n cross=[685,440,50] # definintion of direct beam: x, y, size\n plt.plot([cross[0]-cross[2]/2,cross[0]+cross[2]/2],[cross[1],cross[1]],'r-')\n plt.plot([cross[0],cross[0]],[cross[1]-cross[2]/2,cross[1]+cross[2]/2],'r-')\n if pixel_scalebar is not None:\n plt.plot([1100,1100+pixel_scalebar],[150,150],'r-',Linewidth=5) # scale bar.\n plt.text(1000,50,text_string,fontsize=14,color='r')\n plt.text(600,50,str(oav_times[m])[:5]+' [s]',fontsize=14,color='r') \n plt.axis('off')\n plt.savefig( data_dir + 'uid=%s_OVA_images.png'%uid) \n \n \n \n \n \ndef shift_mask_old( mask, shiftx, shifty):\n '''YG Dev Feb 4@CHX create new mask by shift mask in x and y direction with unit in pixel \n Input:\n mask: int-type array, \n shiftx: int scalar, shift value in x direction with unit in pixel \n shifty: int scalar, shift value in y direction with unit in pixel \n Output:\n maskn: int-type array, shifted mask \n \n '''\n qind, pixelist = roi.extract_label_indices( mask )\n dims = mask.shape\n imgwidthy = dims[1] #dimension in y, but in plot being x\n imgwidthx = dims[0] #dimension in x, but in plot being y\n pixely = pixelist%imgwidthy\n pixelx = pixelist//imgwidthy\n pixelyn = pixely + shiftx\n pixelxn = pixelx + shifty\n w = (pixelyn < imgwidthy ) & (pixelyn >= 0 ) & (pixelxn < imgwidthx ) & (pixelxn >= 0 ) \n pixelist_new = pixelxn[w] * imgwidthy + pixelyn[w]\n maskn = np.zeros_like( mask )\n maskn.ravel()[pixelist_new] = qind[w] \n return maskn\n\n\ndef get_current_time():\n '''get current time in a fomart of year/month/date/hour(24)/min/sec/,\n e.g. 
2009-01-05 22:14:39\n '''\n loc_dt = datetime.datetime.now(pytz.timezone('US/Eastern'))\n fmt = \"%Y-%m-%d %H:%M:%S\" \n return loc_dt.strftime(fmt)\n\n\n\ndef evalue_array( array, verbose = True ):\n '''Y.G., Dev Nov 1, 2018 Get min, max, avg, std of an array '''\n _min, _max, avg, std = np.min( array), np.max( array), np.average( array ), np.std( array )\n if verbose:\n print( 'The min, max, avg, std of this array are: %s %s %s %s, respectively.'%(_min, _max, avg, std ) )\n return _min, _max, avg, std\n \n \n\ndef find_good_xpcs_uids( fuids, Nlim=100, det = [ '4m', '1m', '500'] ):\n '''Y.G., Dev Nov 1, 2018 Find the good xpcs series\n Input:\n fuids: list, a list of full uids\n Nlim: integer, the smallest number of images to be considered as XCPS sereis\n det: list, a list of detector (can be short string of the full name of the detector)\n Return:\n the xpcs uids list\n \n '''\n guids = []\n for i, uid in enumerate(fuids):\n if db[uid]['start']['plan_name'] == 'count' or db[uid]['start']['plan_name'] == 'manual_count': \n head = db[uid]['start']\n for dec in head['detectors']:\n for dt in det:\n if dt in dec:\n if 'number of images' in head:\n if float(head['number of images'] ) >= Nlim:\n #print(i, uid)\n guids.append(uid)\n G = np.unique( guids ) \n print('Found %s uids for XPCS series.'%len(G) ) \n return G\n\n\ndef create_fullImg_with_box( shape, box_nx = 9 , box_ny = 8, ):\n '''Y.G. 2018/10/26 Divide image with multi touched boxes\n Input\n shape: the shape of image\n box_nx: the number of box in x\n box_ny: the number width of box in y\n Return:\n roi_mask, (* mask )\n '''\n \n #shape = mask.shape\n Wrow, Wcol = int( np.ceil( shape[0]/box_nx )), int(np.ceil(shape[1]/box_ny) )\n #print(Wrow, Wcol)\n roi_mask = np.zeros( shape, dtype=np.int32 )\n for i in range( box_nx ):\n for j in range(box_ny): \n roi_mask[ i*Wrow: (i+1)*Wrow , j*Wcol: (j+1)*Wcol ] = i * box_ny + j + 1\n #roi_mask *= mask \n return roi_mask\n\n\n\ndef get_refl_y0( inc_ang, inc_y0, Ldet, pixel_size, ): \n ''' Get reflection beam center y\n Input:\n inc_ang: incident angle in degree\n inc_y0: incident beam y center in pixel\n Ldet: sample to detector distance in meter\n pixel_size: pixel size in meter\n Return: reflection beam center y in pixel \n '''\n return Ldet * np.tan( np.radians(inc_ang)) * 2 / pixel_size + inc_y0\n \n \ndef lin2log_g2(lin_tau,lin_g2,num_points=False):\n \"\"\"\n Lutz developed at Aug,2018\n function to resample g2 with linear time steps into logarithmics\n g2 values between consecutive logarthmic time steps are averaged to increase statistics\n calling sequence: lin2log_g2(lin_tau,lin_g2,num_points=False)\n num_points=False -> determine number of logortihmically sampled time points automatically (8 pts./decade)\n num_points=18 -> use 18 logarithmically spaced time points\n \"\"\"\n #prep taus and g2s: remove nan and first data point at tau=0\n rem = lin_tau==0\n #print('lin_tau: '+str(lin_tau.size))\n #print('lin_g2: '+str(lin_g2.size))\n lin_tau[rem]=np.nan\n #lin_tau[0]=np.nan;#lin_g2[0]=np.nan\n lin_g2 = lin_g2[np.isfinite(lin_tau)]\n lin_tau = lin_tau[np.isfinite(lin_tau)]\n #print('from lin-to-log-g2_sampling: ',lin_tau)\n if num_points == False:\n # automatically decide how many log-points (8/decade)\n dec=np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8)\n else:\n dec=num_points\n log_tau=np.logspace(np.log10(lin_tau[0]),np.log10(lin_tau.max()),dec)\n # re-sample correlation function:\n log_g2=[]\n for i in range(log_tau.size-1):\n 
y=[i,log_tau[i]-(log_tau[i+1]-log_tau[i])/2,log_tau[i]+(log_tau[i+1]-log_tau[i])/2]\n #x=lin_tau[lin_tau>y[1]]\n x1=lin_tau>y[1]; x2=lin_tau<y[2]; x=x1*x2\n #print(np.average(lin_g2[x]))\n if np.isfinite(np.average(lin_g2[x])):\n log_g2.append(np.average(lin_g2[x]))\n else:\n log_g2.append(np.interp(log_tau[i],lin_tau,lin_g2))\n if i == log_tau.size-2:\n #print(log_tau[i+1])\n y=[i+1,log_tau[i+1]-(log_tau[i+1]-log_tau[i])/2,log_tau[i+1]]\n x1=lin_tau>y[1]; x2=lin_tau<y[2]; x=x1*x2\n log_g2.append(np.average(lin_g2[x]))\n return [log_tau,log_g2]\n\n\n\ndef get_eigerImage_per_file( data_fullpath ):\n f= h5py.File(data_fullpath) \n dset_keys = list(f['/entry/data'].keys())\n dset_keys.sort()\n dset_root=\"/entry/data\"\n dset_keys = [dset_root + \"/\" + dset_key for dset_key in dset_keys]\n dset = f[dset_keys[0]]\n return len(dset)\n\ndef copy_data( old_path, new_path = '/tmp_data/data/' ): \n '''YG Dev July@CHX\n Copy Eiger file containing master and data files to a new path\n old_path: the full path of the Eiger master file\n new_path: the new path\n \n '''\n import shutil,glob\n #old_path = sud[2][0]\n #new_path = '/tmp_data/data/'\n fps = glob.glob( old_path[:-10] + '*' )\n for fp in tqdm(fps):\n if not os.path.exists( new_path + os.path.basename(fp)):\n shutil.copy( fp, new_path )\n print('The files %s are copied: %s.'%( old_path[:-10] + '*' , new_path + os.path.basename(fp) ) )\n \ndef delete_data( old_path, new_path = '/tmp_data/data/' ):\n '''YG Dev July@CHX\n Delete copied Eiger file containing master and data in a new path\n old_path: the full path of the Eiger master file\n new_path: the new path\n ''' \n import shutil,glob\n #old_path = sud[2][0]\n #new_path = '/tmp_data/data/'\n fps = glob.glob( old_path[:-10] + '*' )\n for fp in tqdm(fps): \n nfp = new_path + os.path.basename(fp)\n if os.path.exists( nfp ):\n os.remove( nfp ) \n \n \ndef show_tif_series( tif_series, Nx=None, center=None, w= 50, vmin=None, vmax= None, cmap = cmap_vge_hdr,\n logs=False, figsize=[10,16] ):\n '''\n tif_series: list of 2D tiff images\n Nx: the number in the row for dispalying\n center: the center of iamge (or direct beam pixel)\n w: the ROI half size in pixel\n vmin: the min intensity value for plot\n vmax: if None, will be max intensity value of the ROI \n figsize: size of the plot (in inch)\n \n '''\n \n if center is not None:\n cy,cx = center\n #infs = sorted(sample_list)\n N = len( tif_series )\n if Nx is None:\n sy = int( np.sqrt(N)) \n else:\n sy = Nx\n sx = int( np.ceil( N/sy ) ) \n fig = plt.figure( figsize =figsize ) \n for i in range( N ):\n #print(i)\n ax = fig.add_subplot( sx, sy, i+1)\n #d = (np.array( PIL.Image.open( infs[i] ).convert('I') ))[ cy-w:cy+w, cx-w:cx+w ]\n d = tif_series[i][::-1]\n #vmax= np.max(d)\n #pritn(vmax)\n #vmin= 10#np.min(d)\n show_img( d, logs = logs, show_colorbar= False,show_ticks =False,\n ax= [fig, ax], image_name= '%02d'%(i+1), cmap = cmap, \n vmin= vmin, vmax= vmax, \n aspect=1, save=False, path=None)\n return fig, ax\n \n\n\n\n\nfrom scipy.special import erf\ndef ps( y,shift=.5, replot=True, logplot='off', x= None):\n '''\n Dev 16, 2018\n Modified ps() function in 95-utilities.py\n function to determine statistic on line profile (assumes either peak or erf-profile)\n Input:\n y: 1D array, the data for analysis\n shift: scale for peak presence (0.5 -> peak has to be taller factor 2 above background)\n replot: if True, will plot data (if error func) with the fit and peak/cen/com position\n logplot: if on, will plot in log scale\n x: if not None, give x-data\n 
\n \n '''\n if x is None:\n x = np.arange( len(y) )\n x=np.array(x)\n y=np.array(y)\n \n PEAK=x[np.argmax(y)]\n PEAK_y=np.max(y)\n COM=np.sum(x * y) / np.sum(y) \n ### from Maksim: assume this is a peak profile:\n def is_positive(num):\n return True if num > 0 else False\n # Normalize values first:\n ym = (y - np.min(y)) / (np.max(y) - np.min(y)) - shift # roots are at Y=0\n positive = is_positive(ym[0])\n list_of_roots = []\n for i in range(len(y)):\n current_positive = is_positive(ym[i])\n if current_positive != positive:\n list_of_roots.append(x[i - 1] + (x[i] - x[i - 1]) / (abs(ym[i]) + abs(ym[i - 1])) * abs(ym[i - 1]))\n positive = not positive\n if len(list_of_roots) >= 2:\n FWHM=abs(list_of_roots[-1] - list_of_roots[0])\n CEN=list_of_roots[0]+0.5*(list_of_roots[1]-list_of_roots[0])\n ps.fwhm=FWHM\n ps.cen=CEN\n yf=ym\n #return {\n # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]),\n # 'x_range': list_of_roots,\n #}\n else: # ok, maybe it's a step function..\n #print('no peak...trying step function...') \n ym = ym + shift\n def err_func(x, x0, k=2, A=1, base=0 ): #### erf fit from Yugang\n return base - A * erf(k*(x-x0))\n mod = Model( err_func )\n ### estimate starting values:\n x0=np.mean(x)\n #k=0.1*(np.max(x)-np.min(x))\n pars = mod.make_params( x0=x0, k=2, A = 1., base = 0. ) \n result = mod.fit(ym, pars, x = x )\n CEN=result.best_values['x0']\n FWHM = result.best_values['k']\n A = result.best_values['A']\n b = result.best_values['base'] \n yf_ = err_func(x, CEN, k=FWHM, A=A, base=b ) #result.best_fit\n yf = (yf_ ) * (np.max(y) - np.min(y)) + np.min(y)\n \n #(y - np.min(y)) / (np.max(y) - np.min(y)) - shift\n \n \n ps.cen = CEN\n ps.fwhm = FWHM\n \n if replot:\n ### re-plot results: \n if logplot=='on':\n fig, ax = plt.subplots() #plt.figure() \n ax.semilogy([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK')\n ax.hold(True)\n ax.semilogy([CEN,CEN],[np.min(y),np.max(y)],'r-.',label='CEN')\n ax.semilogy([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM')\n ax.semilogy(x,y,'bo-')\n #plt.xlabel(field);plt.ylabel(intensity_field)\n ax.legend()\n #plt.title('uid: '+str(uid)+' @ '+str(t)+'\\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9)\n #plt.show() \n else:\n #plt.close(999)\n fig, ax = plt.subplots() #plt.figure()\n ax.plot([PEAK,PEAK],[np.min(y),np.max(y)],'k--',label='PEAK')\n \n #ax.hold(True)\n ax.plot([CEN,CEN],[np.min(y),np.max(y)],'m-.',label='CEN')\n ax.plot([COM,COM],[np.min(y),np.max(y)],'g.-.',label='COM')\n ax.plot(x,y,'bo--')\n ax.plot(x,yf,'r-', label='Fit')\n \n #plt.xlabel(field);plt.ylabel(intensity_field)\n ax.legend()\n #plt.title('uid: '+str(uid)+' @ '+str(t)+'\\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9)\n #plt.show()\n\n ### assign values of interest as function attributes:\n ps.peak=PEAK\n ps.com=COM\n return ps.cen\n\n\n\n\n\n\n\n\n\ndef create_seg_ring( ring_edges, ang_edges, mask, setup_pargs ):\n '''YG Dev April 6, 2018\n Create segment ring mask\n Input:\n ring_edges: edges of rings (in pixel), e.g., [ [320,340], [450, 460], ]\n ang_edges: edges of angles, e.g., [ [20,40], [50, 60], ]\n mask: bool type 2D array\n set_pargs: dict, should at least contains, center\n e.g., \n {'Ldet': 1495.0, abs #essential\n 'center': [-4469, 363], #essential\n 'dpix': 0.075000003562308848, #essential\n 'exposuretime': 0.99999702,\n 'lambda_': 0.9686265, #essential\n 'path': 
'/XF11ID/analysis/2018_1/jianheng/Results/b85dad/',\n 'timeperframe': 1.0,\n 'uid': 'uid=b85dad'}\n Return:\n roi_mask: segmented ring mask: two-D array\n qval_dict: dict, key as q-number, val: q val\n \n '''\n \n roi_mask_qr, qr, qr_edge = get_ring_mask(mask, inner_radius= None, outer_radius = None, \n width = None, num_rings = None, edges= np.array( ring_edges), unit='pixel',\n pargs= setup_pargs) \n \n roi_mask_ang, ang_center, ang_edge = get_angular_mask( mask, inner_angle= None, \n outer_angle = None, width = None, edges = np.array( ang_edges ),\n num_angles = None, center = center, flow_geometry= False )\n \n \n roi_mask, good_ind = combine_two_roi_mask( roi_mask_qr, roi_mask_ang,pixel_num_thres=100) \n qval_dict_ = get_qval_dict( qr_center = qr, qz_center = ang_center,one_qz_multi_qr=False)\n qval_dict = { i:qval_dict_[k] for (i,k) in enumerate( good_ind) } \n return roi_mask, qval_dict\n\n\n\n\ndef find_bad_pixels_FD( bad_frame_list, FD, img_shape = [514, 1030], \n threshold= 15, show_progress=True):\n '''Designed to find bad pixel list in 500K\n threshold: the max intensity in 5K \n '''\n bad = np.zeros( img_shape, dtype=bool )\n if show_progress:\n for i in tqdm(bad_frame_list[ bad_frame_list>=FD.beg]):\n p,v = FD.rdrawframe(i)\n w = np.where( v > threshold)[0]\n bad.ravel()[ p[w] ] = 1\n # x,y = np.where( imgsa[i] > threshold)\n # bad[x[0],y[0]] = 1 \n else:\n for i in bad_frame_list[ bad_frame_list>=FD.beg]:\n p,v = FD.rdrawframe(i)\n w = np.where( v > threshold)[0]\n bad.ravel()[ p[w] ] = 1 \n \n return ~bad\n\n\ndef get_q_iq_using_dynamic_mask( FD, mask, setup_pargs, bin_number=1, threshold=15 ):\n '''DEV by Yugang@CHX, June 6, 2019\n Get circular average of a time series using a dynamics mask, which pixel values are defined as\n zeors if above a threshold. 
\n Return an averaged q(pix)-Iq-q(A-1) of the whole time series using bin frames with bin_number\n Input:\n FD: the multifile handler for the time series\n mask: a two-d bool type array\n setup_pargs: dict, parameters of setup for calculate q-Iq\n should have keys as \n 'dpix', 'Ldet','lambda_', 'center'\n bin_number: bin number of the frame\n threshold: define the dynamics mask, which pixel values are defined as\n zeors if above this threshold\n Output:\n qp_saxs: q in pixel\n iq_saxs: intenstity\n q_saxs: q in A-1\n '''\n beg = FD.beg\n end = FD.end\n shape = FD.rdframe(beg).shape\n Nimg_ = FD.end-FD.beg \n #Nimg_ = 100\n Nimg = Nimg_//bin_number\n time_edge = np.array(create_time_slice( N= Nimg_, \n slice_num= Nimg, slice_width= bin_number )) + beg \n for n in tqdm( range(Nimg) ): \n t1,t2 = time_edge[n]\n #print(t1,t2)\n if bin_number==1:\n avg_imgi = FD.rdframe(t1)\n else: \n avg_imgi = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1, \n plot_ = False,show_progress= False) \n badpi = find_bad_pixels_FD( np.arange(t1,t2) , FD, \n img_shape = avg_imgi.shape, threshold= threshold, show_progress=False )\n img = avg_imgi* mask * badpi\n qp_saxsi, iq_saxsi, q_saxsi = get_circular_average( img, \n mask * badpi, save= False,\n pargs=setup_pargs )\n #print( img.max())\n if t1==FD.beg:\n qp_saxs, iq_saxs, q_saxs = np.zeros_like( qp_saxsi ), np.zeros_like( iq_saxsi ), np.zeros_like( q_saxsi )\n qp_saxs += qp_saxsi\n iq_saxs += iq_saxsi\n q_saxs += q_saxsi\n qp_saxs /= Nimg\n iq_saxs /= Nimg\n q_saxs /= Nimg\n \n return qp_saxs, iq_saxs, q_saxs\n\ndef get_waxs_beam_center( gamma, origin = [432, 363], Ldet = 1495, pixel_size = 75 * 1e-3 ):\n '''YG Feb 10, 2018\n Calculate beam center for WAXS geometry by giving beam center at gamma=0 and the target gamma\n Input:\n gamma: angle in degree\n Ldet: sample to detector distance, 1495 mm for CHX WAXS\n origin: beam center for gamma = 0, (python x,y coordinate in pixel)\n pxiel size: 75 * 1e-3 mm for Eiger 1M\n output:\n beam center: for the target gamma, in pixel \n '''\n return [ np.int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ] \n\n\n\ndef get_img_from_iq( qp, iq, img_shape, center):\n '''YG Jan 24, 2018\n Get image from circular average \n Input:\n qp: q in pixel unit\n iq: circular average\n image_shape, e.g., [256,256]\n center: [center_y, center_x] e.g., [120, 200]\n Output:\n img: recovered image\n '''\n pixelist = np.arange( img_shape[0] * img_shape[1] )\n pixely = pixelist%img_shape[1] -center[1] \n pixelx = pixelist//img_shape[1] - center[0] \n r= np.hypot(pixelx, pixely) #leave as float.\n #r= np.int_( np.hypot(pixelx, pixely) +0.5 ) + 0.5 \n return (np.interp( r, qp, iq )).reshape( img_shape )\n\n\ndef average_array_withNan( array, axis=0, mask=None):\n '''YG. Jan 23, 2018\n Average array invovling np.nan along axis \n \n Input:\n array: ND array, actually should be oneD or twoD at this stage..TODOLIST for ND\n axis: the average axis\n mask: bool, same shape as array, if None, will mask all the nan values \n Output:\n avg: averaged array along axis\n '''\n shape = array.shape\n if mask is None:\n mask = np.isnan(array)\n #mask = np.ma.masked_invalid(array).mask \n array_ = np.ma.masked_array(array, mask=mask) \n try:\n sums = np.array( np.ma.sum( array_[:,:], axis= axis ) )\n except:\n sums = np.array( np.ma.sum( array_[:], axis= axis ) )\n \n cts = np.sum(~mask,axis=axis)\n #print(cts)\n return sums/cts\n\ndef deviation_array_withNan( array, axis=0, mask=None):\n '''YG. 
Jan 23, 2018\n Get the deviation of array invovling np.nan along axis \n \n Input:\n array: ND array\n axis: the average axis\n mask: bool, same shape as array, if None, will mask all the nan values \n Output:\n dev: the deviation of array along axis\n '''\n avg2 = average_array_withNan( array**2, axis = axis, mask = mask )\n avg = average_array_withNan( array, axis = axis, mask = mask )\n return np.sqrt( avg2 - avg**2 )\n\n\n\ndef refine_roi_mask( roi_mask, pixel_num_thres=10):\n '''YG Dev Jan20,2018\n remove bad roi which pixel numbe is lower pixel_num_thres \n roi_mask: array, \n pixel_num_thres: integer, the low limit pixel number in each roi of the combined mask, \n i.e., if the pixel number in one roi of the combined mask smaller than pixel_num_thres,\n that roi will be considered as bad one and be removed. \n ''' \n new_mask = np.zeros_like( roi_mask )\n qind, pixelist = roi.extract_label_indices(roi_mask)\n noqs = len(np.unique(qind))\n nopr = np.bincount(qind, minlength=(noqs+1))[1:] \n good_ind = np.where( nopr >= pixel_num_thres)[0] +1 \n l = len(good_ind) \n new_ind = np.arange( 1, l+1 )\n for i, gi in enumerate( good_ind ):\n new_mask.ravel()[ \n np.where( roi_mask.ravel() == gi)[0] ] = new_ind[i] \n return new_mask, good_ind -1\n\ndef shrink_image_stack( imgs, bins):\n '''shrink imgs by bins\n imgs: shape as [Nimg, imx, imy] '''\n Nimg, imx, imy = imgs.shape\n bx, by = bins\n imgsk = np.zeros( [Nimg, imx//bx, imy//by] )\n N = len(imgs)\n for i in range(N):\n imgsk[i] = shrink_image(imgs[i], bins )\n return imgsk\n \ndef shrink_image(img, bins ):\n '''YG Dec 12, 2017 dev@CHX shrink a two-d image by factor as bins, i.e., bins_x, bins_y\n input:\n img: 2d array, \n bins: integer list, eg. [2,2]\n output:\n imgb: binned img\n '''\n m,n = img.shape\n bx, by = bins\n Nx, Ny = m//bx, n//by\n #print(Nx*bx, Ny*by)\n return img[:Nx*bx, :Ny*by].reshape( Nx,bx, Ny, by).mean(axis=(1,3) )\n \n \ndef get_diff_fv( g2_fit_paras, qval_dict, ang_init=137.2):\n '''YG@CHX Nov 9,2017\n Get flow velocity and diff from g2_fit_paras '''\n g2_fit_para_ = g2_fit_paras.copy()\n qr = np.array( [qval_dict[k][0] for k in sorted( qval_dict.keys())] )\n qang = np.array( [qval_dict[k][1] for k in sorted( qval_dict.keys())] )\n #x=g2_fit_para_.pop( 'relaxation_rate' )\n #x=g2_fit_para_.pop( 'flow_velocity' )\n g2_fit_para_['diff'] = g2_fit_paras[ 'relaxation_rate' ]/qr**2\n cos_part = np.abs( np.cos( np.radians( qang - ang_init)) )\n g2_fit_para_['fv'] = g2_fit_paras[ 'flow_velocity' ]/cos_part/qr\n return g2_fit_para_\n\n\n \n \n# function to get indices of local extrema (=indices of speckle echo maximum amplitudes):\ndef get_echos(dat_arr,min_distance=10):\n \"\"\"\n getting local maxima and minima from 1D data -> e.g. speckle echos\n strategy: using peak_local_max (from skimage) with min_distance parameter to find well defined local maxima\n using np.argmin to find absolute minima between relative maxima\n returns [max_ind,min_ind] -> lists of indices corresponding to local maxima/minima\n by LW 10/23/2018\n \"\"\"\n from skimage.feature import peak_local_max\n max_ind=peak_local_max(dat_arr, min_distance) # !!! 
careful, skimage function reverses the order (wtf?)\n min_ind=[]\n for i in range(len(max_ind[:-1])):\n min_ind.append(max_ind[i+1][0]+np.argmin(dat_arr[max_ind[i+1][0]:max_ind[i][0]]))\n #unfortunately, skimage function fu$$s up the format: max_ind is an array of a list of lists...fix this:\n mmax_ind=[]\n for l in max_ind:\n mmax_ind.append(l[0])\n #return [mmax_ind,min_ind]\n return [list(reversed(mmax_ind)),list(reversed(min_ind))]\n\n \ndef pad_length(arr,pad_val=np.nan):\n \"\"\"\n arr: 2D matrix\n pad_val: values being padded\n adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix\n -> used to convert python generic data object to HDF5 native format\n function fixes python bug in padding (np.pad) integer array with np.nan\n by LW 12/30/2017\n \"\"\"\n max_len=[]\n for i in range(np.shape(arr)[0]):\n #print(np.size(arr[i]))\n max_len.append([np.size(arr[i])])\n #print(max_len)\n max_len=np.max(max_len)\n for l in range(np.shape(arr)[0]):\n arr[l]=np.pad(arr[l]*1.,(0,max_len-np.size(arr[l])),mode='constant',constant_values=pad_val)\n return arr\n\n\n\ndef save_array_to_tiff(array, output, verbose=True):\n '''Y.G. Nov 1, 2017\n Save array to a tif file\n '''\n img = PIL.Image.fromarray(array) \n img.save( output ) \n if verbose:\n print( 'The data is save to: %s.'%( output ))\n \n \n \ndef load_pilatus(filename):\n '''Y.G. Nov 1, 2017\n Load a pilatus 2D image\n ''' \n return np.array( PIL.Image.open(filename).convert('I') )\n \ndef ls_dir(inDir, have_list=[], exclude_list=[] ):\n '''Y.G. Aug 1, 2019\n List all filenames in a filefolder \n inDir: fullpath of the inDir\n have_string: only retrun filename containing the string\n exclude_string: only retrun filename not containing the string \n \n '''\n from os import listdir\n from os.path import isfile, join\n\n tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] )\n tifs_ = []\n for tif in tifs:\n flag=1\n for string in have_list: \n if string not in tif:\n flag *=0\n for string in exclude_list:\n if string in tif:\n flag *=0 \n if flag:\n tifs_.append( tif )\n \n return np.array( tifs_ )\n\n\ndef ls_dir2(inDir, string=None):\n '''Y.G. Nov 1, 2017\n List all filenames in a filefolder (not include hidden files and subfolders)\n inDir: fullpath of the inDir\n string: if not None, only retrun filename containing the string\n '''\n from os import listdir\n from os.path import isfile, join\n if string is None:\n tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] )\n else:\n tifs = np.array( [f for f in listdir(inDir) if (isfile(join(inDir, f)))&(string in f) ] ) \n return tifs\n \ndef re_filename( old_filename, new_filename, inDir=None, verbose=True ):\n '''Y.G. Nov 28, 2017\n Rename old_filename with new_filename in a inDir \n inDir: fullpath of the inDir, if None, the filename should have the fullpath\n old_filename/ new_filename: string \n an example:\n re_filename( 'uid=run20_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png', \n 'uid=run17_pos1_fra_5_20000_tbins=0.010_ms_g2_two_g2.png',\n '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/'\n )\n '''\n if inDir is not None:\n os.rename(inDir + old_filename, inDir+new_filename)\n else:\n os.rename( old_filename, new_filename)\n print('The file: %s is changed to: %s.'%(old_filename, new_filename))\n \n \ndef re_filename_dir( old_pattern, new_pattern, inDir,verbose=True ):\n '''Y.G. 
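Nov 28, 2017\n    (A related sketch for ls_dir above, with illustrative patterns:\n    fps = ls_dir( inDir, have_list=['.tif'], exclude_list=['bad'] )  # only filenames containing '.tif' and not containing 'bad'\n    )\n    Y.G. 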
Nov 28, 2017\n Rename all filenames with old_pattern with new_pattern in a inDir \n inDir: fullpath of the inDir, if None, the filename should have the fullpath\n old_pattern, new_pattern\n an example,\n re_filename_dir('20_', '17_', inDir )\n '''\n fps = ls_dir(inDir)\n for fp in fps:\n if old_pattern in fp:\n old_filename = fp\n new_filename = fp.replace(old_pattern, new_pattern)\n re_filename( old_filename, new_filename, inDir,verbose= verbose ) \n \ndef get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent=True, qprecision=5):\n \"\"\"\n function to return roi number from qval_dict, corresponding Q and phi, lists (sets) of all available Qs and phis\n [roi_nr,Q,phi,Q_list,phi_list]=get_roi_nr(..)\n calling sequence: get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False, verbose=True)\n qdict: qval_dict from analysis pipeline/hdf5 result file\n q: q of interest, can be either value (q_nr=False) or q-number (q_nr=True)\n q_thresh: threshold for comparing Q-values, set to 0 for exact comparison\n phi: phi of interest, can be either value (phi_nr=False) or q-number (phi_nr=True)\n p_thresh: threshold for comparing phi values, set to 0 for exact comparison\n silent=True/False: Don't/Do print lists of available qs and phis, q and phi of interest\n by LW 10/21/2017\n update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required)\n update 2019/09/28 add qprecision to get unique Q\n \"\"\"\n qs=[]\n phis=[]\n for i in qdict.keys():\n qs.append(qdict[i][0])\n phis.append(qdict[i][1])\n from collections import OrderedDict\n\n qslist=list(OrderedDict.fromkeys(qs))\n qslist = np.unique( np.round(qslist, qprecision ) )\n phislist=list(OrderedDict.fromkeys(phis))\n qslist=list(np.sort(qslist))\n #print('Q_list: %s'%qslist)\n phislist=list(np.sort(phislist))\n if q_nr:\n qinterest=qslist[q]\n #qindices = [i for i,x in enumerate(qs) if x == qinterest]\n qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh]\n #print('q_indicies: ',qindices)\n else: \n qinterest=q\n qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new\n if phi_nr:\n phiinterest=phislist[phi]\n phiindices = [i for i,x in enumerate(phis) if x == phiinterest]\n else:\n phiinterest=phi\n phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new\n #print('phi: %s phi_index: %s'%(phiinterest,phiindices))\n #qindices = [i for i,x in enumerate(qs) if x == qinterest]\n #phiindices = [i for i,x in enumerate(phis) if x == phiinterest]\n ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist]\n if silent == False:\n print('list of available Qs:')\n print(qslist)\n print('list of available phis:')\n print(phislist)\n print('Roi number for Q= '+str(ret_list[1])+' and phi= '+str(ret_list[2])+': '+str(ret_list[0]))\n return ret_list\n \ndef get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2=None, xrange=None, ):\n '''YG Octo 16,2017 Fit a curve with two linear func, the curve is splitted by mid_xpoint, \n namely, fit the curve in two regions defined by (xmin,mid_xpoint ) and (mid_xpoint2, xmax) \n Input:\n x: 1D np.array\n y: 1D np.array\n mid_xpoint: float, the middle point of x \n xrange: [x1,x2]\n Return:\n D1, gmfit1, D2, gmfit2 :\n fit parameter (slope, background) of linear fit1\n convinent fit class, gmfit1(x) gives yvale\n fit parameter (slope, background) of linear fit2\n convinent fit class, gmfit2(x) gives yvale \n \n '''\n if xrange is None:\n x1,x2 = min(x), 
max(x)\n    else:\n        x1,x2 = xrange \n    if mid_xpoint2 is None:\n        mid_xpoint2= mid_xpoint1\n    D1, gmfit1 = linear_fit( x,y, xrange= [ x1,mid_xpoint1 ]) \n    D2, gmfit2 = linear_fit( x,y, xrange= [mid_xpoint2, x2 ])\n    return D1, gmfit1, D2, gmfit2 \n\ndef get_cross_point( x, gmfit1, gmfit2 ): \n    '''YG Octo 16,2017 \n    Get the cross point of two fitted curves\n    ''' \n    y1 = gmfit1(x)\n    y2 = gmfit2(x)\n    return x[np.argmin( np.abs(y1-y2) )]\n    \ndef get_curve_turning_points( x, y, mid_xpoint1, mid_xpoint2=None, xrange=None, ):\n    '''YG Octo 16,2017 \n    Get a turning point of a curve by doing a two-linear fit\n    ''' \n    D1, gmfit1, D2, gmfit2 = get_fit_by_two_linear(x,y, mid_xpoint1, mid_xpoint2, xrange ) \n    return get_cross_point( x, gmfit1, gmfit2 )\n    \n    \ndef plot_fit_two_linear_fit(x,y, gmfit1, gmfit2, ax=None ):\n    '''YG Octo 16,2017 Plot data together with the two fitted linear functions \n    ''' \n    if ax is None:\n        fig, ax =plt.subplots() \n    plot1D( x = x, y = y, ax =ax, c='k', legend='data', m='o', ls='')#logx=True, logy=True )\n    plot1D( x = x, y = gmfit1(x), ax =ax, c='r', m='', ls='-',legend='fit1' )\n    plot1D( x = x, y = gmfit2(x), ax =ax, c='b', m='', ls='-',legend='fit2' )\n    return ax \n    \n\ndef linear_fit( x,y, xrange=None):\n    '''YG Octo 16,2017 copied from XPCS_SAXS\n    a linear fit\n    '''\n    if xrange is not None:\n        xmin, xmax = xrange\n        x1,x2 = find_index( x,xmin,tolerance= None),find_index( x,xmax,tolerance= None)\n        x_ = x[x1:x2]\n        y_ = y[x1:x2] \n    else:\n        x_=x\n        y_=y\n    D0 = np.polyfit(x_, y_, 1)\n    gmfit = np.poly1d(D0) \n    return D0, gmfit\n\n\ndef find_index( x,x0,tolerance= None):\n    '''YG Octo 16,2017 copied from SAXS\n    find index of x0 in x\n    #find the position of P in a list (plist) with tolerance\n    '''\n    if x0 > max(x):\n        position= len(x) -1\n    elif x0<min(x):\n        position=0\n    else:\n        position = np.argmin( np.abs( x - x0 ) ) \n    return position\n\ndef find_index_old( x,x0,tolerance= None):\n    '''YG Octo 16,2017 copied from SAXS\n    find index of x0 in x\n    #find the position of P in a list (plist) with tolerance\n    '''\n    N=len(x)\n    i=0\n    position=None\n    if tolerance is None:\n        tolerance = (x[1]-x[0])/2.\n    if x0 > max(x):\n        position= len(x) -1\n    elif x0<min(x):\n        position=0\n    else:\n        for item in x:\n            if abs(item-x0)<=tolerance:\n                position=i\n                #print 'Found Index!!!'\n                break\n            i+=1\n    return position\n\n\n\ndef sgolay2d( z, window_size, order, derivative=None):\n    \"\"\"YG Octo 16, 2017\n    Modified from http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html\n    Procedure for sg2D:\n    https://en.wikipedia.org/wiki/Savitzky%E2%80%93Golay_filter#Two-dimensional_convolution_coefficients\n    \n    Two-dimensional smoothing and differentiation can also be applied to tables of data values, such as intensity \n    values in a photographic image which is composed of a rectangular grid of pixels.[16] [17] The trick is to transform \n    part of the table into a row by a simple ordering of the indices of the pixels. Whereas the one-dimensional filter \n    coefficients are found by fitting a polynomial in the subsidiary variable, z to a set of m data points, the \n    two-dimensional coefficients are found by fitting a polynomial in subsidiary variables v and w to a set of m x m \n    data points. 
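(As a usage sketch with an arbitrary test image: zs = sgolay2d( np.random.rand(64,64), window_size=11, order=2 ) returns the smoothed 64x64 array; passing derivative='both' instead returns the row- and column-derivative arrays.) 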
The following example, for a bicubic polynomial and m = 5, illustrates the process, which parallels the \n process for the one dimensional case, above.[18]\n\n The square of 25 data values, d1 - d25\n becomes a vector when the rows are placed one after another.\n The Jacobian has 10 columns, one for each of the parameters a00 - a03 and 25 rows, one for each pair of v and w values.\n The convolution coefficients are calculated as\n The first row of C contains 25 convolution coefficients which can be multiplied with the 25 data values to provide a\n smoothed value for the central data point (13) of the 25.\n\n \"\"\"\n # number of terms in the polynomial expression\n n_terms = ( order + 1 ) * ( order + 2) / 2.0\n\n if window_size % 2 == 0:\n raise ValueError('window_size must be odd')\n\n if window_size**2 < n_terms:\n raise ValueError('order is too high for the window size')\n\n half_size = window_size // 2\n\n # exponents of the polynomial. \n # p(x,y) = a0 + a1*x + a2*y + a3*x^2 + a4*y^2 + a5*x*y + ... \n # this line gives a list of two item tuple. Each tuple contains \n # the exponents of the k-th term. First element of tuple is for x\n # second element for y.\n # Ex. exps = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2), ...]\n exps = [ (k-n, n) for k in range(order+1) for n in range(k+1) ]\n\n # coordinates of points\n ind = np.arange(-half_size, half_size+1, dtype=np.float64)\n dx = np.repeat( ind, window_size )\n dy = np.tile( ind, [window_size, 1]).reshape(window_size**2, )\n\n # build matrix of system of equation\n A = np.empty( (window_size**2, len(exps)) )\n for i, exp in enumerate( exps ):\n A[:,i] = (dx**exp[0]) * (dy**exp[1])\n\n # pad input array with appropriate values at the four borders\n new_shape = z.shape[0] + 2*half_size, z.shape[1] + 2*half_size\n Z = np.zeros( (new_shape) )\n # top band\n band = z[0, :]\n Z[:half_size, half_size:-half_size] = band - np.abs( np.flipud( z[1:half_size+1, :] ) - band )\n # bottom band\n band = z[-1, :]\n Z[-half_size:, half_size:-half_size] = band + np.abs( np.flipud( z[-half_size-1:-1, :] ) -band )\n # left band\n band = np.tile( z[:,0].reshape(-1,1), [1,half_size])\n Z[half_size:-half_size, :half_size] = band - np.abs( np.fliplr( z[:, 1:half_size+1] ) - band )\n # right band\n band = np.tile( z[:,-1].reshape(-1,1), [1,half_size] )\n Z[half_size:-half_size, -half_size:] = band + np.abs( np.fliplr( z[:, -half_size-1:-1] ) - band )\n # central band\n Z[half_size:-half_size, half_size:-half_size] = z\n\n # top left corner\n band = z[0,0]\n Z[:half_size,:half_size] = band - np.abs( np.flipud(np.fliplr(z[1:half_size+1,1:half_size+1]) ) - band )\n # bottom right corner\n band = z[-1,-1]\n Z[-half_size:,-half_size:] = band + np.abs( np.flipud(np.fliplr(z[-half_size-1:-1,-half_size-1:-1]) ) - band )\n\n # top right corner\n band = Z[half_size,-half_size:]\n Z[:half_size,-half_size:] = band - np.abs( np.flipud(Z[half_size+1:2*half_size+1,-half_size:]) - band )\n # bottom left corner\n band = Z[-half_size:,half_size].reshape(-1,1)\n Z[-half_size:,:half_size] = band - np.abs( np.fliplr(Z[-half_size:, half_size+1:2*half_size+1]) - band )\n\n # solve system and convolve\n if derivative == None:\n m = np.linalg.pinv(A)[0].reshape((window_size, -1))\n return scipy.signal.fftconvolve(Z, m, mode='valid')\n elif derivative == 'col':\n c = np.linalg.pinv(A)[1].reshape((window_size, -1))\n return scipy.signal.fftconvolve(Z, -c, mode='valid')\n elif derivative == 'row':\n r = np.linalg.pinv(A)[2].reshape((window_size, -1))\n return scipy.signal.fftconvolve(Z, 
-r, mode='valid')\n elif derivative == 'both':\n c = np.linalg.pinv(A)[1].reshape((window_size, -1))\n r = np.linalg.pinv(A)[2].reshape((window_size, -1))\n return scipy.signal.fftconvolve(Z, -r, mode='valid'), scipy.signal.fftconvolve(Z, -c, mode='valid')\n \n \ndef load_filelines( fullpath ):\n '''YG Develop March 10, 2018 \n Load all content from a file \n basepath, fname = os.path.split(os.path.abspath( fullpath )) \n Input:\n fullpath: str, full path of the file \n Return:\n list: str\n ''' \n with open( fullpath, 'r' ) as fin:\n p=fin.readlines() \n return p\n\n\n \ndef extract_data_from_file( filename, filepath, good_line_pattern=None, start_row=None, good_cols=None, labels=None,):\n '''YG Develop Octo 17, 2017 \n Add start_row option at March 5, 2018 \n \n Extract data from a file\n Input:\n filename: str, filename of the data\n filepath: str, path of the data\n good_line_pattern: str, data will be extract below this good_line_pattern\n Or giving start_row: int\n good_cols: list of integer, good index of cols\n lables: the label of the good_cols\n #save: False, if True will save the data into a csv file with filename appending csv ??\n Return:\n a pds.dataframe\n Example:\n filepath = '/XF11ID/analysis/2017_3/lwiegart/Link_files/Exports/'\n filename = 'ANPES2 15-10-17 16-31-11-84Exported.txt' \n good_cols = [ 1,2,4,6,8,10 ]\n labels = [ 'time', 'temperature', 'force', 'distance', 'stress', 'strain' ]\n good_line_pattern = \"Index\\tX\\tY\\tX\\tY\\tX\\tY\" \n df = extract_data_from_file( filename, filepath, good_line_pattern, good_cols, labels)\n '''\n import pandas as pds\n with open( filepath + filename, 'r' ) as fin:\n p=fin.readlines()\n di = 1e20 \n for i, line in enumerate(p):\n if start_row is not None:\n di = start_row\n elif good_line_pattern is not None:\n if good_line_pattern in line: \n di = i\n else:\n di = 0 \n if i == di+1:\n els = line.split() \n if good_cols is None:\n data = np.array( els, dtype=float )\n else:\n data = np.array( [els[j] for j in good_cols], dtype=float )\n elif i > di:\n try: \n els = line.split() \n if good_cols is None:\n temp = np.array( els, dtype=float )\n else:\n temp= np.array( [els[j] for j in good_cols], dtype=float ) \n data=np.vstack( (data,temp))\n except:\n pass\n if labels is None:\n labels = np.arange(data.shape[1])\n df = pds.DataFrame( data, index= np.arange(data.shape[0]), columns= labels ) \n return df\n \n \n \ndef get_print_uids( start_time, stop_time, return_all_info=False):\n '''Update Feb 20, 2018 also return full uids\n YG. 
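Octo 3, 2017@CHX (a call sketch; the time strings are illustrative):\n    fuids, uids, sids = get_print_uids( '2017-10-3 10:00:00', '2017-10-3 18:00:00' )\n    YG. 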
Octo 3, 2017@CHX\n Get full uids and print uid plus Measurement contents by giving start_time, stop_time\n \n ''' \n hdrs = list( db(start_time= start_time, stop_time = stop_time) )\n fuids = np.zeros( len(hdrs),dtype=object)\n uids = np.zeros( len(hdrs),dtype=object)\n sids = np.zeros( len(hdrs), dtype=object)\n n=0\n all_info = np.zeros( len(hdrs), dtype=object)\n for i in range(len(hdrs)):\n fuid = hdrs[-i-1]['start']['uid'] #reverse order\n uid = fuid[:6] #reverse order\n sid = hdrs[-i-1]['start']['scan_id']\n fuids[n]=fuid\n uids[n]=uid\n sids[n]=sid \n date = time.ctime(hdrs[-i-1]['start']['time'])\n try:\n m = hdrs[-i-1]['start']['Measurement']\n except:\n m=''\n info = \"%3d: uid = '%s' ##%s #%s: %s-- %s \"%(i,uid,date,sid,m, fuid)\n print( info )\n if return_all_info:\n all_info[n]=info\n n +=1 \n if not return_all_info:\n return fuids, uids, sids \n else: \n return fuids, uids, sids, all_info \n \n \n \ndef get_last_uids( n=-1 ):\n '''YG Sep 26, 2017\n A Convinient function to copy uid to jupyter for analysis'''\n uid = db[n]['start']['uid'][:8]\n sid = db[n]['start']['scan_id']\n m = db[n]['start']['Measurement']\n return \" uid = '%s' #(scan num: %s (Measurement: %s \"%(uid,sid,m) \n\n\n \ndef get_base_all_filenames( inDir, base_filename_cut_length = -7 ):\n '''YG Sep 26, 2017\n Get base filenames and their related all filenames\n Input:\n inDir, str, input data dir\n base_filename_cut_length: to which length the base name is unique\n Output:\n dict: keys, base filename\n vales, all realted filename\n '''\n from os import listdir\n from os.path import isfile, join\n tifs = np.array( [f for f in listdir(inDir) if isfile(join(inDir, f))] )\n tifsc = list(tifs.copy()) \n utifs = np.sort( np.unique( np.array([ f[:base_filename_cut_length] for f in tifs] ) ) )[::-1]\n files = {}\n for uf in utifs: \n files[uf] = []\n i = 0\n reName = []\n for i in range(len(tifsc)): \n if uf in tifsc[i]: \n files[uf].append( tifsc[i] ) \n reName.append(tifsc[i])\n for fn in reName:\n tifsc.remove(fn)\n return files\n\n \ndef create_ring_mask( shape, r1, r2, center, mask=None):\n '''YG. Sep 20, 2017 Develop@CHX \n Create 2D ring mask\n input:\n shape: two integer number list, mask shape, e.g., [100,100]\n r1: the inner radius\n r2: the outer radius\n center: two integer number list, [cx,cy], ring center, e.g., [30,50]\n output:\n 2D numpy array, 0,1 type\n '''\n\n m = np.zeros( shape, dtype= bool) \n rr,cc = circle( center[1], center[0], r2, shape=shape )\n m[rr,cc] = 1\n rr,cc = circle( center[1], center[0], r1,shape=shape )\n m[rr,cc] = 0 \n if mask is not None:\n m += mask\n return m\n\ndef get_image_edge(img):\n '''\n Y.G. Developed at Sep 8, 2017 @CHX\n Get sharp edges of an image\n img: two-D array, e.g., a roi mask\n '''\n edg_ = prewitt(img/1.0)\n edg = np.zeros_like(edg_)\n w = np.where(edg_ > 1e-10)\n edg[w] = img[w]\n edg[np.where(edg==0)] = 1\n return edg \n\ndef get_image_with_roi( img, roi_mask, scale_factor = 2):\n '''\n Y.G. 
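Developed at Sep 8, 2017 @CHX\n    A display sketch (avg_img and roi_mask as produced earlier in an analysis pipeline):\n    show_img( get_image_with_roi( avg_img, roi_mask, scale_factor=2 ), logs=True )\n    Y.G. 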
Developed at Sep 8, 2017 @CHX\n    Get image with edges of roi_mask by doing\n    i) get edges of roi_mask by function get_image_edge\n    ii) scale img at region of interest (ROI) by scale_factor\n    img: two-D array for image\n    roi_mask: two-D array for ROI\n    scale_factor: scaling factor of ROI in image\n    '''\n    edg = get_image_edge( roi_mask )\n    img_ = img.copy()\n    w = np.where(roi_mask)\n    img_[w] = img[w] * scale_factor\n    return img_ * edg \n\n\n\n\n\ndef get_today_date( ):\n    from time import gmtime, strftime\n    return strftime(\"%m-%d-%Y\", gmtime() )\n\n\ndef move_beamstop( mask, xshift, yshift ):\n    '''Y.G. Developed at July 18, 2017 @CHX\n    Create a new mask by shifting the old one by xshift, yshift\n    Input\n    ---\n    mask: 2D numpy array, 0 for bad pixels, 1 for good pixels\n    xshift, integer, shift value along x direction\n    yshift, integer, shift value along y direction\n    \n    Output\n    ---\n    mask, 2D numpy array,\n    '''\n    m = np.ones_like(mask)\n    W,H = mask.shape\n    w = np.where(mask==0)\n    nx, ny = w[0]+ int(yshift), w[1]+ int(xshift )\n    gw = np.where( (nx >= 0) & (nx<W) & (ny >= 0) & (ny<H) )\n    nx = nx[ gw ]\n    ny = ny[ gw ] \n    m[ nx,ny ] = 0\n    return m\n\n\n\ndef validate_uid(uid):\n    '''check whether the data of a uid can be loaded'''\n    try:\n        sud = get_sid_filenames(db[uid])\n        print(sud)\n        md = get_meta_data( uid )\n        imgs = load_data( uid, md['detector'], reverse= True )\n        print(imgs)\n        return 1\n    except:\n        print(\"Can't load this uid=%s!\"%uid)\n        return 0\n\ndef validate_uid_dict( uid_dict ):\n    ''' Y.G. developed July 17, 2017 @CHX\n    Check whether each uid in a dict can load data or not\n    uid_dict: dict, key: a meaningful description, val: a list of uids \n    \n    ''' \n    badn = 0\n    badlist=[]\n    for k in list(uid_dict.keys()):\n        for uid in uid_dict[k]: \n            flag = validate_uid(uid)\n            if not flag:\n                badn += 1\n                badlist.append( uid )\n    print( 'There are %s bad uids:%s in this uid_dict.'%(badn, badlist))\n\ndef get_mass_center_one_roi(FD, roi_mask, roi_ind):\n    '''Get the mass center (in pixel unit) of one roi in a time series FD\n    FD: handler for a compressed time series\n    roi_mask: the roi array\n    roi_ind: the interest index of the roi \n    \n    '''\n    import scipy\n    m = (roi_mask == roi_ind)\n    cx, cy = np.zeros( int( ( FD.end - FD.beg)/1 ) ), np.zeros( int( ( FD.end - FD.beg)/1 ) ) \n    n =0 \n    for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get mass center of one ROI of each frame' ):\n        img = FD.rdframe(i) * m\n        c = scipy.ndimage.measurements.center_of_mass(img)\n        cx[n], cy[n] = int(c[0]), int(c[1])\n        n +=1\n    return cx,cy\n\n \n\n\ndef get_current_pipeline_filename(NOTEBOOK_FULL_PATH):\n    '''Y.G. April 25, 2017\n    Get the current running pipeline filename and path \n    Assume the pipeline is located in /XF11ID/\n    Return, path and filename\n    '''\n    from IPython.core.magics.display import Javascript\n    if False:\n        Javascript( '''\n        var nb = IPython.notebook;\n        var kernel = IPython.notebook.kernel;\n        var command = \"NOTEBOOK_FULL_PATH = '\" + nb.base_url + nb.notebook_path + \"'\";\n        kernel.execute(command);\n        ''' ) \n    print(NOTEBOOK_FULL_PATH)\n    filename = NOTEBOOK_FULL_PATH.split('/')[-1]\n    path = '/XF11ID/'\n    for s in NOTEBOOK_FULL_PATH.split('/')[3:-1]:\n        path += s + '/'\n    return path, filename \n \ndef get_current_pipeline_fullpath(NOTEBOOK_FULL_PATH):\n    '''Y.G. 
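April 25, 2017 (call sketch: fp = get_current_pipeline_fullpath( NOTEBOOK_FULL_PATH ), where NOTEBOOK_FULL_PATH is set by the javascript snippet shown in get_current_pipeline_filename)\n    Y.G. 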
April 25, 2017\n Get the current running pipeline full filepath \n Assume the piple is located in /XF11ID/\n Return, the fullpath (path + filename)\n ''' \n p,f = get_current_pipeline_filename(NOTEBOOK_FULL_PATH)\n return p + f\n \ndef save_current_pipeline(NOTEBOOK_FULL_PATH, outDir):\n '''Y.G. April 25, 2017\n Save the current running pipeline to outDir\n The save pipeline should be the snapshot of the current state. \n '''\n\n import shutil\n path, fp = get_current_pipeline_filename(NOTEBOOK_FULL_PATH)\n shutil.copyfile( path + fp, outDir + fp ) \n \n print('This pipeline: %s is saved in %s.'%(fp, outDir))\n \n \n\ndef plot_g1( taus, g2, g2_fit_paras, qr=None, ylim=[0,1], title=''):\n '''Dev Apr 19, 2017,\n Plot one-time correlation, giving taus, g2, g2_fit'''\n noqs = g2.shape[1]\n fig,ax=plt.subplots()\n if qr is None:\n qr = np.arange(noqs)\n for i in range(noqs):\n b = g2_fit_paras['baseline'][i]\n beta = g2_fit_paras['beta'][i]\n y= np.sqrt( np.abs(g2[1:,i] - b)/beta )\n plot1D( x = taus[1:], y= y, ax=ax, legend= 'q=%s'%qr[i], ls='-', lw=2, \n m=markers[i], c= colors[i], title=title, ylim=ylim,\n logx=True, legend_size= 8 )\n ax.set_ylabel( r\"$g_1$\" + '(' + r'$\\tau$' + ')' ) \n ax.set_xlabel(r\"$\\tau $ $(s)$\", fontsize=16) \n return ax \n\n\n\ndef filter_roi_mask( filter_dict, roi_mask, avg_img, filter_type= 'ylim' ):\n '''Remove bad pixels in roi_mask. The bad pixel is defined by the filter_dict, \n if filter_type ='ylim', the filter_dict wit key as q and each value gives a high and low limit thresholds. The value of the pixels in avg_img above or below the limit are considered as bad pixels.\n if filter_type='badpix': the filter_dict wit key as q and each value gives a list of bad pixel.\n \n avg_img, the averaged image\n roi_mask: two-d array, the same shape as image, the roi mask, value is integer, e.g., 1 ,2 ,...\n filter_dict: keys, as roi_mask integer, value, by default is [None,None], is the limit,\n example, {2:[4,5], 10:[0.1,1.1]}\n NOTE: first q = 1 (not 0)\n '''\n rm = roi_mask.copy()\n rf = np.ravel(rm) \n for k in list(filter_dict.keys()): \n pixel = roi.roi_pixel_values(avg_img, roi_mask, [k] )[0][0]\n #print( np.max(pixel), np.min(pixel) )\n if filter_type == 'ylim':\n xmin,xmax = filter_dict[k]\n badp =np.where( (pixel>= xmax) | ( pixel <= xmin) )[0]\n else:\n badp = filter_dict[k]\n if len(badp)!=0: \n pls = np.where([rf==k])[1]\n rf[ pls[badp] ] = 0 \n return rm\n\n \n##\n#Dev at March 31 for create Eiger chip mask\ndef create_chip_edges_mask( det='1M' ):\n ''' Create a chip edge mask for Eiger detector\n \n '''\n if det == '1M':\n shape = [1065, 1030]\n w = 4\n mask = np.ones( shape , dtype = np.int32)\n cx = [ 1030//4 *i for i in range(1,4) ]\n #cy = [ 1065//4 *i for i in range(1,4) ]\n cy = [808, 257 ]\n #print (cx, cy )\n for c in cx:\n mask[:, c-w//2:c+w//2 ] = 0 \n for c in cy:\n mask[ c-w//2:c+w//2, : ] = 0 \n \n return mask\n \ndef create_ellipse_donut( cx, cy , wx_inner, wy_inner, wx_outer, wy_outer, roi_mask, gap=0):\n Nmax = np.max( np.unique( roi_mask ) )\n rr1, cc1 = ellipse( cy,cx, wy_inner, wx_inner ) \n rr2, cc2 = ellipse( cy, cx, wy_inner + gap, wx_inner +gap ) \n rr3, cc3 = ellipse( cy, cx, wy_outer,wx_outer ) \n roi_mask[rr3,cc3] = 2 + Nmax\n roi_mask[rr2,cc2] = 0\n roi_mask[rr1,cc1] = 1 + Nmax\n return roi_mask\n \ndef create_box( cx, cy, wx, wy, roi_mask):\n Nmax = np.max( np.unique( roi_mask ) )\n for i, [cx_,cy_] in enumerate(list( zip( cx,cy ))): #create boxes\n x = np.array( [ cx_-wx, cx_+wx, cx_+wx, cx_-wx]) \n y = np.array( [ 
cy_-wy, cy_-wy, cy_+wy, cy_+wy])\n rr, cc = polygon( y,x) \n roi_mask[rr,cc] = i +1 + Nmax\n return roi_mask\n\n\n\n\ndef create_folder( base_folder, sub_folder ):\n '''\n Crate a subfolder under base folder\n Input:\n base_folder: full path of the base folder\n sub_folder: sub folder name to be created \n Return:\n Created full path of the created folder\n '''\n\n data_dir0 = os.path.join( base_folder, sub_folder )\n ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/'\n os.makedirs(data_dir0, exist_ok=True)\n print('Results from this analysis will be stashed in the directory %s' % data_dir0) \n return data_dir0 \n\n\n\n \n \ndef create_user_folder( CYCLE, username=None, default_dir= '/XF11ID/analysis/' ):\n '''\n Crate a folder for saving user data analysis result\n Input:\n CYCLE: run cycle\n username: if None, get username from the jupyter username\n Return:\n Created folder name\n '''\n if username !='Default':\n if username is None:\n username = getpass.getuser() \n data_dir0 = os.path.join(default_dir, CYCLE, username, 'Results/')\n else:\n data_dir0 = os.path.join(default_dir, CYCLE +'/')\n ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/'\n os.makedirs(data_dir0, exist_ok=True)\n print('Results from this analysis will be stashed in the directory %s' % data_dir0) \n return data_dir0 \n \n \n \n \n \n \n##################################\n#########For dose analysis #######\n##################################\ndef get_fra_num_by_dose( exp_dose, exp_time, att=1, dead_time =2 ):\n '''\n Calculate the frame number to be correlated by giving a X-ray exposure dose\n \n Paramters:\n exp_dose: a list, the exposed dose, e.g., in unit of exp_time(ms)*N(fram num)*att( attenuation)\n exp_time: float, the exposure time for a xpcs time sereies\n dead_time: dead time for the fast shutter reponse time, CHX = 2ms\n Return:\n noframes: the frame number to be correlated, exp_dose/( exp_time + dead_time ) \n e.g.,\n \n no_dose_fra = get_fra_num_by_dose( exp_dose = [ 3.34* 20, 3.34*50, 3.34*100, 3.34*502, 3.34*505 ],\n exp_time = 1.34, dead_time = 2)\n \n --> no_dose_fra will be array([ 20, 50, 100, 502, 504]) \n '''\n return np.int_( np.array( exp_dose )/( exp_time + dead_time)/ att )\n\n\ndef get_multi_tau_lag_steps( fra_max, num_bufs = 8 ):\n '''\n Get taus in log steps ( a multi-taus defined taus ) for a time series with max frame number as fra_max\n Parameters:\n fra_max: integer, the maximun frame number \n buf_num (default=8), \n Return:\n taus_in_log, a list \n \n e.g., \n get_multi_tau_lag_steps( 20, 8 ) --> array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16])\n \n ''' \n num_levels = int(np.log( fra_max/(num_bufs-1))/np.log(2) +1) +1\n tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs) \n return lag_steps[lag_steps < fra_max]\n \n\n\ndef get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = True, \n num_bufs = 8):\n '''\n Get taus for dose dependent analysis\n Parameters:\n fra_max_list: a list, a lsit of largest available frame number \n acq_time: acquistion time for each frame\n log_taus: if true, will use the multi-tau defined taus bu using buf_num (default=8),\n otherwise, use deltau =1 \n Return:\n tausd, a dict, with keys as taus_max_list items \n e.g., \n get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True, num_bufs = 8)\n --> \n {20: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16]),\n 30: array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 
16, 20, 24, 28]),\n     40: array([ 0,  1,  2,  3,  4,  5,  6,  7,  8, 10, 12, 14, 16, 20, 24, 28, 32])\n    }\n    \n    '''\n    tausd = {}\n    for n in fra_max_list:\n        if max_fra_num is not None:\n            L = max_fra_num\n        else:\n            L = np.inf \n        if n>L:\n            warnings.warn(\"Warning: the dose value is too large; please \" \n                    \"check the maximum dose in this data set and give a smaller dose value. \"\n                    \"We will use the maximum dose of the data.\") \n            n = L \n        if log_taus:\n            lag_steps = get_multi_tau_lag_steps(n, num_bufs)\n        else:\n            lag_steps = np.arange( n )\n        tausd[n] = lag_steps * acq_time\n    return tausd\n\n\n\n\ndef check_lost_metadata(md, Nimg=None, inc_x0 =None, inc_y0= None, pixelsize=7.5*10**(-5) ):\n    '''Y.G. Dec 31, 2016, check lost metadata\n    \n    Parameters:\n        md: dict, metadata dictionary\n        Nimg: number of frames for this uid metadata\n        inc_x0/y0: incident beam center x0/y0; if not None, will overwrite md['beam_center_x/y']\n        pixelsize: if md does not have ['x_pixel_size'], this pixelsize will be added\n    Return:\n        dpix: pixelsize, in mm\n        lambda_: wavelength of the X-rays in Angstroms\n        exposuretime: exposure time in sec\n        timeperframe: acquisition time in sec \n        center: list, [x,y], incident beam center in pixel\n        Will also update md \n    '''\n    mdn = md.copy()\n    if 'number of images' not in list(md.keys()):\n        md['number of images'] = Nimg\n    if 'x_pixel_size' not in list(md.keys()):\n        md['x_pixel_size'] = 7.5000004e-05\n    dpix = md['x_pixel_size'] * 1000.  #in mm, eiger 4m is 0.075 mm\n    try:\n        lambda_ =md['wavelength']\n    except:\n        lambda_ =md['incident_wavelength']    # wavelength of the X-rays in Angstroms \n    try:\n        Ldet = md['det_distance']\n        if Ldet<=1000:\n            Ldet *=1000\n            md['det_distance'] = Ldet\n    except:\n        Ldet = md['detector_distance']\n        if Ldet<=1000:\n            Ldet *=1000\n            md['detector_distance'] = Ldet \n\n    \n    try:#try exp time from detector\n        exposuretime= md['count_time']     #exposure time in sec \n    except: \n        exposuretime= md['cam_acquire_time']     #exposure time in sec\n    try:#try acq time from detector\n        acquisition_period = md['frame_time'] \n    except:\n        try:\n            acquisition_period = md['acquire period']\n        except: \n            uid = md['uid']\n            acquisition_period = float( db[uid]['start']['acquire period'] )\n    timeperframe = acquisition_period \n    if inc_x0 is not None:\n        mdn['beam_center_x']= inc_y0\n        print( 'Beam_center_x has been changed to %s. (no change in raw metadata): '%inc_y0)\n    if inc_y0 is not None:\n        mdn['beam_center_y']= inc_x0 \n        print( 'Beam_center_y has been changed to %s. (no change in raw metadata): '%inc_x0)\n    center = [ int(mdn['beam_center_x']),int( mdn['beam_center_y'] ) ]  #beam center [y,x] for python image\n    center=[center[1], center[0]]\n    \n    return dpix, lambda_, Ldet, exposuretime, timeperframe, center\n\n\ndef combine_images( filenames, outputfile, outsize=(2000, 2400)):\n    '''Y.G. 
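Dec 31, 2016\n    A call sketch (paths are illustrative):\n    combine_images( [ data_dir + f for f in ls_dir2( data_dir, '.png' ) ], data_dir + 'summary.png' )\n    Y.G. 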
Dec 31, 2016\n Combine images together to one image using PIL.Image\n Input:\n filenames: list, the images names to be combined\n outputfile: str, the filename to generate\n outsize: the combined image size\n Output:\n save a combined image file\n '''\n N = len( filenames)\n #nx = np.int( np.ceil( np.sqrt(N)) )\n #ny = np.int( np.ceil( N / float(nx) ) )\n\n ny = np.int( np.ceil( np.sqrt(N)) )\n nx = np.int( np.ceil( N / float(ny) ) )\n \n #print(nx,ny)\n result = Image.new(\"RGB\", outsize, color=(255,255,255,0)) \n basewidth = int( outsize[0]/nx ) \n hsize = int( outsize[1]/ny ) \n for index, file in enumerate(filenames):\n path = os.path.expanduser(file)\n img = Image.open(path)\n bands = img.split()\n ratio = img.size[1]/ img.size[0] #h/w \n if hsize > basewidth * ratio:\n basewidth_ = basewidth \n hsize_ = int( basewidth * ratio )\n else:\n basewidth_ = int( hsize/ratio )\n hsize_ = hsize \n #print( index, file, basewidth, hsize )\n size = (basewidth_,hsize_)\n bands = [b.resize(size, Image.LINEAR) for b in bands]\n img = Image.merge('RGBA', bands) \n x = index % nx * basewidth\n y = index // nx * hsize\n w, h = img.size\n #print('pos {0},{1} size {2},{3}'.format(x, y, w, h))\n result.paste(img, (x, y, x + w, y + h ))\n result.save( outputfile,quality=100, optimize=True )\n print( 'The combined image is saved as: %s'%outputfile)\n \n\ndef get_qval_dict( qr_center, qz_center=None, qval_dict = None, multi_qr_for_one_qz= True,\n one_qz_multi_qr = True):\n '''Y.G. Dec 27, 2016\n Map the roi label array with qr or (qr,qz) or (q//, q|-) values\n Parameters:\n qr_center: list, a list of qr\n qz_center: list, a list of qz, \n multi_qr_for_one_qz: by default=True, \n if one_qz_multi_qr: \n one qz_center corresponds to all qr_center, in other words, there are totally, len(qr_center)* len(qz) qs\n else:\n one qr_center corresponds to all qz_center, \n else: one qr with one qz\n qval_dict: if not None, will append the new dict to the qval_dict\n Return:\n qval_dict, a dict, each key (a integer) with value as qr or (qr,qz) or (q//, q|-)\n \n '''\n \n if qval_dict is None:\n qval_dict = {}\n maxN = 0\n else:\n maxN = np.max( list( qval_dict.keys() ) ) +1\n \n if qz_center is not None:\n if multi_qr_for_one_qz:\n if one_qz_multi_qr:\n for qzind in range( len( qz_center)):\n for qrind in range( len( qr_center)): \n qval_dict[ maxN + qzind* len( qr_center) + qrind ] = np.array( [qr_center[qrind], qz_center[qzind] ] )\n else:\n for qrind in range( len( qr_center)):\n for qzind in range( len( qz_center)): \n qval_dict[ maxN + qrind* len( qz_center) + qzind ] = np.array( [qr_center[qrind], qz_center[qzind] ] )\n \n \n else:\n for i, [qr, qz] in enumerate(zip( qr_center, qz_center)): \n qval_dict[ maxN + i ] = np.array( [ qr, qz ] ) \n else:\n for qrind in range( len( qr_center)): \n qval_dict[ maxN + qrind ] = np.array( [ qr_center[qrind] ] ) \n return qval_dict \n\n\ndef update_qval_dict( qval_dict1, qval_dict2 ):\n ''' Y.G. 
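Dec 31, 2016\n    A merge sketch (q values are illustrative):\n    qd1 = get_qval_dict( qr_center=[0.01, 0.02] )                      # keys 0,1\n    qd  = update_qval_dict( qd1, get_qval_dict( qr_center=[0.03] ) )  # keys 0,1,2\n    Y.G. 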
Dec 31, 2016\n    Update qval_dict1 with qval_dict2\n    Input:\n        qval_dict1, a dict, each key (an integer) with value as qr or (qr,qz) or (q//, q|-)\n        qval_dict2, a dict, each key (an integer) with value as qr or (qr,qz) or (q//, q|-)\n    Output:\n        qval_dict, a dict, with the same keys as dict1, plus every key in dict2 shifted up by max(dict1.keys())+1\n    '''\n    maxN = np.max( list( qval_dict1.keys() ) ) +1\n    qval_dict = {}\n    qval_dict.update( qval_dict1 )\n    for k in list( qval_dict2.keys() ):\n        qval_dict[k + maxN ] = qval_dict2[k]\n    return qval_dict\n\ndef update_roi_mask( roi_mask1, roi_mask2 ):\n    ''' Y.G. Dec 31, 2016\n    Update roi_mask1 with roi_mask2\n    Input:\n        roi_mask1, 2d-array, label array, same shape as xpcs frame, \n        roi_mask2, 2d-array, label array, same shape as xpcs frame,\n    Output:\n        roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2\n    '''\n    roi_mask = roi_mask1.copy()\n    w= np.where( roi_mask2 )    \n    roi_mask[w] = roi_mask2[w] + np.max( roi_mask )   \n    return roi_mask\n\n\ndef check_bad_uids(uids, mask, img_choice_N = 10, bad_uids_index = None ):\n    '''Y.G. Dec 22, 2016\n    Find bad uids by checking the average intensity of img_choice_N randomly selected frames for each uid. If the average intensity is zero, the uid is considered bad.\n    Parameters:\n        uids: list, a list of uid\n        mask: array, bool type numpy.array\n        img_choice_N: number of randomly selected frames per uid\n        bad_uids_index: a list of known bad uid indices, default is None\n    Return:\n        guids: list, good uids\n        buids, list, bad uids \n    ''' \n    import random\n    buids = []\n    guids = list( uids )\n    #print( guids )\n    if bad_uids_index is None:\n        bad_uids_index = []\n    for i, uid in enumerate(uids):\n        #print( i, uid )\n        if i not in bad_uids_index:\n            detector = get_detector( db[uid ] )\n            imgs = load_data( uid, detector  )\n            img_samp_index = random.sample( range(len(imgs)), img_choice_N)\n            imgsa = apply_mask( imgs, mask )\n            avg_img =  get_avg_img( imgsa, img_samp_index, plot_ = False, uid =uid)\n            if avg_img.max() == 0:\n                buids.append( uid )   \n                guids.pop(  list( np.where( np.array(guids) == uid)[0] )[0]  )\n                print( 'The bad uid is: %s'%uid )\n        else:\n            guids.pop(  list( np.where( np.array(guids) == uid)[0] )[0]  )\n            buids.append( uid )\n            print( 'The bad uid is: %s'%uid )  \n    print( 'The total and bad uids number are %s and %s, respectively.'%( len(uids), len(buids) ) )   \n    return guids, buids  \n    \n\n\ndef find_uids(start_time, stop_time ):\n    '''Y.G. 
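Dec 22, 2016\n    e.g. (times illustrative): sids, uids, fuids = find_uids( '2016-12-22 10:00:00', '2016-12-22 18:00:00' )\n    Y.G. 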
Dec 22, 2016\n    A wrapper function to find uids by giving start and end time\n    Return:\n        sids: list, scan id\n        uids: list, uid with 8 character length\n        fuids: list, uid with full length\n    \n    '''\n    hdrs = db(start_time= start_time, stop_time = stop_time)\n    try:\n        print ('Totally %s uids are found.'%(len(list(hdrs))))\n    except:\n        pass\n    sids=[]\n    uids=[]\n    fuids=[]\n    for hdr in hdrs:\n        s= get_sid_filenames( hdr)\n        #print (s[1][:8])\n        sids.append( s[0] )\n        uids.append( s[1][:8] )\n        fuids.append( s[1] )\n    sids=sids[::-1]\n    uids=uids[::-1]\n    fuids=fuids[::-1]\n    return np.array(sids), np.array(uids), np.array(fuids)\n\n\ndef ployfit( y, x=None, order = 20 ):\n    '''\n    fit data (one-d array) by a polynomial function (the function name's spelling is kept for backward compatibility)\n    return the fitted one-d array\n    '''\n    if x is None:\n        x = range(len(y))\n    pol = np.polyfit(x, y, order)\n    return np.polyval(pol, x)\n    \ndef check_bad_data_points( data, fit=True, polyfit_order = 30, legend_size = 12,\n                  plot=True, scale=1.0, good_start=None, good_end=None, uid='uid', path=None, return_ylim=False ):\n    '''\n    data: 1D array\n    scale: the scale of deviation\n    fit: if True, use a polynomial function to fit the data, to get a mean-inten(array), then use the scale to get low and high thresholds; it's good to remove bad frames/pixels on top of a non-flat curve\n        else: use the mean (a value) of the data and scale to get low and high thresholds; it's good to remove bad frames/pixels on top of a flat curve\n    \n    '''\n    if good_start is None:\n        good_start=0\n    if good_end is None:\n        good_end = len( data )\n    bd1 = [i for i in range(0, good_start)]\n    bd3 = [i for i in range(good_end,len( data ) )]\n    \n    d_ = data[good_start:good_end]\n    \n    if fit:\n        pfit = ployfit( d_, order = polyfit_order)\n        d = d_ - pfit \n    else:\n        d = d_ \n        pfit = np.ones_like(d) * data.mean()\n    \n    ymin = d.mean()-scale *d.std()\n    ymax = d.mean()+scale *d.std() \n    \n    if plot:\n        fig = plt.figure( )   \n        ax = fig.add_subplot(2,1,1 )      \n        plot1D( d_, ax = ax, color='k', legend='data',legend_size=legend_size )\n        plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title='Find Bad Points',legend_size=legend_size )\n\n        ax2 = fig.add_subplot(2,1,2 )      \n        plot1D( d, ax = ax2,legend='difference',marker='s', color='b', )   \n\n        #print('here')\n        plot1D(x=[0,len(d_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size )\n\n        plot1D(x=[0,len(d_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='',legend_size=legend_size )\n\n        if path is not None:\n            fp = path + '%s'%( uid ) + '_find_bad_points'  + '.png'  \n            plt.savefig( fp, dpi=fig.dpi)     \n    bd2= list(  np.where( np.abs(d -d.mean()) > scale *d.std() )[0] + good_start )\n    \n    if return_ylim:\n        return np.array( bd1 + bd2 + bd3 ), ymin, ymax,pfit\n    else:\n        return np.array( bd1 + bd2 + bd3 ), pfit\n    \n    \n    \n\ndef get_bad_frame_list( imgsum, fit=True, polyfit_order = 30,legend_size = 12,\n                       plot=True, scale=1.0, good_start=None, good_end=None, uid='uid',path=None,\n                       \n                       return_ylim=False):\n    '''\n    imgsum: the sum intensity of a time series\n    scale: the scale of deviation\n    fit: if True, use a polynomial function to fit the imgsum, to get a mean-inten(array), then use the scale to get low and high thresholds; it's good to remove bad frames/pixels on top of a non-flat curve\n        else: use the mean (a value) of imgsum and scale to get low and high thresholds; it's good to remove bad frames/pixels on top of a flat curve\n    \n    '''\n    if good_start is None:\n        good_start=0\n    if good_end is None:\n        good_end = len( imgsum )\n    bd1 = 
[i for i in range(0, good_start)]\n bd3 = [i for i in range(good_end,len( imgsum ) )]\n \n imgsum_ = imgsum[good_start:good_end]\n \n if fit:\n pfit = ployfit( imgsum_, order = polyfit_order)\n data = imgsum_ - pfit \n else:\n data = imgsum_ \n pfit = np.ones_like(data) * data.mean()\n \n ymin = data.mean()-scale *data.std()\n ymax = data.mean()+scale *data.std() \n \n if plot:\n fig = plt.figure( ) \n ax = fig.add_subplot(2,1,1 ) \n plot1D( imgsum_, ax = ax, color='k', legend='data',legend_size=legend_size )\n plot1D( pfit,ax=ax, color='b', legend='ploy-fit', title=uid + '_imgsum',legend_size=legend_size )\n\n ax2 = fig.add_subplot(2,1,2 ) \n plot1D( data, ax = ax2,legend='difference',marker='s', color='b', ) \n\n #print('here')\n plot1D(x=[0,len(imgsum_)], y=[ymin,ymin], ax = ax2, ls='--',lw= 3, marker='o', color='r', legend='low_thresh', legend_size=legend_size )\n\n plot1D(x=[0,len(imgsum_)], y=[ymax,ymax], ax = ax2 , ls='--', lw= 3,marker='o', color='r',legend='high_thresh',title='imgsum_to_find_bad_frame',legend_size=legend_size )\n\n if path is not None:\n fp = path + '%s'%( uid ) + '_imgsum_analysis' + '.png' \n plt.savefig( fp, dpi=fig.dpi) \n \n\n \n bd2= list( np.where( np.abs(data -data.mean()) > scale *data.std() )[0] + good_start )\n \n if return_ylim:\n return np.array( bd1 + bd2 + bd3 ), ymin, ymax\n else:\n return np.array( bd1 + bd2 + bd3 )\n\ndef save_dict_csv( mydict, filename, mode='w'):\n import csv\n with open(filename, mode) as csv_file:\n spamwriter = csv.writer(csv_file) \n for key, value in mydict.items(): \n spamwriter.writerow([key, value])\n \n \n\ndef read_dict_csv( filename ): \n import csv\n with open(filename, 'r') as csv_file:\n reader = csv.reader(csv_file)\n mydict = dict(reader)\n return mydict\n\n\ndef find_bad_pixels( FD, bad_frame_list, uid='uid'):\n bpx = []\n bpy=[]\n for n in bad_frame_list:\n if n>= FD.beg and n<=FD.end:\n f = FD.rdframe(n)\n w = np.where( f == f.max())\n if len(w[0])==1:\n bpx.append( w[0][0] )\n bpy.append( w[1][0] )\n \n \n return trans_data_to_pd( [bpx,bpy], label=[ uid+'_x', uid +'_y' ], dtype='list')\n\n\n\n\n\ndef mask_exclude_badpixel( bp, mask, uid ):\n \n for i in range( len(bp)):\n mask[ int( bp[bp.columns[0]][i] ), int( bp[bp.columns[1]][i] )]=0 \n return mask\n\n\n\ndef print_dict( dicts, keys=None):\n '''\n print keys: values in a dicts\n if keys is None: print all the keys\n '''\n if keys is None:\n keys = list( dicts.keys())\n for k in keys: \n try:\n print('%s--> %s'%(k, dicts[k]) )\n except:\n pass\n \ndef get_meta_data( uid, default_dec = 'eiger', *argv,**kwargs ):\n '''\n Jan 25, 2018 add default_dec opt\n \n Y.G. 
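Dev Dec 8, 2016 (call sketch: md = get_meta_data( uid, default_dec='eiger', verbose=False ) suppresses the device printout)\n    Y.G. 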
Dev Dec 8, 2016\n \n Get metadata from a uid\n \n - Adds detector key with detector name\n \n Parameters:\n uid: the unique data acquisition id\n kwargs: overwrite the meta data, for example\n get_meta_data( uid = uid, sample = 'test') --> will overwrtie the meta's sample to test\n return:\n meta data of the uid: a dictionay\n with keys:\n detector \n suid: the simple given uid\n uid: full uid\n filename: the full path of the data\n start_time: the data acquisition starting time in a human readable manner\n And all the input metadata\n '''\n \n if 'verbose' in kwargs.keys(): # added: option to suppress output\n verbose= kwargs['verbose']\n else:\n verbose=True\n \n import time \n header = db[uid]\n md ={}\n \n md['suid'] = uid #short uid\n try:\n md['filename'] = get_sid_filenames(header)[2][0]\n except:\n md['filename'] = 'N.A.'\n\n devices = sorted( list(header.devices()) )\n if len(devices) > 1:\n if verbose: # added: mute output\n print( \"More than one device. This would have unintented consequences.Currently, only the device contains 'default_dec=%s'.\"%default_dec)\n #raise ValueError(\"More than one device. This would have unintented consequences.\")\n dec = devices[0] \n for dec_ in devices: \n if default_dec in dec_:\n dec = dec_\n \n #print(dec)\n #detector_names = sorted( header.start['detectors'] )\n detector_names = sorted( get_detectors(db[uid]) )\n #if len(detector_names) > 1:\n # raise ValueError(\"More than one det. This would have unintented consequences.\") \n detector_name = detector_names[0]\n #md['detector'] = detector_name\n md['detector'] = get_detector( header )\n #print( md['detector'] )\n new_dict = header.config_data(dec)['primary'][0]\n for key, val in new_dict.items():\n newkey = key.replace(detector_name+\"_\", \"\")\n md[newkey] = val\n \n # for k,v in ev['descriptor']['configuration'][dec]['data'].items():\n # md[ k[len(dec)+1:] ]= v\n \n try:\n md.update(header.start['plan_args'].items())\n md.pop('plan_args')\n except:\n pass\n md.update(header.start.items())\n \n \n # print(header.start.time)\n md['start_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(header.start['time']))\n md['stop_time'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime( header.stop['time']))\n try: # added: try to handle runs that don't contain image data\n if \"primary\" in header.v2:\n descriptor = header.v2[\"primary\"].descriptors[0]\n md['img_shape'] = descriptor['data_keys'][md['detector']]['shape'][:2][::-1]\n except:\n if verbose:\n print(\"couldn't find image shape...skip!\")\n else:\n pass\n md.update(kwargs)\n\n #for k, v in sorted(md.items()):\n # ...\n # print(f'{k}: {v}')\n \n return md\n\n\n\ndef get_max_countc(FD, labeled_array ):\n \"\"\"YG. 2016, Nov 18\n Compute the max intensity of ROIs in the compressed file (FD)\n\n Parameters\n ----------\n FD: Multifile class\n compressed file\n labeled_array : array\n labeled array; 0 is background.\n Each ROI is represented by a nonzero integer. It is not required that\n the ROI labels are contiguous\n index : int, list, optional\n The ROI's to use. 
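(A typical call, with FD a Multifile handler and roi_mask a labeled array: max_inten = get_max_countc( FD, roi_mask ).) 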
If None, this function will extract averages for all\n ROIs\n\n Returns\n -------\n max_intensity : a float\n index : list\n The labels for each element of the `mean_intensity` list\n \"\"\"\n \n qind, pixelist = roi.extract_label_indices( labeled_array ) \n timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 ) \n timg[pixelist] = np.arange( 1, len(pixelist) + 1 ) \n \n if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']):\n raise ValueError(\n \" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)\" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) )\n\n max_inten =0 \n for i in tqdm(range( FD.beg, FD.end, 1 ), desc= 'Get max intensity of ROIs in all frames' ):\n try:\n (p,v) = FD.rdrawframe(i)\n w = np.where( timg[p] )[0]\n max_inten = max( max_inten, np.max(v[w]) ) \n except:\n pass\n return max_inten\n\n\ndef create_polygon_mask( image, xcorners, ycorners ):\n '''\n Give image and x/y coners to create a polygon mask \n image: 2d array\n xcorners, list, points of x coners\n ycorners, list, points of y coners\n Return:\n the polygon mask: 2d array, the polygon pixels with values 1 and others with 0\n \n Example:\n \n \n '''\n from skimage.draw import line_aa, line, polygon, circle \n imy, imx = image.shape \n bst_mask = np.zeros_like( image , dtype = bool) \n rr, cc = polygon( ycorners,xcorners)\n bst_mask[rr,cc] =1 \n #full_mask= ~bst_mask \n return bst_mask\n \n \ndef create_rectangle_mask( image, xcorners, ycorners ):\n '''\n Give image and x/y coners to create a rectangle mask \n image: 2d array\n xcorners, list, points of x coners\n ycorners, list, points of y coners\n Return:\n the polygon mask: 2d array, the polygon pixels with values 1 and others with 0\n \n Example:\n \n \n '''\n from skimage.draw import line_aa, line, polygon, circle \n imy, imx = image.shape \n bst_mask = np.zeros_like( image , dtype = bool) \n rr, cc = polygon( ycorners,xcorners)\n bst_mask[rr,cc] =1 \n #full_mask= ~bst_mask \n return bst_mask\n \n \ndef create_multi_rotated_rectangle_mask( image, center=None, length=100, width=50, angles=[0] ):\n ''' Developed at July 10, 2017 by Y.G.@CHX, NSLS2\n Create multi rectangle-shaped mask by rotating a rectangle with a list of angles\n The original rectangle is defined by four corners, i.e., \n [ (center[1] - width//2, center[0]),\n (center[1] + width//2, center[0]), \n (center[1] + width//2, center[0] + length),\n (center[1] - width//2, center[0] + length)\n ]\n \n Parameters:\n image: 2D numpy array, to give mask shape \n center: integer list, if None, will be the center of the image\n length: integer, the length of the non-ratoted rectangle \n width: integer, the width of the non-ratoted rectangle \n angles: integer list, a list of rotated angles\n\n Return:\n mask: 2D bool-type numpy array\n '''\n \n from skimage.draw import polygon\n from skimage.transform import rotate \n cx,cy = center\n imy, imx = image.shape \n mask = np.zeros( image.shape, dtype = bool) \n wy = length\n wx = width\n x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) \n y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ])\n rr, cc = polygon( y,x)\n mask[rr,cc] =1\n mask_rot= np.zeros( image.shape, dtype = bool) \n for angle in angles:\n mask_rot += np.array( rotate( mask, angle, center= center ), dtype=bool) #, preserve_range=True) \n return ~mask_rot\n \ndef create_wedge( image, center, radius, wcors, acute_angle=True) :\n '''YG develop at June 18, 2017, 
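@CHX\n    A call sketch (all values illustrative): wedge = create_wedge( avg_img, center=[cy,cx], radius=300, wcors=[ [x1,x2,x3], [y1,y2,y3] ] )\n    developed 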
@CHX\n Create a wedge by a combination of circle and a triangle defined by center and wcors\n wcors: [ [x1,x2,x3...], [y1,y2,y3..]\n \n '''\n from skimage.draw import line_aa, line, polygon, circle\n imy, imx = image.shape \n cy,cx = center\n x = [cx] + list(wcors[0])\n y = [cy] + list(wcors[1])\n \n maskc = np.zeros_like( image , dtype = bool) \n rr, cc = circle( cy, cx, radius, shape = image.shape)\n maskc[rr,cc] =1 \n \n maskp = np.zeros_like( image , dtype = bool)\n x = np.array( x ) \n y = np.array( y ) \n print(x,y)\n rr, cc = polygon( y,x)\n maskp[rr,cc] =1\n if acute_angle:\n return maskc*maskp\n else:\n return maskc*~maskp\n \n \n\ndef create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4,\n center_circle = True, center_radius=10\n ):\n '''\n Give image and the beam center to create a cross-shaped mask\n wy_left: the width of left h-line\n wy_right: the width of rigth h-line\n wx_up: the width of up v-line\n wx_down: the width of down v-line\n center_circle: if True, create a circle with center and center_radius\n \n Return:\n the cross mask\n '''\n from skimage.draw import line_aa, line, polygon, circle\n \n imy, imx = image.shape \n cx,cy = center\n bst_mask = np.zeros_like( image , dtype = bool) \n ###\n #for right part \n wy = wy_right\n x = np.array( [ cx, imx, imx, cx ]) \n y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy])\n rr, cc = polygon( y,x)\n bst_mask[rr,cc] =1\n \n ###\n #for left part \n wy = wy_left\n x = np.array( [0, cx, cx,0 ]) \n y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy])\n rr, cc = polygon( y,x)\n bst_mask[rr,cc] =1 \n \n ###\n #for up part \n wx = wx_up\n x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) \n y = np.array( [ cy, cy, imy, imy])\n rr, cc = polygon( y,x)\n bst_mask[rr,cc] =1 \n \n ###\n #for low part \n wx = wx_down\n x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) \n y = np.array( [ 0,0, cy, cy])\n rr, cc = polygon( y,x)\n bst_mask[rr,cc] =1 \n \n if center_radius!=0:\n rr, cc = circle( cy, cx, center_radius, shape = bst_mask.shape)\n bst_mask[rr,cc] =1 \n \n \n full_mask= ~bst_mask\n \n return full_mask\n \n \n \n\n\ndef generate_edge( centers, width):\n '''YG. 10/14/2016\n give centers and width (number or list) to get edges'''\n edges = np.zeros( [ len(centers),2])\n edges[:,0] = centers - width\n edges[:,1] = centers + width\n return edges\n\n\ndef export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'],\n path='/XF11ID/analysis/2016_3/commissioning/Results/' ):\n '''YG. 
10/17/2016\n export uid data to a txt file\n uid: unique scan id\n x: the x-col \n y: the y-cols\n path: save path\n Example:\n data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'],\n path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' )\n A plot for the data:\n d.plot(x='dcm_b', y = 'xray_eye1_stats1_total', marker='o', ls='-', color='r')\n \n '''\n from databroker import DataBroker as db\n from pyCHX.chx_generic_functions import trans_data_to_pd\n \n hdr = db[uid]\n print(hdr.fields())\n data = db[uid].table()\n xp = data[x]\n datap = np.zeros( [len(xp), len(y)+1])\n datap[:,0] = xp\n for i, yi in enumerate(y):\n datap[:,i+1] = data[yi]\n \n datap = trans_data_to_pd( datap, label=[x] + [yi for yi in y]) \n datap.to_csv( path + 'uid=%s.csv'%uid)\n return datap\n\n\n\n\n#####\n#load data by databroker \n\ndef get_flatfield( uid, reverse=False ):\n import h5py\n detector = get_detector( db[uid ] )\n sud = get_sid_filenames(db[uid])\n master_path = '%s_master.h5'%(sud[2][0])\n print( master_path)\n f= h5py.File(master_path, 'r')\n k= 'entry/instrument/detector/detectorSpecific/' #data_collection_date'\n d= np.array( f[ k]['flatfield'] )\n f.close()\n if reverse: \n d = reverse_updown( d )\n \n return d\n\n\n\ndef get_detector( header ):\n '''Get the first detector image string by giving header '''\n keys = get_detectors(header)\n for k in keys:\n if 'eiger' in k:\n return k\n\ndef get_detectors( header ):\n '''Get all the detector image strings by giving header '''\n if \"primary\" in header.v2:\n descriptor = header.v2[\"primary\"].descriptors[0]\n keys = [k for k, v in descriptor['data_keys'].items() if 'external' in v]\n return sorted(set(keys))\n return []\n\ndef get_full_data_path( uid ):\n '''A dirty way to get full data path'''\n header = db[uid]\n d = header.db\n s = list(d.get_documents( db[uid ]))\n #print(s[2])\n p = s[2][1]['resource_path']\n p2 = s[3][1]['datum_kwargs']['seq_id']\n #print(p,p2)\n return p + '_' + str(p2) + '_master.h5'\n \n \n \ndef get_sid_filenames(header):\n \"\"\"YG. Dev Jan, 2016\n Get a bluesky scan_id, unique_id, filename by giveing uid\n \n Parameters\n ----------\n header: a header of a bluesky scan, e.g. 
db[-1]\n \n Returns\n -------\n scan_id: integer\n unique_id: string, a full string of a uid\n filename: sring\n \n Usuage:\n sid,uid, filenames = get_sid_filenames(db[uid])\n \n \"\"\" \n from collections import defaultdict\n from glob import glob\n from pathlib import Path\n\n filepaths = []\n resources = {} # uid: document\n datums = defaultdict(list) # uid: List(document)\n for name, doc in header.documents():\n if name == \"resource\":\n resources[doc[\"uid\"]] = doc\n elif name == \"datum\":\n datums[doc[\"resource\"]].append(doc)\n elif name == \"datum_page\":\n for datum in event_model.unpack_datum_page(doc):\n datums[datum[\"resource\"]].append(datum)\n for resource_uid, resource in resources.items():\n file_prefix = Path(resource.get('root', '/'), resource[\"resource_path\"])\n if 'eiger' not in resource['spec'].lower():\n continue\n for datum in datums[resource_uid]:\n dm_kw = datum[\"datum_kwargs\"]\n seq_id = dm_kw['seq_id']\n new_filepaths = glob(f'{file_prefix!s}_{seq_id}*')\n filepaths.extend(new_filepaths)\n return header.start['scan_id'], header.start['uid'], filepaths\n\ndef load_data(uid, detector='eiger4m_single_image', fill=True, reverse=False, rot90=False):\n \"\"\"load bluesky scan data by giveing uid and detector\n \n Parameters\n ----------\n uid: unique ID of a bluesky scan\n detector: the used area detector\n fill: True to fill data\n reverse: if True, reverse the image upside down to match the \"real\" image geometry (should always be True in the future)\n \n Returns\n -------\n image data: a pims frames series\n if not success read the uid, will return image data as 0\n \n Usuage:\n imgs = load_data( uid, detector )\n md = imgs.md\n \"\"\" \n hdr = db[uid]\n \n if False:\n ATTEMPTS = 0\n for attempt in range(ATTEMPTS):\n try:\n ev, = hdr.events(fields=[detector], fill=fill) \n break\n\n except Exception: \n print ('Trying again ...!')\n if attempt == ATTEMPTS - 1:\n # We're out of attempts. 
Raise the exception to help with debugging.\n raise\n else:\n # We didn't succeed\n raise Exception(\"Failed after {} repeated attempts\".format(ATTEMPTS))\n\n # TODO(mrakitin): replace with the lazy loader (when it's implemented):\n imgs = list(hdr.data(detector))\n\n if len(imgs[0])>=1:\n md = imgs[0].md\n imgs = pims.pipeline(lambda img: img)(imgs[0])\n imgs.md = md\n\n if reverse:\n md = imgs.md\n imgs = reverse_updown( imgs ) # Why not np.flipud?\n imgs.md = md\n \n if rot90:\n md = imgs.md\n imgs = rot90_clockwise( imgs ) # Why not np.flipud?\n imgs.md = md \n \n return imgs\n\n\ndef mask_badpixels( mask, detector ):\n '''\n Mask known bad pixel from the giveing mask\n \n '''\n if detector =='eiger1m_single_image':\n #to be determined\n mask = mask\n elif detector =='eiger4m_single_image' or detector == 'image': \n mask[513:552,:] =0 \n mask[1064:1103,:] =0 \n mask[1615:1654,:] =0 \n mask[:,1029:1041] = 0 \n mask[:, 0] =0 \n mask[0:, 2069] =0 \n mask[0] =0 \n mask[2166] =0 \n\n elif detector =='eiger500K_single_image':\n #to be determined\n mask = mask\n else:\n mask = mask \n return mask\n \n \n \n\n\ndef load_data2( uid , detector = 'eiger4m_single_image' ):\n \"\"\"load bluesky scan data by giveing uid and detector\n \n Parameters\n ----------\n uid: unique ID of a bluesky scan\n detector: the used area detector\n \n Returns\n -------\n image data: a pims frames series\n if not success read the uid, will return image data as 0\n \n Usuage:\n imgs = load_data( uid, detector )\n md = imgs.md\n \"\"\" \n hdr = db[uid]\n flag =1\n while flag<4 and flag !=0: \n try:\n ev, = hdr.events(fields=[detector])\n flag =0 \n except:\n flag += 1 \n print ('Trying again ...!')\n\n if flag:\n print (\"Can't Load Data!\")\n uid = '00000' #in case of failling load data\n imgs = 0\n else:\n imgs = ev['data'][detector]\n\n #print (imgs)\n return imgs\n\n\n\ndef psave_obj(obj, filename ):\n '''save an object with filename by pickle.dump method\n This function automatically add '.pkl' as filename extension\n Input:\n obj: the object to be saved\n filename: filename (with full path) to be saved\n Return:\n None \n '''\n with open( filename + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef pload_obj(filename ):\n '''load a pickled filename \n This function automatically add '.pkl' to filename extension\n Input: \n filename: filename (with full path) to be saved\n Return:\n load the object by pickle.load method\n '''\n with open( filename + '.pkl', 'rb') as f:\n return pickle.load(f)\n \n \n \ndef load_mask( path, mask_name, plot_ = False, reverse=False, rot90=False, *argv,**kwargs): \n \n \"\"\"load a mask file\n the mask is a numpy binary file (.npy) \n \n Parameters\n ----------\n path: the path of the mask file\n mask_name: the name of the mask file\n plot_: a boolen type\n reverse: if True, reverse the image upside down to match the \"real\" image geometry (should always be True in the future) \n Returns\n -------\n mask: array\n if plot_ =True, will show the mask \n \n Usuage:\n mask = load_mask( path, mask_name, plot_ = True )\n \"\"\"\n \n mask = np.load( path + mask_name )\n mask = np.array(mask, dtype = np.int32)\n if reverse:\n mask = mask[::-1,:] \n if rot90:\n mask = np.rot90( mask )\n if plot_:\n show_img( mask, *argv,**kwargs) \n return mask\n\n\n\ndef create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_radius=0 ):\n '''create a hot pixel mask by giving threshold\n Input:\n img: the image to create hot pixel mask\n threshold: the threshold 
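above which a pixel is flagged as hot. A minimal, hypothetical call\n            (all numbers made up for illustration):\n                hmask = create_hot_pixel_mask( img, threshold=1e6, center=[100,120], center_radius=50 )\n            i.e., the intensity 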
above which will be considered as hot pixels\n center: optional, default=None\n else, as a two-element list (beam center), i.e., [center_x, center_y]\n if center is not None, the hot pixel will not include a circle region \n which is defined by center and center_radius ( in unit of pixel)\n Output:\n a bool types numpy array (mask), 1 is good and 0 is excluded \n \n '''\n bst_mask = np.ones_like( img , dtype = bool) \n if center is not None: \n from skimage.draw import circle \n imy, imx = img.shape \n cy,cx = center \n rr, cc = circle( cy, cx, center_radius,shape=img.shape )\n bst_mask[rr,cc] =0 \n if outer_radius:\n bst_mask = np.zeros_like( img , dtype = bool) \n rr2, cc2 = circle( cy, cx, outer_radius,shape=img.shape )\n bst_mask[rr2,cc2] =1\n bst_mask[rr,cc] =0 \n hmask = np.ones_like( img )\n hmask[np.where( img * bst_mask > threshold)]=0\n return hmask\n\n\n\n\ndef apply_mask( imgs, mask):\n '''apply mask to imgs to produce a generator\n \n Usuages:\n imgsa = apply_mask( imgs, mask )\n good_series = apply_mask( imgs[good_start:], mask )\n \n '''\n return pims.pipeline(lambda img: np.int_(mask) * img)(imgs) # lazily apply mask\n\n\ndef reverse_updown( imgs):\n '''reverse imgs upside down to produce a generator\n \n Usuages:\n imgsr = reverse_updown( imgs)\n \n \n '''\n return pims.pipeline(lambda img: img[::-1,:])(imgs) # lazily apply mask\n\ndef rot90_clockwise( imgs):\n '''reverse imgs upside down to produce a generator\n \n Usuages:\n imgsr = rot90_clockwise( imgs) \n \n '''\n return pims.pipeline(lambda img: np.rot90(img) )(imgs) # lazily apply mask\n\ndef RemoveHot( img,threshold= 1E7, plot_=True ):\n '''Remove hot pixel from img'''\n \n mask = np.ones_like( np.array( img ) )\n badp = np.where( np.array(img) >= threshold )\n if len(badp[0])!=0: \n mask[badp] = 0 \n if plot_:\n show_img( mask )\n return mask\n\n\n############\n###plot data\n\ndef show_img( image, ax=None,label_array=None, alpha=0.5, interpolation='nearest',\n xlim=None, ylim=None, save=False,image_name=None,path=None, \n aspect=None, logs=False,vmin=None,vmax=None,return_fig=False,cmap='viridis', \n show_time= False, file_name =None, ylabel=None, xlabel=None, extent=None,\n show_colorbar=True, tight=True, show_ticks=True, save_format = 'png', dpi= None,\n center=None,origin='lower', lab_fontsize = 16, tick_size = 12, colorbar_fontsize = 8, \n use_mat_imshow=False,\n *argv,**kwargs ): \n \"\"\"YG. 
Sep26, 2017 Add label_array/alpha option to show a mask on top of image\n \n a simple function to show image by using matplotlib.plt imshow\n pass *argv,**kwargs to imshow\n \n Parameters\n ----------\n image : array\n Image to show\n Returns\n -------\n None\n \"\"\" \n if ax is None:\n if RUN_GUI:\n fig = Figure()\n ax = fig.add_subplot(111)\n else:\n fig, ax = plt.subplots()\n else:\n fig, ax=ax\n\n \n if center is not None:\n plot1D(center[1],center[0],ax=ax, c='b', m='o', legend='')\n if not logs:\n if not use_mat_imshow:\n im=imshow(ax, image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax,\n extent=extent) #vmin=0,vmax=1,\n else:\n im=ax.imshow( image, origin=origin,cmap=cmap,interpolation=interpolation, vmin=vmin,vmax=vmax,\n extent=extent) #vmin=0,vmax=1, \n else:\n if not use_mat_imshow:\n im=imshow(ax, image, origin=origin,cmap=cmap,\n interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) \n else:\n im=ax.imshow(image, origin=origin,cmap=cmap,\n interpolation=interpolation, norm=LogNorm(vmin, vmax),extent=extent) \n if label_array is not None:\n im2=show_label_array(ax, label_array, alpha= alpha, cmap=cmap, interpolation=interpolation ) \n \n ax.set_title( image_name )\n if xlim is not None:\n ax.set_xlim( xlim ) \n if ylim is not None:\n ax.set_ylim( ylim )\n \n if not show_ticks:\n ax.set_yticks([])\n ax.set_xticks([]) \n else: \n \n ax.tick_params(axis='both', which='major', labelsize=tick_size )\n ax.tick_params(axis='both', which='minor', labelsize=tick_size )\n #mpl.rcParams['xtick.labelsize'] = tick_size \n #mpl.rcParams['ytick.labelsize'] = tick_size\n #print(tick_size)\n \n if ylabel is not None:\n #ax.set_ylabel(ylabel)#, fontsize = 9)\n ax.set_ylabel( ylabel , fontsize = lab_fontsize )\n if xlabel is not None:\n ax.set_xlabel(xlabel , fontsize = lab_fontsize ) \n \n if aspect is not None:\n #aspect = image.shape[1]/float( image.shape[0] )\n ax.set_aspect(aspect)\n else:\n ax.set_aspect(aspect='auto')\n \n if show_colorbar:\n cbar = fig.colorbar(im, extend='neither', spacing='proportional',\n orientation='vertical' )\n cbar.ax.tick_params(labelsize=colorbar_fontsize) \n fig.set_tight_layout(tight) \n if save:\n if show_time:\n dt =datetime.now()\n CurTime = '_%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) \n fp = path + '%s'%( file_name ) + CurTime + '.' + save_format\n else:\n fp = path + '%s'%( image_name ) + '.' 
+ save_format\n if dpi is None:\n dpi = fig.dpi\n plt.savefig( fp, dpi= dpi) \n #fig.set_tight_layout(tight) \n if return_fig:\n return im #fig\n \n\n \n \ndef plot1D( y,x=None, yerr=None, ax=None,return_fig=False, ls='-', figsize=None,legend=None,\n legend_size=None, lw=None, markersize=None, tick_size=8, *argv,**kwargs): \n \"\"\"a simple function to plot two-column data by using matplotlib.plot\n pass *argv,**kwargs to plot\n \n Parameters\n ----------\n y: column-y\n x: column-x, by default x=None, the plot will use index of y as x-axis\n the other paramaters are defined same as plt.plot\n Returns\n -------\n None\n \"\"\" \n if ax is None:\n if RUN_GUI:\n fig = Figure()\n ax = fig.add_subplot(111)\n else:\n if figsize is not None:\n fig, ax = plt.subplots(figsize=figsize)\n else:\n fig, ax = plt.subplots()\n \n if legend is None:\n legend = ' '\n try:\n logx = kwargs['logx']\n except:\n logx=False\n try:\n logy = kwargs['logy']\n except:\n logy=False\n \n try:\n logxy = kwargs['logxy']\n except:\n logxy= False \n\n if logx==True and logy==True:\n logxy = True\n \n try:\n marker = kwargs['marker'] \n except:\n try:\n marker = kwargs['m'] \n except: \n marker= next( markers_ )\n try:\n color = kwargs['color']\n except: \n try:\n color = kwargs['c']\n except: \n color = next( colors_ ) \n \n if x is None:\n x=range(len(y))\n if yerr is None: \n ax.plot(x,y, marker=marker,color=color,ls=ls,label= legend, lw=lw,\n markersize=markersize, )#,*argv,**kwargs)\n else:\n ax.errorbar(x,y,yerr, marker=marker,color=color,ls=ls,label= legend, \n lw=lw,markersize=markersize,)#,*argv,**kwargs) \n if logx:\n ax.set_xscale('log')\n if logy:\n ax.set_yscale('log')\n if logxy:\n ax.set_xscale('log')\n ax.set_yscale('log')\n \n \n ax.tick_params(axis='both', which='major', labelsize=tick_size )\n ax.tick_params(axis='both', which='minor', labelsize=tick_size )\n \n if 'xlim' in kwargs.keys():\n ax.set_xlim( kwargs['xlim'] ) \n if 'ylim' in kwargs.keys():\n ax.set_ylim( kwargs['ylim'] )\n if 'xlabel' in kwargs.keys(): \n ax.set_xlabel(kwargs['xlabel'])\n if 'ylabel' in kwargs.keys(): \n ax.set_ylabel(kwargs['ylabel'])\n \n if 'title' in kwargs.keys():\n title = kwargs['title']\n else:\n title = 'plot'\n ax.set_title( title ) \n #ax.set_xlabel(\"$Log(q)$\"r'($\\AA^{-1}$)') \n if (legend!='') and (legend!=None):\n ax.legend(loc = 'best', fontsize=legend_size )\n if 'save' in kwargs.keys():\n if kwargs['save']: \n #dt =datetime.now()\n #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) \n #fp = kwargs['path'] + '%s'%( title ) + CurTime + '.png' \n fp = kwargs['path'] + '%s'%( title ) + '.png' \n plt.savefig( fp, dpi=fig.dpi) \n if return_fig:\n return fig \n\n \n###\n\ndef check_shutter_open( data_series, min_inten=0, time_edge = [0,10], plot_ = False, *argv,**kwargs): \n \n '''Check the first frame with shutter open\n \n Parameters \n ----------\n data_series: a image series\n min_inten: the total intensity lower than min_inten is defined as shtter close\n time_edge: the searching frame number range\n \n return:\n shutter_open_frame: a integer, the first frame number with open shutter \n \n Usuage:\n good_start = check_shutter_open( imgsa, min_inten=5, time_edge = [0,20], plot_ = False )\n \n '''\n imgsum = np.array( [np.sum(img ) for img in data_series[time_edge[0]:time_edge[1]:1]] ) \n if plot_:\n fig, ax = plt.subplots() \n ax.plot(imgsum,'bo')\n ax.set_title('uid=%s--imgsum'%uid)\n ax.set_xlabel( 'Frame' )\n ax.set_ylabel( 'Total_Intensity' ) \n #plt.show() \n 
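\n    # (added note) the open-shutter frame is simply the first sampled frame whose\n    # summed intensity exceeds min_inten; e.g., a hypothetical imgsum of\n    # [0, 0, 7, 9] with min_inten=5 gives shutter_open_frame = 2\n   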
shutter_open_frame = np.where( np.array(imgsum) > min_inten )[0][0]\n print ('The first frame with open shutter is : %s'%shutter_open_frame )\n return shutter_open_frame\n\n\n\ndef get_each_frame_intensity( data_series, sampling = 50, \n bad_pixel_threshold=1e10, \n plot_ = False, save= False, *argv,**kwargs): \n '''Get the total intensity of each frame by sampling every N frames\n Also get bad_frame_list by check whether above bad_pixel_threshold \n \n Usuage:\n imgsum, bad_frame_list = get_each_frame_intensity(good_series ,sampling = 1000, \n bad_pixel_threshold=1e10, plot_ = True)\n '''\n \n #print ( argv, kwargs )\n imgsum = np.array( [np.sum(img ) for img in tqdm( data_series[::sampling] , leave = True ) ] ) \n if plot_:\n uid = 'uid'\n if 'uid' in kwargs.keys():\n uid = kwargs['uid'] \n fig, ax = plt.subplots() \n ax.plot(imgsum,'bo')\n ax.set_title('uid= %s--imgsum'%uid)\n ax.set_xlabel( 'Frame_bin_%s'%sampling )\n ax.set_ylabel( 'Total_Intensity' )\n if save:\n #dt =datetime.now()\n #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) \n path = kwargs['path'] \n if 'uid' in kwargs:\n uid = kwargs['uid']\n else:\n uid = 'uid'\n #fp = path + \"Uid= %s--Waterfall-\"%uid + CurTime + '.png' \n fp = path + \"uid=%s--imgsum-\"%uid + '.png' \n fig.savefig( fp, dpi=fig.dpi) \n #plt.show() \n \n bad_frame_list = np.where( np.array(imgsum) > bad_pixel_threshold )[0]\n if len(bad_frame_list):\n print ('Bad frame list are: %s' %bad_frame_list)\n else:\n print ('No bad frames are involved.')\n return imgsum,bad_frame_list\n\n\n \n\ndef create_time_slice( N, slice_num, slice_width, edges=None ):\n '''create a ROI time regions '''\n if edges is not None:\n time_edge = edges\n else:\n if slice_num==1:\n time_edge = [ [0,N] ]\n else:\n tstep = N // slice_num\n te = np.arange( 0, slice_num +1 ) * tstep\n tc = np.int_( (te[:-1] + te[1:])/2 )[1:-1]\n if slice_width%2:\n sw = slice_width//2 +1\n time_edge = [ [0,slice_width], ] + [ [s-sw+1,s+sw] for s in tc ] + [ [N-slice_width,N]]\n else:\n sw= slice_width//2\n time_edge = [ [0,slice_width], ] + [ [s-sw,s+sw] for s in tc ] + [ [N-slice_width,N]]\n \n \n\n return np.array(time_edge)\n\n\ndef show_label_array(ax, label_array, cmap=None, aspect=None,interpolation='nearest',**kwargs):\n \"\"\"\n YG. Sep 26, 2017\n Modified show_label_array(ax, label_array, cmap=None, **kwargs)\n from https://github.com/Nikea/xray-vision/blob/master/xray_vision/mpl_plotting/roi.py\n Display a labeled array nicely\n Additional kwargs are passed through to `ax.imshow`.\n If `vmin` is in kwargs, it is clipped to minimum of 0.5.\n Parameters\n ----------\n ax : Axes\n The `Axes` object to add the artist too\n label_array: ndarray\n Expected to be an unsigned integer array. 
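A tiny, hypothetical label array such as\n        np.array([[0, 0, 1], [0, 2, 2]]) would draw two ROIs (labels 1 and 2);\n        by convention 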
0 is background,\n positive integers label region of interest\n cmap : str or colormap, optional\n Color map to use, defaults to 'Paired'\n Returns\n -------\n img : AxesImage\n The artist added to the axes\n \"\"\"\n if cmap is None:\n cmap = 'viridis'\n #print(cmap)\n _cmap = copy.copy((mcm.get_cmap(cmap)))\n _cmap.set_under('w', 0)\n vmin = max(.5, kwargs.pop('vmin', .5))\n im = ax.imshow(label_array, cmap=cmap,\n interpolation=interpolation,\n vmin=vmin,\n **kwargs) \n if aspect is None:\n ax.set_aspect(aspect='auto')\n #ax.set_aspect('equal')\n return im\n\n\n\ndef show_label_array_on_image(ax, image, label_array, cmap=None,norm=None, log_img=True,alpha=0.3, vmin=0.1, vmax=5,\n imshow_cmap='gray', **kwargs): #norm=LogNorm(), \n \"\"\"\n This will plot the required ROI's(labeled array) on the image\n \n Additional kwargs are passed through to `ax.imshow`.\n If `vmin` is in kwargs, it is clipped to minimum of 0.5.\n Parameters\n ----------\n ax : Axes\n The `Axes` object to add the artist too\n image : array\n The image array\n label_array : array\n Expected to be an unsigned integer array. 0 is background,\n positive integers label region of interest\n cmap : str or colormap, optional\n Color map to use for plotting the label_array, defaults to 'None'\n imshow_cmap : str or colormap, optional\n Color map to use for plotting the image, defaults to 'gray'\n norm : str, optional\n Normalize scale data, defaults to 'Lognorm()'\n Returns\n -------\n im : AxesImage\n The artist added to the axes\n im_label : AxesImage\n The artist added to the axes\n \"\"\"\n ax.set_aspect('equal')\n \n #print (vmin, vmax )\n if log_img:\n im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',norm=LogNorm(vmin, vmax),**kwargs) #norm=norm,\n else:\n im = ax.imshow(image, cmap=imshow_cmap, interpolation='none',vmin=vmin, vmax=vmax,**kwargs) #norm=norm,\n \n im_label = mpl_plot.show_label_array(ax, label_array, cmap=cmap, vmin=vmin, vmax=vmax, alpha=alpha,\n **kwargs) # norm=norm,\n \n \n return im, im_label \n \n \n \ndef show_ROI_on_image( image, ROI, center=None, rwidth=400,alpha=0.3, label_on = True,\n save=False, return_fig = False, rect_reqion=None, log_img = True, vmin=0.01, vmax=5, \n show_ang_cor = False,cmap = cmap_albula, fig_ax=None,\n uid='uid', path='', aspect = 1, show_colorbar=True, show_roi_edge=False, *argv,**kwargs):\n \n '''show ROI on an image\n image: the data frame\n ROI: the interested region\n center: the plot center\n rwidth: the plot range around the center \n \n '''\n\n \n if RUN_GUI:\n fig = Figure(figsize=(8,8))\n axes = fig.add_subplot(111)\n elif fig_ax is not None:\n fig, axes = fig_ax\n else:\n fig, axes = plt.subplots( ) #plt.subplots(figsize=(8,8))\n \n #print( vmin, vmax)\n #norm=LogNorm(vmin, vmax)\n \n axes.set_title( \"%s_ROI_on_Image\"%uid )\n if log_img:\n if vmin==0:\n vmin += 1e-10\n \n vmax = max(1, vmax ) \n if not show_roi_edge: \n #print('here')\n im,im_label = show_label_array_on_image(axes, image, ROI, imshow_cmap='viridis',\n cmap=cmap,alpha=alpha, log_img=log_img,\n vmin=vmin, vmax=vmax, origin=\"lower\")\n else:\n edg = get_image_edge( ROI )\n image_ = get_image_with_roi( image, ROI, scale_factor = 2) \n #fig, axes = plt.subplots( ) \n show_img( image_, ax=[fig,axes], vmin=vmin, vmax=vmax, \n logs= log_img, image_name= \"%s_ROI_on_Image\"%uid,\n cmap = cmap )\n \n \n if rect_reqion is None:\n if center is not None:\n x1,x2 = [center[1] - rwidth, center[1] + rwidth]\n y1,y2 = [center[0] - rwidth, center[0] + rwidth]\n axes.set_xlim( [x1,x2])\n 
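\n            # (added) zoom to a (2*rwidth x 2*rwidth) pixel window around the given\n            # center, e.g. a hypothetical center=[1024,1024] with the default\n            # rwidth=400 shows the region [624, 1424] in both x and y\n           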
axes.set_ylim( [y1,y2])\n else:\n x1,x2,y1,y2= rect_reqion\n axes.set_xlim( [x1,x2])\n axes.set_ylim( [y1,y2])\n \n if label_on:\n num_qzr = len(np.unique( ROI )) -1 \n for i in range( 1, num_qzr + 1 ):\n ind = np.where( ROI == i)[1]\n indz = np.where( ROI == i)[0]\n c = '%i'%i\n y_val = int( indz.mean() )\n x_val = int( ind.mean() )\n #print (xval, y)\n axes.text(x_val, y_val, c, color='b',va='center', ha='center') \n if show_ang_cor:\n axes.text(-0.0, 0.5, '-/+180' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes)\n axes.text(1.0, 0.5, '0' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes)\n axes.text(0.5, -0.0, '-90'+ r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes)\n axes.text(0.5, 1.0, '90' + r'$^0$', color='r', va='center', ha='center',transform=axes.transAxes) \n\n axes.set_aspect(aspect)\n #fig.colorbar(im_label)\n if show_colorbar:\n if not show_roi_edge: \n fig.colorbar(im)\n if save: \n fp = path + \"%s_ROI_on_Image\"%uid + '.png' \n plt.savefig( fp, dpi=fig.dpi) \n #plt.show()\n if return_fig:\n return fig, axes, im \n\n \n\n \ndef crop_image( image, crop_mask ):\n \n ''' Crop the non_zeros pixels of an image to a new image \n \n \n '''\n from skimage.util import crop, pad \n pxlst = np.where(crop_mask.ravel())[0]\n dims = crop_mask.shape\n imgwidthy = dims[1] #dimension in y, but in plot being x\n imgwidthx = dims[0] #dimension in x, but in plot being y\n #x and y are flipped???\n #matrix notation!!!\n pixely = pxlst%imgwidthy\n pixelx = pxlst//imgwidthy\n\n minpixelx = np.min(pixelx)\n minpixely = np.min(pixely)\n maxpixelx = np.max(pixelx)\n maxpixely = np.max(pixely) \n crops = crop_mask*image\n img_crop = crop( crops, ((minpixelx, imgwidthx - maxpixelx -1 ),\n (minpixely, imgwidthy - maxpixely -1 )) )\n return img_crop\n \n\ndef get_avg_img( data_series, img_samp_index=None, sampling = 100, plot_ = False , save=False, *argv,**kwargs): \n '''Get average imagef from a data_series by every sampling number to save time'''\n if img_samp_index is None:\n avg_img = np.average(data_series[:: sampling], axis=0)\n else:\n avg_img = np.zeros_like( data_series[0] )\n n=0\n for i in img_samp_index:\n avg_img += data_series[i]\n n +=1\n avg_img = np.array( avg_img) / n\n \n if plot_:\n fig, ax = plt.subplots()\n uid = 'uid'\n if 'uid' in kwargs.keys():\n uid = kwargs['uid'] \n \n im = ax.imshow(avg_img , cmap='viridis',origin='lower',\n norm= LogNorm(vmin=0.001, vmax=1e2))\n #ax.set_title(\"Masked Averaged Image\")\n ax.set_title('uid= %s--Masked Averaged Image'%uid)\n fig.colorbar(im)\n \n if save:\n #dt =datetime.now()\n #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) \n path = kwargs['path'] \n if 'uid' in kwargs:\n uid = kwargs['uid']\n else:\n uid = 'uid'\n #fp = path + \"uid= %s--Waterfall-\"%uid + CurTime + '.png' \n fp = path + \"uid=%s--avg-img-\"%uid + '.png' \n fig.savefig( fp, dpi=fig.dpi) \n #plt.show()\n\n return avg_img\n\n\n\ndef check_ROI_intensity( avg_img, ring_mask, ring_number=3 , save=False, plot=True, *argv,**kwargs):\n \n \"\"\"plot intensity versus pixel of a ring \n Parameters\n ----------\n avg_img: 2D-array, the image\n ring_mask: 2D-array \n ring_number: which ring to plot\n \n Returns\n -------\n\n \n \"\"\" \n #print('here')\n \n uid = 'uid'\n if 'uid' in kwargs.keys():\n uid = kwargs['uid'] \n pixel = roi.roi_pixel_values(avg_img, ring_mask, [ring_number] )\n \n if plot:\n fig, ax = plt.subplots()\n ax.set_title('%s--check-RIO-%s-intensity'%(uid, 
ring_number) )\n ax.plot( pixel[0][0] ,'bo', ls='-' )\n ax.set_ylabel('Intensity')\n ax.set_xlabel('pixel')\n if save: \n path = kwargs['path'] \n fp = path + \"%s_Mean_intensity_of_one_ROI\"%uid + '.png' \n fig.savefig( fp, dpi=fig.dpi)\n if save:\n path = kwargs['path'] \n save_lists( [range( len( pixel[0][0] )), pixel[0][0]], label=['pixel_list', 'roi_intensity'],\n filename=\"%s_Mean_intensity_of_one_ROI\"%uid, path= path) \n #plt.show()\n return pixel[0][0]\n\n#from tqdm import tqdm\n\ndef cal_g2( image_series, ring_mask, bad_image_process,\n bad_frame_list=None,good_start=0, num_buf = 8, num_lev = None ):\n '''calculation g2 by using a multi-tau algorithm'''\n \n noframes = len( image_series) # number of frames, not \"no frames\"\n #num_buf = 8 # number of buffers\n\n if bad_image_process: \n import skbeam.core.mask as mask_image \n bad_img_list = np.array( bad_frame_list) - good_start\n new_imgs = mask_image.bad_to_nan_gen( image_series, bad_img_list) \n\n if num_lev is None:\n num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1\n print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev))\n print ('%s frames will be processed...'%(noframes))\n print( 'Bad Frames involved!')\n\n g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm( new_imgs) )\n print( 'G2 calculation DONE!')\n\n else:\n\n if num_lev is None:\n num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1\n print ('In this g2 calculation, the buf and lev number are: %s--%s--'%(num_buf,num_lev))\n print ('%s frames will be processed...'%(noframes))\n g2, lag_steps = corr.multi_tau_auto_corr(num_lev, num_buf, ring_mask, tqdm(image_series) )\n print( 'G2 calculation DONE!')\n \n return g2, lag_steps\n\n\n\ndef run_time(t0):\n '''Calculate running time of a program\n Parameters\n ----------\n t0: time_string, t0=time.time()\n The start time\n Returns\n -------\n Print the running time \n \n One usage\n ---------\n t0=time.time()\n .....(the running code)\n run_time(t0)\n ''' \n \n elapsed_time = time.time() - t0\n if elapsed_time<60:\n print ('Total time: %.3f sec' %(elapsed_time ))\n else:\n print ('Total time: %.3f min' %(elapsed_time/60.)) \n \n \ndef trans_data_to_pd(data, label=None,dtype='array'):\n '''\n convert data into pandas.DataFrame\n Input:\n data: list or np.array\n label: the coloum label of the data\n dtype: list or array [[NOT WORK or dict (for dict only save the scalar not arrays values)]]\n Output:\n a pandas.DataFrame\n '''\n #lists a [ list1, list2...] all the list have the same length\n from numpy import arange,array\n import pandas as pd,sys \n if dtype == 'list':\n data=array(data).T \n N,M=data.shape\n elif dtype == 'array':\n data=array(data) \n N,M=data.shape \n else:\n print(\"Wrong data type! 
Now only support 'list' and 'array' type\")        \n    \n    \n    index =  arange( N )\n    if label is None:label=['data%s'%i for i in range(M)]\n    #print label\n    df = pd.DataFrame( data, index=index, columns= label  )\n    return df\n\n\ndef save_lists( data, label=None,  filename=None, path=None, return_res = False, verbose=False):    \n    '''\n    save_lists( data, label=None,  filename=None, path=None)\n    \n    save lists to a CSV file with filename in path\n    Parameters\n    ----------\n    data: list\n    label: the column name, the length should be equal to the column number of the list\n    filename: the filename to be saved\n    path: the filepath to be saved\n    \n    Example:    \n        save_arrays(  [q,iq], label= ['q_A-1', 'Iq'], filename='uid=%s-q-Iq'%uid, path= data_dir  )    \n    '''\n    \n    M,N = len(data[0]),len(data)\n    d = np.zeros( [N,M] )\n    for i in range(N):\n        d[i] = data[i]        \n    \n    df = trans_data_to_pd(d.T, label, 'array')  \n    #dt =datetime.now()\n    #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)   \n    if filename is None:\n        filename = 'data'\n    filename = os.path.join(path, filename )#+'.csv')\n    df.to_csv(filename)\n    if verbose:\n        print('The data was saved in: %s.'%filename)\n    if return_res:\n        return df\n\ndef get_pos_val_overlap( p1, v1, p2,v2, Nl):\n    '''get the overlap of v1 and v2\n    p1: the index of array1 in an array with total length Nl\n    v1: the corresponding value of p1\n    p2: the index of array2 in an array with total length Nl\n    v2: the corresponding value of p2\n    Return:\n        The values in v1 at the positions where p1 and p2 overlap\n        The values in v2 at the positions where p1 and p2 overlap\n    \n    An example:\n        Nl =10\n        p1= np.array( [1,3,4,6,8] )\n        v1 = np.array( [10,20,30,40,50])\n        p2= np.array( [ 0,2,3,5,7,8])\n        v2=np.array( [10,20,30,40,50,60,70])\n        \n        get_pos_val_overlap( p1, v1, p2,v2, Nl)\n    \n    '''\n    ind = np.zeros( Nl, dtype=np.int32 )\n    ind[p1] = np.arange( len(p1) ) +1  \n    w2 = np.where( ind[p2] )[0]\n    w1 = ind[ p2[w2]] -1\n    return v1[w1], v2[w2]\n    \n    \n    \ndef save_arrays( data, label=None, dtype='array', filename=None, path=None, return_res = False,verbose=False):      \n    '''\n    July 10, 2016, Y.G.@CHX\n    save_arrays( data, label=None, dtype='array', filename=None, path=None):     \n    save data to a CSV file with filename in path\n    Parameters\n    ----------\n    data: arrays\n    label: the column name, the length should be equal to the column number of data\n    dtype: array or list\n    filename: the filename to be saved\n    path: the filepath to be saved\n    \n    Example:\n    \n        save_arrays( qiq, label= ['q_A-1', 'Iq'], dtype='array', filename='uid=%s-q-Iq'%uid, path= data_dir )\n\n    \n    '''\n    df = trans_data_to_pd(data, label,dtype)  \n    #dt =datetime.now()\n    #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)    \n    if filename is None:\n        filename = 'data'\n    filename_ = os.path.join(path, filename)# +'.csv')\n    df.to_csv(filename_)\n    if verbose:\n        print( 'The file: %s is saved in %s'%(filename, path) )\n    #print( 'The g2 of uid= %s is saved in %s with filename as g2-%s-%s.csv'%(uid, path, uid, CurTime))\n    if return_res:\n        return df\n\ndef cal_particle_g2( radius, viscosity, qr, taus, beta=0.2, T=298):\n    '''YG Dev Nov 20, 2017@CHX\n    calculate the particle g2 function for a given particle radius, q, and solution viscosity, using a simple \n    exponential model\n    Input:\n        radius: m\n        qr, list, in A-1\n        viscosity:  N*s/m^2  (water at 298 K = 8.9*10^(-4) )  \n        T: temperature, in K  \n        e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298 K) gives:\n       1.38064852*10**(-23)*298  / ( 6*np.pi * 0.20871 * 250 *10**(-9)) * 
10**20 = 4.18*10**5 A^2/s\n        taus: time \n        beta: contrast\n        \n        cal_particle_g2( radius=125 *10**(-9), qr=[0.01,0.015], viscosity= 8.9*1e-4)  \n    \n    '''\n    D0 = get_diffusion_coefficient( viscosity, radius, T=T)\n    g2_q1 = np.zeros(len(qr), dtype = object)\n    for i, q1 in enumerate(qr):\n        relaxation_rate = D0 * q1**2\n        g2_q1[i] = simple_exponential( taus, beta=beta, relaxation_rate = relaxation_rate, baseline=1)\n    return g2_q1\n    \ndef get_Reynolds_number( flow_rate, flow_radius, fluid_density, fluid_viscosity ):\n    '''May 10, 2019, Y.G.@CHX\n    get the Reynolds number, the ratio of inertial to viscous forces, V*Dia*density/eta\n    Reynolds_number << 1000 gives a laminar flow\n    flow_rate: ul/s\n    flow_radius: mm\n    fluid_density: Kg/m^3 ( for water, 1000 Kg/m^3 = 1 g/cm^3 )  \n    fluid_viscosity: N*s/m^2 ( Kg /(s*m) )  \n    \n    return Reynolds_number  \n    '''   \n    return flow_rate * 1e-6 * flow_radius * 1e-3 *2 * fluid_density/ fluid_viscosity\n    \ndef get_Deborah_number( flow_rate, beam_size, q_vector, diffusion_coefficient ):\n    '''May 10, 2019, Y.G.@CHX\n    get the Deborah number, the ratio of transit time to diffusion time, (V/beam_size)/ ( D*q^2)\n    flow_rate: ul/s\n    beam_size: ul\n    q_vector: A-1\n    diffusion_coefficient: A^2/s  \n    \n    return Deborah_number\n    '''   \n    return (flow_rate /beam_size) / ( diffusion_coefficient * q_vector**2 )\n\n\n  \ndef get_viscosity( diffusion_coefficient , radius, T=298):\n    '''May 10, 2019, Y.G.@CHX\n    get the viscosity of a fluid from the diffusion coefficient of a Brownian particle with a given radius\n    diffusion_coefficient: in unit of A^2/s  \n    radius: m\n    T: K\n    k: 1.38064852*10**(-23) J/K, Boltzmann constant\n    \n    return viscosity: N*s/m^2  (water at 298 K = 8.9*10**(-4) )\n    '''\n    \n    k=  1.38064852*10**(-23)    \n    return k*T / ( 6*np.pi* diffusion_coefficient * radius) * 10**20 \n\ndef get_diffusion_coefficient( viscosity, radius, T=298):\n    '''July 10, 2016, Y.G.@CHX\n    get the diffusion coefficient of a Brownian particle with a given radius in a fluid with a given viscosity\n    viscosity:  N*s/m^2  (water at 298 K = 8.9*10^(-4) )  \n    radius: m\n    T: K\n    k: 1.38064852*10**(-23) J/K, Boltzmann constant  \n    \n    return diffusion coefficient in unit of A^2/s\n    e.g., for a 250 nm sphere in glycerol/water (90:10) at RT (298 K):\n       1.38064852*10**(-23) *298  / ( 6*np.pi* 0.20871 * 250 *10**(-9)) * 10**20 = 4.18*10**5 A^2/s\n     \n    get_diffusion_coefficient( 0.20871, 250 *10**(-9), T=298)  \n     \n    '''\n    \n    k=  1.38064852*10**(-23)    \n    return k*T / ( 6*np.pi* viscosity * radius) * 10**20 \n\n\ndef ring_edges(inner_radius, width, spacing=0, num_rings=None):\n    \"\"\"\n    Aug 02, 2016, Y.G.@CHX\n    ring_edges(inner_radius, width, spacing=0, num_rings=None)\n    \n    Calculate the inner and outer radius of a set of rings.\n\n    The number of rings, their widths, and any spacing between rings can be\n    specified. They can be uniform or varied.\n\n    Parameters\n    ----------\n    inner_radius : float\n        inner radius of the inner-most ring\n\n    width : float or list of floats\n        ring thickness\n        If a float, all rings will have the same thickness.\n\n    spacing : float or list of floats, optional\n        margin between rings, 0 by default\n        If a float, all rings will have the same spacing. If a list,\n        the length of the list must be one less than the number of\n        rings.\n\n    num_rings : int, optional\n        number of rings\n        Required if width and spacing are not lists and number\n        cannot thereby be inferred. 
If it is given and can also be\n inferred, input is checked for consistency.\n\n Returns\n -------\n edges : array\n inner and outer radius for each ring\n\n Example\n -------\n # Make two rings starting at r=1px, each 5px wide\n >>> ring_edges(inner_radius=1, width=5, num_rings=2)\n [(1, 6), (6, 11)]\n # Make three rings of different widths and spacings.\n # Since the width and spacings are given individually, the number of\n # rings here is simply inferred.\n >>> ring_edges(inner_radius=1, width=(5, 4, 3), spacing=(1, 2))\n [(1, 6), (7, 11), (13, 16)]\n \n \"\"\"\n # All of this input validation merely checks that width, spacing, and\n # num_rings are self-consistent and complete.\n width_is_list = isinstance(width, collections.Iterable)\n spacing_is_list = isinstance(spacing, collections.Iterable)\n if (width_is_list and spacing_is_list):\n if len(width) != len(spacing) + 1:\n raise ValueError(\"List of spacings must be one less than list \"\n \"of widths.\")\n if num_rings is None:\n try:\n num_rings = len(width)\n except TypeError:\n try:\n num_rings = len(spacing) + 1\n except TypeError:\n raise ValueError(\"Since width and spacing are constant, \"\n \"num_rings cannot be inferred and must be \"\n \"specified.\")\n else: \n if width_is_list:\n if num_rings != len(width):\n raise ValueError(\"num_rings does not match width list\")\n if spacing_is_list:\n if num_rings-1 != len(spacing):\n raise ValueError(\"num_rings does not match spacing list\")\n # Now regularlize the input.\n if not width_is_list:\n width = np.ones(num_rings) * width\n \n if spacing is None:\n spacing = [] \n else: \n if not spacing_is_list:\n spacing = np.ones(num_rings - 1) * spacing\n # The inner radius is the first \"spacing.\"\n all_spacings = np.insert(spacing, 0, inner_radius) \n steps = np.array([all_spacings, width]).T.ravel()\n edges = np.cumsum(steps).reshape(-1, 2)\n return edges\n\n \n\ndef get_non_uniform_edges( centers, width = 4, number_rings=1, spacing=0, ):\n '''\n YG CHX Spe 6\n get_non_uniform_edges( centers, width = 4, number_rings=3 )\n \n Calculate the inner and outer radius of a set of non uniform distributed\n rings by giving ring centers\n For each center, there are number_rings with each of width\n\n Parameters\n ----------\n centers : float\n the center of the rings\n\n width : float or list of floats\n ring thickness\n If a float, all rings will have the same thickness.\n\n num_rings : int, optional\n number of rings\n Required if width and spacing are not lists and number\n cannot thereby be inferred. 
If it is given and can also be\n inferred, input is checked for consistency.\n\n Returns\n -------\n edges : array\n inner and outer radius for each ring \n '''\n \n if number_rings is None: \n number_rings = 1\n edges = np.zeros( [len(centers)*number_rings, 2] )\n #print( width )\n \n if not isinstance(width, collections.Iterable):\n width = np.ones_like( centers ) * width \n for i, c in enumerate(centers): \n edges[i*number_rings:(i+1)*number_rings,:] = ring_edges( inner_radius = c - width[i]*number_rings/2, \n width= width[i], spacing= spacing, num_rings=number_rings)\n return edges \n\n\n\ndef trans_tf_to_td(tf, dtype = 'dframe'):\n '''July 02, 2015, Y.G.@CHX\n Translate epoch time to string\n '''\n import pandas as pd\n import numpy as np\n import datetime\n '''translate time.float to time.date,\n td.type dframe: a dataframe\n td.type list, a list\n ''' \n if dtype is 'dframe':ind = tf.index\n else:ind = range(len(tf)) \n td = np.array([ datetime.datetime.fromtimestamp(tf[i]) for i in ind ])\n return td\n\n\n\ndef trans_td_to_tf(td, dtype = 'dframe'):\n '''July 02, 2015, Y.G.@CHX\n Translate string to epoch time\n \n '''\n import time\n import numpy as np\n '''translate time.date to time.float,\n td.type dframe: a dataframe\n td.type list, a list\n ''' \n if dtype is 'dframe':ind = td.index\n else:ind = range(len(td))\n #tf = np.array([ time.mktime(td[i].timetuple()) for i in range(len(td)) ])\n tf = np.array([ time.mktime(td[i].timetuple()) for i in ind])\n return tf\n\n\n\ndef get_averaged_data_from_multi_res( multi_res, keystr='g2', different_length= True, verbose=False,\n cal_errorbar=False):\n '''Y.G. Dec 22, 2016\n get average data from multi-run analysis result\n Parameters:\n multi_res: dict, generated by function run_xpcs_xsvs_single\n each key is a uid, inside each uid are also dict with key as 'g2','g4' et.al.\n keystr: string, get the averaged keystr\n different_length: if True, do careful average for different length results\n return:\n array, averaged results \n \n '''\n maxM = 0\n mkeys = multi_res.keys()\n if not different_length:\n n=0\n for i, key in enumerate( list( mkeys) ):\n keystri = multi_res[key][keystr]\n if i ==0: \n keystr_average = keystri\n else:\n keystr_average += keystri\n n +=1\n keystr_average /=n\n \n else:\n length_dict = {} \n D= 1 \n for i, key in enumerate( list( mkeys) ):\n if verbose:\n print(i,key)\n shapes = multi_res[key][keystr].shape\n M=shapes[0] \n if i ==0: \n if len(shapes)==2:\n D=2 \n maxN = shapes[1] \n elif len(shapes)==3:\n D=3 \n maxN = shapes[2] #in case of two-time correlation \n if (M) not in length_dict:\n length_dict[(M) ] =1\n else:\n length_dict[(M) ] += 1\n maxM = max( maxM, M ) \n #print( length_dict )\n avg_count = {}\n sk = np.array( sorted(length_dict) ) \n for i, k in enumerate( sk ):\n avg_count[k] = np.sum( np.array( [ length_dict[k] for k in sk[i:] ] ) ) \n #print(length_dict, avg_count) \n if D==2:\n #print('here')\n keystr_average = np.zeros( [maxM, maxN] )\n elif D==3:\n keystr_average = np.zeros( [maxM, maxM, maxN ] ) \n else:\n keystr_average = np.zeros( [maxM] )\n for i, key in enumerate( list( mkeys) ):\n keystri = multi_res[key][keystr]\n Mi = keystri.shape[0] \n if D!=3: \n keystr_average[:Mi] += keystri\n else:\n keystr_average[:Mi,:Mi,:] += keystri\n if D!=3: \n keystr_average[:sk[0]] /= avg_count[sk[0]] \n else:\n keystr_average[:sk[0],:sk[0], : ] /= avg_count[sk[0]] \n for i in range( 0, len(sk)-1 ):\n if D!=3: \n keystr_average[sk[i]:sk[i+1]] /= avg_count[sk[i+1]] \n else:\n 
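\n                # (added) block-wise normalization for the two-time (D==3) case:\n                # runs shorter than sk[i+1] frames only contribute to the\n                # upper-left sk[i] x sk[i] block, so each block is divided by\n                # its own number of contributing runs\n               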
keystr_average[sk[i]:sk[i+1],sk[i]:sk[i+1],:] /= avg_count[sk[i+1]] \n \n return keystr_average\n\n\ndef save_g2_general( g2, taus, qr=None, qz=None, uid='uid', path=None, return_res= False ):\n \n '''Y.G. Dec 29, 2016\n \n save g2 results, \n res_pargs should contain\n g2: one-time correlation function\n taus, lags of g2\n qr: the qr center, same length as g2\n qz: the qz or angle center, same length as g2\n path:\n uid:\n \n ''' \n \n df = DataFrame( np.hstack( [ (taus).reshape( len(g2),1) , g2] ) ) \n t,qs = g2.shape\n if qr is None:\n qr = range( qs )\n if qz is None: \n df.columns = ( ['tau'] + [str(qr_) for qr_ in qr ] )\n else:\n df.columns = ( ['tau'] + [ str(qr_) +'_'+ str(qz_) for (qr_,qz_) in zip(qr,qz) ] )\n \n #dt =datetime.now()\n #CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) \n \n #if filename is None:\n \n filename = uid\n #filename = 'uid=%s--g2.csv' % (uid)\n #filename += '-uid=%s-%s.csv' % (uid,CurTime) \n #filename += '-uid=%s.csv' % (uid) \n filename1 = os.path.join(path, filename)\n df.to_csv(filename1)\n print( 'The correlation function is saved in %s with filename as %s'%( path, filename))\n if return_res:\n return df\n \n\n###########\n#*for g2 fit and plot\n\ndef stretched_auto_corr_scat_factor(x, beta, relaxation_rate, alpha=1.0, baseline=1):\n return beta * np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline\n\ndef simple_exponential(x, beta, relaxation_rate, baseline=1):\n '''relation_rate: unit 1/s '''\n return beta * np.exp(-2 * relaxation_rate * x) + baseline\n\n\ndef simple_exponential_with_vibration(x, beta, relaxation_rate, freq, amp, baseline=1):\n return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * relaxation_rate * x) + baseline\n\ndef stretched_auto_corr_scat_factor_with_vibration(x, beta, relaxation_rate, alpha, freq, amp, baseline=1):\n return beta * (1 + amp*np.cos( 2*np.pi*freq* x) )* np.exp(-2 * (relaxation_rate * x)**alpha ) + baseline\n\n\ndef flow_para_function_with_vibration( x, beta, relaxation_rate, flow_velocity, freq, amp, baseline=1): \n vibration_part = (1 + amp*np.cos( 2*np.pi*freq* x) ) \n Diff_part= np.exp(-2 * relaxation_rate * x)\n Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 \n return beta* vibration_part* Diff_part * Flow_part + baseline\n\ndef flow_para_function( x, beta, relaxation_rate, flow_velocity, baseline=1):\n '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )'''\n \n Diff_part= np.exp(-2 * relaxation_rate * x)\n Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 \n return beta*Diff_part * Flow_part + baseline\n\n\ndef flow_para_function_explicitq( x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0 ):\n '''Nov 9, 2017 Basically, make q vector to (qr, angle), \n ###relaxation_rate is actually a diffusion rate\n flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )\n Diffusion part: np.exp( -2*D q^2 *tau )\n q_ang: would be np.radians( ang - 90 ) \n \n '''\n \n Diff_part= np.exp(-2 * ( diffusion* qr**2 * x)**alpha ) \n if flow_velocity !=0: \n if np.cos( q_ang ) >= 1e-8:\n Flow_part = np.pi**2/(16*x*flow_velocity*qr* abs(np.cos(q_ang)) ) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity * qr* abs(np.cos(q_ang)) ) ) )**2 \n else:\n Flow_part = 1\n else:\n Flow_part = 1\n return beta*Diff_part * Flow_part + baseline\n\n\n\ndef get_flow_velocity( average_velocity, shape_factor): \n \n return average_velocity * (1- 
shape_factor)/(1+ shape_factor)\n\ndef stretched_flow_para_function( x, beta, relaxation_rate, alpha, flow_velocity, baseline=1): \n '''\n flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) )\n '''\n Diff_part= np.exp(-2 * (relaxation_rate * x)**alpha )\n Flow_part = np.pi**2/(16*x*flow_velocity) * abs( erf( np.sqrt( 4/np.pi * 1j* x * flow_velocity ) ) )**2 \n return beta*Diff_part * Flow_part + baseline\n\n\ndef get_g2_fit_general_two_steps( g2, taus, function='simple_exponential', \n second_fit_range=[0,20], \n sequential_fit=False, *argv,**kwargs):\n '''\n Fit g2 in two steps,\n i) Using the \"function\" to fit whole g2 to get baseline and beta (contrast)\n ii) Then using the obtained baseline and beta to fit g2 in a \"second_fit_range\" by using simple_exponential function\n '''\n g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function, sequential_fit, *argv,**kwargs) \n guess_values = {}\n for k in list (g2_fit_result[0].params.keys()):\n guess_values[k] = np.array( [ g2_fit_result[i].params[k].value\n for i in range( g2.shape[1] ) ]) \n \n if 'guess_limits' in kwargs: \n guess_limits = kwargs['guess_limits'] \n else:\n guess_limits = dict( baseline =[1, 1.8], alpha=[0, 2],\n beta = [0., 1], relaxation_rate= [0.001, 10000])\n \n g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, function ='simple_exponential', \n sequential_fit= sequential_fit, fit_range=second_fit_range,\n fit_variables={'baseline':False, 'beta': False, 'alpha':False,'relaxation_rate':True},\n guess_values= guess_values, guess_limits = guess_limits ) \n \n return g2_fit_result, taus_fit, g2_fit\n\n\ndef get_g2_fit_general( g2, taus, function='simple_exponential', \n sequential_fit=False, qval_dict = None, \n ang_init = 90, *argv,**kwargs):\n '''\n Nov 9, 2017, give qval_dict for using function of flow_para_function_explicitq\n qval_dict: a dict with qr and ang (in unit of degrees).\")\n \n \n Dec 29,2016, Y.G.@CHX\n \n Fit one-time correlation function\n \n The support functions include simple exponential and stretched/compressed exponential\n Parameters\n ---------- \n g2: one-time correlation function for fit, with shape as [taus, qs]\n taus: the time delay\n sequential_fit: if True, will use the low-q fit result as initial value to fit the higher Qs\n function: \n supported function include:\n 'simple_exponential' (or 'simple'): fit by a simple exponential function, defined as \n beta * np.exp(-2 * relaxation_rate * lags) + baseline\n 'streched_exponential'(or 'streched'): fit by a streched exponential function, defined as \n beta * ( np.exp( -2 * ( relaxation_rate * tau )**alpha ) + baseline\n 'stretched_vibration': fit by a streched exponential function with vibration, defined as \n beta * (1 + amp*np.cos( 2*np.pi*60* x) )* np.exp(-2 * (relaxation_rate * x)**alpha) + baseline\n 'flow_para_function' (or flow): fit by a flow function\n \n \n kwargs:\n could contains:\n 'fit_variables': a dict, for vary or not, \n keys are fitting para, including \n beta, relaxation_rate , alpha ,baseline\n values: a False or True, False for not vary\n 'guess_values': a dict, for initial value of the fitting para,\n the defalut values are \n dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0)\n \n 'guess_limits': a dict, for the limits of the fittting para, for example:\n dict( beta=[0, 10],, alpha=[0,100] )\n the default is:\n dict( baseline =[0.5, 2.5], alpha=[0, inf] ,beta = [0, 1], relaxation_rate= [0.0,1000] )\n Returns\n ------- \n fit resutls: a instance in limfit\n tau_fit\n 
fit_data by the model, it has the q number of g2\n \n an example:\n fit_g2_func = 'stretched'\n g2_fit_result, taus_fit, g2_fit = get_g2_fit_general( g2, taus, \n function = fit_g2_func, vlim=[0.95, 1.05], fit_range= None, \n fit_variables={'baseline':True, 'beta':True, 'alpha':True,'relaxation_rate':True},\n guess_values={'baseline':1.0,'beta':0.05,'alpha':1.0,'relaxation_rate':0.01,}) \n \n g2_fit_paras = save_g2_fit_para_tocsv(g2_fit_result, filename= uid_ +'_g2_fit_paras.csv', path=data_dir ) \n \n \n ''' \n \n if 'fit_range' in kwargs.keys():\n fit_range = kwargs['fit_range'] \n else:\n fit_range=None \n\n \n num_rings = g2.shape[1] \n if 'fit_variables' in kwargs:\n additional_var = kwargs['fit_variables'] \n _vars =[ k for k in list( additional_var.keys()) if additional_var[k] is False]\n else:\n _vars = [] \n if function=='simple_exponential' or function=='simple':\n _vars = np.unique ( _vars + ['alpha']) \n mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= list( _vars) ) \n elif function=='stretched_exponential' or function=='stretched': \n mod = Model(stretched_auto_corr_scat_factor)#, independent_vars= _vars) \n elif function=='stretched_vibration': \n mod = Model(stretched_auto_corr_scat_factor_with_vibration)#, independent_vars= _vars) \n elif function=='flow_para_function' or function=='flow_para': \n mod = Model(flow_para_function)#, independent_vars= _vars) \n elif function=='flow_para_function_explicitq' or function=='flow_para_qang': \n mod = Model(flow_para_function_explicitq)#, independent_vars= _vars) \n elif function=='flow_para_function_with_vibration' or function=='flow_vibration': \n mod = Model( flow_para_function_with_vibration )\n \n else:\n print (\"The %s is not supported.The supported functions include simple_exponential and stretched_exponential\"%function) \n \n mod.set_param_hint( 'baseline', min=0.5, max= 2.5 )\n mod.set_param_hint( 'beta', min=0.0, max=1.0 )\n mod.set_param_hint( 'alpha', min=0.0 )\n mod.set_param_hint( 'relaxation_rate', min=0.0, max= 1000 ) \n mod.set_param_hint( 'flow_velocity', min=0) \n mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) \n \n if 'guess_limits' in kwargs: \n guess_limits = kwargs['guess_limits'] \n for k in list( guess_limits.keys() ):\n mod.set_param_hint( k, min= guess_limits[k][0], max= guess_limits[k][1] ) \n \n if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration':\n mod.set_param_hint( 'flow_velocity', min=0) \n if function=='flow_para_function_explicitq' or function=='flow_para_qang': \n mod.set_param_hint( 'flow_velocity', min=0) \n mod.set_param_hint( 'diffusion', min=0.0, max= 2e8 ) \n if function=='stretched_vibration' or function=='flow_vibration': \n mod.set_param_hint( 'freq', min=0)\n mod.set_param_hint( 'amp', min=0)\n \n _guess_val = dict( beta=.1, alpha=1.0, relaxation_rate =0.005, baseline=1.0) \n if 'guess_values' in kwargs: \n guess_values = kwargs['guess_values'] \n _guess_val.update( guess_values ) \n \n _beta=_guess_val['beta']\n _alpha=_guess_val['alpha']\n _relaxation_rate = _guess_val['relaxation_rate']\n _baseline= _guess_val['baseline'] \n if isinstance( _beta, (np.ndarray, list) ):\n _beta_=_beta[0]\n else:\n _beta_=_beta\n if isinstance( _baseline, (np.ndarray, list) ):\n _baseline_ = _baseline[0] \n else:\n _baseline_ = _baseline\n if isinstance( _relaxation_rate, (np.ndarray, list) ):\n _relaxation_rate_= _relaxation_rate[0]\n else: \n _relaxation_rate_= _relaxation_rate\n if isinstance( _alpha, (np.ndarray, list) ):\n _alpha_ = 
_alpha[0] \n else:\n _alpha_ = _alpha \n pars = mod.make_params( beta=_beta_, alpha=_alpha_, \n relaxation_rate =_relaxation_rate_, baseline= _baseline_)\n \n if function=='flow_para_function' or function=='flow_para':\n _flow_velocity =_guess_val['flow_velocity'] \n if isinstance( _flow_velocity, (np.ndarray, list) ):\n _flow_velocity_ = _flow_velocity[0] \n else:\n _flow_velocity_ = _flow_velocity \n pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_,\n relaxation_rate =_relaxation_rate_, baseline= _baseline_)\n\n if function=='flow_para_function_explicitq' or function=='flow_para_qang':\n _flow_velocity =_guess_val['flow_velocity'] \n _diffusion =_guess_val['diffusion'] \n _guess_val['qr'] = 1\n _guess_val['q_ang'] = 0\n if isinstance( _flow_velocity, (np.ndarray, list) ):\n _flow_velocity_ = _flow_velocity[0] \n else:\n _flow_velocity_ = _flow_velocity \n if isinstance( _diffusion, (np.ndarray, list) ):\n _diffusion_ = _diffusion[0] \n else:\n _diffusion_ = _diffusion \n pars = mod.make_params( beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_,\n diffusion =_diffusion_, baseline= _baseline_,\n qr=1, q_ang=0\n )\n \n if function=='stretched_vibration':\n _freq =_guess_val['freq'] \n _amp = _guess_val['amp'] \n pars = mod.make_params( beta=_beta, alpha=_alpha, freq=_freq, amp = _amp,\n relaxation_rate =_relaxation_rate, baseline= _baseline)\n \n if function=='flow_vibration':\n _flow_velocity =_guess_val['flow_velocity'] \n _freq =_guess_val['freq'] \n _amp = _guess_val['amp'] \n pars = mod.make_params( beta=_beta, freq=_freq, amp = _amp,flow_velocity=_flow_velocity,\n relaxation_rate =_relaxation_rate, baseline= _baseline) \n for v in _vars:\n pars['%s'%v].vary = False\n #print( pars )\n fit_res = []\n model_data = [] \n for i in range(num_rings): \n if fit_range is not None:\n y_=g2[1:, i][fit_range[0]:fit_range[1]]\n lags_=taus[1:][fit_range[0]:fit_range[1]] \n else:\n y_=g2[1:, i]\n lags_=taus[1:] \n \n mm = ~np.isnan(y_) \n y = y_[mm]\n lags = lags_[mm] \n #print( i, mm.shape, y.shape, y_.shape, lags.shape, lags_.shape )\n #y=y_\n #lags=lags_\n #print( _relaxation_rate )\n for k in list(pars.keys()):\n #print(k, _guess_val[k] )\n try:\n if isinstance( _guess_val[k], (np.ndarray, list) ):\n pars[k].value = _guess_val[k][i] \n except:\n pass\n \n if True:\n if isinstance( _beta, (np.ndarray, list) ):\n #pars['beta'].value = _guess_val['beta'][i]\n _beta_ = _guess_val['beta'][i]\n if isinstance( _baseline, (np.ndarray, list) ):\n #pars['baseline'].value = _guess_val['baseline'][i] \n _baseline_ = _guess_val['baseline'][i] \n if isinstance( _relaxation_rate, (np.ndarray, list) ):\n #pars['relaxation_rate'].value = _guess_val['relaxation_rate'][i] \n _relaxation_rate_ = _guess_val['relaxation_rate'][i] \n if isinstance( _alpha, (np.ndarray, list) ):\n #pars['alpha'].value = _guess_val['alpha'][i] \n _alpha_ = _guess_val['alpha'][i] \n #for k in list(pars.keys()):\n #print(k, _guess_val[k] )\n # pars[k].value = _guess_val[k][i] \n if function=='flow_para_function_explicitq' or function=='flow_para_qang': \n if qval_dict is None:\n print(\"Please provide qval_dict, a dict with qr and ang (in unit of degrees).\")\n else:\n \n pars = mod.make_params( \n beta=_beta_, alpha=_alpha_, flow_velocity=_flow_velocity_,\n diffusion =_diffusion_, baseline= _baseline_,\n qr = qval_dict[i][0], q_ang = abs(np.radians( qval_dict[i][1] - ang_init) ) )\n \n \n pars['qr'].vary = False\n pars['q_ang'].vary = False\n for v in _vars:\n pars['%s'%v].vary = False\n \n #if 
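\n            # (added) with sequential_fit=True each q-bin fit is seeded with the\n            # previous bin's best-fit values (see below), which usually\n            # stabilizes the noisier high-q fits\n            #if 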
i==20:\n # print(pars)\n #print( pars )\n result1 = mod.fit(y, pars, x =lags ) \n #print(qval_dict[i][0], qval_dict[i][1], y)\n if sequential_fit:\n for k in list(pars.keys()):\n #print( pars )\n if k in list(result1.best_values.keys()):\n pars[k].value = result1.best_values[k]\n fit_res.append( result1) \n #model_data.append( result1.best_fit )\n yf=result1.model.eval(params=result1.params, x= lags_ )\n model_data.append( yf )\n return fit_res, lags_, np.array( model_data ).T\n\n\n\n\ndef get_short_long_labels_from_qval_dict(qval_dict, geometry='saxs'):\n '''Y.G. 2016, Dec 26\n Get short/long labels from a qval_dict\n Parameters\n ---------- \n qval_dict, dict, with key as roi number,\n format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs\n format as {1: [qr1], 2: [qr2] ...} for saxs\n format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs\n geometry:\n 'saxs': a saxs with Qr partition\n 'ang_saxs': a saxs with Qr and angular partition\n 'gi_saxs': gisaxs with Qz, Qr \n '''\n\n Nqs = len( qval_dict.keys())\n len_qrz = len( list( qval_dict.values() )[0] )\n #qr_label = sorted( np.array( list( qval_dict.values() ) )[:,0] )\n qr_label = np.array( list( qval_dict.values() ) )[:,0] \n if geometry=='gi_saxs' or geometry=='ang_saxs':# or geometry=='gi_waxs':\n if len_qrz < 2:\n print( \"please give qz or qang for the q-label\")\n else:\n #qz_label = sorted( np.array( list( qval_dict.values() ) )[:,1] )\n qz_label = np.array( list( qval_dict.values() ) )[:,1] \n else:\n qz_label = np.array( [0] ) \n \n uqz_label = np.unique( qz_label )\n num_qz = len( uqz_label)\n \n uqr_label = np.unique( qr_label )\n num_qr = len( uqr_label) \n \n #print( uqr_label, uqz_label )\n if len( uqr_label ) >= len( uqz_label ):\n master_plot= 'qz' #one qz for many sub plots of each qr \n else:\n master_plot= 'qr' \n\n mastp= master_plot \n if geometry == 'ang_saxs':\n mastp= 'ang' \n num_short = min(num_qz, num_qr)\n num_long = max(num_qz, num_qr)\n \n #print( mastp, num_short, num_long)\n if num_qz != num_qr:\n short_label = [qz_label,qr_label][ np.argmin( [num_qz, num_qr] ) ]\n long_label = [qz_label,qr_label][ np.argmax( [num_qz, num_qr] ) ]\n short_ulabel = [uqz_label,uqr_label][ np.argmin( [num_qz, num_qr] ) ]\n long_ulabel = [uqz_label,uqr_label][ np.argmax( [num_qz, num_qr] ) ]\n else:\n short_label = qz_label\n long_label = qr_label\n short_ulabel = uqz_label\n long_ulabel = uqr_label \n #print( long_ulabel ) \n #print( qz_label,qr_label )\n #print( short_label, long_label ) \n \n if geometry == 'saxs' or geometry == 'gi_waxs':\n ind_long = [ range( num_long ) ] \n else:\n ind_long = [ np.where( short_label == i)[0] for i in short_ulabel ] \n \n \n if Nqs == 1:\n long_ulabel = list( qval_dict.values() )[0]\n long_label = list( qval_dict.values() )[0]\n return qr_label, qz_label, num_qz, num_qr, num_short,num_long, short_label, long_label,short_ulabel,long_ulabel, ind_long, master_plot, mastp\n \n \n############################################\n##a good func to plot g2 for all types of geogmetries\n############################################ \n \n \n \n\ndef plot_g2_general( g2_dict, taus_dict, qval_dict, g2_err_dict = None, \n fit_res=None, geometry='saxs',filename='g2', \n path=None, function='simple_exponential', g2_labels=None, \n fig_ysize= 12, qth_interest = None,\n ylabel='g2', return_fig=False, append_name='', outsize=(2000, 2400), \n max_plotnum_fig=16, figsize=(10, 12), show_average_ang_saxs=True,\n qphi_analysis = False, fontsize_sublabel = 12,\n *argv,**kwargs): \n '''\n Jan 10, 2018 add 
g2_err_dict option to plot g2 with error bar \n Oct31, 2017 add qth_interest option \n \n Dec 26,2016, Y.G.@CHX\n \n Plot one/four-time correlation function (with fit) for different geometry\n \n The support functions include simple exponential and stretched/compressed exponential\n Parameters\n ---------- \n g2_dict: dict, format as {1: g2_1, 2: g2_2, 3: g2_3...} one-time correlation function, g1,g2, g3,...must have the same shape\n taus_dict, dict, format {1: tau_1, 2: tau_2, 3: tau_3...}, tau1,tau2, tau3,...must have the same shape\n qval_dict, dict, with key as roi number,\n format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs\n format as {1: [qr1], 2: [qr2] ...} for saxs\n format as {1: [qr1, qa1], 2: [qr2,qa2], ...] for ang-saxs\n \n fit_res: give all the fitting parameters for showing in the plot \n qth_interest: if not None: should be a list, and will only plot the qth_interest qs\n filename: for the title of plot\n append_name: if not None, will save as filename + append_name as filename\n path: the path to save data \n outsize: for gi/ang_saxs, will combine all the different qz images together with outsize \n function: \n 'simple_exponential': fit by a simple exponential function, defined as \n beta * np.exp(-2 * relaxation_rate * lags) + baseline\n 'streched_exponential': fit by a streched exponential function, defined as \n beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline \n geometry:\n 'saxs': a saxs with Qr partition\n 'ang_saxs': a saxs with Qr and angular partition\n 'gi_saxs': gisaxs with Qz, Qr\n \n one_plot: if True, plot all images in one pannel \n kwargs:\n \n Returns\n ------- \n None\n\n ToDoList: plot an average g2 for ang_saxs for each q\n \n ''' \n\n if ylabel=='g2':\n ylabel='g_2'\n if ylabel=='g4':\n ylabel='g_4' \n \n if geometry =='saxs':\n if qphi_analysis:\n geometry = 'ang_saxs' \n if qth_interest is not None:\n if not isinstance(qth_interest, list):\n print('Please give a list for qth_interest') \n else:\n #g2_dict0, taus_dict0, qval_dict0, fit_res0= g2_dict, taus_dict, qval_dict, fit_res \n qth_interest = np.array( qth_interest ) -1\n g2_dict_ = {} \n #taus_dict_ = {}\n for k in list(g2_dict.keys()): \n g2_dict_[k] = g2_dict[k][:,[i for i in qth_interest]] \n #for k in list(taus_dict.keys()): \n # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] \n taus_dict_ = taus_dict\n qval_dict_ = {k:qval_dict[k] for k in qth_interest} \n if fit_res is not None:\n fit_res_ = [ fit_res[k] for k in qth_interest ] \n else:\n fit_res_ = None\n else:\n g2_dict_, taus_dict_, qval_dict_, fit_res_ = g2_dict, taus_dict, qval_dict, fit_res \n \n (qr_label, qz_label, num_qz, num_qr, num_short,\n num_long, short_label, long_label,short_ulabel,\n long_ulabel,ind_long, master_plot,\n mastp) = get_short_long_labels_from_qval_dict(qval_dict_, geometry=geometry) \n fps = [] \n \n #$print( num_short, num_long )\n \n for s_ind in range( num_short ):\n ind_long_i = ind_long[ s_ind ]\n num_long_i = len( ind_long_i )\n #if show_average_ang_saxs:\n # if geometry=='ang_saxs':\n # num_long_i += 1 \n if RUN_GUI:\n fig = Figure(figsize=(10, 12)) \n else:\n #fig = plt.figure( )\n if num_long_i <=4:\n if master_plot != 'qz':\n fig = plt.figure(figsize=(8, 6)) \n else:\n if num_short>1:\n fig = plt.figure(figsize=(8, 4))\n else:\n fig = plt.figure(figsize=(10, 6))\n #print('Here')\n elif num_long_i > max_plotnum_fig:\n num_fig = int(np.ceil(num_long_i/max_plotnum_fig)) #num_long_i //16\n fig = [ plt.figure(figsize=figsize) for i in range(num_fig) ]\n #print( figsize )\n 
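\n            # (added) when num_long_i exceeds max_plotnum_fig the panels are\n            # spread over int(np.ceil(num_long_i/max_plotnum_fig)) separate\n            # figures, each holding at most max_plotnum_fig panels\n           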
else:\n #print('Here')\n if master_plot != 'qz':\n fig = plt.figure(figsize=figsize)\n else:\n fig = plt.figure(figsize=(10, 10))\n \n if master_plot == 'qz':\n if geometry=='ang_saxs':\n title_short = 'Angle= %.2f'%( short_ulabel[s_ind] ) + r'$^\\circ$' \n elif geometry=='gi_saxs':\n title_short = r'$Q_z= $' + '%.4f'%( short_ulabel[s_ind] ) + r'$\\AA^{-1}$'\n else:\n title_short = '' \n else: #qr\n if geometry=='ang_saxs' or geometry=='gi_saxs':\n title_short = r'$Q_r= $' + '%.5f '%( short_ulabel[s_ind] ) + r'$\\AA^{-1}$' \n else:\n title_short='' \n #print(geometry) \n #filename =''\n til = '%s:--->%s'%(filename, title_short )\n if num_long_i <=4: \n plt.title( til,fontsize= 14, y =1.15) \n #plt.title( til,fontsize=20, y =1.06) \n #print('here')\n else:\n plt.title( til,fontsize=20, y =1.06) \n #print( num_long ) \n if num_long!=1: \n #print( 'here')\n plt.axis('off') \n #sy = min(num_long_i,4) \n sy = min(num_long_i, int( np.ceil( min(max_plotnum_fig,num_long_i)/4)) ) \n #fig.set_size_inches(10, 12)\n #fig.set_size_inches(10, fig_ysize )\n else: \n sy =1\n #fig.set_size_inches(8,6) \n #plt.axis('off') \n sx = min(4, int( np.ceil( min(max_plotnum_fig,num_long_i)/float(sy) ) ))\n \n temp = sy\n sy = sx\n sx = temp\n \n #print( num_long_i, sx, sy )\n #print( master_plot )\n #print(ind_long_i, len(ind_long_i) )\n \n for i, l_ind in enumerate( ind_long_i ): \n if num_long_i <= max_plotnum_fig:\n #if s_ind ==2:\n # print('Here')\n # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) \n ax = fig.add_subplot(sx,sy, i + 1 ) \n if sx==1:\n if sy==1:\n plt.axis('on') \n else:\n #fig_subnum = l_ind//max_plotnum_fig\n #ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) \n fig_subnum = i//max_plotnum_fig\n #print( i, sx,sy, fig_subnum, max_plotnum_fig, i + 1 - fig_subnum*max_plotnum_fig )\n ax = fig[fig_subnum].add_subplot(sx,sy, i + 1 - fig_subnum*max_plotnum_fig) \n \n \n ax.set_ylabel( r\"$%s$\"%ylabel + '(' + r'$\\tau$' + ')' ) \n ax.set_xlabel(r\"$\\tau $ $(s)$\", fontsize=16) \n if master_plot == 'qz' or master_plot == 'angle': \n if geometry!='gi_waxs':\n title_long = r'$Q_r= $'+'%.5f '%( long_label[l_ind] ) + r'$\\AA^{-1}$' \n else:\n title_long = r'$Q_r= $'+'%i '%( long_label[l_ind] ) \n #print( title_long,long_label,l_ind )\n else: \n if geometry=='ang_saxs':\n #title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) + r'$^\\circ$' + '( %d )'%(l_ind)\n title_long = 'Ang= ' + '%.2f'%( long_label[l_ind] ) #+ r'$^\\circ$' + '( %d )'%(l_ind)\n elif geometry=='gi_saxs':\n title_long = r'$Q_z= $'+ '%.5f '%( long_label[l_ind] ) + r'$\\AA^{-1}$' \n else:\n title_long = '' \n #print( master_plot )\n if master_plot != 'qz':\n ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.1, fontsize=12) \n else: \n ax.set_title(title_long + ' (%s )'%(1+l_ind), y =1.05, fontsize= fontsize_sublabel) \n #print( geometry )\n #print( title_long ) \n if qth_interest is not None:#it might have a bug here, todolist!!!\n lab = sorted(list(qval_dict_.keys()))\n #print( lab, l_ind)\n ax.set_title(title_long + ' (%s )'%( lab[l_ind] +1), y =1.05, fontsize= 12) \n for ki, k in enumerate( list(g2_dict_.keys()) ): \n if ki==0:\n c='b'\n if fit_res is None:\n m='-o' \n else:\n m='o' \n elif ki==1:\n c='r'\n if fit_res is None:\n m='s' \n else:\n m='-' \n elif ki==2:\n c='g'\n m='-D'\n else:\n c = colors[ki+2]\n m= '-%s'%markers[ki+2] \n try:\n dumy = g2_dict_[k].shape\n #print( 'here is the shape' )\n islist = False \n except:\n islist_n = len( g2_dict_[k] )\n islist = True\n #print( 'here 
is the list' ) \n if islist:\n for nlst in range( islist_n ):\n m = '-%s'%markers[ nlst ] \n #print(m)\n y=g2_dict_[k][nlst][:, l_ind ]\n x = taus_dict_[k][nlst]\n if ki==0:\n ymin,ymax = min(y), max(y[1:])\n if g2_err_dict is None: \n if g2_labels is None: \n ax.semilogx(x, y, m, color=c, markersize=6) \n else:\n #print('here ki ={} nlst = {}'.format( ki, nlst ))\n if nlst==0:\n ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) \n else:\n ax.semilogx(x, y, m, color=c,markersize=6)\n else:\n yerr= g2_err_dict[k][nlst][:, l_ind ]\n if g2_labels is None:\n ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6) \n else:\n if nlst==0:\n ax.errorbar(x, y, yerr=yerr, fmt=m,\n color=c,markersize=6, label=g2_labels[ki]) \n else:\n ax.errorbar(x, y, yerr=yerr, fmt=m, color=c,markersize=6) \n ax.set_xscale(\"log\", nonposx='clip') \n if nlst==0:\n if l_ind==0:\n ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) \n \n else: \n y=g2_dict_[k][:, l_ind ] \n x = taus_dict_[k]\n if ki==0:\n ymin,ymax = min(y), max(y[1:]) \n if g2_err_dict is None: \n if g2_labels is None: \n ax.semilogx(x, y, m, color=c, markersize=6) \n else:\n ax.semilogx(x, y, m, color=c,markersize=6, label=g2_labels[ki]) \n else:\n yerr= g2_err_dict[k][:, l_ind ]\n #print(x.shape, y.shape, yerr.shape)\n #print(yerr)\n if g2_labels is None: \n ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6)\n else:\n ax.errorbar(x, y, yerr=yerr, fmt=m,color=c, markersize=6,label=g2_labels[ki] )\n ax.set_xscale(\"log\", nonposx='clip') \n if l_ind==0:\n ax.legend(loc='best', fontsize = 8, fancybox=True, framealpha=0.5) \n\n if fit_res_ is not None:\n result1 = fit_res_[l_ind] \n #print (result1.best_values)\n \n beta = result1.best_values['beta'] \n baseline = result1.best_values['baseline'] \n if function=='simple_exponential' or function=='simple':\n rate = result1.best_values['relaxation_rate']\n alpha =1.0 \n elif function=='stretched_exponential' or function=='stretched':\n rate = result1.best_values['relaxation_rate']\n alpha = result1.best_values['alpha']\n elif function=='stretched_vibration': \n rate = result1.best_values['relaxation_rate']\n alpha = result1.best_values['alpha']\n freq = result1.best_values['freq'] \n elif function=='flow_vibration': \n rate = result1.best_values['relaxation_rate']\n freq = result1.best_values['freq'] \n if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration': \n rate = result1.best_values['relaxation_rate']\n flow = result1.best_values['flow_velocity'] \n if function=='flow_para_function_explicitq' or function=='flow_para_qang': \n diff = result1.best_values['diffusion']\n qrr = short_ulabel[s_ind]\n #print(qrr)\n rate = diff * qrr**2\n flow = result1.best_values['flow_velocity'] \n if qval_dict_ is None:\n print(\"Please provide qval_dict, a dict with qr and ang (in unit of degrees).\")\n else: \n pass \n\n if rate!=0:\n txts = r'$\\tau_0$' + r'$ = %.3f$'%(1/rate) + r'$ s$'\n else:\n txts = r'$\\tau_0$' + r'$ = inf$' + r'$ s$'\n x=0.25\n y0=0.9\n fontsize = 12\n ax.text(x =x, y= y0, s=txts, fontsize=fontsize, transform=ax.transAxes) \n #print(function)\n dt=0\n if function!='flow_para_function' and function!='flow_para' and function!='flow_vibration' and function!='flow_para_qang':\n txts = r'$\\alpha$' + r'$ = %.3f$'%(alpha) \n dt +=0.1\n #txts = r'$\\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$'\n ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes)\n \n txts = r'$baseline$' + r'$ = %.3f$'%( baseline) \n dt 
+=0.1\n                ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes)\n                \n                if function=='flow_para_function' or function=='flow_para' or function=='flow_vibration' or function=='flow_para_qang': \n                    txts = r'$flow_v$' + r'$ = %.3f$'%( flow) \n                    dt += 0.1\n                    ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes) \n                if function=='stretched_vibration' or function=='flow_vibration': \n                    txts = r'$vibration$' + r'$ = %.1f Hz$'%( freq) \n                    dt += 0.1\n                    ax.text(x =x, y= y0-dt, s=txts, fontsize=fontsize, transform=ax.transAxes) \n                \n                txts = r'$\\beta$' + r'$ = %.3f$'%( beta ) \n                dt +=0.1\n                ax.text(x =x, y= y0- dt, s=txts, fontsize=fontsize, transform=ax.transAxes)\n            \n\n            if 'ylim' in kwargs:\n                ax.set_ylim( kwargs['ylim'])\n            elif 'vlim' in kwargs:\n                vmin, vmax =kwargs['vlim']\n                try:\n                    ax.set_ylim([ymin*vmin, ymax*vmax ]) \n                except:\n                    pass\n            else:\n                pass\n            if 'xlim' in kwargs:\n                ax.set_xlim( kwargs['xlim'])\n        if num_short == 1: \n            fp = path + filename \n        else:\n            fp = path + filename + '_%s_%s'%(mastp, s_ind) \n        \n        if append_name != '':\n            fp = fp + append_name\n        fps.append( fp + '.png' ) \n        #if num_long_i <= 16:\n        if num_long_i <= max_plotnum_fig: \n            fig.set_tight_layout(True) \n            #fig.tight_layout() \n            #print(fig)\n            try:\n                plt.savefig( fp + '.png', dpi=fig.dpi) \n            except:\n                print('Cannot save figure here.')\n            \n        else:\n            fps=[]\n            for fn, f in enumerate(fig):\n                f.set_tight_layout(True)\n                fp = path + filename + '_q_%s_%s'%(fn*16, (fn+1)*16) \n                if append_name != '':\n                    fp = fp + append_name\n                fps.append( fp + '.png' ) \n                f.savefig( fp + '.png', dpi=f.dpi)\n            #plt.savefig( fp + '.png', dpi=fig.dpi) \n        #combine all the saved images together\n        \n        if (num_short !=1) or (num_long_i > 16):\n            outputfile = path + filename + '.png'\n            if append_name != '':\n                outputfile = path + filename + append_name + '__joint.png'\n            else:\n                outputfile = path + filename + '__joint.png'\n            combine_images( fps, outputfile, outsize= outsize ) \n    if return_fig:\n        return fig \n    \n    \n\ndef power_func(x, D0, power=2):\n    return D0 * x**power\n\n\ndef get_q_rate_fit_general( qval_dict, rate, geometry ='saxs', weights=None, *argv,**kwargs): \n    '''\n    Dec 26,2016, Y.G.@CHX\n    \n    Fit q~rate by a power law function; the fit curve is forced to pass through (0,0) \n    \n    Parameters\n    ---------- \n    qval_dict, dict, with key as roi number,\n                    format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs\n                    format as {1: [qr1], 2: [qr2] ...} for saxs\n                    format as {1: [qr1, qa1], 2: [qr2,qa2], ...} 
for ang-saxs\n    rate: relaxation_rate\n\n    Option:\n    if power_variable = False, power is fixed at 2 to fit q^2~rate; \n    otherwise, power is a free fit parameter.\n    Return:\n    D0\n    qrate_fit_res\n    '''\n    \n    power_variable=False\n    \n    if 'fit_range' in kwargs.keys():\n        fit_range = kwargs['fit_range'] \n    else: \n        fit_range= None\n    \n    mod = Model( power_func )\n    #mod.set_param_hint( 'power', min=0.5, max= 10 )\n    #mod.set_param_hint( 'D0', min=0 )\n    pars = mod.make_params( power = 2, D0=1e-5 ) #note: the previous 1*10^(-5) was bitwise XOR in Python, not scientific notation\n    if power_variable:\n        pars['power'].vary = True\n    else:\n        pars['power'].vary = False\n    \n    (qr_label, qz_label, num_qz, num_qr, num_short,\n     num_long, short_label, long_label,short_ulabel,\n     long_ulabel,ind_long, master_plot,\n     mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)\n    \n    Nqr = num_long\n    Nqz = num_short \n    D0= np.zeros( Nqz )\n    power= 2 #np.zeros( Nqz )\n    qrate_fit_res=[] \n    #print(Nqz) \n    for i in range(Nqz): \n        ind_long_i = ind_long[ i ]\n        y = np.array( rate )[ind_long_i] \n        x = long_label[ind_long_i] \n        #print(y,x)\n        if fit_range is not None:\n            y=y[fit_range[0]:fit_range[1]]\n            x=x[fit_range[0]:fit_range[1]] \n        #print (i, y,x) \n        _result = mod.fit(y, pars, x = x ,weights=weights )\n        qrate_fit_res.append( _result )\n        D0[i] = _result.best_values['D0']\n        #power[i] = _result.best_values['power'] \n        print ('The fitted diffusion coefficient D0 is: %.3e A^2 s^-1'%D0[i])\n    return D0, qrate_fit_res\n\n\ndef plot_q_rate_fit_general( qval_dict, rate, qrate_fit_res, geometry ='saxs', ylim = None, \n                            plot_all_range=True, plot_index_range = None, show_text=True,return_fig=False,\n                            show_fit=True,\n                            *argv,**kwargs): \n    '''\n    Dec 26,2016, Y.G.@CHX\n    \n    plot q~rate fitted by a power law function; the fit curve is forced to pass through (0,0) \n    \n    Parameters\n    ---------- \n    qval_dict, dict, with key as roi number,\n                    format as {1: [qr1, qz1], 2: [qr2,qz2] ...} for gi-saxs\n                    format as {1: [qr1], 2: [qr2] ...} for saxs\n                    format as {1: [qr1, qa1], 2: [qr2,qa2], ...} 
for ang-saxs\n    rate: relaxation_rate\n    plot_index_range: \n    Option:\n    if power_variable = False, power is fixed at 2 to fit q^2~rate; \n    otherwise, power is a free fit parameter.\n    show_fit: bool, if False, do not show the fit\n    \n    ''' \n    \n    if 'uid' in kwargs.keys():\n        uid = kwargs['uid'] \n    else:\n        uid = 'uid' \n    if 'path' in kwargs.keys():\n        path = kwargs['path'] \n    else:\n        path = '' \n    (qr_label, qz_label, num_qz, num_qr, num_short,\n     num_long, short_label, long_label,short_ulabel,\n     long_ulabel,ind_long, master_plot,\n     mastp) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry)\n    \n    power = 2\n    fig,ax = plt.subplots()\n    plt.title(r'$Q^%s$'%(power) + '-Rate-%s_Fit'%(uid),fontsize=20, y =1.06)\n    Nqz = num_short \n    if Nqz!=1:\n        ls = '--'\n    else:\n        ls='' \n    for i in range(Nqz):\n        ind_long_i = ind_long[ i ] \n        y = np.array( rate )[ind_long_i] \n        x = long_label[ind_long_i]\n        D0 = qrate_fit_res[i].best_values['D0'] \n        #print(i, x, y, D0 ) \n        if Nqz!=1:\n            label=r'$q_z=%.5f$'%short_ulabel[i]\n        else:\n            label=''\n        ax.plot(x**power, y, marker = 'o', ls =ls, label=label)\n        yfit = qrate_fit_res[i].best_fit\n\n        if show_fit: \n            if plot_all_range:\n                ax.plot(x**power, x**power*D0, '-r') \n            else: \n                ax.plot( (x**power)[:len(yfit) ], yfit, '-r') \n        \n        if show_text:\n            txts = r'$D0: %.3e$'%D0 + r' $A^2$' + r'$s^{-1}$'\n            dy=0.1\n            ax.text(x =0.15, y=.65 -dy *i, s=txts, fontsize=14, transform=ax.transAxes) \n    if Nqz!=1: ax.legend(loc='best')\n\n    if plot_index_range is not None:\n        d1,d2 = plot_index_range\n        d2 = min( len(x)-1, d2 ) \n        ax.set_xlim( (x**power)[d1], (x**power)[d2] )\n        ax.set_ylim( y[d1],y[d2])\n    if ylim is not None:\n        ax.set_ylim( ylim )\n    \n    ax.set_ylabel('Relaxation rate 'r'$\\gamma$'\"($s^{-1}$)\")\n    ax.set_xlabel(\"$q^%s$\"r'($\\AA^{-2}$)'%power)\n    fig.tight_layout() #apply the layout before saving; calling it after savefig has no effect on the file\n    fp = path + '%s_Q_Rate'%(uid) + '_fit.png'\n    fig.savefig( fp, dpi=fig.dpi)\n    if return_fig:\n        return fig,ax\n    \n\ndef save_g2_fit_para_tocsv( fit_res, filename, path):\n    '''Y.G. Dec 29, 2016, \n    save g2 fitted parameters to a csv file\n    '''\n    col = list( fit_res[0].best_values.keys() )\n    m,n = len( fit_res ), len( col )\n    data = np.zeros( [m,n] )\n    for i in range( m ):\n        data[i] = list( fit_res[i].best_values.values() )\n    df = DataFrame( data ) \n    df.columns = col \n    filename1 = os.path.join(path, filename) # + '.csv')\n    df.to_csv(filename1)\n    print( \"The g2 fitting parameters are saved in %s\"%filename1)\n    return df\n    \n\n\ndef R_2(ydata,fit_data):\n    ''' Calculates R squared for a particular fit - by L.W.\n    usage R_2(ydata,fit_data)\n    returns R2 \n    by L.W. Feb. 2019\n    '''\n    y_ave=np.average(ydata)\n    SS_tot=np.sum((np.array(ydata)-y_ave)**2)\n    #print('SS_tot: %s'%SS_tot)\n    SS_res=np.sum((np.array(ydata)-np.array(fit_data))**2)\n    #print('SS_res: %s'%SS_res)\n    return 1-SS_res/SS_tot\n"
] |
[
[
"numpy.rot90",
"numpy.ones_like",
"numpy.ma.sum",
"numpy.argmin",
"numpy.load",
"numpy.min",
"numpy.exp",
"numpy.mean",
"numpy.tile",
"numpy.where",
"numpy.radians",
"numpy.sort",
"numpy.cos",
"scipy.ndimage.measurements.center_of_mass",
"numpy.cumsum",
"numpy.size",
"numpy.int_",
"numpy.max",
"numpy.bincount",
"numpy.zeros_like",
"scipy.signal.fftconvolve",
"numpy.log",
"pandas.DataFrame",
"numpy.interp",
"numpy.polyval",
"numpy.flipud",
"numpy.arange",
"numpy.hypot",
"numpy.sqrt",
"numpy.poly1d",
"numpy.polyfit",
"numpy.isfinite",
"numpy.log10",
"numpy.argmax",
"numpy.vstack",
"numpy.array",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.cm.get_cmap",
"numpy.round",
"numpy.linalg.pinv",
"numpy.shape",
"numpy.std",
"numpy.ma.masked_array",
"scipy.special.erf",
"numpy.average",
"numpy.insert",
"numpy.fliplr",
"numpy.ceil",
"numpy.isnan",
"numpy.sum",
"numpy.ones",
"numpy.ravel",
"numpy.abs",
"numpy.repeat",
"numpy.unique"
]
] |
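The row above ends with the q-rate fitting helpers (power_func, get_q_rate_fit_general). The core of that fit is a one-parameter lmfit power law with the exponent pinned at 2 (diffusive scaling, rate = D0*q^2). A minimal, self-contained sketch of just that step on synthetic data; the q range, the D0 used to generate the rates, and the noise level are invented for illustration:

import numpy as np
from lmfit import Model

def power_func(x, D0, power=2):
    return D0 * x**power

rng = np.random.default_rng(0)
q = np.linspace(0.005, 0.05, 10)                                # wave vectors in A^-1 (assumed)
rate = 8.0e3 * q**2 * (1 + 0.02 * rng.standard_normal(q.size))  # synthetic relaxation rates

mod = Model(power_func)
pars = mod.make_params(D0=1e-5, power=2)
pars['power'].vary = False          # fix the exponent at 2, as get_q_rate_fit_general does
result = mod.fit(rate, pars, x=q)
print('fitted D0 = %.3e A^2 s^-1' % result.best_values['D0'])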
rohitdwivedula/nfhs5
|
[
"1872fa7a84bccfb0882c07b30223fc98024691f2"
] |
[
"get_districtwise_links.py"
] |
[
"from bs4 import BeautifulSoup\nimport pandas as pd\nimport requests\n\nstate_links = pd.read_csv(\"statewise_links.csv\")\ndistrictwise_links = pd.DataFrame(columns=['state', 'district', 'link'])\n\nfor index, row in state_links.iterrows():\n print(f\"Processing state #{index} {row['state']} at link {row['link']}\")\n webpage = requests.get(url = row['link']).text\n soup = BeautifulSoup(webpage, 'html.parser')\n try:\n s = soup.find_all('select')[0].find_all('option')\n for i in range(0, len(s)):\n link_to_report = s[i].get('value')\n district_name = s[i].text\n if link_to_report is not None:\n districtwise_links = districtwise_links.append(\n {\n 'state': row['state'],\n 'district': district_name,\n 'link': link_to_report\n },\n ignore_index=True\n )\n except:\n print(\"[ERROR] Could not process:\", row)\n\ndistrictwise_links.to_csv('districtwise_links.csv')\n\n'''\n\tOutput for this program:\n\n\t\tProcessing state #0 Andhra Pradesh at link http://rchiips.org/nfhs/NFHS-5_AP.shtml\n\t\tProcessing state #1 Arunachal Pradesh at link http://rchiips.org/nfhs/NFHS-5_AR.shtml\n\t\tProcessing state #2 Assam at link http://rchiips.org/nfhs/NFHS-5_AS.shtml\n\t\tProcessing state #3 Bihar at link http://rchiips.org/nfhs/NFHS-5_BR.shtml\n\t\tProcessing state #4 Chhattisgarh at link http://rchiips.org/nfhs/NFHS-5_CT.shtml\n\t\tProcessing state #5 Goa at link http://rchiips.org/nfhs/NFHS-5_GA.shtml\n\t\tProcessing state #6 Gujarat at link http://rchiips.org/nfhs/NFHS-5_GJ.shtml\n\t\tProcessing state #7 Haryana at link http://rchiips.org/nfhs/NFHS-5_HR.shtml\n\t\tProcessing state #8 Himachal Pradesh at link http://rchiips.org/nfhs/NFHS-5_HP.shtml\n\t\tProcessing state #9 Jharkhand at link http://rchiips.org/nfhs/NFHS-5_JH.shtml\n\t\tProcessing state #10 Karnataka at link http://rchiips.org/nfhs/NFHS-5_KA.shtml\n\t\tProcessing state #11 Kerala at link http://rchiips.org/nfhs/NFHS-5_KL.shtml\n\t\tProcessing state #12 Madhya Pradesh at link http://rchiips.org/nfhs/NFHS-5_MP.shtml\n\t\tProcessing state #13 Maharashtra at link http://rchiips.org/nfhs/NFHS-5_MH.shtml\n\t\tProcessing state #14 Manipur at link http://rchiips.org/nfhs/NFHS-5_MN.shtml\n\t\tProcessing state #15 Meghalaya at link http://rchiips.org/nfhs/NFHS-5_ML.shtml\n\t\tProcessing state #16 Mizoram at link http://rchiips.org/nfhs/NFHS-5_MZ.shtml\n\t\tProcessing state #17 Nagaland at link http://rchiips.org/nfhs/NFHS-5_NL.shtml\n\t\tProcessing state #18 Odisha at link http://rchiips.org/nfhs/NFHS-5_OR.shtml\n\t\tProcessing state #19 Punjab at link http://rchiips.org/nfhs/NFHS-5_PB.shtml\n\t\tProcessing state #20 Rajasthan at link http://rchiips.org/nfhs/NFHS-5_RJ.shtml\n\t\tProcessing state #21 Sikkim at link http://rchiips.org/nfhs/NFHS-5_SK.shtml\n\t\tProcessing state #22 Tamil Nadu at link http://rchiips.org/nfhs/NFHS-5_TN.shtml\n\t\tProcessing state #23 Telangana at link http://rchiips.org/nfhs/NFHS-5_TL.shtml\n\t\t[ERROR] Could not process: state Telangana\n\t\tlink http://rchiips.org/nfhs/NFHS-5_TL.shtml\n\t\tName: 23, dtype: object\n\t\tProcessing state #24 Tripura at link http://rchiips.org/nfhs/NFHS-5_TR.shtml\n\t\tProcessing state #25 Uttar Pradesh at link http://rchiips.org/nfhs/NFHS-5_UP.shtml\n\t\tProcessing state #26 Uttarakhand at link http://rchiips.org/nfhs/NFHS-5_UT.shtml\n\t\tProcessing state #27 West Bengal at link http://rchiips.org/nfhs/NFHS-5_WB.shtml\n\t\tProcessing state #28 Andaman & Nicobar Island (UT) at link http://rchiips.org/nfhs/NFHS-5_AN.shtml\n\t\tProcessing state #29 Chandigarh (UT) at link 
http://rchiips.org/nfhs/NFHS-5_CH.shtml\n\t\t[ERROR] Could not process: state Chandigarh (UT)\n\t\tlink http://rchiips.org/nfhs/NFHS-5_CH.shtml\n\t\tName: 29, dtype: object\n\t\tProcessing state #30 Dadra Nagar Haveli & Daman & Diu (UT) at link http://rchiips.org/nfhs/NFHS-5_DD.shtml\n\t\tProcessing state #31 NCT of Delhi (UT) at link http://rchiips.org/nfhs/NFHS-5_DL.shtml\n\t\tProcessing state #32 Jammu & Kashmir (UT) at link http://rchiips.org/nfhs/NFHS-5_JK.shtml\n\t\tProcessing state #33 Ladakh (UT) at link http://rchiips.org/nfhs/NFHS-5_LH.shtml\n\t\tProcessing state #34 Puducherry (UT) at link http://rchiips.org/nfhs/NFHS-5_PY.shtml\n'''"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
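The scraper above depends on one HTML detail: the placeholder <option> in each state's district dropdown has no value attribute, so option.get('value') returns None and must be filtered out. A small sketch of that parsing step on an inline snippet (the district name and URL below are made up, not taken from rchiips.org):

from bs4 import BeautifulSoup

html = '''<select>
  <option>-- select district --</option>
  <option value="http://example.invalid/district.pdf">Example District</option>
</select>'''

soup = BeautifulSoup(html, 'html.parser')
for option in soup.find_all('select')[0].find_all('option'):
    link = option.get('value')   # None for the placeholder option, hence the None check
    if link is not None:
        print(option.text, '->', link)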
kaityo256/python_gs
|
[
"d825db5eff9048863a9bed1a52c77e329c6518ff"
] |
[
"gs_anime.py"
] |
[
"import matplotlib.pyplot as plt\nfrom numba import jit\nimport numpy as np\n\n# python gs_jit.py 1.91s user 0.16s system 119% cpu 1.736 total\n\n\n@jit\ndef laplacian(ix, iy, s):\n ts = 0.0\n ts += s[ix-1, iy]\n ts += s[ix+1, iy]\n ts += s[ix, iy-1]\n ts += s[ix, iy+1]\n ts -= 4.0*s[ix, iy]\n return ts\n\n\n@jit\ndef calc(u, v, u2, v2):\n (L, _) = u.shape\n dt = 0.2\n F = 0.04\n k = 0.06075\n lu = np.zeros((L, L))\n lv = np.zeros((L, L))\n for ix in range(1, L-1):\n for iy in range(1, L-1):\n lu[ix, iy] = 0.1 * laplacian(ix, iy, u)\n lv[ix, iy] = 0.05 * laplacian(ix, iy, v)\n cu = -v*v*u + F*(1.0 - u)\n cv = v*v*u - (F+k)*v\n u2[:] = u + (lu+cu) * dt\n v2[:] = v + (lv+cv) * dt\n\n\ndef main():\n L = 64\n u = np.zeros((L, L))\n u2 = np.zeros((L, L))\n v = np.zeros((L, L))\n v2 = np.zeros((L, L))\n h = L//2\n u[h-6:h+6, h-6:h+6] = 0.9\n v[h-3:h+3, h-3:h+3] = 0.7\n for i in range(10000):\n if i % 2 == 0:\n calc(u, v, u2, v2)\n else:\n calc(u2, v2, u, v)\n if i % 100 == 0:\n filename = \"gs_{:02d}.png\".format(i//100)\n print(filename)\n plt.imshow(v, cmap=\"inferno\")\n plt.savefig(filename)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.pyplot.savefig",
"numpy.zeros",
"matplotlib.pyplot.imshow"
]
] |
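gs_anime.py above double-buffers the fields: calc() reads (u, v) and writes (u2, v2), and the main loop swaps the roles on every step. A compact sketch of the same Gray-Scott update written with np.roll instead of the numba loops; note that np.roll wraps around (periodic boundaries) where the original keeps a fixed border, and the iteration count is shortened here:

import numpy as np

def laplacian(s):
    # five-point stencil via array shifts (periodic boundaries)
    return (np.roll(s, 1, 0) + np.roll(s, -1, 0) +
            np.roll(s, 1, 1) + np.roll(s, -1, 1) - 4.0 * s)

def step(u, v, dt=0.2, F=0.04, k=0.06075):
    du = 0.1 * laplacian(u) - v * v * u + F * (1.0 - u)
    dv = 0.05 * laplacian(v) + v * v * u - (F + k) * v
    return u + du * dt, v + dv * dt

L = 64
u = np.zeros((L, L))
v = np.zeros((L, L))
h = L // 2
u[h-6:h+6, h-6:h+6] = 0.9
v[h-3:h+3, h-3:h+3] = 0.7
for _ in range(1000):
    u, v = step(u, v)    # returning fresh arrays plays the role of the u/u2, v/v2 swap
print(v.min(), v.max())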
itamargr/Mask-RCNN-TF2
|
[
"cc5064b2dcfa9e1e91ed99c9e82b173d73308f1e"
] |
[
"mrcnn/model.py"
] |
[
"\"\"\"\r\nMask R-CNN\r\nThe main Mask R-CNN model implementation.\r\n\r\nCopyright (c) 2017 Matterport, Inc.\r\nLicensed under the MIT License (see LICENSE for details)\r\nWritten by Waleed Abdulla\r\n\"\"\"\r\n\r\nimport os\r\nimport random\r\nimport datetime\r\nimport re\r\nimport math\r\nimport logging\r\nfrom collections import OrderedDict\r\nimport multiprocessing\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport keras\r\nimport keras.backend as K\r\nimport keras.layers as KL\r\nimport keras.models as KM\r\n\r\nfrom mrcnn import utils\r\n\r\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\r\nfrom distutils.version import LooseVersion\r\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\r\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\r\n\r\n\r\n############################################################\r\n# Utility Functions\r\n############################################################\r\n\r\ndef log(text, array=None):\r\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\r\n prints it's shape, min, and max values.\r\n \"\"\"\r\n if array is not None:\r\n text = text.ljust(25)\r\n text += (\"shape: {:20} \".format(str(array.shape)))\r\n if array.size:\r\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\r\n else:\r\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\r\n text += \" {}\".format(array.dtype)\r\n print(text)\r\n\r\n\r\nclass BatchNorm(KL.BatchNormalization):\r\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\r\n to make changes if needed.\r\n\r\n Batch normalization has a negative effect on training if batches are small\r\n so this layer is often frozen (via setting in Config class) and functions\r\n as linear layer.\r\n \"\"\"\r\n def call(self, inputs, training=None):\r\n \"\"\"\r\n Note about training values:\r\n None: Train BN layers. This is the normal mode\r\n False: Freeze BN layers. Good when batch size is small\r\n True: (don't use). Set layer in training mode even when making inferences\r\n \"\"\"\r\n return super(self.__class__, self).call(inputs, training=training)\r\n\r\n\r\ndef compute_backbone_shapes(config, image_shape):\r\n \"\"\"Computes the width and height of each stage of the backbone network.\r\n\r\n Returns:\r\n [N, (height, width)]. Where N is the number of stages\r\n \"\"\"\r\n if callable(config.BACKBONE):\r\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\r\n\r\n # Currently supports ResNet only\r\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\r\n return np.array(\r\n [[int(math.ceil(image_shape[0] / stride)),\r\n int(math.ceil(image_shape[1] / stride))]\r\n for stride in config.BACKBONE_STRIDES])\r\n\r\n\r\n############################################################\r\n# Resnet Graph\r\n############################################################\r\n\r\n# Code adopted from:\r\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\r\n\r\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\r\n use_bias=True, train_bn=True):\r\n \"\"\"The identity_block is the block that has no conv layer at shortcut\r\n # Arguments\r\n input_tensor: input tensor\r\n kernel_size: default 3, the kernel size of middle conv layer at main path\r\n filters: list of integers, the nb_filters of 3 conv layer at main path\r\n stage: integer, current stage label, used for generating layer names\r\n block: 'a','b'..., current block label, used for generating layer names\r\n use_bias: Boolean. 
To use or not use a bias in conv layers.\r\n train_bn: Boolean. Train or freeze Batch Norm layers\r\n \"\"\"\r\n nb_filter1, nb_filter2, nb_filter3 = filters\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n\r\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\r\n use_bias=use_bias)(input_tensor)\r\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\r\n name=conv_name_base + '2b', use_bias=use_bias)(x)\r\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\r\n use_bias=use_bias)(x)\r\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\r\n\r\n x = KL.Add()([x, input_tensor])\r\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\r\n return x\r\n\r\n\r\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\r\n strides=(2, 2), use_bias=True, train_bn=True):\r\n \"\"\"conv_block is the block that has a conv layer at shortcut\r\n # Arguments\r\n input_tensor: input tensor\r\n kernel_size: default 3, the kernel size of middle conv layer at main path\r\n filters: list of integers, the nb_filters of 3 conv layer at main path\r\n stage: integer, current stage label, used for generating layer names\r\n block: 'a','b'..., current block label, used for generating layer names\r\n use_bias: Boolean. To use or not use a bias in conv layers.\r\n train_bn: Boolean. Train or freeze Batch Norm layers\r\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\r\n And the shortcut should have subsample=(2,2) as well\r\n \"\"\"\r\n nb_filter1, nb_filter2, nb_filter3 = filters\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n\r\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\r\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\r\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\r\n name=conv_name_base + '2b', use_bias=use_bias)(x)\r\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\r\n '2c', use_bias=use_bias)(x)\r\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\r\n\r\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\r\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\r\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\r\n\r\n x = KL.Add()([x, shortcut])\r\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\r\n return x\r\n\r\n\r\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\r\n \"\"\"Build a ResNet graph.\r\n architecture: Can be resnet50 or resnet101\r\n stage5: Boolean. If False, stage5 of the network is not created\r\n train_bn: Boolean. 
Train or freeze Batch Norm layers\r\n \"\"\"\r\n assert architecture in [\"resnet50\", \"resnet101\"]\r\n # Stage 1\r\n x = KL.ZeroPadding2D((3, 3))(input_image)\r\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\r\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\r\n # Stage 2\r\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\r\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\r\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\r\n # Stage 3\r\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\r\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\r\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\r\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\r\n # Stage 4\r\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\r\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\r\n for i in range(block_count):\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\r\n C4 = x\r\n # Stage 5\r\n if stage5:\r\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\r\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\r\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\r\n else:\r\n C5 = None\r\n return [C1, C2, C3, C4, C5]\r\n\r\n\r\n############################################################\r\n# Proposal Layer\r\n############################################################\r\n\r\ndef apply_box_deltas_graph(boxes, deltas):\r\n \"\"\"Applies the given deltas to the given boxes.\r\n boxes: [N, (y1, x1, y2, x2)] boxes to update\r\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\r\n \"\"\"\r\n # Convert to y, x, h, w\r\n height = boxes[:, 2] - boxes[:, 0]\r\n width = boxes[:, 3] - boxes[:, 1]\r\n center_y = boxes[:, 0] + 0.5 * height\r\n center_x = boxes[:, 1] + 0.5 * width\r\n # Apply deltas\r\n center_y += deltas[:, 0] * height\r\n center_x += deltas[:, 1] * width\r\n height *= tf.exp(deltas[:, 2])\r\n width *= tf.exp(deltas[:, 3])\r\n # Convert back to y1, x1, y2, x2\r\n y1 = center_y - 0.5 * height\r\n x1 = center_x - 0.5 * width\r\n y2 = y1 + height\r\n x2 = x1 + width\r\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\r\n return result\r\n\r\n\r\ndef clip_boxes_graph(boxes, window):\r\n \"\"\"\r\n boxes: [N, (y1, x1, y2, x2)]\r\n window: [4] in the form y1, x1, y2, x2\r\n \"\"\"\r\n # Split\r\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\r\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\r\n # Clip\r\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\r\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\r\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\r\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\r\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\r\n clipped.set_shape((clipped.shape[0], 4))\r\n return clipped\r\n\r\n\r\nclass ProposalLayer(keras.layers.Layer):\r\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\r\n to the second stage. Filtering is done based on anchor scores and\r\n non-max suppression to remove overlaps. 
It also applies bounding\r\n box refinement deltas to anchors.\r\n\r\n Inputs:\r\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\r\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\r\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\r\n\r\n Returns:\r\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\r\n \"\"\"\r\n\r\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\r\n super(ProposalLayer, self).__init__(**kwargs)\r\n self.config = config\r\n self.proposal_count = proposal_count\r\n self.nms_threshold = nms_threshold\r\n\r\n def call(self, inputs):\r\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\r\n scores = inputs[0][:, :, 1]\r\n # Box deltas [batch, num_rois, 4]\r\n deltas = inputs[1]\r\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\r\n # Anchors\r\n anchors = inputs[2]\r\n\r\n # Improve performance by trimming to top anchors by score\r\n # and doing the rest on the smaller subset.\r\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\r\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\r\n name=\"top_anchors\").indices\r\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\r\n self.config.IMAGES_PER_GPU)\r\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\r\n self.config.IMAGES_PER_GPU)\r\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\r\n self.config.IMAGES_PER_GPU,\r\n names=[\"pre_nms_anchors\"])\r\n\r\n # Apply deltas to anchors to get refined anchors.\r\n # [batch, N, (y1, x1, y2, x2)]\r\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\r\n lambda x, y: apply_box_deltas_graph(x, y),\r\n self.config.IMAGES_PER_GPU,\r\n names=[\"refined_anchors\"])\r\n\r\n # Clip to image boundaries. Since we're in normalized coordinates,\r\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\r\n window = np.array([0, 0, 1, 1], dtype=np.float32)\r\n boxes = utils.batch_slice(boxes,\r\n lambda x: clip_boxes_graph(x, window),\r\n self.config.IMAGES_PER_GPU,\r\n names=[\"refined_anchors_clipped\"])\r\n\r\n # Filter out small boxes\r\n # According to Xinlei Chen's paper, this reduces detection accuracy\r\n # for small objects, so we're skipping it.\r\n\r\n # Non-max suppression\r\n def nms(boxes, scores):\r\n indices = tf.image.non_max_suppression(\r\n boxes, scores, self.proposal_count,\r\n self.nms_threshold, name=\"rpn_non_max_suppression\")\r\n proposals = tf.gather(boxes, indices)\r\n # Pad if needed\r\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\r\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\r\n return proposals\r\n proposals = utils.batch_slice([boxes, scores], nms,\r\n self.config.IMAGES_PER_GPU)\r\n return proposals\r\n\r\n def compute_output_shape(self, input_shape):\r\n return (None, self.proposal_count, 4)\r\n\r\n\r\n############################################################\r\n# ROIAlign Layer\r\n############################################################\r\n\r\ndef log2_graph(x):\r\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\r\n return tf.math.log(x) / tf.math.log(2.0)\r\n\r\n\r\nclass PyramidROIAlign(keras.layers.Layer):\r\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\r\n\r\n Params:\r\n - pool_shape: [pool_height, pool_width] of the output pooled regions. 
Usually [7, 7]\r\n\r\n Inputs:\r\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\r\n coordinates. Possibly padded with zeros if not enough\r\n boxes to fill the array.\r\n - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\r\n - feature_maps: List of feature maps from different levels of the pyramid.\r\n Each is [batch, height, width, channels]\r\n\r\n Output:\r\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\r\n The width and height are those specific in the pool_shape in the layer\r\n constructor.\r\n \"\"\"\r\n\r\n def __init__(self, pool_shape, **kwargs):\r\n super(PyramidROIAlign, self).__init__(**kwargs)\r\n self.pool_shape = tuple(pool_shape)\r\n\r\n def call(self, inputs):\r\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\r\n boxes = inputs[0]\r\n\r\n # Image meta\r\n # Holds details about the image. See compose_image_meta()\r\n image_meta = inputs[1]\r\n\r\n # Feature Maps. List of feature maps from different level of the\r\n # feature pyramid. Each is [batch, height, width, channels]\r\n feature_maps = inputs[2:]\r\n\r\n # Assign each ROI to a level in the pyramid based on the ROI area.\r\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\r\n h = y2 - y1\r\n w = x2 - x1\r\n # Use shape of first image. Images in a batch must have the same size.\r\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\r\n # Equation 1 in the Feature Pyramid Networks paper. Account for\r\n # the fact that our coordinates are normalized here.\r\n # e.g. a 224x224 ROI (in pixels) maps to P4\r\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\r\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\r\n roi_level = tf.minimum(5, tf.maximum(\r\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\r\n roi_level = tf.squeeze(roi_level, 2)\r\n\r\n # Loop through levels and apply ROI pooling to each. P2 to P5.\r\n pooled = []\r\n box_to_level = []\r\n for i, level in enumerate(range(2, 6)):\r\n ix = tf.where(tf.equal(roi_level, level))\r\n level_boxes = tf.gather_nd(boxes, ix)\r\n\r\n # Box indices for crop_and_resize.\r\n box_indices = tf.cast(ix[:, 0], tf.int32)\r\n\r\n # Keep track of which box is mapped to which level\r\n box_to_level.append(ix)\r\n\r\n # Stop gradient propogation to ROI proposals\r\n level_boxes = tf.stop_gradient(level_boxes)\r\n box_indices = tf.stop_gradient(box_indices)\r\n\r\n # Crop and Resize\r\n # From Mask R-CNN paper: \"We sample four regular locations, so\r\n # that we can evaluate either max or average pooling. 
In fact,\r\n # interpolating only a single value at each bin center (without\r\n # pooling) is nearly as effective.\"\r\n #\r\n # Here we use the simplified approach of a single value per bin,\r\n # which is how it's done in tf.crop_and_resize()\r\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\r\n pooled.append(tf.image.crop_and_resize(\r\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\r\n method=\"bilinear\"))\r\n\r\n # Pack pooled features into one tensor\r\n pooled = tf.concat(pooled, axis=0)\r\n\r\n # Pack box_to_level mapping into one array and add another\r\n # column representing the order of pooled boxes\r\n box_to_level = tf.concat(box_to_level, axis=0)\r\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\r\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\r\n axis=1)\r\n\r\n # Rearrange pooled features to match the order of the original boxes\r\n # Sort box_to_level by batch then box index\r\n # TF doesn't have a way to sort by two columns, so merge them and sort.\r\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\r\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\r\n box_to_level)[0]).indices[::-1]\r\n ix = tf.gather(box_to_level[:, 2], ix)\r\n pooled = tf.gather(pooled, ix)\r\n\r\n # Re-add the batch dimension\r\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\r\n pooled = tf.reshape(pooled, shape)\r\n return pooled\r\n\r\n def compute_output_shape(self, input_shape):\r\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\r\n\r\n\r\n############################################################\r\n# Detection Target Layer\r\n############################################################\r\n\r\ndef overlaps_graph(boxes1, boxes2):\r\n \"\"\"Computes IoU overlaps between two sets of boxes.\r\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\r\n \"\"\"\r\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\r\n # every boxes1 against every boxes2 without loops.\r\n # TF doesn't have an equivalent to np.repeat() so simulate it\r\n # using tf.tile() and tf.reshape.\r\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\r\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\r\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\r\n # 2. Compute intersections\r\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\r\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\r\n y1 = tf.maximum(b1_y1, b2_y1)\r\n x1 = tf.maximum(b1_x1, b2_x1)\r\n y2 = tf.minimum(b1_y2, b2_y2)\r\n x2 = tf.minimum(b1_x2, b2_x2)\r\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\r\n # 3. Compute unions\r\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\r\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\r\n union = b1_area + b2_area - intersection\r\n # 4. Compute IoU and reshape to [boxes1, boxes2]\r\n iou = intersection / union\r\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\r\n return overlaps\r\n\r\n\r\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\r\n \"\"\"Generates detection targets for one image. Subsamples proposals and\r\n generates target class IDs, bounding box deltas, and masks for each.\r\n\r\n Inputs:\r\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\r\n be zero padded if there are not enough proposals.\r\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\r\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\r\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\r\n\r\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\r\n and masks.\r\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\r\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\r\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\r\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\r\n boundaries and resized to neural network output size.\r\n\r\n Note: Returned arrays might be zero padded if not enough target ROIs.\r\n \"\"\"\r\n # Assertions\r\n asserts = [\r\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\r\n name=\"roi_assertion\"),\r\n ]\r\n with tf.control_dependencies(asserts):\r\n proposals = tf.identity(proposals)\r\n\r\n # Remove zero padding\r\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\r\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\r\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\r\n name=\"trim_gt_class_ids\")\r\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\r\n name=\"trim_gt_masks\")\r\n\r\n # Handle COCO crowds\r\n # A crowd box in COCO is a bounding box around several instances. Exclude\r\n # them from training. A crowd box is given a negative class ID.\r\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\r\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\r\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\r\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\r\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\r\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\r\n\r\n # Compute overlaps matrix [proposals, gt_boxes]\r\n overlaps = overlaps_graph(proposals, gt_boxes)\r\n\r\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\r\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\r\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\r\n no_crowd_bool = (crowd_iou_max < 0.001)\r\n\r\n # Determine positive and negative ROIs\r\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\r\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\r\n positive_roi_bool = (roi_iou_max >= 0.5)\r\n positive_indices = tf.where(positive_roi_bool)[:, 0]\r\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\r\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\r\n\r\n # Subsample ROIs. Aim for 33% positive\r\n # Positive ROIs\r\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\r\n config.ROI_POSITIVE_RATIO)\r\n positive_indices = tf.random.shuffle(positive_indices)[:positive_count]\r\n positive_count = tf.shape(positive_indices)[0]\r\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\r\n r = 1.0 / config.ROI_POSITIVE_RATIO\r\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\r\n negative_indices = tf.random.shuffle(negative_indices)[:negative_count]\r\n # Gather selected ROIs\r\n positive_rois = tf.gather(proposals, positive_indices)\r\n negative_rois = tf.gather(proposals, negative_indices)\r\n\r\n # Assign positive ROIs to GT boxes.\r\n positive_overlaps = tf.gather(overlaps, positive_indices)\r\n roi_gt_box_assignment = tf.cond(\r\n tf.greater(tf.shape(positive_overlaps)[1], 0),\r\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\r\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\r\n )\r\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\r\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\r\n\r\n # Compute bbox refinement for positive ROIs\r\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\r\n deltas /= config.BBOX_STD_DEV\r\n\r\n # Assign positive ROIs to GT masks\r\n # Permute masks to [N, height, width, 1]\r\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\r\n # Pick the right mask for each ROI\r\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\r\n\r\n # Compute mask targets\r\n boxes = positive_rois\r\n if config.USE_MINI_MASK:\r\n # Transform ROI coordinates from normalized image space\r\n # to normalized mini-mask space.\r\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\r\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\r\n gt_h = gt_y2 - gt_y1\r\n gt_w = gt_x2 - gt_x1\r\n y1 = (y1 - gt_y1) / gt_h\r\n x1 = (x1 - gt_x1) / gt_w\r\n y2 = (y2 - gt_y1) / gt_h\r\n x2 = (x2 - gt_x1) / gt_w\r\n boxes = tf.concat([y1, x1, y2, x2], 1)\r\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\r\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\r\n box_ids,\r\n config.MASK_SHAPE)\r\n # Remove the extra dimension from masks.\r\n masks = tf.squeeze(masks, axis=3)\r\n\r\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\r\n # binary cross entropy loss.\r\n masks = tf.round(masks)\r\n\r\n # Append negative ROIs and pad bbox deltas and masks that\r\n # are not used for negative ROIs with zeros.\r\n rois = tf.concat([positive_rois, negative_rois], axis=0)\r\n N = tf.shape(negative_rois)[0]\r\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\r\n rois = tf.pad(rois, [(0, P), (0, 0)])\r\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\r\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\r\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\r\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\r\n\r\n return rois, roi_gt_class_ids, deltas, masks\r\n\r\n\r\nclass DetectionTargetLayer(keras.layers.Layer):\r\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\r\n and masks for each.\r\n\r\n Inputs:\r\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\r\n be zero padded if there are not enough proposals.\r\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\r\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\r\n coordinates.\r\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\r\n\r\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\r\n and masks.\r\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\r\n coordinates\r\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\r\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\r\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\r\n Masks cropped to bbox boundaries and resized to neural\r\n network output size.\r\n\r\n Note: Returned arrays might be zero padded if not enough target ROIs.\r\n \"\"\"\r\n\r\n def __init__(self, config, **kwargs):\r\n super(DetectionTargetLayer, self).__init__(**kwargs)\r\n self.config = config\r\n\r\n def call(self, inputs):\r\n proposals = inputs[0]\r\n gt_class_ids = inputs[1]\r\n gt_boxes = inputs[2]\r\n gt_masks = inputs[3]\r\n\r\n # Slice the batch and run a graph for each slice\r\n # TODO: Rename target_bbox to target_deltas for clarity\r\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\r\n outputs = utils.batch_slice(\r\n [proposals, gt_class_ids, gt_boxes, gt_masks],\r\n lambda w, x, y, z: detection_targets_graph(\r\n w, x, y, z, self.config),\r\n self.config.IMAGES_PER_GPU, names=names)\r\n return outputs\r\n\r\n def compute_output_shape(self, input_shape):\r\n return [\r\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\r\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\r\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\r\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\r\n self.config.MASK_SHAPE[1]) # masks\r\n ]\r\n\r\n def compute_mask(self, inputs, mask=None):\r\n return [None, None, None, None]\r\n\r\n\r\n############################################################\r\n# Detection Layer\r\n############################################################\r\n\r\ndef refine_detections_graph(rois, probs, deltas, window, config):\r\n \"\"\"Refine classified proposals and filter overlaps and return final\r\n detections.\r\n\r\n Inputs:\r\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\r\n probs: [N, num_classes]. Class probabilities.\r\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\r\n bounding box deltas.\r\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\r\n that contains the image excluding the padding.\r\n\r\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\r\n coordinates are normalized.\r\n \"\"\"\r\n # Class IDs per ROI\r\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\r\n # Class probability of the top class of each ROI\r\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\r\n class_scores = tf.gather_nd(probs, indices)\r\n # Class-specific bounding box deltas\r\n deltas_specific = tf.gather_nd(deltas, indices)\r\n # Apply bounding box deltas\r\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\r\n refined_rois = apply_box_deltas_graph(\r\n rois, deltas_specific * config.BBOX_STD_DEV)\r\n # Clip boxes to image window\r\n refined_rois = clip_boxes_graph(refined_rois, window)\r\n\r\n # TODO: Filter out boxes with zero area\r\n\r\n # Filter out background boxes\r\n keep = tf.where(class_ids > 0)[:, 0]\r\n # Filter out low confidence boxes\r\n if config.DETECTION_MIN_CONFIDENCE:\r\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\r\n keep = tf.sets.intersection(tf.expand_dims(keep, 0),\r\n tf.expand_dims(conf_keep, 0))\r\n keep = tf.sparse.to_dense(keep)[0]\r\n\r\n # Apply per-class NMS\r\n # 1. 
Prepare variables\r\n pre_nms_class_ids = tf.gather(class_ids, keep)\r\n pre_nms_scores = tf.gather(class_scores, keep)\r\n pre_nms_rois = tf.gather(refined_rois, keep)\r\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\r\n\r\n def nms_keep_map(class_id):\r\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\r\n # Indices of ROIs of the given class\r\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\r\n # Apply NMS\r\n class_keep = tf.image.non_max_suppression(\r\n tf.gather(pre_nms_rois, ixs),\r\n tf.gather(pre_nms_scores, ixs),\r\n max_output_size=config.DETECTION_MAX_INSTANCES,\r\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\r\n # Map indices\r\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\r\n # Pad with -1 so returned tensors have the same shape\r\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\r\n class_keep = tf.pad(class_keep, [(0, gap)],\r\n mode='CONSTANT', constant_values=-1)\r\n # Set shape so map_fn() can infer result shape\r\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\r\n return class_keep\r\n\r\n # 2. Map over class IDs\r\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\r\n dtype=tf.int64)\r\n # 3. Merge results into one list, and remove -1 padding\r\n nms_keep = tf.reshape(nms_keep, [-1])\r\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\r\n # 4. Compute intersection between keep and nms_keep\r\n keep = tf.sets.intersection(tf.expand_dims(keep, 0),\r\n tf.expand_dims(nms_keep, 0))\r\n keep = tf.sparse.to_dense(keep)[0]\r\n # Keep top detections\r\n roi_count = config.DETECTION_MAX_INSTANCES\r\n class_scores_keep = tf.gather(class_scores, keep)\r\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\r\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\r\n keep = tf.gather(keep, top_ids)\r\n\r\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\r\n # Coordinates are normalized.\r\n detections = tf.concat([\r\n tf.gather(refined_rois, keep),\r\n tf.cast(tf.gather(class_ids, keep), np.float32)[..., tf.newaxis],\r\n tf.gather(class_scores, keep)[..., tf.newaxis]\r\n ], axis=1)\r\n\r\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\r\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\r\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\r\n return detections\r\n\r\n\r\nclass DetectionLayer(keras.layers.Layer):\r\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\r\n returns the final detection boxes.\r\n\r\n Returns:\r\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\r\n coordinates are normalized.\r\n \"\"\"\r\n\r\n def __init__(self, config=None, **kwargs):\r\n super(DetectionLayer, self).__init__(**kwargs)\r\n self.config = config\r\n\r\n def call(self, inputs):\r\n rois = inputs[0]\r\n mrcnn_class = inputs[1]\r\n mrcnn_bbox = inputs[2]\r\n image_meta = inputs[3]\r\n\r\n # Get windows of images in normalized coordinates. 
Windows are the area\r\n # in the image that excludes the padding.\r\n # Use the shape of the first image in the batch to normalize the window\r\n # because we know that all images get resized to the same size.\r\n m = parse_image_meta_graph(image_meta)\r\n image_shape = m['image_shape'][0]\r\n window = norm_boxes_graph(m['window'], image_shape[:2])\r\n\r\n # Run detection refinement graph on each item in the batch\r\n detections_batch = utils.batch_slice(\r\n [rois, mrcnn_class, mrcnn_bbox, window],\r\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\r\n self.config.IMAGES_PER_GPU)\r\n\r\n # Reshape output\r\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\r\n # normalized coordinates\r\n return tf.reshape(\r\n detections_batch,\r\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\r\n\r\n def compute_output_shape(self, input_shape):\r\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\r\n\r\n\r\n############################################################\r\n# Region Proposal Network (RPN)\r\n############################################################\r\n\r\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\r\n \"\"\"Builds the computation graph of Region Proposal Network.\r\n\r\n feature_map: backbone features [batch, height, width, depth]\r\n anchors_per_location: number of anchors per pixel in the feature map\r\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\r\n every pixel in the feature map), or 2 (every other pixel).\r\n\r\n Returns:\r\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\r\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\r\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\r\n applied to anchors.\r\n \"\"\"\r\n # TODO: check if stride of 2 causes alignment issues if the feature map\r\n # is not even.\r\n # Shared convolutional base of the RPN\r\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\r\n strides=anchor_stride,\r\n name='rpn_conv_shared')(feature_map)\r\n\r\n # Anchor Score. [batch, height, width, anchors per location * 2].\r\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\r\n activation='linear', name='rpn_class_raw')(shared)\r\n\r\n # Reshape to [batch, anchors, 2]\r\n rpn_class_logits = KL.Lambda(\r\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\r\n\r\n # Softmax on last dimension of BG/FG.\r\n rpn_probs = KL.Activation(\r\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\r\n\r\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\r\n # where depth is [x, y, log(w), log(h)]\r\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\r\n activation='linear', name='rpn_bbox_pred')(shared)\r\n\r\n # Reshape to [batch, anchors, 4]\r\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\r\n\r\n return [rpn_class_logits, rpn_probs, rpn_bbox]\r\n\r\n\r\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\r\n \"\"\"Builds a Keras model of the Region Proposal Network.\r\n It wraps the RPN graph so it can be used multiple times with shared\r\n weights.\r\n\r\n anchors_per_location: number of anchors per pixel in the feature map\r\n anchor_stride: Controls the density of anchors. 
Typically 1 (anchors for\r\n every pixel in the feature map), or 2 (every other pixel).\r\n depth: Depth of the backbone feature map.\r\n\r\n Returns a Keras Model object. The model outputs, when called, are:\r\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\r\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\r\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\r\n applied to anchors.\r\n \"\"\"\r\n input_feature_map = KL.Input(shape=[None, None, depth],\r\n name=\"input_rpn_feature_map\")\r\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\r\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\r\n\r\n\r\n############################################################\r\n# Feature Pyramid Network Heads\r\n############################################################\r\n\r\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\r\n pool_size, num_classes, train_bn=True,\r\n fc_layers_size=1024):\r\n \"\"\"Builds the computation graph of the feature pyramid network classifier\r\n and regressor heads.\r\n\r\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\r\n coordinates.\r\n feature_maps: List of feature maps from different layers of the pyramid,\r\n [P2, P3, P4, P5]. Each has a different resolution.\r\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\r\n pool_size: The width of the square feature map generated from ROI Pooling.\r\n num_classes: number of classes, which determines the depth of the results\r\n train_bn: Boolean. Train or freeze Batch Norm layers\r\n fc_layers_size: Size of the 2 FC layers\r\n\r\n Returns:\r\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\r\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\r\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\r\n proposal boxes\r\n \"\"\"\r\n # ROI Pooling\r\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\r\n x = PyramidROIAlign([pool_size, pool_size],\r\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\r\n # Two 1024 FC layers (implemented with Conv2D for consistency)\r\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\r\n name=\"mrcnn_class_conv1\")(x)\r\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\r\n name=\"mrcnn_class_conv2\")(x)\r\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\r\n name=\"pool_squeeze\")(x)\r\n\r\n # Classifier head\r\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\r\n name='mrcnn_class_logits')(shared)\r\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\r\n name=\"mrcnn_class\")(mrcnn_class_logits)\r\n\r\n # BBox head\r\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\r\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\r\n name='mrcnn_bbox_fc')(shared)\r\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\r\n s = K.int_shape(x)\r\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\r\n\r\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\r\n\r\n\r\ndef 
build_fpn_mask_graph(rois, feature_maps, image_meta,\r\n pool_size, num_classes, train_bn=True):\r\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\r\n\r\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\r\n coordinates.\r\n feature_maps: List of feature maps from different layers of the pyramid,\r\n [P2, P3, P4, P5]. Each has a different resolution.\r\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\r\n pool_size: The width of the square feature map generated from ROI Pooling.\r\n num_classes: number of classes, which determines the depth of the results\r\n train_bn: Boolean. Train or freeze Batch Norm layers\r\n\r\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\r\n \"\"\"\r\n # ROI Pooling\r\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\r\n x = PyramidROIAlign([pool_size, pool_size],\r\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\r\n\r\n # Conv layers\r\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\r\n name=\"mrcnn_mask_conv1\")(x)\r\n x = KL.TimeDistributed(BatchNorm(),\r\n name='mrcnn_mask_bn1')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\r\n name=\"mrcnn_mask_conv2\")(x)\r\n x = KL.TimeDistributed(BatchNorm(),\r\n name='mrcnn_mask_bn2')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\r\n name=\"mrcnn_mask_conv3\")(x)\r\n x = KL.TimeDistributed(BatchNorm(),\r\n name='mrcnn_mask_bn3')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\r\n name=\"mrcnn_mask_conv4\")(x)\r\n x = KL.TimeDistributed(BatchNorm(),\r\n name='mrcnn_mask_bn4')(x, training=train_bn)\r\n x = KL.Activation('relu')(x)\r\n\r\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\r\n name=\"mrcnn_mask_deconv\")(x)\r\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\r\n name=\"mrcnn_mask\")(x)\r\n return x\r\n\r\n\r\n############################################################\r\n# Loss Functions\r\n############################################################\r\n\r\ndef smooth_l1_loss(y_true, y_pred):\r\n \"\"\"Implements Smooth-L1 loss.\r\n y_true and y_pred are typically: [N, 4], but could be any shape.\r\n \"\"\"\r\n diff = K.abs(y_true - y_pred)\r\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\r\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\r\n return loss\r\n\r\n\r\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\r\n \"\"\"RPN anchor classifier loss.\r\n\r\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\r\n -1=negative, 0=neutral anchor.\r\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\r\n \"\"\"\r\n # Squeeze last dim to simplify\r\n rpn_match = tf.squeeze(rpn_match, -1)\r\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\r\n
 anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)\r\n
 # Positive and Negative anchors contribute to the loss,\r\n
 # but neutral anchors (match value = 0) don't.\r\n
 indices = tf.where(K.not_equal(rpn_match, 0))\r\n
 # Pick rows that contribute to the loss and filter out the rest.\r\n
 rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)\r\n
 anchor_class = tf.gather_nd(anchor_class, indices)\r\n
 # Cross entropy loss\r\n
 loss = K.sparse_categorical_crossentropy(target=anchor_class,\r\n
 output=rpn_class_logits,\r\n
 from_logits=True)\r\n
 loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\r\n
 return loss\r\n\r\n\r\n
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):\r\n
 \"\"\"Return the RPN bounding box loss graph.\r\n\r\n
 config: the model config object.\r\n
 target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\r\n
 Uses 0 padding to fill in unused bbox deltas.\r\n
 rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\r\n
 -1=negative, 0=neutral anchor.\r\n
 rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\r\n
 \"\"\"\r\n
 # Positive anchors contribute to the loss, but negative and\r\n
 # neutral anchors (match value of 0 or -1) don't.\r\n
 rpn_match = K.squeeze(rpn_match, -1)\r\n
 indices = tf.where(K.equal(rpn_match, 1))\r\n\r\n
 # Pick bbox deltas that contribute to the loss\r\n
 rpn_bbox = tf.gather_nd(rpn_bbox, indices)\r\n\r\n
 # Trim target bounding box deltas to the same length as rpn_bbox.\r\n
 batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)\r\n
 target_bbox = batch_pack_graph(target_bbox, batch_counts,\r\n
 config.IMAGES_PER_GPU)\r\n\r\n
 loss = smooth_l1_loss(target_bbox, rpn_bbox)\r\n\r\n
 loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))\r\n
 return loss\r\n\r\n\r\n
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,\r\n
 active_class_ids):\r\n
 \"\"\"Loss for the classifier head of Mask RCNN.\r\n\r\n
 target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\r\n
 padding to fill in the array.\r\n
 pred_class_logits: [batch, num_rois, num_classes]\r\n
 active_class_ids: [batch, num_classes]. Has a value of 1 for\r\n
 classes that are in the dataset of the image, and 0\r\n
 for classes that are not in the dataset.\r\n
 \"\"\"\r\n
 # During model building, Keras calls this function with\r\n
 # target_class_ids of type float32. Unclear why. Cast it\r\n
 # to int to get around it.\r\n
 target_class_ids = tf.cast(target_class_ids, 'int64')\r\n\r\n
 # Find predictions of classes that are not in the dataset.\r\n
 pred_class_ids = tf.argmax(pred_class_logits, axis=2)\r\n
 # TODO: Update this line to work with batch > 1. Right now it assumes all\r\n
 # images in a batch have the same active_class_ids\r\n
 pred_active = tf.gather(active_class_ids[0], pred_class_ids)\r\n\r\n
 # Loss\r\n
 loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n
 labels=target_class_ids, logits=pred_class_logits)\r\n\r\n
 # Erase losses of predictions of classes that are not in the active\r\n
 # classes of the image.\r\n
 loss = loss * pred_active\r\n\r\n
 # Compute the loss mean. Use only predictions that contribute\r\n
 # to the loss to get a correct mean.\r\n
 loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\r\n
 return loss\r\n\r\n\r\n
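# Worked example for mrcnn_class_loss_graph (illustrative comment only):\r\n
# with active_class_ids[0] = [1, 1, 0] for an image whose dataset covers\r\n
# classes 0-1, a ROI whose argmax prediction is class 2 gets pred_active = 0,\r\n
# so its cross-entropy term is erased and it is also excluded from the\r\n
# denominator of the mean. A ROI predicted as class 1 keeps pred_active = 1\r\n
# and contributes normally.\r\n\r\n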
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):\r\n
 \"\"\"Loss for Mask R-CNN bounding box refinement.\r\n\r\n
 target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\r\n
 target_class_ids: [batch, num_rois]. Integer class IDs.\r\n
 pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\r\n
 \"\"\"\r\n
 # Reshape to merge batch and roi dimensions for simplicity.\r\n
 target_class_ids = K.reshape(target_class_ids, (-1,))\r\n
 target_bbox = K.reshape(target_bbox, (-1, 4))\r\n
 pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))\r\n\r\n
 # Only positive ROIs contribute to the loss. And only\r\n
 # the right class_id of each ROI. Get their indices.\r\n
 positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]\r\n
 positive_roi_class_ids = tf.cast(\r\n
 tf.gather(target_class_ids, positive_roi_ix), tf.int64)\r\n
 indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)\r\n\r\n
 # Gather the deltas (predicted and true) that contribute to loss\r\n
 target_bbox = tf.gather(target_bbox, positive_roi_ix)\r\n
 pred_bbox = tf.gather_nd(pred_bbox, indices)\r\n\r\n
 # Smooth-L1 Loss\r\n
 loss = K.switch(tf.size(target_bbox) > 0,\r\n
 smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),\r\n
 tf.constant(0.0))\r\n
 loss = K.mean(loss)\r\n
 return loss\r\n\r\n\r\n
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):\r\n
 \"\"\"Mask binary cross-entropy loss for the masks head.\r\n\r\n
 target_masks: [batch, num_rois, height, width].\r\n
 A float32 tensor of values 0 or 1. Uses zero padding to fill array.\r\n
 target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.\r\n
 pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\r\n
 with values from 0 to 1.\r\n
 \"\"\"\r\n
 # Reshape for simplicity. Merge first two dimensions into one.\r\n
 target_class_ids = K.reshape(target_class_ids, (-1,))\r\n
 mask_shape = tf.shape(target_masks)\r\n
 target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))\r\n
 pred_shape = tf.shape(pred_masks)\r\n
 pred_masks = K.reshape(pred_masks,\r\n
 (-1, pred_shape[2], pred_shape[3], pred_shape[4]))\r\n
 # Permute predicted masks to [N, num_classes, height, width]\r\n
 pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])\r\n\r\n
 # Only positive ROIs contribute to the loss. And only\r\n
 # the class specific mask of each ROI.\r\n
 positive_ix = tf.where(target_class_ids > 0)[:, 0]\r\n
 positive_class_ids = tf.cast(\r\n
 tf.gather(target_class_ids, positive_ix), tf.int64)\r\n
 indices = tf.stack([positive_ix, positive_class_ids], axis=1)\r\n\r\n
 # Gather the masks (predicted and true) that contribute to loss\r\n
 y_true = tf.gather(target_masks, positive_ix)\r\n
 y_pred = tf.gather_nd(pred_masks, indices)\r\n\r\n
 # Compute binary cross entropy. If no positive ROIs, then return 0.\r\n
 # shape: [num_positive_rois, height, width]\r\n
 loss = K.switch(tf.size(y_true) > 0,\r\n
 K.binary_crossentropy(target=y_true, output=y_pred),\r\n
 tf.constant(0.0))\r\n
 loss = K.mean(loss)\r\n
 return loss\r\n\r\n\r\n
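# Worked example for the class-specific gather in mrcnn_mask_loss_graph\r\n
# (illustrative comment only): if the flattened target_class_ids are\r\n
# [3, 0, 5], then positive_ix = [0, 2] and indices = [[0, 3], [2, 5]],\r\n
# so tf.gather_nd picks ROI 0's class-3 mask and ROI 2's class-5 mask\r\n
# from the transposed [N, num_classes, height, width] prediction tensor.\r\n\r\n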
############################################################\r\n
# Data Generator\r\n
############################################################\r\n\r\n
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,\r\n
 use_mini_mask=False):\r\n
 \"\"\"Load and return ground truth data for an image (image, mask, bounding boxes).\r\n\r\n
 augment: (deprecated. Use augmentation instead). If true, apply random\r\n
 image augmentation. Currently, only horizontal flipping is offered.\r\n
 augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\r\n
 For example, passing imgaug.augmenters.Fliplr(0.5) flips images\r\n
 right/left 50% of the time.\r\n
 use_mini_mask: If False, returns full-size masks that are the same height\r\n
 and width as the original image. These can be big, for example\r\n
 1024x1024x100 (for 100 instances). Mini masks are smaller, typically,\r\n
 224x224 and are generated by extracting the bounding box of the\r\n
 object and resizing it to MINI_MASK_SHAPE.\r\n\r\n
 Returns:\r\n
 image: [height, width, 3]\r\n
 shape: the original shape of the image before resizing and cropping.\r\n
 class_ids: [instance_count] Integer class IDs\r\n
 bbox: [instance_count, (y1, x1, y2, x2)]\r\n
 mask: [height, width, instance_count]. The height and width are those\r\n
 of the image unless use_mini_mask is True, in which case they are\r\n
 defined in MINI_MASK_SHAPE.\r\n
 \"\"\"\r\n
 # Load image and mask\r\n
 image = dataset.load_image(image_id)\r\n
 mask, class_ids = dataset.load_mask(image_id)\r\n
 original_shape = image.shape\r\n
 image, window, scale, padding, crop = utils.resize_image(\r\n
 image,\r\n
 min_dim=config.IMAGE_MIN_DIM,\r\n
 min_scale=config.IMAGE_MIN_SCALE,\r\n
 max_dim=config.IMAGE_MAX_DIM,\r\n
 mode=config.IMAGE_RESIZE_MODE)\r\n
 mask = utils.resize_mask(mask, scale, padding, crop)\r\n\r\n
 # Random horizontal flips.\r\n
 # TODO: will be removed in a future update in favor of augmentation\r\n
 if augment:\r\n
 logging.warning(\"'augment' is deprecated. Use 'augmentation' instead.\")\r\n
 if random.randint(0, 1):\r\n
 image = np.fliplr(image)\r\n
 mask = np.fliplr(mask)\r\n\r\n
 # Augmentation\r\n
 # This requires the imgaug lib (https://github.com/aleju/imgaug)\r\n
 if augmentation:\r\n
 import imgaug\r\n\r\n
 # Augmenters that are safe to apply to masks\r\n
 # Some, such as Affine, have settings that make them unsafe, so always\r\n
 # test your augmentation on masks\r\n
 MASK_AUGMENTERS = [\"Sequential\", \"SomeOf\", \"OneOf\", \"Sometimes\",\r\n
 \"Fliplr\", \"Flipud\", \"CropAndPad\",\r\n
 \"Affine\", \"PiecewiseAffine\"]\r\n\r\n
 def hook(images, augmenter, parents, default):\r\n
 \"\"\"Determines which augmenters to apply to masks.\"\"\"\r\n
 return augmenter.__class__.__name__ in MASK_AUGMENTERS\r\n\r\n
 # Store shapes before augmentation to compare\r\n
 image_shape = image.shape\r\n
 mask_shape = mask.shape\r\n
 # Make augmenters deterministic to apply similarly to images and masks\r\n
 det = augmentation.to_deterministic()\r\n
 image = det.augment_image(image)\r\n
 # Change mask to np.uint8 because imgaug doesn't support np.bool\r\n
 mask = det.augment_image(mask.astype(np.uint8),\r\n
 hooks=imgaug.HooksImages(activator=hook))\r\n
 # Verify that shapes didn't change\r\n
 assert image.shape == image_shape, \"Augmentation shouldn't change image size\"\r\n
 assert mask.shape == mask_shape, \"Augmentation shouldn't change mask size\"\r\n
 # Change mask back to bool (plain bool; np.bool is deprecated in newer numpy)\r\n
 mask = mask.astype(bool)\r\n\r\n
 # Note that some boxes might be all zeros if the corresponding mask got cropped out,\r\n
 # so filter them out here.\r\n
 _idx = np.sum(mask, axis=(0, 1)) > 0\r\n
 mask = mask[:, :, _idx]\r\n
 class_ids = class_ids[_idx]\r\n
 # Bounding boxes. 
Note that some boxes might be all zeros\r\n # if the corresponding mask got cropped out.\r\n # bbox: [num_instances, (y1, x1, y2, x2)]\r\n bbox = utils.extract_bboxes(mask)\r\n\r\n # Active classes\r\n # Different datasets have different classes, so track the\r\n # classes supported in the dataset of this image.\r\n active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)\r\n source_class_ids = dataset.source_class_ids[dataset.image_info[image_id][\"source\"]]\r\n active_class_ids[source_class_ids] = 1\r\n\r\n # Resize masks to smaller size to reduce memory usage\r\n if use_mini_mask:\r\n mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)\r\n\r\n # Image meta data\r\n image_meta = compose_image_meta(image_id, original_shape, image.shape,\r\n window, scale, active_class_ids)\r\n\r\n return image, image_meta, class_ids, bbox, mask\r\n\r\n\r\ndef build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):\r\n \"\"\"Generate targets for training Stage 2 classifier and mask heads.\r\n This is not used in normal training. It's useful for debugging or to train\r\n the Mask RCNN heads without using the RPN head.\r\n\r\n Inputs:\r\n rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.\r\n gt_class_ids: [instance count] Integer class IDs\r\n gt_boxes: [instance count, (y1, x1, y2, x2)]\r\n gt_masks: [height, width, instance count] Ground truth masks. Can be full\r\n size or mini-masks.\r\n\r\n Returns:\r\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]\r\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\r\n bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific\r\n bbox refinements.\r\n masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped\r\n to bbox boundaries and resized to neural network output size.\r\n \"\"\"\r\n assert rpn_rois.shape[0] > 0\r\n assert gt_class_ids.dtype == np.int32, \"Expected int but got {}\".format(\r\n gt_class_ids.dtype)\r\n assert gt_boxes.dtype == np.int32, \"Expected int but got {}\".format(\r\n gt_boxes.dtype)\r\n assert gt_masks.dtype == np.bool_, \"Expected bool but got {}\".format(\r\n gt_masks.dtype)\r\n\r\n # It's common to add GT Boxes to ROIs but we don't do that here because\r\n # according to XinLei Chen's paper, it doesn't help.\r\n\r\n # Trim empty padding in gt_boxes and gt_masks parts\r\n instance_ids = np.where(gt_class_ids > 0)[0]\r\n assert instance_ids.shape[0] > 0, \"Image must contain instances.\"\r\n gt_class_ids = gt_class_ids[instance_ids]\r\n gt_boxes = gt_boxes[instance_ids]\r\n gt_masks = gt_masks[:, :, instance_ids]\r\n\r\n # Compute areas of ROIs and ground truth boxes.\r\n rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \\\r\n (rpn_rois[:, 3] - rpn_rois[:, 1])\r\n gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \\\r\n (gt_boxes[:, 3] - gt_boxes[:, 1])\r\n\r\n # Compute overlaps [rpn_rois, gt_boxes]\r\n overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))\r\n for i in range(overlaps.shape[1]):\r\n gt = gt_boxes[i]\r\n overlaps[:, i] = utils.compute_iou(\r\n gt, rpn_rois, gt_box_area[i], rpn_roi_area)\r\n\r\n # Assign ROIs to GT boxes\r\n rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)\r\n rpn_roi_iou_max = overlaps[np.arange(\r\n overlaps.shape[0]), rpn_roi_iou_argmax]\r\n # GT box assigned to each ROI\r\n rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]\r\n rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]\r\n\r\n # Positive ROIs are those with >= 0.5 IoU with a GT box.\r\n fg_ids = np.where(rpn_roi_iou_max > 
0.5)[0]\r\n\r\n # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)\r\n # TODO: To hard example mine or not to hard example mine, that's the question\r\n # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\r\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\r\n\r\n # Subsample ROIs. Aim for 33% foreground.\r\n # FG\r\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\r\n if fg_ids.shape[0] > fg_roi_count:\r\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\r\n else:\r\n keep_fg_ids = fg_ids\r\n # BG\r\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\r\n if bg_ids.shape[0] > remaining:\r\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\r\n else:\r\n keep_bg_ids = bg_ids\r\n # Combine indices of ROIs to keep\r\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\r\n # Need more?\r\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\r\n if remaining > 0:\r\n # Looks like we don't have enough samples to maintain the desired\r\n # balance. Reduce requirements and fill in the rest. This is\r\n # likely different from the Mask RCNN paper.\r\n\r\n # There is a small chance we have neither fg nor bg samples.\r\n if keep.shape[0] == 0:\r\n # Pick bg regions with easier IoU threshold\r\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\r\n assert bg_ids.shape[0] >= remaining\r\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\r\n assert keep_bg_ids.shape[0] == remaining\r\n keep = np.concatenate([keep, keep_bg_ids])\r\n else:\r\n # Fill the rest with repeated bg rois.\r\n keep_extra_ids = np.random.choice(\r\n keep_bg_ids, remaining, replace=True)\r\n keep = np.concatenate([keep, keep_extra_ids])\r\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\r\n \"keep doesn't match ROI batch size {}, {}\".format(\r\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\r\n\r\n # Reset the gt boxes assigned to BG ROIs.\r\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\r\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\r\n\r\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\r\n rois = rpn_rois[keep]\r\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\r\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\r\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\r\n\r\n # Class-aware bbox deltas. 
[y, x, log(h), log(w)]\r\n
 bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,\r\n
 config.NUM_CLASSES, 4), dtype=np.float32)\r\n
 pos_ids = np.where(roi_gt_class_ids > 0)[0]\r\n
 bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(\r\n
 rois[pos_ids], roi_gt_boxes[pos_ids, :4])\r\n
 # Normalize bbox refinements\r\n
 bboxes /= config.BBOX_STD_DEV\r\n\r\n
 # Generate class-specific target masks\r\n
 masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),\r\n
 dtype=np.float32)\r\n
 for i in pos_ids:\r\n
 class_id = roi_gt_class_ids[i]\r\n
 assert class_id > 0, \"class id must be greater than 0\"\r\n
 gt_id = roi_gt_assignment[i]\r\n
 class_mask = gt_masks[:, :, gt_id]\r\n\r\n
 if config.USE_MINI_MASK:\r\n
 # Create a mask placeholder, the size of the image\r\n
 placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)\r\n
 # GT box\r\n
 gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]\r\n
 gt_w = gt_x2 - gt_x1\r\n
 gt_h = gt_y2 - gt_y1\r\n
 # Resize mini mask to size of GT box\r\n
 placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \\\r\n
 np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)\r\n
 # Place the mini mask in the placeholder\r\n
 class_mask = placeholder\r\n\r\n
 # Pick part of the mask and resize it\r\n
 y1, x1, y2, x2 = rois[i].astype(np.int32)\r\n
 m = class_mask[y1:y2, x1:x2]\r\n
 mask = utils.resize(m, config.MASK_SHAPE)\r\n
 masks[i, :, :, class_id] = mask\r\n\r\n
 return rois, roi_gt_class_ids, bboxes, masks\r\n\r\n\r\n
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\r\n
 \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\r\n
 anchors and deltas to refine them to match their corresponding GT boxes.\r\n\r\n
 anchors: [num_anchors, (y1, x1, y2, x2)]\r\n
 gt_class_ids: [num_gt_boxes] Integer class IDs.\r\n
 gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\r\n\r\n
 Returns:\r\n
 rpn_match: [N] (int32) matches between anchors and GT boxes.\r\n
 1 = positive anchor, -1 = negative anchor, 0 = neutral\r\n
 rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\r\n
 \"\"\"\r\n
 # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\r\n
 rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\r\n
 # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\r\n
 rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\r\n\r\n
 # Handle COCO crowds\r\n
 # A crowd box in COCO is a bounding box around several instances. Exclude\r\n
 # them from training. 
A crowd box is given a negative class ID.\r\n crowd_ix = np.where(gt_class_ids < 0)[0]\r\n if crowd_ix.shape[0] > 0:\r\n # Filter out crowds from ground truth class IDs and boxes\r\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\r\n crowd_boxes = gt_boxes[crowd_ix]\r\n gt_class_ids = gt_class_ids[non_crowd_ix]\r\n gt_boxes = gt_boxes[non_crowd_ix]\r\n # Compute overlaps with crowd boxes [anchors, crowds]\r\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\r\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\r\n no_crowd_bool = (crowd_iou_max < 0.001)\r\n else:\r\n # All anchors don't intersect a crowd\r\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\r\n\r\n # Compute overlaps [num_anchors, num_gt_boxes]\r\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\r\n\r\n # Match anchors to GT Boxes\r\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\r\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\r\n # Neutral anchors are those that don't match the conditions above,\r\n # and they don't influence the loss function.\r\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\r\n # match it to the closest anchor (even if its max IoU is < 0.3).\r\n #\r\n # 1. Set negative anchors first. They get overwritten below if a GT box is\r\n # matched to them. Skip boxes in crowd areas.\r\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\r\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\r\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\r\n # 2. Set an anchor for each GT box (regardless of IoU value).\r\n # If multiple anchors have the same IoU match all of them\r\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]\r\n rpn_match[gt_iou_argmax] = 1\r\n # 3. 
Set anchors with high overlap as positive.\r\n
 rpn_match[anchor_iou_max >= 0.7] = 1\r\n\r\n
 # Subsample to balance positive and negative anchors\r\n
 # Don't let positives be more than half the anchors\r\n
 ids = np.where(rpn_match == 1)[0]\r\n
 extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\r\n
 if extra > 0:\r\n
 # Reset the extra ones to neutral\r\n
 ids = np.random.choice(ids, extra, replace=False)\r\n
 rpn_match[ids] = 0\r\n
 # Same for negative proposals\r\n
 ids = np.where(rpn_match == -1)[0]\r\n
 extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\r\n
 np.sum(rpn_match == 1))\r\n
 if extra > 0:\r\n
 # Reset the extra ones to neutral\r\n
 ids = np.random.choice(ids, extra, replace=False)\r\n
 rpn_match[ids] = 0\r\n\r\n
 # For positive anchors, compute shift and scale needed to transform them\r\n
 # to match the corresponding GT boxes.\r\n
 ids = np.where(rpn_match == 1)[0]\r\n
 ix = 0 # index into rpn_bbox\r\n
 # TODO: use box_refinement() rather than duplicating the code here\r\n
 for i, a in zip(ids, anchors[ids]):\r\n
 # Closest gt box (it might have IoU < 0.7)\r\n
 gt = gt_boxes[anchor_iou_argmax[i]]\r\n\r\n
 # Convert coordinates to center plus width/height.\r\n
 # GT Box\r\n
 gt_h = gt[2] - gt[0]\r\n
 gt_w = gt[3] - gt[1]\r\n
 gt_center_y = gt[0] + 0.5 * gt_h\r\n
 gt_center_x = gt[1] + 0.5 * gt_w\r\n
 # Anchor\r\n
 a_h = a[2] - a[0]\r\n
 a_w = a[3] - a[1]\r\n
 a_center_y = a[0] + 0.5 * a_h\r\n
 a_center_x = a[1] + 0.5 * a_w\r\n\r\n
 # Compute the bbox refinement that the RPN should predict.\r\n
 rpn_bbox[ix] = [\r\n
 (gt_center_y - a_center_y) / a_h,\r\n
 (gt_center_x - a_center_x) / a_w,\r\n
 np.log(gt_h / a_h),\r\n
 np.log(gt_w / a_w),\r\n
 ]\r\n
 # Normalize\r\n
 rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\r\n
 ix += 1\r\n\r\n
 return rpn_match, rpn_bbox\r\n\r\n\r\n
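# Worked example of the refinement above (illustrative comment only): an\r\n
# anchor (0, 0, 10, 10) matched to a GT box (1, 1, 11, 11) has equal height\r\n
# and width and centers shifted by (1, 1), so the raw deltas are\r\n
# (0.1, 0.1, 0.0, 0.0); with the default RPN_BBOX_STD_DEV of\r\n
# [0.1, 0.1, 0.2, 0.2] the normalized target becomes (1.0, 1.0, 0.0, 0.0).\r\n\r\n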
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):\r\n
 \"\"\"Generates ROI proposals similar to what a region proposal network\r\n
 would generate.\r\n\r\n
 image_shape: [Height, Width, Depth]\r\n
 count: Number of ROIs to generate\r\n
 gt_class_ids: [N] Integer ground truth class IDs\r\n
 gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.\r\n\r\n
 Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.\r\n
 \"\"\"\r\n
 # placeholder\r\n
 rois = np.zeros((count, 4), dtype=np.int32)\r\n\r\n
 # Generate random ROIs around GT boxes (90% of count)\r\n
 rois_per_box = int(0.9 * count / gt_boxes.shape[0])\r\n
 for i in range(gt_boxes.shape[0]):\r\n
 gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]\r\n
 h = gt_y2 - gt_y1\r\n
 w = gt_x2 - gt_x1\r\n
 # random boundaries\r\n
 r_y1 = max(gt_y1 - h, 0)\r\n
 r_y2 = min(gt_y2 + h, image_shape[0])\r\n
 r_x1 = max(gt_x1 - w, 0)\r\n
 r_x2 = min(gt_x2 + w, image_shape[1])\r\n\r\n
 # To avoid generating boxes with zero area, we generate double what\r\n
 # we need and filter out the extra. If we get fewer valid boxes\r\n
 # than we need, we loop and try again.\r\n
 while True:\r\n
 y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))\r\n
 x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))\r\n
 # Filter out zero area boxes\r\n
 threshold = 1\r\n
 y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\r\n
 threshold][:rois_per_box]\r\n
 x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\r\n
 threshold][:rois_per_box]\r\n
 if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:\r\n
 break\r\n\r\n
 # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\r\n
 # into x1, y1, x2, y2 order\r\n
 x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\r\n
 y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\r\n
 box_rois = np.hstack([y1, x1, y2, x2])\r\n
 rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois\r\n\r\n
 # Generate random ROIs anywhere in the image (10% of count)\r\n
 remaining_count = count - (rois_per_box * gt_boxes.shape[0])\r\n
 # To avoid generating boxes with zero area, we generate double what\r\n
 # we need and filter out the extra. If we get fewer valid boxes\r\n
 # than we need, we loop and try again.\r\n
 while True:\r\n
 y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))\r\n
 x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))\r\n
 # Filter out zero area boxes\r\n
 threshold = 1\r\n
 y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=\r\n
 threshold][:remaining_count]\r\n
 x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=\r\n
 threshold][:remaining_count]\r\n
 if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:\r\n
 break\r\n\r\n
 # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape\r\n
 # into x1, y1, x2, y2 order\r\n
 x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)\r\n
 y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)\r\n
 global_rois = np.hstack([y1, x1, y2, x2])\r\n
 rois[-remaining_count:] = global_rois\r\n
 return rois\r\n\r\n\r\n
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,\r\n
 random_rois=0, batch_size=1, detection_targets=False,\r\n
 no_augmentation_sources=None):\r\n
 \"\"\"A generator that returns images and corresponding target class ids,\r\n
 bounding box deltas, and masks.\r\n\r\n
 dataset: The Dataset object to pick data from\r\n
 config: The model config object\r\n
 shuffle: If True, shuffles the samples before every epoch\r\n
 augment: (deprecated. Use augmentation instead). If true, apply random\r\n
 image augmentation. Currently, only horizontal flipping is offered.\r\n
 augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.\r\n
 For example, passing imgaug.augmenters.Fliplr(0.5) flips images\r\n
 right/left 50% of the time.\r\n
 random_rois: If > 0 then generate proposals to be used to train the\r\n
 network classifier and mask heads. Useful if training\r\n
 the Mask RCNN part without the RPN.\r\n
 batch_size: How many images to return in each call\r\n
 detection_targets: If True, generate detection targets (class IDs, bbox\r\n
 deltas, and masks). Typically for debugging or visualizations because\r\n
 in training, detection targets are generated by DetectionTargetLayer.\r\n
 no_augmentation_sources: Optional. List of sources to exclude for\r\n
 augmentation. A source is a string that identifies a dataset and is\r\n
 defined in the Dataset class.\r\n\r\n
 Returns a Python generator. Upon calling next() on it, the\r\n
 generator returns two lists, inputs and outputs. The contents\r\n
 of the lists differ depending on the received arguments:\r\n
 inputs list:\r\n
 - images: [batch, H, W, C]\r\n
 - image_meta: [batch, (meta data)] Image details. See compose_image_meta()\r\n
 - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\r\n
 - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\r\n
 - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\r\n
 - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\r\n
 - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\r\n
 are those of the image unless use_mini_mask is True, in which\r\n
 case they are defined in MINI_MASK_SHAPE.\r\n\r\n
 outputs list: Usually empty in regular training. But if detection_targets\r\n
 is True then the outputs list contains target class_ids, bbox deltas,\r\n
 and masks.\r\n
 \"\"\"\r\n
 b = 0 # batch item index\r\n
 image_index = -1\r\n
 image_ids = np.copy(dataset.image_ids)\r\n
 error_count = 0\r\n
 no_augmentation_sources = no_augmentation_sources or []\r\n\r\n
 # Anchors\r\n
 # [anchor_count, (y1, x1, y2, x2)]\r\n
 backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\r\n
 anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\r\n
 config.RPN_ANCHOR_RATIOS,\r\n
 backbone_shapes,\r\n
 config.BACKBONE_STRIDES,\r\n
 config.RPN_ANCHOR_STRIDE)\r\n\r\n
 # Keras requires a generator to run indefinitely.\r\n
 while True:\r\n
 try:\r\n
 # Increment index to pick next image. Shuffle if at the start of an epoch.\r\n
 image_index = (image_index + 1) % len(image_ids)\r\n
 if shuffle and image_index == 0:\r\n
 np.random.shuffle(image_ids)\r\n\r\n
 # Get GT bounding boxes and masks for image.\r\n
 image_id = image_ids[image_index]\r\n\r\n
 # If the image source is not to be augmented, pass None as augmentation\r\n
 if dataset.image_info[image_id]['source'] in no_augmentation_sources:\r\n
 image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\r\n
 load_image_gt(dataset, config, image_id, augment=augment,\r\n
 augmentation=None,\r\n
 use_mini_mask=config.USE_MINI_MASK)\r\n
 else:\r\n
 image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\r\n
 load_image_gt(dataset, config, image_id, augment=augment,\r\n
 augmentation=augmentation,\r\n
 use_mini_mask=config.USE_MINI_MASK)\r\n\r\n
 # Skip images that have no instances. 
This can happen in cases\r\n # where we train on a subset of classes and the image doesn't\r\n # have any of the classes we care about.\r\n if not np.any(gt_class_ids > 0):\r\n continue\r\n\r\n # RPN Targets\r\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\r\n gt_class_ids, gt_boxes, config)\r\n\r\n # Mask R-CNN Targets\r\n if random_rois:\r\n rpn_rois = generate_random_rois(\r\n image.shape, random_rois, gt_class_ids, gt_boxes)\r\n if detection_targets:\r\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\r\n build_detection_targets(\r\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\r\n\r\n # Init batch arrays\r\n if b == 0:\r\n batch_image_meta = np.zeros(\r\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\r\n batch_rpn_match = np.zeros(\r\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\r\n batch_rpn_bbox = np.zeros(\r\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\r\n batch_images = np.zeros(\r\n (batch_size,) + image.shape, dtype=np.float32)\r\n batch_gt_class_ids = np.zeros(\r\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\r\n batch_gt_boxes = np.zeros(\r\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\r\n batch_gt_masks = np.zeros(\r\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\r\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\r\n if random_rois:\r\n batch_rpn_rois = np.zeros(\r\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\r\n if detection_targets:\r\n batch_rois = np.zeros(\r\n (batch_size,) + rois.shape, dtype=rois.dtype)\r\n batch_mrcnn_class_ids = np.zeros(\r\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\r\n batch_mrcnn_bbox = np.zeros(\r\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\r\n batch_mrcnn_mask = np.zeros(\r\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\r\n\r\n # If more instances than fits in the array, sub-sample from them.\r\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\r\n ids = np.random.choice(\r\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\r\n gt_class_ids = gt_class_ids[ids]\r\n gt_boxes = gt_boxes[ids]\r\n gt_masks = gt_masks[:, :, ids]\r\n\r\n # Add to batch\r\n batch_image_meta[b] = image_meta\r\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\r\n batch_rpn_bbox[b] = rpn_bbox\r\n batch_images[b] = mold_image(image.astype(np.float32), config)\r\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\r\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\r\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\r\n if random_rois:\r\n batch_rpn_rois[b] = rpn_rois\r\n if detection_targets:\r\n batch_rois[b] = rois\r\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\r\n batch_mrcnn_bbox[b] = mrcnn_bbox\r\n batch_mrcnn_mask[b] = mrcnn_mask\r\n b += 1\r\n\r\n # Batch full?\r\n if b >= batch_size:\r\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\r\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\r\n outputs = []\r\n\r\n if random_rois:\r\n inputs.extend([batch_rpn_rois])\r\n if detection_targets:\r\n inputs.extend([batch_rois])\r\n # Keras requires that output and targets have the same number of dimensions\r\n batch_mrcnn_class_ids = np.expand_dims(\r\n batch_mrcnn_class_ids, -1)\r\n outputs.extend(\r\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\r\n\r\n yield inputs, outputs\r\n\r\n # start a new batch\r\n b = 0\r\n except (GeneratorExit, KeyboardInterrupt):\r\n raise\r\n except:\r\n # Log it and skip the 
image\r\n
 logging.exception(\"Error processing image {}\".format(\r\n
 dataset.image_info[image_id]))\r\n
 error_count += 1\r\n
 if error_count > 5:\r\n
 raise\r\n\r\n\r\n
############################################################\r\n
# MaskRCNN Class\r\n
############################################################\r\n\r\n
class MaskRCNN():\r\n
 \"\"\"Encapsulates the Mask RCNN model functionality.\r\n\r\n
 The actual Keras model is in the keras_model property.\r\n
 \"\"\"\r\n\r\n
 def __init__(self, mode, config, model_dir):\r\n
 \"\"\"\r\n
 mode: Either \"training\" or \"inference\"\r\n
 config: A Sub-class of the Config class\r\n
 model_dir: Directory to save training logs and trained weights\r\n
 \"\"\"\r\n
 assert mode in ['training', 'inference']\r\n
 self.mode = mode\r\n
 self.config = config\r\n
 self.model_dir = model_dir\r\n
 self.set_log_dir()\r\n
 self.keras_model = self.build(mode=mode, config=config)\r\n\r\n
 def build(self, mode, config):\r\n
 \"\"\"Build Mask R-CNN architecture.\r\n
 input_shape: The shape of the input image.\r\n
 mode: Either \"training\" or \"inference\". The inputs and\r\n
 outputs of the model differ accordingly.\r\n
 \"\"\"\r\n
 assert mode in ['training', 'inference']\r\n\r\n
 # Image size must be divisible by 2 multiple times\r\n
 h, w = config.IMAGE_SHAPE[:2]\r\n
 if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\r\n
 raise Exception(\"Image size must be divisible by 2 at least 6 times \"\r\n
 \"to avoid fractions when downscaling and upscaling. \"\r\n
 \"For example, use 256, 320, 384, 448, 512, ... etc. \")\r\n\r\n
 # Inputs\r\n
 input_image = KL.Input(\r\n
 shape=[None, None, config.IMAGE_SHAPE[2]], name=\"input_image\")\r\n
 input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\r\n
 name=\"input_image_meta\")\r\n
 if mode == \"training\":\r\n
 # RPN GT\r\n
 input_rpn_match = KL.Input(\r\n
 shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\r\n
 input_rpn_bbox = KL.Input(\r\n
 shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\r\n\r\n
 # Detection GT (class IDs, bounding boxes, and masks)\r\n
 # 1. GT Class IDs (zero padded)\r\n
 input_gt_class_ids = KL.Input(\r\n
 shape=[None], name=\"input_gt_class_ids\", dtype=tf.int32)\r\n
 # 2. GT Boxes in pixels (zero padded)\r\n
 # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates\r\n
 input_gt_boxes = KL.Input(\r\n
 shape=[None, 4], name=\"input_gt_boxes\", dtype=tf.float32)\r\n
 # Normalize coordinates\r\n
 gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(\r\n
 x, K.shape(input_image)[1:3]))(input_gt_boxes)\r\n
 # 3. GT Masks (zero padded)\r\n
 # [batch, height, width, MAX_GT_INSTANCES]\r\n
 if config.USE_MINI_MASK:\r\n
 input_gt_masks = KL.Input(\r\n
 shape=[config.MINI_MASK_SHAPE[0],\r\n
 config.MINI_MASK_SHAPE[1], None],\r\n
 name=\"input_gt_masks\", dtype=bool)\r\n
 else:\r\n
 input_gt_masks = KL.Input(\r\n
 shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],\r\n
 name=\"input_gt_masks\", dtype=bool)\r\n
 elif mode == \"inference\":\r\n
 # Anchors in normalized coordinates\r\n
 input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\r\n\r\n
 # Build the shared convolutional layers.\r\n
 # Bottom-up Layers\r\n
 # Returns a list of the last layers of each stage, 5 in total.\r\n
 # Don't create the head (stage 5), so we pick the 4th item in the list.\r\n
 if callable(config.BACKBONE):\r\n
 _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,\r\n
 train_bn=config.TRAIN_BN)\r\n
 else:\r\n
 _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\r\n
 stage5=True, train_bn=config.TRAIN_BN)\r\n
 # Top-down Layers\r\n
 # TODO: add assert to verify feature map sizes match what's in config\r\n
 P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\r\n
 P4 = KL.Add(name=\"fpn_p4add\")([\r\n
 KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\r\n
 KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\r\n
 P3 = KL.Add(name=\"fpn_p3add\")([\r\n
 KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\r\n
 KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\r\n
 P2 = KL.Add(name=\"fpn_p2add\")([\r\n
 KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\r\n
 KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\r\n
 # Attach 3x3 conv to all P layers to get the final feature maps.\r\n
 P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\r\n
 P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\r\n
 P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\r\n
 P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\r\n
 # P6 is used for the 5th anchor scale in RPN. Generated by\r\n
 # subsampling from P5 with stride of 2.\r\n
 P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\r\n\r\n
 # Note that P6 is used in RPN, but not in the classifier heads.\r\n
 rpn_feature_maps = [P2, P3, P4, P5, P6]\r\n
 mrcnn_feature_maps = [P2, P3, P4, P5]\r\n\r\n
 # Anchors\r\n
 if mode == \"training\":\r\n
 anchors = self.get_anchors(config.IMAGE_SHAPE)\r\n
 # Duplicate across the batch dimension because Keras requires it\r\n
 # TODO: can this be optimized to avoid duplicating the anchors?\r\n
 anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\r\n
 # A hack to get around Keras's bad support for constants\r\n
 anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\r\n
 else:\r\n
 anchors = input_anchors\r\n\r\n
 # RPN Model\r\n
 rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\r\n
 len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)\r\n
 # Loop through pyramid layers\r\n
 layer_outputs = [] # list of lists\r\n
 for p in rpn_feature_maps:\r\n
 layer_outputs.append(rpn([p]))\r\n
 # Concatenate layer outputs\r\n
 # Convert from list of lists of level outputs to list of lists\r\n
 # of outputs across levels.\r\n
 # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\r\n
 output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\r\n
 outputs = list(zip(*layer_outputs))\r\n
 outputs = [KL.Concatenate(axis=1, name=n)(list(o))\r\n
 for o, n in zip(outputs, output_names)]\r\n\r\n
 rpn_class_logits, rpn_class, rpn_bbox = outputs\r\n\r\n
 # Generate proposals\r\n
 # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\r\n
 # and zero padded.\r\n
 proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\r\n
 else config.POST_NMS_ROIS_INFERENCE\r\n
 rpn_rois = ProposalLayer(\r\n
 proposal_count=proposal_count,\r\n
 nms_threshold=config.RPN_NMS_THRESHOLD,\r\n
 name=\"ROI\",\r\n
 config=config)([rpn_class, rpn_bbox, anchors])\r\n\r\n
 if mode == \"training\":\r\n
 # Class ID mask to mark class IDs supported by the dataset the image\r\n
 # came from.\r\n
 active_class_ids = KL.Lambda(\r\n
 lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\r\n
 )(input_image_meta)\r\n\r\n
 if not config.USE_RPN_ROIS:\r\n
 # Ignore predicted ROIs and use ROIs provided as an input.\r\n
 input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\r\n
 name=\"input_roi\", dtype=np.int32)\r\n
 # Normalize coordinates\r\n
 target_rois = KL.Lambda(lambda x: norm_boxes_graph(\r\n
 x, K.shape(input_image)[1:3]))(input_rois)\r\n
 else:\r\n
 target_rois = rpn_rois\r\n\r\n
 # Generate detection targets\r\n
 # Subsamples proposals and generates target outputs for training\r\n
 # Note that proposal class IDs, gt_boxes, and gt_masks are zero\r\n
 # padded. Equally, returned rois and targets are zero padded.\r\n
 rois, target_class_ids, target_bbox, target_mask =\\\r\n
 DetectionTargetLayer(config, name=\"proposal_targets\")([\r\n
 target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\r\n\r\n
 # Network Heads\r\n
 # TODO: verify that this handles zero padded ROIs\r\n
 mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\r\n
 fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\r\n
 config.POOL_SIZE, config.NUM_CLASSES,\r\n
 train_bn=config.TRAIN_BN,\r\n
 fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\r\n\r\n
 mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\r\n
 input_image_meta,\r\n
 config.MASK_POOL_SIZE,\r\n
 config.NUM_CLASSES,\r\n
 train_bn=config.TRAIN_BN)\r\n\r\n
 # TODO: clean up (use tf.identity if necessary)\r\n
 output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\r\n\r\n
 # Losses\r\n
 rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\r\n
 [input_rpn_match, rpn_class_logits])\r\n
 rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\r\n
 [input_rpn_bbox, input_rpn_match, rpn_bbox])\r\n
 class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\r\n
 [target_class_ids, mrcnn_class_logits, active_class_ids])\r\n
 bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\r\n
 [target_bbox, target_class_ids, mrcnn_bbox])\r\n
 mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\r\n
 [target_mask, target_class_ids, mrcnn_mask])\r\n\r\n
 # Model\r\n
 inputs = [input_image, input_image_meta,\r\n
 input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\r\n
 if not config.USE_RPN_ROIS:\r\n
 inputs.append(input_rois)\r\n
 outputs = [rpn_class_logits, rpn_class, rpn_bbox,\r\n
 mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\r\n
 rpn_rois, output_rois,\r\n
 rpn_class_loss, rpn_bbox_loss, class_loss, 
bbox_loss, mask_loss]\r\n model = KM.Model(inputs, outputs, name='mask_rcnn')\r\n else:\r\n # Network Heads\r\n # Proposal classifier and BBox regressor heads\r\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\r\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\r\n config.POOL_SIZE, config.NUM_CLASSES,\r\n train_bn=config.TRAIN_BN,\r\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\r\n\r\n # Detections\r\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\r\n # normalized coordinates\r\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\r\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\r\n\r\n # Create masks for detections\r\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\r\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\r\n input_image_meta,\r\n config.MASK_POOL_SIZE,\r\n config.NUM_CLASSES,\r\n train_bn=config.TRAIN_BN)\r\n\r\n model = KM.Model([input_image, input_image_meta, input_anchors],\r\n [detections, mrcnn_class, mrcnn_bbox,\r\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\r\n name='mask_rcnn')\r\n\r\n # Add multi-GPU support.\r\n if config.GPU_COUNT > 1:\r\n from mrcnn.parallel_model import ParallelModel\r\n model = ParallelModel(model, config.GPU_COUNT)\r\n\r\n return model\r\n\r\n def find_last(self):\r\n \"\"\"Finds the last checkpoint file of the last trained model in the\r\n model directory.\r\n Returns:\r\n The path of the last checkpoint file\r\n \"\"\"\r\n # Get directory names. Each directory corresponds to a model\r\n dir_names = next(os.walk(self.model_dir))[1]\r\n key = self.config.NAME.lower()\r\n dir_names = filter(lambda f: f.startswith(key), dir_names)\r\n dir_names = sorted(dir_names)\r\n if not dir_names:\r\n import errno\r\n raise FileNotFoundError(\r\n errno.ENOENT,\r\n \"Could not find model directory under {}\".format(self.model_dir))\r\n # Pick last directory\r\n dir_name = os.path.join(self.model_dir, dir_names[-1])\r\n # Find the last checkpoint\r\n checkpoints = next(os.walk(dir_name))[2]\r\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\r\n checkpoints = sorted(checkpoints)\r\n if not checkpoints:\r\n import errno\r\n raise FileNotFoundError(\r\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\r\n checkpoint = os.path.join(dir_name, checkpoints[-1])\r\n return checkpoint\r\n\r\n def load_weights(self, filepath, by_name=False, exclude=None):\r\n \"\"\"Modified version of the corresponding Keras function with\r\n the addition of multi-GPU support and the ability to exclude\r\n some layers from loading.\r\n exclude: list of layer names to exclude\r\n \"\"\"\r\n import h5py\r\n # Conditional import to support versions of Keras before 2.2\r\n # TODO: remove in about 6 months (end of 2018)\r\n try:\r\n from keras.engine import saving\r\n except ImportError:\r\n # Keras before 2.2 used the 'topology' namespace.\r\n from keras.engine import topology as saving\r\n\r\n if exclude:\r\n by_name = True\r\n\r\n if h5py is None:\r\n raise ImportError('`load_weights` requires h5py.')\r\n f = h5py.File(filepath, mode='r')\r\n if 'layer_names' not in f.attrs and 'model_weights' in f:\r\n f = f['model_weights']\r\n\r\n # In multi-GPU training, we wrap the model. 
Get layers\r\n
 # of the inner model because they have the weights.\r\n
 keras_model = self.keras_model\r\n
 layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\r\n
 else keras_model.layers\r\n\r\n
 # Exclude some layers\r\n
 if exclude:\r\n
 layers = filter(lambda l: l.name not in exclude, layers)\r\n\r\n
 if by_name:\r\n
 saving.load_weights_from_hdf5_group_by_name(f, layers)\r\n
 else:\r\n
 saving.load_weights_from_hdf5_group(f, layers)\r\n
 if hasattr(f, 'close'):\r\n
 f.close()\r\n\r\n
 # Update the log directory\r\n
 self.set_log_dir(filepath)\r\n\r\n
 def get_imagenet_weights(self):\r\n
 \"\"\"Downloads ImageNet trained weights from Keras.\r\n
 Returns path to weights file.\r\n
 \"\"\"\r\n
 from keras.utils.data_utils import get_file\r\n
 TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\r\n
 'releases/download/v0.2/'\\\r\n
 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\r\n
 weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\r\n
 TF_WEIGHTS_PATH_NO_TOP,\r\n
 cache_subdir='models',\r\n
 md5_hash='a268eb855778b3df3c7506639542a6af')\r\n
 return weights_path\r\n\r\n
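 # Illustrative usage of the two weight sources above (the config object\r\n
 # and paths are hypothetical, not defined in this module):\r\n
 # model = MaskRCNN(mode=\"training\", config=config, model_dir=\"/logs\")\r\n
 # model.load_weights(model.get_imagenet_weights(), by_name=True,\r\n
 # exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\r\n
 # \"mrcnn_bbox\", \"mrcnn_mask\"])\r\n
 # model.load_weights(model.find_last(), by_name=True) # resume training\r\n\r\n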
 def compile(self, learning_rate, momentum):\r\n
 \"\"\"Gets the model ready for training. Adds losses, regularization, and\r\n
 metrics. Then calls the Keras compile() function.\r\n
 \"\"\"\r\n
 self.keras_model.metrics_tensors = []\r\n
 # Optimizer object\r\n
 optimizer = keras.optimizers.SGD(\r\n
 lr=learning_rate, momentum=momentum,\r\n
 clipnorm=self.config.GRADIENT_CLIP_NORM)\r\n
 # Add Losses\r\n
 # First, clear previously set losses to avoid duplication\r\n
 self.keras_model._losses = []\r\n
 self.keras_model._per_input_losses = {}\r\n
 loss_names = [\r\n
 \"rpn_class_loss\", \"rpn_bbox_loss\",\r\n
 \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\r\n
 for name in loss_names:\r\n
 layer = self.keras_model.get_layer(name)\r\n
 # The losses were cleared above, so each weighted loss can be\r\n
 # added unconditionally without checking for duplicates.\r\n
 loss = tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.)\r\n
 self.keras_model.add_loss(loss)\r\n\r\n
 # Add L2 Regularization\r\n
 # Skip gamma and beta weights of batch normalization layers.\r\n
 reg_losses = [\r\n
 keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\r\n
 for w in self.keras_model.trainable_weights\r\n
 if 'gamma' not in w.name and 'beta' not in w.name]\r\n
 self.keras_model.add_loss(tf.add_n(reg_losses))\r\n\r\n
 # Compile\r\n
 self.keras_model.compile(\r\n
 optimizer=optimizer,\r\n
 loss=[None] * len(self.keras_model.outputs))\r\n\r\n
 # Add metrics for losses\r\n
 for name in loss_names:\r\n
 if name in self.keras_model.metrics_names:\r\n
 continue\r\n
 layer = self.keras_model.get_layer(name)\r\n
 self.keras_model.metrics_names.append(name)\r\n
 loss = (\r\n
 tf.reduce_mean(layer.output, keepdims=True)\r\n
 * self.config.LOSS_WEIGHTS.get(name, 1.))\r\n
 self.keras_model.metrics_tensors.append(loss)\r\n\r\n
 def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\r\n
 \"\"\"Sets model layers as trainable if their names match\r\n
 the given regular expression.\r\n
 \"\"\"\r\n
 # Print message on the first call (but not on recursive calls)\r\n
 if verbose > 0 and keras_model is None:\r\n
 log(\"Selecting layers to train\")\r\n\r\n
 keras_model = keras_model or self.keras_model\r\n\r\n
 # In multi-GPU training, we wrap the model. Get layers\r\n
 # of the inner model because they have the weights.\r\n
 layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\r\n
 else keras_model.layers\r\n\r\n
 for layer in layers:\r\n
 # Is the layer a model?\r\n
 if layer.__class__.__name__ == 'Model':\r\n
 print(\"In model: \", layer.name)\r\n
 self.set_trainable(\r\n
 layer_regex, keras_model=layer, indent=indent + 4)\r\n
 continue\r\n\r\n
 if not layer.weights:\r\n
 continue\r\n
 # Is it trainable?\r\n
 trainable = bool(re.fullmatch(layer_regex, layer.name))\r\n
 # Update layer. If layer is a container, update inner layer.\r\n
 if layer.__class__.__name__ == 'TimeDistributed':\r\n
 layer.layer.trainable = trainable\r\n
 else:\r\n
 layer.trainable = trainable\r\n
 # Print trainable layer names\r\n
 if trainable and verbose > 0:\r\n
 log(\"{}{:20}   ({})\".format(\" \" * indent, layer.name,\r\n
 layer.__class__.__name__))\r\n\r\n
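 # Illustrative call (equivalent to the 'heads' shorthand defined in\r\n
 # train() below): self.set_trainable(r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\")\r\n
 # leaves only the RPN/FPN/head layers trainable and freezes the backbone.\r\n\r\n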
Include placeholders that get filled by Keras.\r\n        self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.h5\".format(\r\n            self.config.NAME.lower()))\r\n        self.checkpoint_path = self.checkpoint_path.replace(\r\n            \"*epoch*\", \"{epoch:04d}\")\r\n\r\n    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,\r\n              augmentation=None, custom_callbacks=None, no_augmentation_sources=None):\r\n        \"\"\"Train the model.\r\n        train_dataset, val_dataset: Training and validation Dataset objects.\r\n        learning_rate: The learning rate to train with\r\n        epochs: Number of training epochs. Note that previous training epochs\r\n                are considered to be done already, so this actually determines\r\n                the epochs to train in total rather than in this particular\r\n                call.\r\n        layers: Allows selecting which layers to train. It can be:\r\n            - A regular expression to match layer names to train\r\n            - One of these predefined values:\r\n              heads: The RPN, classifier and mask heads of the network\r\n              all: All the layers\r\n              3+: Train Resnet stage 3 and up\r\n              4+: Train Resnet stage 4 and up\r\n              5+: Train Resnet stage 5 and up\r\n        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)\r\n            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)\r\n            flips images right/left 50% of the time. You can pass complex\r\n            augmentations as well. This augmentation applies 50% of the\r\n            time, and when it does it flips images right/left half the time\r\n            and adds a Gaussian blur with a random sigma in range 0 to 5.\r\n\r\n            augmentation = imgaug.augmenters.Sometimes(0.5, [\r\n                imgaug.augmenters.Fliplr(0.5),\r\n                imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))\r\n            ])\r\n\t custom_callbacks: Optional. Add custom callbacks to be called\r\n\t     with the keras fit_generator method. Must be list of type keras.callbacks.\r\n        no_augmentation_sources: Optional. List of sources to exclude for\r\n            augmentation. A source is a string that identifies a dataset and is\r\n            defined in the Dataset class.\r\n        \"\"\"\r\n        assert self.mode == \"training\", \"Create model in training mode.\"\r\n\r\n        # Pre-defined layer regular expressions\r\n        layer_regex = {\r\n            # all layers but the backbone\r\n            \"heads\": r\"(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n            # From a specific Resnet stage and up\r\n            \"3+\": r\"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n            \"4+\": r\"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n            \"5+\": r\"(res5.*)|(bn5.*)|(mrcnn\\_.*)|(rpn\\_.*)|(fpn\\_.*)\",\r\n            # All layers\r\n            \"all\": \".*\",\r\n        }\r\n        if layers in layer_regex.keys():\r\n            layers = layer_regex[layers]\r\n\r\n        # Data generators\r\n        train_generator = data_generator(train_dataset, self.config, shuffle=True,\r\n                                         augmentation=augmentation,\r\n                                         batch_size=self.config.BATCH_SIZE,\r\n                                         no_augmentation_sources=no_augmentation_sources)\r\n        val_generator = data_generator(val_dataset, self.config, shuffle=True,\r\n                                       batch_size=self.config.BATCH_SIZE)\r\n\r\n        # Create log_dir if it does not exist\r\n        if not os.path.exists(self.log_dir):\r\n            os.makedirs(self.log_dir)\r\n\r\n        # Callbacks\r\n        callbacks = [\r\n            keras.callbacks.TensorBoard(log_dir=self.log_dir,\r\n                                        histogram_freq=0, write_graph=True, write_images=False),\r\n            keras.callbacks.ModelCheckpoint(self.checkpoint_path,\r\n                                            verbose=0, save_weights_only=True),\r\n        ]\r\n\r\n        # Add custom callbacks to the list\r\n        if custom_callbacks:\r\n            callbacks += custom_callbacks\r\n\r\n        # Train\r\n        log(\"\\nStarting at epoch {}. 
LR={}\\n\".format(self.epoch, learning_rate))\r\n        log(\"Checkpoint Path: {}\".format(self.checkpoint_path))\r\n        self.set_trainable(layers)\r\n        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)\r\n\r\n        # Work-around for Windows: Keras fails on Windows when using\r\n        # multiprocessing workers. See discussion here:\r\n        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009\r\n        if os.name == 'nt':\r\n            workers = 0\r\n        else:\r\n            workers = multiprocessing.cpu_count()\r\n\r\n        self.keras_model.fit_generator(\r\n            train_generator,\r\n            initial_epoch=self.epoch,\r\n            epochs=epochs,\r\n            steps_per_epoch=self.config.STEPS_PER_EPOCH,\r\n            callbacks=callbacks,\r\n            validation_data=val_generator,\r\n            validation_steps=self.config.VALIDATION_STEPS,\r\n            max_queue_size=100,\r\n            workers=workers,\r\n            use_multiprocessing=True,\r\n        )\r\n        self.epoch = max(self.epoch, epochs)\r\n\r\n    def mold_inputs(self, images):\r\n        \"\"\"Takes a list of images and modifies them to the format expected\r\n        as an input to the neural network.\r\n        images: List of image matrices [height,width,depth]. Images can have\r\n            different sizes.\r\n\r\n        Returns 3 Numpy matrices:\r\n        molded_images: [N, h, w, 3]. Images resized and normalized.\r\n        image_metas: [N, length of meta data]. Details about each image.\r\n        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the\r\n            original image (padding excluded).\r\n        \"\"\"\r\n        molded_images = []\r\n        image_metas = []\r\n        windows = []\r\n        for image in images:\r\n            # Resize image\r\n            # TODO: move resizing to mold_image()\r\n            molded_image, window, scale, padding, crop = utils.resize_image(\r\n                image,\r\n                min_dim=self.config.IMAGE_MIN_DIM,\r\n                min_scale=self.config.IMAGE_MIN_SCALE,\r\n                max_dim=self.config.IMAGE_MAX_DIM,\r\n                mode=self.config.IMAGE_RESIZE_MODE)\r\n            molded_image = mold_image(molded_image, self.config)\r\n            # Build image_meta\r\n            image_meta = compose_image_meta(\r\n                0, image.shape, molded_image.shape, window, scale,\r\n                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\r\n            # Append\r\n            molded_images.append(molded_image)\r\n            windows.append(window)\r\n            image_metas.append(image_meta)\r\n        # Pack into arrays\r\n        molded_images = np.stack(molded_images)\r\n        image_metas = np.stack(image_metas)\r\n        windows = np.stack(windows)\r\n        return molded_images, image_metas, windows\r\n\r\n    def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\r\n                          image_shape, window):\r\n        \"\"\"Reformats the detections of one image from the format of the neural\r\n        network output to a format suitable for use in the rest of the\r\n        application.\r\n\r\n        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\r\n        mrcnn_mask: [N, height, width, num_classes]\r\n        original_image_shape: [H, W, C] Original image shape before resizing\r\n        image_shape: [H, W, C] Shape of the image after resizing and padding\r\n        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\r\n                image is excluding the padding.\r\n\r\n        Returns:\r\n        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\r\n        class_ids: [N] Integer class IDs for each bounding box\r\n        scores: [N] Float probability scores of the class_id\r\n        masks: [height, width, num_instances] Instance masks\r\n        \"\"\"\r\n        # How many detections do we have?\r\n        # Detections array is padded with zeros. 
Find the first class_id == 0.\r\n zero_ix = np.where(detections[:, 4] == 0)[0]\r\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\r\n\r\n # Extract boxes, class_ids, scores, and class-specific masks\r\n boxes = detections[:N, :4]\r\n class_ids = detections[:N, 4].astype(np.int32)\r\n scores = detections[:N, 5]\r\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\r\n\r\n # Translate normalized coordinates in the resized image to pixel\r\n # coordinates in the original image before resizing\r\n window = utils.norm_boxes(window, image_shape[:2])\r\n wy1, wx1, wy2, wx2 = window\r\n shift = np.array([wy1, wx1, wy1, wx1])\r\n wh = wy2 - wy1 # window height\r\n ww = wx2 - wx1 # window width\r\n scale = np.array([wh, ww, wh, ww])\r\n # Convert boxes to normalized coordinates on the window\r\n boxes = np.divide(boxes - shift, scale)\r\n # Convert boxes to pixel coordinates on the original image\r\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\r\n\r\n # Filter out detections with zero area. Happens in early training when\r\n # network weights are still random\r\n exclude_ix = np.where(\r\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\r\n if exclude_ix.shape[0] > 0:\r\n boxes = np.delete(boxes, exclude_ix, axis=0)\r\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\r\n scores = np.delete(scores, exclude_ix, axis=0)\r\n masks = np.delete(masks, exclude_ix, axis=0)\r\n N = class_ids.shape[0]\r\n\r\n # Resize masks to original image size and set boundary threshold.\r\n full_masks = []\r\n for i in range(N):\r\n # Convert neural network mask to full size mask\r\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\r\n full_masks.append(full_mask)\r\n full_masks = np.stack(full_masks, axis=-1)\\\r\n if full_masks else np.empty(original_image_shape[:2] + (0,))\r\n\r\n return boxes, class_ids, scores, full_masks\r\n\r\n def detect(self, images, verbose=0):\r\n \"\"\"Runs the detection pipeline.\r\n\r\n images: List of images, potentially of different sizes.\r\n\r\n Returns a list of dicts, one dict per image. The dict contains:\r\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\r\n class_ids: [N] int class IDs\r\n scores: [N] float probability scores for the class IDs\r\n masks: [H, W, N] instance binary masks\r\n \"\"\"\r\n assert self.mode == \"inference\", \"Create model in inference mode.\"\r\n assert len(\r\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\r\n\r\n if verbose:\r\n log(\"Processing {} images\".format(len(images)))\r\n for image in images:\r\n log(\"image\", image)\r\n\r\n # Mold inputs to format expected by the neural network\r\n molded_images, image_metas, windows = self.mold_inputs(images)\r\n\r\n # Validate image sizes\r\n # All images in a batch MUST be of the same size\r\n image_shape = molded_images[0].shape\r\n for g in molded_images[1:]:\r\n assert g.shape == image_shape,\\\r\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\r\n\r\n        # Anchors\r\n        anchors = self.get_anchors(image_shape)\r\n        # Duplicate across the batch dimension because Keras requires it\r\n        # TODO: can this be optimized to avoid duplicating the anchors?\r\n        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\r\n\r\n        if verbose:\r\n            log(\"molded_images\", molded_images)\r\n            log(\"image_metas\", image_metas)\r\n            log(\"anchors\", anchors)\r\n\r\n        # Run object detection\r\n        detections, _, _, mrcnn_mask, _, _, _ =\\\r\n            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\r\n        # Process detections\r\n        results = []\r\n        for i, image in enumerate(images):\r\n            final_rois, final_class_ids, final_scores, final_masks =\\\r\n                self.unmold_detections(detections[i], mrcnn_mask[i],\r\n                                       image.shape, molded_images[i].shape,\r\n                                       windows[i])\r\n            results.append({\r\n                \"rois\": final_rois,\r\n                \"class_ids\": final_class_ids,\r\n                \"scores\": final_scores,\r\n                \"masks\": final_masks,\r\n            })\r\n        return results\r\n\r\n    def detect_molded(self, molded_images, image_metas, verbose=0):\r\n        \"\"\"Runs the detection pipeline, but expects inputs that are\r\n        molded already. Used mostly for debugging and inspecting\r\n        the model.\r\n\r\n        molded_images: List of images loaded using load_image_gt()\r\n        image_metas: image meta data, also returned by load_image_gt()\r\n\r\n        Returns a list of dicts, one dict per image. The dict contains:\r\n        rois: [N, (y1, x1, y2, x2)] detection bounding boxes\r\n        class_ids: [N] int class IDs\r\n        scores: [N] float probability scores for the class IDs\r\n        masks: [H, W, N] instance binary masks\r\n        \"\"\"\r\n        assert self.mode == \"inference\", \"Create model in inference mode.\"\r\n        assert len(molded_images) == self.config.BATCH_SIZE,\\\r\n            \"Number of images must be equal to BATCH_SIZE\"\r\n\r\n        if verbose:\r\n            log(\"Processing {} images\".format(len(molded_images)))\r\n            for image in molded_images:\r\n                log(\"image\", image)\r\n\r\n        # Validate image sizes\r\n        # All images in a batch MUST be of the same size\r\n        image_shape = molded_images[0].shape\r\n        for g in molded_images[1:]:\r\n            assert g.shape == image_shape, \"Images must have the same size\"\r\n\r\n        # Anchors\r\n        anchors = self.get_anchors(image_shape)\r\n        # Duplicate across the batch dimension because Keras requires it\r\n        # TODO: can this be optimized to avoid duplicating the anchors?\r\n        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\r\n\r\n        if verbose:\r\n            log(\"molded_images\", molded_images)\r\n            log(\"image_metas\", image_metas)\r\n            log(\"anchors\", anchors)\r\n        # Run object detection\r\n        detections, _, _, mrcnn_mask, _, _, _ =\\\r\n            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\r\n        # Process detections\r\n        results = []\r\n        for i, image in enumerate(molded_images):\r\n            window = [0, 0, image.shape[0], image.shape[1]]\r\n            final_rois, final_class_ids, final_scores, final_masks =\\\r\n                self.unmold_detections(detections[i], mrcnn_mask[i],\r\n                                       image.shape, molded_images[i].shape,\r\n                                       window)\r\n            results.append({\r\n                \"rois\": final_rois,\r\n                \"class_ids\": final_class_ids,\r\n                \"scores\": final_scores,\r\n                \"masks\": final_masks,\r\n            })\r\n        return results\r\n\r\n    def get_anchors(self, image_shape):\r\n        \"\"\"Returns anchor pyramid 
for the given image size.\"\"\"\r\n        backbone_shapes = compute_backbone_shapes(self.config, image_shape)\r\n        # Cache anchors and reuse if image shape is the same\r\n        if not hasattr(self, \"_anchor_cache\"):\r\n            self._anchor_cache = {}\r\n        if not tuple(image_shape) in self._anchor_cache:\r\n            # Generate Anchors\r\n            a = utils.generate_pyramid_anchors(\r\n                self.config.RPN_ANCHOR_SCALES,\r\n                self.config.RPN_ANCHOR_RATIOS,\r\n                backbone_shapes,\r\n                self.config.BACKBONE_STRIDES,\r\n                self.config.RPN_ANCHOR_STRIDE)\r\n            # Keep a copy of the latest anchors in pixel coordinates because\r\n            # it's used in inspect_model notebooks.\r\n            # TODO: Remove this after the notebooks are refactored to not use it\r\n            self.anchors = a\r\n            # Normalize coordinates\r\n            self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\r\n        return self._anchor_cache[tuple(image_shape)]\r\n\r\n    def ancestor(self, tensor, name, checked=None):\r\n        \"\"\"Finds the ancestor of a TF tensor in the computation graph.\r\n        tensor: TensorFlow symbolic tensor.\r\n        name: Name of ancestor tensor to find\r\n        checked: For internal use. A list of tensors that were already\r\n                 searched to avoid loops in traversing the graph.\r\n        \"\"\"\r\n        checked = checked if checked is not None else []\r\n        # Put a limit on how deep we go to avoid very long loops\r\n        if len(checked) > 500:\r\n            return None\r\n        # Convert name to a regex and allow matching a number prefix\r\n        # because Keras adds them automatically\r\n        if isinstance(name, str):\r\n            name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\r\n\r\n        parents = tensor.op.inputs\r\n        for p in parents:\r\n            if p in checked:\r\n                continue\r\n            if bool(re.fullmatch(name, p.name)):\r\n                return p\r\n            checked.append(p)\r\n            a = self.ancestor(p, name, checked)\r\n            if a is not None:\r\n                return a\r\n        return None\r\n\r\n    def find_trainable_layer(self, layer):\r\n        \"\"\"If a layer is encapsulated by another layer, this function\r\n        digs through the encapsulation and returns the layer that holds\r\n        the weights.\r\n        \"\"\"\r\n        if layer.__class__.__name__ == 'TimeDistributed':\r\n            return self.find_trainable_layer(layer.layer)\r\n        return layer\r\n\r\n    def get_trainable_layers(self):\r\n        \"\"\"Returns a list of layers that have weights.\"\"\"\r\n        layers = []\r\n        # Loop through all layers\r\n        for l in self.keras_model.layers:\r\n            # If layer is a wrapper, find inner trainable layer\r\n            l = self.find_trainable_layer(l)\r\n            # Include layer if it has weights\r\n            if l.get_weights():\r\n                layers.append(l)\r\n        return layers\r\n\r\n    def run_graph(self, images, outputs, image_metas=None):\r\n        \"\"\"Runs a sub-set of the computation graph that computes the given\r\n        outputs.\r\n\r\n        image_metas: If provided, the images are assumed to be already\r\n            molded (i.e. resized, padded, and normalized)\r\n\r\n        outputs: List of tuples (name, tensor) to compute. The tensors are\r\n            symbolic TensorFlow tensors and the names are for easy tracking.\r\n\r\n        Returns an ordered dict of results. 
Keys are the names received in the\r\n        input and values are Numpy arrays.\r\n        \"\"\"\r\n        model = self.keras_model\r\n\r\n        # Organize desired outputs into an ordered dict\r\n        outputs = OrderedDict(outputs)\r\n        for o in outputs.values():\r\n            assert o is not None\r\n\r\n        # Build a Keras function to run parts of the computation graph\r\n        inputs = model.inputs\r\n        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\r\n            inputs += [K.learning_phase()]\r\n        kf = K.function(model.inputs, list(outputs.values()))\r\n\r\n        # Prepare inputs\r\n        if image_metas is None:\r\n            molded_images, image_metas, _ = self.mold_inputs(images)\r\n        else:\r\n            molded_images = images\r\n        image_shape = molded_images[0].shape\r\n        # Anchors\r\n        anchors = self.get_anchors(image_shape)\r\n        # Duplicate across the batch dimension because Keras requires it\r\n        # TODO: can this be optimized to avoid duplicating the anchors?\r\n        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\r\n        model_in = [molded_images, image_metas, anchors]\r\n\r\n        # Run inference\r\n        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\r\n            model_in.append(0.)\r\n        outputs_np = kf(model_in)\r\n\r\n        # Pack the generated Numpy arrays into a dict and log the results.\r\n        outputs_np = OrderedDict([(k, v)\r\n                                  for k, v in zip(outputs.keys(), outputs_np)])\r\n        for k, v in outputs_np.items():\r\n            log(k, v)\r\n        return outputs_np\r\n\r\n\r\n############################################################\r\n# Data Formatting\r\n############################################################\r\n\r\ndef compose_image_meta(image_id, original_image_shape, image_shape,\r\n                       window, scale, active_class_ids):\r\n    \"\"\"Takes attributes of an image and puts them in one 1D array.\r\n\r\n    image_id: An int ID of the image. Useful for debugging.\r\n    original_image_shape: [H, W, C] before resizing or padding.\r\n    image_shape: [H, W, C] after resizing and padding\r\n    window: (y1, x1, y2, x2) in pixels. The area of the image where the real\r\n            image is (excluding the padding)\r\n    scale: The scaling factor applied to the original image (float32)\r\n    active_class_ids: List of class_ids available in the dataset from which\r\n        the image came. 
Useful if training on images from multiple datasets\r\n        where not all classes are present in all datasets.\r\n    \"\"\"\r\n    meta = np.array(\r\n        [image_id] +                  # size=1\r\n        list(original_image_shape) +  # size=3\r\n        list(image_shape) +           # size=3\r\n        list(window) +                # size=4 (y1, x1, y2, x2) in image coordinates\r\n        [scale] +                     # size=1\r\n        list(active_class_ids)        # size=num_classes\r\n    )\r\n    return meta\r\n\r\n\r\ndef parse_image_meta(meta):\r\n    \"\"\"Parses an array that contains image attributes to its components.\r\n    See compose_image_meta() for more details.\r\n\r\n    meta: [batch, meta length] where meta length depends on NUM_CLASSES\r\n\r\n    Returns a dict of the parsed values.\r\n    \"\"\"\r\n    image_id = meta[:, 0]\r\n    original_image_shape = meta[:, 1:4]\r\n    image_shape = meta[:, 4:7]\r\n    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels\r\n    scale = meta[:, 11]\r\n    active_class_ids = meta[:, 12:]\r\n    return {\r\n        \"image_id\": image_id.astype(np.int32),\r\n        \"original_image_shape\": original_image_shape.astype(np.int32),\r\n        \"image_shape\": image_shape.astype(np.int32),\r\n        \"window\": window.astype(np.int32),\r\n        \"scale\": scale.astype(np.float32),\r\n        \"active_class_ids\": active_class_ids.astype(np.int32),\r\n    }\r\n\r\n\r\ndef parse_image_meta_graph(meta):\r\n    \"\"\"Parses a tensor that contains image attributes to its components.\r\n    See compose_image_meta() for more details.\r\n\r\n    meta: [batch, meta length] where meta length depends on NUM_CLASSES\r\n\r\n    Returns a dict of the parsed tensors.\r\n    \"\"\"\r\n    image_id = meta[:, 0]\r\n    original_image_shape = meta[:, 1:4]\r\n    image_shape = meta[:, 4:7]\r\n    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels\r\n    scale = meta[:, 11]\r\n    active_class_ids = meta[:, 12:]\r\n    return {\r\n        \"image_id\": image_id,\r\n        \"original_image_shape\": original_image_shape,\r\n        \"image_shape\": image_shape,\r\n        \"window\": window,\r\n        \"scale\": scale,\r\n        \"active_class_ids\": active_class_ids,\r\n    }\r\n\r\n\r\ndef mold_image(images, config):\r\n    \"\"\"Expects an RGB image (or array of images) and subtracts\r\n    the mean pixel and converts it to float. Expects image\r\n    colors in RGB order.\r\n    \"\"\"\r\n    return images.astype(np.float32) - config.MEAN_PIXEL\r\n\r\n\r\ndef unmold_image(normalized_images, config):\r\n    \"\"\"Takes an image normalized with mold() and returns the original.\"\"\"\r\n    return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\r\n\r\n\r\n############################################################\r\n# Miscellaneous Graph Functions\r\n############################################################\r\n\r\ndef trim_zeros_graph(boxes, name='trim_zeros'):\r\n    \"\"\"Often boxes are represented with matrices of shape [N, 4] and\r\n    are padded with zeros. 
This removes zero boxes.\r\n\r\n    boxes: [N, 4] matrix of boxes.\r\n    non_zeros: [N] a 1D boolean mask identifying the rows to keep\r\n    \"\"\"\r\n    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\r\n    boxes = tf.boolean_mask(boxes, non_zeros, name=name)\r\n    return boxes, non_zeros\r\n\r\n\r\ndef batch_pack_graph(x, counts, num_rows):\r\n    \"\"\"Picks a different number of values from each row\r\n    in x depending on the values in counts.\r\n    \"\"\"\r\n    outputs = []\r\n    for i in range(num_rows):\r\n        outputs.append(x[i, :counts[i]])\r\n    return tf.concat(outputs, axis=0)\r\n\r\n\r\ndef norm_boxes_graph(boxes, shape):\r\n    \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\r\n    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\r\n    shape: [..., (height, width)] in pixels\r\n\r\n    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\r\n    coordinates it's inside the box.\r\n\r\n    Returns:\r\n        [..., (y1, x1, y2, x2)] in normalized coordinates\r\n    \"\"\"\r\n    h, w = tf.split(tf.cast(shape, tf.float32), 2)\r\n    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\r\n    shift = tf.constant([0., 0., 1., 1.])\r\n    return tf.divide(boxes - shift, scale)\r\n\r\n\r\ndef denorm_boxes_graph(boxes, shape):\r\n    \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\r\n    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\r\n    shape: [..., (height, width)] in pixels\r\n\r\n    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\r\n    coordinates it's inside the box.\r\n\r\n    Returns:\r\n        [..., (y1, x1, y2, x2)] in pixel coordinates\r\n    \"\"\"\r\n    h, w = tf.split(tf.cast(shape, tf.float32), 2)\r\n    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\r\n    shift = tf.constant([0., 0., 1., 1.])\r\n    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\r\n"
] |
[
[
"tensorflow.exp",
"numpy.random.choice",
"tensorflow.image.non_max_suppression",
"numpy.copy",
"tensorflow.unique",
"tensorflow.reshape",
"numpy.where",
"tensorflow.sqrt",
"numpy.sort",
"tensorflow.stack",
"tensorflow.control_dependencies",
"numpy.broadcast_to",
"tensorflow.divide",
"tensorflow.cast",
"tensorflow.identity",
"numpy.concatenate",
"tensorflow.random.shuffle",
"numpy.divide",
"tensorflow.shape",
"numpy.empty",
"tensorflow.concat",
"numpy.log",
"numpy.max",
"tensorflow.argmax",
"tensorflow.image.crop_and_resize",
"tensorflow.Variable",
"tensorflow.math.log",
"tensorflow.transpose",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.squeeze",
"numpy.argmax",
"numpy.random.randint",
"numpy.arange",
"tensorflow.split",
"tensorflow.pad",
"numpy.expand_dims",
"tensorflow.abs",
"numpy.array",
"tensorflow.range",
"tensorflow.minimum",
"numpy.reshape",
"numpy.zeros",
"tensorflow.where",
"tensorflow.gather_nd",
"tensorflow.round",
"tensorflow.expand_dims",
"numpy.delete",
"numpy.random.shuffle",
"tensorflow.map_fn",
"numpy.stack",
"numpy.amax",
"tensorflow.reduce_sum",
"tensorflow.nn.top_k",
"numpy.hstack",
"tensorflow.boolean_mask",
"tensorflow.logical_and",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.fliplr",
"tensorflow.size",
"tensorflow.multiply",
"numpy.sum",
"numpy.ones",
"tensorflow.equal",
"tensorflow.sparse.to_dense",
"numpy.any",
"tensorflow.reduce_max",
"numpy.abs",
"tensorflow.gather",
"tensorflow.maximum",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
] |
jiachens/auto_LiRPA
|
[
"cc1ff18e8fbc938953b20ae6a030a25761cb0b78"
] |
[
"tests/test_rectangle_patches.py"
] |
[
"import torch\nimport random\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nfrom auto_LiRPA import BoundedModule, BoundedTensor\nfrom auto_LiRPA.perturbations import *\nimport sys\nsys.path.append('../examples/vision')\nimport models\nfrom testcase import TestCase\n\nclass cnn_4layer_resnet(nn.Module):\n def __init__(self):\n super(cnn_4layer_resnet, self).__init__()\n self.conv1 = nn.Conv2d(3, 3, 4, stride=2, padding=1)\n self.bn = nn.BatchNorm2d(3)\n self.shortcut = nn.Conv2d(3, 3, 4, stride=2, padding=1)\n self.conv2 = nn.Conv2d(3, 3, 4, stride=2, padding=1)\n self.fc1 = nn.Linear(168, 10)\n\n def forward(self, x):\n x_ = x\n x = F.relu(self.conv1(self.bn(x)))\n x += self.shortcut(x_)\n x = F.relu(self.conv2(x))\n x = x.view(x.size(0), -1)\n print(x.size())\n x = self.fc1(x)\n\n return x\n\nclass TestResnetPatches(TestCase): \n def __init__(self, methodName='runTest', generate=False):\n super().__init__(methodName, \n seed=1234, ref_path='data/rectangle_patches_test_data',\n generate=generate)\n\n def test(self):\n model_oris = [\n cnn_4layer_resnet(),\n ]\n self.result = []\n if not self.generate:\n self.reference = torch.load(self.ref_path)\n\n for model_ori in model_oris:\n conv_mode = 'patches' # conv_mode can be set as 'matrix' or 'patches' \n \n normalize = torchvision.transforms.Normalize(mean = [0.4914, 0.4822, 0.4465], std = [0.2023, 0.1994, 0.2010])\n test_data = torchvision.datasets.CIFAR10(\"./data\", train=False, download=True, \n transform=torchvision.transforms.Compose([torchvision.transforms.ToTensor(), normalize]))\n N = 1\n n_classes = 10\n\n image = torch.Tensor(test_data.data[:N]).reshape(N,3,32,32)\n image = image[:, :, :28, :]\n image = image.to(torch.float32) / 255.0\n\n model = BoundedModule(model_ori, image, bound_opts={\"conv_mode\": conv_mode})\n\n ptb = PerturbationLpNorm(norm = np.inf, eps = 0.03)\n image = BoundedTensor(image, ptb)\n pred = model(image)\n lb, ub = model.compute_bounds(IBP=False, C=None, method='backward')\n self.result += [lb, ub]\n\n self.check()\n\nif __name__ == '__main__':\n # Change to generate=True when genearting reference results\n testcase = TestResnetPatches(generate=False)\n testcase.test()"
] |
[
[
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.load",
"torch.Tensor"
]
] |
Ensteinjun/mediapipe
|
[
"38be2ec58f2a1687f4ffca287094c7bbd7791f58"
] |
[
"mediapipe/util/sequence/media_sequence.py"
] |
[
"\"\"\"Copyright 2019 The MediaPipe Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n\nThis script defines a large number of getters and setters for storing\nmultimedia, such as video or audio, and related machine learning data in\ntf.train.SequenceExamples. These getters and setters simplify sharing\ndata by enforcing common patterns for storing data in SequenceExample\nkey-value pairs.\n\nThe constants, macros, and functions are organized into 6 groups: clip\nmetadata, clip label related, segment related, bounding-box related, image\nrelated, feature list related, and keyframe related. The following examples\nwill walk through common task structures, but the relevant data to store can\nvary by task.\n\nThe clip metadata group is generally data about the media and stored in the\nSequenceExample.context. Specifying the metadata enables media pipelines,\nsuch as MediaPipe, to retrieve that data. Typically, set_clip_data_path,\nset_clip_start_timestamp, and set_clip_end_timestamp define which data to use\nwithout storing the data itself. Example:\n tensorflow.train.SequenceExample sequence\n set_clip_data_path(\"/relative/path/to/data.mp4\", sequence)\n set_clip_start_timestamp(0, sequence)\n set_clip_end_timestamp(10000000, sequence) # 10 seconds in microseconds.\n\nThe clip label group adds labels that apply to the entire media clip. To\nannotate that a video clip has a particular label, set the clip metadata\nabove and also set the set_clip_label_index and set_clip_label_string. Most\ntraining pipelines will only use the label index or string, but we recommend\nstoring both to improve readability while maintaining ease of use.\nExample:\n set_clip_label_string((\"run\", \"jump\"), sequence)\n set_Clip_label_index((35, 47), sequence)\n\nThe segment group is generally data about time spans within the media clip\nand stored in the SequenceExample.context. In this code, continuous lengths\nof media are called clips, and each clip may have subregions of interest that\nare called segments. To annotate that a video clip has time spans with labels\nset the clip metadata above and use the functions set_segment_start_timestamp,\nset_segment_end_timestamp, set_segment_label_index, and\nset_segment_label_string. Most training pipelines will only use the label index\nor string, but we recommend storing both to improve readability while\nmaintaining ease of use. By listing segments as times, the frame rate or other\nproperties can change without affecting the labels.\nExample:\n set_segment_start_timestamp((500000, 1000000), sequence) # in microseconds\n set_segment_end_timestamp((2000000, 6000000), sequence)\n set_segment_label_index((35, 47), sequence)\n set_segment_label_string((\"run\", \"jump\"), sequence)\n\nThe bounding box group is useful for identifying spatio-temporal annotations\nfor detection, tracking, or action recognition. 
The exact keys that are\nneeded can vary by task, but to annotate a video clip for detection set the\nclip metadata above and repeatedly call add_bbox, add_bbox_timestamp,\nadd_bbox_label_index, and add_bbox_label_string. Most training pipelines will\nonly use the label index or string, but we recommend storing both to improve\nreadability while maintaining ease of use. Because bounding boxes are\nassigned to timepoints in a video, changing the image frame rate can\nchange the alignment. The ReconcileMetadata function in media_sequence.h can\nalign bounding boxes to the nearest image.\n\nThe image group is useful for storing data as sequential 2D arrays, typically\nencoded as bytes. Images can be RGB images stored as JPEG, discrete masks\nstored as PNG, or some other format. Parameters that are static over time are\nset in the context using set_image_width, set_image_height, set_image_format,\netc. The series of frames and timestamps are then added with add_image_encoded\nand add_image_timestamp. For discrete masks, the class or instance indices can be\nmapped to labels or classes using\nset_class_segmentation_class_label_{index,string} and\nset_instance_segmentation_object_class_index.\n\nThe feature list group is useful for storing audio and extracted features,\nsuch as per-frame embeddings. SequenceExamples only store lists of floats per\ntimestep, so the dimensions are stored in the context to enable reshaping.\nFor example, set_feature_dimensions and repeatedly calling add_feature_floats\nand add_feature_timestamp adds per-frame embeddings. The feature methods also\nsupport audio features.\n\nMacros for common patterns are created in media_sequence_util.py and are used\nhere extensively. Because these macros are formulaic, I will only include a\nusage example here in the code rather than repeating documentation for every\ninstance. This module defines additional functions to simplify working with\nMediaPipe types.\n\nEach msu.create_{TYPE}_context_feature takes a NAME and a KEY. It provides\nsetters and getters for SequenceExamples and stores a single value under KEY\nin the context field. The provided functions are has_${NAME}, get_${NAME},\nset_${NAME}, and clear_${NAME}.\nEg.\n  tf.train.SequenceExample example\n  set_data_path(\"data_path\", example)\n  if has_data_path(example):\n     data_path = get_data_path(example)\n  clear_data_path(example)\n\nEach msu.create_{TYPE}_list_context_feature takes a NAME and a KEY. It provides\nsetters and getters for SequenceExamples and stores a sequence of values\nunder KEY in the context field. The provided functions are has_${NAME},\nget_${NAME}, set_${NAME}, clear_${NAME}, get_${NAME}_at, and add_${NAME}.\nEg.\n  tf.train.SequenceExample example\n  set_clip_label_string((\"run\", \"jump\"), example)\n  if has_clip_label_string(example):\n     values = get_clip_label_string(example)\n  clear_clip_label_string(example)\n\nEach msu.create_{TYPE}_feature_list takes a NAME and a KEY. It provides setters\nand getters for SequenceExamples and stores a single value in each feature field\nunder KEY of the feature_lists field. 
The provided functions are has_${NAME},\nget_${NAME}, clear_${NAME}, get_${NAME}_size, get_${NAME}_at, and add_${NAME}.\n  tf.train.SequenceExample example\n  add_image_timestamp(1000000, example)\n  add_image_timestamp(2000000, example)\n  if has_image_timestamp(example):\n    for i in range(get_image_timestamp_size()):\n      timestamp = get_image_timestamp_at(example, i)\n  clear_image_timestamp(example)\n\nEach msu.create_{TYPE}_list_feature_list takes a NAME and a KEY. It provides\nsetters and getters for SequenceExamples and stores a sequence of values in each\nfeature field under KEY of the feature_lists field. The provided functions\nare has_${NAME}, get_${NAME}, clear_${NAME}, get_${NAME}_size, get_${NAME}_at, and\nadd_${NAME}.\n  tf.train.SequenceExample example\n  add_bbox_label_string((\"run\", \"jump\"), example)\n  add_bbox_label_string((\"run\", \"fall\"), example)\n  if has_bbox_label_string(example):\n    for i in range(get_bbox_label_string_size(example)):\n      labels = get_bbox_label_string_at(example, i)\n  clear_bbox_label_string(example)\n\nAs described in media_sequence_util.py, each of these functions can take an\nadditional string prefix argument as their first argument. The prefix can\nbe fixed with a new NAME by using functools.partial. Prefixes are used to\nidentify common storage patterns (e.g. storing an image along with the height\nand width) under different names (e.g. storing a left and right image in a\nstereo pair.) An example creating functions such as\nadd_left_image_encoded that adds a string under the key \"LEFT/image/encoded\"\n  add_left_image_encoded = functools.partial(add_image_encoded, prefix=\"LEFT\")\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport functools\nimport numpy as np\nfrom mediapipe.util.sequence import media_sequence_util\nmsu = media_sequence_util\n\n_HAS_DYNAMIC_ATTRIBUTES = True\n\n################################## METADATA #################################\n# A unique identifier for each example.\nEXAMPLE_ID_KEY = \"example/id\"\n# The name of the data set, including the version.\nEXAMPLE_DATASET_NAME_KEY = \"example/dataset_name\"\n# The relative path to the data on disk from some root directory.\nCLIP_DATA_PATH_KEY = \"clip/data_path\"\n# Any identifier for the media beyond the data path.\nCLIP_MEDIA_ID_KEY = \"clip/media_id\"\n# Yet another alternative identifier.\nALTERNATIVE_CLIP_MEDIA_ID_KEY = \"clip/alternative_media_id\"\n# The encoded bytes for storing media directly in the SequenceExample.\nCLIP_ENCODED_MEDIA_BYTES_KEY = \"clip/encoded_media_bytes\"\n# The start time for the encoded media if not preserved during encoding.\nCLIP_ENCODED_MEDIA_START_TIMESTAMP_KEY = \"clip/encoded_media_start_timestamp\"\n# The start time, in microseconds, for the start of the clip in the media.\nCLIP_START_TIMESTAMP_KEY = \"clip/start/timestamp\"\n# The end time, in microseconds, for the end of the clip in the media.\nCLIP_END_TIMESTAMP_KEY = \"clip/end/timestamp\"\n# A list of label indices for this clip.\nCLIP_LABEL_INDEX_KEY = \"clip/label/index\"\n# A list of label strings for this clip.\nCLIP_LABEL_STRING_KEY = \"clip/label/string\"\n# A list of label confidences for this clip.\nCLIP_LABEL_CONFIDENCE_KEY = \"clip/label/confidence\"\nmsu.create_bytes_context_feature(\n    \"example_id\", EXAMPLE_ID_KEY, module_dict=globals())\nmsu.create_bytes_context_feature(\n    \"example_dataset_name\", EXAMPLE_DATASET_NAME_KEY, module_dict=globals())\nmsu.create_bytes_context_feature(\n    \"clip_media_id\", 
CLIP_MEDIA_ID_KEY, module_dict=globals())\nmsu.create_bytes_context_feature(\n \"clip_alternative_media_id\", ALTERNATIVE_CLIP_MEDIA_ID_KEY,\n module_dict=globals())\nmsu.create_bytes_context_feature(\n \"clip_encoded_media_bytes\", CLIP_ENCODED_MEDIA_BYTES_KEY,\n module_dict=globals())\nmsu.create_bytes_context_feature(\n \"clip_data_path\", CLIP_DATA_PATH_KEY, module_dict=globals())\nmsu.create_int_context_feature(\n \"clip_encoded_media_start_timestamp\",\n CLIP_ENCODED_MEDIA_START_TIMESTAMP_KEY, module_dict=globals())\nmsu.create_int_context_feature(\n \"clip_start_timestamp\", CLIP_START_TIMESTAMP_KEY, module_dict=globals())\nmsu.create_int_context_feature(\n \"clip_end_timestamp\", CLIP_END_TIMESTAMP_KEY, module_dict=globals())\nmsu.create_bytes_list_context_feature(\n \"clip_label_string\", CLIP_LABEL_STRING_KEY, module_dict=globals())\nmsu.create_int_list_context_feature(\n \"clip_label_index\", CLIP_LABEL_INDEX_KEY, module_dict=globals())\nmsu.create_float_list_context_feature(\n \"clip_label_confidence\", CLIP_LABEL_CONFIDENCE_KEY, module_dict=globals())\n\n################################## SEGMENTS #################################\n# A list of segment start times in microseconds.\nSEGMENT_START_TIMESTAMP_KEY = \"segment/start/timestamp\"\n# A list of indices marking the first frame index >= the start timestamp.\nSEGMENT_START_INDEX_KEY = \"segment/start/index\"\n# A list of segment end times in microseconds.\nSEGMENT_END_TIMESTAMP_KEY = \"segment/end/timestamp\"\n# A list of indices marking the last frame index <= the end timestamp.\nSEGMENT_END_INDEX_KEY = \"segment/end/index\"\n# A list with the label index for each segment.\n# Multiple labels for the same segment are encoded as repeated segments.\nSEGMENT_LABEL_INDEX_KEY = \"segment/label/index\"\n# A list with the label string for each segment.\n# Multiple labels for the same segment are encoded as repeated segments.\nSEGMENT_LABEL_STRING_KEY = \"segment/label/string\"\n# A list with the label confidence for each segment.\n# Multiple labels for the same segment are encoded as repeated segments.\nSEGMENT_LABEL_CONFIDENCE_KEY = \"segment/label/confidence\"\nmsu.create_bytes_list_context_feature(\n \"segment_label_string\", SEGMENT_LABEL_STRING_KEY, module_dict=globals())\nmsu.create_int_list_context_feature(\n \"segment_start_timestamp\",\n SEGMENT_START_TIMESTAMP_KEY, module_dict=globals())\nmsu.create_int_list_context_feature(\n \"segment_start_index\", SEGMENT_START_INDEX_KEY, module_dict=globals())\nmsu.create_int_list_context_feature(\n \"segment_end_timestamp\", SEGMENT_END_TIMESTAMP_KEY, module_dict=globals())\nmsu.create_int_list_context_feature(\n \"segment_end_index\", SEGMENT_END_INDEX_KEY, module_dict=globals())\nmsu.create_int_list_context_feature(\n \"segment_label_index\", SEGMENT_LABEL_INDEX_KEY, module_dict=globals())\nmsu.create_float_list_context_feature(\n \"segment_label_confidence\",\n SEGMENT_LABEL_CONFIDENCE_KEY, module_dict=globals())\n\n########################## REGIONS / BOUNDING BOXES #########################\n\n# Normalized coordinates of bounding boxes are provided in four lists to avoid\n# order ambiguity. 
We provide additional accessors for complete bounding boxes\n# below.\nREGION_BBOX_YMIN_KEY = \"region/bbox/ymin\"\nREGION_BBOX_XMIN_KEY = \"region/bbox/xmin\"\nREGION_BBOX_YMAX_KEY = \"region/bbox/ymax\"\nREGION_BBOX_XMAX_KEY = \"region/bbox/xmax\"\n# The point and radius can denote keypoints.\nREGION_POINT_X_KEY = \"region/point/x\"\nREGION_POINT_Y_KEY = \"region/point/y\"\nREGION_RADIUS_KEY = \"region/radius\"\n# The 3D point can denote keypoints.\nREGION_3D_POINT_X_KEY = \"region/3d_point/x\"\nREGION_3D_POINT_Y_KEY = \"region/3d_point/y\"\nREGION_3D_POINT_Z_KEY = \"region/3d_point/z\"\n# The number of regions at that timestep.\nREGION_NUM_REGIONS_KEY = \"region/num_regions\"\n# Whether that timestep is annotated for regions.\n# (Disambiguates between multiple meanings of num_regions = 0.)\nREGION_IS_ANNOTATED_KEY = \"region/is_annotated\"\n# A list indicating if each region is generated (1) or manually annotated (0)\nREGION_IS_GENERATED_KEY = \"region/is_generated\"\n# A list indicating if each region is occluded (1) or visible (0)\nREGION_IS_OCCLUDED_KEY = \"region/is_occluded\"\n# Lists with a label for each region.\n# Multiple labels for the same region require duplicating the region.\nREGION_LABEL_INDEX_KEY = \"region/label/index\"\nREGION_LABEL_STRING_KEY = \"region/label/string\"\nREGION_LABEL_CONFIDENCE_KEY = \"region/label/confidence\"\n# Lists with a track identifier for each region.\n# Multiple track identifiers for the same region require duplicating the region.\nREGION_TRACK_INDEX_KEY = \"region/track/index\"\nREGION_TRACK_STRING_KEY = \"region/track/string\"\nREGION_TRACK_CONFIDENCE_KEY = \"region/track/confidence\"\n# Lists with a class for each region. In general, prefer to use the label\n# fields. These class fields exist to distinguish tracks when different classes\n# have overlapping track ids.\nREGION_CLASS_INDEX_KEY = \"region/class/index\"\nREGION_CLASS_STRING_KEY = \"region/class/string\"\nREGION_CLASS_CONFIDENCE_KEY = \"region/class/confidence\"\n# The timestamp of the region annotation in microseconds.\nREGION_TIMESTAMP_KEY = \"region/timestamp\"\n# The original timestamp in microseconds for region annotations.\n# If regions are aligned to image frames, this field preserves the original\n# timestamps.\nREGION_UNMODIFIED_TIMESTAMP_KEY = \"region/unmodified_timestamp\"\n# The list of region parts expected in this example.\nREGION_PARTS_KEY = \"region/parts\"\n# The dimensions of each embedding per region / bounding box.\nREGION_EMBEDDING_DIMENSIONS_PER_REGION_KEY = (\n    \"region/embedding/dimensions_per_region\")\n# The format encoding embeddings as strings.\nREGION_EMBEDDING_FORMAT_KEY = \"region/embedding/format\"\n# An embedding for each region. 
The length of each list must be the product of\n# the number of regions and the product of the embedding dimensions.\nREGION_EMBEDDING_FLOAT_KEY = \"region/embedding/float\"\n# A string encoded embedding for each region.\nREGION_EMBEDDING_ENCODED_KEY = \"region/embedding/encoded\"\n# The confidence of the embedding.\nREGION_EMBEDDING_CONFIDENCE_KEY = \"region/embedding/confidence\"\n\n\ndef _create_region_with_prefix(name, prefix):\n  \"\"\"Create multiple accessors for region based data.\"\"\"\n  msu.create_int_feature_list(name + \"_num_regions\", REGION_NUM_REGIONS_KEY,\n                              prefix=prefix, module_dict=globals())\n  msu.create_int_feature_list(name + \"_is_annotated\", REGION_IS_ANNOTATED_KEY,\n                              prefix=prefix, module_dict=globals())\n  msu.create_int_list_feature_list(\n      name + \"_is_occluded\", REGION_IS_OCCLUDED_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_list_feature_list(\n      name + \"_is_generated\", REGION_IS_GENERATED_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_feature_list(name + \"_timestamp\", REGION_TIMESTAMP_KEY,\n                              prefix=prefix, module_dict=globals())\n  msu.create_int_feature_list(\n      name + \"_unmodified_timestamp\", REGION_UNMODIFIED_TIMESTAMP_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_bytes_list_feature_list(\n      name + \"_label_string\", REGION_LABEL_STRING_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_list_feature_list(\n      name + \"_label_index\", REGION_LABEL_INDEX_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(\n      name + \"_label_confidence\", REGION_LABEL_CONFIDENCE_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_bytes_list_feature_list(\n      name + \"_class_string\", REGION_CLASS_STRING_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_list_feature_list(\n      name + \"_class_index\", REGION_CLASS_INDEX_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(\n      name + \"_class_confidence\", REGION_CLASS_CONFIDENCE_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_bytes_list_feature_list(\n      name + \"_track_string\", REGION_TRACK_STRING_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_list_feature_list(\n      name + \"_track_index\", REGION_TRACK_INDEX_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(\n      name + \"_track_confidence\", REGION_TRACK_CONFIDENCE_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(name + \"_ymin\", REGION_BBOX_YMIN_KEY,\n                                     prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(name + \"_xmin\", REGION_BBOX_XMIN_KEY,\n                                     prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(name + \"_ymax\", REGION_BBOX_YMAX_KEY,\n                                     prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(name + \"_xmax\", REGION_BBOX_XMAX_KEY,\n                                     prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(name + \"_point_x\", REGION_POINT_X_KEY,\n                                     prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(name + \"_point_y\", REGION_POINT_Y_KEY,\n                                     prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(\n      name + \"_3d_point_x\", REGION_3D_POINT_X_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(\n      name + \"_3d_point_y\", REGION_3D_POINT_Y_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_float_list_feature_list(\n      name + \"_3d_point_z\", REGION_3D_POINT_Z_KEY,\n      prefix=prefix, module_dict=globals())\n  
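# The accessors below store parts and embedding metadata once per example in\n  # the context, while the embedding values themselves are stored per timestep\n  # as feature lists.\n  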
msu.create_bytes_list_context_feature(name + \"_parts\",\n REGION_PARTS_KEY,\n prefix=prefix, module_dict=globals())\n msu.create_float_list_context_feature(\n name + \"_embedding_dimensions_per_region\",\n REGION_EMBEDDING_DIMENSIONS_PER_REGION_KEY,\n prefix=prefix, module_dict=globals())\n msu.create_bytes_context_feature(name + \"_embedding_format\",\n REGION_EMBEDDING_FORMAT_KEY,\n prefix=prefix, module_dict=globals())\n msu.create_float_list_feature_list(name + \"_embedding_floats\",\n REGION_EMBEDDING_FLOAT_KEY,\n prefix=prefix, module_dict=globals())\n msu.create_bytes_list_feature_list(name + \"_embedding_encoded\",\n REGION_EMBEDDING_ENCODED_KEY,\n prefix=prefix, module_dict=globals())\n msu.create_float_list_feature_list(name + \"_embedding_confidence\",\n REGION_EMBEDDING_CONFIDENCE_KEY,\n prefix=prefix, module_dict=globals())\n # pylint: disable=undefined-variable\n def get_prefixed_bbox_at(index, sequence_example, prefix):\n return np.stack((\n get_bbox_ymin_at(index, sequence_example, prefix=prefix),\n get_bbox_xmin_at(index, sequence_example, prefix=prefix),\n get_bbox_ymax_at(index, sequence_example, prefix=prefix),\n get_bbox_xmax_at(index, sequence_example, prefix=prefix)),\n 1)\n def add_prefixed_bbox(values, sequence_example, prefix):\n values = np.array(values)\n if values.size == 0:\n add_bbox_ymin([], sequence_example, prefix=prefix)\n add_bbox_xmin([], sequence_example, prefix=prefix)\n add_bbox_ymax([], sequence_example, prefix=prefix)\n add_bbox_xmax([], sequence_example, prefix=prefix)\n else:\n add_bbox_ymin(values[:, 0], sequence_example, prefix=prefix)\n add_bbox_xmin(values[:, 1], sequence_example, prefix=prefix)\n add_bbox_ymax(values[:, 2], sequence_example, prefix=prefix)\n add_bbox_xmax(values[:, 3], sequence_example, prefix=prefix)\n def get_prefixed_bbox_size(sequence_example, prefix):\n return get_bbox_ymin_size(sequence_example, prefix=prefix)\n def has_prefixed_bbox(sequence_example, prefix):\n return has_bbox_ymin(sequence_example, prefix=prefix)\n def clear_prefixed_bbox(sequence_example, prefix):\n clear_bbox_ymin(sequence_example, prefix=prefix)\n clear_bbox_xmin(sequence_example, prefix=prefix)\n clear_bbox_ymax(sequence_example, prefix=prefix)\n clear_bbox_xmax(sequence_example, prefix=prefix)\n def get_prefixed_point_at(index, sequence_example, prefix):\n return np.stack((\n get_bbox_point_y_at(index, sequence_example, prefix=prefix),\n get_bbox_point_x_at(index, sequence_example, prefix=prefix)),\n 1)\n def add_prefixed_point(values, sequence_example, prefix):\n add_bbox_point_y(values[:, 0], sequence_example, prefix=prefix)\n add_bbox_point_x(values[:, 1], sequence_example, prefix=prefix)\n def get_prefixed_point_size(sequence_example, prefix):\n return get_bbox_point_y_size(sequence_example, prefix=prefix)\n def has_prefixed_point(sequence_example, prefix):\n return has_bbox_point_y(sequence_example, prefix=prefix)\n def clear_prefixed_point(sequence_example, prefix):\n clear_bbox_point_y(sequence_example, prefix=prefix)\n clear_bbox_point_x(sequence_example, prefix=prefix)\n def get_prefixed_3d_point_at(index, sequence_example, prefix):\n return np.stack((\n get_bbox_3d_point_x_at(index, sequence_example, prefix=prefix),\n get_bbox_3d_point_y_at(index, sequence_example, prefix=prefix),\n get_bbox_3d_point_z_at(index, sequence_example, prefix=prefix)),\n 1)\n def add_prefixed_3d_point(values, sequence_example, prefix):\n add_bbox_3d_point_x(values[:, 0], sequence_example, prefix=prefix)\n add_bbox_3d_point_y(values[:, 1], sequence_example, 
prefix=prefix)\n    add_bbox_3d_point_z(values[:, 2], sequence_example, prefix=prefix)\n  def get_prefixed_3d_point_size(sequence_example, prefix):\n    return get_bbox_3d_point_x_size(sequence_example, prefix=prefix)\n  def has_prefixed_3d_point(sequence_example, prefix):\n    return has_bbox_3d_point_x(sequence_example, prefix=prefix)\n  def clear_prefixed_3d_point(sequence_example, prefix):\n    clear_bbox_3d_point_x(sequence_example, prefix=prefix)\n    clear_bbox_3d_point_y(sequence_example, prefix=prefix)\n    clear_bbox_3d_point_z(sequence_example, prefix=prefix)\n  # pylint: enable=undefined-variable\n  msu.add_functions_to_module({\n      \"get_\" + name + \"_at\":\n          functools.partial(get_prefixed_bbox_at, prefix=prefix),\n      \"add_\" + name:\n          functools.partial(add_prefixed_bbox, prefix=prefix),\n      \"get_\" + name + \"_size\":\n          functools.partial(get_prefixed_bbox_size, prefix=prefix),\n      \"has_\" + name:\n          functools.partial(has_prefixed_bbox, prefix=prefix),\n      \"clear_\" + name:\n          functools.partial(clear_prefixed_bbox, prefix=prefix),\n  }, module_dict=globals())\n  msu.add_functions_to_module({\n      \"get_\" + name + \"_point_at\":\n          functools.partial(get_prefixed_point_at, prefix=prefix),\n      \"add_\" + name + \"_point\":\n          functools.partial(add_prefixed_point, prefix=prefix),\n      \"get_\" + name + \"_point_size\":\n          functools.partial(get_prefixed_point_size, prefix=prefix),\n      \"has_\" + name + \"_point\":\n          functools.partial(has_prefixed_point, prefix=prefix),\n      \"clear_\" + name + \"_point\":\n          functools.partial(clear_prefixed_point, prefix=prefix),\n  }, module_dict=globals())\n  msu.add_functions_to_module({\n      \"get_\" + name + \"_3d_point_at\":\n          functools.partial(get_prefixed_3d_point_at, prefix=prefix),\n      \"add_\" + name + \"_3d_point\":\n          functools.partial(add_prefixed_3d_point, prefix=prefix),\n      \"get_\" + name + \"_3d_point_size\":\n          functools.partial(get_prefixed_3d_point_size, prefix=prefix),\n      \"has_\" + name + \"_3d_point\":\n          functools.partial(has_prefixed_3d_point, prefix=prefix),\n      \"clear_\" + name + \"_3d_point\":\n          functools.partial(clear_prefixed_3d_point, prefix=prefix),\n  }, module_dict=globals())\n\n\nPREDICTED_PREFIX = \"PREDICTED\"\n_create_region_with_prefix(\"bbox\", \"\")\n_create_region_with_prefix(\"predicted_bbox\", PREDICTED_PREFIX)\n\n\n################################### IMAGES #################################\n# The format the images are encoded as (e.g. \"JPEG\", \"PNG\")\nIMAGE_FORMAT_KEY = \"image/format\"\n# The number of channels in the image.\nIMAGE_CHANNELS_KEY = \"image/channels\"\n# The colorspace of the image.\nIMAGE_COLORSPACE_KEY = \"image/colorspace\"\n# The height of the image in pixels.\nIMAGE_HEIGHT_KEY = \"image/height\"\n# The width of the image in pixels.\nIMAGE_WIDTH_KEY = \"image/width\"\n# frame rate in images/second of media.\nIMAGE_FRAME_RATE_KEY = \"image/frame_rate\"\n# The maximum values if the images were saturated and normalized for encoding.\nIMAGE_SATURATION_KEY = \"image/saturation\"\n# The listing from discrete image values (as indices) to class indices.\nIMAGE_CLASS_LABEL_INDEX_KEY = \"image/class/label/index\"\n# The listing from discrete image values (as indices) to class strings.\nIMAGE_CLASS_LABEL_STRING_KEY = \"image/class/label/string\"\n# The listing from discrete instance indices to class indices they embody.\nIMAGE_OBJECT_CLASS_INDEX_KEY = \"image/object/class/index\"\n# The encoded image frame.\nIMAGE_ENCODED_KEY = \"image/encoded\"\n# Multiple images from the same timestep (e.g. 
multiview video).\nIMAGE_MULTI_ENCODED_KEY = \"image/multi_encoded\"\n# The timestamp of the frame in microseconds.\nIMAGE_TIMESTAMP_KEY = \"image/timestamp\"\n# A per image label if specific frames have labels.\n# If time spans have labels, segments are preferred to allow changing rates.\nIMAGE_LABEL_INDEX_KEY = \"image/label/index\"\nIMAGE_LABEL_STRING_KEY = \"image/label/string\"\nIMAGE_LABEL_CONFIDENCE_KEY = \"image/label/confidence\"\n# The path of the image file if it did not come from a media clip.\nIMAGE_DATA_PATH_KEY = \"image/data_path\"\n\n\ndef _create_image_with_prefix(name, prefix):\n  \"\"\"Create multiple accessors for image based data.\"\"\"\n  msu.create_bytes_context_feature(name + \"_format\", IMAGE_FORMAT_KEY,\n                                   prefix=prefix, module_dict=globals())\n  msu.create_bytes_context_feature(name + \"_colorspace\", IMAGE_COLORSPACE_KEY,\n                                   prefix=prefix, module_dict=globals())\n  msu.create_int_context_feature(name + \"_channels\", IMAGE_CHANNELS_KEY,\n                                 prefix=prefix, module_dict=globals())\n  msu.create_int_context_feature(name + \"_height\", IMAGE_HEIGHT_KEY,\n                                 prefix=prefix, module_dict=globals())\n  msu.create_int_context_feature(name + \"_width\", IMAGE_WIDTH_KEY,\n                                 prefix=prefix, module_dict=globals())\n  msu.create_bytes_feature_list(name + \"_encoded\", IMAGE_ENCODED_KEY,\n                                prefix=prefix, module_dict=globals())\n  msu.create_float_context_feature(name + \"_frame_rate\", IMAGE_FRAME_RATE_KEY,\n                                   prefix=prefix, module_dict=globals())\n  msu.create_bytes_list_context_feature(\n      name + \"_class_label_string\", IMAGE_CLASS_LABEL_STRING_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_list_context_feature(\n      name + \"_class_label_index\", IMAGE_CLASS_LABEL_INDEX_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_int_list_context_feature(\n      name + \"_object_class_index\", IMAGE_OBJECT_CLASS_INDEX_KEY,\n      prefix=prefix, module_dict=globals())\n  msu.create_bytes_context_feature(name + \"_data_path\", IMAGE_DATA_PATH_KEY,\n                                   prefix=prefix, module_dict=globals())\n  msu.create_int_feature_list(name + \"_timestamp\", IMAGE_TIMESTAMP_KEY,\n                              prefix=prefix, module_dict=globals())\n  msu.create_bytes_list_feature_list(name + \"_multi_encoded\",\n                                     IMAGE_MULTI_ENCODED_KEY, prefix=prefix,\n                                     module_dict=globals())\nFORWARD_FLOW_PREFIX = \"FORWARD_FLOW\"\nCLASS_SEGMENTATION_PREFIX = \"CLASS_SEGMENTATION\"\nINSTANCE_SEGMENTATION_PREFIX = \"INSTANCE_SEGMENTATION\"\n_create_image_with_prefix(\"image\", \"\")\n_create_image_with_prefix(\"forward_flow\", FORWARD_FLOW_PREFIX)\n_create_image_with_prefix(\"class_segmentation\", CLASS_SEGMENTATION_PREFIX)\n_create_image_with_prefix(\"instance_segmentation\", INSTANCE_SEGMENTATION_PREFIX)\n\n################################## FEATURES #################################\n# The dimensions of the feature.\nFEATURE_DIMENSIONS_KEY = \"feature/dimensions\"\n# The rate the features are extracted per second of media.\nFEATURE_RATE_KEY = \"feature/rate\"\n# The encoding format if any for the feature.\nFEATURE_BYTES_FORMAT_KEY = \"feature/bytes/format\"\n# For audio, the rate the samples are extracted per second of media.\nFEATURE_SAMPLE_RATE_KEY = \"feature/sample_rate\"\n# For audio, the number of channels per extracted feature.\nFEATURE_NUM_CHANNELS_KEY = \"feature/num_channels\"\n# For audio, the number of samples per extracted feature.\nFEATURE_NUM_SAMPLES_KEY = \"feature/num_samples\"\n# For audio, the rate the features are extracted per second of media.\nFEATURE_PACKET_RATE_KEY = \"feature/packet_rate\"\n# For audio, the original audio sampling 
rate the feature is derived from.\nFEATURE_AUDIO_SAMPLE_RATE_KEY = \"feature/audio_sample_rate\"\n# The feature as a list of floats.\nFEATURE_FLOATS_KEY = \"feature/floats\"\n# The feature as a list of bytes. May be encoded.\nFEATURE_BYTES_KEY = \"feature/bytes\"\n# The feature as a list of ints.\nFEATURE_INTS_KEY = \"feature/ints\"\n# The timestamp, in microseconds, of the feature.\nFEATURE_TIMESTAMP_KEY = \"feature/timestamp\"\n# It is occasionally useful to indicate that a feature applies to a given range.\n# This should be used for features only and annotations should be provided as\n# segments.\nFEATURE_DURATION_KEY = \"feature/duration\"\n# Encodes an optional confidence score for the generated features.\nFEATURE_CONFIDENCE_KEY = \"feature/confidence\"\n\nmsu.create_int_list_context_feature(\n \"feature_dimensions\", FEATURE_DIMENSIONS_KEY, module_dict=globals())\nmsu.create_float_context_feature(\n \"feature_rate\", FEATURE_RATE_KEY, module_dict=globals())\nmsu.create_bytes_context_feature(\n \"feature_bytes_format\", FEATURE_BYTES_FORMAT_KEY, module_dict=globals())\nmsu.create_float_context_feature(\n \"feature_sample_rate\", FEATURE_SAMPLE_RATE_KEY, module_dict=globals())\nmsu.create_int_context_feature(\n \"feature_num_channels\", FEATURE_NUM_CHANNELS_KEY, module_dict=globals())\nmsu.create_int_context_feature(\n \"feature_num_samples\", FEATURE_NUM_SAMPLES_KEY, module_dict=globals())\nmsu.create_float_context_feature(\n \"feature_packet_rate\", FEATURE_PACKET_RATE_KEY, module_dict=globals())\nmsu.create_float_context_feature(\n \"feature_audio_sample_rate\", FEATURE_AUDIO_SAMPLE_RATE_KEY,\n module_dict=globals())\nmsu.create_float_list_feature_list(\n \"feature_floats\", FEATURE_FLOATS_KEY, module_dict=globals())\nmsu.create_bytes_list_feature_list(\n \"feature_bytes\", FEATURE_BYTES_KEY, module_dict=globals())\nmsu.create_int_list_feature_list(\n \"feature_ints\", FEATURE_INTS_KEY, module_dict=globals())\nmsu.create_int_feature_list(\n \"feature_timestamp\", FEATURE_TIMESTAMP_KEY, module_dict=globals())\nmsu.create_int_list_feature_list(\n \"feature_duration\", FEATURE_DURATION_KEY, module_dict=globals())\nmsu.create_float_list_feature_list(\n \"feature_confidence\", FEATURE_CONFIDENCE_KEY, module_dict=globals())\n\n"
] |
[
[
"numpy.array"
]
] |
eumiro/pandas
|
[
"781d9983ac54f9c6ee17accc0fd4ed55cdf71bbd"
] |
[
"pandas/core/dtypes/cast.py"
] |
[
"\"\"\"\nRoutines for casting.\n\"\"\"\n\nfrom contextlib import suppress\nfrom datetime import datetime, timedelta\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n Set,\n Sized,\n Tuple,\n Type,\n Union,\n)\n\nimport numpy as np\n\nfrom pandas._libs import lib, missing as libmissing, tslib\nfrom pandas._libs.tslibs import (\n NaT,\n OutOfBoundsDatetime,\n Period,\n Timedelta,\n Timestamp,\n conversion,\n iNaT,\n ints_to_pydatetime,\n ints_to_pytimedelta,\n)\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, Scalar\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n POSSIBLY_CAST_DTYPES,\n TD64NS_DTYPE,\n ensure_int8,\n ensure_int16,\n ensure_int32,\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_categorical_dtype,\n is_complex,\n is_complex_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_numeric_dtype,\n is_object_dtype,\n is_scalar,\n is_sparse,\n is_string_dtype,\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n is_unsigned_integer_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame,\n ABCExtensionArray,\n ABCIndex,\n ABCSeries,\n)\nfrom pandas.core.dtypes.inference import is_list_like\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna\n\nif TYPE_CHECKING:\n from pandas import Series\n from pandas.core.arrays import ExtensionArray\n\n_int8_max = np.iinfo(np.int8).max\n_int16_max = np.iinfo(np.int16).max\n_int32_max = np.iinfo(np.int32).max\n_int64_max = np.iinfo(np.int64).max\n\n\ndef maybe_convert_platform(values):\n \"\"\" try to do platform conversion, allow ndarray or list here \"\"\"\n if isinstance(values, (list, tuple, range)):\n values = construct_1d_object_array_from_listlike(values)\n if getattr(values, \"dtype\", None) == np.object_:\n if hasattr(values, \"_values\"):\n values = values._values\n values = lib.maybe_convert_objects(values)\n\n return values\n\n\ndef is_nested_object(obj) -> bool:\n \"\"\"\n return a boolean if we have a nested object, e.g. a Series with 1 or\n more Series elements\n\n This may not be necessarily be performant.\n\n \"\"\"\n if isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype):\n\n if any(isinstance(v, ABCSeries) for v in obj._values):\n return True\n\n return False\n\n\ndef maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:\n \"\"\"\n Cast scalar to Timestamp or Timedelta if scalar is datetime-like\n and dtype is not object.\n\n Parameters\n ----------\n value : scalar\n dtype : Dtype, optional\n\n Returns\n -------\n scalar\n \"\"\"\n if dtype == object:\n pass\n elif isinstance(value, (np.datetime64, datetime)):\n value = Timestamp(value)\n elif isinstance(value, (np.timedelta64, timedelta)):\n value = Timedelta(value)\n\n return value\n\n\ndef maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:\n \"\"\"\n Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting\n into a numpy array. 
Failing to unbox would risk dropping nanoseconds.\n\n Notes\n -----\n Caller is responsible for checking dtype.kind in [\"m\", \"M\"]\n \"\"\"\n if is_valid_nat_for_dtype(value, dtype):\n # GH#36541: can't fill array directly with pd.NaT\n # > np.empty(10, dtype=\"datetime64[64]\").fill(pd.NaT)\n # ValueError: cannot convert float NaN to integer\n value = dtype.type(\"NaT\", \"ns\")\n elif isinstance(value, Timestamp):\n if value.tz is None:\n value = value.to_datetime64()\n elif isinstance(value, Timedelta):\n value = value.to_timedelta64()\n return value\n\n\ndef maybe_downcast_to_dtype(result, dtype: Union[str, np.dtype]):\n \"\"\"\n try to cast to the specified dtype (e.g. convert back to bool/int\n or could be an astype of float64->float32\n \"\"\"\n do_round = False\n\n if is_scalar(result):\n return result\n elif isinstance(result, ABCDataFrame):\n # occurs in pivot_table doctest\n return result\n\n if isinstance(dtype, str):\n if dtype == \"infer\":\n inferred_type = lib.infer_dtype(ensure_object(result), skipna=False)\n if inferred_type == \"boolean\":\n dtype = \"bool\"\n elif inferred_type == \"integer\":\n dtype = \"int64\"\n elif inferred_type == \"datetime64\":\n dtype = \"datetime64[ns]\"\n elif inferred_type == \"timedelta64\":\n dtype = \"timedelta64[ns]\"\n\n # try to upcast here\n elif inferred_type == \"floating\":\n dtype = \"int64\"\n if issubclass(result.dtype.type, np.number):\n do_round = True\n\n else:\n dtype = \"object\"\n\n dtype = np.dtype(dtype)\n\n elif dtype.type is Period:\n from pandas.core.arrays import PeriodArray\n\n with suppress(TypeError):\n # e.g. TypeError: int() argument must be a string, a\n # bytes-like object or a number, not 'Period\n return PeriodArray(result, freq=dtype.freq)\n\n converted = maybe_downcast_numeric(result, dtype, do_round)\n if converted is not result:\n return converted\n\n # a datetimelike\n # GH12821, iNaT is cast to float\n if dtype.kind in [\"M\", \"m\"] and result.dtype.kind in [\"i\", \"f\"]:\n if hasattr(dtype, \"tz\"):\n # not a numpy dtype\n if dtype.tz:\n # convert to datetime and change timezone\n from pandas import to_datetime\n\n result = to_datetime(result).tz_localize(\"utc\")\n result = result.tz_convert(dtype.tz)\n else:\n result = result.astype(dtype)\n\n return result\n\n\ndef maybe_downcast_numeric(result, dtype: DtypeObj, do_round: bool = False):\n \"\"\"\n Subset of maybe_downcast_to_dtype restricted to numeric dtypes.\n\n Parameters\n ----------\n result : ndarray or ExtensionArray\n dtype : np.dtype or ExtensionDtype\n do_round : bool\n\n Returns\n -------\n ndarray or ExtensionArray\n \"\"\"\n if not isinstance(dtype, np.dtype):\n # e.g. SparseDtype has no itemsize attr\n return result\n\n def trans(x):\n if do_round:\n return x.round()\n return x\n\n if dtype.kind == result.dtype.kind:\n # don't allow upcasts here (except if empty)\n if result.dtype.itemsize <= dtype.itemsize and result.size:\n return result\n\n if is_bool_dtype(dtype) or is_integer_dtype(dtype):\n\n if not result.size:\n # if we don't have any elements, just astype it\n return trans(result).astype(dtype)\n\n # do a test on the first element, if it fails then we are done\n r = result.ravel()\n arr = np.array([r[0]])\n\n if isna(arr).any():\n # if we have any nulls, then we are done\n return result\n\n elif not isinstance(r[0], (np.integer, np.floating, int, float, bool)):\n # a comparable, e.g. 
a Decimal may slip in here\n return result\n\n if (\n issubclass(result.dtype.type, (np.object_, np.number))\n and notna(result).all()\n ):\n new_result = trans(result).astype(dtype)\n if new_result.dtype.kind == \"O\" or result.dtype.kind == \"O\":\n # np.allclose may raise TypeError on object-dtype\n if (new_result == result).all():\n return new_result\n else:\n if np.allclose(new_result, result, rtol=0):\n return new_result\n\n elif (\n issubclass(dtype.type, np.floating)\n and not is_bool_dtype(result.dtype)\n and not is_string_dtype(result.dtype)\n ):\n return result.astype(dtype)\n\n return result\n\n\ndef maybe_cast_result(\n result: ArrayLike, obj: \"Series\", numeric_only: bool = False, how: str = \"\"\n) -> ArrayLike:\n \"\"\"\n Try casting result to a different type if appropriate\n\n Parameters\n ----------\n result : array-like\n Result to cast.\n obj : Series\n Input Series from which result was calculated.\n numeric_only : bool, default False\n Whether to cast only numerics or datetimes as well.\n how : str, default \"\"\n How the result was computed.\n\n Returns\n -------\n result : array-like\n result maybe casted to the dtype.\n \"\"\"\n dtype = obj.dtype\n dtype = maybe_cast_result_dtype(dtype, how)\n\n assert not is_scalar(result)\n\n if (\n is_extension_array_dtype(dtype)\n and not is_categorical_dtype(dtype)\n and dtype.kind != \"M\"\n ):\n # We have to special case categorical so as not to upcast\n # things like counts back to categorical\n cls = dtype.construct_array_type()\n result = maybe_cast_to_extension_array(cls, result, dtype=dtype)\n\n elif numeric_only and is_numeric_dtype(dtype) or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n\ndef maybe_cast_result_dtype(dtype: DtypeObj, how: str) -> DtypeObj:\n \"\"\"\n Get the desired dtype of a result based on the\n input dtype and how it was computed.\n\n Parameters\n ----------\n dtype : DtypeObj\n Input dtype.\n how : str\n How the result was computed.\n\n Returns\n -------\n DtypeObj\n The desired dtype of the result.\n \"\"\"\n from pandas.core.arrays.boolean import BooleanDtype\n from pandas.core.arrays.floating import Float64Dtype\n from pandas.core.arrays.integer import Int64Dtype, _IntegerDtype\n\n if how in [\"add\", \"cumsum\", \"sum\", \"prod\"]:\n if dtype == np.dtype(bool):\n return np.dtype(np.int64)\n elif isinstance(dtype, (BooleanDtype, _IntegerDtype)):\n return Int64Dtype()\n elif how in [\"mean\", \"median\", \"var\"] and isinstance(\n dtype, (BooleanDtype, _IntegerDtype)\n ):\n return Float64Dtype()\n return dtype\n\n\ndef maybe_cast_to_extension_array(\n cls: Type[\"ExtensionArray\"], obj: ArrayLike, dtype: Optional[ExtensionDtype] = None\n) -> ArrayLike:\n \"\"\"\n Call to `_from_sequence` that returns the object unchanged on Exception.\n\n Parameters\n ----------\n cls : class, subclass of ExtensionArray\n obj : arraylike\n Values to pass to cls._from_sequence\n dtype : ExtensionDtype, optional\n\n Returns\n -------\n ExtensionArray or obj\n \"\"\"\n from pandas.core.arrays.string_ import StringArray\n from pandas.core.arrays.string_arrow import ArrowStringArray\n\n assert isinstance(cls, type), f\"must pass a type: {cls}\"\n assertion_msg = f\"must pass a subclass of ExtensionArray: {cls}\"\n assert issubclass(cls, ABCExtensionArray), assertion_msg\n\n # Everything can be converted to StringArrays, but we may not want to convert\n if (\n issubclass(cls, (StringArray, ArrowStringArray))\n and lib.infer_dtype(obj) != \"string\"\n ):\n return obj\n\n 
try:\n result = cls._from_sequence(obj, dtype=dtype)\n except Exception:\n # We can't predict what downstream EA constructors may raise\n result = obj\n return result\n\n\ndef maybe_upcast_putmask(result: np.ndarray, mask: np.ndarray) -> np.ndarray:\n \"\"\"\n A safe version of putmask that potentially upcasts the result.\n\n The result is replaced with the first N elements of other,\n where N is the number of True values in mask.\n If the length of other is shorter than N, other will be repeated.\n\n Parameters\n ----------\n result : ndarray\n The destination array. This will be mutated in-place if no upcasting is\n necessary.\n mask : boolean ndarray\n\n Returns\n -------\n result : ndarray\n\n Examples\n --------\n >>> arr = np.arange(1, 6)\n >>> mask = np.array([False, True, False, True, True])\n >>> result = maybe_upcast_putmask(arr, mask)\n >>> result\n array([ 1., nan, 3., nan, nan])\n \"\"\"\n if not isinstance(result, np.ndarray):\n raise ValueError(\"The result input must be a ndarray.\")\n\n # NB: we never get here with result.dtype.kind in [\"m\", \"M\"]\n\n if mask.any():\n\n # we want to decide whether place will work\n # if we have nans in the False portion of our mask then we need to\n # upcast (possibly), otherwise we DON't want to upcast (e.g. if we\n # have values, say integers, in the success portion then it's ok to not\n # upcast)\n new_dtype, _ = maybe_promote(result.dtype, np.nan)\n if new_dtype != result.dtype:\n result = result.astype(new_dtype, copy=True)\n\n np.place(result, mask, np.nan)\n\n return result\n\n\ndef maybe_promote(dtype, fill_value=np.nan):\n \"\"\"\n Find the minimal dtype that can hold both the given dtype and fill_value.\n\n Parameters\n ----------\n dtype : np.dtype or ExtensionDtype\n fill_value : scalar, default np.nan\n\n Returns\n -------\n dtype\n Upcasted from dtype argument if necessary.\n fill_value\n Upcasted from fill_value argument if necessary.\n\n Raises\n ------\n ValueError\n If fill_value is a non-scalar and dtype is not object.\n \"\"\"\n if not is_scalar(fill_value) and not is_object_dtype(dtype):\n # with object dtype there is nothing to promote, and the user can\n # pass pretty much any weird fill_value they like\n raise ValueError(\"fill_value must be a scalar\")\n\n # if we passed an array here, determine the fill value by dtype\n if isinstance(fill_value, np.ndarray):\n if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):\n fill_value = fill_value.dtype.type(\"NaT\", \"ns\")\n else:\n\n # we need to change to object type as our\n # fill_value is of object type\n if fill_value.dtype == np.object_:\n dtype = np.dtype(np.object_)\n fill_value = np.nan\n\n if dtype == np.object_ or dtype.kind in [\"U\", \"S\"]:\n # We treat string-like dtypes as object, and _always_ fill\n # with np.nan\n fill_value = np.nan\n dtype = np.dtype(np.object_)\n\n # returns tuple of (dtype, fill_value)\n if issubclass(dtype.type, np.datetime64):\n if isinstance(fill_value, datetime) and fill_value.tzinfo is not None:\n # Trying to insert tzaware into tznaive, have to cast to object\n dtype = np.dtype(np.object_)\n elif is_integer(fill_value) or (is_float(fill_value) and not isna(fill_value)):\n dtype = np.dtype(np.object_)\n elif is_valid_nat_for_dtype(fill_value, dtype):\n # e.g. 
pd.NA, which is not accepted by Timestamp constructor\n fill_value = np.datetime64(\"NaT\", \"ns\")\n else:\n try:\n fill_value = Timestamp(fill_value).to_datetime64()\n except (TypeError, ValueError):\n dtype = np.dtype(np.object_)\n elif issubclass(dtype.type, np.timedelta64):\n if (\n is_integer(fill_value)\n or (is_float(fill_value) and not np.isnan(fill_value))\n or isinstance(fill_value, str)\n ):\n # TODO: What about str that can be a timedelta?\n dtype = np.dtype(np.object_)\n elif is_valid_nat_for_dtype(fill_value, dtype):\n # e.g pd.NA, which is not accepted by the Timedelta constructor\n fill_value = np.timedelta64(\"NaT\", \"ns\")\n else:\n try:\n fv = Timedelta(fill_value)\n except ValueError:\n dtype = np.dtype(np.object_)\n else:\n if fv is NaT:\n # NaT has no `to_timedelta64` method\n fill_value = np.timedelta64(\"NaT\", \"ns\")\n else:\n fill_value = fv.to_timedelta64()\n elif is_datetime64tz_dtype(dtype):\n if isna(fill_value):\n fill_value = NaT\n elif not isinstance(fill_value, datetime):\n dtype = np.dtype(np.object_)\n elif fill_value.tzinfo is None:\n dtype = np.dtype(np.object_)\n elif not tz_compare(fill_value.tzinfo, dtype.tz):\n # TODO: sure we want to cast here?\n dtype = np.dtype(np.object_)\n\n elif is_extension_array_dtype(dtype) and isna(fill_value):\n fill_value = dtype.na_value\n\n elif is_float(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif issubclass(dtype.type, np.integer):\n dtype = np.dtype(np.float64)\n\n elif dtype.kind == \"f\":\n mst = np.min_scalar_type(fill_value)\n if mst > dtype:\n # e.g. mst is np.float64 and dtype is np.float32\n dtype = mst\n\n elif dtype.kind == \"c\":\n mst = np.min_scalar_type(fill_value)\n dtype = np.promote_types(dtype, mst)\n\n elif is_bool(fill_value):\n if not issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif is_integer(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif issubclass(dtype.type, np.integer):\n if not np.can_cast(fill_value, dtype):\n # upcast to prevent overflow\n mst = np.min_scalar_type(fill_value)\n dtype = np.promote_types(dtype, mst)\n if dtype.kind == \"f\":\n # Case where we disagree with numpy\n dtype = np.dtype(np.object_)\n\n elif is_complex(fill_value):\n if issubclass(dtype.type, np.bool_):\n dtype = np.dtype(np.object_)\n\n elif issubclass(dtype.type, (np.integer, np.floating)):\n mst = np.min_scalar_type(fill_value)\n dtype = np.promote_types(dtype, mst)\n\n elif dtype.kind == \"c\":\n mst = np.min_scalar_type(fill_value)\n if mst > dtype:\n # e.g. mst is np.complex128 and dtype is np.complex64\n dtype = mst\n\n elif fill_value is None or fill_value is libmissing.NA:\n if is_float_dtype(dtype) or is_complex_dtype(dtype):\n fill_value = np.nan\n elif is_integer_dtype(dtype):\n dtype = np.float64\n fill_value = np.nan\n elif is_datetime_or_timedelta_dtype(dtype):\n fill_value = dtype.type(\"NaT\", \"ns\")\n else:\n dtype = np.dtype(np.object_)\n if fill_value is not libmissing.NA:\n fill_value = np.nan\n else:\n dtype = np.dtype(np.object_)\n\n # in case we have a string that looked like a number\n if is_extension_array_dtype(dtype):\n pass\n elif issubclass(np.dtype(dtype).type, (bytes, str)):\n dtype = np.dtype(np.object_)\n\n fill_value = _ensure_dtype_type(fill_value, dtype)\n return dtype, fill_value\n\n\ndef _ensure_dtype_type(value, dtype: DtypeObj):\n \"\"\"\n Ensure that the given value is an instance of the given dtype.\n\n e.g. 
if out dtype is np.complex64_, we should have an instance of that\n as opposed to a python complex object.\n\n Parameters\n ----------\n value : object\n dtype : np.dtype or ExtensionDtype\n\n Returns\n -------\n object\n \"\"\"\n # Start with exceptions in which we do _not_ cast to numpy types\n if is_extension_array_dtype(dtype):\n return value\n elif dtype == np.object_:\n return value\n elif isna(value):\n # e.g. keep np.nan rather than try to cast to np.float32(np.nan)\n return value\n\n return dtype.type(value)\n\n\ndef infer_dtype_from(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:\n \"\"\"\n Interpret the dtype from a scalar or array.\n\n Parameters\n ----------\n val : object\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, scalar/array belongs to pandas extension types is inferred as\n object\n \"\"\"\n if is_scalar(val):\n return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)\n return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)\n\n\ndef infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, Any]:\n \"\"\"\n Interpret the dtype from a scalar.\n\n Parameters\n ----------\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, scalar belongs to pandas extension types is inferred as\n object\n \"\"\"\n dtype: DtypeObj = np.dtype(object)\n\n # a 1-element ndarray\n if isinstance(val, np.ndarray):\n msg = \"invalid ndarray passed to infer_dtype_from_scalar\"\n if val.ndim != 0:\n raise ValueError(msg)\n\n dtype = val.dtype\n val = lib.item_from_zerodim(val)\n\n elif isinstance(val, str):\n\n # If we create an empty array using a string to infer\n # the dtype, NumPy will only allocate one character per entry\n # so this is kind of bad. 
Alternately we could use np.repeat\n # instead of np.empty (but then you still don't want things\n # coming out as np.str_!\n\n dtype = np.dtype(object)\n\n elif isinstance(val, (np.datetime64, datetime)):\n val = Timestamp(val)\n if val is NaT or val.tz is None:\n dtype = np.dtype(\"M8[ns]\")\n else:\n if pandas_dtype:\n dtype = DatetimeTZDtype(unit=\"ns\", tz=val.tz)\n else:\n # return datetimetz as object\n return np.dtype(object), val\n val = val.value\n\n elif isinstance(val, (np.timedelta64, timedelta)):\n val = Timedelta(val).value\n dtype = np.dtype(\"m8[ns]\")\n\n elif is_bool(val):\n dtype = np.dtype(np.bool_)\n\n elif is_integer(val):\n if isinstance(val, np.integer):\n dtype = np.dtype(type(val))\n else:\n dtype = np.dtype(np.int64)\n\n try:\n np.array(val, dtype=dtype)\n except OverflowError:\n dtype = np.array(val).dtype\n\n elif is_float(val):\n if isinstance(val, np.floating):\n dtype = np.dtype(type(val))\n else:\n dtype = np.dtype(np.float64)\n\n elif is_complex(val):\n dtype = np.dtype(np.complex_)\n\n elif pandas_dtype:\n if lib.is_period(val):\n dtype = PeriodDtype(freq=val.freq)\n elif lib.is_interval(val):\n subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0]\n dtype = IntervalDtype(subtype=subtype)\n\n return dtype, val\n\n\ndef dict_compat(d: Dict[Scalar, Scalar]) -> Dict[Scalar, Scalar]:\n \"\"\"\n Convert datetimelike-keyed dicts to a Timestamp-keyed dict.\n\n Parameters\n ----------\n d: dict-like object\n\n Returns\n -------\n dict\n\n \"\"\"\n return {maybe_box_datetimelike(key): value for key, value in d.items()}\n\n\ndef infer_dtype_from_array(\n arr, pandas_dtype: bool = False\n) -> Tuple[DtypeObj, ArrayLike]:\n \"\"\"\n Infer the dtype from an array.\n\n Parameters\n ----------\n arr : array\n pandas_dtype : bool, default False\n whether to infer dtype including pandas extension types.\n If False, array belongs to pandas extension types\n is inferred as object\n\n Returns\n -------\n tuple (numpy-compat/pandas-compat dtype, array)\n\n Notes\n -----\n if pandas_dtype=False. these infer to numpy dtypes\n exactly with the exception that mixed / object dtypes\n are not coerced by stringifying or conversion\n\n if pandas_dtype=True. 
datetime64tz-aware/categorical\n types will retain there character.\n\n Examples\n --------\n >>> np.asarray([1, '1'])\n array(['1', '1'], dtype='<U21')\n\n >>> infer_dtype_from_array([1, '1'])\n (dtype('O'), [1, '1'])\n \"\"\"\n if isinstance(arr, np.ndarray):\n return arr.dtype, arr\n\n if not is_list_like(arr):\n arr = [arr]\n\n if pandas_dtype and is_extension_array_dtype(arr):\n return arr.dtype, arr\n\n elif isinstance(arr, ABCSeries):\n return arr.dtype, np.asarray(arr)\n\n # don't force numpy coerce with nan's\n inferred = lib.infer_dtype(arr, skipna=False)\n if inferred in [\"string\", \"bytes\", \"mixed\", \"mixed-integer\"]:\n return (np.dtype(np.object_), arr)\n\n arr = np.asarray(arr)\n return arr.dtype, arr\n\n\ndef maybe_infer_dtype_type(element):\n \"\"\"\n Try to infer an object's dtype, for use in arithmetic ops.\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Possibly has a `.dtype` attribute, and possibly the iterator\n protocol.\n\n Returns\n -------\n tipo : type\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Foo = namedtuple(\"Foo\", \"dtype\")\n >>> maybe_infer_dtype_type(Foo(np.dtype(\"i8\")))\n dtype('int64')\n \"\"\"\n tipo = None\n if hasattr(element, \"dtype\"):\n tipo = element.dtype\n elif is_list_like(element):\n element = np.asarray(element)\n tipo = element.dtype\n return tipo\n\n\ndef maybe_upcast(\n values: np.ndarray,\n fill_value: Scalar = np.nan,\n copy: bool = False,\n) -> Tuple[np.ndarray, Scalar]:\n \"\"\"\n Provide explicit type promotion and coercion.\n\n Parameters\n ----------\n values : np.ndarray\n The array that we may want to upcast.\n fill_value : what we want to fill with\n copy : bool, default True\n If True always make a copy even if no upcast is required.\n\n Returns\n -------\n values: np.ndarray\n the original array, possibly upcast\n fill_value:\n the fill value, possibly upcast\n \"\"\"\n new_dtype, fill_value = maybe_promote(values.dtype, fill_value)\n # We get a copy in all cases _except_ (values.dtype == new_dtype and not copy)\n values = values.astype(new_dtype, copy=copy)\n\n return values, fill_value\n\n\ndef invalidate_string_dtypes(dtype_set: Set[DtypeObj]):\n \"\"\"\n Change string like dtypes to object for\n ``DataFrame.select_dtypes()``.\n \"\"\"\n non_string_dtypes = dtype_set - {np.dtype(\"S\").type, np.dtype(\"<U\").type}\n if non_string_dtypes != dtype_set:\n raise TypeError(\"string dtypes are not allowed, use 'object' instead\")\n\n\ndef coerce_indexer_dtype(indexer, categories):\n \"\"\" coerce the indexer input array to the smallest dtype possible \"\"\"\n length = len(categories)\n if length < _int8_max:\n return ensure_int8(indexer)\n elif length < _int16_max:\n return ensure_int16(indexer)\n elif length < _int32_max:\n return ensure_int32(indexer)\n return ensure_int64(indexer)\n\n\ndef astype_td64_unit_conversion(\n values: np.ndarray, dtype: np.dtype, copy: bool\n) -> np.ndarray:\n \"\"\"\n By pandas convention, converting to non-nano timedelta64\n returns an int64-dtyped array with ints representing multiples\n of the desired timedelta unit. 
This is essentially division.\n\n Parameters\n ----------\n values : np.ndarray[timedelta64[ns]]\n dtype : np.dtype\n timedelta64 with unit not-necessarily nano\n copy : bool\n\n Returns\n -------\n np.ndarray\n \"\"\"\n if is_dtype_equal(values.dtype, dtype):\n if copy:\n return values.copy()\n return values\n\n # otherwise we are converting to non-nano\n result = values.astype(dtype, copy=False) # avoid double-copying\n result = result.astype(np.float64)\n\n mask = isna(values)\n np.putmask(result, mask, np.nan)\n return result\n\n\ndef astype_nansafe(\n arr, dtype: DtypeObj, copy: bool = True, skipna: bool = False\n) -> ArrayLike:\n \"\"\"\n Cast the elements of an array to a given dtype a nan-safe manner.\n\n Parameters\n ----------\n arr : ndarray\n dtype : np.dtype or ExtensionDtype\n copy : bool, default True\n If False, a view will be attempted but may fail, if\n e.g. the item sizes don't align.\n skipna: bool, default False\n Whether or not we should skip NaN when casting as a string-type.\n\n Raises\n ------\n ValueError\n The dtype was a datetime64/timedelta64 dtype, but it had no unit.\n \"\"\"\n if arr.ndim > 1:\n # Make sure we are doing non-copy ravel and reshape.\n flags = arr.flags\n flat = arr.ravel(\"K\")\n result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna)\n order = \"F\" if flags.f_contiguous else \"C\"\n return result.reshape(arr.shape, order=order)\n\n # dispatch on extension dtype if needed\n if isinstance(dtype, ExtensionDtype):\n return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)\n\n elif not isinstance(dtype, np.dtype):\n raise ValueError(\"dtype must be np.dtype or ExtensionDtype\")\n\n if issubclass(dtype.type, str):\n return lib.ensure_string_array(\n arr.ravel(), skipna=skipna, convert_na_value=False\n ).reshape(arr.shape)\n\n elif is_datetime64_dtype(arr):\n if is_object_dtype(dtype):\n return ints_to_pydatetime(arr.view(np.int64))\n elif dtype == np.int64:\n if isna(arr).any():\n raise ValueError(\"Cannot convert NaT values to integer\")\n return arr.view(dtype)\n\n # allow frequency conversions\n if dtype.kind == \"M\":\n return arr.astype(dtype)\n\n raise TypeError(f\"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]\")\n\n elif is_timedelta64_dtype(arr):\n if is_object_dtype(dtype):\n return ints_to_pytimedelta(arr.view(np.int64))\n elif dtype == np.int64:\n if isna(arr).any():\n raise ValueError(\"Cannot convert NaT values to integer\")\n return arr.view(dtype)\n\n elif dtype.kind == \"m\":\n return astype_td64_unit_conversion(arr, dtype, copy=copy)\n\n raise TypeError(f\"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]\")\n\n elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):\n\n if not np.isfinite(arr).all():\n raise ValueError(\"Cannot convert non-finite values (NA or inf) to integer\")\n\n elif is_object_dtype(arr):\n\n # work around NumPy brokenness, #1987\n if np.issubdtype(dtype.type, np.integer):\n return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)\n\n # if we have a datetime/timedelta array of objects\n # then coerce to a proper dtype and recall astype_nansafe\n\n elif is_datetime64_dtype(dtype):\n from pandas import to_datetime\n\n return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)\n elif is_timedelta64_dtype(dtype):\n from pandas import to_timedelta\n\n return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)\n\n if dtype.name in (\"datetime64\", \"timedelta64\"):\n msg = (\n f\"The '{dtype.name}' dtype has no unit. 
Please pass in \"\n f\"'{dtype.name}[ns]' instead.\"\n )\n raise ValueError(msg)\n\n if copy or is_object_dtype(arr) or is_object_dtype(dtype):\n # Explicit copy, or required since NumPy can't view from / to object.\n return arr.astype(dtype, copy=True)\n\n return arr.view(dtype)\n\n\ndef soft_convert_objects(\n values: np.ndarray,\n datetime: bool = True,\n numeric: bool = True,\n timedelta: bool = True,\n copy: bool = True,\n):\n \"\"\"\n Try to coerce datetime, timedelta, and numeric object-dtype columns\n to inferred dtype.\n\n Parameters\n ----------\n values : np.ndarray[object]\n datetime : bool, default True\n numeric: bool, default True\n timedelta : bool, default True\n copy : bool, default True\n\n Returns\n -------\n np.ndarray\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(copy, \"copy\")\n\n conversion_count = sum((datetime, numeric, timedelta))\n if conversion_count == 0:\n raise ValueError(\"At least one of datetime, numeric or timedelta must be True.\")\n\n # Soft conversions\n if datetime:\n # GH 20380, when datetime is beyond year 2262, hence outside\n # bound of nanosecond-resolution 64-bit integers.\n try:\n values = lib.maybe_convert_objects(values, convert_datetime=True)\n except OutOfBoundsDatetime:\n pass\n\n if timedelta and is_object_dtype(values.dtype):\n # Object check to ensure only run if previous did not convert\n values = lib.maybe_convert_objects(values, convert_timedelta=True)\n\n if numeric and is_object_dtype(values.dtype):\n try:\n converted = lib.maybe_convert_numeric(values, set(), coerce_numeric=True)\n except (ValueError, TypeError):\n pass\n else:\n # If all NaNs, then do not-alter\n values = converted if not isna(converted).all() else values\n values = values.copy() if copy else values\n\n return values\n\n\ndef convert_dtypes(\n input_array: AnyArrayLike,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n) -> Dtype:\n \"\"\"\n Convert objects to best possible type, and optionally,\n to types supporting ``pd.NA``.\n\n Parameters\n ----------\n input_array : ExtensionArray, Index, Series or np.ndarray\n convert_string : bool, default True\n Whether object dtypes should be converted to ``StringDtype()``.\n convert_integer : bool, default True\n Whether, if possible, conversion can be done to integer extension types.\n convert_boolean : bool, defaults True\n Whether object dtypes should be converted to ``BooleanDtypes()``.\n convert_floating : bool, defaults True\n Whether, if possible, conversion can be done to floating extension types.\n If `convert_integer` is also True, preference will be give to integer\n dtypes if the floats can be faithfully casted to integers.\n\n Returns\n -------\n dtype\n new dtype\n \"\"\"\n is_extension = is_extension_array_dtype(input_array.dtype)\n if (\n convert_string or convert_integer or convert_boolean or convert_floating\n ) and not is_extension:\n try:\n inferred_dtype = lib.infer_dtype(input_array)\n except ValueError:\n # Required to catch due to Period. 
Can remove once GH 23553 is fixed\n inferred_dtype = input_array.dtype\n\n if not convert_string and is_string_dtype(inferred_dtype):\n inferred_dtype = input_array.dtype\n\n if convert_integer:\n target_int_dtype = \"Int64\"\n\n if is_integer_dtype(input_array.dtype):\n from pandas.core.arrays.integer import INT_STR_TO_DTYPE\n\n inferred_dtype = INT_STR_TO_DTYPE.get(\n input_array.dtype.name, target_int_dtype\n )\n if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(\n input_array.dtype\n ):\n inferred_dtype = target_int_dtype\n\n else:\n if is_integer_dtype(inferred_dtype):\n inferred_dtype = input_array.dtype\n\n if convert_floating:\n if not is_integer_dtype(input_array.dtype) and is_numeric_dtype(\n input_array.dtype\n ):\n from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE\n\n inferred_float_dtype = FLOAT_STR_TO_DTYPE.get(\n input_array.dtype.name, \"Float64\"\n )\n # if we could also convert to integer, check if all floats\n # are actually integers\n if convert_integer:\n arr = input_array[notna(input_array)]\n if (arr.astype(int) == arr).all():\n inferred_dtype = \"Int64\"\n else:\n inferred_dtype = inferred_float_dtype\n else:\n inferred_dtype = inferred_float_dtype\n else:\n if is_float_dtype(inferred_dtype):\n inferred_dtype = input_array.dtype\n\n if convert_boolean:\n if is_bool_dtype(input_array.dtype):\n inferred_dtype = \"boolean\"\n else:\n if isinstance(inferred_dtype, str) and inferred_dtype == \"boolean\":\n inferred_dtype = input_array.dtype\n\n else:\n inferred_dtype = input_array.dtype\n\n return inferred_dtype\n\n\ndef maybe_castable(arr: np.ndarray) -> bool:\n # return False to force a non-fastpath\n\n assert isinstance(arr, np.ndarray) # GH 37024\n\n # check datetime64[ns]/timedelta64[ns] are valid\n # otherwise try to coerce\n kind = arr.dtype.kind\n if kind == \"M\":\n return is_datetime64_ns_dtype(arr.dtype)\n elif kind == \"m\":\n return is_timedelta64_ns_dtype(arr.dtype)\n\n return arr.dtype.name not in POSSIBLY_CAST_DTYPES\n\n\ndef maybe_infer_to_datetimelike(\n value: Union[ArrayLike, Scalar], convert_dates: bool = False\n):\n \"\"\"\n we might have a array (or single object) that is datetime like,\n and no dtype is passed don't change the value unless we find a\n datetime/timedelta set\n\n this is pretty strict in that a datetime/timedelta is REQUIRED\n in addition to possible nulls/string likes\n\n Parameters\n ----------\n value : np.array / Series / Index / list-like\n convert_dates : bool, default False\n if True try really hard to convert dates (such as datetime.date), other\n leave inferred dtype 'date' alone\n\n \"\"\"\n if isinstance(value, (ABCIndex, ABCExtensionArray)):\n if not is_object_dtype(value.dtype):\n raise ValueError(\"array-like value must be object-dtype\")\n\n v = value\n\n if not is_list_like(v):\n v = [v]\n v = np.array(v, copy=False)\n\n # we only care about object dtypes\n if not is_object_dtype(v):\n return value\n\n shape = v.shape\n if v.ndim != 1:\n v = v.ravel()\n\n if not len(v):\n return value\n\n def try_datetime(v):\n # safe coerce to datetime64\n try:\n # GH19671\n v = tslib.array_to_datetime(v, require_iso8601=True, errors=\"raise\")[0]\n except ValueError:\n\n # we might have a sequence of the same-datetimes with tz's\n # if so coerce to a DatetimeIndex; if they are not the same,\n # then these stay as object dtype, xref GH19671\n from pandas import DatetimeIndex\n\n try:\n\n values, tz = conversion.datetime_to_datetime64(v)\n return DatetimeIndex(values).tz_localize(\"UTC\").tz_convert(tz=tz)\n 
except (ValueError, TypeError):\n pass\n\n except Exception:\n pass\n\n return v.reshape(shape)\n\n def try_timedelta(v):\n # safe coerce to timedelta64\n\n # will try first with a string & object conversion\n from pandas import to_timedelta\n\n try:\n td_values = to_timedelta(v)\n except ValueError:\n return v.reshape(shape)\n else:\n return np.asarray(td_values).reshape(shape)\n\n inferred_type = lib.infer_datetimelike_array(ensure_object(v))\n\n if inferred_type == \"date\" and convert_dates:\n value = try_datetime(v)\n elif inferred_type == \"datetime\":\n value = try_datetime(v)\n elif inferred_type == \"timedelta\":\n value = try_timedelta(v)\n elif inferred_type == \"nat\":\n\n # if all NaT, return as datetime\n if isna(v).all():\n value = try_datetime(v)\n else:\n\n # We have at least a NaT and a string\n # try timedelta first to avoid spurious datetime conversions\n # e.g. '00:00:01' is a timedelta but technically is also a datetime\n value = try_timedelta(v)\n if lib.infer_dtype(value, skipna=False) in [\"mixed\"]:\n # cannot skip missing values, as NaT implies that the string\n # is actually a datetime\n value = try_datetime(v)\n\n return value\n\n\ndef maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]):\n \"\"\"\n try to cast the array/value to a datetimelike dtype, converting float\n nan to iNaT\n \"\"\"\n from pandas.core.tools.datetimes import to_datetime\n from pandas.core.tools.timedeltas import to_timedelta\n\n if dtype is not None:\n is_datetime64 = is_datetime64_dtype(dtype)\n is_datetime64tz = is_datetime64tz_dtype(dtype)\n is_timedelta64 = is_timedelta64_dtype(dtype)\n\n if is_datetime64 or is_datetime64tz or is_timedelta64:\n\n # Force the dtype if needed.\n msg = (\n f\"The '{dtype.name}' dtype has no unit. \"\n f\"Please pass in '{dtype.name}[ns]' instead.\"\n )\n\n if is_datetime64:\n # unpack e.g. SparseDtype\n dtype = getattr(dtype, \"subtype\", dtype)\n if not is_dtype_equal(dtype, DT64NS_DTYPE):\n\n # pandas supports dtype whose granularity is less than [ns]\n # e.g., [ps], [fs], [as]\n if dtype <= np.dtype(\"M8[ns]\"):\n if dtype.name == \"datetime64\":\n raise ValueError(msg)\n dtype = DT64NS_DTYPE\n else:\n raise TypeError(\n f\"cannot convert datetimelike to dtype [{dtype}]\"\n )\n elif is_datetime64tz:\n\n # our NaT doesn't support tz's\n # this will coerce to DatetimeIndex with\n # a matching dtype below\n if is_scalar(value) and isna(value):\n value = [value]\n\n elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE):\n\n # pandas supports dtype whose granularity is less than [ns]\n # e.g., [ps], [fs], [as]\n if dtype <= np.dtype(\"m8[ns]\"):\n if dtype.name == \"timedelta64\":\n raise ValueError(msg)\n dtype = TD64NS_DTYPE\n else:\n raise TypeError(f\"cannot convert timedeltalike to dtype [{dtype}]\")\n\n if is_scalar(value):\n value = maybe_unbox_datetimelike(value, dtype)\n elif not is_sparse(value):\n value = np.array(value, copy=False)\n\n # have a scalar array-like (e.g. NaT)\n if value.ndim == 0:\n value = iNaT\n\n # we have an array of datetime or timedeltas & nulls\n elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):\n try:\n if is_datetime64:\n value = to_datetime(value, errors=\"raise\")\n # GH 25843: Remove tz information since the dtype\n # didn't specify one\n if value.tz is not None:\n value = value.tz_localize(None)\n value = value._values\n elif is_datetime64tz:\n # The string check can be removed once issue #13712\n # is solved. 
String data that is passed with a\n # datetime64tz is assumed to be naive which should\n # be localized to the timezone.\n is_dt_string = is_string_dtype(value.dtype)\n value = to_datetime(value, errors=\"raise\").array\n if is_dt_string:\n # Strings here are naive, so directly localize\n value = value.tz_localize(dtype.tz)\n else:\n # Numeric values are UTC at this point,\n # so localize and convert\n value = value.tz_localize(\"UTC\").tz_convert(dtype.tz)\n elif is_timedelta64:\n value = to_timedelta(value, errors=\"raise\")._values\n except OutOfBoundsDatetime:\n raise\n except (ValueError, TypeError):\n pass\n\n # coerce datetimelike to object\n elif is_datetime64_dtype(\n getattr(value, \"dtype\", None)\n ) and not is_datetime64_dtype(dtype):\n if is_object_dtype(dtype):\n if value.dtype != DT64NS_DTYPE:\n value = value.astype(DT64NS_DTYPE)\n ints = np.asarray(value).view(\"i8\")\n return ints_to_pydatetime(ints)\n\n # we have a non-castable dtype that was passed\n raise TypeError(f\"Cannot cast datetime64 to {dtype}\")\n\n else:\n\n is_array = isinstance(value, np.ndarray)\n\n # catch a datetime/timedelta that is not of ns variety\n # and no coercion specified\n if is_array and value.dtype.kind in [\"M\", \"m\"]:\n dtype = value.dtype\n\n if dtype.kind == \"M\" and dtype != DT64NS_DTYPE:\n value = conversion.ensure_datetime64ns(value)\n\n elif dtype.kind == \"m\" and dtype != TD64NS_DTYPE:\n value = conversion.ensure_timedelta64ns(value)\n\n # only do this if we have an array and the dtype of the array is not\n # setup already we are not an integer/object, so don't bother with this\n # conversion\n elif not (\n is_array\n and not (\n issubclass(value.dtype.type, np.integer) or value.dtype == np.object_\n )\n ):\n value = maybe_infer_to_datetimelike(value)\n\n return value\n\n\ndef find_common_type(types: List[DtypeObj]) -> DtypeObj:\n \"\"\"\n Find a common data type among the given dtypes.\n\n Parameters\n ----------\n types : list of dtypes\n\n Returns\n -------\n pandas extension or numpy dtype\n\n See Also\n --------\n numpy.find_common_type\n\n \"\"\"\n if len(types) == 0:\n raise ValueError(\"no types given\")\n\n first = types[0]\n\n # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)\n # => object\n if all(is_dtype_equal(first, t) for t in types[1:]):\n return first\n\n # get unique types (dict.fromkeys is used as order-preserving set())\n types = list(dict.fromkeys(types).keys())\n\n if any(isinstance(t, ExtensionDtype) for t in types):\n for t in types:\n if isinstance(t, ExtensionDtype):\n res = t._get_common_dtype(types)\n if res is not None:\n return res\n return np.dtype(\"object\")\n\n # take lowest unit\n if all(is_datetime64_dtype(t) for t in types):\n return np.dtype(\"datetime64[ns]\")\n if all(is_timedelta64_dtype(t) for t in types):\n return np.dtype(\"timedelta64[ns]\")\n\n # don't mix bool / int or float or complex\n # this is different from numpy, which casts bool with float/int as int\n has_bools = any(is_bool_dtype(t) for t in types)\n if has_bools:\n for t in types:\n if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):\n return np.dtype(\"object\")\n\n return np.find_common_type(types, [])\n\n\ndef construct_1d_arraylike_from_scalar(\n value: Scalar, length: int, dtype: Optional[DtypeObj]\n) -> ArrayLike:\n \"\"\"\n create a np.ndarray / pandas type of specified shape and dtype\n filled with values\n\n Parameters\n ----------\n value : scalar value\n length : int\n dtype : pandas_dtype or np.dtype\n\n Returns\n -------\n 
np.ndarray / pandas type of length, filled with value\n\n \"\"\"\n\n if dtype is None:\n try:\n dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)\n except OutOfBoundsDatetime:\n dtype = np.dtype(object)\n\n if is_extension_array_dtype(dtype):\n cls = dtype.construct_array_type()\n subarr = cls._from_sequence([value] * length, dtype=dtype)\n\n else:\n\n if length and is_integer_dtype(dtype) and isna(value):\n # coerce if we have nan for an integer dtype\n dtype = np.dtype(\"float64\")\n elif isinstance(dtype, np.dtype) and dtype.kind in (\"U\", \"S\"):\n # we need to coerce to object dtype to avoid\n # to allow numpy to take our string as a scalar value\n dtype = np.dtype(\"object\")\n if not isna(value):\n value = ensure_str(value)\n elif dtype.kind in [\"M\", \"m\"]:\n value = maybe_unbox_datetimelike(value, dtype)\n\n subarr = np.empty(length, dtype=dtype)\n subarr.fill(value)\n\n return subarr\n\n\ndef construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:\n \"\"\"\n Transform any list-like object in a 1-dimensional numpy array of object\n dtype.\n\n Parameters\n ----------\n values : any iterable which has a len()\n\n Raises\n ------\n TypeError\n * If `values` does not have a len()\n\n Returns\n -------\n 1-dimensional numpy array of dtype object\n \"\"\"\n # numpy will try to interpret nested lists as further dimensions, hence\n # making a 1D array that contains list-likes is a bit tricky:\n result = np.empty(len(values), dtype=\"object\")\n result[:] = values\n return result\n\n\ndef construct_1d_ndarray_preserving_na(\n values: Sequence, dtype: Optional[DtypeObj] = None, copy: bool = False\n) -> np.ndarray:\n \"\"\"\n Construct a new ndarray, coercing `values` to `dtype`, preserving NA.\n\n Parameters\n ----------\n values : Sequence\n dtype : numpy.dtype, optional\n copy : bool, default False\n Note that copies may still be made with ``copy=False`` if casting\n is required.\n\n Returns\n -------\n arr : ndarray[dtype]\n\n Examples\n --------\n >>> np.array([1.0, 2.0, None], dtype='str')\n array(['1.0', '2.0', 'None'], dtype='<U4')\n\n >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str'))\n array(['1.0', '2.0', None], dtype=object)\n \"\"\"\n\n if dtype is not None and dtype.kind == \"U\":\n subarr = lib.ensure_string_array(values, convert_na_value=False, copy=copy)\n else:\n subarr = np.array(values, dtype=dtype, copy=copy)\n\n return subarr\n\n\ndef maybe_cast_to_integer_array(arr, dtype: Dtype, copy: bool = False):\n \"\"\"\n Takes any dtype and returns the casted version, raising for when data is\n incompatible with integer/unsigned integer dtypes.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n arr : array-like\n The array to cast.\n dtype : str, np.dtype\n The integer dtype to cast the array to.\n copy: bool, default False\n Whether to make a copy of the array before returning.\n\n Returns\n -------\n ndarray\n Array of integer or unsigned integer dtype.\n\n Raises\n ------\n OverflowError : the dtype is incompatible with the data\n ValueError : loss of precision has occurred during casting\n\n Examples\n --------\n If you try to coerce negative values to unsigned integers, it raises:\n\n >>> pd.Series([-1], dtype=\"uint64\")\n Traceback (most recent call last):\n ...\n OverflowError: Trying to coerce negative values to unsigned integers\n\n Also, if you try to coerce float values to integers, it raises:\n\n >>> pd.Series([1, 2, 3.5], dtype=\"int64\")\n Traceback (most recent call last):\n ...\n ValueError: Trying to coerce float values to integers\n \"\"\"\n assert is_integer_dtype(dtype)\n\n try:\n if not hasattr(arr, \"astype\"):\n casted = np.array(arr, dtype=dtype, copy=copy)\n else:\n casted = arr.astype(dtype, copy=copy)\n except OverflowError as err:\n raise OverflowError(\n \"The elements provided in the data cannot all be \"\n f\"casted to the dtype {dtype}\"\n ) from err\n\n if np.array_equal(arr, casted):\n return casted\n\n # We do this casting to allow for proper\n # data and dtype checking.\n #\n # We didn't do this earlier because NumPy\n # doesn't handle `uint64` correctly.\n arr = np.asarray(arr)\n\n if is_unsigned_integer_dtype(dtype) and (arr < 0).any():\n raise OverflowError(\"Trying to coerce negative values to unsigned integers\")\n\n if is_float_dtype(arr) or is_object_dtype(arr):\n raise ValueError(\"Trying to coerce float values to integers\")\n\n\ndef convert_scalar_for_putitemlike(scalar: Scalar, dtype: np.dtype) -> Scalar:\n \"\"\"\n Convert datetimelike scalar if we are setting into a datetime64\n or timedelta64 ndarray.\n\n Parameters\n ----------\n scalar : scalar\n dtype : np.dtype\n\n Returns\n -------\n scalar\n \"\"\"\n if dtype.kind in [\"m\", \"M\"]:\n scalar = maybe_box_datetimelike(scalar, dtype)\n return maybe_unbox_datetimelike(scalar, dtype)\n else:\n validate_numeric_casting(dtype, scalar)\n return scalar\n\n\ndef validate_numeric_casting(dtype: np.dtype, value: Scalar) -> None:\n \"\"\"\n Check that we can losslessly insert the given value into an array\n with the given dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n value : scalar\n\n Raises\n ------\n ValueError\n \"\"\"\n if issubclass(dtype.type, (np.integer, np.bool_)):\n if is_float(value) and np.isnan(value):\n raise ValueError(\"Cannot assign nan to integer series\")\n\n if issubclass(dtype.type, (np.integer, np.floating, complex)) and not issubclass(\n dtype.type, np.bool_\n ):\n if is_bool(value):\n raise ValueError(\"Cannot assign bool to float/integer series\")\n"
] |
[
[
"numpy.array_equal",
"pandas.core.arrays.floating.FLOAT_STR_TO_DTYPE.get",
"pandas.core.dtypes.common.ensure_int32",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas._libs.lib.infer_dtype",
"numpy.empty",
"numpy.prod",
"pandas.core.dtypes.common.is_bool",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.common.ensure_str",
"pandas._libs.tslibs.conversion.ensure_timedelta64ns",
"pandas.core.dtypes.dtypes.DatetimeTZDtype",
"numpy.iinfo",
"pandas.core.dtypes.common.is_timedelta64_ns_dtype",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.core.dtypes.dtypes.PeriodDtype",
"numpy.promote_types",
"pandas.DatetimeIndex",
"pandas.core.arrays.integer.Int64Dtype",
"pandas._libs.lib.ensure_string_array",
"numpy.issubdtype",
"pandas.core.dtypes.missing.isna",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.arrays.floating.Float64Dtype",
"pandas._libs.tslibs.conversion.datetime_to_datetime64",
"numpy.array",
"pandas.core.dtypes.dtypes.IntervalDtype",
"pandas.core.dtypes.common.is_complex_dtype",
"pandas._libs.lib.is_period",
"pandas.core.dtypes.common.is_sparse",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.core.arrays.integer.INT_STR_TO_DTYPE.get",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas._libs.tslibs.Timestamp",
"pandas.core.dtypes.common.is_string_dtype",
"pandas._libs.tslibs.Timedelta",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.missing.is_valid_nat_for_dtype",
"pandas._libs.lib.item_from_zerodim",
"pandas.to_datetime",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.inference.is_list_like",
"pandas.core.dtypes.common.is_integer",
"numpy.allclose",
"pandas.core.dtypes.common.ensure_int64",
"numpy.min_scalar_type",
"pandas._libs.tslibs.conversion.ensure_datetime64ns",
"numpy.datetime64",
"pandas.core.dtypes.common.ensure_int16",
"numpy.find_common_type",
"numpy.asarray",
"pandas._libs.lib.is_interval",
"pandas._libs.tslib.array_to_datetime",
"pandas.core.dtypes.common.is_complex",
"pandas._libs.tslibs.ints_to_pydatetime",
"pandas.core.arrays.PeriodArray",
"pandas.core.dtypes.common.ensure_object",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.core.dtypes.common.ensure_int8",
"pandas.core.dtypes.common.is_datetime64_ns_dtype",
"numpy.dtype",
"pandas.core.dtypes.missing.notna",
"numpy.putmask",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.util._validators.validate_bool_kwarg",
"numpy.can_cast",
"numpy.isfinite",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas._libs.lib.maybe_convert_objects",
"numpy.place",
"numpy.timedelta64",
"numpy.isnan",
"pandas.to_timedelta",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.core.dtypes.common.is_categorical_dtype"
]
] |
foroozandehgroup/nmr_sims
|
[
"a035bdf75f467f88e96f1ef90c26b9dd3b5c2884"
] |
[
"nmr_sims/_sanity.py"
] |
[
"# _sanity.py\n# Simon Hulse\n# [email protected]\n# Last Edited: Fri 18 Feb 2022 13:48:21 GMT\n\nfrom dataclasses import dataclass\nimport re\nfrom typing import Any, Dict, Iterable, Tuple, Union\nimport numpy as np\nfrom nmr_sims import nuclei\n\n\ndef is_multiple_of_one_half(x):\n return round(x, 10) % 0.5 == 0\n\n\ndef is_an_iterable_of_spins(obj):\n try:\n iter(obj)\n except TypeError:\n return False\n return all([is_multiple_of_one_half(x) for x in obj])\n\n\ndef check_dict_with_int_keys(\n obj: Any, varname: str, consecutive: bool = False, max_: Union[int, None] = None,\n forbidden: Union[Iterable[int], None] = None,\n) -> None:\n errmsg = f\"`{varname}` should be a dict, and it's keys should be<1>ints<2>.<3>\"\n if consecutive:\n errmsg = errmsg.replace(\"<1>\", \" consecutive \")\n errmsg = errmsg.replace(\"<2>\", \", starting at 1\")\n errmsg = errmsg.replace(\"<3>\", \"\")\n elif isinstance(max_, int):\n errmsg = errmsg.replace(\"<1>\", \" positive \")\n errmsg = errmsg.replace(\"<2>\", f\", that are no greater than {max_}\")\n if forbidden is not None:\n if len(forbidden) == 1:\n errmsg = errmsg.replace(\"<3>\", f\" {forbidden[0]} is not permitted.\")\n else:\n errmsg = errmsg.replace(\n \"<3>\", \" \" + \", \".join(forbidden) + \" are not permitted.\",\n )\n else:\n errmsg = errmsg.replace(\"<3>\", \"\")\n\n else:\n errmsg = errmsg.replace(\"<1>\", \" \")\n errmsg = errmsg.replace(\"<2>\", \"\")\n errmsg = errmsg.replace(\"<3>\", \"\")\n\n if not isinstance(obj, dict):\n raise TypeError(errmsg)\n keys = list(obj.keys())\n if any([not isinstance(key, int) for key in keys]):\n raise ValueError(errmsg)\n if consecutive and (sorted(keys) != list(range(1, len(keys) + 1))):\n raise ValueError(errmsg)\n if isinstance(max_, int) and any([key > max_ for key in keys]):\n raise ValueError(errmsg)\n if forbidden is not None and any([key in forbidden for key in keys]):\n raise ValueError(errmsg)\n\n\ndef process_spins(spins: Any, default_nucleus: Any) -> Iterable[Dict]:\n check_dict_with_int_keys(spins, \"spins\", consecutive=True)\n nspins = len(spins)\n spin_dict = {}\n couplings = np.zeros((nspins, nspins))\n default_nucleus = process_nucleus(default_nucleus, \"default_nucleus\")\n\n for i, spin in spins.items():\n # Process nucleus\n if \"nucleus\" in spin.keys():\n nucleus = process_nucleus(spin[\"nucleus\"], f\"spins[{i}][\\\"nucelus\\\"]\")\n else:\n nucleus = default_nucleus\n\n if \"shift\" not in spin.keys():\n raise ValueError(\n \"Each value in `spins` should be a dict with the keys \"\n \"\\\"shift\\\" and (optional) \\\"couplings\\\", \\\"nucleus\\\". \"\n f\"This is not satisfied by spin {i}.\"\n )\n\n if not isinstance(spin[\"shift\"], (int, float)):\n raise TypeError(\n \"\\\"shift\\\" entries should be scalar values. 
This is not \"\n f\"satisfied by spin {i}.\"\n )\n\n shift = float(spin[\"shift\"])\n\n if \"couplings\" in spin.keys():\n check_dict_with_int_keys(\n spin[\"couplings\"], f\"spins[{i}][\\\"couplings\\\"]\", max_=nspins,\n forbidden=[i],\n )\n\n for j, coupling in spin[\"couplings\"].items():\n current_value = couplings[i - 1, j - 1]\n if float(current_value) != 0.:\n if coupling != current_value:\n raise ValueError(\n f\"Contradictory couplings given between spins {j} and \"\n f\"{i}: {float(coupling)} and {current_value}.\"\n )\n else:\n couplings[i - 1, j - 1] = coupling\n couplings[j - 1, i - 1] = coupling\n\n spin_dict[i] = Spin(\n nucleus,\n float(shift),\n {\n j + 1: couplings[i - 1, j] for j in range(i, nspins)\n },\n )\n\n return spin_dict\n\n\ndef process_nucleus(nucleus: Any, varname: str) -> nuclei.Nucleus:\n if isinstance(nucleus, nuclei.Nucleus):\n return nucleus\n elif nucleus in nuclei.supported_nuclei:\n return nuclei.supported_nuclei[nucleus]\n else:\n raise ValueError(\n \"`{varname}` specified is not recognised. Either provide a \"\n \"`nuclei.Nucleus instance, or one of the following\\n:\" +\n \", \".join([f\"\\\"{n}\\\"\" for n in nuclei.supported_nuclei])\n )\n\n\ndef process_value(\n value: Any, varname: str, regex: str, can_be_negative: bool\n) -> Tuple[float, str]:\n errmsg = (\n f\"`{varname}` should be a<POS>scalar, or a string satifying \\\"{regex}\\\"\"\n )\n if can_be_negative:\n errmsg = errmsg.replace(\"<POS>\", \" \")\n else:\n errmsg = errmsg.replace(\"<POS>\", \" positive \")\n\n if isinstance(value, (int, float)):\n if can_be_negative:\n return value, None\n else:\n if value > 0:\n return value, None\n else:\n raise ValueError(errmsg)\n\n if not isinstance(value, str):\n raise ValueError(errmsg)\n\n match = re.match(regex, value, re.IGNORECASE)\n if match is None:\n raise ValueError(errmsg)\n else:\n value = float(match.group(1))\n unit = match.group(2).lower()\n return value, unit\n\n\ndef process_temperature(temperature: Any) -> float:\n temp, unit = process_value(\n temperature, \"temperature\", r\"^(\\d*\\.?\\d*)(C|K)$\", False,\n )\n if unit is None or unit == \"k\":\n return temp\n elif unit == \"c\":\n return temp + 273.15\n\n\ndef process_field(field: Any):\n field, unit = process_value(field, \"field\", r\"^(\\d*\\.?\\d*)(T|MHz)$\", False)\n if unit is None or unit == \"t\":\n return field\n elif unit == \"mhz\":\n return 2e6 * np.pi * field / nuclei.supported_nuclei[\"1H\"].gamma\n\n\ndef process_sweep_width(\n sweep_width: Any, nucleus: nuclei.Nucleus, field: float,\n) -> float:\n sweep_width, unit = process_value(\n sweep_width, \"sweep_width\", r\"^(\\d*\\.?\\d*)(Hz|ppm)$\", False,\n )\n if unit is None or unit == \"hz\":\n return sweep_width\n elif unit == \"ppm\":\n return sweep_width * field * np.abs(nucleus.gamma) / (2e6 * np.pi)\n\n\ndef process_offset(\n offset: Any, nucleus: nuclei.Nucleus, field: float,\n) -> float:\n offset, unit = process_value(\n offset, \"offset\", r\"(-?\\d*\\.?\\d*)(Hz|ppm)\", True,\n )\n if unit is None or unit == \"hz\":\n return offset\n elif unit == \"ppm\":\n return offset * field * nucleus.gamma / (2e6 * np.pi)\n\n\ndef process_points(points: Any) -> int:\n if isinstance(points, int) and points > 0:\n return points\n else:\n raise ValueError(\"`points` should be a positive int.\")\n\n\n@dataclass\nclass Spin:\n nucleus: nuclei.Nucleus\n shift: float\n couplings: Dict[int, float]\n"
] |
[
[
"numpy.abs",
"numpy.zeros"
]
] |
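Note: a minimal usage sketch for the sanity helpers above, not part of the repository file. The import path nmr_sims._sanity is an assumption inferred from the file header comment, and "1H" is assumed to be a key of nuclei.supported_nuclei, as process_field above already relies on; all numeric values are illustrative.

# Hypothetical usage of process_spins / process_temperature
from nmr_sims._sanity import process_spins, process_temperature  # assumed import path

spins = {
    1: {"shift": 1.2, "couplings": {2: 7.0}},  # scalar coupling to spin 2, in Hz
    2: {"shift": 3.4},                         # couplings are propagated symmetrically
}
spin_dict = process_spins(spins, "1H")
print(spin_dict[1].couplings)      # {2: 7.0}
print(process_temperature("25C"))  # 298.15: celsius is converted to kelvin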
feigelman/LNAplusplus
|
[
"091218a8f956ce8750ae2dee0be775dc5e7ca6bf"
] |
[
"examples/runBirthDeath.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis is an LNA++ application example with a simple birth-death process \ninvolving a constitutively active DNA, and stochastic production and \ndegradation of mRNA and protein:\n\nModel of birth-death process:\n==============================\n k_m\nDNA --> DNA+mRNA\n g_m\nmRNA --> 0\n k_p\nmRNA --> mRNA + protein\n g_p\nprotein --> 0\n\nA more detailed model description is provided in \ndocumentation/Documentation.pdf.\n\nThis example is used for performance testing of LNA++. We \n1) show how to perform exact simulation of the stochastic process and use LNA++\nto compute the mean and temporal auto-covariance of the proteins, \n2) compute sensitivities of the mean protein copy number with respect to the model \nparameters and compare the computed results to a finite difference approximation,\n3) perform multi-start optimization in order to compare parameter inference \nwith and without the analytical sensitivities and \n4) use LNA++ to evaluate the likelihood landscape for this model.\n\"\"\"\n\n# Open plot windows or plot to file?\nplotToFile = False\nimport sys\nif len(sys.argv) > 1 and sys.argv[1] == '--headless':\n plotToFile = True\n import matplotlib\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\ndef plot(plotToFile, filename): \n if plotToFile:\n plt.savefig(filename)\n else:\n plt.show()\n\n# add LNA++ python module to search path\nimport os\nscriptDir = os.path.dirname(os.path.realpath(__file__))\nfrom sys import path\npath.append(scriptDir + '/../python')\n\n\n# Create the Python module for the birth / death system\nimport LNA\nLNA.generateLNA(scriptDir + '/BirthDeath.xml', 'BirthDeath', computeSS='BOTH')\n\n# Import the generated module (adapt path first)\npath.append(LNA.lnaModulesDir)\nimport BirthDeathLNA\n\nimport numpy as np\n\n# model parameters\nparameterNames = ['$k_m$','$k_p$','$g_m$','$g_p$']\nTheta = [20, 25, 10, 1]\n\n# time span 0.0, 0.1, ... 
9.9\ntspan = np.arange(0, 10, 0.1).tolist()\n\n# Simulate: IC = steady state\nMRE,Var = BirthDeathLNA.LNA(Theta,tspan)\n\n# plot results:\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\nfig.suptitle('Simulation: IC = Steady state')\n# mean of mRNA\nax1.plot(tspan, MRE[0])\nax1.set_xlabel('Time')\nax1.set_ylabel('mRNA')\nax1.set_title('Macroscopic mean of mRNA')\n# auto-covariance of mRNA\nax2.matshow(Var[0, 0, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\nax2.set_xlabel('Time')\nax2.set_ylabel('Time')\nax2.set_title('Autocovariance of mRNA')\n# mean of protein\nax3.plot(tspan, MRE[1])\nax3.set_xlabel('Time')\nax3.set_ylabel('Protein')\nax3.set_title('Macroscopic mean of protein')\n# auto-covariance of protein\nax4.matshow(Var[1, 1, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\nax4.set_xlabel('Time')\nax4.set_ylabel('Time')\nax4.set_title('Autocovariance of protein')\n\n\n# Simulate: IC = steady state; observable = mRNA and protein\n# Both species are observed (= mRNA,protein)\nObsIndex = [0, 1]\n# Variance of measurement noise\nVarNoise = [10.0,50.0]\n\n# solve LNA and compute measured distribution\nMRE, Var = BirthDeathLNA.LNA(Theta, tspan, merr=VarNoise,obsVar=ObsIndex)\n\n# plot results:\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\nfig.suptitle('Simulation: IC = steady state; observable = mRNA and protein')\n# mean of mRNA\nax1.plot(tspan, MRE[0])\nax1.set_xlabel('Time')\nax1.set_ylabel('mRNA')\nax1.set_title('Macroscopic mean of mRNA')\n# auto-covariance of mRNA\nax2.matshow(Var[0, 0, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\nax2.set_xlabel('Time')\nax2.set_ylabel('Time')\nax2.set_title('Autocovariance of mRNA')\n# mean of protein\nax3.plot(tspan, MRE[1])\nax3.set_xlabel('Time')\nax3.set_ylabel('Protein')\nax3.set_title('Macroscopic mean of protein')\n# auto-covariance of protein\nax4.matshow(Var[1, 1, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\nax4.set_xlabel('Time')\nax4.set_ylabel('Time')\nax4.set_title('Autocovariance of protein')\n\n\n# Simulate: IC = steady state; observable = protein\n# observable is second species (= protein)\nObsIndex = [1]\n# Variance of measurement noise\nVarNoise = [50.0]\n\n# solve LNA and compute measured distribution\nMRE, Var = BirthDeathLNA.LNA(Theta, tspan, merr=VarNoise,obsVar=ObsIndex)\n\n# plot results:\nfig, (ax1, ax2) = plt.subplots(1, 2)\nfig.suptitle('Simulation: IC = steady state; observable = protein')\n# mean of protein\nax1.plot(tspan, MRE[0])\nax1.set_xlabel('Time')\nax1.set_ylabel('Protein')\nax1.set_title('Macroscopic mean of protein')\n# auto-covariance of protein\nax2.matshow(Var[0, 0, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\nax2.set_xlabel('Time')\nax2.set_ylabel('Time')\nax2.set_title('Autocovariance of protein')\n\n\n# Simulate: IC = no steady state; observable = protein\n# observable is second species (= protein)\nObsIndex = [1]\n# Variance of measurement noise\nVarNoise = [0.0]\n# initial values (E(mRNA),E(Protein))\nMRE0 = [2, 200]\n# initial co-variances (cov[mRNA,mRNA],cov(mRNA,Protein),cov(Protein,Protein))\nVar0 = [0, 0, 0]\n\n# solve LNA and compute measured distribution\nMRE, Var = BirthDeathLNA.LNA(Theta, tspan, Y0=MRE0, V0=Var0, merr=VarNoise, obsVar=ObsIndex)\n\n# plot results:\nfig, (ax1, ax2) = plt.subplots(1, 2)\nfig.suptitle('Simulation: IC = no steady state; observable = protein')\n# mean of protein\nax1.plot(tspan, 
MRE[0])\nax1.set_xlabel('Time')\nax1.set_ylabel('Protein')\nax1.set_title('Macroscopic mean of protein')\n# auto-covariance of protein\nax2.matshow(Var[0, 0, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\nax2.set_xlabel('Time')\nax2.set_ylabel('Time')\nax2.set_title('Autocovariance of protein')\n\n\n# Simulate: IC = no steady state; observable = protein; sensitivity = 1st order\n\n# solve LNA and compute measured distribution\nMRE, Var, Sens_MRE, Sens_Var = BirthDeathLNA.LNA(Theta, tspan, Y0=MRE0, V0=Var0, merr=VarNoise, obsVar=ObsIndex, computeSens=True)\n\n# plot results:\nfig, axs = plt.subplots(2, len(Theta))\nfig.suptitle('Simulation: IC = no steady state; observable = protein; sensitivity = 1st order')\nfor i in range(len(Theta)):\n ax1, ax2 = axs[0][i], axs[1][i]\n # Sensitivity of mean of protein\n ax1.plot(tspan, Sens_MRE[0, i, :])\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Sensitivity of mean')\n ax1.set_title(parameterNames[i])\n # Sensitivity of auto-covariance of protein\n ax2.matshow(Sens_Var[0, 0, i, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Sensitivity of autocovariance')\n ax2.set_title(parameterNames[i])\n\n\n# Simulate: IC = no steady state; observable = protein; sensitivity = 1st & 2nd order\n\n# solve LNA and compute measured distribution\nMRE, Var, Sens_MRE, Sens_Var, Sens2_MRE, Sens2_Var = BirthDeathLNA.LNA(Theta, tspan, Y0=MRE0, V0=Var0, merr=VarNoise, obsVar=ObsIndex, computeSens2=True)\n\n# plot results:\nfig, axs = plt.subplots(len(Theta), len(Theta))\nfig.suptitle('Simulation of mean: IC = no steady state; observable = protein; sensitivity = 1st & 2nd order')\nfor i in range(len(Theta)):\n for j in range(len(Theta)):\n ax1 = axs[i][j]\n # Sensitivity of mean of protein\n ax1.plot(tspan, Sens2_MRE[0, i, j, :])\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Sensitivity of mean')\n ax1.set_title('(%s, %s)' % (parameterNames[i], parameterNames[j]))\n\nfig, axs = plt.subplots(len(Theta), len(Theta))\nfig.suptitle('Simulation of autocovariance: IC = no steady state; observable = protein; sensitivity = 1st & 2nd order')\nfor i in range(len(Theta)):\n for j in range(len(Theta)):\n ax1 = axs[i][j]\n # Sensitivity of autocovariance of protein\n ax1.matshow(Sens2_Var[0, 0, i, j, :, :], extent=[np.min(tspan), np.max(tspan), np.max(tspan), np.min(tspan)])\n ax1.set_xlabel('Time')\n ax1.set_ylabel('Time')\n ax1.set_title('(%s, %s)' % (parameterNames[i], parameterNames[j]))\n\n# Test of cross-species sensitivities\ni = 4 - 1\nj = 4 - 1\neps_theta = 1e-4\nTheta_per = Theta[:]\nTheta_per[i] += eps_theta\nMRE, Var, sMRE, sVar, s2MRE, s2Var = BirthDeathLNA.LNA(Theta, tspan, Y0=MRE0, V0=Var0, computeSens2=True)\nMRE_per, Var_per, sMRE_per, sVar_per, s2MRE_per, s2Var_per = BirthDeathLNA.LNA(Theta_per, tspan, Y0=MRE0, V0=Var0, computeSens2=True)\n\nk1 = 50 - 1\nk2 = 100 - 1\n\n# 1st order sensitivity matrix\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3)\nim = ax1.imshow((Var_per[:, :, k1, k2] - Var[:, :, k1, k2]) / eps_theta)\nax1.set_title('finite differences')\nfig.colorbar(im, ax=ax1)\nim = ax2.imshow(sVar[:, :, i, k1, k2])\nax2.set_title('analytical sensitivities')\nfig.colorbar(im, ax=ax2)\nim = ax3.imshow((Var_per[:, :, k1, k2] - Var[:, :, k1, k2]) / eps_theta - sVar[:, :, i, k1, k2])\nax3.set_title('error')\nfig.colorbar(im, ax=ax3)\nplt.show()\n\n# 2nd order sensitivity matrix for temporal cross-covariance of protein (species 2) abundance\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3)\nim = 
ax1.imshow(np.squeeze((sVar_per[1, 1, j, :, :] - sVar[1, 1, j, :, :]) / eps_theta))\nax1.set_title('finite differences')\nfig.colorbar(im, ax=ax1)\nim = ax2.imshow(np.squeeze(s2Var[1, 1, i, j, :, :]))\nax2.set_title('analytical sensitivities')\nfig.colorbar(im, ax=ax2)\nim = ax3.imshow(np.squeeze((sVar_per[1, 1, j, :, :] - sVar[1, 1, j, :, :]) / eps_theta) - np.squeeze(s2Var[1, 1, i, j, :, :]))\nax3.set_title('error')\nfig.colorbar(im, ax=ax3)\nplt.show()\n\n# 2nd order sensitivity matrix for temporal cross-covariance of two time points\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3)\nim = ax1.imshow((sVar_per[:, :, j, k1, k2] - sVar[:, :, j, k1, k2]) / eps_theta)\nax1.set_title('finite differences')\nfig.colorbar(im, ax=ax1)\nim = ax2.imshow(s2Var[:, :, i, j, k1, k2])\nax2.set_title('analytical sensitivities')\nfig.colorbar(im, ax=ax2)\nim = ax3.imshow((sVar_per[:, :, j, k1, k2] - sVar[:, :, j, k1, k2]) / eps_theta - s2Var[:, :, i, j, k1, k2])\nax3.set_title('error')\nfig.colorbar(im, ax=ax3)\nplt.show()\n"
] |
[
[
"matplotlib.use",
"numpy.max",
"matplotlib.pyplot.savefig",
"numpy.min",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show",
"numpy.squeeze"
]
] |
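Note: the sensitivity checks in the script above all follow one pattern: perturb a single parameter by eps_theta, re-solve, and compare the finite difference against the analytical sensitivity. A self-contained numpy sketch of that pattern on a toy function (not the generated BirthDeathLNA module; f and f_jac are illustrative stand-ins):

import numpy as np

def f(theta):
    # toy stand-in for an LNA output (e.g. a macroscopic mean)
    return np.array([theta[0] ** 2, theta[0] * theta[1]])

def f_jac(theta):
    # analytical Jacobian of f; column k holds d f / d theta_k
    return np.array([[2 * theta[0], 0.0],
                     [theta[1], theta[0]]])

theta = np.array([20.0, 25.0])
eps_theta = 1e-4
i = 0
theta_per = theta.copy()
theta_per[i] += eps_theta
fd = (f(theta_per) - f(theta)) / eps_theta      # finite-difference column i
print(np.max(np.abs(fd - f_jac(theta)[:, i])))  # ~1e-4: agreement to O(eps)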
HuaijiaLin/AGSS-VOS
|
[
"e9272365aa45bf098316d7111238fe0ab8df8a17"
] |
[
"train_davis.py"
] |
[
"import sys\r\n\r\nimport os\r\nimport time\r\nimport logging\r\nimport numpy as np\r\nimport argparse\r\nimport random\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.parallel\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim as optim\r\nfrom torch.utils import data\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\nfrom dataset.vos import Trainset\r\nfrom networks.agssvos import AGSSVOS\r\nsys.path.append('flow_inference')\r\nfrom flow_inference.flow_inference import Inference_flow\r\nfrom tools import preprocess, visualize, utils\r\nimport timeit\r\nimport cv2\r\n\r\n\r\ndef get_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--batch-size', type=int)\r\n parser.add_argument('--root-data', type=str)\r\n parser.add_argument('--root-all-data', type=str)\r\n parser.add_argument('--meta-list', type=str)\r\n parser.add_argument('--epoch', type=int)\r\n parser.add_argument('--start-epoch', type=int, default=0)\r\n parser.add_argument('--sample-size', type=int, default=10)\r\n parser.add_argument('--gpu', type=str, default='0')\r\n parser.add_argument('--lr', type=float)\r\n parser.add_argument('--finetune', action='store_true')\r\n parser.add_argument('--init-atn', action='store_true')\r\n parser.add_argument('--freeze', action='store_true')\r\n parser.add_argument('--set-bn-no-update', action='store_true')\r\n parser.add_argument('--random-crop', action='store_true')\r\n parser.add_argument('--iou-thr-per-obj', action='store_true')\r\n parser.add_argument('--lr-atn', action='store_true')\r\n parser.add_argument('--lr-after-atn', action='store_true')\r\n parser.add_argument('--three-frames-data', action='store_true')\r\n parser.add_argument('--loss-iou-maxmin', action='store_true')\r\n parser.add_argument('--random-ref', action='store_true')\r\n parser.add_argument('--random-skip', action='store_true')\r\n parser.add_argument('--restore', type=str, default=None)\r\n parser.add_argument('--sample-dir', type=str)\r\n parser.add_argument('--snapshot-dir', type=str)\r\n parser.add_argument('--crop_size', type=int, default=512)\r\n parser.add_argument('--resize_h', type=int, default=360)\r\n parser.add_argument('--resize_w', type=int, default=640)\r\n parser.add_argument('--rgb_max', type=float, default=255.)\r\n parser.add_argument('--div_flow', type=int, default=20)\r\n parser.add_argument('--ignore_label', type=int, default=255)\r\n parser.add_argument('--scale_min', type=float, default=0.5, help='minimum random scale')\r\n parser.add_argument('--scale_max', type=float, default=2.0, help='maximum random scale')\r\n parser.add_argument('--rotate_min', type=float, default=-10, help='minimum random rotate')\r\n parser.add_argument('--rotate_max', type=float, default=10, help='maximum random rotate')\r\n parser.add_argument('--flow_checkpoint_path', type=str, default='models/FlowNet2-C_checkpoint.pth.tar',\r\n help='pretrained model for flownetC')\r\n parser.add_argument('--fix-lr', type=int, default=0)\r\n parser.add_argument('--show-img', action='store_true', help='show intermediate result')\r\n return parser\r\n\r\n# get logger\r\ndef get_logger():\r\n logger = logging.getLogger('train')\r\n logger.setLevel(logging.INFO)\r\n handler = logging.StreamHandler()\r\n fmt = \"[%(asctime)s line %(lineno)d] %(message)s\"\r\n handler.setFormatter(logging.Formatter(fmt))\r\n logger.addHandler(handler)\r\n return logger\r\n\r\ndef show(images, labels, preds, prev_labs):\r\n os.system('rm %s/*' % args.sample_dir)\r\n for i_bs 
in range(images.shape[0]):\r\n        for j_bs in range(labels.shape[1]):\r\n            path = args.sample_dir + '/' + str(i_bs)+'_'+str(j_bs) + '#'\r\n            image = visualize.denorm_image(images[i_bs, j_bs, :])\r\n            label = visualize.vis_label(labels[i_bs, j_bs], 1, 128)\r\n            cv2.imwrite(path + 'img.jpg', image)\r\n            cv2.imwrite(path + 'lab.jpg', label)\r\n\r\n        for j_bs in range(preds.shape[1]):\r\n            path = args.sample_dir + '/' + str(i_bs)+'_'+str(j_bs) + '#'\r\n            pred = preds[i_bs,j_bs]*255\r\n            cv2.imwrite(path + 'pred.jpg', pred)\r\n\r\n        for j_bs in range(prev_labs.shape[1]):\r\n            path = args.sample_dir + '/' + str(i_bs)+'_'+str(j_bs+2) + '#'\r\n            prev_lab = prev_labs[i_bs,j_bs]*255\r\n            cv2.imwrite(path + 'prev_lab.jpg', prev_lab)\r\n\r\ndef main():\r\n    global args, logger, writer\r\n    args = get_parser().parse_args()\r\n    logger_train = get_logger()\r\n    random.seed(20170624)\r\n    logger_train.info(args)\r\n\r\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\r\n    utils.mkdir(args.snapshot_dir, args.sample_dir)\r\n\r\n    # setting up model\r\n    model = AGSSVOS(init_atn=args.init_atn, freeze=args.freeze).cuda()\r\n    model = torch.nn.DataParallel(model).cuda()\r\n    model.train()\r\n\r\n    for m in model.module.Encoder.modules():\r\n        if isinstance(m, nn.BatchNorm2d):\r\n            m.eval()\r\n            if args.set_bn_no_update:\r\n                for p in m.parameters():\r\n                    p.requires_grad = False\r\n\r\n    if args.restore is not None:\r\n        assert os.path.isfile(args.restore), \"no restore file found at %s\" % (args.restore)\r\n        logger_train.info(\"loading from %s\" % (args.restore))\r\n\r\n        state = model.state_dict()\r\n        checkpoint = torch.load(args.restore)\r\n        if args.finetune:\r\n            checkpoint = checkpoint['seg']\r\n        checkpoint = {k: v for k, v in checkpoint.items() if k in state}\r\n        state.update(checkpoint)\r\n        model.load_state_dict(state)\r\n\r\n        del checkpoint\r\n        torch.cuda.empty_cache()\r\n\r\n    if args.finetune:\r\n        flow_infer = Inference_flow(args, train_flow=True, resume=args.restore)\r\n    else:\r\n        flow_infer = Inference_flow(args, train_flow=True)\r\n\r\n    params = []\r\n    scale_lr = []\r\n    assert args.lr_atn != args.lr_after_atn\r\n    for key, value in dict(model.module.named_parameters()).items():\r\n        if args.lr_atn and ('atn' in key or 'pred2' in key or 'RF2' in key) and not args.finetune:\r\n            flag = True\r\n        elif args.lr_after_atn and ('atn' in key or 'pred2' in key or 'RF2' in key) and not args.finetune:\r\n            flag = True\r\n        else:\r\n            flag = False\r\n        if value.requires_grad:\r\n            if flag:\r\n                scale_lr.append(True)\r\n                print('lrx10', key)\r\n            else:\r\n                scale_lr.append(False)\r\n            params += [{'params':[value],'lr':args.lr*10 if flag else args.lr, 'weight_decay': 4e-5}]\r\n    optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=4e-5)\r\n    spec_vid = None\r\n    spec_obj_ind = None\r\n\r\n    trainloader = data.DataLoader(\r\n        Trainset(root_data=args.root_data, json_meta_list=args.meta_list,\r\n                 sample_size=args.sample_size, test_mode=False, spec_vid=spec_vid, spec_obj_ind=spec_obj_ind,\r\n                 step=1, fix_size=False, half_size=False, random_ref=args.random_ref, random_skip=args.random_skip),\r\n        batch_size=args.batch_size, shuffle=True, num_workers=1, pin_memory=True)\r\n\r\n    # training\r\n    tot_iter = len(trainloader)\r\n    logger_train.info(\"Total iteration per epoch is %d\" % (tot_iter))\r\n    tot_time = []\r\n    loss_set = []\r\n    iou_set = []\r\n    optimizer.zero_grad()\r\n\r\n    for epoch in range(args.start_epoch, args.epoch):\r\n        for i_iter, batch in enumerate(trainloader):\r\n            start_time = timeit.default_timer()\r\n\r\n            img, lab, ori_img = batch\r\n\r\n            img = 
img[0].cuda().float()\r\n lab = lab[0].cuda().float()\r\n ori_img = ori_img.numpy()\r\n # img KT3HW, lab KTHW, ori_img, KTHW3#\r\n\r\n ### It may be better to move this augmentation into the dataset preprocessing ##\r\n if random.uniform(0,1)>0.5 and args.random_crop:\r\n ### random resize ###\r\n coord = [1e4,1e4,0,0]\r\n lab_agno = lab.sum(0)\r\n val_cnt = 0\r\n for i in range(lab_agno.shape[0]):\r\n idx = torch.nonzero(lab_agno[i]>0)\r\n if idx.shape[0] == 0:\r\n continue\r\n val_cnt += 1\r\n h0 = idx[:,0].min().item()\r\n w0 = idx[:,1].min().item()\r\n h1 = idx[:,0].max().item()\r\n w1 = idx[:,1].max().item()\r\n coord[0] = min(coord[0], h0)\r\n coord[1] = min(coord[1], w0)\r\n coord[2] = max(coord[2], h1)\r\n coord[3] = max(coord[3], w1)\r\n if val_cnt < 2:\r\n logger_train.info(('The number of frames that have label is less than 2, continue..'))\r\n continue\r\n ori_shape = lab.shape[-2:]\r\n rand_coord = [0]*4\r\n\r\n if random.uniform(0,1) > 0.3:\r\n scale = random.uniform(0,1)\r\n else:\r\n scale = 1\r\n rand_coord[0] = coord[0] * scale\r\n rand_coord[1] = coord[1] * scale\r\n rand_coord[2] = (ori_shape[0]-coord[2]-1)*(1-scale)+coord[2]+1\r\n rand_coord[3] = (ori_shape[1]-coord[3]-1)*(1-scale)+coord[3]+1\r\n for j in range(4):\r\n rand_coord[j] = int(rand_coord[j])\r\n\r\n old_img = img.clone()\r\n old_lab = lab.clone()\r\n ori_img = torch.FloatTensor(ori_img).cuda().transpose(-1,-2).transpose(-2,-3)\r\n old_ori_img = ori_img.clone()\r\n\r\n old_lab = old_lab[:,:,rand_coord[0]:rand_coord[2]+1,rand_coord[1]:rand_coord[3]+1]\r\n lab = F.upsample(old_lab, ori_shape, mode='bilinear', align_corners=True)\r\n lab = (lab>0.5).float()\r\n for i in range(img.shape[0]):\r\n img_obj = old_img[i,:,:,rand_coord[0]:rand_coord[2]+1,rand_coord[1]:rand_coord[3]+1]\r\n img[i] = F.upsample(img_obj, ori_shape, mode='bilinear', align_corners=True)\r\n img_obj = old_ori_img[0,:,:,rand_coord[0]:rand_coord[2]+1,rand_coord[1]:rand_coord[3]+1]\r\n ori_img[0] = F.upsample(img_obj, ori_shape, mode='bilinear', align_corners=True)\r\n ori_img = ori_img.transpose(-2,-3).transpose(-1,-2).cpu().numpy().astype(np.uint8)\r\n\r\n ### end of random resize ###\r\n\r\n if lab.shape[1] == 1:\r\n logger_train.info('lab.shape[1](vid_len) == 1, continue..')\r\n continue\r\n\r\n lr = utils.lr_poly(args.lr, i_iter, tot_iter, epoch, args.epoch)\r\n utils.adjust_optim_all(optimizer, lr, scale_lr)\r\n preds = []\r\n prev_labs = []\r\n preds.append(lab[:,0:1].contiguous())\r\n preds.append(lab[:,1:2].contiguous())\r\n merge_preds_ref = lab[:,0:1].contiguous().sum(0)\r\n for i in range(2, img.shape[1], 1):\r\n ms = model.forward(img[:,0], merge_preds_ref)\r\n flow = flow_infer.infer(ori_img[0,i], ori_img[0,i-1])\r\n prev_lab = utils.flow_warp_tensor(preds[i-1], flow)\r\n\r\n prev_labs.append(prev_lab.detach())\r\n merge_preds = prev_lab.max(0)[0]\r\n\r\n output, _ = model.forward(img[:,i], merge_preds, prev_lab.squeeze(1), ref=ms)\r\n\r\n cur_lab = lab[:,i].contiguous()\r\n\r\n if args.loss_iou_maxmin:\r\n cur_loss = utils.loss_calc_iou(output, cur_lab.unsqueeze(1), unify=False, optim_hard=False,\r\n square=False) # try this\r\n else:\r\n cur_loss = utils.loss_calc_iou_v2(output, cur_lab.unsqueeze(1), unify=False, optim_hard=False,\r\n square=False) # try this\r\n\r\n loss_set.append(cur_loss.item())\r\n\r\n iou = utils.calc_iou(output.data, cur_lab.long(), merge=False)\r\n iou_set.append(np.mean(iou))\r\n\r\n optimizer.zero_grad()\r\n cur_loss.backward()\r\n optimizer.step()\r\n\r\n if args.iou_thr_per_obj:\r\n output = 
output.detach()\r\n new_output = torch.zeros_like(output).cuda().float()\r\n for j in range(new_output.shape[0]):\r\n if iou[j] > 0.5:\r\n new_output[j] = output[j]\r\n else:\r\n new_output[j] = lab[j:j+1,i]\r\n new_output = new_output.contiguous()\r\n preds.append(new_output.detach())\r\n else:\r\n if np.mean(iou) > 0.5:\r\n preds.append(output.detach())\r\n else:\r\n preds.append(cur_lab.unsqueeze(1).detach())\r\n\r\n end_time = timeit.default_timer()\r\n tot_time.append(end_time - start_time)\r\n\r\n if i_iter % 200 == 0:\r\n logger_train.info('show at %s' % args.sample_dir)\r\n try:\r\n preds = torch.cat(preds, dim=1)\r\n prev_labs = torch.cat(prev_labs, dim=1)\r\n except Exception as e:\r\n print(e)\r\n print('Ignore.. Continue..')\r\n continue\r\n if args.show_img:\r\n show(img.data.cpu().numpy(), lab.data.cpu().numpy(), preds.data.cpu().numpy().astype(np.float),\r\n prev_labs.data.cpu().numpy().astype(np.float32))\r\n\r\n if i_iter % 20 == 0:\r\n run_time = np.mean(tot_time)\r\n rem_time = utils.calc_remain_time(run_time, i_iter, tot_iter, epoch, args.epoch)\r\n logger_train.info('iter = %d of %d in epoch = %d of %d, remain_time = %s' %\r\n (i_iter, tot_iter, epoch, args.epoch, rem_time))\r\n tot_time = []\r\n logger_train.info('lr = %f, loss = %f, iou = %f' % (lr, np.mean(loss_set), np.mean(iou_set)))\r\n loss_set = []\r\n iou_set = []\r\n\r\n if epoch % (args.epoch//5) == 0 or epoch == args.epoch - 1:\r\n path = os.path.join(args.snapshot_dir, 'model_' + str(epoch) + '.pth')\r\n logger_train.info('save model at %s' % path)\r\n torch.save({'seg':model.state_dict(), 'flow':flow_infer.model.state_dict()}, path)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n\r\n"
] |
[
[
"torch.nonzero",
"torch.cat",
"torch.nn.functional.upsample",
"torch.FloatTensor",
"torch.optim.Adam",
"numpy.mean",
"torch.cuda.empty_cache",
"torch.load",
"torch.zeros_like",
"torch.nn.DataParallel"
]
] |
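Note: utils.lr_poly is called above but not shown in this file. A common polynomial-decay definition consistent with how it is called (base lr, iteration, total iterations, epoch, total epochs) would be the sketch below; the decay power of 0.9 is an assumption, not taken from the repository.

def lr_poly(base_lr, i_iter, tot_iter, epoch, max_epoch, power=0.9):
    # fraction of total training completed so far
    progress = (epoch * tot_iter + i_iter) / float(max_epoch * tot_iter)
    return base_lr * (1 - progress) ** power

print(lr_poly(1e-4, 0, 1000, 0, 120))      # full learning rate at the start
print(lr_poly(1e-4, 500, 1000, 119, 120))  # near zero at the end of training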
LSN1220/torch_example
|
[
"b1230c8f702487225566b5be13947bd6f7904556"
] |
[
"mtcnn-pytorch/mtcnn/core/utils.py"
] |
[
"import numpy as np\nimport time\n\ndef IoU(box, boxes):\n \"\"\"Compute IoU between detect box and gt boxes\n\n Parameters:\n ----------\n box: numpy array , shape (5, ): x1, y1, x2, y2, score\n input box\n boxes: numpy array, shape (n, 4): x1, y1, x2, y2\n input ground truth boxes\n\n Returns:\n -------\n ovr: numpy.array, shape (n, )\n IoU\n \"\"\"\n box_area = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)\n xx1 = np.maximum(box[0], boxes[:, 0])\n yy1 = np.maximum(box[1], boxes[:, 1])\n xx2 = np.minimum(box[2], boxes[:, 2])\n yy2 = np.minimum(box[3], boxes[:, 3])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n inter = w * h\n ovr = np.true_divide(inter,(box_area + area - inter))\n #ovr = inter / (box_area + area - inter)\n return ovr\n\n\ndef convert_to_square(bbox):\n \"\"\"Convert bbox to square\n\n Parameters:\n ----------\n bbox: numpy array , shape n x 5\n input bbox\n\n Returns:\n -------\n square bbox\n \"\"\"\n square_bbox = bbox.copy()\n\n h = bbox[:, 3] - bbox[:, 1] + 1\n w = bbox[:, 2] - bbox[:, 0] + 1\n max_side = np.maximum(h,w)\n square_bbox[:, 0] = bbox[:, 0] + w*0.5 - max_side*0.5\n square_bbox[:, 1] = bbox[:, 1] + h*0.5 - max_side*0.5\n square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1\n square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1\n return square_bbox\n\n# non-maximum suppression: eleminates the box which have large interception with the box which have the largest score\ndef nms(dets, thresh, mode=\"Union\"):\n \"\"\"\n greedily select boxes with high confidence\n keep boxes overlap <= thresh\n rule out overlap > thresh\n :param dets: [[x1, y1, x2, y2 score]]\n :param thresh: retain overlap <= thresh\n :return: indexes to keep\n \"\"\"\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n\n # shape of x1 = (454,), shape of scores = (454,)\n # print(\"shape of x1 = {0}, shape of scores = {1}\".format(x1.shape, scores.shape))\n # time.sleep(5)\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1] # argsort: ascending order then [::-1] reverse the order --> descending order\n # print(\"shape of order {0}\".format(order.size)) # (454,)\n # time.sleep(5)\n\n # eleminates the box which have large interception with the box which have the largest score in order\n # matain the box with largest score and boxes don't have large interception with it\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n xx1 = np.maximum(x1[i], x1[order[1:]])\n yy1 = np.maximum(y1[i], y1[order[1:]])\n xx2 = np.minimum(x2[i], x2[order[1:]])\n yy2 = np.minimum(y2[i], y2[order[1:]])\n\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n\n # cacaulate the IOU between box which have largest score with other boxes\n if mode == \"Union\":\n # area[i]: the area of largest score\n ovr = inter / (areas[i] + areas[order[1:]] - inter)\n elif mode == \"Minimum\":\n ovr = inter / np.minimum(areas[i], areas[order[1:]])\n\n inds = np.where(ovr <= thresh)[0]\n order = order[inds + 1] # +1: eliminates the first element in order\n # print(inds)\n # print(\"shape of order {0}\".format(order.shape)) # (454,)\n # time.sleep(2)\n\n return keep\n\n\n\n\n"
] |
[
[
"numpy.minimum",
"numpy.where",
"numpy.true_divide",
"numpy.maximum"
]
] |
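Note: a small hand-checkable sanity test for the helpers above (toy boxes, not repository code); it assumes IoU and nms from the module are in scope.

import numpy as np

box = np.array([0, 0, 9, 9, 0.9])                  # x1, y1, x2, y2, score
boxes = np.array([[0, 0, 9, 9], [5, 5, 14, 14], [20, 20, 29, 29]])
print(IoU(box, boxes))                             # [1.0, ~0.143, 0.0]

dets = np.array([[0, 0, 9, 9, 0.9],
                 [1, 1, 10, 10, 0.8],              # near-duplicate of the first box
                 [20, 20, 29, 29, 0.7]])
print(nms(dets, thresh=0.5))                       # [0, 2]: the duplicate is suppressed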
dpaiton/DeepSparseCoding
|
[
"5ea01fa8770794df5e13743aa3f2d85297c27eb1"
] |
[
"tf1x/modules/sae_module.py"
] |
[
"import tensorflow as tf\n\nfrom DeepSparseCoding.tf1x.utils.trainable_variable_dict import TrainableVariableDict\nimport DeepSparseCoding.tf1x.utils.entropy_functions as ef\nfrom DeepSparseCoding.tf1x.modules.ae_module import AeModule\nfrom DeepSparseCoding.tf1x.modules.activations import sigmoid\n\nclass SaeModule(AeModule):\n def __init__(self, data_tensor, layer_types, enc_channels, dec_channels, patch_size,\n conv_strides, sparse_mult, w_decay_mult, w_norm_mult, target_act, act_funcs, dropout,\n tie_dec_weights, w_init_type, variable_scope=\"sae\"):\n \"\"\"\n Implementation of sparse autoencoder described in Andrew Ng's 2011 Stanford CS294A lecture notes\n Sigmoidal activation function\n Untied encoding & decoding weights\n Linear reconstructions - input images do not have 0-1 range\n Inputs:\n data_tensor\n enc_channels [list of ints] the number of output channels per encoder layer\n Last entry is the number of latent units\n dec_channels [list of ints] the number of output channels per decoder layer\n Last entry must be the number of input pixels for FC layers and channels for CONV layers\n w_decay_mult - weight decay multiplier\n w_norm_mult: tradeoff multiplier for weight norm loss (asks weight norm to == 1)\n act_funcs - activation functions\n dropout: specifies the keep probability or None\n conv: if True, do convolution\n conv_strides: list of strides for convolution [batch, y, x, channels]\n patch_size: number of (y, x) inputs for convolutional patches\n w_init_type: if True, l2 normalize w_init,\n reducing over [0] axis on enc and [-1] axis on dec\n variable_scope - specifies the variable_scope for the module\n Outputs:\n dictionary\n \"\"\"\n self.sparse_mult = sparse_mult\n self.target_act = target_act\n super(SaeModule, self).__init__(data_tensor, layer_types, enc_channels, dec_channels,\n patch_size, conv_strides, w_decay_mult, w_norm_mult, act_funcs, dropout, tie_dec_weights,\n w_init_type, variable_scope)\n\n def compute_sparse_loss(self, a_in):\n with tf.compat.v1.variable_scope(\"unsupervised\"):\n reduc_dims = tuple(range(len(a_in.get_shape().as_list()) - 1))\n avg_act = tf.reduce_mean(input_tensor=a_in, axis=reduc_dims, name=\"batch_avg_activity\")\n p_dist = self.target_act * tf.subtract(ef.safe_log(self.target_act),\n ef.safe_log(avg_act), name=\"kl_p\")\n q_dist = (1-self.target_act) * tf.subtract(ef.safe_log(1-self.target_act),\n ef.safe_log(1-avg_act), name=\"kl_q\")\n kl_divergence = tf.reduce_sum(input_tensor=tf.add(p_dist, q_dist), name=\"kld\")\n sparse_loss = tf.multiply(self.sparse_mult, kl_divergence, name=\"sparse_loss\")\n return sparse_loss\n\n def compute_total_loss(self):\n with tf.compat.v1.variable_scope(\"loss\") as scope:\n self.loss_dict = {\"recon_loss\":self.compute_recon_loss(self.reconstruction),\n \"sparse_loss\":self.compute_sparse_loss(self.a),\n \"weight_decay_loss\":self.compute_weight_decay_loss(),\n \"weight_norm_loss\":self.compute_weight_norm_loss()}\n self.total_loss = tf.add_n([loss for loss in self.loss_dict.values()], name=\"total_loss\")\n"
] |
[
[
"tensorflow.add",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.compat.v1.variable_scope"
]
] |
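Note: the KL term in compute_sparse_loss above is the standard Ng sparsity penalty, rho*log(rho/rho_hat) + (1-rho)*log((1-rho)/(1-rho_hat)). Restated in plain numpy for one batch of latent activations (illustrative numbers, not repository code):

import numpy as np

rho = 0.05                                                       # target_act: desired mean activation
acts = np.random.default_rng(0).uniform(0.01, 0.99, (128, 64))   # batch x latent units
rho_hat = acts.mean(axis=0)                                      # average activity per unit
kl = rho * np.log(rho / rho_hat) + (1 - rho) * np.log((1 - rho) / (1 - rho_hat))
sparse_loss = 0.5 * kl.sum()                                     # 0.5 plays the role of sparse_mult
print(sparse_loss)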
Austaon/GroupRecommendationThesis
|
[
"4a4b8c2f230d7db3b6b36342ab06b02f146462ae"
] |
[
"Experiment Processing/experiment2/survey_fatigue.py"
] |
[
"import statistics\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot\n\nfrom database.session import Session\n\n\ndef survey_fatigue():\n \"\"\"\n Analysis to check if survey fatigue was present in the survey.\n The survey key to consider can be changed in the code below.\n\n Calculates the average survey rating for each playlist (before being put in the \"correct\" position),\n if survey fatigue did take place, the ratings should go down over time.\n :return:\n \"\"\"\n\n key = \"like_rating\"\n # key = \"selection_rating\"\n # key = \"suitable_rating\"\n\n # key = \"like_rating_specific\"\n # key = \"selection_rating_specific\"\n # key = \"suitable_rating_specific\"\n\n specific_ratings = {\n \"playlist1\": {\n \"Probability Weighted Sum\": [],\n \"Fairness\": [],\n \"Least Misery\": []\n },\n \"playlist2\": {\n \"Probability Weighted Sum\": [],\n \"Fairness\": [],\n \"Least Misery\": []\n },\n \"playlist3\": {\n \"Probability Weighted Sum\": [],\n \"Fairness\": [],\n \"Least Misery\": []\n }\n }\n\n overall_ratings = {\n \"playlist1\": [],\n \"playlist2\": [],\n \"playlist3\": []\n }\n\n for user, session in Session.get_users_with_surveys():\n survey = user.survey\n\n for playlist_string in [f\"playlist{i}\" for i in range(1, 4)]:\n voting_rule_name = survey[\"metaData\"][playlist_string][\"rule_name\"][\"ruleName\"]\n\n if \"specific\" in key:\n specific_ratings[playlist_string][voting_rule_name].extend(\n [int(x) for _, x in survey[f\"{playlist_string}_{key}\"].items()]\n )\n overall_ratings[playlist_string].extend(\n [int(x) for _, x in survey[f\"{playlist_string}_{key}\"].items()]\n )\n else:\n specific_ratings[playlist_string][voting_rule_name].append(\n int(survey[f\"{playlist_string}_{key}\"])\n )\n overall_ratings[playlist_string].append(\n int(survey[f\"{playlist_string}_{key}\"])\n )\n\n boxplot_data = [overall_ratings[\"playlist1\"], overall_ratings[\"playlist2\"], overall_ratings[\"playlist3\"]]\n boxplot_labels = [\"Playlist 1\", \"Playlist 2\", \"Playlist 3\"]\n\n fig1, ax1 = plt.subplots()\n\n pyplot.locator_params(axis='y', nbins=5)\n\n ax1.boxplot(boxplot_data, labels=boxplot_labels,\n boxprops=dict(linestyle='-', linewidth=1.5),\n medianprops=dict(linestyle='-', linewidth=2),\n whiskerprops=dict(linestyle='-', linewidth=1.5),\n capprops=dict(linestyle='-', linewidth=1.5),\n showfliers=True\n )\n\n ax1.set_ylim((0.8, 5.2))\n ax1.set_ylabel(f\"Survey Rating\")\n ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)\n\n fig1.tight_layout()\n plt.show()\n\n result = \"Specific:\\n\"\n overall_result = \"Overall:\\n\"\n\n for playlist in specific_ratings:\n\n playlist_data = specific_ratings[playlist]\n result += f\"{playlist}: \"\n overall_result += f\"{playlist}: {statistics.mean(overall_ratings[playlist]):.2f},\" \\\n f\" {statistics.stdev(overall_ratings[playlist]):.2f}, \"\n\n for voting_rule in playlist_data:\n result += f\"{voting_rule}: {statistics.mean(playlist_data[voting_rule]):.2f},\" \\\n f\" {statistics.stdev(playlist_data[voting_rule]):.2f}\" \\\n f\" (length: {len(playlist_data[voting_rule]): =3d}), \"\n result += \"\\n\"\n overall_result += \"\\n\"\n\n print(result[:-3])\n print(overall_result[:-3])\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.locator_params",
"matplotlib.pyplot.subplots"
]
] |
nextBillyonair/Attention
|
[
"3e2dfecd63abd762633888895f3ba721c903f439"
] |
[
"src/tf_schedulers.py"
] |
[
"import torch\nimport torch.nn.functional as F\nimport math\n\n\ndef constant_annealing(model, iteration, max_iterations, start_tf=1.0):\n model.teacher_forcing = min(1, max(start_tf, 0))\n\ndef linear_annealing(model, iteration, max_iterations, start_tf=1.0):\n value = start_tf * (1 - iteration / max_iterations)\n model.teacher_forcing = min(1, max(value, 0))\n\ndef logarithmic_annealing(model, iteration, max_iterations, start_tf=1.0):\n value = start_tf * math.log(2) / math.log(iteration + 2)\n model.teacher_forcing = min(1, max(value, 0))\n\ndef exponentional_annealing(model, iteration, max_iterations, start_tf=1.0, gamma=0.99):\n value = gamma * model.teacher_forcing\n model.teacher_forcing = min(1, max(value, 0))\n\ndef fast_annealing(model, iteration, max_iterations, start_tf=1.0):\n model.teacher_forcing = min(1, max(start_tf / (iteration + 1), 0))\n\ndef sigmoid_annealing(model, iteration, max_iterations, start_tf=1.0, k=1):\n scale = (iteration / max_iterations) * (12) - 6\n value = start_tf * torch.sigmoid(torch.tensor(-scale)).item()\n model.teacher_forcing = min(1, max(value, 0))\n\ndef cosine_annealing(model, iteration, max_iterations, start_tf=1.0):\n scale = iteration / max_iterations\n value = start_tf * 0.5 * (1 + math.cos(scale * math.pi))\n model.teacher_forcing = min(1, max(value, 0))\n\ndef softplus_annealing(model, iteration, max_iterations, start_tf=1.0):\n max_value = math.log(math.e - 1)\n scale = (iteration / max_iterations) * (max_value + 5) - 5\n value = start_tf * (-F.softplus(torch.tensor(scale)).item() + 1.)\n model.teacher_forcing = min(1, max(value, 0))\n\ndef elu_annealing(model, iteration, max_iterations, start_tf=1.0):\n scale = (iteration / max_iterations) * (5) - 5\n value = start_tf * -F.elu(torch.tensor(scale)).item()\n model.teacher_forcing = min(1, max(value, 0))\n\ndef log_sigmoid_annealing(model, iteration, max_iterations, start_tf=1.0):\n max_value = math.log(math.e - 1)\n scale = (iteration / max_iterations) * (5 + max_value) - max_value\n value = start_tf * -torch.sigmoid(torch.tensor(scale)).log().item()\n model.teacher_forcing = min(1, max(value, 0))\n\ndef tanhshrink_annealing(model, iteration, max_iterations, start_tf=1.0):\n scale = (iteration / max_iterations) * 4 - 2\n value = start_tf * (-F.tanhshrink(torch.tensor(scale)).item() / (2 * 1.0360) + 0.5)\n model.teacher_forcing = min(1, max(value, 0))\n\ndef tanh_annealing(model, iteration, max_iterations, start_tf=1.0):\n scale = (iteration / max_iterations) * 12 - 6\n value = start_tf * (-torch.tanh(torch.tensor(scale)).item() * 0.5 + 0.5)\n model.teacher_forcing = min(1, max(value, 0))\n"
] |
[
[
"torch.tensor"
]
] |
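Note: each schedule above only needs an object exposing a mutable teacher_forcing attribute. A toy driver (not repository code) showing the linear schedule decaying from 1 to 0:

import types

model = types.SimpleNamespace(teacher_forcing=1.0)  # minimal stand-in for a seq2seq model
for it in range(0, 101, 25):
    linear_annealing(model, it, max_iterations=100)
    print(it, model.teacher_forcing)                # 1.0, 0.75, 0.5, 0.25, 0.0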
Sundrops/pytorch-faster-rcnn
|
[
"3279e7c20b7996839b30e8287e5eb7e5b1f1f793"
] |
[
"lib/layer_utils/anchor_target_layer.py"
] |
[
"# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom model.config import cfg\nimport numpy as np\nimport numpy.random as npr\nfrom utils.bbox import bbox_overlaps\nfrom model.bbox_transform import bbox_transform\nimport torch\n\ndef anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):\n \"\"\"Same as the anchor target layer in original Fast/er RCNN \"\"\"\n A = num_anchors\n total_anchors = all_anchors.shape[0]\n K = total_anchors / num_anchors\n\n # allow boxes to sit over the edge by a small amount\n _allowed_border = 0\n\n # map of shape (..., H, W)\n height, width = rpn_cls_score.shape[1:3]\n\n # only keep anchors inside the image\n inds_inside = np.where(\n (all_anchors[:, 0] >= -_allowed_border) &\n (all_anchors[:, 1] >= -_allowed_border) &\n (all_anchors[:, 2] < im_info[1] + _allowed_border) & # width\n (all_anchors[:, 3] < im_info[0] + _allowed_border) # height\n )[0]\n\n # keep only inside anchors\n anchors = all_anchors[inds_inside, :]\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.empty((len(inds_inside),), dtype=np.float32)\n labels.fill(-1)\n\n # overlaps between the anchors and the gt boxes\n # overlaps (ex, gt)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(anchors, dtype=np.float),\n np.ascontiguousarray(gt_boxes, dtype=np.float))\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps[gt_argmax_overlaps,\n np.arange(overlaps.shape[1])]\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\n\n if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels first so that positive labels can clobber them\n # first set the negatives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # fg label: for each gt, anchor with highest overlap\n labels[gt_argmax_overlaps] = 1\n\n # fg label: above threshold IOU\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1\n\n if cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels last so that negative labels can clobber positives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # subsample positive labels if we have too many\n num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)\n fg_inds = np.where(labels == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n\n # subsample negative labels if we have too many\n num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)\n bg_inds = np.where(labels == 0)[0]\n if len(bg_inds) > num_bg:\n disable_inds = npr.choice(\n bg_inds, size=(len(bg_inds) - num_bg), replace=False)\n labels[disable_inds] = -1\n\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\n bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])\n\n bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n # only the positive ones have regression targets\n bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)\n\n bbox_outside_weights = np.zeros((len(inds_inside), 4), 
dtype=np.float32)\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = np.sum(labels >= 0)\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n negative_weights = np.ones((1, 4)) * 1.0 / num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /\n np.sum(labels == 1))\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /\n np.sum(labels == 0))\n bbox_outside_weights[labels == 1, :] = positive_weights\n bbox_outside_weights[labels == 0, :] = negative_weights\n\n # map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)\n\n # labels\n labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)\n labels = labels.reshape((1, 1, A * height, width))\n rpn_labels = labels\n\n # bbox_targets\n bbox_targets = bbox_targets \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_targets = bbox_targets\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_inside_weights = bbox_inside_weights\n\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_outside_weights = bbox_outside_weights\n return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights\n\n\ndef _unmap(data, count, inds, fill=0):\n \"\"\" Unmap a subset of item (data) back to the original set of items (of\n size count) \"\"\"\n if len(data.shape) == 1:\n ret = np.empty((count,), dtype=np.float32)\n ret.fill(fill)\n ret[inds] = data\n else:\n ret = np.empty((count,) + data.shape[1:], dtype=np.float32)\n ret.fill(fill)\n ret[inds, :] = data\n return ret\n\n\ndef _compute_targets(ex_rois, gt_rois):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n if cfg.SUB_CATEGORY:\n assert gt_rois.shape[1] == 6\n else:\n assert gt_rois.shape[1] == 5\n\n return bbox_transform(torch.from_numpy(ex_rois), torch.from_numpy(gt_rois[:, :4])).numpy()"
] |
[
[
"numpy.array",
"numpy.empty",
"numpy.ascontiguousarray",
"numpy.sum",
"numpy.ones",
"torch.from_numpy",
"numpy.where",
"numpy.arange"
]
] |
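Note: a toy demonstration of _unmap above, which scatters per-inside-anchor values back into the full anchor set and pads the out-of-image anchors with a fill value (illustrative arrays, not repository code):

import numpy as np

labels = np.array([1., 0., 1.], dtype=np.float32)  # labels for the 3 inside anchors
inds_inside = np.array([0, 2, 4])                  # their positions among 5 total anchors
print(_unmap(labels, 5, inds_inside, fill=-1))     # [ 1. -1.  0. -1.  1.]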
MaryumSayeed/SYD-PYpline
|
[
"cfdd5bb2880cc4d2ab90af048ae6cc5994862076"
] |
[
"pysyd/plots.py"
] |
[
"import os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.convolution import convolve, Box1DKernel\n\nfrom pysyd import functions\nfrom pysyd import models\nfrom pysyd import utils\n\n\ndef set_plot_params():\n \"\"\"\n Sets the matplotlib parameters.\n\n Returns\n -------\n None\n\n \"\"\"\n\n plt.style.use('dark_background')\n plt.rcParams.update({\n 'agg.path.chunksize': 10000,\n 'mathtext.fontset': 'stix',\n 'figure.autolayout': True,\n 'lines.linewidth': 1,\n 'axes.titlesize': 18.0,\n 'axes.labelsize': 16.0,\n 'axes.linewidth': 1.25,\n 'axes.formatter.useoffset': False,\n 'xtick.major.size': 10.0,\n 'xtick.minor.size': 5.0,\n 'xtick.major.width': 1.25,\n 'xtick.minor.width': 1.25,\n 'xtick.direction': 'inout',\n 'ytick.major.size': 10.0,\n 'ytick.minor.size': 5.0,\n 'ytick.major.width': 1.25,\n 'ytick.minor.width': 1.25,\n 'ytick.direction': 'inout',\n })\n\n\n\ndef plot_excess(star, notebook=False):\n \"\"\"\n Creates a plot summarizing the results of the find excess routine.\n\n Parameters\n ----------\n star : target.Target\n the pySYD pipeline object\n notebook : bool\n if running script out of Jupyter notebook (for output plots)\n\n Returns\n -------\n None\n \n \"\"\"\n\n fig = plt.figure(\"Find excess results for %s\"%star.name, figsize=(12,8))\n\n # Time series data\n ax1 = plt.subplot(2, 3, 1)\n if star.lc:\n ax1.plot(star.time, star.flux, 'w-')\n ax1.set_xlim([min(star.time), max(star.time)])\n ax1.set_title(r'$\\rm Time \\,\\, series$')\n ax1.set_xlabel(r'$\\rm Time \\,\\, [days]$')\n ax1.set_ylabel(r'$\\rm Flux$')\n\n # log-log power spectrum with crude background fit\n ax2 = plt.subplot(2, 3, 2)\n ax2.loglog(star.freq, star.pow, 'w-')\n ax2.set_xlim([min(star.freq), max(star.freq)])\n ax2.set_ylim([min(star.pow), max(star.pow)*1.25])\n ax2.set_title(r'$\\rm Crude \\,\\, background \\,\\, fit$')\n ax2.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax2.set_ylabel(r'$\\rm Power \\,\\, [ppm^{2} \\mu Hz^{-1}]$')\n if star.findex['binning'] is not None:\n ax2.loglog(star.bin_freq, star.bin_pow, 'r-')\n ax2.loglog(star.freq, star.interp_pow, color='lime', linestyle='-', lw=2.0)\n\n # Crude background-corrected power spectrum\n ax3 = plt.subplot(2, 3, 3)\n ax3.plot(star.freq, star.bgcorr_pow, 'w-')\n ax3.set_xlim([min(star.freq), max(star.freq)])\n ax3.set_ylim([0.0, max(star.bgcorr_pow)*1.25])\n ax3.set_title(r'$\\rm Background \\,\\, corrected \\,\\, PS$')\n ax3.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax3.set_ylabel(r'$\\rm Power \\,\\, [ppm^{2} \\mu Hz^{-1}]$')\n\n # ACF trials to determine numax\n for i in range(star.findex['n_trials']):\n ax = plt.subplot(2, 3, 4+i)\n ax.plot(star.findex['results'][star.name][i+1]['x'], star.findex['results'][star.name][i+1]['y'], 'w-')\n xran = max(star.findex['results'][star.name][i+1]['fitx'])-min(star.findex['results'][star.name][i+1]['fitx'])\n ymax = star.findex['results'][star.name][i+1]['maxy']\n ax.axvline(star.findex['results'][star.name][i+1]['maxx'], linestyle='dotted', color='r', linewidth=0.75)\n ax.set_title(r'$\\rm Collapsed \\,\\, ACF \\,\\, [trial \\,\\, %d]$' % (i+1))\n ax.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax.set_ylabel(r'$\\rm Arbitrary \\,\\, units$')\n if star.findex['results'][star.name][i+1]['good_fit']:\n ax.plot(star.findex['results'][star.name][i+1]['fitx'], star.findex['results'][star.name][i+1]['fity'], color='lime', linestyle='-', linewidth=1.5)\n if max(star.findex['results'][star.name][i+1]['fity']) > 
star.findex['results'][star.name][i+1]['maxy']:\n ymax = max(star.findex['results'][star.name][i+1]['fity'])\n ax.axvline(star.findex['results'][star.name][i+1]['numax'], color='lime', linestyle='--', linewidth=0.75)\n yran = np.absolute(ymax)\n ax.set_xlim([min(star.findex['results'][star.name][i+1]['x']), max(star.findex['results'][star.name][i+1]['x'])])\n ax.set_ylim([-0.05, ymax+0.15*yran])\n ax.annotate(r'$\\rm SNR = %3.2f$' % star.findex['results'][star.name][i+1]['snr'], xy=(min(star.findex['results'][star.name][i+1]['fitx'])+0.05*xran, ymax+0.025*yran), fontsize=18)\n\n plt.tight_layout()\n if star.params['save']:\n plt.savefig(os.path.join(star.params[star.name]['path'],'excess.png'), dpi=300)\n if notebook:\n with open('excess.pickle','wb') as f:\n pickle.dump(fig, f)\n if not star.params['show']:\n plt.close()\n\n\ndef plot_background(star, n_peaks=10, notebook=False):\n \"\"\"\n Creates a plot summarizing the results of the fit background routine.\n\n Parameters\n ----------\n star : target.Target\n the main pipeline Target class object\n n_peaks : int\n the number of peaks to highlight in the zoomed-in power spectrum\n notebook : bool\n if running script out of Jupyter notebook (for output plots)\n\n Results\n -------\n None\n\n \"\"\"\n\n exp_numax=star.fitbg['results'][star.name]['numax_gauss'][0]\n exp_dnu=0.22*(exp_numax**0.797)\n obs_dnu=star.fitbg['results'][star.name]['dnu'][0]\n\n fig = plt.figure(\"Global fit for %s\"%star.name, figsize=(12, 12))\n # Time series data\n ax1 = fig.add_subplot(3, 3, 1)\n if star.lc:\n ax1.plot(star.time, star.flux, 'w-')\n ax1.set_xlim([min(star.time), max(star.time)])\n ax1.set_title(r'$\\rm Time \\,\\, series$')\n ax1.set_xlabel(r'$\\rm Time \\,\\, [days]$')\n ax1.set_ylabel(r'$\\rm Flux$')\n\n # Initial background guesses\n ax2 = fig.add_subplot(3, 3, 2)\n ax2.plot(star.frequency, star.random_pow, c='lightgrey', zorder=0, alpha=0.5)\n ax2.plot(star.frequency[star.frequency < star.params[star.name]['ps_mask'][0]], star.random_pow[star.frequency < star.params[star.name]['ps_mask'][0]], 'w-', zorder=1)\n ax2.plot(star.frequency[star.frequency > star.params[star.name]['ps_mask'][1]], star.random_pow[star.frequency > star.params[star.name]['ps_mask'][1]], 'w-', zorder=1)\n ax2.plot(star.frequency[star.frequency < star.params[star.name]['ps_mask'][0]], star.smooth_pow[star.frequency < star.params[star.name]['ps_mask'][0]], 'r-', linewidth=0.75, zorder=2)\n ax2.plot(star.frequency[star.frequency > star.params[star.name]['ps_mask'][1]], star.smooth_pow[star.frequency > star.params[star.name]['ps_mask'][1]], 'r-', linewidth=0.75, zorder=2)\n total = np.zeros_like(star.frequency)\n for r in range(star.nlaws_orig):\n model = models.harvey(star.frequency, [star.b_orig[r], star.a_orig[r], star.noise])\n ax2.plot(star.frequency, model, color='blue', linestyle=':', linewidth=1.5, zorder=4)\n total += model\n total += star.noise\n ax2.plot(star.frequency, total, color='blue', linewidth=2., zorder=5)\n ax2.errorbar(star.bin_freq, star.bin_pow, yerr=star.bin_err, color='lime', markersize=0., fillstyle='none', ls='None', marker='D', capsize=3, ecolor='lime', elinewidth=1, capthick=2, zorder=3)\n ax2.axvline(star.params[star.name]['ps_mask'][0], color='darkorange', linestyle='dashed', linewidth=2.0, zorder=1, dashes=(5,5))\n ax2.axvline(star.params[star.name]['ps_mask'][1], color='darkorange', linestyle='dashed', linewidth=2.0, zorder=1, dashes=(5,5))\n ax2.axhline(star.noise, color='blue', linestyle='dashed', linewidth=1.5, zorder=3, dashes=(5, 5))\n 
ax2.set_xlim([min(star.frequency), max(star.frequency)])\n ax2.set_ylim([min(star.power), max(star.power)*1.25])\n ax2.set_title(r'$\\rm Initial \\,\\, guesses$')\n ax2.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax2.set_ylabel(r'$\\rm Power \\,\\, [ppm^{2} \\mu Hz^{-1}]$')\n ax2.set_xscale('log')\n ax2.set_yscale('log')\n\n lower = (min(star.frequency)+exp_numax)/2.\n upper = (max(star.frequency)+exp_numax)/2.\n mask = (star.frequency >= lower)&(star.frequency <= upper)\n # Fitted background\n ax3 = fig.add_subplot(3, 3, 3)\n ax3.plot(star.frequency, star.random_pow, c='lightgrey', zorder=0, alpha=0.5)\n ax3.plot(star.frequency[star.frequency < star.params[star.name]['ps_mask'][0]], star.random_pow[star.frequency < star.params[star.name]['ps_mask'][0]], 'w-', linewidth=0.75, zorder=1)\n ax3.plot(star.frequency[star.frequency > star.params[star.name]['ps_mask'][1]], star.random_pow[star.frequency > star.params[star.name]['ps_mask'][1]], 'w-', linewidth=0.75, zorder=1)\n ax3.plot(star.frequency[star.frequency < star.params[star.name]['ps_mask'][0]], star.smooth_pow[star.frequency < star.params[star.name]['ps_mask'][0]], 'r-', linewidth=0.75, zorder=2)\n ax3.plot(star.frequency[star.frequency > star.params[star.name]['ps_mask'][1]], star.smooth_pow[star.frequency > star.params[star.name]['ps_mask'][1]], 'r-', linewidth=0.75, zorder=2)\n if star.nlaws != 0:\n for r in range(star.nlaws):\n ax3.plot(star.frequency, models.harvey(star.frequency, [star.pars[2*r], star.pars[2*r+1], star.pars[-1]]), color='blue', linestyle=':', linewidth=1.5, zorder=4)\n ax3.plot(star.frequency, models.harvey(star.frequency, star.pars, total=True), color='blue', linewidth=2.0, zorder=5)\n ax3.errorbar(star.bin_freq, star.bin_pow, yerr=star.bin_err, color='lime', markersize=0.0, fillstyle='none', ls='None', marker='D', capsize=3, ecolor='lime', elinewidth=1, capthick=2, zorder=3)\n ax3.axvline(star.params[star.name]['ps_mask'][0], color='darkorange', linestyle='dashed', linewidth=2.0, zorder=1, dashes=(5,5))\n ax3.axvline(star.params[star.name]['ps_mask'][1], color='darkorange', linestyle='dashed', linewidth=2.0, zorder=1, dashes=(5,5))\n ax3.axhline(star.pars[-1], color='blue', linestyle='dashed', linewidth=1.5, zorder=3, dashes=(5, 5))\n ax3.plot(star.frequency[mask], star.pssm[mask], color='yellow', linewidth=2.0, linestyle='dashed', zorder=6)\n ax3.set_xlim([min(star.frequency), max(star.frequency)])\n ax3.set_ylim([min(star.power), max(star.power)*1.25])\n ax3.set_title(r'$\\rm Fitted \\,\\, model$')\n ax3.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax3.set_ylabel(r'$\\rm Power \\,\\, [ppm^{2} \\mu Hz^{-1}]$')\n ax3.set_xscale('log')\n ax3.set_yscale('log')\n\n # Smoothed power excess w/ gaussian\n ax4 = fig.add_subplot(3, 3, 4)\n ax4.plot(star.region_freq, star.region_pow, 'w-', zorder=0)\n idx = functions.return_max(star.region_freq, star.region_pow, index=True)\n ax4.plot([star.region_freq[idx]], [star.region_pow[idx]], color='red', marker='s', markersize=7.5, zorder=0)\n ax4.axvline([star.region_freq[idx]], color='white', linestyle='--', linewidth=1.5, zorder=0)\n ax4.plot(star.new_freq, star.numax_fit, 'b-', zorder=3)\n ax4.axvline(exp_numax, color='blue', linestyle=':', linewidth=1.5, zorder=2)\n ax4.plot([exp_numax], [max(star.numax_fit)], color='b', marker='D', markersize=7.5, zorder=1)\n ax4.set_title(r'$\\rm Smoothed \\,\\, bg$-$\\rm corrected \\,\\, PS$')\n ax4.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax4.set_xlim([min(star.region_freq), max(star.region_freq)])\n\n # 
Background-corrected power spectrum with n highest peaks\n mask = np.ma.getmask(np.ma.masked_inside(star.frequency, star.params[star.name]['ps_mask'][0], star.params[star.name]['ps_mask'][1]))\n star.freq = star.frequency[mask]\n star.psd = star.bg_corr_smooth[mask]\n peaks_f, peaks_p = functions.max_elements(star.freq, star.psd, n_peaks)\n ax5 = fig.add_subplot(3, 3, 5)\n ax5.plot(star.freq, star.psd, 'w-', zorder=0, linewidth=1.0)\n ax5.scatter(peaks_f, peaks_p, s=25.0, edgecolor='r', marker='s', facecolor='none', linewidths=1.0)\n ax5.set_title(r'$\\rm Bg$-$\\rm corrected \\,\\, PS$')\n ax5.set_xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$')\n ax5.set_ylabel(r'$\\rm Power$')\n ax5.set_xlim([star.params[star.name]['ps_mask'][0], star.params[star.name]['ps_mask'][1]])\n ax5.set_ylim([min(star.psd)-0.025*(max(star.psd)-min(star.psd)), max(star.psd)+0.1*(max(star.psd)-min(star.psd))])\n\n sig = 0.35*exp_dnu/2.35482 \n weights = 1./(sig*np.sqrt(2.*np.pi))*np.exp(-(star.lag-exp_dnu)**2./(2.*sig**2))\n new_weights = weights/max(weights)\n # ACF for determining dnu\n ax6 = fig.add_subplot(3, 3, 6)\n ax6.plot(star.lag, star.auto, 'w-', zorder=0, linewidth=1.)\n ax6.scatter(star.peaks_l, star.peaks_a, s=30.0, edgecolor='r', marker='^', facecolor='none', linewidths=1.0)\n ax6.axvline(exp_dnu, color='red', linestyle=':', linewidth=1.5, zorder=5)\n ax6.axvline(obs_dnu, color='lime', linestyle='--', linewidth=1.5, zorder=2)\n ax6.scatter(star.best_lag, star.best_auto, s=45.0, edgecolor='lime', marker='s', facecolor='none', linewidths=1.0)\n ax6.plot(star.zoom_lag, star.zoom_auto, 'r-', zorder=5, linewidth=1.0)\n ax6.plot(star.lag, new_weights, c='yellow', linestyle=':', zorder = 0, linewidth = 1.0)\n ax6.set_title(r'$\\rm ACF \\,\\, for \\,\\, determining \\,\\, \\Delta\\nu$')\n ax6.set_xlabel(r'$\\rm Frequency \\,\\, separation \\,\\, [\\mu Hz]$')\n ax6.set_xlim([min(star.lag), max(star.lag)])\n ax6.set_ylim([min(star.auto)-0.05*(max(star.auto)-min(star.auto)), max(star.auto)+0.1*(max(star.auto)-min(star.auto))])\n\n # dnu fit\n ax7 = fig.add_subplot(3, 3, 7)\n ax7.plot(star.zoom_lag, star.zoom_auto, 'w-', zorder=0, linewidth=1.0)\n ax7.axvline(obs_dnu, color='lime', linestyle='--', linewidth=1.5, zorder=2)\n ax7.plot(star.new_lag, star.dnu_fit, color='lime', linewidth=1.5)\n ax7.axvline(exp_dnu, color='red', linestyle=':', linewidth=1.5, zorder=5)\n ax7.set_title(r'$\\rm \\Delta\\nu \\,\\, fit$')\n ax7.set_xlabel(r'$\\rm Frequency \\,\\, separation \\,\\, [\\mu Hz]$')\n ax7.annotate(r'$\\Delta\\nu = %.2f$'%obs_dnu, xy=(0.025, 0.85), xycoords=\"axes fraction\", fontsize=18, color='lime')\n ax7.set_xlim([min(star.zoom_lag), max(star.zoom_lag)])\n\n if star.globe['interp_ech']:\n interpolation='bilinear'\n else:\n interpolation='none'\n # echelle diagram\n ax8 = fig.add_subplot(3, 3, 8)\n ax8.imshow(star.ech, extent=star.extent, interpolation=interpolation, aspect='auto', origin='lower', cmap=plt.get_cmap('viridis'))\n ax8.axvline([obs_dnu], color='white', linestyle='--', linewidth=1.0, dashes=(5, 5))\n ax8.set_title(r'$\\rm \\grave{E}chelle \\,\\, diagram$')\n ax8.set_xlabel(r'$\\rm \\nu \\,\\, mod \\,\\, %.2f \\,\\, [\\mu Hz]$'%obs_dnu)\n ax8.set_ylabel(r'$\\rm \\nu \\,\\, [\\mu Hz]$')\n ax8.set_xlim([0.0, 2.0*obs_dnu])\n ax8.set_ylim([star.params[star.name]['ps_mask'][0], star.params[star.name]['ps_mask'][1]])\n\n yrange = max(star.yax)-min(star.yax)\n ax9 = fig.add_subplot(3, 3, 9)\n ax9.plot(star.xax, star.yax, color='white', linestyle='-', linewidth=0.75)\n ax9.set_title(r'$\\rm Collapsed \\,\\, 
\\grave{e}chelle \\,\\, diagram$')\n ax9.set_xlabel(r'$\\rm \\nu \\,\\, mod \\,\\, %.2f \\,\\, [\\mu Hz]$'%obs_dnu)\n ax9.set_ylabel(r'$\\rm Collapsed \\,\\, power$')\n ax9.set_xlim([0.0, 2.0*obs_dnu])\n ax9.set_ylim([min(star.yax)-0.025*(yrange), max(star.yax)+0.05*(yrange)])\n\n plt.tight_layout()\n if star.params['save']:\n if star.globe['interp_ech']:\n plt.savefig(os.path.join(star.params[star.name]['path'],'background_sm_ech.png'), dpi=300)\n else:\n plt.savefig(os.path.join(star.params[star.name]['path'],'background.png'), dpi=300)\n if notebook:\n with open('background.pickle','wb') as f:\n pickle.dump(fig, f)\n if not star.params['show']:\n plt.close()\n\n\ndef plot_samples(star, notebook=False):\n \"\"\"\n Plot results of the Monte-Carlo sampling.\n\n Parameters\n ----------\n star : target.Target\n the pySYD pipeline object\n notebook : bool\n if running script out of Jupyter notebook (for output plots)\n\n Returns\n -------\n None\n \n \"\"\"\n\n n = len(star.df.columns.values.tolist())\n params = utils.get_params_dict()\n if n <= 3:\n x, y = 3, 1\n size=(12,6)\n elif n > 3 and n <= 6:\n x, y = 3, 2\n size=(12,8)\n elif n > 6 and n <= 9:\n x, y = 3, 3\n size=(12,12)\n elif n > 9 and n <= 12:\n x, y = 4, 3\n size=(12,8)\n elif n > 12 and n <= 16:\n x, y = 4, 4\n size=(12,12)\n else:\n x, y = 5, 4\n size=(12,8)\n fig = plt.figure(\"Posteriors for %s\"%star.name, figsize=size)\n for i, col in enumerate(star.df.columns.values.tolist()):\n ax = plt.subplot(y, x, i+1)\n ax.hist(star.df[col], bins=20, color='cyan', histtype='step', lw=2.5, facecolor='0.75')\n ax.set_yticks([])\n ax.set_yticklabels([])\n ax.set_title(params[col]['label'], fontsize=16)\n plt.tight_layout()\n if star.params['save']:\n plt.savefig(os.path.join(star.params[star.name]['path'],'samples.png'), dpi=300)\n if notebook:\n with open('samples.pickle','wb') as f:\n pickle.dump(fig, f)\n if not star.params['show']:\n plt.close()\n\n\ndef time_series(star, notebook=True):\n\n fig = plt.figure(\"%s time series\"%star.name, figsize=(10,6))\n ax = plt.subplot(1,1,1)\n ax.plot(star.time, star.flux, 'w-')\n ax.set_xlim([min(star.time), max(star.time)])\n ax.tick_params(axis='both', which='minor', length=10, width=1.25, direction='inout')\n ax.tick_params(axis='both', which='major', length=15, width=1.25, direction='inout') \n ax.tick_params(labelsize=22)\n plt.xlabel(r'$\\rm Time \\,\\, [days]$', fontsize=28)\n plt.ylabel(r'$\\rm Normalized \\,\\, flux$', fontsize=28)\n plt.tight_layout()\n if notebook:\n with open('lc.pickle','wb') as f:\n pickle.dump(fig, f)\n if not star.params['show']:\n plt.close()\n\n\ndef frequency_series(star, notebook=True):\n\n fig = plt.figure(\"%s power spectrum\"%star.name, figsize=(10,6))\n ax = plt.subplot(1,1,1)\n ax.plot(star.frequency, star.power, 'w-')\n ax.set_xlim([min(star.frequency), max(star.frequency)])\n ax.tick_params(axis='both', which='minor', length=10, width=1.25, direction='inout')\n ax.tick_params(axis='both', which='major', length=15, width=1.25, direction='inout') \n ax.tick_params(labelsize=22)\n ax.set_xscale('log')\n ax.set_yscale('log')\n plt.xlabel(r'$\\rm Frequency \\,\\, [\\mu Hz]$', fontsize=28)\n plt.ylabel(r'$\\rm Power \\,\\, [ppm^2 \\, \\mu Hz^{-1}]$', fontsize=28)\n plt.tight_layout()\n if notebook:\n with open('ps.pickle','wb') as f:\n pickle.dump(fig, f)\n if not star.params['show']:\n plt.close()\n"
] |
[
[
"matplotlib.pyplot.rcParams.update",
"numpy.zeros_like",
"numpy.absolute",
"numpy.ma.masked_inside",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.subplot"
]
] |
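The plotting record above weights the autocorrelation function with a Gaussian centred on the expected large separation (`sig = 0.35*exp_dnu/2.35482`): 0.35·Δν is treated as the full width at half maximum, and 2.35482 ≈ 2√(2 ln 2) converts FWHM to a standard deviation. A minimal sketch of that weighting curve, assuming nothing beyond the formula in the record; the name `dnu_weights` and the `fwhm_frac` parameter are illustrative, not part of pySYD's API:

```python
import numpy as np

def dnu_weights(lag, exp_dnu, fwhm_frac=0.35):
    # FWHM -> sigma conversion: FWHM = 2*sqrt(2*ln 2)*sigma ~= 2.35482*sigma
    sigma = fwhm_frac * exp_dnu / 2.35482
    w = np.exp(-(lag - exp_dnu) ** 2 / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2.0 * np.pi))
    return w / w.max()  # normalised to peak at 1, as drawn in the ACF panel

lag = np.linspace(0.0, 20.0, 501)
weights = dnu_weights(lag, exp_dnu=10.0)
print(lag[weights.argmax()])  # 10.0 -- maximum weight sits at the expected dnu
```

Multiplying the ACF by such weights is what biases the peak selection toward frequency separations near the expected Δν.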
Evangeline98/Multi-Label-Classification-with-CNN-and-RNN
|
[
"59ea2093b347fc43600c0d54ca74ab09c80e3ddb"
] |
[
"attention/train.py"
] |
[
"import os, sys\nimport torch.nn.functional as F\nimport time\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom torch import nn\nfrom torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence\nfrom models import Encoder, DecoderWithAttention\nfrom datasets import *\nfrom ut import *\n#from dataset import TrainDataset\nfrom sample import sampler\nimport pandas as pd\nimport numpy as np\n\nglobal f2, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder, data_name, word_map\n\n# Model parameters\nemb_dim = 300 # dimension of word embeddings\nattention_dim = 512 # dimension of attention linear layers\ndecoder_dim = 512 # dimension of decoder RNN\ndropout = 0.5\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") # sets device for model and PyTorch tensors\ncudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead\n\n# Training parameters\nstart_epoch = 0\nepochs = 120 # number of epochs to train for (if early stopping is not triggered)\nepochs_since_improvement = 0 # keeps track of number of epochs since there's been an improvement in validation BLEU\nbatch_size = 64\nworkers = 1 # for data-loading; right now, only 1 works with h5py\nencoder_lr = 1e-4 # learning rate for encoder if fine-tuning\ndecoder_lr = 4e-4 # learning rate for decoder\ngrad_clip = 5. # clip gradients at an absolute value of\nalpha_c = 1. # regularization parameter for 'doubly stochastic attention', as in the paper\nf2 = 0. # BLEU-4 score right now\nprint_freq = 1 # print training/validation stats every __ batches\nfine_tune_encoder = False # fine-tune encoder?\ncheckpoint = 'checkpoint_attention.pth.tar' # path to checkpoint, None if none\n\ndef f2score(scores, targets, l,k):\n \n batch_size = scores.size(0)\n \n def get_score(target, y_pred):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=UndefinedMetricWarning)\n return fbeta_score(target, y_pred, beta=2, average='samples')\n \n y_pred = np.zeros((batch_size, 1103))\n for i in range(batch_size):\n for cls in scores[i,:l[i]]:\n if cls ==1104:\n break\n if cls <=1102:\n y_pred[i,cls] = 1\n #y_pred = np.concatenate(y_pred)\n \n target = np.zeros((batch_size, 1103))\n for i in range(batch_size):\n for cls in targets[i,:]:\n if cls ==1104:\n break\n if cls <=1102:\n target[i,cls] = 1\n #target = np.concatenate(target)\n \n f2 = get_score(target, y_pred)\n return f2\n \n\n\n\ndef train(train_loader, encoder, decoder, criterion, encoder_optimizer, decoder_optimizer, epoch):\n \"\"\"\n Performs one epoch's training.\n\n :param train_loader: DataLoader for training data\n :param encoder: encoder model\n :param decoder: decoder model\n :param criterion: loss layer\n :param encoder_optimizer: optimizer to update encoder's weights (if fine-tuning)\n :param decoder_optimizer: optimizer to update decoder's weights\n :param epoch: epoch number\n \"\"\"\n\n decoder.train() # train mode (dropout and batchnorm is used)\n encoder.train()\n\n batch_time = AverageMeter() # forward prop. + back prop. 
time\n data_time = AverageMeter() # data loading time\n losses = AverageMeter() # loss (per word decoded)\n top5accs = AverageMeter() # top5 accuracy\n top5f2 = AverageMeter() # top5 accuracy\n\n start = time.time()\n\n # Batches\n for i, (imgs, caps, caplens) in enumerate(train_loader):\n data_time.update(time.time() - start)\n\n # Move to GPU, if available\n imgs = imgs.to(device)\n caps = caps.to(device)\n caplens = caplens.to(device)\n\n # Forward prop.\n imgs = encoder(imgs)\n\n scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(imgs, caps, caplens)\n \n# print(\"caps_sorted\",caps_sorted.size())\n # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>\n caps_sorted = caps_sorted.view(caps.size()[0], -1).to(device)\n# print(\"caps_sorted2\",caps_sorted.size())\n targets = caps_sorted[:, 1:]\n targets = targets.type(torch.LongTensor).to(device)\n\n # Remove timesteps that we didn't decode at, or are pads\n # pack_padded_sequence is an easy trick to do this\n# print(\"score1\",scores.size())\n# print(\"targets1\",targets.size())\n S = pack_padded_sequence(scores, decode_lengths, batch_first=True)\n T = pack_padded_sequence(targets, decode_lengths, batch_first=True)\n \n scores = S.data\n targets = T.data\n #scores = scores.to(device)\n #targets = targets.to(device)\n \n # Calculate loss\n \n \n loss = criterion(scores, targets)\n\n # Add doubly stochastic attention regularization\n loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()\n\n # Back prop.\n decoder_optimizer.zero_grad()\n if encoder_optimizer is not None:\n encoder_optimizer.zero_grad()\n loss.backward()\n\n # Clip gradients\n if grad_clip is not None:\n clip_gradient(decoder_optimizer, grad_clip)\n if encoder_optimizer is not None:\n clip_gradient(encoder_optimizer, grad_clip)\n\n # Update weights\n decoder_optimizer.step()\n if encoder_optimizer is not None:\n encoder_optimizer.step()\n \n roughscore = pad_packed_sequence(S, batch_first = True)\n roughscore = roughscore[0].argmax(dim=2)\n #print(roughscore)\n #print(\"rough\",roughscore[0].argmax(dim=2))\n rought = pad_packed_sequence(T, batch_first = True)[0]\n #print(rought)\n #print(\"rough\",rought[0])\n \n\n # Keep track of metrics\n top5 = accuracy(scores, targets,5)\n top5f = f2score(roughscore, rought,decode_lengths, 5)\n losses.update(loss.item(), sum(decode_lengths))\n top5accs.update(top5, sum(decode_lengths))\n top5f2.update(top5f, sum(decode_lengths))\n batch_time.update(time.time() - start)\n\n start = time.time()\n\n # Print status\n if i % print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'f2 {top5f2.val:.4f} ({top5f2.avg:.4f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader),\n batch_time=batch_time,\n top5f2=top5f2,\n loss=losses,\n top5=top5accs))\n\n\n\ndef caption_image_beam_search(encoder, decoder, image, beam_size = 3):\n \"\"\"\n Reads an image and captions it with beam search.\n\n :param encoder: encoder model\n :param decoder: decoder model\n :param image_path: path to image\n :param word_map: word map\n :param beam_size: number of sequences to consider at each decode-step\n :return: caption, weights for visualization\n \"\"\"\n\n k = beam_size\n vocab_size = 1106\n\n encoder_out = image # (1, enc_image_size, enc_image_size, encoder_dim)\n encoder_out = encoder_out.unsqueeze(0)\n enc_image_size = encoder_out.size(1)\n encoder_dim = 
encoder_out.size(3)\n\n # Flatten encoding\n encoder_out = encoder_out.view(1, -1, encoder_dim) # (1, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n\n # We'll treat the problem as having a batch size of k\n encoder_out = encoder_out.expand(k, num_pixels, encoder_dim) # (k, num_pixels, encoder_dim)\n\n # Tensor to store top k previous words at each step; now they're just <start>\n k_prev_words = torch.LongTensor([[1103]]*k).to(device) # (k, 1)\n # Tensor to store top k sequences; now they're just <start>\n seqs = k_prev_words # (k, 1)\n # Tensor to store top k sequences' scores; now they're just 0\n top_k_scores = torch.zeros(k, 1).to(device) # (k, 1)\n # Tensor to store top k sequences' alphas; now they're just 1s\n seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device) # (k, 1, enc_image_size, enc_image_size)\n\n # Lists to store completed sequences, their alphas and scores\n complete_seqs = list()\n complete_seqs_alpha = list()\n complete_seqs_scores = list()\n\n # Start decoding\n step = 1\n h, c = decoder.init_hidden_state(encoder_out)\n\n # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>\n while True:\n embeddings = decoder.embedding(k_prev_words).squeeze(1) # (s, embed_dim)\n awe, alpha = decoder.attention(encoder_out, h) # (s, encoder_dim), (s, num_pixels)\n alpha = alpha.view(-1, enc_image_size, enc_image_size) # (s, enc_image_size, enc_image_size)\n gate = decoder.sigmoid(decoder.f_beta(h)) # gating scalar, (s, encoder_dim)\n awe = gate * awe\n h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c)) # (s, decoder_dim)\n scores = decoder.fc(h) # (s, vocab_size)\n scores = F.log_softmax(scores, dim=1)\n # Add\n scores = top_k_scores.expand_as(scores) + scores # (s, vocab_size)\n # For the first step, all k points will have the same scores (since same k previous words, h, c)\n if step == 1:\n top_k_scores, top_k_words = scores[0].topk(k, 0, True, True) # (s)\n else:\n # Unroll and find top scores, and their unrolled indices\n top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True) # (s)\n\n # Convert unrolled indices to actual indices of scores\n prev_word_inds = top_k_words / vocab_size # (s)\n next_word_inds = top_k_words % vocab_size # (s)\n\n # Add new words to sequences, alphas\n seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1)\n seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],\n dim=1) # (s, step+1, enc_image_size, enc_image_size)\n\n # Which sequences are incomplete (didn't reach <end>)?\n incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if\n next_word != 1104]\n complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))\n\n # Set aside complete sequences\n if len(complete_inds) > 0:\n complete_seqs.extend(seqs[complete_inds].tolist())\n complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())\n complete_seqs_scores.extend(top_k_scores[complete_inds])\n k -= len(complete_inds) # reduce beam length accordingly\n\n # Proceed with incomplete sequences\n if k == 0:\n break\n seqs = seqs[incomplete_inds]\n seqs_alpha = seqs_alpha[incomplete_inds]\n h = h[prev_word_inds[incomplete_inds]]\n c = c[prev_word_inds[incomplete_inds]]\n encoder_out = encoder_out[prev_word_inds[incomplete_inds]]\n top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)\n k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)\n\n # Break if things have been 
going on too long\n if step > 50:\n break\n step += 1\n\n i = complete_seqs_scores.index(max(complete_seqs_scores))\n seq = complete_seqs[i]\n alphas = complete_seqs_alpha[i]\n\n return seq, alphas\n \n \n\n\ndef validate(val_loader, encoder, decoder):\n \"\"\"\n Performs one epoch's validation.\n :param val_loader: DataLoader for validation data.\n :param encoder: encoder model\n :param decoder: decoder model\n :param criterion: loss layer\n :return: BLEU-4 score\n \"\"\"\n decoder.eval() # eval mode (no dropout or batchnorm)\n if encoder is not None:\n encoder.eval()\n \n vocab_size = 1106\n\n with torch.no_grad():\n # Batches\n f2 = []\n for i, (imgs, caps, caplens) in enumerate(val_loader):\n\n # Move to device, if available\n imgs = imgs.to(device)\n caps = caps.to(device)\n #caplens = caplens.to(device)\n\n # Forward prop.\n if encoder is not None:\n imgs = encoder(imgs)\n \n batch_size = imgs.size(0)\n for image_index in range(batch_size): \n seq,_ = caption_image_beam_search(encoder, decoder, imgs[image_index], 4)\n seq = np.array(seq)\n # Since we decoded starting with <start>, the targets are all words after <start>, up to <end>\n targets = torch.zeros(1103)\n y_pred = torch.zeros(1103)\n \n \n for cls1 in caps[image_index,:]:\n if cls1<=1102:\n targets[cls1] = 1\n if cls1 == 1104:\n break\n \n for cls2 in seq:\n if cls2<=1102:\n y_pred[cls2] = 1\n if cls2 == 1104:\n break\n \n def getscore(targets,y_pred):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=UndefinedMetricWarning)\n return fbeta_score(\n targets, y_pred, beta=2)\n \n f2.append(getscore(targets,y_pred))\n\n if i % print_freq == 0:\n print('Validation: [{0}/{1}]\\t'\n 'f2score {f2:.4f})\\t'\n .format(i, len(val_loader),f2 = np.mean(f2)))\n\n # Store references (true captions), and hypothesis (prediction) for each image\n # If for n images, we have n hypotheses, and references a, b, c... 
for each image, we need -\n # references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]\n\n # References\n \n\n return np.mean(f2)\n\n\n\"\"\"\nTraining and validation.\n\"\"\"\n\n\n\n# Read word map\n# word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')\n# with open(word_map_file, 'r') as j:\n# word_map = json.load(j)\nword_map = pd.read_csv('~/data/labels.csv')\nmark = pd.DataFrame([[1103,'<start>'],[1104,'<end>'],[1105,'<pad>']])\nmark.columns = word_map.columns\nword_map.append(mark)\n# Initialize / load checkpoint\nif checkpoint is None:\n decoder = DecoderWithAttention(attention_dim=attention_dim,\n embed_dim=emb_dim,\n decoder_dim=decoder_dim,\n vocab_size= 1106,\n dropout=dropout)\n decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),\n lr=decoder_lr)\n encoder = Encoder()\n encoder.fine_tune(fine_tune_encoder)\n encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),\n lr=encoder_lr) if fine_tune_encoder else None\n\nelse:\n checkpoint = torch.load(checkpoint)\n start_epoch = checkpoint['epoch'] + 1\n epochs_since_improvement = checkpoint['epochs_since_improvement']\n f2 = checkpoint['f2']\n decoder = checkpoint['decoder']\n decoder_optimizer = checkpoint['decoder_optimizer']\n encoder = checkpoint['encoder']\n encoder_optimizer = checkpoint['encoder_optimizer']\n if fine_tune_encoder is True and encoder_optimizer is None:\n encoder.fine_tune(fine_tune_encoder)\n encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),\n lr=encoder_lr)\n\n# Move to GPU, if available\ndecoder = decoder.to(device)\nencoder = encoder.to(device)\n\n# Loss function\ncriterion = nn.CrossEntropyLoss().to(device)\n\n# Custom dataloaders\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\ntrain_loader, val_loader = sampler(0, 64)\n# Epochs\nfor epoch in range(start_epoch, epochs):\n\n # Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20\n if epochs_since_improvement == 20:\n break\n if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:\n adjust_learning_rate(decoder_optimizer, 0.8)\n if fine_tune_encoder:\n adjust_learning_rate(encoder_optimizer, 0.8)\n #epoch = 0\n # One epoch's training\n train(train_loader=train_loader,\n encoder=encoder,\n decoder=decoder,\n criterion=criterion,\n encoder_optimizer=encoder_optimizer,\n decoder_optimizer=decoder_optimizer,\n epoch=epoch)\n\n # One epoch's validation\n recent_f2 = validate(val_loader=val_loader,\n encoder=encoder,\n decoder=decoder)\n\n # Check if there was an improvement\n is_best = recent_f2 > f2\n f2 = max(f2, recent_f2)\n if not is_best:\n epochs_since_improvement += 1\n print(\"\\nEpochs since last improvement: %d\\n\" % (epochs_since_improvement,))\n else:\n epochs_since_improvement = 0\n \n state = {'epoch': epoch,\n 'epochs_since_improvement': epochs_since_improvement,\n 'f2': f2,\n 'encoder': encoder,\n 'decoder': decoder,\n 'encoder_optimizer': encoder_optimizer,\n 'decoder_optimizer': decoder_optimizer}\n filename = 'checkpoint_' + 'attention' + '.pth.tar'\n torch.save(state, filename)\n # If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint\n if is_best:\n torch.save(state, 'BEST_' + filename)\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"numpy.mean",
"torch.nn.functional.log_softmax",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequence",
"pandas.read_csv",
"torch.nn.CrossEntropyLoss"
]
] |
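In the record above, `caption_image_beam_search` flattens the per-beam score matrix, takes a single top-k over it, and then recovers which beam each flat index came from (integer division by the vocabulary size) and which word it selects (the remainder). A minimal sketch of that bookkeeping with a made-up `scores` tensor; note the record divides with plain `/`, which only behaves as floor division on older PyTorch, so on PyTorch ≥ 1.8 one would write `torch.div(..., rounding_mode='floor')` as below:

```python
import torch

# Scores for k=3 live beams over a toy vocabulary of 5 words,
# flattened exactly as in the beam search: scores.view(-1).topk(k).
scores = torch.tensor([[0.1, 0.9, 0.0, 0.2, 0.3],
                       [0.8, 0.1, 0.0, 0.0, 0.0],
                       [0.0, 0.0, 0.7, 0.0, 0.0]])
k = 3
top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)

# Floor division recovers the source beam; the remainder recovers the word id.
vocab_size = scores.size(1)
prev_word_inds = torch.div(top_k_words, vocab_size, rounding_mode='floor')
next_word_inds = top_k_words % vocab_size
print(prev_word_inds.tolist(), next_word_inds.tolist())  # [0, 1, 2] [1, 0, 2]
```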
HephaestusProject/pytorch-binaryconnect
|
[
"0a07a524522e993366749a865ae4bdf927cea3b5"
] |
[
"tests/test_net.py"
] |
[
"import pytest\nimport pytorch_lightning\nimport torch\nfrom omegaconf import OmegaConf\n\nfrom src.model.net import BinaryConv, BinaryLinear\n\n\[email protected](scope=\"module\")\ndef fix_seed():\n pytorch_lightning.seed_everything(777)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\[email protected](scope=\"module\")\ndef tearup_binarylinear_model_config():\n return OmegaConf.create(\n {\n \"params\": {\n \"width\": 28,\n \"height\": 28,\n \"channels\": 1,\n \"in_feature\": 784,\n \"classes\": 10,\n \"mode\": \"stochastic\",\n \"feature_layers\": {\n \"linear\": [\n {\n \"in_feature\": 784,\n \"out_feature\": 1024,\n \"bias\": True,\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"mode\": \"stochastic\",\n },\n {\n \"in_feature\": 1024,\n \"out_feature\": 1024,\n \"bias\": True,\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"mode\": \"stochastic\",\n },\n {\n \"in_feature\": 1024,\n \"out_feature\": 10,\n \"bias\": True,\n \"batch_norm\": True,\n \"activation\": None,\n \"mode\": \"stochastic\",\n },\n ]\n },\n \"output_layer\": {\"type\": \"Softmax\", \"args\": {\"dim\": 1}},\n },\n \"type\": \"BinaryLinear\",\n }\n )\n\n\nbinarylinear_forward_test_case = [\n # (device, test_input)\n (\"cpu\", torch.randn(((2, 1, 28, 28)))),\n (torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"), torch.randn(((2, 1, 28, 28)))),\n]\n\n\[email protected](\n \"device, test_input\", binarylinear_forward_test_case,\n)\ndef test_binarylinear_forward(\n fix_seed, tearup_binarylinear_model_config, device, test_input,\n):\n\n model = BinaryLinear(tearup_binarylinear_model_config).to(device)\n\n test_input = test_input.to(device)\n model(test_input)\n\n\[email protected](scope=\"module\")\ndef tearup_binaryconv_model_config():\n return OmegaConf.create(\n {\n \"params\": {\n \"width\": 32,\n \"height\": 32,\n \"channels\": 3,\n \"classes\": 10,\n \"mode\": \"stochastic\",\n \"feature_layers\": {\n \"conv\": [\n {\n \"in_channels\": 3,\n \"out_channels\": 128,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 0,\n \"dilation\": 1,\n \"groups\": 1,\n \"bias\": True,\n \"padding_mode\": \"zeros\",\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"pool\": None,\n \"mode\": \"stochastic\",\n },\n {\n \"in_channels\": 128,\n \"out_channels\": 128,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 0,\n \"dilation\": 1,\n \"groups\": 1,\n \"bias\": True,\n \"padding_mode\": \"zeros\",\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"pool\": {\n \"type\": \"MaxPool2d\",\n \"args\": {\n \"kernel_size\": [2, 2],\n \"stride\": None,\n \"padding\": 0,\n \"dilation\": 1,\n \"return_indices\": False,\n \"ceil_mode\": False,\n },\n },\n \"mode\": \"stochastic\",\n },\n {\n \"in_channels\": 128,\n \"out_channels\": 256,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 0,\n \"dilation\": 1,\n \"groups\": 1,\n \"bias\": True,\n \"padding_mode\": \"zeros\",\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"pool\": None,\n \"mode\": \"stochastic\",\n },\n {\n \"in_channels\": 256,\n \"out_channels\": 256,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 0,\n \"dilation\": 1,\n \"groups\": 1,\n \"bias\": True,\n \"padding_mode\": \"zeros\",\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"pool\": {\n \"type\": \"MaxPool2d\",\n \"args\": {\n \"kernel_size\": [2, 2],\n 
\"stride\": None,\n \"padding\": 0,\n \"dilation\": 1,\n \"return_indices\": False,\n \"ceil_mode\": False,\n },\n },\n \"mode\": \"stochastic\",\n },\n {\n \"in_channels\": 256,\n \"out_channels\": 512,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 0,\n \"dilation\": 1,\n \"groups\": 1,\n \"bias\": True,\n \"padding_mode\": \"zeros\",\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"pool\": None,\n \"mode\": \"stochastic\",\n },\n {\n \"in_channels\": 512,\n \"out_channels\": 512,\n \"kernel_size\": 3,\n \"stride\": 1,\n \"padding\": 0,\n \"dilation\": 1,\n \"groups\": 1,\n \"bias\": True,\n \"padding_mode\": \"zeros\",\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"pool\": None,\n \"mode\": \"stochastic\",\n },\n ],\n \"linear\": [\n {\n \"in_feature\": 512,\n \"out_feature\": 1024,\n \"bias\": True,\n \"batch_norm\": True,\n \"activation\": None,\n \"mode\": \"stochastic\",\n },\n {\n \"in_feature\": 1024,\n \"out_feature\": 1024,\n \"bias\": True,\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"mode\": \"stochastic\",\n },\n {\n \"in_feature\": 1024,\n \"out_feature\": 10,\n \"bias\": True,\n \"batch_norm\": True,\n \"activation\": {\"type\": \"ReLU\", \"args\": {}},\n \"mode\": \"stochastic\",\n },\n ],\n },\n \"output_layer\": {\"type\": \"Softmax\", \"args\": {\"dim\": 1}},\n },\n \"type\": \"BinaryConv\",\n }\n )\n\n\nbinaryconv_forward_test_case = [\n # (device, test_input)\n (\"cpu\", torch.randn(((2, 3, 32, 32)))),\n (torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"), torch.randn(((2, 3, 32, 32)))),\n]\n\n\[email protected](\n \"device, test_input\", binaryconv_forward_test_case,\n)\ndef test_binaryconv_forward(\n fix_seed, tearup_binaryconv_model_config, device, test_input,\n):\n\n model = BinaryConv(tearup_binaryconv_model_config).to(device)\n\n test_input = test_input.to(device)\n model(test_input)\n\n\nsummary_test_case = [\n # (device, test_input)\n (\"cpu\"),\n (torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")),\n]\n\n\[email protected](\"device\", summary_test_case)\ndef test_binarylinear_summary(fix_seed, tearup_binarylinear_model_config, device):\n model = BinaryLinear(tearup_binarylinear_model_config).to(device=device)\n model.summary()\n\n\[email protected](\"device\", summary_test_case)\ndef test_binaryconv_summary(fix_seed, tearup_binaryconv_model_config, device):\n model = BinaryConv(tearup_binaryconv_model_config).to(device=device)\n model.summary()\n"
] |
[
[
"torch.cuda.is_available",
"torch.randn"
]
] |
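The test module above runs every forward pass twice, once on CPU and once on CUDA when available, via `pytest.mark.parametrize` plus a module-scoped seeding fixture. A self-contained sketch of the same pattern, using `torch.nn.Linear` as a stand-in for the repo's `BinaryLinear` (the model and shapes here are illustrative only):

```python
import pytest
import torch


@pytest.fixture(scope="module")
def fix_seed():
    # Deterministic runs so the CPU and GPU cases see comparable conditions.
    torch.manual_seed(777)


forward_test_case = [
    # (device, test_input)
    ("cpu", torch.randn(2, 8)),
    (torch.device("cuda" if torch.cuda.is_available() else "cpu"), torch.randn(2, 8)),
]


@pytest.mark.parametrize("device, test_input", forward_test_case)
def test_linear_forward(fix_seed, device, test_input):
    model = torch.nn.Linear(8, 4).to(device)
    out = model(test_input.to(device))
    assert out.shape == (2, 4)
```

On a machine without CUDA, the second case silently degrades to a duplicate CPU run, which is exactly the behaviour of the record's test cases.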
malmgrek/gammy
|
[
"20ca24b45fa81ad29453123a13e19b3ff7ae33ab"
] |
[
"gammy/utils.py"
] |
[
"\"\"\"Miscellaneous utilities\n\n\"\"\"\n\nimport functools\nfrom typing import (Callable, Dict, Generator, Iterable, List, Tuple)\n\nimport numpy as np\nimport scipy as sp\nfrom scipy import interpolate\n\n\n#\n# Functional\n# ~~~~~~~~~~\n#\n\n\n# TODO: A proper curry\ndef curryish(f: Callable) -> Callable:\n \"\"\"Lifted partial application\n\n \"\"\"\n\n def g(*args, **kwargs):\n return functools.partial(f, *args, **kwargs)\n\n return g\n\n\ndef compose2(f: Callable, g: Callable) -> Callable:\n \"\"\"Compose two functions\n\n \"\"\"\n\n def h(*args, **kwargs):\n return f(g(*args, **kwargs))\n\n return h\n\n\ndef lift(func: Callable) -> Callable:\n \"\"\"Transforms a function into an operator\n\n \"\"\"\n return lambda f: compose2(func, f)\n\n\ndef rlift(func: Callable) -> Callable:\n \"\"\"Lift from right\n\n \"\"\"\n return lambda f: compose2(f, func)\n\n\ndef compose(*funcs: Callable) -> Callable:\n \"\"\"Function composition\n\n \"\"\"\n return functools.partial(functools.reduce, compose2)(funcs)\n\n\ndef pipe(arg, *funcs: Callable) -> Callable:\n \"\"\"Piping an object through functions\n\n \"\"\"\n return compose(*funcs[::-1])(arg)\n\n\nlistmap = curryish(compose(list, map))\nlistmap.__doc__ = \"\"\"Map for lists with partial evaluation\n\n\"\"\"\n\ntuplemap = curryish(compose(tuple, map))\ntuplemap.__doc__ = \"\"\"Map for tuples with partial evaluation\n\n\"\"\"\n\nlistfilter = curryish(compose(list, filter))\nlistfilter.__doc__ = \"\"\"Filter for lists with partial evaluation\n\n\"\"\"\n\ntuplefilter = curryish(compose(tuple, filter))\ntuplefilter.__doc__ = \"\"\"Filter for tuples with partial evaluation\n\n\"\"\"\n\n\n#\n# Iterables\n# ~~~~~~~~~\n#\n\n\ndef unflatten(x: list, y: list) -> list:\n \"\"\"Unflatten according to a reference\n\n Examples\n --------\n\n .. 
code-block:: python\n\n unflatten([1, 2, 3], [[\"a\", \"b\"], [\"c\"]])\n # [[1, 2], [3]]\n\n \"\"\"\n def func(cum, this):\n x_crop, res = cum\n return [\n x_crop[len(this):], res + [x_crop[:len(this)]]\n ]\n\n return functools.reduce(func, list(y), [list(x), []])[-1]\n\n\ndef extract_diag_blocks(x: np.ndarray, y: list) -> List[np.ndarray]:\n \"\"\"Extract diagonal blocks from a matrix according to a reference\n\n \"\"\"\n\n def func(cum, this):\n x_crop, res = cum\n return [\n x_crop[len(this):, len(this):],\n res + [x_crop[:len(this), :len(this)]]\n ]\n\n return functools.reduce(func, list(y), [x, []])[-1]\n\n\ndef extend_spline_grid(grid, order: int) -> np.ndarray:\n \"\"\"Grid extension for higher order splines\n\n \"\"\"\n if order < 1:\n raise ValueError(\n \"Spline order = n + 1 where n >= 0 is the polynomial degree\"\n )\n return grid if order == 1 else pipe(\n grid,\n lambda x: np.append(\n x, np.diff(x)[-(order - 1):][::-1].cumsum() + x[-1]\n ),\n lambda x: np.append(\n x[0] - np.diff(x)[:(order - 1)].cumsum()[::-1], x\n )\n )\n\n\ndef gen_spline_args_from_grid_ext(grid_ext: np.ndarray, order: int, extrapolate: bool) -> Generator:\n \"\"\"Spline arguments generator from extended grid\n\n Parameters\n ----------\n grid_ext : np.ndarray\n Extended grid\n order : int\n Order of the splines\n extrapolate : bool\n Allow smooth(ish) extrapolation\n\n \"\"\"\n n = len(grid_ext) - order # Number of basis functions\n (i_left, i_right) = (\n (1, n - 1) if order == 1 else (order - 1, n - order + 1)\n )\n return (\n (grid_ext[i:i + order + 1],) + (\n (extrapolate, -1) if i < i_left\n else (\n (extrapolate, 1) if i >= i_right else (False, 0)\n )\n )\n for i in range(n)\n )\n\n\n#\n# Basis function generation tools\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# TODO / FIXME: The linear-algebraic stuff below remains unfortunately\n# largely untested.\n#\n\n\ndef squared_dist(x1, x2) -> np.ndarray:\n \"\"\"Squared distance matrix for column array of N-dimensional points\n\n Parameters\n ----------\n x1 : np.ndarray\n 1-D Column array\n x2 : np.ndarray\n 1-D Column array\n\n Examples\n --------\n\n .. 
code-block:: python\n\n x = np.array([[0], [1], [2]])\n squared_dist(x, x)\n # array([[0, 1, 4],\n # [1, 0, 1],\n # [4, 1, 0]])\n\n \"\"\"\n return (\n np.sum(x1 ** 2, 1).reshape(-1, 1) +\n np.sum(x2 ** 2, 1) -\n 2 * np.dot(x1, x2.T)\n )\n\n\ndef exp_squared(x1, x2, corrlen=1.0, sigma=1.0) -> np.ndarray:\n \"\"\"Exponential squared kernel function\n\n \"\"\"\n return sigma * np.exp(-0.5 / corrlen ** 2 * squared_dist(x1, x2))\n\n\ndef exp_sine_squared(x1, x2, corrlen=1.0, sigma=1.0, period=1.0) -> np.ndarray:\n \"\"\"Exponential sine squared kernel function\n\n \"\"\"\n return sigma * np.exp(\n -2.0 / corrlen ** 2 * np.sin(\n np.pi * np.sqrt(squared_dist(x1, x2)) / period\n ) ** 2\n )\n\n\ndef rational_quadratic(x1, x2, corrlen=1.0, sigma=1.0, alpha=1.0) -> np.ndarray:\n \"\"\"Rational quadratic kernel function\n\n \"\"\"\n return sigma * (\n 1 + squared_dist(x1, x2) / 2.0 / alpha / corrlen ** 2\n ) ** -alpha\n\n\ndef ornstein_uhlenbeck(x1, x2, corrlen=1.0, sigma=1.0) -> np.ndarray:\n \"\"\"Ornstein-Uhlenbeck kernel function\n\n \"\"\"\n return sigma * np.exp(\n -np.sqrt(squared_dist(x1, x2)) / corrlen\n )\n\n\ndef white_noise(n_dims: int, sigma=1.0, **unused) -> np.ndarray:\n \"\"\"White noise kernel function\n\n \"\"\"\n return sigma * np.identity(n_dims)\n\n\ndef decompose_covariance(H, energy: float=1.01) -> np.ndarray:\n \"\"\"Most important eigenvectors of a symmetric positive-definite square matrix\n\n Ordered with respect of the descending eigenvalues. Each\n eigenvector scaled with ``sqrt(Ξ»)``. For theoretical justification,\n see the section on Gaussian Processes in the package documentation.\n\n NOTE: In the implementation we use np.linalg.svd instead of np.linalg.eigh\n because the latter sometimes returns slightly negative eigenvalues for\n numerical reasons. In those cases the energy trick doesn't give all\n eigenvectors even if we wanted\n\n REVIEW: There might be problem with serialization. If there are duplicate\n eigenvalues, then on different machines, the vectors might appear in\n different order.\n\n Parameters\n ----------\n H : np.ndarray\n Symmetric positive-definite square matrix\n energy : float\n Truncate to eigenvalues that sum up to this proportion of the total\n eigenvalue sum. 
If absolutelu all eigenvectors are needed, give value\n slightly larger than one.\n\n \"\"\"\n\n #\n # Comparison of np.linalg.eigh and np.linalg.svd\n #\n # (W, V) = np.linalg.eigh(H)\n # (U, S, Vh) = np.linalg.svd(H)\n #\n # Holds up to numerical sanity: V[:, ::-1] == U == Vh.T\n #\n\n (U, S, Vh) = np.linalg.svd(H)\n crop = (S.cumsum() / S.sum()) <= energy\n return np.dot(U[:, crop], np.sqrt(np.diag(S[crop])))\n\n\ndef interp_arrays1d(v, grid, **kwargs) -> List:\n \"\"\"Create list of interpolators from a given array\n\n Parameters\n ----------\n v : np.array\n Each column is a \"basis\" vector\n grid : np.ndarray\n Interpolation grid\n\n \"\"\"\n return [\n interpolate.interp1d(grid, v[:, i], **kwargs)\n for i in range(v.shape[1])\n ]\n\n\n#\n# Files and I/O\n# ~~~~~~~~~~~~~\n#\n\n\ndef write_to_hdf5(group, data, name):\n \"\"\"Add data to HDF5 handler\n\n \"\"\"\n try:\n group.create_dataset(name, data=data, compression=\"gzip\")\n except TypeError:\n group.create_dataset(name, data=data)\n except ValueError:\n raise ValueError(f\"Could not write {data}\")\n\n\n#\n# BayesPy related\n# ~~~~~~~~~~~~~~~\n#\n\n\ndef concat_gaussians(gaussians):\n \"\"\"Concatenate means and covariances to one Gaussian\n\n Parameters\n ----------\n gaussians : List[Tuple[np.ndarray]]\n List of mean-precision tuples of each Gaussian\n\n \"\"\"\n return (\n np.hstack([g[0] for g in gaussians]),\n sp.linalg.block_diag(*[g[1] for g in gaussians])\n )\n\n\ndef solve_covariance(u) -> np.ndarray:\n \"\"\"Solve covariance matrix from moments\n\n Parameters\n ----------\n u : List[np.ndarray]\n List of moments as defined by the ``get_moments()`` method call\n of a BayesPy node object.\n\n \"\"\"\n cov = u[1] - np.outer(u[0], u[0])\n return cov if cov.shape != (1, 1) else np.array(cov.sum())\n\n\nsolve_precision = compose(np.linalg.inv, solve_covariance)\nsolve_precision.__doc__ = \"\"\"Solve precision matrix from moments\n\n\"\"\"\n\n\ndef jsonify(node) -> Dict:\n \"\"\"Turn a expfamily node into a JSON serializable dict\n\n \"\"\"\n return {\n **{\n \"u{0}\".format(i):\n ui.tolist() for (i, ui) in enumerate(node.u)\n },\n **{\n \"observed\": node.observed\n },\n **{\n \"phi{0}\".format(i):\n phii.tolist() for (i, phii) in enumerate(node.phi)\n },\n **{\n \"f\": node.f.tolist(),\n \"g\": node.g.tolist()\n }\n }\n\n\ndef set_from_json(raw: dict, node):\n \"\"\"Set BayesPy node attributes from JSON\n\n \"\"\"\n node.u = [\n np.array(raw[\"u{0}\".format(i)]) for i in range(len(node.u))\n ]\n node.observed = raw[\"observed\"]\n node.phi = [\n np.array(raw[\"phi{0}\".format(i)]) for i in range(len(node.phi))\n ]\n node.f = np.array(raw[\"f\"])\n node.g = np.array(raw[\"g\"])\n return node\n\n\ndef peaks(x, y):\n \"\"\"The MATLAB function\n\n \"\"\"\n return (\n 3 * (1 - x) ** 2 * np.exp(-(x ** 2) - (y + 1) ** 2) -\n 10 * (x / 5 - x ** 3 - y ** 5) * np.exp(-x ** 2 - y ** 2) -\n 1 / 3 * np.exp(-(x + 1) ** 2 - y ** 2)\n )\n"
] |
[
[
"numpy.array",
"scipy.interpolate.interp1d",
"numpy.dot",
"numpy.sum",
"scipy.linalg.block_diag",
"numpy.exp",
"numpy.diff",
"numpy.identity",
"numpy.linalg.svd",
"numpy.outer",
"numpy.hstack",
"numpy.diag"
]
] |
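`decompose_covariance` in the record above returns the eigenvectors of a symmetric positive-definite matrix, each scaled by √λ, so the product of the result with its own transpose reconstructs the input (up to the energy truncation). A small self-check of that defining property, reproducing the function with `energy > 1` so no eigenvalue is dropped:

```python
import numpy as np

def decompose_covariance(H, energy=1.01):
    # SVD rather than eigh: eigh can return tiny negative eigenvalues on PSD input
    (U, S, Vh) = np.linalg.svd(H)
    crop = (S.cumsum() / S.sum()) <= energy
    return np.dot(U[:, crop], np.sqrt(np.diag(S[crop])))

H = np.array([[2.0, 0.5],
              [0.5, 1.0]])
B = decompose_covariance(H)
print(np.allclose(B @ B.T, H))  # True: column i of B is eigenvector i scaled by sqrt(lambda_i)
```

With `energy < 1`, `crop` drops the trailing singular values and `B @ B.T` becomes a low-rank approximation of `H` instead of an exact reconstruction.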
Hiroshiba/hihobot-synthesis
|
[
"93c149885ec1be5ee9bcfcb8ad99785cb1eb6731"
] |
[
"hihobot_synthesis/config.py"
] |
[
"import json\nfrom pathlib import Path\nfrom typing import Tuple, List, Dict, NamedTuple, Optional\n\nimport numpy as np\n\n\nclass Config(NamedTuple):\n lowest_frequency: Optional[float]\n\n mgc_dim: int\n lf0_dim: int\n vuv_dim: int\n bap_dim: int\n\n duration_linguistic_dim: int\n acoustic_linguisic_dim: int\n duration_dim: int\n acoustic_dim: int\n\n fs: int\n frame_period: int\n fftlen: int\n alpha: float\n hop_length: int\n\n mgc_start_idx: int\n lf0_start_idx: int\n vuv_start_idx: int\n bap_start_idx: int\n\n windows: Tuple[int, int, List[float]]\n\n use_phone_alignment: bool\n\n num_hidden_layers: Dict[str, int]\n hidden_size: Dict[str, int]\n\n batch_size: int\n n_workers: int\n pin_memory: bool\n nepoch: int\n n_save_epoch: int\n lr: float\n weight_decay: float\n\n X_channel: Dict[str, int]\n Y_channel: Dict[str, int]\n X_min: Dict[str, np.ndarray]\n X_max: Dict[str, np.ndarray]\n Y_mean: Dict[str, np.ndarray]\n Y_var: Dict[str, np.ndarray]\n Y_scale: Dict[str, np.ndarray]\n\n\ndef load_from_json(p: Path):\n d = json.load(p.open())\n return Config(\n lowest_frequency=d['lowest_frequency'],\n\n mgc_dim=d['mgc_dim'],\n lf0_dim=d['lf0_dim'],\n vuv_dim=d['vuv_dim'],\n bap_dim=d['bap_dim'],\n\n duration_linguistic_dim=d['duration_linguistic_dim'],\n acoustic_linguisic_dim=d['acoustic_linguisic_dim'],\n duration_dim=d['duration_dim'],\n acoustic_dim=d['acoustic_dim'],\n\n fs=d['fs'],\n frame_period=d['frame_period'],\n fftlen=d['fftlen'],\n alpha=d['alpha'],\n hop_length=d['hop_length'],\n\n mgc_start_idx=d['mgc_start_idx'],\n lf0_start_idx=d['lf0_start_idx'],\n vuv_start_idx=d['vuv_start_idx'],\n bap_start_idx=d['bap_start_idx'],\n\n windows=d['windows'],\n\n use_phone_alignment=d['use_phone_alignment'],\n\n num_hidden_layers=d['num_hidden_layers'],\n hidden_size=d['hidden_size'],\n\n batch_size=d['batch_size'],\n n_workers=d['n_workers'],\n pin_memory=d['pin_memory'],\n nepoch=d['nepoch'],\n n_save_epoch=d['n_save_epoch'],\n lr=d['lr'],\n weight_decay=d['weight_decay'],\n\n X_channel=d['X_channel'],\n Y_channel=d['Y_channel'],\n X_min={k: np.array(v) for k, v in d['X_min'].items()},\n X_max={k: np.array(v) for k, v in d['X_max'].items()},\n Y_mean={k: np.array(v) for k, v in d['Y_mean'].items()},\n Y_var={k: np.array(v) for k, v in d['Y_var'].items()},\n Y_scale={k: np.array(v) for k, v in d['Y_scale'].items()},\n )\n"
] |
[
[
"numpy.array"
]
] |
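The config module above maps a flat JSON dictionary onto a typed `NamedTuple`, so a missing or misspelled key fails loudly at construction time instead of deep inside training. A trimmed sketch of the same idea; `MiniConfig` and `load_config` are illustrative stand-ins, not the repo's full `Config`/`load_from_json`:

```python
import json
from typing import NamedTuple, Optional


class MiniConfig(NamedTuple):
    fs: int
    frame_period: int
    lowest_frequency: Optional[float]


def load_config(text: str) -> MiniConfig:
    d = json.loads(text)
    # NamedTuple rejects unexpected keyword arguments and missing required
    # fields with a TypeError, which is the validation this pattern buys.
    return MiniConfig(fs=d['fs'],
                      frame_period=d['frame_period'],
                      lowest_frequency=d.get('lowest_frequency'))


cfg = load_config('{"fs": 24000, "frame_period": 5}')
print(cfg.fs, cfg.lowest_frequency)  # 24000 None
```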
IchiruTake/Bit2Edge
|
[
"4eec1703426042bb3b823d6e244427b86595ac76"
] |
[
"Bit2Edge_Updated/Preprocessing.py"
] |
[
"import gc\r\nfrom logging import warning\r\nfrom time import perf_counter\r\nfrom typing import Callable, List, Optional, Tuple, Union, Set, Dict, Any\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom numpy import ndarray\r\nfrom scipy.sparse import coo_matrix, spmatrix, csc_matrix, csr_matrix\r\n\r\nfrom .coreConfig import EXTRA_LIBRARY\r\n\r\n\r\ndef checkCompatibility():\r\n from rdkit import __version__ as rdkit_version\r\n from sklearn import __version__ as sklearn_version\r\n from tensorflow import __version__ as tf_version\r\n np_major, np_minor, np_patch = np.__version__.split(\".\")\r\n pd_major, pd_minor, pd_patch = pd.__version__.split(\".\")\r\n rdkit_major, rdkit_minor, rdkit_patch = rdkit_version.split(\".\")\r\n sklearn_major, sklearn_minor, sklearn_patch = sklearn_version.split(\".\")\r\n tf_major, tf_minor, tf_patch = tf_version.split(\".\")\r\n\r\n if not (int(np_major) == 1 and int(np_minor) >= 18):\r\n raise ImportWarning(f\"Numpy version is relatively low ({np.__version__}). Please upgrade into version at \"\r\n f\"least 1.18+. Try version 1.20.3\")\r\n\r\n if not (int(pd_major) == 1 and int(pd_minor) >= 2):\r\n raise ImportWarning(f\"Pandas version is relatively low ({pd.__version__}). Please upgrade into version at \"\r\n \"least 1.2.x. Try version 1.2.4\")\r\n\r\n if not (int(sklearn_major) == 0 and int(sklearn_minor) >= 23):\r\n raise ImportWarning(f\"Scikit-Learn version is relatively low ({sklearn_version}). Please upgrade into version \"\r\n f\"at least 0.23.x. Try version 0.24.x+\")\r\n\r\n if not ((int(rdkit_major) == 2020 and int(rdkit_minor) == 9) or int(rdkit_major) >= 2021):\r\n raise ImportError(f\"RDKit version is relatively low ({rdkit_version}). Please upgrade into version at \"\r\n \"least 2020.09.x\")\r\n\r\n if not (int(tf_major) == 2 and int(tf_minor) >= 3):\r\n raise ImportError(f\"TensorFlow version is relatively low ({tf_version}). Please upgrade into version 2.3.x\")\r\n\r\n try:\r\n from dask import __version__ as dask_version\r\n dask_major, dask_minor, dask_patch = dask_version.split(\".\")\r\n if not (int(dask_major) == 2021 and int(dask_minor) == 4):\r\n raise ImportWarning(f\" Dask version is relatively low ({dask_version}). Please upgrade into version at \"\r\n f\"least 2021.04.x. Try version 2021.04.x\")\r\n except (ImportError, ImportWarning):\r\n pass\r\n\r\n\r\ndef Acknowledgements():\r\n from rdkit import __version__ as rdkit_version\r\n from sklearn import __version__ as sklearn_version\r\n from tensorflow import __version__ as tf_version\r\n print(f\"Library Contribution:\"\r\n f\"\\n\\tNumpy ({np.__version__}): Give immediate access with low-time processing and low memory usage on \"\r\n f\"heavy-weight database (list/array) compared to Python List (C++, Fortran, CPython).\"\r\n f\"\\n\\tPandas ({pd.__version__}): Construct DataFrame to create .csv and make connection to scikit-learn \"\r\n f\"with low-time processing and low memory usage but it is a bit slower (1.2 - 1.5 - 2.0x slower) than Numpy.\"\r\n f\"\\n\\tSciKit-Learn ({sklearn_version}): High Performance of Machine Learning techniques (C++, CPython)\"\r\n f\"\\n\\tRDKit ({rdkit_version}): Create sufficient data from SMILES Notation (C++, CPython)\"\r\n f\"\\n\\tTensorFlow ({tf_version}): integrated in TensorFlow\")\r\n print(\"Main Article References: Prediction of organic homolytic bond dissociation enthalpies \"\r\n \"at near chemical accuracy with sub-second computational cost\"\r\n \"\\n\\tAuthors: Peter C. St. 
John, Yanfei Guan, Yeonjoon Kim, Seonah Kim & Robert S. Paton\"\r\n \"\\n\\tDOI: 10.1038/s41467-020-16201-z\")\r\n print(\"Programming Language: Python 3.7.10 (PyCharm IDE 2020.03.1)\"\r\n \"\\nSub-programming Language: C++, Fortran, CUDA, HTML, C\")\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------------------------\r\n# [1]: Checking DataType\r\n__CALLER: str = \"Python built-in\"\r\nDATA_TYPE_CACHE_CHECK: Dict[str, List] = \\\r\n {\"str\": [str, f\"{__CALLER} string\"], \"int\": [int, f\"{__CALLER} integer string\"],\r\n \"bool\": [bool, f\"{__CALLER} boolean\"], \"float\": [float, f\"{__CALLER} float\"],\r\n \"List\": [List, f\"{__CALLER} list\"], \"Tuple\": [Tuple, f\"{__CALLER} tuple\"],\r\n \"Dict\": [Dict, f\"{__CALLER} dictionary\"], \"Set\": [Set, f\"{__CALLER} set\"],\r\n \"Slice\": [slice, f\"{__CALLER} slice\"],\r\n \"None\": [None, f\"{__CALLER} NoneType object\"], \"Callable\": [Callable, f\"method/function\"],\r\n \"DataFrame\": [pd.DataFrame, f\"Pandas DataFrame\"], \"Index\": [pd.Index, f\"Pandas Index\"],\r\n \"coo_matrix\": [coo_matrix, f\"Scipy coo_matrix\"], \"spmatrix\": [spmatrix, f\"Scipy spmatrix\"],\r\n \"csc_matrix\": [csc_matrix, f\"Scipy DataFrame\"], \"csr_matrix\": [csr_matrix, f\"Scipy csr_matrix\"],\r\n \"ndarray\": [ndarray, f\"Numpy array\"]}\r\n\r\n\r\ndef inputFastCheck(value: Any, dtype: Optional[str], delimiter: Optional[str] = None) -> bool:\r\n if dtype is None or 'None' in dtype:\r\n if value is None:\r\n return True\r\n\r\n try:\r\n target = tuple([DATA_TYPE_CACHE_CHECK[key][0] for key in dtype.split(delimiter)]) \\\r\n if delimiter is not None else DATA_TYPE_CACHE_CHECK[dtype][0]\r\n\r\n if isinstance(target, Tuple):\r\n if None in target:\r\n if value is None:\r\n return True\r\n return isinstance(value, tuple([checkDtype for checkDtype in target if checkDtype is not None]))\r\n return isinstance(value, target)\r\n except (ValueError, KeyError, IndexError, TypeError):\r\n warning(\"Unable to check your value properly as basic datatype input is unavailable.\")\r\n return False\r\n\r\n\r\ndef inputFullCheck(value: Any, name: str, dtype: Optional[str], delimiter: Optional[str] = None,\r\n warning_only: bool = False, fastCheck: bool = False) -> bool:\r\n \"\"\"\r\n Used to check parameter in a single shot. Return boolean value whether it passed the test if warning_only=True;\r\n else, raise TypeError\r\n\r\n :param value: The value needed to be checked\r\n :type value: Any\r\n\r\n :param name: The value needed for display\r\n :type name: str\r\n\r\n :param dtype: The dtype needed for checking. If multiple data type must be checked in one instance,\r\n delimiter = None\r\n :type dtype: str\r\n\r\n :param delimiter: if provided, multiple data types will be checked in one calling by string separation\r\n :type delimiter: str\r\n\r\n :param warning_only: if True, no TypeError made; instead warning called\r\n :type warning_only: bool\r\n\r\n :param fastCheck: if True, skip some checking. 
Only used when you type correct input\r\n :type fastCheck: bool\r\n\r\n :return: bool\r\n \"\"\"\r\n if not inputFastCheck(value=fastCheck, dtype='bool'):\r\n raise TypeError(f\"Fast Checking should be {DATA_TYPE_CACHE_CHECK['bool'][1]}\")\r\n\r\n if fastCheck:\r\n if value is None:\r\n if dtype is None:\r\n return True\r\n elif dtype.find(\"None\") != -1:\r\n return True\r\n else:\r\n if not inputFastCheck(value=name, dtype='str'):\r\n raise TypeError(f\"Input Name should be {DATA_TYPE_CACHE_CHECK['str'][1]}\")\r\n if dtype is not None:\r\n if not inputFastCheck(value=dtype, dtype='str'):\r\n raise TypeError(f\"Input Data Type should be {DATA_TYPE_CACHE_CHECK['str'][1]}\")\r\n elif value is None: # dtype is None\r\n return True\r\n\r\n if not inputFastCheck(value=delimiter, dtype='str') and delimiter is not None:\r\n raise TypeError(f\"Input Delimiter should be {DATA_TYPE_CACHE_CHECK['str'][1]} or NoneType object\")\r\n if not inputFastCheck(value=warning_only, dtype='bool'):\r\n raise TypeError(f\"warning_only={warning_only} should be {DATA_TYPE_CACHE_CHECK['bool'][1]}\")\r\n\r\n outcome: bool = inputFastCheck(value=value, dtype=dtype, delimiter=delimiter)\r\n if outcome:\r\n return outcome\r\n\r\n target = tuple([DATA_TYPE_CACHE_CHECK[key][0] for key in dtype.split(delimiter)]) \\\r\n if delimiter is not None else DATA_TYPE_CACHE_CHECK[dtype][0]\r\n msg: str = f\" {name} should be {__CALLER} {target} but not type: {type(value)}\"\r\n\r\n if warning_only:\r\n warning(msg)\r\n return outcome\r\n raise TypeError(msg)\r\n\r\n\r\ndef _checkLefty_(value: Union[int, float], minimumValue: Union[int, float], allowBoundary: bool) -> bool:\r\n return minimumValue <= value if allowBoundary else minimumValue < value\r\n\r\n\r\ndef _checkRighty_(value: Union[int, float], maximumValue: Union[int, float], allowBoundary: bool) -> bool:\r\n return maximumValue >= value if allowBoundary else maximumValue > value\r\n\r\n\r\ndef inputCheckRange(value: Union[int, float], name: str, maxValue: Optional[Union[int, float]],\r\n minValue: Optional[Union[int, float]] = 0, fastCheck: bool = False, allowNoneInput: bool = False,\r\n allowFloatInput: bool = False, warning_only: bool = False, leftBound: bool = True,\r\n rightBound: bool = False) -> bool:\r\n \"\"\" Used to check python built-in input parameter between a range [minValue, maxValue).\r\n Return boolean value whether it passed the test if warning_only=True; else, raise TypeError or ValueError\"\"\"\r\n inputFullCheck(value=fastCheck, name='fastCheck', dtype='bool', warning_only=False)\r\n if not fastCheck:\r\n if minValue is not None:\r\n inputFullCheck(value=minValue, name='minimum_value', dtype='int-float', delimiter='-', warning_only=False)\r\n if maxValue is not None:\r\n inputFullCheck(value=maxValue, name='maximum_value', dtype='int-float', delimiter='-', warning_only=False)\r\n\r\n inputFullCheck(value=name, name='name', dtype='str', warning_only=False)\r\n inputFullCheck(value=warning_only, name='warning_only', dtype='bool', warning_only=False)\r\n inputFullCheck(value=allowNoneInput, name='allowNoneInput', dtype='bool', warning_only=False)\r\n inputFullCheck(value=allowFloatInput, name='allowFloatInput', dtype='bool', warning_only=False)\r\n inputFullCheck(value=leftBound, name='leftBound', dtype='bool', warning_only=False)\r\n inputFullCheck(value=rightBound, name='rightBound', dtype='bool', warning_only=False)\r\n\r\n if allowNoneInput:\r\n if value is None:\r\n return True\r\n\r\n checking_datatype = 'int-float' if allowFloatInput else 
'int'\r\n if minValue is None and maxValue is None:\r\n warning(f' {name}: Your value only be checked with the data type. Your input cannot be compared at any metric')\r\n return inputFullCheck(value=value, name=name, dtype=checking_datatype, delimiter='-')\r\n\r\n if maxValue is not None and minValue is not None:\r\n if minValue > maxValue:\r\n warning(f\" {name}: Input range must be swapped to guarantee consistency\")\r\n minValue, maxValue = maxValue, minValue\r\n\r\n if minValue is None:\r\n lBound = '('\r\n else:\r\n lBound: str = '[' if leftBound else '('\r\n if maxValue is None:\r\n rBound = '('\r\n else:\r\n rBound: str = ']' if rightBound else ')'\r\n INF: str = 'INFINITE'\r\n\r\n if inputFastCheck(value=value, dtype=checking_datatype, delimiter='-'):\r\n msg: str = ''\r\n if minValue is not None and maxValue is None:\r\n if not _checkLefty_(value=value, minimumValue=minValue, allowBoundary=leftBound):\r\n msg: str = f\"{name}={value} is out-of-range {lBound}{minValue}, {INF}{rBound}\"\r\n elif minValue is None and maxValue is not None:\r\n if not _checkRighty_(value=value, maximumValue=maxValue, allowBoundary=rightBound):\r\n msg: str = f\"{name}={value} is out-of-range {lBound}{INF}, {maxValue}{rBound}\"\r\n else:\r\n if not (_checkLefty_(value=value, minimumValue=minValue, allowBoundary=leftBound) and\r\n _checkRighty_(value=value, maximumValue=maxValue, allowBoundary=rightBound)):\r\n msg: str = f\"{name}={value} is out-of-range {lBound}{minValue}, {maxValue}{rBound}\"\r\n\r\n if msg != '':\r\n if warning_only:\r\n warning(msg)\r\n return False\r\n raise ValueError(msg)\r\n else:\r\n note = \"integer\"\r\n if minValue is not None:\r\n if minValue >= 0:\r\n note = \"positive integer\"\r\n elif maxValue is not None:\r\n if maxValue <= 0:\r\n note = \"negative integer\"\r\n\r\n msg: str = f\"{name}={value} must be a {note} {lBound}{minValue}, {maxValue}{rBound}\"\r\n if allowNoneInput:\r\n msg = f\"{msg} or None\"\r\n\r\n if warning_only:\r\n warning(msg)\r\n return False\r\n raise ValueError(msg)\r\n\r\n return True\r\n\r\n\r\ndef inputCheckIterableInRange(value: Union[ndarray, List, Tuple], name: str, maxValue: Optional[Union[int, float]],\r\n minValue: Optional[Union[int, float]] = 0, maxInputInside: int = 2,\r\n strictInput: bool = False, **kwargs) -> bool:\r\n # **kwargs: Argument need for function inputCheckRange\r\n inputFullCheck(value=value, name=name, dtype='List-Tuple', delimiter='-')\r\n inputFullCheck(value=strictInput, name='strictInput', dtype='bool')\r\n inputCheckRange(value=maxInputInside, name='len(value)', maxValue=len(value), minValue=0, rightBound=True)\r\n if strictInput:\r\n if len(value) != maxInputInside:\r\n raise ValueError(f\"{name} should have only {maxInputInside} values\")\r\n\r\n for idx, location in enumerate(value):\r\n inputCheckRange(value=location, name=f'{name}[{idx}]', maxValue=maxValue, minValue=minValue, **kwargs)\r\n return True\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------------------------\r\n# [2]: Decorator and Function used for warp-up\r\ndef MeasureExecutionTime(Function: Callable) -> Callable:\r\n def compute(*args, **kwargs):\r\n start = perf_counter()\r\n result = Function(*args, **kwargs)\r\n print(f\"Executing Time ({Function}): {perf_counter() - start:.6f}s\")\r\n return result\r\n return compute\r\n\r\n\r\ndef objectMemoryProfiler(Object: object, verbose: bool = True, sorting_mode: bool = True,\r\n descending: bool = True) -> pd.DataFrame:\r\n # Hyper-parameter 
Verification\r\n inputFastCheck(value=verbose, dtype='bool')\r\n inputFastCheck(value=sorting_mode, dtype='bool')\r\n inputFastCheck(value=descending, dtype='bool')\r\n\r\n from sys import getsizeof\r\n print(\"=\" * 30, objectMemoryProfiler, \"=\" * 30)\r\n total: int = 0\r\n np_total: int = 0\r\n arr: List[List[str, str, int]] = []\r\n for name in Object.__dict__:\r\n obj = getattr(Object, name)\r\n size = obj.nbytes if isinstance(obj, ndarray) else getsizeof(obj)\r\n total += size\r\n\r\n if isinstance(obj, ndarray):\r\n size = obj.nbytes\r\n np_total += size\r\n elif isinstance(obj, (coo_matrix, csc_matrix, csr_matrix, spmatrix)):\r\n if isinstance(obj, coo_matrix):\r\n size = obj.data.nbytes + obj.row.nbytes + obj.col.nbytes\r\n else:\r\n size = obj.data.nbytes + obj.indices.nbytes + obj.indptr.nbytes\r\n np_total += size\r\n\r\n if verbose and not sorting_mode:\r\n msg = f\"{name} ({type(obj)}): \\t\\t\\t\\t{size} bytes --> Shape: {obj.shape}\" \\\r\n if isinstance(obj, ndarray) else f\"{name} ({type(obj)}): \\t\\t\\t\\t{size} bytes\"\r\n print(msg)\r\n\r\n arr.append([name, type(obj), size])\r\n if sorting_mode:\r\n arr.sort(key=lambda item: int(item[2]), reverse=descending)\r\n arr: pd.DataFrame = pd.DataFrame(data=arr, index=None, columns=[\"Name\", \"Type\", \"Byte Size\"])\r\n print(arr)\r\n\r\n print(\"-\" * 80)\r\n percentage: float = np_total / total\r\n print(f\"Attribute Memory: {total} bytes ({round(total / (1024 * 1024), 6)} MB)\")\r\n print(f\"Numpy Attribute Memory: {np_total} bytes ({round(np_total / (1024 * 1024), 6)} MB)\"\r\n f\" ---> Percentage: {round(100 * percentage, 6)} %\")\r\n print(f\"Remaining Memory: {total - np_total} bytes ({round((total - np_total) / (1024 * 1024), 6)} MB) \"\r\n f\"---> Percentage: {round(100 * (1 - percentage), 6)} %\")\r\n return arr\r\n\r\n\r\ndef TimingProfiler(Function: Callable):\r\n def compute(*args, **kwargs):\r\n from cProfile import Profile\r\n profiler = Profile()\r\n profiler.enable()\r\n result = Function(*args, **kwargs)\r\n profiler.disable()\r\n profiler.print_stats(sort=True)\r\n return result\r\n return compute\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------------------------\r\n# [3]: Function used for generating file and modifying filename\r\ndef _StringValidation_(FileName: str, extension: str) -> None:\r\n inputFullCheck(value=FileName, name='FileName', dtype='str')\r\n inputFullCheck(value=extension, name='extension', dtype='str')\r\n\r\n\r\ndef FixPath(FileName: str, extension: str) -> str:\r\n _StringValidation_(FileName=FileName, extension=extension)\r\n return f\"{FileName}{extension}\" if FileName.rfind(extension) != len(FileName) - len(extension) \\\r\n else FileName\r\n\r\n\r\ndef RemoveExtension(FileName: str, extension: str) -> str:\r\n _StringValidation_(FileName=FileName, extension=extension)\r\n return FileName if FileName.rfind(extension) != len(FileName) - len(extension) \\\r\n else FileName[:len(FileName) - len(extension)]\r\n\r\n\r\ndef ReadFile(FilePath: Optional[str], header: Optional[int] = 0, dtype=None, get_values: bool = False,\r\n get_columns: bool = False, nrows: Optional[int] = None, blocksize: Union[float, int] = 64e6,\r\n dtypes_memory_identifier: Union[float, int] = 1, usecols: Union[List[int], List[str]] = None,\r\n skiprows: Optional[Union[List, int]] = None) \\\r\n -> Optional[Union[pd.DataFrame, List[str], ndarray, Tuple[ndarray, List[str]]]]:\r\n \"\"\"\r\n Default implementation used to call a .csv documentation.\r\n 
1 MiB = 2^10 KiB = 2^20 bytes = 1048576 bytes\r\n 1 MB = 10^3 KB = 10^6 bytes = 1000000 bytes\r\n\r\n :param FilePath: The path contained the .csv file. This hyper-parameter does not need extension name as\r\n it have to be checked directly before accessing pandas library (str).\r\n :type FilePath: str\r\n\r\n :param header: The position of column name used as label/features identifier (int). Default to 0.\r\n :type header: int\r\n\r\n :param dtype: pandas dtype // numpy.dtype\r\n :type dtype: dtype\r\n\r\n :param get_values: Whether to get values only\r\n :type get_values: bool\r\n\r\n :param get_columns: Whether to get columns only\r\n :type get_columns: bool\r\n\r\n :param nrows: number of rows for computing\r\n :type nrows: Optional[int]\r\n\r\n :param skiprows: number of rows or row's position for skipping\r\n :type skiprows: Optional[Union[List, int]]\r\n\r\n :param usecols: number of rows or row's position for skipping\r\n :type usecols: Optional[Union[List, int]]\r\n\r\n :param blocksize: The chunking memory for paralleling (Dask Library), Default to be 64 MB\r\n :type blocksize: float or int\r\n\r\n :param dtypes_memory_identifier: The coefficient memory adding when reading csv by Dask Library (default to be 1).\r\n Base case: 1 MiB (mebibytes)\r\n :type dtypes_memory_identifier: float or int\r\n\r\n :return: pd.DataFrame\r\n \"\"\"\r\n if True:\r\n if FilePath is None or FilePath == \"\":\r\n return None\r\n\r\n inputFullCheck(value=FilePath, name='FilePath', dtype='str')\r\n inputFullCheck(value=get_values, name='get_values', dtype='bool')\r\n inputFullCheck(value=get_columns, name='get_columns', dtype='bool')\r\n\r\n if not EXTRA_LIBRARY[\"Dask\"] and not EXTRA_LIBRARY[\"Dask_activated\"]:\r\n EXTRA_LIBRARY[\"Dask_activated\"] = True\r\n try:\r\n import dask.dataframe as dd\r\n EXTRA_LIBRARY[\"Dask\"] = True\r\n warning(\" Dask is a great tool to replicate pandas.DataFrame with read_csv. In fact, this project \"\r\n \"leverage computation strength with memory by Numpy rather than Pandas. Switch default to Dask \"\r\n \"DataFrame\")\r\n except (ImportError, ImportWarning):\r\n warning(\" Dask is not in your environment. 
Switch to pandas (Memory & Time Consumption is larger).\")\r\n\r\n pass\r\n\r\n FilePath: str = FixPath(FileName=FilePath, extension=\".csv\")\r\n File: Optional[pd.DataFrame] = None\r\n if EXTRA_LIBRARY[\"Dask\"] and nrows != 1:\r\n try:\r\n import dask.dataframe as dd\r\n MiB: int = 1048576\r\n File: pd.DataFrame = \\\r\n dd.read_csv(FilePath, dtype=dtype, header=header, low_memory=True, usecols=usecols, blocksize=blocksize,\r\n sample=int(MiB * dtypes_memory_identifier), cache_dates=False).compute()\r\n except (ValueError, MemoryError, ModuleNotFoundError):\r\n pass\r\n\r\n if File is None:\r\n File: pd.DataFrame = pd.read_csv(FilePath, dtype=dtype, nrows=nrows, skiprows=skiprows, usecols=usecols,\r\n header=header, low_memory=True, cache_dates=False)\r\n\r\n if not get_values and not get_columns:\r\n return File\r\n elif not get_values and get_columns:\r\n return File.columns.tolist()\r\n elif get_values and not get_columns:\r\n return File.values if inputFastCheck(File.values, 'ndarray') else np.array(File.values, dtype=dtype)\r\n\r\n return File.values if inputFastCheck(File.values, 'ndarray') else np.array(File.values, dtype=dtype), \\\r\n File.columns.tolist()\r\n\r\n\r\ndef ExportFile(DataFrame: pd.DataFrame, FilePath: str, index: bool = False, index_label: Optional[str] = None) -> None:\r\n \"\"\"\r\n Default implementation used to return the .csv documentation from DataFrame\r\n\r\n :param DataFrame: The DataFrame needs for creating the .csv file (pd.DataFrame).\r\n :type DataFrame: pd.DataFrame\r\n\r\n :param FilePath: The path contained the .csv file. This hyper-parameter does not need extension name as it have to\r\n be checked directly before accessing pandas library (str).\r\n :type FilePath: str\r\n\r\n :param index: The implicit array-like used for row indexing (Array-like). 
Default to False\r\n :type index: List[str] or Tuple[str] or bool or List[int] or Tuple[int]\r\n\r\n :param index_label: The name of index column\r\n :type index_label: str or None\r\n\r\n :return: None\r\n \"\"\"\r\n if FilePath is None:\r\n return None\r\n inputFullCheck(value=DataFrame, name='DataFrame', dtype='DataFrame')\r\n DataFrame.to_csv(FixPath(FileName=FilePath, extension=\".csv\"), index=index, index_label=index_label)\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------------------------\r\n# [4]: Function used for data comparison\r\n# [4.1]: Base Cleaning method\r\ndef BinarySearch(array_1d: Union[List, Tuple, ndarray], value: Union[int, str, float],\r\n getIndex: bool = False, raiseError: bool = False) -> Union[int, bool]:\r\n # This implementation is used to boost-up searching.\r\n # Binary Search for Large Array\r\n inputFullCheck(value=array_1d, name='array_1d', dtype='List-Tuple-ndarray', delimiter='-')\r\n inputFullCheck(value=value, name='value', dtype='int-str-float', delimiter='-')\r\n if inputFastCheck(value=array_1d, dtype='ndarray'):\r\n if array_1d.ndim != 1:\r\n raise ValueError(\" Only works for 1D-array.\")\r\n inputFullCheck(value=getIndex, name='getIndex', dtype='bool')\r\n inputFullCheck(value=raiseError, name='raiseError', dtype='bool')\r\n\r\n start, end = 0, len(array_1d)\r\n counter = 0\r\n while start <= end:\r\n mid = (start + end) // 2\r\n if end - start <= 1:\r\n counter += 1\r\n if counter == 2:\r\n if not raiseError:\r\n return False if not getIndex else -1\r\n raise ValueError(f\"value ({value}) is not in the array\")\r\n\r\n if value < array_1d[mid]:\r\n end = mid\r\n elif value > array_1d[mid]:\r\n start = mid\r\n elif value == array_1d[mid]:\r\n return True if not getIndex else mid\r\n\r\n if not raiseError:\r\n return False if not getIndex else -1\r\n raise ValueError(f\"value={value} is not in the array\")\r\n\r\n\r\ndef BinaryIndexing(array_1d: Union[List, Tuple, ndarray], value: Union[str, int, float], ascending: bool = True):\r\n inputFullCheck(value=array_1d, name='array_1d', dtype='List-Tuple-ndarray', delimiter='-')\r\n inputFullCheck(value=value, name='value', dtype='int-str-float', delimiter='-')\r\n inputFullCheck(value=ascending, name='ascending', dtype='bool')\r\n if len(array_1d) == 0:\r\n raise ValueError(\" Binary Search do not allow empty array\")\r\n\r\n left, right = 0, len(array_1d) - 1\r\n if ascending:\r\n while left <= right:\r\n mid = (left + right) // 2\r\n if right - left == 1:\r\n if value == array_1d[left]:\r\n return left\r\n elif value == array_1d[right]:\r\n return right\r\n\r\n if value < array_1d[mid]:\r\n right = mid\r\n elif value > array_1d[mid]:\r\n left = mid\r\n else:\r\n return mid\r\n else:\r\n while left <= right:\r\n mid = (left + right) // 2\r\n if right - left == 1:\r\n if value == array_1d[right]:\r\n return right\r\n elif value == array_1d[left]:\r\n return left\r\n\r\n if value < array_1d[mid]:\r\n left = mid\r\n elif value > array_1d[mid]:\r\n right = mid\r\n else:\r\n return mid\r\n\r\n return None\r\n\r\n\r\ndef _Export_(dataFrame: pd.DataFrame, overlap_directory: str, new_directory: str = None, status: bool = True) -> None:\r\n inputFullCheck(value=status, name='status', dtype='bool')\r\n if status:\r\n ExportFile(DataFrame=dataFrame, FilePath=overlap_directory if new_directory is None else new_directory)\r\n return None\r\n\r\n\r\ndef _IsSingleBinaryUnique_(array_1d: ndarray) -> bool:\r\n inputFullCheck(value=array_1d, 
name='array_1d', dtype='ndarray')\r\n if array_1d[0] != array_1d[1]:\r\n return False\r\n return np.sum(array_1d, axis=-1) / array_1d.size == array_1d[0]\r\n\r\n\r\ndef IsSingleUnique(array_1d: ndarray, binaryMode: bool = False, allowCache: bool = True) -> bool:\r\n inputFullCheck(value=allowCache, name='allowCache', dtype='bool')\r\n inputFullCheck(value=binaryMode, name='binaryMode', dtype='bool')\r\n inputFullCheck(value=array_1d, name='array_1d', dtype='ndarray-List-Tuple', delimiter='-')\r\n if binaryMode and inputFastCheck(array_1d, dtype='ndarray'):\r\n return _IsSingleBinaryUnique_(array_1d=array_1d)\r\n\r\n if inputFastCheck(value=array_1d, dtype='ndarray'):\r\n if array_1d.ndim != 1:\r\n raise ValueError(f\"Accept 1D-array Only ({array_1d.ndim})\")\r\n cache = array_1d.tolist() if allowCache else array_1d\r\n else:\r\n cache = array_1d\r\n\r\n first_value, size = cache[0], len(cache)\r\n\r\n for idx in range(1, size):\r\n if cache[idx] != first_value:\r\n del cache\r\n return False\r\n del cache\r\n return True\r\n\r\n\r\ndef ArrayEqual(array_1: Union[ndarray, List, Tuple, pd.Index], array_2: Union[ndarray, List, Tuple, pd.Index],\r\n allowCache: bool = True) -> bool:\r\n \"\"\"\r\n Note that np.array_equal always result in O(2*N) or O(3*N) time complexity (depended on task dependency) as\r\n it have to ensure that all value in array should be converted into boolean matrix and validate using\r\n bool(np.asarray(a==b).all()). However, we want to reduce them the time complexity in the specific task only.\r\n Costing O(k) real-time complexity only with no extra space complexity O(1) compared to numpy.array_equal.\r\n \"\"\"\r\n inputFullCheck(array_1, name='array_1', dtype='List-Tuple-ndarray', delimiter='-')\r\n inputFullCheck(array_2, name='array_2', dtype='List-Tuple-ndarray', delimiter='-')\r\n if inputFastCheck(value=array_1, dtype='List-Tuple', delimiter='-') and \\\r\n inputFastCheck(value=array_2, dtype='List-Tuple', delimiter='-'):\r\n size: int = len(array_1)\r\n if size != len(array_2):\r\n return False\r\n elif size == 0: # Two arrays have no values\r\n return True\r\n\r\n cache_1, cache_2 = array_1, array_2\r\n else:\r\n rebuilt_1 = np.asarray(array_1) if not inputFastCheck(value=array_1, dtype='ndarray') else array_1\r\n rebuilt_2 = np.asarray(array_2) if not inputFastCheck(value=array_2, dtype='ndarray') else array_2\r\n if not (rebuilt_1.ndim == rebuilt_2.ndim and rebuilt_1.ndim == 1):\r\n raise ValueError(f\"Two array was not equivalent in size a: {rebuilt_1.shape} --- b: {rebuilt_2.shape}\")\r\n size: int = rebuilt_1.size\r\n if rebuilt_1.size != rebuilt_2.size:\r\n return False\r\n elif rebuilt_1.size == 0: # Two arrays have no values\r\n return True\r\n\r\n inputFullCheck(value=allowCache, name='allowCache', dtype='bool')\r\n\r\n cache_1 = array_1.tolist() if allowCache and inputFastCheck(array_1, dtype='ndarray') else array_1\r\n cache_2 = array_2.tolist() if allowCache and inputFastCheck(array_2, dtype='ndarray') else array_2\r\n\r\n foundDifferent: bool = any(cache_1[index] != cache_2[index] for index in range(size))\r\n del cache_1, cache_2\r\n return not foundDifferent\r\n\r\n\r\ndef GetIndexForLabelRemoval(RemovingLabels: Union[pd.DataFrame, pd.Index, ndarray, List[str], Tuple[str, ...]],\r\n TargetLabels: Union[pd.DataFrame, pd.Index, ndarray, List[str], Tuple[str, ...]]) -> List[int]:\r\n \"\"\"\r\n Implementation of get the index of removed_labels to match with target_labels\r\n\r\n :param RemovingLabels: The labels needs to get removed\r\n :type 
RemovingLabels: Union[pd.DataFrame, pd.Index, ndarray, List[str]]\r\n\r\n :param TargetLabels: The labels for data comparison\r\n :type TargetLabels: Union[pd.DataFrame, pd.Index, ndarray, List[str]]\r\n\r\n :return:\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n inputFullCheck(value=RemovingLabels, name='removed_labels', delimiter='-',\r\n dtype='List-Tuple-ndarray-Index-DataFrame')\r\n\r\n if not inputFastCheck(value=RemovingLabels, dtype='ndarray'):\r\n RemovingLabels = np.asarray(RemovingLabels.columns).ravel() \\\r\n if inputFastCheck(value=RemovingLabels, dtype='DataFrame') else np.asarray(RemovingLabels).ravel()\r\n\r\n inputFullCheck(value=TargetLabels, name='target_labels', delimiter='-',\r\n dtype='List-Tuple-ndarray-Index-DataFrame')\r\n\r\n if not inputFastCheck(value=TargetLabels, dtype='ndarray'):\r\n TargetLabels = np.asarray(TargetLabels.columns).ravel() \\\r\n if inputFastCheck(value=TargetLabels, dtype='DataFrame') else np.asarray(TargetLabels).ravel()\r\n\r\n if len(TargetLabels) > len(RemovingLabels):\r\n warning(\" WARNING: Two array above cannot be matched. Please check your file or source code\")\r\n warning(f\" The target label ({len(TargetLabels)}) is longer than the removed label ({len(RemovingLabels)})\")\r\n status = False\r\n pass\r\n\r\n FalseLabel: List[int] = []\r\n moving_column: int = 0\r\n for current_column in range(0, len(RemovingLabels)):\r\n if moving_column < len(TargetLabels):\r\n if RemovingLabels[current_column] == TargetLabels[moving_column]:\r\n moving_column += 1\r\n else:\r\n FalseLabel.append(current_column)\r\n else:\r\n FalseLabel.append(current_column)\r\n\r\n if len(RemovingLabels) - len(FalseLabel) != len(TargetLabels):\r\n warning(\" Two arrays above cannot be matched. Please check your file or source code\")\r\n\r\n return FalseLabel\r\n\r\n\r\n# [4.2]: Cleaning Function\r\ndef _checkCleaningInput_(comparedArray: ndarray, FeaturesLabels: Union[ndarray, List[str]], nonTouchableSize: int,\r\n relevantSet_1: ndarray = None, relevantSet_2: ndarray = None, ) -> None:\r\n inputFullCheck(value=comparedArray, name='comparedArray', dtype='ndarray')\r\n inputFullCheck(value=FeaturesLabels, name='FeaturesLabels', dtype='ndarray-List', delimiter='-')\r\n\r\n observationSize, featureSize = comparedArray.shape\r\n if len(FeaturesLabels) != featureSize:\r\n raise ValueError(f\"Invalid Length of Columns ({featureSize} vs {len(FeaturesLabels)}).\")\r\n if relevantSet_1 is not None:\r\n inputFullCheck(value=relevantSet_1, name='relevantSet_1', dtype='ndarray')\r\n if relevantSet_1.shape[1] != featureSize:\r\n raise ValueError(f\"Invalid Length of relevantSet_1 ({featureSize} vs {relevantSet_1.shape[1]}).\")\r\n if relevantSet_2 is not None:\r\n inputFullCheck(value=relevantSet_2, name='relevantSet_2', dtype='ndarray')\r\n if relevantSet_2.shape[1] != featureSize:\r\n raise ValueError(f\"Invalid Length of relevantSet_2 ({featureSize} vs {relevantSet_2.shape[1]}).\")\r\n\r\n inputFullCheck(value=nonTouchableSize, name='nonTouchableSize', dtype='int')\r\n if nonTouchableSize < 0:\r\n warning(\" nonTouchableSize must be positive. Switch to zero (0)\")\r\n nonTouchableSize = 0\r\n if 100 * (nonTouchableSize / featureSize) > 0.75:\r\n x: float = 100 * (nonTouchableSize / featureSize)\r\n warning(f\" nonTouchableSize is implemented at relatively large scale ({round(x, 2)}%). 
Please be careful\")\r\n\r\n return None\r\n\r\n\r\ndef _getCustomSparseMatrix_(array: ndarray, nonTouchableSize: int, binaryMode: bool,\r\n booleanMask: Optional[List[bool]] = None) -> Tuple[List[List[int]], List[ndarray]]:\r\n if booleanMask is not None:\r\n if len(booleanMask) != array.shape[1]:\r\n raise ValueError(\"Source Code Error\")\r\n\r\n def _validate_(column) -> bool:\r\n if booleanMask is None:\r\n return True\r\n if not booleanMask[column]:\r\n return True\r\n return False\r\n\r\n WorkingSize: int = array.shape[1] - nonTouchableSize\r\n\r\n print(\"Generate Dynamic Sparse Matrix: PENDING ...\")\r\n BinaryCols: List[List[int]] = [np.where(array[:, col] == 1)[0].tolist() if _validate_(col) else []\r\n for col in range(0, WorkingSize)]\r\n ExtraCols: List[Union[ndarray, List[int]]] = [[] for _ in range(0, WorkingSize)]\r\n\r\n if binaryMode:\r\n return BinaryCols, ExtraCols\r\n\r\n empty: ndarray = np.array([], dtype=array.dtype)\r\n opt: Callable = OptimizeIntegerDatatypeByShape\r\n maximumValue: int = np.iinfo(array.dtype).max\r\n for col in range(0, WorkingSize):\r\n if _validate_(col):\r\n if array[:, col].sum() != len(BinaryCols[col]):\r\n index_temp: ndarray = np.where(np.logical_and(array[:, col] != 1, array[:, col] != 0))[0]\r\n if index_temp.size == 0:\r\n ExtraCols[col] = empty\r\n else:\r\n ExtraCols[col] = np.zeros(shape=(index_temp.size, 2),\r\n dtype=opt((int(maximumValue), int(index_temp[-1]))))\r\n ExtraCols[col][:, 0] = index_temp\r\n ExtraCols[col][:, 1] = array[index_temp, col]\r\n\r\n print(\"Generate Dynamic Sparse Matrix: DONE ...\")\r\n return BinaryCols, ExtraCols\r\n\r\n\r\ndef _mergeTwoSortedArrays_(array_1: Union[List[int], ndarray], array_2: Union[List[int], ndarray]) -> List[int]:\r\n inputFullCheck(value=array_1, name='array_1', dtype='List-Tuple-ndarray', delimiter='-')\r\n inputFullCheck(value=array_2, name='array_2', dtype='List-Tuple-ndarray', delimiter='-')\r\n\r\n size_1: int = array_1.size if inputFastCheck(value=array_1, dtype='ndarray') else len(array_1)\r\n size_2: int = array_2.size if inputFastCheck(value=array_2, dtype='ndarray') else len(array_2)\r\n\r\n if not isinstance(array_1, type(array_2)):\r\n warning(\" Two arrays does not having same type\")\r\n\r\n if size_1 == 0 and size_2 == 0:\r\n return []\r\n else:\r\n if size_1 == 0:\r\n return array_2.copy()\r\n if size_2 == 0:\r\n return array_1.copy()\r\n\r\n newArray: List[int] = [0] * (size_1 + size_2)\r\n i, j, k = 0, 0, 0\r\n while i < size_1 and j < size_2:\r\n if array_1[i] < array_2[j]:\r\n newArray[k] = array_1[i]\r\n i += 1\r\n else:\r\n newArray[k] = array_2[j]\r\n j += 1\r\n k += 1\r\n\r\n if i != size_1: # Remaining value from array_1\r\n for x in range(i, size_1):\r\n newArray[k] = array_1[x]\r\n k += 1\r\n else:\r\n for x in range(j, size_2):\r\n newArray[k] = array_2[x]\r\n k += 1\r\n\r\n if k != len(newArray):\r\n raise ValueError(\"Incorrect Merging\")\r\n\r\n return newArray\r\n\r\n\r\ndef NonLabelCleaning(cleanedArray: ndarray, labels: Union[ndarray, List[str]], nonTouchableSize: int,\r\n relevantSet_1: ndarray = None, relevantSet_2: ndarray = None, HorizontalCleaning: bool = False,\r\n VarianceThreshold: Union[int, float] = 0, BinaryMode: bool = False):\r\n \"\"\"\r\n Implementation of Data Cleaning by Features: When calling this function, it will observe all of the features in the\r\n ndarray from [0...ndarray.size - nonTouchableSize] using comparedSet as the benchmark. 
If any features contained\r\n singleton value (i.e all 0s / 1s), that features can be marked as useless and would be removed all because it can\r\n be sum up as bias(es) parameter.\r\n\r\n :param cleanedArray: The ndarray database for identifying useless features\r\n :type cleanedArray: ndarray\r\n\r\n :param labels: The labeling of comparedSet (mostly derived from DataFrame.columns).\r\n It could be a list of string or ndarray\r\n :type labels: List[str] or ndarray\r\n\r\n :param nonTouchableSize: The number of features at the right of all dataset which is not used for cleaning\r\n :type nonTouchableSize: int\r\n\r\n :param HorizontalCleaning: Whether to apply horizontal cleaning (default to False).\r\n :type HorizontalCleaning: bool\r\n\r\n :param relevantSet_1: The dataset which shares same number of features as comparedArray (Don't use in benchmarking)\r\n :type relevantSet_1: ndarray\r\n\r\n :param relevantSet_2: The dataset which shares same number of features as comparedArray (Don't use in benchmarking)\r\n :type relevantSet_2: ndarray\r\n\r\n :param VarianceThreshold: The maximum variance threshold for data cleaning (feature selection)\r\n :type VarianceThreshold: int or float\r\n\r\n :param BinaryMode: If your data is in binary mode (0, 1) only. It would be faster to activate this function.\r\n Note that no checking || verification was performed on ndarray if binaryMode=True\r\n :type BinaryMode: bool\r\n\r\n :return: Tuple of ndarray\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n _checkCleaningInput_(cleanedArray, labels, nonTouchableSize=nonTouchableSize, relevantSet_1=relevantSet_1,\r\n relevantSet_2=relevantSet_2)\r\n inputCheckRange(value=VarianceThreshold, name='VarianceThreshold', minValue=0, maxValue=None,\r\n allowFloatInput=True)\r\n inputFullCheck(value=HorizontalCleaning, name='horizontalCleaning', dtype='bool')\r\n\r\n print(\"=\" * 30, \"Non-Labeled Cleaning\", \"=\" * 30)\r\n DM_startTime: float = perf_counter()\r\n ObservationSize, InitialSize, WorkingSize = cleanedArray.shape[0], cleanedArray.shape[1], \\\r\n cleanedArray.shape[1] - nonTouchableSize\r\n VerticalSimilar, HorizontalSimilar = [], []\r\n BooleanLargeMask: List[bool] = [False] * WorkingSize\r\n\r\n print(\"Number of Original Features:\", InitialSize)\r\n print(\"[1]- Vertical Data Cleaning: \", end=\"\")\r\n for col in range(0, WorkingSize):\r\n if IsSingleUnique(cleanedArray[:, col], binaryMode=BinaryMode):\r\n BooleanLargeMask[col] = True\r\n VerticalSimilar.append(col)\r\n\r\n if VarianceThreshold != 0:\r\n newVerticalSimilarElement: List[int] = []\r\n for col in range(0, WorkingSize):\r\n if not BooleanLargeMask[col]:\r\n if BinaryMode:\r\n nums_one = np.count_nonzero(cleanedArray[:, col] == np.uint8(1))\r\n if (nums_one * (1 - nums_one / ObservationSize) ** 2) / (ObservationSize - 1) <= VarianceThreshold:\r\n newVerticalSimilarElement.append(col)\r\n BooleanLargeMask[col] = True\r\n else:\r\n if cleanedArray[:, col].var() <= VarianceThreshold:\r\n newVerticalSimilarElement.append(col)\r\n BooleanLargeMask[col] = True\r\n\r\n if len(newVerticalSimilarElement) != 0: # Merging two sorted arrays\r\n VerticalSimilar = _mergeTwoSortedArrays_(array_1=VerticalSimilar, array_2=newVerticalSimilarElement)\r\n print(\"Number of Modified Features:\", InitialSize - len(VerticalSimilar))\r\n\r\n print(\"[2]- Horizontal Data Cleaning: \", end=\"\")\r\n if HorizontalCleaning:\r\n # IterationsCount: int = 0\r\n # [1]: Generate Dynamic Sparse Matrix\r\n BinaryCols, ExtraCols = 
_getCustomSparseMatrix_(array=cleanedArray, nonTouchableSize=nonTouchableSize,\r\n binaryMode=BinaryMode, booleanMask=BooleanLargeMask)\r\n # [2]: Making comparison\r\n print(\"Comparing Column: PENDING ... \", end='')\r\n for staticCol in range(0, WorkingSize):\r\n if not BooleanLargeMask[staticCol]:\r\n print(\"Remaining Columns: \", WorkingSize - staticCol)\r\n for dynamicCol in range(staticCol + 1, WorkingSize):\r\n if not BooleanLargeMask[dynamicCol]:\r\n # IterationsCount += 1\r\n check: bool = True\r\n if len(ExtraCols[staticCol]) != 0:\r\n check: bool = False\r\n if ArrayEqual(ExtraCols[staticCol][:, 0], ExtraCols[dynamicCol][:, 0]):\r\n if ArrayEqual(ExtraCols[staticCol][:, 1], ExtraCols[dynamicCol][:, 1]):\r\n check = True\r\n\r\n if check:\r\n if ArrayEqual(BinaryCols[staticCol], BinaryCols[dynamicCol]):\r\n HorizontalSimilar.append(dynamicCol)\r\n # print(\"Number of Horizontal Iterations:\", IterationsCount)\r\n print(\"DONE\")\r\n\r\n # [2]: Cleaning Section\r\n print(\"[3]- Generate Removing Column from Boolean Mask and Delete them: PENDING ...\")\r\n CompleteIndex: List[int] = [column for column in range(0, WorkingSize) if BooleanLargeMask[column]]\r\n if CompleteIndex:\r\n cleanedArray = np.delete(cleanedArray, obj=CompleteIndex, axis=1)\r\n if relevantSet_1 is not None:\r\n relevantSet_1 = np.delete(relevantSet_1, obj=CompleteIndex, axis=1)\r\n if relevantSet_2 is not None:\r\n relevantSet_2 = np.delete(relevantSet_2, obj=CompleteIndex, axis=1)\r\n labels = np.delete(labels, obj=CompleteIndex, axis=None)\r\n\r\n print(f\"Number of Modified Features: {cleanedArray.shape[1]}\")\r\n print(f\"Number of Original Features: {InitialSize}\"\r\n f\"\\n----- 1: Vertical Database Minimization -----\"\r\n f\"\\n\\t: Remaining Features : {InitialSize - len(VerticalSimilar)}\"\r\n f\"\\n----- 2: Horizontal Database Minimization -----\"\r\n f\"\\n\\t: Remaining Features : {InitialSize - len(VerticalSimilar) - len(HorizontalSimilar)}\")\r\n # f\"\\n----- 3: Removed Column: {CompleteIndex}\")\r\n print(f\"Non-Labeled (Data) Cleaning: {perf_counter() - DM_startTime:.6f}s\")\r\n\r\n if relevantSet_1 is None and relevantSet_2 is None:\r\n return cleanedArray, labels\r\n elif relevantSet_1 is not None and relevantSet_2 is None:\r\n return cleanedArray, relevantSet_1, labels\r\n elif relevantSet_1 is None and relevantSet_2 is not None:\r\n return cleanedArray, relevantSet_2, labels\r\n\r\n return cleanedArray, relevantSet_1, relevantSet_2, labels\r\n\r\n\r\ndef GroupCleaning(cleanedArray: ndarray, labels: Union[ndarray, List[str]], nonTouchableSize: int, numsInput: int,\r\n relevantSet_1: ndarray = None, relevantSet_2: ndarray = None, StrictCleaning: bool = False,\r\n BinaryMode: bool = False):\r\n \"\"\"\r\n Implementation of Data Cleaning by Set of Features: When calling this function, it will observe sets of similar\r\n features columns be columns ndarray from [0...ndarray.size - nonTouchableSize] using comparedArray as the benchmark.\r\n\r\n :param cleanedArray: The ndarray database for identifying useless features\r\n :type cleanedArray: ndarray\r\n\r\n :param labels: The labeling of comparedSet (mostly derived from DataFrame.columns).\r\n It could be a list of string or ndarray\r\n :type labels: List[str] or ndarray\r\n\r\n :param nonTouchableSize: The number of features at the right of all dataset which is not used for cleaning\r\n :type nonTouchableSize: int\r\n\r\n :param numsInput: The number of set of features used for elimination\r\n :type numsInput: int\r\n\r\n :param 
StrictCleaning: Whether to ensure that feature is singleton (one unique feature only)\r\n :type StrictCleaning: bool\r\n\r\n :param relevantSet_1: The dataset which shares same number of features as comparedArray (Don't use in benchmarking)\r\n :type relevantSet_1: ndarray\r\n\r\n :param relevantSet_2: The dataset which shares same number of features as comparedArray (Don't use in benchmarking)\r\n :type relevantSet_2: ndarray\r\n\r\n :param BinaryMode: If your data is in binary mode (0, 1) only. It would be faster to activate this function.\r\n Note that no checking || verification was performed on ndarray if binaryMode=True\r\n :type BinaryMode: bool\r\n\r\n :return: Tuple of ndarray\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n _checkCleaningInput_(comparedArray=cleanedArray, FeaturesLabels=labels, nonTouchableSize=nonTouchableSize,\r\n relevantSet_1=relevantSet_1, relevantSet_2=relevantSet_2)\r\n inputFullCheck(value=StrictCleaning, name='strict_cleaning', dtype='bool')\r\n inputFullCheck(value=BinaryMode, name='BinaryMode', dtype='bool')\r\n\r\n print(\"=\" * 30, \"Index-Based Cleaning\", \"=\" * 30)\r\n DM_startTime: float = perf_counter()\r\n ObservationSize, InitialSize, WorkingSize = cleanedArray.shape[0], cleanedArray.shape[1], \\\r\n cleanedArray.shape[1] - nonTouchableSize\r\n EachWorkingSize: int = WorkingSize // numsInput\r\n IndexSimilar: List[int] = []\r\n\r\n TotalIterations: int = 0\r\n\r\n # Generate Sparse Matrix\r\n BinaryCols, ExtraCols = _getCustomSparseMatrix_(array=cleanedArray, nonTouchableSize=nonTouchableSize,\r\n binaryMode=BinaryMode, booleanMask=None)\r\n for ComparingColumn in range(0, EachWorkingSize):\r\n if StrictCleaning:\r\n if len(BinaryCols[ComparingColumn]) not in [0, ObservationSize]: # Maintain full constant\r\n continue\r\n\r\n if len(ExtraCols[ComparingColumn]) != 0:\r\n if not IsSingleUnique(ExtraCols[ComparingColumn][:, 1], binaryMode=BinaryMode):\r\n continue\r\n\r\n removable: bool = True\r\n for sample in range(1, numsInput):\r\n TotalIterations += 1\r\n CompareToThisColumn: int = EachWorkingSize * sample + ComparingColumn\r\n if not BinaryMode:\r\n if not ArrayEqual(ExtraCols[ComparingColumn][:, 0], ExtraCols[CompareToThisColumn][:, 0]):\r\n removable = False\r\n break\r\n\r\n if not ArrayEqual(ExtraCols[ComparingColumn][:, 1], ExtraCols[CompareToThisColumn][:, 1]):\r\n removable = False\r\n break\r\n\r\n if not ArrayEqual(BinaryCols[ComparingColumn], BinaryCols[CompareToThisColumn]):\r\n removable = False\r\n break\r\n\r\n if removable:\r\n IndexSimilar += [EachWorkingSize * idx + ComparingColumn for idx in range(0, numsInput)]\r\n\r\n if IndexSimilar:\r\n print(\"List of Removed Cols: \\n\", np.reshape(IndexSimilar, newshape=(len(IndexSimilar) // numsInput, numsInput)))\r\n IndexSimilar.sort()\r\n cleanedArray = np.delete(cleanedArray, obj=IndexSimilar, axis=1)\r\n if relevantSet_1 is not None:\r\n relevantSet_1 = np.delete(relevantSet_1, obj=IndexSimilar, axis=1)\r\n if relevantSet_2 is not None:\r\n relevantSet_2 = np.delete(relevantSet_2, obj=IndexSimilar, axis=1)\r\n labels = np.delete(labels, obj=IndexSimilar, axis=None)\r\n\r\n print(\"Number of Initial Features:\", InitialSize)\r\n print(\"Remaining Features:\", cleanedArray.shape[1])\r\n print(\"Number of Iterations: \", TotalIterations)\r\n print(f\"Index-Based (Data) Cleaning: {perf_counter() - DM_startTime:.6f}s\")\r\n\r\n if relevantSet_1 is None and relevantSet_2 is None:\r\n return cleanedArray, labels\r\n elif relevantSet_1 is not None and relevantSet_2 is 
None:\r\n return cleanedArray, relevantSet_1, labels\r\n elif relevantSet_1 is None and relevantSet_2 is not None:\r\n return cleanedArray, relevantSet_2, labels\r\n\r\n return cleanedArray, relevantSet_1, relevantSet_2, labels\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------------------------\r\n# [5]: Function used for extension\r\n# [5.1]: Function used for extension\r\ndef ArraySorting(database: ndarray, column: int, reverse: bool = False) -> ndarray:\r\n \"\"\"\r\n Implementation of column sorting by row.\r\n\r\n :param database: The dataset needs to be sorted\r\n :type database: ndarray\r\n\r\n :param column: The column needs to make sorting\r\n :type column: int\r\n\r\n :param reverse: Whether to reversed the order of sorting (default to False)\r\n :type reverse: bool\r\n\r\n :return: ndarray\r\n \"\"\"\r\n if not inputFullCheck(value=database, name='database', dtype='ndarray', warning_only=True):\r\n database = np.asarray(database)\r\n\r\n inputCheckRange(value=column, name='column', maxValue=database.shape[1], minValue=0)\r\n inputFullCheck(value=reverse, name='reverse', dtype='bool')\r\n\r\n index: ndarray = np.argsort(database[:, column])\r\n return database[index[::-1]] if reverse else database[index]\r\n\r\n\r\ndef _checkGetIndex_(database: ndarray, column: int, get_last: bool = False) -> None:\r\n if not inputFastCheck(value=database, dtype='ndarray'):\r\n database = np.asarray(database)\r\n\r\n inputCheckRange(value=column, name='column', maxValue=database.shape[1], minValue=0)\r\n inputFullCheck(value=get_last, name='get_last', dtype='bool')\r\n return None\r\n\r\n\r\ndef GetIndexOnArrangedData(database: ndarray, column: int, get_last: bool = False, key: Callable = str) -> List:\r\n \"\"\"\r\n Implementation of get the index of the object in the column position of the database.\r\n\r\n :param database: The dataset needs to be sorted\r\n :type database: ndarray\r\n\r\n :param column: The column needs to make sorting. 
Note that the indicated column should not be your own\r\n customized object.\r\n :type column: int\r\n\r\n :param get_last: Whether to get the last position of the database\r\n :type get_last: bool\r\n\r\n :param key: The function applied to each value before it is compared with the current status\r\n :type key: Callable\r\n\r\n :return: List\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n _checkGetIndex_(database=database, column=column, get_last=get_last)\r\n\r\n value = key(database[0, column])\r\n storage = [value]\r\n MolData: List[Tuple[int, Optional[str]]] = [(0, value)]\r\n\r\n for index, value in enumerate(database[1:, column], start=1):\r\n value = key(value)\r\n if value != storage[-1]:\r\n MolData.append((index, value))\r\n storage.append(value)\r\n\r\n if get_last:\r\n MolData.append((database.shape[0], None))\r\n\r\n return MolData\r\n\r\n\r\ndef GetIndexOnArrangedDataV2(database: ndarray, column: Tuple[int, int], get_last: bool = False,\r\n first_key: Callable = str, second_key: Callable = int) -> Tuple[List[Tuple], List[Tuple]]:\r\n # Hyper-parameter Verification\r\n _checkGetIndex_(database=database, column=column[0], get_last=get_last)\r\n _checkGetIndex_(database=database, column=column[1], get_last=get_last)\r\n\r\n MolData: List[Tuple[int, str]] = GetIndexOnArrangedData(database=database, column=column[0], get_last=True,\r\n key=first_key)\r\n\r\n StackData: List[Tuple] = []\r\n # Iterate over positions only: unpacking (index, value) from range() would raise a TypeError.\r\n for index in range(0, len(MolData) - 1):\r\n BEGIN, END = MolData[index][0], MolData[index + 1][0]\r\n\r\n CurrentData = database[BEGIN, column[1]]\r\n StackData.append((BEGIN, second_key(database[BEGIN, column[1]])))\r\n\r\n for row in range(BEGIN + 1, END):\r\n if database[row, column[1]] != CurrentData:\r\n CurrentData = database[row, column[1]]\r\n StackData.append((row, second_key(database[row, column[1]])))\r\n\r\n if get_last is False:\r\n MolData.pop()\r\n else:\r\n StackData.append((database.shape[0], None))\r\n\r\n return MolData, StackData\r\n\r\n\r\n# [5.2]: Function used for advanced extension\r\ndef _checkNumericalTargetInputs_(target_value: Optional[Union[int, List[int], Tuple]], name: str, maxValue: int,\r\n minValue: int = 0, **kwargs) -> Optional[Union[int, List[int], Tuple]]:\r\n # **kwargs: Arguments forwarded to the function inputCheckRange\r\n if target_value is None:\r\n return target_value\r\n\r\n inputFullCheck(value=target_value, name=name, dtype='int-List-Tuple', delimiter='-')\r\n if inputFastCheck(value=target_value, dtype='List-Tuple'):\r\n for idx, val in enumerate(target_value):\r\n inputCheckRange(value=val, name=f\"{name}[{idx}]\", maxValue=maxValue, minValue=minValue, **kwargs)\r\n return target_value\r\n\r\n inputCheckRange(value=target_value, name=name, maxValue=maxValue, minValue=minValue, **kwargs)\r\n return [target_value]\r\n\r\n\r\n@MeasureExecutionTime\r\ndef DuplicateRadical(database: ndarray, RadicalCols: Union[List[int], Tuple[int]] = (1, 2),\r\n RemoveTwoSameFragments: bool = True) -> ndarray:\r\n \"\"\"\r\n Implementation of duplicating radicals\r\n\r\n :param database: The dataset needs to make radicals duplication\r\n :type database: ndarray\r\n\r\n :param RadicalCols: Two radical columns for radical duplication (default to (1, 2))\r\n :type RadicalCols: List[int]\r\n\r\n :param RemoveTwoSameFragments: Whether to remove two identical radicals (default to True)\r\n :type RemoveTwoSameFragments: bool\r\n :return: ndarray\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n if not inputFastCheck(value=database, dtype='ndarray'):\r\n database = np.asarray(database)\r\n\r\n 
inputCheckIterableInRange(value=RadicalCols, name='RadicalCols', minValue=0, maxValue=database.shape[1],\r\n maxInputInside=2)\r\n inputFullCheck(value=RemoveTwoSameFragments, name='RemoveTwoSameFragments', dtype='bool')\r\n\r\n r1, r2 = RadicalCols[0], RadicalCols[1]\r\n extractor = database[:, (r1, r2)].copy()\r\n\r\n path = np.arange(0, 2 * database.shape[0], 2, dtype=np.uint32)\r\n newFile = np.zeros(shape=(database.shape[0] * 2, database.shape[1]), dtype=database.dtype)\r\n newFile[path, :] = database\r\n newFile[path + 1, :] = database\r\n newFile[path + 1, (r1, r2)] = extractor[:, (1, 0)]\r\n\r\n if RemoveTwoSameFragments:\r\n # Note that we only test on the initial value only, stored in path, not for full file\r\n newFile = np.delete(newFile, obj=[row for row in path if newFile[row, r1] == newFile[row, r2]], axis=0)\r\n return newFile\r\n\r\n\r\ndef DuplicateRadicalByFile(FilePath: str, Output: str = None, RadicalCols: Union[List[int], Tuple[int]] = (1, 2),\r\n RemoveTwoSameFragments: bool = True, FileExport: bool = True) -> pd.DataFrame:\r\n \"\"\"\r\n Implementation of duplicating radicals\r\n\r\n :param FilePath: The directory of the file\r\n :type FilePath: str\r\n\r\n :param Output: The directory of the output. If None, it will overlapped on the original file\r\n :type Output: str\r\n\r\n :param RadicalCols: Two radical columns for radical duplication (default to [1, 2])\r\n :type RadicalCols: List[int] or Tuple[int] or ndarray\r\n\r\n :param RemoveTwoSameFragments: Whether to remove two identical radicals (default to True)\r\n :type RemoveTwoSameFragments: bool\r\n\r\n :param FileExport: Whether to allow export to file\r\n :type FileExport: bool\r\n\r\n :return:\r\n \"\"\"\r\n value, columns = ReadFile(FilePath=FilePath, header=0, get_values=True, get_columns=True)\r\n value = DuplicateRadical(database=value, RadicalCols=RadicalCols, RemoveTwoSameFragments=RemoveTwoSameFragments)\r\n DataFrame = pd.DataFrame(data=value, index=None, columns=columns)\r\n _Export_(dataFrame=DataFrame, overlap_directory=FilePath, new_directory=Output, status=FileExport)\r\n return DataFrame\r\n\r\n\r\n@MeasureExecutionTime\r\ndef GetLineWhenRemoveRepeatedRadicals(database: ndarray, RadicalCols: Tuple[int, int] = (1, 2), MoleculeCol: int = 0,\r\n TargetCol: Optional[Union[int, List[int]]] = None,\r\n RemoveConnection: bool = True) -> List[int]:\r\n \"\"\"\r\n Implementation of removing repeated radicals. 
Get the line of removal\r\n\r\n :param database: The dataset needs to make radicals duplication\r\n :type database: ndarray\r\n\r\n :param MoleculeCol: The molecule_column used for calling index data\r\n :type MoleculeCol: int\r\n\r\n :param RadicalCols: Two radical columns for radical duplication (default to [1, 2])\r\n :type RadicalCols: List[int]\r\n\r\n :param TargetCol: Default at None, if specified it would averaging/normalize the result (BDE)\r\n :type TargetCol: List[int]\r\n\r\n :param RemoveConnection: If set to True, it would remove all duplication either in mode A-B or B-A.\r\n Else, it would only observe for one way only\r\n :type RemoveConnection: bool\r\n\r\n :return: ndarray\r\n \"\"\"\r\n\r\n # [0]: Hyper-parameter Verification\r\n if not inputFastCheck(value=database, dtype='ndarray'):\r\n database = np.asarray(database)\r\n\r\n inputCheckRange(value=MoleculeCol, name='MoleculeCol', maxValue=database.shape[1], minValue=0, fastCheck=True)\r\n inputCheckIterableInRange(value=RadicalCols, name='RadicalCols', minValue=0, maxValue=database.shape[1],\r\n maxInputInside=2)\r\n inputFullCheck(value=RemoveConnection, name='RemoveConnection', dtype='bool')\r\n TargetCol = _checkNumericalTargetInputs_(target_value=TargetCol, name='TargetCol', maxValue=database.shape[1],\r\n minValue=0)\r\n\r\n indexData = GetIndexOnArrangedData(database=database, column=MoleculeCol, get_last=True)\r\n radicalsList = database[:, RadicalCols].tolist()\r\n RemoveLine: List[int] = []\r\n size: int = len(indexData) - 1\r\n\r\n def evaluate(radicals: List[List[str]], current_row: int, following_row: int, removeConnection: bool) -> bool:\r\n if radicals[current_row][0] == radicals[following_row][0]:\r\n if radicals[current_row][1] == radicals[following_row][1]:\r\n return True\r\n\r\n if removeConnection:\r\n if radicals[current_row][0] == radicals[following_row][1]:\r\n if radicals[current_row][1] == radicals[following_row][0]:\r\n return True\r\n return False\r\n\r\n for index in range(0, size): # Extract Molecule\r\n begin, end = indexData[index][0], indexData[index + 1][0]\r\n Temp: List[Optional[bool]] = [None] * (end - begin)\r\n for row in range(begin, end): # For every bond\r\n if Temp[row - begin] is not None:\r\n continue\r\n OverlappedBDE: List[int] = [row]\r\n for nextRow in range(row + 1, end):\r\n if Temp[nextRow - begin] is None:\r\n if evaluate(radicals=radicalsList, current_row=row, following_row=nextRow,\r\n removeConnection=RemoveConnection):\r\n Temp[nextRow - begin] = False\r\n RemoveLine.append(nextRow)\r\n OverlappedBDE.append(nextRow)\r\n\r\n if TargetCol is not None:\r\n if len(OverlappedBDE) > 1:\r\n for value in TargetCol:\r\n database[row, value] = database[OverlappedBDE, value].astype(np.float32).mean()\r\n\r\n return RemoveLine\r\n\r\ndef RemoveRepeatedRadicals(database: ndarray, RadicalCols: Union[List[int], Tuple[int, int]] = (1, 2),\r\n MoleculeCol: int = 0, TargetCol: Union[int, List[int], Tuple] = None,\r\n RemoveConnection: bool = True) -> ndarray:\r\n RemoveLine: List[int] = \\\r\n GetLineWhenRemoveRepeatedRadicals(database=database, RadicalCols=RadicalCols, MoleculeCol=MoleculeCol,\r\n TargetCol=TargetCol, RemoveConnection=RemoveConnection)\r\n\r\n return np.delete(database, obj=RemoveLine, axis=0) if RemoveLine else database\r\n\r\n\r\ndef RemoveRepeatedRadicalsByFile(FilePath: str, Output: str = None, MoleculeCol: int = 0,\r\n RadicalCols: Union[List[int], Tuple[int]] = (1, 2),\r\n TargetCol: Union[List[int], Tuple[int]] = None,\r\n RemoveConnection: bool = True, 
FileExport: bool = True) -> pd.DataFrame:\r\n \"\"\"\r\n Implementation of removing repeated radicals\r\n\r\n :param FilePath: The directory of the file\r\n :type FilePath: str\r\n\r\n :param Output: The directory of the output. If None, it will overlapped on the original file\r\n :type Output: str\r\n\r\n :param MoleculeCol: The molecule_column used for calling index data\r\n :type MoleculeCol: int\r\n\r\n :param RadicalCols: Two radical columns for radical duplication (default to (1, 2))\r\n :type RadicalCols: List[int]\r\n\r\n :param TargetCol: Default at None, if specified it would averaging/normalize the result (BDE)\r\n :type TargetCol: int\r\n\r\n :param RemoveConnection: If set to True, it would remove all duplication either in mode A-B or B-A.\r\n Else, it would only observe for one way only\r\n :type RemoveConnection: bool\r\n\r\n :param FileExport: Whether to allow export to file\r\n :type FileExport: bool\r\n\r\n :return: None\r\n \"\"\"\r\n value, columns = ReadFile(FilePath=FilePath, header=0, get_values=True, get_columns=True)\r\n value = RemoveRepeatedRadicals(database=value, RadicalCols=RadicalCols, MoleculeCol=MoleculeCol,\r\n TargetCol=TargetCol, RemoveConnection=RemoveConnection)\r\n DataFrame = pd.DataFrame(data=value, index=None, columns=columns)\r\n _Export_(dataFrame=DataFrame, overlap_directory=FilePath, new_directory=Output, status=FileExport)\r\n return DataFrame\r\n\r\n\r\n@MeasureExecutionTime\r\ndef RemoveSingleDuplicateInIdx(database: ndarray, IndexCol: int = 0, RemovingCol: int = 3,\r\n TargetCol: Union[int, List[int], Tuple] = None, IndexSorting: bool = False,\r\n key: Callable = int) -> ndarray:\r\n \"\"\"\r\n Implementation of removing duplication in molecule.\r\n\r\n :param database: The dataset needs to make radicals duplication\r\n :type database: ndarray\r\n\r\n :param RemovingCol: The removing_column used for calling index data\r\n :type RemovingCol: int\r\n\r\n :param IndexCol: The column used to make benchmark\r\n :type IndexCol: int\r\n\r\n :param TargetCol: Default at None, if specified it would averaging/normalize the result (BDE)\r\n :type TargetCol: int\r\n\r\n :param IndexSorting: If True, the index column in the dataset would be sorted\r\n :type IndexSorting: bool\r\n\r\n :param key: The method used for validation and comparison (default to be int()).\r\n :type key: Callable\r\n\r\n :return: ndarray\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n if not inputFastCheck(value=database, dtype='ndarray'):\r\n database = np.asarray(database)\r\n\r\n inputCheckRange(value=IndexCol, name='IndexCol', maxValue=database.shape[1], minValue=0, fastCheck=True)\r\n inputCheckRange(value=RemovingCol, name='RemovingCol', maxValue=database.shape[1], minValue=0, fastCheck=True)\r\n\r\n TargetCol = _checkNumericalTargetInputs_(target_value=TargetCol, name='TargetCol', maxValue=database.shape[1],\r\n minValue=0)\r\n\r\n inputFullCheck(value=IndexSorting, name='IndexSorting', dtype='bool')\r\n inputFullCheck(value=key, name='key', dtype='Callable')\r\n\r\n if IndexSorting:\r\n database = ArraySorting(database=database, column=IndexCol, reverse=False)\r\n MolData = GetIndexOnArrangedData(database=database, column=IndexCol, get_last=True)\r\n\r\n OverlappedPosition: List[int] = []\r\n size: int = len(MolData) - 1\r\n if TargetCol is not None:\r\n for i in range(0, size):\r\n begin, end = MolData[i][0], MolData[i + 1][0]\r\n Temp: List[Optional[bool]] = [None] * (end - begin)\r\n for row in range(begin, end):\r\n if Temp[row - begin] is not None:\r\n 
continue\r\n OverlappedBDE: List[int] = [row]\r\n for nextRow in range(begin + 1, end):\r\n if Temp[nextRow - begin] is None:\r\n if key(database[row, RemovingCol]) == key(database[nextRow, RemovingCol]):\r\n Temp[nextRow - begin] = False\r\n OverlappedPosition.append(nextRow)\r\n OverlappedBDE.append(nextRow)\r\n\r\n if len(OverlappedBDE) > 1:\r\n for value in TargetCol:\r\n database[row, value] = database[OverlappedBDE, value].astype(np.float32).mean()\r\n\r\n else:\r\n for i in range(0, size):\r\n begin, end = MolData[i][0], MolData[i + 1][0]\r\n Temp: List[Optional[bool]] = [None] * (end - begin)\r\n for row in range(begin, end):\r\n if Temp[row - begin] is not None:\r\n continue\r\n for nextRow in range(begin + 1, end):\r\n if Temp[nextRow - begin] is None:\r\n if key(database[row, RemovingCol]) == key(database[nextRow, RemovingCol]):\r\n Temp[nextRow - begin] = False\r\n OverlappedPosition.append(nextRow)\r\n\r\n return np.delete(database, obj=OverlappedPosition, axis=0) if OverlappedPosition else database\r\n\r\n\r\ndef RemoveSingleDuplicateInIdxByFile(FilePath: str, output: str = None, IndexCol: int = 0, RemovingCol: int = 3,\r\n IndexSorting: bool = False, key: Callable = int,\r\n TargetCol: Union[List[int], Tuple[int, ...]] = None,\r\n FileExport: bool = True) -> pd.DataFrame:\r\n \"\"\"\r\n Implementation of removing duplication in molecule.\r\n\r\n :param FilePath: The directory of the file\r\n :type FilePath: str\r\n\r\n :param output: The directory of the output. If None, it will overlapped on the original file\r\n :type output: str\r\n\r\n :param RemovingCol: The removing_column used for calling index data\r\n :type RemovingCol: int\r\n\r\n :param TargetCol: Default at None, if specified it would averaging/normalize the result (BDE)\r\n :type TargetCol: int\r\n\r\n :param IndexCol: The column used to make benchmark\r\n :type IndexCol: int\r\n\r\n :param IndexSorting: If True, the index column in the dataset would be sorted\r\n :type IndexSorting: bool\r\n\r\n :param key: The method used for validation and comparison (default to be int()).\r\n :type key: Callable\r\n\r\n :param FileExport: Whether to allow export to file\r\n :type FileExport: bool\r\n\r\n :return: None\r\n \"\"\"\r\n value, columns = ReadFile(FilePath=FilePath, header=0, get_values=True, get_columns=True)\r\n value = RemoveSingleDuplicateInIdx(database=value, IndexCol=IndexCol, RemovingCol=RemovingCol, TargetCol=TargetCol,\r\n IndexSorting=IndexSorting, key=key)\r\n DataFrame = pd.DataFrame(data=value, index=None, columns=columns)\r\n _Export_(dataFrame=DataFrame, overlap_directory=FilePath, new_directory=output, status=FileExport)\r\n return DataFrame\r\n\r\n\r\ndef SortWithIdx(database: ndarray, IndexCol: int = 0, SortCol: int = 3, ExtraSortCol: Optional[int] = None,\r\n IndexSorting: bool = False, IndexReverse: bool = False, SortKey: Callable = int,\r\n SortReverse: bool = False, ExtraSortKey: Callable = float, ExtraSortReverse: bool = False) -> ndarray:\r\n \"\"\"\r\n Implementation of sorting two specified column with defined order.\r\n\r\n :param database: The dataset needs to be sorted\r\n :type database: ndarray\r\n\r\n :param IndexCol: Integer used to mark the index for benchmarking, especially for molecule\r\n :type IndexCol: int\r\n\r\n :param SortCol: Integer used to sorting some specific values in order in specific range,\r\n especially for bond index\r\n :type SortCol: int\r\n\r\n :param ExtraSortCol: Integer used to sorting extra 'column' requirement specific values in order\r\n in specific 
range, especially for BDE\r\n :type ExtraSortCol: int\r\n\r\n :param IndexSorting: If True, the index column in the dataset would be sorted\r\n :type IndexSorting: bool\r\n\r\n :param SortKey: The method used for validation and comparison (default to be int()).\r\n :type SortKey: Callable\r\n\r\n :param ExtraSortKey: The method used for validation and comparison (default to be float()), used for 'extra'.\r\n :type ExtraSortKey: Callable\r\n\r\n :param IndexReverse: If True, the sorting order in the IndexCol would be descending instead of ascending\r\n :type IndexReverse: bool\r\n\r\n :param SortReverse: If True, the sorting order in the SortingCol would be descending instead of ascending\r\n :type SortReverse: bool\r\n\r\n :param ExtraSortReverse: If True, the sorting order in the ExtraSortingCol would be descending instead of ascending\r\n :type ExtraSortReverse: bool\r\n\r\n :return: ndarray\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n if not inputFastCheck(value=database, dtype='ndarray'):\r\n database = np.asarray(database)\r\n\r\n inputCheckRange(value=IndexCol, name='IndexCol', maxValue=database.shape[1], minValue=0, fastCheck=True)\r\n inputFullCheck(value=IndexSorting, name='IndexSorting', dtype='bool')\r\n inputFullCheck(value=IndexReverse, name='reverse', dtype='bool')\r\n\r\n inputCheckRange(value=SortCol, name='SortCol', maxValue=database.shape[1], minValue=0, fastCheck=True)\r\n inputFullCheck(value=SortKey, name='SortKey', dtype='Callable')\r\n inputFullCheck(value=SortReverse, name='SortReverse', dtype='bool')\r\n\r\n if IndexSorting:\r\n database: ndarray = ArraySorting(database=database, column=IndexCol, reverse=IndexReverse)\r\n\r\n DataStructure = GetIndexOnArrangedData(database=database, column=IndexCol, get_last=True)\r\n for i in range(0, len(DataStructure) - 1):\r\n begin, end = DataStructure[i][0], DataStructure[i + 1][0]\r\n if end - begin != 1:\r\n temp: List = database[begin:end, :].tolist()\r\n temp.sort(key=lambda item: SortKey(item[SortCol]), reverse=SortReverse)\r\n database[begin:end, :] = temp\r\n\r\n if ExtraSortCol is not None:\r\n database[begin:end, :] = \\\r\n SortWithIdx(database=database[begin:end, :], IndexCol=SortCol, SortCol=ExtraSortCol,\r\n ExtraSortCol=None, IndexSorting=False, IndexReverse=False, SortKey=ExtraSortKey,\r\n SortReverse=ExtraSortReverse)\r\n\r\n return database\r\n\r\n\r\ndef SortWithIdxByFile(FilePath: str, Output: str = None, IndexCol: int = 0, SortingCol: int = 3,\r\n ExtraSortingCol: Optional[int] = None, IndexSorting: bool = False, SortingKey: Callable = int,\r\n ExtraSortingKey: Callable = float, reverse: bool = False, SortingReverse: bool = False,\r\n ExtraReverse: bool = False, FileExport: bool = True) -> pd.DataFrame:\r\n \"\"\"\r\n Implementation of sorting two specified column with defined order.\r\n\r\n :param FilePath: The directory of the file\r\n :type FilePath: str\r\n\r\n :param Output: The directory of the output. 
If None, it will overlapped on the original file\r\n :type Output: str\r\n\r\n :param IndexCol: Integer used to mark the index for benchmarking, especially for molecule\r\n :type IndexCol: int\r\n\r\n :param SortingCol: Integer used to sorting some specific values in order in specific range,\r\n especially for bond index\r\n :type SortingCol: int\r\n\r\n :param IndexSorting: If True, the index column in the dataset would be sorted\r\n :type IndexSorting: bool\r\n\r\n :param SortingKey: The method used for validation and comparison (default to be int()).\r\n :type SortingKey: Callable\r\n\r\n :param reverse: If True, the sorting order in the IndexCol would be descending instead of ascending\r\n :type reverse: bool\r\n\r\n :param SortingReverse: If True, the sorting order in the SortingCol would be descending instead of ascending\r\n :type SortingReverse: bool\r\n\r\n :param FileExport: Whether to allow export to file\r\n :type FileExport: bool\r\n\r\n :param ExtraSortingCol: Integer used to sorting extra 'column' requirement specific values in order\r\n in specific range, especially for BDE\r\n :type ExtraSortingCol: int\r\n\r\n :param ExtraSortingKey: The method used for validation and comparison (default to be float()), used for 'extra'.\r\n :type ExtraSortingKey: Callable\r\n\r\n :param ExtraSortingKey: The method used for validation and comparison (default to be int()), used for 'extra'.\r\n :type ExtraSortingKey: Callable\r\n\r\n :param ExtraReverse: If True, the sorting order in the ExtraSortingCol would be descending instead of ascending\r\n :type ExtraReverse: bool\r\n\r\n :return: ndarray\r\n \"\"\"\r\n value, columns = ReadFile(FilePath=FilePath, header=0, get_values=True, get_columns=True)\r\n value = SortWithIdx(database=value, IndexCol=IndexCol, SortCol=SortingCol, ExtraSortCol=ExtraSortingCol,\r\n IndexSorting=IndexSorting, IndexReverse=reverse, SortKey=SortingKey, SortReverse=SortingReverse,\r\n ExtraSortKey=ExtraSortingKey, ExtraSortReverse=ExtraReverse)\r\n DataFrame = pd.DataFrame(data=value, index=None, columns=columns)\r\n _Export_(dataFrame=DataFrame, overlap_directory=FilePath, new_directory=Output, status=FileExport)\r\n return DataFrame\r\n\r\n\r\ndef GetRemainingIndexToLimit(indexArray: Union[ndarray, List[int], Tuple[int]], maximumValue: int) -> List[int]:\r\n if True:\r\n if inputFastCheck(value=indexArray, dtype='ndarray'):\r\n if indexArray.ndim != 1:\r\n raise TypeError(\"indexArray should be 1-dimensional array\")\r\n if checkNumpyIntegerDtype(indexArray.dtype):\r\n infiniteCheck = checkNumpyUnsignedIntegerDtype(indexArray.dtype)\r\n else:\r\n raise TypeError(\"indexArray should be 1-dimensional array of positive integer value\")\r\n else:\r\n infiniteCheck = True\r\n if len(set(indexArray)) != len(indexArray):\r\n raise TypeError(\"indexArray should not contained duplicated value\")\r\n\r\n if isinstance(maximumValue, (int, np.integer)):\r\n if maximumValue <= 0:\r\n raise TypeError(f\"maximumValue={maximumValue} should be positive integer\")\r\n if maximumValue < indexArray[-1]:\r\n warning(f\" There are something that is not right. 
In normal cases, maximumValue={maximumValue} \"\r\n f\"should be the largest value of all\")\r\n else:\r\n raise TypeError(f\"maximumValue={maximumValue} should be positive integer\")\r\n\r\n array: List[int] = []\r\n n: int = len(indexArray)\r\n counter: int = 0\r\n indexArray.sort()\r\n isChecked: bool = False\r\n for idx in range(0, maximumValue):\r\n if counter >= n:\r\n array.append(idx)\r\n continue\r\n\r\n if infiniteCheck:\r\n if not isChecked:\r\n if isinstance(indexArray[counter], (int, np.integer)):\r\n if indexArray[counter] < 0:\r\n raise TypeError(f\"indexArray[counter]={indexArray[counter]} should be positive integer.\")\r\n isChecked = True\r\n\r\n if idx == indexArray[counter]:\r\n counter += 1\r\n isChecked = False\r\n else:\r\n array.append(idx)\r\n\r\n return array\r\n\r\n\r\n@MeasureExecutionTime\r\ndef ArrangeDatabase(database: ndarray, BaseColumn: int, ExtensionColumn: Optional[int] = None,\r\n ExtensionMode: str = 'sort', *args, **kwargs) -> ndarray:\r\n \"\"\"\r\n Implementation of arranging the database from top to bottom\r\n\r\n :param database: The dataset needs to be sorted\r\n :type database: ndarray\r\n\r\n :param BaseColumn: The first column index (positive) to arranging\r\n :type BaseColumn: int\r\n\r\n :param ExtensionColumn: The column index (positive) to arranging\r\n :type ExtensionColumn: int\r\n\r\n :param ExtensionMode: Either 'arrange' or 'sort' is acceptable\r\n :type ExtensionMode: int\r\n\r\n :return: ndarray\r\n \"\"\"\r\n # Hyper-parameter Verification\r\n if True:\r\n if not inputFastCheck(value=database, dtype='ndarray'):\r\n database = np.asarray(database)\r\n\r\n inputCheckRange(value=BaseColumn, name='BaseColumn', maxValue=database.shape[1], minValue=0)\r\n inputCheckRange(value=ExtensionColumn, name='ExtensionColumn', maxValue=database.shape[1], minValue=0,\r\n allowNoneInput=True)\r\n\r\n inputFullCheck(value=ExtensionMode, name='ExtensionMode', dtype='str')\r\n if ExtensionMode not in ['sort', 'arrange']:\r\n raise ValueError(\"Unable to perform further implementation\")\r\n\r\n # [1]: Retrieve the structure of the data\r\n hashtable: Dict[str, List[int]] = {}\r\n BaseStructure = GetIndexOnArrangedData(database=database, column=BaseColumn, get_last=False)\r\n for row, value in BaseStructure:\r\n try:\r\n hashtable[row].append(value)\r\n except (IndexError, ValueError):\r\n hashtable[row] = [value]\r\n\r\n # [2]: If found duplicate value, we search and rearranged by the following structure\r\n if len(hashtable) != len(BaseStructure):\r\n size: int = database.shape[1]\r\n rowLine: List[int] = [0] * size\r\n maskLine: List[bool] = [False] * len(BaseStructure)\r\n counter: int = 0\r\n for row, value in BaseStructure:\r\n if maskLine[row]: # If it has been found previously, skip it\r\n continue\r\n\r\n combinationList: List[int] = hashtable[value]\r\n for combination in combinationList:\r\n maskLine[combination] = True\r\n for line in range(combination, size):\r\n if database[line] != value:\r\n break\r\n rowLine[counter] = line\r\n counter += 1\r\n\r\n database: ndarray = database[rowLine, :]\r\n\r\n if ExtensionColumn is None:\r\n return database\r\n\r\n if ExtensionMode == 'sort':\r\n return SortWithIdx(database=database, IndexCol=BaseColumn, SortCol=ExtensionColumn, *args, **kwargs)\r\n\r\n hashtable.clear()\r\n BaseStructure.clear()\r\n gc.collect()\r\n\r\n # [3]: If we provide the extension_column\r\n BaseStructure = GetIndexOnArrangedData(database=database, column=BaseColumn, get_last=True)\r\n for i in range(0, len(BaseStructure) - 
1):\r\n START, END = BaseStructure[i][0], BaseStructure[i + 1][0]\r\n database[START:END, :] = ArrangeDatabase(database=database[START:END, :], BaseColumn=ExtensionColumn,\r\n ExtensionColumn=None)\r\n\r\n return database\r\n\r\n\r\ndef ArrangeDatabaseByFile(FilePath: str, Output: str = None, BaseColumn: int = 0, ExtensionColumn: Optional[int] = None,\r\n ExtensionMode: str = 'sort', FileExport: bool = True, *args, **kwargs) -> pd.DataFrame:\r\n value, columns = ReadFile(FilePath=FilePath, header=0, get_values=True, get_columns=True)\r\n value = ArrangeDatabase(value, BaseColumn, ExtensionColumn, ExtensionMode, *args, **kwargs)\r\n DataFrame = pd.DataFrame(data=value, index=None, columns=columns)\r\n _Export_(dataFrame=DataFrame, overlap_directory=FilePath, new_directory=Output, status=FileExport)\r\n return DataFrame\r\n\r\n\r\ndef EvaluateInputPosition(maxSize: int, **kwargs) -> None:\r\n inputCheckRange(value=maxSize, name='maxSize', minValue=0, maxValue=None, allowNoneInput=True)\r\n stack: List[Optional[Union[int, Tuple[int, int]]]] = []\r\n for key, value in kwargs.items():\r\n inputFullCheck(value=value, name=key, dtype='int-List-Tuple-None', delimiter='-')\r\n if isinstance(value, int):\r\n inputCheckRange(value=value, name=key, minValue=0, maxValue=maxSize, allowNoneInput=False,\r\n rightBound=False, leftBound=True)\r\n stack.append(value)\r\n elif value is None:\r\n stack.append(value)\r\n elif isinstance(value, (List, Tuple)):\r\n inputCheckIterableInRange(value=value, name=key, minValue=0, maxValue=maxSize, maxInputInside=2,\r\n allowNoneInput=False, rightBound=False, leftBound=True)\r\n stack.append(value[0])\r\n stack.append(value[1])\r\n\r\n if len(set(stack)) != len(stack):\r\n raise ValueError(\"Your input cannot contain duplicate\")\r\n return None\r\n\r\n\r\ndef ComputeErrorHistogram(data: pd.DataFrame, error_column: Union[str, int], index_column: Union[str, int] = None,\r\n interval: float = 0.25, maximum_benchmark: float = 5.0, x_axis: str = \"Error (kcal/mol)\",\r\n y_axis: str = \"Counting\", title: str = \"Error Histogram\") -> None:\r\n \"\"\"\r\n Implementation of error histogram\r\n\r\n :param data: The computed dataset which has stored error (pd.DataFrame).\r\n :type data: pd.DataFrame\r\n\r\n :param error_column: The value of the error column in the DataFrame.\r\n :type error_column: str or int\r\n\r\n :param index_column: The value of the index column in the DataFrame (usually bond type) (default to be None).\r\n If None, all bond type is not in difference consideration\r\n :type index_column: str or int\r\n\r\n :param interval: The difference between each column of error analysis (positive input)\r\n :type interval: float or int\r\n\r\n :param maximum_benchmark: The benchmark where exceeding this, all of the value over them will be analyzed here.\r\n :type maximum_benchmark: float or int\r\n\r\n :param x_axis: The name of the x-axis\r\n :type x_axis: str\r\n\r\n :param y_axis: The name of the y-axis\r\n :type y_axis: str\r\n\r\n :param title: The name of the title\r\n :type title: str\r\n\r\n :return: None\r\n \"\"\"\r\n\r\n # Hyper-parameter Verification\r\n def inputCheckColumn(column: Union[str, int], name: str, df: pd.DataFrame) -> str:\r\n inputFullCheck(value=column, name=name, dtype='str-int', delimiter='-')\r\n if inputFastCheck(value=column, dtype='int'):\r\n inputCheckRange(value=column, name=name, maxValue=df.values.shape[1], minValue=0, fastCheck=True)\r\n return df.columns[column]\r\n elif inputFastCheck(value=column, dtype='str'):\r\n if column 
not in list(df.columns):\r\n raise ValueError(f\"error_column must be was not inside the DataFrame ({column})\")\r\n return column\r\n\r\n if True:\r\n inputFullCheck(value=data, name='data', dtype='DataFrame')\r\n inputCheckColumn(column=error_column, name='error_column', df=data)\r\n\r\n if index_column is not None:\r\n inputCheckColumn(column=index_column, name='index_column', df=data)\r\n inputFullCheck(value=error_column, name='error_column', dtype='str-int', delimiter='-')\r\n\r\n if x_axis is None:\r\n x_axis = \"Error (kcal/mol)\"\r\n else:\r\n inputFullCheck(value=x_axis, name='x_axis', dtype='str')\r\n\r\n if y_axis is None:\r\n y_axis = \"Counting\"\r\n else:\r\n inputFullCheck(value=y_axis, name='y_axis', dtype='str')\r\n\r\n if title is None:\r\n title = \"Error Histogram\"\r\n else:\r\n inputFullCheck(value=title, name='title', dtype='str')\r\n\r\n inputFullCheck(value=interval, name='interval', dtype='int-float', delimiter='-')\r\n if interval <= 0:\r\n warning(\" Your interval is negative. Change to default (0.25)\")\r\n interval = 0.25\r\n\r\n inputFullCheck(value=maximum_benchmark, name='maximum_benchmark', dtype='int-float', delimiter='-')\r\n if maximum_benchmark <= 0:\r\n warning(\" Your maximum_benchmark is negative. Change to default (5)\")\r\n maximum_benchmark = 5\r\n if maximum_benchmark < interval:\r\n raise ValueError(\" Maximum benchmark must exceed the interval\")\r\n\r\n pass\r\n\r\n import matplotlib.pyplot as plt\r\n\r\n def call(DataFrame, objectType: Optional[str], bound: List[float]):\r\n plt.clf()\r\n plt.autoscale(DataFrame=False)\r\n plt.hist(DataFrame, bins=bound, alpha=0.75, color='red', rwidth=0.85)\r\n plt.xlabel(str(x_axis))\r\n plt.ylabel(str(y_axis))\r\n plt.title(str(title) if object_type is None else str(title) + f\"({objectType})\")\r\n plt.xlim(0, bound[- 1])\r\n plt.show()\r\n\r\n edge = [interval * index for index in range(0, int((maximum_benchmark + interval) // interval))] + \\\r\n [maximum_benchmark + interval]\r\n\r\n if index_column is None:\r\n dataframe = data[error_column].values\r\n dataframe[dataframe > maximum_benchmark] = maximum_benchmark + interval / 2\r\n call(DataFrame=dataframe, objectType=None, bound=edge)\r\n else:\r\n modified_data = data[[index_column, error_column]].values\r\n modified_data = ArraySorting(database=modified_data, column=0, reverse=False)\r\n index_array = GetIndexOnArrangedData(database=modified_data, column=0, get_last=True)\r\n for i in range(0, len(index_array) - 1):\r\n copy = modified_data[index_array[i]:index_array[i + 1], 1]\r\n copy[copy > maximum_benchmark] = maximum_benchmark + interval / 2\r\n object_type = index_array[i][1]\r\n call(DataFrame=copy, objectType=object_type, bound=edge)\r\n return None\r\n\r\n\r\n# -------------------------------------------------------------------------------------------------------------------\r\n# [6]: Function used for further extension: Checking data type\r\ndef OptimizeIntegerDatatypeByShape(shape: Union[Tuple, List]) -> np.dtype:\r\n minimum, maximum = min(shape), max(shape)\r\n if not inputFastCheck(minimum, dtype='int') or not inputFastCheck(maximum, dtype='int'):\r\n raise TypeError(f\"Float value has been found. 
Please check your object_shape ({shape})\")\r\n\r\n if minimum >= 0:\r\n np_dtype = (np.uint8, np.uint16, np.uint32, np.uint64)\r\n else:\r\n np_dtype = (np.int8, np.int16, np.int32, np.int64)\r\n\r\n highest_point = max(abs(minimum), abs(maximum))\r\n for search_dtype in np_dtype:\r\n if highest_point <= np.iinfo(search_dtype).max:\r\n return np.dtype(search_dtype)\r\n return np.dtype(np_dtype[-1]) # Fallback: the widest dtype of the chosen family\r\n\r\n\r\n@MeasureExecutionTime\r\ndef convertNumpyDenseToScipySparse(data: Union[ndarray, coo_matrix, spmatrix], tuned_up: bool = True,\r\n sparse_format: str = \"coo\") -> spmatrix:\r\n if inputFastCheck(data, dtype='coo_matrix-spmatrix', delimiter='-'):\r\n return data\r\n\r\n if not inputFastCheck(data, dtype='ndarray'):\r\n warning(f\" The current input data was not np.ndarray (!= {type(data)})\")\r\n data: ndarray = np.array(data, dtype=np.uint8)\r\n\r\n inputFullCheck(value=tuned_up, name='tuned_up', dtype='bool')\r\n if sparse_format not in (\"coo\", \"csr\", \"csc\"):\r\n raise TypeError(\"sparse_format should be in coo, csc, csr format\")\r\n\r\n func = coo_matrix\r\n if sparse_format == \"csr\":\r\n func = csr_matrix\r\n elif sparse_format == \"csc\":\r\n func = csc_matrix\r\n sparseMatrix: spmatrix = func(data, shape=data.shape, dtype=data.dtype)\r\n\r\n if not tuned_up:\r\n return sparseMatrix\r\n\r\n if inputFastCheck(sparseMatrix, dtype='coo_matrix'):\r\n sparseMatrix.col = sparseMatrix.col.astype(OptimizeIntegerDatatypeByShape([sparseMatrix.shape[1]]))\r\n sparseMatrix.row = sparseMatrix.row.astype(OptimizeIntegerDatatypeByShape([sparseMatrix.shape[0]]))\r\n else:\r\n # `indices` stores positions bounded by the matrix shape; `indptr` stores running\r\n # offsets bounded by the number of stored entries (nnz). Pass value bounds rather\r\n # than shape tuples, which is what OptimizeIntegerDatatypeByShape expects.\r\n sparseMatrix.indices = sparseMatrix.indices.astype(OptimizeIntegerDatatypeByShape([max(sparseMatrix.shape)]))\r\n sparseMatrix.indptr = sparseMatrix.indptr.astype(OptimizeIntegerDatatypeByShape([int(sparseMatrix.nnz)]))\r\n\r\n return sparseMatrix\r\n\r\n\r\ndef checkNumpyUnsignedIntegerDtype(dtype) -> bool:\r\n integer = [np.unsignedinteger, np.uint8, np.uint16, np.uint32, np.uint64, np.ulonglong]\r\n for dt in integer:\r\n if dtype == dt:\r\n return True\r\n return False\r\n\r\n\r\ndef checkNumpySignedIntegerDtype(dtype) -> bool:\r\n integer = [np.signedinteger, np.int8, np.int16, np.int32, np.int64, np.longlong]\r\n for dt in integer:\r\n if dtype == dt:\r\n return True\r\n return False\r\n\r\n\r\ndef checkNumpyIntegerDtype(dtype) -> bool:\r\n if checkNumpyUnsignedIntegerDtype(dtype):\r\n return True\r\n return checkNumpySignedIntegerDtype(dtype)\r\n\r\n\r\ndef checkNumpyFloatingDtype(dtype) -> bool:\r\n # np.float96 / np.float128 are platform-dependent extended-precision aliases, so they\r\n # are looked up defensively instead of being referenced directly (np.float_ is an\r\n # alias of np.float64 and is already covered).\r\n floating = [np.float16, np.float32, np.float64]\r\n floating += [getattr(np, name) for name in (\"float96\", \"float128\") if hasattr(np, name)]\r\n for dt in floating:\r\n if dtype == dt:\r\n return True\r\n return False\r\n\r\n\r\ndef checkNumericNumpyDtype(dtype) -> bool:\r\n if checkNumpyIntegerDtype(dtype):\r\n return True\r\n return checkNumpyFloatingDtype(dtype)\r\n\r\n\r\ndef checkNumpyDtype(dtype) -> bool:\r\n arr = (np.integer, np.floating, np.inexact, np.complexfloating, np.bool_, np.timedelta64, np.object_,\r\n np.flexible, np.bytes_)\r\n return isinstance(dtype, arr)\r\n\r\n\r\ndef optimizePandasDatatype(df: pd.DataFrame):\r\n raise NotImplementedError\r\n\r\n\r\ndef checkIsPrimitiveType(value) -> bool:\r\n return isinstance(value, (str, int, float, bool)) or value is None\r\n"
] |
[
[
"matplotlib.pyplot.xlim",
"numpy.where",
"pandas.read_csv",
"numpy.dtype",
"numpy.uint8",
"pandas.DataFrame",
"numpy.logical_and",
"numpy.arange",
"numpy.array",
"numpy.delete",
"numpy.zeros",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.hist",
"numpy.argsort",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.show",
"sklearn.__version__.split",
"numpy.iinfo",
"numpy.asarray",
"numpy.sum",
"pandas.__version__.split",
"numpy.__version__.split",
"tensorflow.__version__.split"
]
] |
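The record above downcasts scipy sparse index arrays to the smallest integer dtype that fits. A minimal, self-contained sketch of the same idea, assuming scipy is installed (pick_uint is a hypothetical stand-in for OptimizeIntegerDatatypeByShape):

import numpy as np
from scipy.sparse import coo_matrix

def pick_uint(bound):
    # hypothetical helper: smallest unsigned dtype that can hold bound
    for dt in (np.uint8, np.uint16, np.uint32, np.uint64):
        if bound <= np.iinfo(dt).max:
            return np.dtype(dt)
    return np.dtype(np.uint64)

sparse = coo_matrix(np.eye(300, dtype=np.uint8))
sparse.row = sparse.row.astype(pick_uint(sparse.shape[0] - 1))
sparse.col = sparse.col.astype(pick_uint(sparse.shape[1] - 1))
print(sparse.row.dtype, sparse.col.dtype)  # uint16 uint16 (299 does not fit in uint8)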
AmeyaWagh/miniflow
|
[
"0f0706692e0098e61df60f64038a80c1eb83e695"
] |
[
"miniflow_python/MiniFlow.py"
] |
[
"#! /usr/bin/python\nimport numpy as np\n\nclass Node(object):\n def __init__(self, inbound_nodes=[]):\n # Nodes from which this Node receives values\n self.inbound_nodes = inbound_nodes\n # Nodes to which this Node passes values\n self.outbound_nodes = []\n # A calculated value\n self.value = None\n # Add this node as an outbound node on its inputs.\n for n in self.inbound_nodes:\n n.outbound_nodes.append(self)\n\n # These will be implemented in a subclass.\n def forward(self):\n \"\"\"\n Forward propagation.\n\n Compute the output value based on `inbound_nodes` and\n store the result in self.value.\n \"\"\"\n raise NotImplemented\n\n\nclass Input(Node):\n def __init__(self):\n # an Input node has no inbound nodes,\n # so no need to pass anything to the Node instantiator\n Node.__init__(self)\n\n # NOTE: Input node is the only node that may\n # receive its value as an argument to forward().\n #\n # All other node implementations should calculate their\n # values from the value of previous nodes, using\n # self.inbound_nodes\n #\n # Example:\n # val0 = self.inbound_nodes[0].value\n def forward(self, value=None):\n if value is not None:\n self.value = value\n\n\nclass Add(Node):\n # You may need to change this...\n def __init__(self, *inputs):\n Node.__init__(self, inputs)\n\n def forward(self):\n \"\"\"\n For reference, here's the old way from the last\n quiz. You'll want to write code here.\n \"\"\"\n # x_value = self.inbound_nodes[0].value\n # y_value = self.inbound_nodes[1].value\n # self.value = x_value + y_value\n\n # print(\">>\",dir(self.inbound_nodes))\n value=0 \n for node in self.inbound_nodes:\n # print(node.value)\n value+=node.value\n # self.value = reduce(lambda x,y:x.value+y.value,self.inbound_nodes)\n self.value=value\n\nclass Multiply(Node):\n def __init__(self, *inputs):\n Node.__init__(self,inputs)\n \n def forward(self):\n value=1 \n for node in self.inbound_nodes:\n print(node.value)\n value*=node.value\n # self.value = reduce(lambda x,y:x.value+y.value,self.inbound_nodes)\n self.value=value\n\n\nclass Linear(Node):\n def __init__(self, inputs, weights, bias):\n Node.__init__(self, [inputs, weights, bias])\n\n # NOTE: The weights and bias properties here are not\n # numbers, but rather references to other nodes.\n # The weight and bias values are stored within the\n # respective nodes.\n\n def forward(self):\n \"\"\"\n Set self.value to the value of the linear function output.\n\n Your code goes here!\n \"\"\"\n # for _input _weight in zip(iter1, iter2)\n # for node in self.inbound_nodes:\n # print(node.value) \n inputs = np.array(self.inbound_nodes[0].value)\n weights = np.array(self.inbound_nodes[1].value)\n bias = np.array(self.inbound_nodes[2].value)\n # self.value = \n # print(\"inputs\",inputs)\n # print(\"weights\",weights)\n # print(\"bias\",bias)\n # print(\"value\",np.matmul(inputs,weights)+bias)\n # self.value = np.matmul(inputs,weights)+bias\n self.value = np.dot(inputs, weights) + bias\n\nclass Sigmoid(Node):\n \"\"\"\n You need to fix the `_sigmoid` and `forward` methods.\n \"\"\"\n def __init__(self, node):\n Node.__init__(self, [node])\n\n def _sigmoid(self, x):\n \"\"\"\n This method is separate from `forward` because it\n will be used later with `backward` as well.\n\n `x`: A numpy array-like object.\n\n Return the result of the sigmoid function.\n\n Your code here!\n \"\"\"\n # sigmoid = lambda n:(1./1.+np.exp(-1*n))\n # meta = Linear(inputs, weights, bias)\n # return map(sigmoid,x)\n return 1./(1.+np.exp(-x))\n\n def forward(self):\n \"\"\"\n Set 
the value of this node to the result of the\n sigmoid function, `_sigmoid`.\n\n Your code here!\n \"\"\"\n # This is a dummy value to prevent numpy errors\n # if you test without changing this method.\n # for node in self.inbound_nodes:\n # print(self._sigmoid(node.value))\n self.value = self._sigmoid(self.inbound_nodes[0].value)\n\nclass MSE(Node):\n def __init__(self, y, a):\n \"\"\"\n The mean squared error cost function.\n Should be used as the last node for a network.\n \"\"\"\n # Call the base class' constructor.\n Node.__init__(self, [y, a])\n\n def forward(self):\n \"\"\"\n Calculates the mean squared error.\n \"\"\"\n # NOTE: We reshape these to avoid possible matrix/vector broadcast\n # errors.\n #\n # For example, if we subtract an array of shape (3,) from an array of shape\n # (3,1) we get an array of shape(3,3) as the result when we want\n # an array of shape (3,1) instead.\n #\n # Making both arrays (3,1) insures the result is (3,1) and does\n # an elementwise subtraction as expected.\n y = self.inbound_nodes[0].value.reshape(-1, 1)\n a = self.inbound_nodes[1].value.reshape(-1, 1)\n # TODO: your code here\n # print(y)\n # print(a)\n self.diff = y-a \n self.value = np.mean(self.diff**2)\n\ndef topological_sort(feed_dict):\n \"\"\"\n Sort generic nodes in topological order using Kahn's Algorithm.\n\n `feed_dict`: A dictionary where the key is a `Input` node and the value is the respective value feed to that node.\n\n Returns a list of sorted nodes.\n \"\"\"\n\n input_nodes = [n for n in feed_dict.keys()]\n\n G = {}\n nodes = [n for n in input_nodes]\n while len(nodes) > 0:\n n = nodes.pop(0)\n if n not in G:\n G[n] = {'in': set(), 'out': set()}\n for m in n.outbound_nodes:\n if m not in G:\n G[m] = {'in': set(), 'out': set()}\n G[n]['out'].add(m)\n G[m]['in'].add(n)\n nodes.append(m)\n\n L = []\n S = set(input_nodes)\n while len(S) > 0:\n n = S.pop()\n\n if isinstance(n, Input):\n n.value = feed_dict[n]\n\n L.append(n)\n for m in n.outbound_nodes:\n G[n]['out'].remove(m)\n G[m]['in'].remove(n)\n # if no other incoming edges add to S\n if len(G[m]['in']) == 0:\n S.add(m)\n return L\n\ndef forward_pass(graph):\n \"\"\"\n Performs a forward pass through a list of sorted Nodes.\n\n Arguments:\n\n `graph`: The result of calling `topological_sort`.\n \"\"\"\n # Forward pass\n for n in graph:\n n.forward()\n\n# def forward_pass(output_node, sorted_nodes):\n# \"\"\"\n# Performs a forward pass through a list of sorted nodes.\n\n# Arguments:\n\n# `output_node`: A node in the graph, should be the output node (have no outgoing edges).\n# `sorted_nodes`: A topologically sorted list of nodes.\n\n# Returns the output Node's value\n# \"\"\"\n\n# for n in sorted_nodes:\n# n.forward()\n\n# return output_node.value\n"
] |
[
[
"numpy.array",
"numpy.dot",
"numpy.exp",
"numpy.mean"
]
] |
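A usage sketch for the MiniFlow record above, assuming its classes and helpers are importable as defined: build a tiny linear-plus-sigmoid graph, sort it topologically, and run one forward pass.

import numpy as np
# assumes Node, Input, Linear, Sigmoid, topological_sort, forward_pass from the file above

X, W, b = Input(), Input(), Input()
g = Sigmoid(Linear(X, W, b))

feed_dict = {
    X: np.array([[-1., -2.], [-1., -2.]]),  # two samples, two features
    W: np.array([[2.], [3.]]),              # one output unit
    b: np.array([-3.]),
}
graph = topological_sort(feed_dict)
forward_pass(graph)
print(g.value)  # sigmoid(X.W + b), shape (2, 1)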
Akey20/Missions_to_Mars
|
[
"b17872643a1c377c98d9d4b67bb287ebe3481db5"
] |
[
"scrape_mars.py"
] |
[
"#imports\nfrom splinter import Browser\nfrom bs4 import BeautifulSoup\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nimport time\nimport requests\nimport json\n\n\ndef scrape_info():\n \n # init_browser()\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=False)\n\n # Mars News\n url = \"https://redplanetscience.com/\"\n browser.visit(url)\n\n time.sleep(1)\n\n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n \n data = {}\n\n data[\"news_title\"] = soup.find(\"div\", class_=\"content_title\").get_text()\n data[\"news_p\"] = soup.find(\"div\", class_=\"article_teaser_body\").get_text()\n data[\"featured_image\"] = get_featured(browser)\n data[\"mars_table\"] = get_table()\n data[\"hemispheres\"] = get_hemispheres(browser)\n\n browser.quit()\n\n return data\n\n\n\ndef get_featured(browser):\n \n #JPL Mars Space Images - Featured Image\n url = \"https://spaceimages-mars.com/\"\n browser.visit(url)\n\n html = browser.html\n soup = BeautifulSoup(html, \"html.parser\")\n\n image = soup.body.find_all(\"img\")[1]\n \n featured_image_url = url + \"image/featured/mars2.jpg\"\n \n return featured_image_url\n\n\n\ndef get_table():\n \n #Mars Facts\n url = \"https://galaxyfacts-mars.com/\"\n tables = pd.read_html(url)\n\n df = tables[0] \n\n html_table = df.to_html()\n \n html_table.replace('\\n', '')\n\n return df.to_html()\n\n\n\ndef get_hemispheres(browser):\n #Mars Hemispheres\n url = 'https://marshemispheres.com/'\n browser.visit(url)\n\n all_hemispheres = []\n for num in range(4):\n browser.find_by_css(\"a.product-item img\")[num].click()\n all_hemispheres.append({\n 'title':browser.find_by_css('h2.title')[0].text,\n 'url':browser.find_by_text('Sample')[0]['href']\n })\n browser.back()\n\n return all_hemispheres"
] |
[
[
"pandas.read_html"
]
] |
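pandas.read_html, used by get_table above, returns one DataFrame per <table> element it finds; it accepts a URL or a raw HTML string and needs an HTML parser such as lxml or html5lib installed. A small offline sketch:

import pandas as pd

html = ("<table><tr><th>property</th><th>value</th></tr>"
        "<tr><td>diameter</td><td>6,779 km</td></tr></table>")
tables = pd.read_html(html)       # list of DataFrames, one per table
df = tables[0]
print(df.to_html(index=False).replace("\n", ""))  # flattened, as get_table() intends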
sjesupaul/paintingClassifier
|
[
"7c8f157beeb086d23e9df7c72d263eba00ad4a93"
] |
[
"artClassifierApp/app/__init__.py"
] |
[
"from flask import Flask, jsonify, request, render_template\n\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport os\n\ndef get_image_score(image_path):\n full_path = os.getcwd()+'/app/static/images/'+image_path\n image_data = tf.gfile.FastGFile(full_path, 'rb').read()\n\n label_lines = sorted([line.rstrip() for line in tf.gfile.GFile('/Users/Saniya/tf_files/retrained_labels1.txt')])\n\n with tf.gfile.FastGFile('/Users/Saniya/tf_files/retrained_graph1.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n results = []\n\n print(image_path)\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n # with tf.Graph().as_default():\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})\n # top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\n \n for node_id in range(0, 13):#top_k:\n human_string = label_lines[node_id]\n score = predictions[0][node_id]\n results.append({\n \"axis\": human_string.title(),\n \"value\":score\n })\n print('%s (score = %.5f)' % (human_string, score))\n return results\n\n\n#---------- URLS AND WEB PAGES -------------#\n\n# Initialize the app\napp = Flask(__name__)\n\nimage_path = 'ethan.jpg'\n# Get an example and return it's score from the predictor model\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef score():\n # \"\"\"\n # When A POST request with json data is made to this uri,\n # Read the example from the json, predict probability and\n # send it with a response\n # \"\"\"\n\n image_path = request.form.get(\"image_path\", \"\")\n if image_path:\n results = [get_image_score(image_path)]\n else:\n results = [[]]\n return render_template(\"index.html\", image_path=image_path, results=results)\n\n#--------- RUN WEB APP SERVER ------------#\n\n# Start the app server\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000, debug=True)\n"
] |
[
[
"tensorflow.GraphDef",
"tensorflow.Session",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"tensorflow.gfile.FastGFile"
]
] |
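The commented-out top_k line in the record above is a standard numpy ranking idiom; here is a standalone sketch of it (the label names are made up for illustration):

import numpy as np

predictions = np.array([0.05, 0.60, 0.10, 0.25])               # stand-in softmax scores
labels = ["baroque", "cubism", "impressionism", "pop art"]     # hypothetical label order

top_k = predictions.argsort()[-len(predictions):][::-1]        # indices, best first
for node_id in top_k:
    print('%s (score = %.5f)' % (labels[node_id], predictions[node_id]))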
hofaflo/pyedflib
|
[
"2911382c93415925278c330058fc2640e1211324"
] |
[
"pyedflib/edfwriter.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright (c) 2019 - 2020 Simon Kern\n# Copyright (c) 2015 - 2020 Holger Nahrstaedt\n# Copyright (c) 2011, 2015, Chris Lee-Messer\n# Copyright (c) 2016-2017 The pyedflib Developers\n# <https://github.com/holgern/pyedflib>\n# See LICENSE for license details.\n\nimport numpy as np\nimport sys\nimport warnings\nfrom datetime import datetime, date\nfrom ._extensions._pyedflib import FILETYPE_EDFPLUS, FILETYPE_BDFPLUS, FILETYPE_BDF, FILETYPE_EDF\nfrom ._extensions._pyedflib import open_file_writeonly, set_physical_maximum, set_patient_additional, set_digital_maximum\nfrom ._extensions._pyedflib import set_birthdate, set_digital_minimum, set_technician, set_recording_additional, set_patientname\nfrom ._extensions._pyedflib import set_patientcode, set_equipment, set_admincode, set_gender, set_datarecord_duration, set_number_of_annotation_signals\nfrom ._extensions._pyedflib import set_startdatetime, set_starttime_subsecond, set_samplefrequency, set_physical_minimum, set_label, set_physical_dimension\nfrom ._extensions._pyedflib import set_transducer, set_prefilter, write_physical_samples, close_file, write_annotation_latin1, write_annotation_utf8\nfrom ._extensions._pyedflib import blockwrite_physical_samples, write_errors, blockwrite_digital_samples, write_digital_short_samples, write_digital_samples, blockwrite_digital_short_samples\n\n\n__all__ = ['EdfWriter']\n\n\ndef check_is_ascii(string):\n \"\"\"according to the EDF+ specifications, only ASCII chars in ordeal\n range 32...126 are allowed, where 32 is space\n\n https://www.edfplus.info/specs/edfplus.html#header\n \"\"\"\n if not all([ord(x)>32 and ord(x)<127 for x in string]):\n warnings.warn('Invalid char: header entries should contain only ASCII'\\\n ' characters and no spaces: \"{}\"'.format(string))\n\n\ndef check_signal_header_correct(channels, i, file_type):\n \"\"\"\n helper function to check if all entries in the channel dictionary are fine.\n\n Will give a warning if label, transducer, dimension, prefilter are too long.\n\n Will throw an exception if dmin, dmax, pmin, pmax are out of bounds or would\n be truncated in such a way as that signal values would be completely off.\n \"\"\"\n ch = channels[i]\n label = ch['label']\n\n if len(ch['label'])>16:\n warnings.warn('Label of channel {} is longer than 16 ASCII chars.'\\\n 'The label will be truncated to \"{}\"'.format(i, ch['label'][:16] ))\n if len(ch['prefilter'])>80:\n warnings.warn('prefilter of channel {} is longer than 80 ASCII chars.'\\\n 'The label will be truncated to \"{}\"'.format(i, ch['prefilter'][:80] ))\n if len(ch['transducer'])>80:\n warnings.warn('transducer of channel {} is longer than 80 ASCII chars.'\\\n 'The label will be truncated to \"{}\"'.format(i, ch['transducer'][:80] ))\n if len(ch['dimension'])>80:\n warnings.warn('dimension of channel {} is longer than 8 ASCII chars.'\\\n 'The label will be truncated to \"{}\"'.format(i, ch['dimension'][:8] ))\n\n # these ones actually raise an exception\n dmin, dmax = (-8388608, 8388607) if file_type in (FILETYPE_BDFPLUS, FILETYPE_BDF) else (-32768, 32767)\n if ch['digital_min']<dmin:\n raise ValueError('Digital minimum for channel {} ({}) is {},'\\\n 'but minimum allowed value is {}'.format(i, label,\n ch['digital_min'],\n dmin))\n if ch['digital_max']>dmax:\n raise ValueError('Digital maximum for channel {} ({}) is {},'\\\n 'but maximum allowed value is {}'.format(i, label,\n ch['digital_max'],\n dmax))\n\n\n # if we truncate the physical min before the dot, we potentitally\n # have all 
the signals incorrect by an order of magnitude.\n if len(str(ch['physical_min']))>8 and ch['physical_min'] < -99999999:\n raise ValueError('Physical minimum for channel {} ({}) is {}, which has {} chars, '\\\n 'however, EDF+ can only save 8 chars, critical precision loss is expected, '\\\n 'please convert the signals to another dimension (e.g. uV to mV)'.format(i, label,\n ch['physical_min'],\n len(str(ch['physical_min']))))\n if len(str(ch['physical_max']))>8 and ch['physical_max'] > 99999999:\n raise ValueError('Physical maximum for channel {} ({}) is {}, which has {} chars, '\\\n 'however, EDF+ can only save 8 chars, critical precision loss is expected, '\\\n 'please convert the signals to another dimension (e.g. uV to mV).'.format(i, label,\n ch['physical_max'],\n len(str(ch['physical_max']))))\n # if we truncate the physical min behind the dot, we just lose precision,\n # in this case only a warning is enough\n if len(str(ch['physical_min']))>8:\n warnings.warn('Physical minimum for channel {} ({}) is {}, which has {} chars, '\\\n 'however, EDF+ can only save 8 chars, will be truncated to {}, '\\\n 'some loss of precision is to be expected'.format(i, label,\n ch['physical_min'],\n len(str(ch['physical_min'])),\n str(ch['physical_min'])[:8]))\n if len(str(ch['physical_max']))>8:\n warnings.warn('Physical maximum for channel {} ({}) is {}, which has {} chars, '\\\n 'however, EDF+ can only save 8 chars, will be truncated to {}, '\\\n 'some loss of precision is to be expected.'.format(i, label,\n ch['physical_max'],\n len(str(ch['physical_max'])),\n str(ch['physical_max'])[:8]))\n\n\n\ndef u(x):\n return x.decode(\"utf-8\", \"strict\")\n\n\ndef du(x):\n if isbytestr(x):\n return x\n else:\n return x.encode(\"utf-8\")\n\n\ndef isstr(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)\n\n\ndef isbytestr(s):\n return isinstance(s, bytes)\n\n\ndef gender2int(gender):\n if isinstance(gender, int) or gender is None:\n return gender\n elif gender.lower() in ['', 'x', 'xx', 'xxx', 'unknown', '?', '??']:\n return None\n elif gender.lower() in [\"female\", \"woman\", \"f\", \"w\"]:\n return 0\n elif gender.lower() in [\"male\", \"man\", \"m\"]:\n return 1\n else:\n raise ValueError(\"Unknown gender: '{}'\".format(gender))\n\n\nclass ChannelDoesNotExist(Exception):\n def __init__(self, value):\n self.parameter = value\n\n def __str__(self):\n return repr(self.parameter)\n\n\nclass WrongInputSize(Exception):\n def __init__(self, value):\n self.parameter = value\n\n def __str__(self):\n return repr(self.parameter)\n\n\nclass EdfWriter(object):\n def __exit__(self, exc_type, exc_val, ex_tb):\n self.close()\n\n def __enter__(self):\n return self\n\n def __del__(self):\n self.close()\n\n def __init__(self, file_name, n_channels,\n file_type=FILETYPE_EDFPLUS):\n \"\"\"Initialises an EDF file at file_name.\n file_type is one of\n edflib.FILETYPE_EDFPLUS\n edflib.FILETYPE_BDFPLUS\n n_channels is the number of channels without the annotation channel\n\n channel_info should be a\n list of dicts, one for each channel in the data. Each dict needs\n these values:\n\n 'label' : channel label (string, <= 16 characters, must be unique)\n 'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)\n 'sample_rate' : sample frequency in hertz (int). 
Deprecated: use 'sample_frequency' instead.\n 'sample_frequency' : number of samples per record (int)\n 'physical_max' : maximum physical value (float)\n 'physical_min' : minimum physical value (float)\n 'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)\n 'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)\n \"\"\"\n self.path = file_name\n self.file_type = file_type\n self.patient_name = ''\n self.patient_code = ''\n self.technician = ''\n self.equipment = ''\n self.recording_additional = ''\n self.patient_additional = ''\n self.admincode = ''\n self.gender = None\n self.recording_start_time = datetime.now().replace(microsecond=0)\n\n self.birthdate = ''\n self.duration = 1\n self.number_of_annotations = 1 if file_type in [FILETYPE_EDFPLUS, FILETYPE_BDFPLUS] else 0\n self.n_channels = n_channels\n self.channels = []\n self.sample_buffer = []\n for i in np.arange(self.n_channels):\n if self.file_type == FILETYPE_BDFPLUS or self.file_type == FILETYPE_BDF:\n self.channels.append({'label': 'ch{}'.format(i), 'dimension': 'mV', 'sample_rate': 100,\n 'sample_frequency': None, 'physical_max': 1.0, 'physical_min': -1.0,\n 'digital_max': 8388607,'digital_min': -8388608,\n 'prefilter': '', 'transducer': ''})\n elif self.file_type == FILETYPE_EDFPLUS or self.file_type == FILETYPE_EDF:\n self.channels.append({'label': 'ch{}'.format(i), 'dimension': 'mV', 'sample_rate': 100,\n 'sample_frequency': None, 'physical_max': 1.0, 'physical_min': -1.0,\n 'digital_max': 32767, 'digital_min': -32768,\n 'prefilter': '', 'transducer': ''})\n\n self.sample_buffer.append([])\n self.handle = open_file_writeonly(self.path, self.file_type, self.n_channels)\n if (self.handle < 0):\n raise IOError(write_errors[self.handle])\n\n def update_header(self):\n \"\"\"\n Updates header to edffile struct\n \"\"\"\n # some checks that warn users if header fields exceed 80 chars\n patient_ident = len(self.patient_code) + len(self.patient_name) \\\n + len(self.patient_additional) + 3 + 1 + 11 # 3 spaces 1 gender 11 birthdate\n record_ident = len(self.equipment) + len(self.technician) \\\n + len(self.admincode) + len(self.recording_additional) \\\n + len('Startdate') + 3 + 11 # 3 spaces 11 birthdate\n\n if patient_ident>80:\n warnings.warn('Patient code, name, gender and birthdate combined must not be larger than 80 chars. ' +\n 'Currently has len of {}. See https://www.edfplus.info/specs/edfplus.html#additionalspecs'.format(patient_ident))\n if record_ident>80:\n warnings.warn('Equipment, technician, admincode and recording_additional combined must not be larger than 80 chars. ' +\n 'Currently has len of {}. 
See https://www.edfplus.info/specs/edfplus.html#additionalspecs'.format(record_ident))\n\n set_technician(self.handle, du(self.technician))\n set_recording_additional(self.handle, du(self.recording_additional))\n set_patientname(self.handle, du(self.patient_name))\n set_patientcode(self.handle, du(self.patient_code))\n set_patient_additional(self.handle, du(self.patient_additional))\n set_equipment(self.handle, du(self.equipment))\n set_admincode(self.handle, du(self.admincode))\n set_gender(self.handle, gender2int(self.gender))\n\n set_datarecord_duration(self.handle, self.duration)\n set_number_of_annotation_signals(self.handle, self.number_of_annotations)\n set_startdatetime(self.handle, self.recording_start_time.year, self.recording_start_time.month,\n self.recording_start_time.day, self.recording_start_time.hour,\n self.recording_start_time.minute, self.recording_start_time.second)\n # subseconds are noted in nanoseconds, so we multiply by 100\n if self.recording_start_time.microsecond>0:\n set_starttime_subsecond(self.handle, self.recording_start_time.microsecond*100)\n if isstr(self.birthdate):\n if self.birthdate != '':\n birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()\n set_birthdate(self.handle, birthday.year, birthday.month, birthday.day)\n else:\n set_birthdate(self.handle, self.birthdate.year, self.birthdate.month, self.birthdate.day)\n for i in np.arange(self.n_channels):\n\n check_signal_header_correct(self.channels, i, self.file_type)\n\n set_samplefrequency(self.handle, i, self._get_sample_frequency(i))\n set_physical_maximum(self.handle, i, self.channels[i]['physical_max'])\n set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])\n set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])\n set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])\n set_label(self.handle, i, du(self.channels[i]['label']))\n set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))\n set_transducer(self.handle, i, du(self.channels[i]['transducer']))\n set_prefilter(self.handle, i, du(self.channels[i]['prefilter']))\n\n def setHeader(self, fileHeader):\n \"\"\"\n Sets the file header\n \"\"\"\n self.technician = fileHeader[\"technician\"]\n self.recording_additional = fileHeader[\"recording_additional\"]\n self.patient_name = fileHeader[\"patientname\"]\n self.patient_additional = fileHeader[\"patient_additional\"]\n self.patient_code = fileHeader[\"patientcode\"]\n self.equipment = fileHeader[\"equipment\"]\n self.admincode = fileHeader[\"admincode\"]\n self.gender = fileHeader[\"gender\"]\n self.recording_start_time = fileHeader[\"startdate\"]\n self.birthdate = fileHeader[\"birthdate\"]\n self.update_header()\n\n def setSignalHeader(self, edfsignal, channel_info):\n \"\"\"\n Sets the parameter for signal edfsignal.\n\n channel_info should be a dict with\n these values:\n\n 'label' : channel label (string, <= 16 characters, must be unique)\n 'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)\n 'sample_rate' : sample frequency in hertz (int). 
Deprecated: use 'sample_frequency' instead.\n 'sample_frequency' : number of samples per record (int)\n 'physical_max' : maximum physical value (float)\n 'physical_min' : minimum physical value (float)\n 'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)\n 'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)\n \"\"\"\n if edfsignal < 0 or edfsignal > self.n_channels:\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal].update(channel_info)\n self.update_header()\n\n def setSignalHeaders(self, signalHeaders):\n \"\"\"\n Sets the parameter for all signals\n\n Parameters\n ----------\n signalHeaders : array_like\n containing dict with\n 'label' : str\n channel label (string, <= 16 characters, must be unique)\n 'dimension' : str\n physical dimension (e.g., mV) (string, <= 8 characters)\n 'sample_rate' :\n sample frequency in hertz (int). Deprecated: use 'sample_frequency' instead.\n 'sample_frequency' : int\n number of samples per record\n 'physical_max' : float\n maximum physical value\n 'physical_min' : float\n minimum physical value\n 'digital_max' : int\n maximum digital value (-2**15 <= x < 2**15)\n 'digital_min' : int\n minimum digital value (-2**15 <= x < 2**15)\n \"\"\"\n for edfsignal in np.arange(self.n_channels):\n self.channels[edfsignal].update(signalHeaders[edfsignal])\n self.update_header()\n\n def setTechnician(self, technician):\n \"\"\"\n Sets the technician's name to `technician`.\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n check_is_ascii(technician)\n self.technician = technician\n self.update_header()\n\n def setRecordingAdditional(self, recording_additional):\n \"\"\"\n Sets the additional recordinginfo\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n check_is_ascii(recording_additional)\n self.recording_additional = recording_additional\n self.update_header()\n\n def setPatientName(self, patient_name):\n \"\"\"\n Sets the patientname to `patient_name`.\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n check_is_ascii(patient_name)\n self.patient_name = patient_name\n self.update_header()\n\n def setPatientCode(self, patient_code):\n \"\"\"\n Sets the patientcode to `patient_code`.\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n check_is_ascii(patient_code)\n self.patient_code = patient_code\n self.update_header()\n\n def setPatientAdditional(self, patient_additional):\n \"\"\"\n Sets the additional patientinfo to `patient_additional`.\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n check_is_ascii(patient_additional)\n self.patient_additional = patient_additional\n self.update_header()\n\n def setEquipment(self, equipment):\n \"\"\"\n Sets the name of the equipment used during the acquisition.\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n\n Parameters\n ----------\n equipment : str\n Describes the measurement equipment\n\n \"\"\"\n check_is_ascii(equipment)\n self.equipment = equipment\n self.update_header()\n\n def 
setAdmincode(self, admincode):\n \"\"\"\n Sets the admincode.\n\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n\n Parameters\n ----------\n admincode : str\n admincode which is written into the header\n\n \"\"\"\n check_is_ascii(admincode)\n self.admincode = admincode\n self.update_header()\n\n def setGender(self, gender):\n \"\"\"\n Sets the gender.\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n\n Parameters\n ----------\n gender : int\n 1 is male, 0 is female\n \"\"\"\n self.gender = gender2int(gender)\n self.update_header()\n\n def setDatarecordDuration(self, duration):\n \"\"\"\n Sets the datarecord duration. The default value is 100000 which is 1 second.\n ATTENTION: the argument \"duration\" is expressed in units of 10 microSeconds!\n So, if you want to set the datarecord duration to 0.1 second, you must give\n the argument \"duration\" a value of \"10000\".\n This function is optional, normally you don't need to change\n the default value. The datarecord duration must be in the range 0.001 to 60 seconds.\n Returns 0 on success, otherwise -1.\n\n Parameters\n ----------\n duration : integer\n Sets the datarecord duration in units of 10 microSeconds\n\n Notes\n -----\n This function is NOT REQUIRED but can be called after opening a file in writemode and\n before the first sample write action. This function can be used when you want\n to use a samplerate which is not an integer. For example, if you want to use\n a samplerate of 0.5 Hz, set the samplefrequency to 5 Hz and\n the datarecord duration to 10 seconds. Do not use this function,\n except when absolutely necessary!\n \"\"\"\n self.duration = duration\n self.update_header()\n\n def set_number_of_annotation_signals(self, number_of_annotations):\n \"\"\"\n Sets the number of annotation signals. The default value is 1\n This function is optional and can be called only after opening a file in writemode\n and before the first sample write action\n Normally you don't need to change the default value. 
Only when the number of annotations\n you want to write is more than the number of seconds of the duration of the recording, you can use\n this function to increase the storage space for annotations\n Minimum is 1, maximum is 64\n\n Parameters\n ----------\n number_of_annotations : integer\n Sets the number of annotation signals\n \"\"\"\n number_of_annotations = max((min((int(number_of_annotations), 64)), 1))\n self.number_of_annotations = number_of_annotations\n self.update_header()\n\n def setStartdatetime(self, recording_start_time):\n \"\"\"\n Sets the recording start Time\n\n Parameters\n ----------\n recording_start_time: datetime object\n Sets the recording start Time\n \"\"\"\n if not isinstance(recording_start_time, datetime):\n recording_start_time = datetime.strptime(recording_start_time,\"%d %b %Y %H:%M:%S\")\n self.recording_start_time = recording_start_time\n self.update_header()\n\n def setBirthdate(self, birthdate):\n \"\"\"\n Sets the birthdate.\n\n Parameters\n ----------\n birthdate: date object from datetime\n\n Examples\n --------\n >>> import pyedflib\n >>> from datetime import datetime, date\n >>> f = pyedflib.EdfWriter('test.bdf', 1, file_type=pyedflib.FILETYPE_BDFPLUS)\n >>> f.setBirthdate(date(1951, 8, 2))\n >>> f.close()\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if isinstance(birthdate, str):\n birthdate = datetime.strptime(birthdate, \"%d.%m.%Y\")\n self.birthdate = birthdate\n self.update_header()\n\n def setSamplefrequency(self, edfsignal, samplefrequency):\n \"\"\"\n Sets the samplefrequency of signal edfsignal.\n\n Notes\n -----\n This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if edfsignal < 0 or edfsignal > self.n_channels:\n raise ChannelDoesNotExist(edfsignal)\n\n # Temporary double assignment while we deprecate 'sample_rate' as a channel attribute\n # in favor of 'sample_frequency', supporting the use of either to give\n # users time to switch to the new interface.\n self.channels[edfsignal]['sample_rate'] = samplefrequency\n self.channels[edfsignal]['sample_frequency'] = samplefrequency\n self.update_header()\n\n def setPhysicalMaximum(self, edfsignal, physical_maximum):\n \"\"\"\n Sets the physical_maximum of signal edfsignal.\n\n Parameters\n ----------\n edfsignal: int\n signal number\n physical_maximum: float\n Sets the physical maximum\n\n Notes\n -----\n This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if edfsignal < 0 or edfsignal > self.n_channels:\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['physical_max'] = physical_maximum\n self.update_header()\n\n def setPhysicalMinimum(self, edfsignal, physical_minimum):\n \"\"\"\n Sets the physical_minimum of signal edfsignal.\n\n Parameters\n ----------\n edfsignal: int\n signal number\n physical_minimum: float\n Sets the physical minimum\n\n Notes\n -----\n This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if (edfsignal < 0 or edfsignal > self.n_channels):\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['physical_min'] = physical_minimum\n self.update_header()\n\n def setDigitalMaximum(self, edfsignal, digital_maximum):\n \"\"\"\n Sets the 
maximum digital value of signal edfsignal.\n Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.\n\n Parameters\n ----------\n edfsignal : int\n signal number\n digital_maximum : int\n Sets the maximum digital value\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if (edfsignal < 0 or edfsignal > self.n_channels):\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['digital_max'] = digital_maximum\n self.update_header()\n\n def setDigitalMinimum(self, edfsignal, digital_minimum):\n \"\"\"\n Sets the minimum digital value of signal edfsignal.\n Usually, the value -32768 is used for EDF+ and -8388608 for BDF+. Usually this will be (-(digital_maximum + 1)).\n\n Parameters\n ----------\n edfsignal : int\n signal number\n digital_minimum : int\n Sets the minimum digital value\n\n Notes\n -----\n This function is optional and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if (edfsignal < 0 or edfsignal > self.n_channels):\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['digital_min'] = digital_minimum\n self.update_header()\n\n def setLabel(self, edfsignal, label):\n \"\"\"\n Sets the label (name) of signal edfsignal (\"FP1\", \"SaO2\", etc.).\n\n Parameters\n ----------\n edfsignal : int\n signal number on which the label should be changed\n label : str\n signal label\n\n Notes\n -----\n This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if (edfsignal < 0 or edfsignal > self.n_channels):\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['label'] = label\n self.update_header()\n\n def setPhysicalDimension(self, edfsignal, physical_dimension):\n \"\"\"\n Sets the physical dimension of signal edfsignal (\"uV\", \"BPM\", \"mA\", \"Degr.\", etc.)\n\n :param edfsignal: int\n :param physical_dimension: str\n\n Notes\n -----\n This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if edfsignal < 0 or edfsignal > self.n_channels:\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['dimension'] = physical_dimension\n self.update_header()\n\n def setTransducer(self, edfsignal, transducer):\n \"\"\"\n Sets the transducer of signal edfsignal\n\n :param edfsignal: int\n :param transducer: str\n\n Notes\n -----\n This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if (edfsignal < 0 or edfsignal > self.n_channels):\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['transducer'] = transducer\n self.update_header()\n\n def setPrefilter(self, edfsignal, prefilter):\n \"\"\"\n Sets the prefilter of signal edfsignal (\"HP:0.1Hz\", \"LP:75Hz N:50Hz\", etc.)\n\n :param edfsignal: int\n :param prefilter: str\n\n Notes\n -----\n This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.\n \"\"\"\n if edfsignal < 0 or edfsignal > self.n_channels:\n raise ChannelDoesNotExist(edfsignal)\n self.channels[edfsignal]['prefilter'] = prefilter\n self.update_header()\n\n def writePhysicalSamples(self, data):\n \"\"\"\n Writes n physical samples (uV, mA, Ohm) belonging to one signal where n\n is 
the samplefrequency of the signal.\n\n data_vec belonging to one signal. The size must be the samplefrequency of the signal.\n\n Notes\n -----\n Writes n physical samples (uV, mA, Ohm) from data_vec belonging to one signal where n\n is the samplefrequency of the signal. The physical samples will be converted to digital\n samples using the values of physical maximum, physical minimum, digital maximum and digital\n minimum. The number of samples written is equal to the samplefrequency of the signal.\n Call this function for every signal in the file. The order is important! When there are 4\n signals in the file, the order of calling this function must be: signal 0, signal 1, signal 2,\n signal 3, signal 0, signal 1, signal 2, etc.\n\n All parameters must be already written into the bdf/edf-file.\n \"\"\"\n return write_physical_samples(self.handle, data)\n\n def writeDigitalSamples(self, data):\n return write_digital_samples(self.handle, data)\n\n def writeDigitalShortSamples(self, data):\n return write_digital_short_samples(self.handle, data)\n\n def blockWritePhysicalSamples(self, data):\n \"\"\"\n Writes physical samples (uV, mA, Ohm)\n must be filled with samples from all signals\n where each signal has n samples which is the samplefrequency of the signal.\n\n data_vec belonging to one signal. The size must be the samplefrequency of the signal.\n\n Notes\n -----\n buf must be filled with samples from all signals, starting with signal 0, 1, 2, etc.\n one block equals one second\n The physical samples will be converted to digital samples using the\n values of physical maximum, physical minimum, digital maximum and digital minimum\n The number of samples written is equal to the sum of the samplefrequencies of all signals\n Size of buf should be equal to or bigger than sizeof(double) multiplied by the sum of the samplefrequencies of all signals\n Returns 0 on success, otherwise -1\n\n All parameters must be already written into the bdf/edf-file.\n \"\"\"\n return blockwrite_physical_samples(self.handle, data)\n\n def blockWriteDigitalSamples(self, data):\n return blockwrite_digital_samples(self.handle, data)\n\n def blockWriteDigitalShortSamples(self, data):\n return blockwrite_digital_short_samples(self.handle, data)\n\n def writeSamples(self, data_list, digital=False):\n \"\"\"\n Writes physical samples (uV, mA, Ohm) from data belonging to all signals.\n The physical samples will be converted to digital samples using the values\n of physical maximum, physical minimum, digital maximum and digital minimum.\n If the samplefrequencies of all signals are equal, the data can be\n stored in a matrix with the size (N, signals). If the samplefrequencies\n differ, the data must be passed as a list, where each list entry contains\n a vector with the data of one signal.\n\n If digital is True, digital signals (as directly from the ADC) will be expected\n (e.g. int16 from 0 to 2048).\n\n All parameters must be already written into the bdf/edf-file.\n \"\"\"\n there_are_blank_sample_frequencies = any([channel.get('sample_frequency') is None\n for channel in self.channels])\n if there_are_blank_sample_frequencies:\n warnings.warn(\"The 'sample_rate' parameter is deprecated. 
Please use \"\n \"'sample_frequency' instead.\", DeprecationWarning)\n\n if (len(data_list)) == 0:\n raise WrongInputSize('Data list is empty') \n if (len(data_list) != len(self.channels)):\n raise WrongInputSize('Number of channels ({}) \\\n unequal to length of data ({})'.format(len(self.channels), len(data_list)))\n\n # Check for F-contiguous arrays\n if any([s.flags.f_contiguous for s in data_list if isinstance(s, np.ndarray)]) or \\\n (isinstance(data_list, np.ndarray) and data_list.flags.f_contiguous):\n warnings.warn('signals are in Fortran order. Will automatically ' \\\n 'transfer to C order for compatibility with edflib.')\n if isinstance(data_list, list):\n data_list = [s.copy(order='C') for s in data_list]\n elif isinstance(data_list, np.ndarray) and data_list.flags.f_contiguous:\n data_list = data_list.copy(order='C')\n \n if digital:\n if any([not np.issubdtype(a.dtype, np.integer) for a in data_list]):\n raise TypeError('Digital = True requires all signals in int')\n\n # Check that all channels have different physical_minimum and physical_maximum\n for chan in self.channels:\n assert chan['physical_min'] != chan['physical_max'], \\\n 'In chan {} physical_min {} should be different from '\\\n 'physical_max {}'.format(chan['label'], chan['physical_min'], chan['physical_max'])\n\n ind = []\n notAtEnd = True\n for i in np.arange(len(data_list)):\n ind.append(0)\n\n sampleLength = 0\n sampleFrequencies = np.zeros(len(data_list), dtype=np.int32)\n for i in np.arange(len(data_list)):\n sampleFrequencies[i] = self._get_sample_frequency(i)\n if (np.size(data_list[i]) < ind[i] + sampleFrequencies[i]):\n notAtEnd = False\n sampleLength += sampleFrequencies[i]\n\n dataRecord = np.array([], dtype=np.int32 if digital else None)\n\n while notAtEnd:\n del dataRecord\n dataRecord = np.array([], dtype=np.int32 if digital else None)\n for i in np.arange(len(data_list)):\n dataRecord = np.append(dataRecord, data_list[i][int(ind[i]):int(ind[i]+sampleFrequencies[i])])\n ind[i] += sampleFrequencies[i]\n if digital:\n success = self.blockWriteDigitalSamples(dataRecord)\n else:\n success = self.blockWritePhysicalSamples(dataRecord)\n\n if success < 0:\n raise IOError('Unknown error while calling blockWriteSamples')\n\n for i in np.arange(len(data_list)):\n if (np.size(data_list[i]) < ind[i] + sampleFrequencies[i]):\n notAtEnd = False\n\n\n for i in np.arange(len(data_list)):\n lastSamples = np.zeros(sampleFrequencies[i], dtype=np.int32 if digital else None)\n lastSampleInd = int(np.max(data_list[i].shape) - ind[i])\n lastSampleInd = int(np.min((lastSampleInd,sampleFrequencies[i])))\n if lastSampleInd > 0:\n lastSamples[:lastSampleInd] = data_list[i][-lastSampleInd:]\n if digital:\n success = self.writeDigitalSamples(lastSamples)\n else:\n success = self.writePhysicalSamples(lastSamples)\n\n if success<0:\n raise IOError('Unknown error while calling writeSamples')\n\n def writeAnnotation(self, onset_in_seconds, duration_in_seconds, description, str_format='utf-8'):\n \"\"\"\n Writes an annotation/event to the file\n \"\"\"\n if self.file_type in [FILETYPE_EDF, FILETYPE_BDF]:\n raise TypeError('Trying to write annotation to EDF/BDF, must use EDF+/BDF+')\n\n if isinstance(duration_in_seconds, bytes):\n duration_in_seconds = float(duration_in_seconds)\n \n if str_format == 'utf-8':\n if duration_in_seconds >= 0:\n return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), du(description))\n else:\n return 
write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, du(description))\n else:\n if duration_in_seconds >= 0:\n return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1'))\n else:\n return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, u(description).encode('latin1'))\n\n def close(self):\n \"\"\"\n Closes the file.\n \"\"\"\n close_file(self.handle)\n self.handle = -1\n\n def _get_sample_frequency(self, channelIndex):\n # Temporary conditional assignment while we deprecate 'sample_rate' as a channel attribute\n # in favor of 'sample_frequency', supporting the use of either to give\n # users time to switch to the new interface.\n return (self.channels[channelIndex]['sample_rate']\n if self.channels[channelIndex].get('sample_frequency') is None\n else self.channels[channelIndex]['sample_frequency'])\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.round",
"numpy.min",
"numpy.arange",
"numpy.size",
"numpy.issubdtype"
]
] |
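A minimal writer round-trip sketch for the EdfWriter above, using only methods shown in the record (the header keys follow its docstrings; the file name is arbitrary):

import numpy as np
import pyedflib  # assumes the package above is installed

signal = np.random.uniform(-100, 100, 100)  # one second of data at 100 Hz
with pyedflib.EdfWriter("demo.edf", 1, file_type=pyedflib.FILETYPE_EDFPLUS) as f:
    f.setSignalHeader(0, {
        "label": "ch0", "dimension": "uV",
        "sample_frequency": 100,
        "physical_max": 100.0, "physical_min": -100.0,
        "digital_max": 32767, "digital_min": -32768,
        "transducer": "", "prefilter": "",
    })
    f.writeSamples([signal])                    # one data record per second, all channels
    f.writeAnnotation(0.0, 0.5, "recording starts")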
jlashner/hmf
|
[
"c7ef7ce77b83a147dfc7a887d667775eca1a7e48"
] |
[
"tests/test_halofit.py"
] |
[
"import inspect\nimport os\n\nLOCATION = \"/\".join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))).split(\"/\")[:-1])\nfrom nose.tools import raises\nimport sys\nsys.path.insert(0, LOCATION)\nfrom hmf import transfer\nimport numpy as np\n\n\ndef test_takahashi():\n t = transfer.Transfer(transfer_model=\"EH\", takahashi=False, lnk_max=7)\n tt = transfer.Transfer(transfer_model=\"EH\", takahashi=True, lnk_max=7)\n\n assert np.isclose(t.nonlinear_power[0],tt.nonlinear_power[0],rtol=1e-4)\n print(t.nonlinear_power[-1]/ tt.nonlinear_power[-1])\n assert np.logical_not(np.isclose(t.nonlinear_power[-1]/ tt.nonlinear_power[-1], 1, rtol=0.4))\n\n\ndef test_takahashi_hiz():\n # This test should do the HALOFIT WARNING\n t = transfer.Transfer(transfer_model=\"EH\", takahashi=False, lnk_max=7,z=8.0)\n tt = transfer.Transfer(transfer_model=\"EH\", takahashi=True, lnk_max=7, z=8.0)\n\n assert np.isclose(t.nonlinear_power[0],tt.nonlinear_power[0],rtol=1e-4)\n print(t.nonlinear_power[-1]/ tt.nonlinear_power[-1])\n assert np.logical_not(np.isclose(t.nonlinear_power[-1]/ tt.nonlinear_power[-1], 1, rtol=0.4))\n\n t.update(z=0)\n\n assert np.logical_not(np.isclose(t.nonlinear_power[0]/ tt.nonlinear_power[0], 0.9, rtol=0.1))\n assert np.logical_not(np.isclose(t.nonlinear_power[-1]/ tt.nonlinear_power[-1], 0.99, rtol=0.1))\n\n\n\ndef test_halofit_high_s8():\n t = transfer.Transfer(transfer_model=\"EH\", lnk_max=7,sigma_8=0.999)\n thi = transfer.Transfer(transfer_model=\"EH\", lnk_max=7, sigma_8=1.001) #just above threshold\n\n\n print(t.nonlinear_power[0]/thi.nonlinear_power[0] -1, t.nonlinear_power[-1]/thi.nonlinear_power[-1] -1)\n assert np.isclose(t.nonlinear_power[0],thi.nonlinear_power[0],rtol=2e-2)\n assert np.isclose(t.nonlinear_power[-1], thi.nonlinear_power[-1], rtol=5e-2)\n"
] |
[
[
"numpy.isclose"
]
] |
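np.isclose, which these tests lean on, applies an asymmetric tolerance: |a - b| <= atol + rtol * |b|, with defaults rtol=1e-5 and atol=1e-8. A quick check of how rtol drives the verdict:

import numpy as np

a, b = 1.0, 1.04
print(np.isclose(a, b, rtol=1e-4))  # False: 0.04 > 1e-8 + 1e-4 * 1.04
print(np.isclose(a, b, rtol=5e-2))  # True:  0.04 <= 1e-8 + 0.05 * 1.04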
PoCFrance/Underflow
|
[
"214ab77db4a169548d68b463fb40df3f42bc80d0"
] |
[
"regularflow/utils_regularflow/toolbox.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 13 10:39:59 2019\n\n@author: slo\n\"\"\"\n\nimport torch\nfrom .qfunction import Qfunction\nfrom torch.optim import RMSprop\nfrom.state import State\nimport numpy as np\nimport math as math\nfrom time import process_time\nimport random\n\nEPS_START = 0.9\nEPS_END = 0.05\nEPS_DECAY = 200\nSTEPS_DONE = 0\n\n__all__ = [\"Toolbox\"]\n\nclass Toolbox() :\n\n def __init__(self, qfunction: Qfunction):\n self.qfunction: Qfunction = qfunction\n self.optimizer: RMSprop = RMSprop(qfunction.parameters())\n\n def _qtarget(self, reward, gamma, next_step):\n next_state = self.qfunction(next_step).max(1)[0].unsqueeze(1)\n reward = reward.unsqueeze(1)\n return reward + (gamma * next_state)\n\n def _one_hot(self, a, num_classes):\n return np.squeeze(np.eye(num_classes)[a.reshape(-1)])\n\n def _take_action(self, state: State):\n global STEPS_DONE\n sample = random.random()\n eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * STEPS_DONE / EPS_DECAY)\n STEPS_DONE += 1\n if (eps_threshold <= 0.06):\n eps_threshold = EPS_START\n STEPS_DONE = 0\n stateAction = torch.from_numpy(state._getState()).unsqueeze(0).float()\n print(f\"Step done : {STEPS_DONE}\\nstate : {state._getState()}\\n eps : {eps_threshold}\\n\\n\")\n if sample < 0:\n action = np.random.randint(0, 2)\n newLight = list(self._one_hot(np.array(action), 2))\n else:\n print(\"model\")\n with torch.no_grad():\n action = self.qfunction(stateAction).max(1)[1].view(1, 1)\n print(self.qfunction(stateAction))\n newLight = list(self._one_hot(action, 2))\n state._setState(light=newLight)\n if newLight[0] == 0:\n state.clockCars = process_time()\n else:\n state.clockPedestrian = process_time()\n return newLight\n\n def _progbar(self, curr, total, full_progbar):\n frac = curr / total\n filled_progbar = round(frac * full_progbar)\n print('\\r', '#' * filled_progbar + '-' * (full_progbar - filled_progbar), '[{:>7.2%}]'.format(frac), end='')"
] |
[
[
"numpy.array",
"torch.no_grad",
"numpy.random.randint",
"numpy.eye"
]
] |
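_take_action above uses the standard exponential epsilon decay for exploration. A self-contained sketch of the schedule and the decision rule (the Q-values are a stand-in for the network output):

import math
import random
import numpy as np

EPS_START, EPS_END, EPS_DECAY = 0.9, 0.05, 200

def epsilon(step):
    # decays from EPS_START toward EPS_END as step grows
    return EPS_END + (EPS_START - EPS_END) * math.exp(-step / EPS_DECAY)

q_values = np.array([0.2, 0.7])  # stand-in for qfunction(state)
for step in (0, 200, 1000):
    if random.random() < epsilon(step):
        action = random.randrange(len(q_values))  # explore
    else:
        action = int(q_values.argmax())           # exploit
    print(step, round(epsilon(step), 3), action)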
dutta-alankar/PH-354-2018-IISc-Assignment-Problems
|
[
"370dbbc447749cebe148a6ffffb48ea978b2949b"
] |
[
"hw1/05/5.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 6 05:35:36 2018\r\n\r\n@author: Alankar\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef wave_vec(E,V,m=1,hbar=1):\r\n k1 = np.sqrt(2*m*E)/hbar\r\n k2 = np.sqrt(2*m*(E-V))/hbar\r\n return(k1,k2)\r\n\r\ndef Ref(k1,k2):\r\n R = (k1-k2)/(k1+k2)\r\n return np.abs(R)**2\r\n\r\ndef Trans(k1,k2):\r\n T = (2*k1)/(k1+k2)\r\n return (k2/k1)*np.abs(T)**2\r\n\r\nm = 9.11e-31 \r\n#units of Mass, Energy and hbar doesn't change Reflection and Transmission Probabilities as long as they are consistently used\r\nE, V = 10, 9\r\nk1, k2 = wave_vec(E,V)\r\nprint('Particle energy: %.1f eV\\nBarrier height: %.1f eV'%(E,V))\r\nprint('Transmission Probability: %.2f\\nReflection Probability: %.2f'%(Trans(k1,k2),Ref(k1,k2)))\r\n "
] |
[
[
"numpy.abs",
"numpy.sqrt"
]
] |
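For the step potential above with E > V, the record's formulas give R = ((k1-k2)/(k1+k2))^2 and T = 4*k1*k2/(k1+k2)^2, so R + T = 1; a worked check with the script's numbers:

import numpy as np

E, V = 10, 9                 # eV, as in the script above
k1 = np.sqrt(2 * E)          # m = hbar = 1
k2 = np.sqrt(2 * (E - V))
R = ((k1 - k2) / (k1 + k2)) ** 2
T = (k2 / k1) * (2 * k1 / (k1 + k2)) ** 2   # same as 4*k1*k2/(k1+k2)**2
print(R, T, R + T)           # R + T == 1 up to floating-point error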
sarvarkaxxorov/MassQueryLanguage
|
[
"b7618ba7fb5343c252c5691dc574f4193fb8e83e"
] |
[
"workflow/bin/merged_extracted.py"
] |
[
"import argparse\nimport os\nimport glob\nimport json\nimport uuid\nimport pandas as pd\n\nfrom massql import msql_extract\n\n\ndef _export_extraction(all_spectra, output_mzML_filename, output_mgf_filename, output_json_filename):\n # Returns two dataframes\n\n # Renumbering the scans when merging\n scan = 1\n for spectrum in all_spectra:\n spectrum[\"new_scan\"] = scan\n scan += 1\n\n msql_extract._export_mzML(all_spectra, output_mzML_filename)\n msql_extract._export_mgf(all_spectra, output_mgf_filename)\n\n # Writing out JSON\n open(output_json_filename, \"w\").write(json.dumps(all_spectra))\n\n # Formatting output for tsv\n results_list = []\n\n for spectrum in all_spectra:\n for query_result in spectrum[\"query_results\"]:\n query_result[\"new_scan\"] = spectrum[\"new_scan\"]\n query_result[\"new_filename\"] = os.path.basename(output_mzML_filename)\n results_list.append(query_result)\n\n results_df = pd.DataFrame(results_list)\n\n return results_df\n\ndef main():\n parser = argparse.ArgumentParser(description=\"MSQL CMD\")\n parser.add_argument('json_folder', help='json_folder') # THis is each row is a json for the spectra\n parser.add_argument('output_mzML_folder', help='Output mzML Folder')\n parser.add_argument('output_mgf_folder', help='Output mgf Folder')\n parser.add_argument('output_json_folder', help='Output merged JSON Folder')\n #parser.add_argument('output_parquet', help='Output Parquet File')\n parser.add_argument('--output_tsv', default=None, help='Output Summary Extraction File')\n parser.add_argument('--output_tsv_prefix', default=None, help='Output Summary Extraction output_tsv_prefix')\n\n args = parser.parse_args()\n\n file_hash = str(uuid.uuid4())\n MAX_SPECTRA_PER_EXTRACTION = 5000\n\n all_results_list = []\n all_spectra = []\n\n all_json_files = glob.glob(os.path.join(args.json_folder, \"*.json\"))\n\n for json_filename in all_json_files:\n for json_line in open(json_filename):\n if len(json_line) < 2:\n continue\n\n try:\n all_spectra.append(json.loads(json_line.rstrip()))\n except:\n pass\n\n if len(all_spectra) > MAX_SPECTRA_PER_EXTRACTION:\n output_mzML_filename = os.path.join(args.output_mzML_folder, \"extracted_{}.mzML\".format(file_hash))\n output_mgf_filename = os.path.join(args.output_mgf_folder, \"extracted_{}.mgf\".format(file_hash))\n output_json_filename = os.path.join(args.output_json_folder, \"extracted_{}.json\".format(file_hash))\n\n results_df = _export_extraction(all_spectra, output_mzML_filename, output_mgf_filename, output_json_filename)\n all_results_list.append(results_df)\n file_hash = str(uuid.uuid4())\n all_spectra = []\n\n if len(all_spectra) > 0:\n output_mzML_filename = os.path.join(args.output_mzML_folder, \"extracted_{}.mzML\".format(file_hash))\n output_mgf_filename = os.path.join(args.output_mgf_folder, \"extracted_{}.mgf\".format(file_hash))\n output_json_filename = os.path.join(args.output_json_folder, \"extracted_{}.json\".format(file_hash))\n\n results_df = _export_extraction(all_spectra, output_mzML_filename, output_mgf_filename, output_json_filename)\n all_results_list.append(results_df)\n\n # Merging all the results\n merged_result_df = pd.concat(all_results_list)\n if args.output_tsv is not None:\n merged_result_df.to_csv(args.output_tsv, sep=\"\\t\", index=False)\n elif args.output_tsv_prefix is not None:\n merged_result_df.to_csv(args.output_tsv_prefix + \"_\" + str(uuid.uuid4()) + \".tsv\", sep=\"\\t\", index=False)\n\n\n\n\n \n\n # Formatting the json peaks into a parquet data frame file\n # peak_list = []\n # for 
spectrum in all_spectra:\n # sum_intensity = sum([peak[1] for peak in spectrum['peaks']])\n # for peak in spectrum['peaks']:\n # peak_dict = {}\n # peak_dict[\"mz\"] = peak[0]\n # peak_dict[\"i\"] = peak[1]\n # peak_dict[\"i_norm\"] = peak[1] / sum_intensity\n \n # if \"precursor_mz\" in spectrum:\n # peak_dict[\"precursor_mz\"] = spectrum[\"precursor_mz\"]\n\n # # TODO: There could be multiple comments per spectrum\n # try:\n # if \"comment\" in spectrum[\"query_results\"][0]:\n # peak_dict[\"comment\"] = float(spectrum[\"query_results\"][0][\"comment\"])\n # except:\n # pass\n\n # peak_list.append(peak_dict)\n\n # peaks_df = pd.DataFrame(peak_list)\n # peaks_df.to_parquet(args.output_parquet)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.DataFrame",
"pandas.concat"
]
] |
lxkain/tracking
|
[
"00ed9a0b31c4880687a42df3bf9651e68e0c4360",
"00ed9a0b31c4880687a42df3bf9651e68e0c4360"
] |
[
"signalworks/tracking/tracking.py",
"tests/test_load_audio.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Tracks\nEach track has a fs and a duration. There are 4 kinds of tracks:\n\n1 Event - times\n2 Wave - values\n3 TimeValue - values at times, duration\n4 Partition - values between times\n\nAll track intervals are of the type [), and duration points to the next unoccupied sample == length\n\"\"\"\n\nimport logging\nfrom builtins import str\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nimport numpy\nfrom signalworks.tracking.metatrack import MetaTrack\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n# logger.setLevel(logging.WARNING)\n# logger.setLevel(logging.ERROR)\n\nTIME_TYPE = numpy.int64\n\n\ndef convert_dtype(source, target_dtype):\n \"\"\"\n return a link (if unchanged) or copy of signal in the specified dtype (often changes bit-depth as well)\n \"\"\"\n assert isinstance(source, numpy.ndarray)\n source_dtype = source.dtype\n assert source_dtype in (\n numpy.int16,\n numpy.int32,\n numpy.float32,\n numpy.float64,\n ), \"source must be a supported type\"\n assert target_dtype in (\n numpy.int16,\n numpy.int32,\n numpy.float32,\n numpy.float64,\n ), \"target must be a supported type\"\n if source_dtype == target_dtype:\n return source\n else: # conversion\n if source_dtype == numpy.int16:\n if target_dtype == numpy.int32:\n return source.astype(target_dtype) << 16\n else: # target_dtype == numpy.float32 / numpy.float64:\n return source.astype(target_dtype) / (1 << 15)\n elif source_dtype == numpy.int32:\n if target_dtype == numpy.int16:\n return (source >> 16).astype(target_dtype) # lossy\n else: # target_dtype == numpy.float32 / numpy.float64:\n return source.astype(target_dtype) / (1 << 31)\n else: # source_dtype == numpy.float32 / numpy.float64\n M = numpy.max(numpy.abs(source))\n limit = 1 - 1e-16\n if M > limit:\n factor = limit / M\n logger.warning(\n f\"maximum float waveform value {M} is beyond [-{limit}, {limit}],\"\n f\"applying scaling of {factor}\"\n )\n source *= factor\n if target_dtype == numpy.float32 or target_dtype == numpy.float64:\n return source.astype(target_dtype)\n else:\n if target_dtype == numpy.int16:\n return (source * (1 << 15)).astype(target_dtype) # dither?\n else: # target_dtype == numpy.int32\n return (source * (1 << 31)).astype(target_dtype) # dither?\n\n\nclass Track(MetaTrack):\n default_suffix = \".trk\"\n\n def __init__(self, path):\n self._fs = 0\n self.type: Optional[str] = None\n self.min: Union[int, float, None] = None\n self.max: Union[int, float, None] = None\n self.unit: Optional[str] = None\n self.label: Optional[str] = None\n if path is None:\n path = str(id(self))\n self.path = Path(path).with_suffix(self.default_suffix)\n\n def get_time(self):\n raise NotImplementedError\n\n def set_time(self, time):\n raise NotImplementedError\n\n time = property(get_time, set_time)\n\n def get_value(self):\n raise NotImplementedError\n\n def set_value(self, value):\n raise NotImplementedError\n\n value = property(get_value, set_value)\n\n def get_fs(self):\n return self._fs\n\n def set_fs(self, _value):\n raise Exception(\"Cannot change fs, try resample()\")\n\n fs = property(get_fs, set_fs, doc=\"sampling frequency\")\n\n def get_duration(self):\n pass\n\n def set_duration(self, duration):\n raise NotImplementedError\n\n duration = property(get_duration, set_duration)\n\n def __eq__(self, other):\n raise NotImplementedError\n\n def __ne__(self, other):\n raise NotImplementedError\n\n def __len__(self):\n pass\n\n def __str__(self):\n pass\n\n def __add__(self, other):\n raise 
NotImplementedError\n\n @classmethod\n def read(cls, path, samplerate=None):\n # we do the imports here to avoid circular import when Wave inherits Track, and Track call Wave's function\n # we only need a function from the dependencies\n from signalworks.tracking.partition import Partition\n from signalworks.tracking.timevalue import TimeValue\n from signalworks.tracking.wave import Wave\n from signalworks.tracking.multitrack import MultiTrack\n\n \"\"\"Loads object from name, adding default extension if missing.\"\"\"\n # E = []\n suffix = Path(path).suffix\n\n with open(path, \"rb\") as fileIn:\n bufHeader = fileIn.read(38)\n if (\n (bufHeader[0:4] == b\"RIFF\")\n and (bufHeader[12:16] == b\"fmt \")\n and (bufHeader[0:5] != b\"RIFFB\")\n ):\n channels = None\n mmap = False\n return Wave.wav_read(path, channels, mmap)\n elif suffix == \".tmv\":\n return TimeValue.read_tmv(path) # for now, handle nans\n elif suffix == \".lab\":\n return Partition.read(path)\n elif suffix == \".edf\":\n return MultiTrack.read_edf(path)\n elif suffix == \".xdf\":\n return MultiTrack.read_xdf(path)\n else:\n channels = None\n mmap = False\n return Wave.wav_read(path, channels, mmap)\n\n def write(self, name, *args, **kwargs):\n \"\"\"Saves object to name, adding default extension if missing.\"\"\"\n raise NotImplementedError\n\n def resample(self, fs):\n \"\"\"resample self to a certain fs\"\"\"\n raise NotImplementedError\n\n def select(self, a, b):\n \"\"\"\n return a selection of the track from a to b. a and b are in fs units.\n Times are new objects, but values are views - idea is to make a read-only section, not a copy\n \"\"\"\n raise NotImplementedError\n\n def insert(self, a, t):\n raise NotImplementedError\n\n def remove(self, a, b):\n raise NotImplementedError\n\n def copy(self, a, b):\n raise NotImplementedError\n\n def cut(self, a, b):\n t = self.copy(a, b)\n self.remove(a, b)\n return t\n\n\ndef get_track_classes() -> List[Track]:\n def all_subclasses(c):\n return c.__subclasses__() + [\n a for b in c.__subclasses__() for a in all_subclasses(b)\n ]\n\n return [obj for obj in all_subclasses(Track)]\n\n\n# TODO: class NamedEvent(_Track)\n# there hasn't been a need for it yet, but may be useful in the future\n# wonder if I can extend Event itself with optional values...\n# class NamedEvent(_Track):\n# def __init__(self, time, value, fs, duration)\n\n\n# class HetMultiTrack(MultiTrack): # may want to define common abstract class instead\n# \"\"\"\n# A dictionary containing time-synchronous tracks of equal duration, but HETEROGENOUS fs\n# \"\"\"\n\n# # this fs relates to the manner by which we time-index (possibly with float) into the multitrack object.\n# # Use 1.0 for seconds.\n# def __init__(self, mapping=dict(), fs=1.0):\n# dict.__init__(self, mapping)\n# if __debug__: # long assert - TODO: do this on mapping, and then assign\n# self.check()\n# self._fs = fs\n\n# def check(self):\n# if len(self) > 1:\n# duration = None\n# for i, (key, track) in enumerate(self.items()):\n# if duration is None:\n# duration = track.duration / track.fs\n# if track.duration / track.fs != duration:\n# raise AssertionError(\n# f\"all durations must be equal, track #{i} ('{key}') does not match track #1\"\n# )\n\n# def get_fs(self):\n# if len(self):\n# return self._fs\n# else:\n# return 0 # or raise?\n\n# def set_fs(self, fs):\n# self._fs = fs\n\n# fs = property(get_fs, set_fs, doc=\"sampling frequency of time-index\")\n\n# def select(self, a, b, keys=None):\n# assert a >= 0\n# assert a < b # or a <= b?\n# assert b <= 
self.duration\n# \"\"\"return a new object with all track views from time a to b\"\"\"\n# if keys is None:\n# keys = self.keys()\n# obj = type(self)()\n# for key in keys:\n# trk = self[key]\n# obj[key] = trk.select(\n# a / self._fs * trk._fs, b / self._fs * trk._fs\n# ) # untested\n# return obj\n\n# def test_pml(self):\n# import tempfile\n# tmp = tempfile.NamedTemporaryFile(prefix='test_pml_')\n# filename = tmp.name\n# tmp.close()\n# self.t.pmlwrite(filename)\n# s = Event.pmlread(filename)\n# os.unlink(filename)\n# # duration CANNOT be encoded in the file (or can it?)\n# s.duration = int(numpy.round(self.t.duration * s.fs / self.t.fs))\n# s = s.resample(self.t.fs)\n# self.assertTrue(numpy.allclose(s.time, self.t.time))\n",
"# -*- coding: utf-8 -*-\nimport distutils.spawn\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom signalworks.tracking import load_audio\n\n\ndef has_gitlfs() -> bool:\n return distutils.spawn.find_executable(\"git-lfs\") is not None\n\n\ndef is_gitlfs_pointer(path: Path) -> bool:\n return path.stat().st_blocks == 8 and path.stat().st_blksize == 4096\n\n\nxfailif_no_gitlfs = pytest.mark.xfail(\n not has_gitlfs(), reason=\"This test requires git-lfs\"\n)\n\n\n@xfailif_no_gitlfs\ndef test_load_wav():\n # read regular wav file\n path = Path(__file__).parents[1] / \"data\" / \"speech-mwm.wav\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 22050\n\n\n@xfailif_no_gitlfs\ndef test_load_au():\n # read au file\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n path = Path(__file__).parents[1] / \"data\" / \"test.au\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 44100\n\n\n@xfailif_no_gitlfs\ndef test_load_TIMIT():\n # read NIST file\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n path = Path(__file__).parents[1] / \"data\" / \"test.WAV\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 16000\n\n\n@xfailif_no_gitlfs\ndef test_load_nis():\n # read NIST file\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n path = Path(__file__).parents[1] / \"data\" / \"test.nis\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 16000\n\n\n@xfailif_no_gitlfs\ndef test_2channels():\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n path = Path(__file__).parents[1] / \"data\" / \"test.flac\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n multiTrack = load_audio(path)\n wave_left = multiTrack[\"left\"]\n assert np.any(wave_left.value > 0)\n assert wave_left.fs == 44100\n\n wave_right = multiTrack[\"right\"]\n assert np.any(wave_right.value > 0)\n assert wave_right.fs == 44100\n\n\n@xfailif_no_gitlfs\ndef test_load_wa1():\n # read WA1 file\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n path = Path(__file__).parents[1] / \"data\" / \"test.wa1\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 8000\n\n\n@xfailif_no_gitlfs\ndef test_load_wa2():\n # read WA2 file\n path = Path(__file__).parents[1] / \"data\" / \"test.wa2\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n multiTrack = 
load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 8000\n\n\n@xfailif_no_gitlfs\[email protected](reason=\"We cannot support this kind of file\")\ndef test_load_wv1():\n # read WA2 file\n path = Path(__file__).parents[1] / \"data\" / \"test.WV1\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 16000\n\n\n@xfailif_no_gitlfs\[email protected](reason=\"We cannot support this kind of file\")\ndef test_load_wv2():\n # read WA2 file\n path = Path(__file__).parents[1] / \"data\" / \"test.WV2\"\n if is_gitlfs_pointer(path):\n pytest.skip(\"Audio object is a git lfs pointer\")\n soundfile = pytest.importorskip( # noqa\n \"soundfile\", reason=\"If soundfile is not installed, this test will fail\"\n )\n multiTrack = load_audio(path)\n wave = multiTrack[\"left\"]\n assert np.any(wave.value > 0)\n assert wave.fs == 16000\n"
] |
[
[
"numpy.abs"
],
[
"numpy.any"
]
] |
GavinNL/gcsg
|
[
"f5d47d9f035ea28a08e5eeeaf36500792a32f848"
] |
[
"plot_points.py"
] |
[
"#!/bin/env python3\nimport numpy as np\nimport pylab as pl\nimport sys\nfrom matplotlib import collections as mc\n\n\narray = []\nlines = []\npointsx = []\npointsy = []\n\nwith open(sys.argv[1]) as f:\n\n for line in f: # read rest of lines\n array = [float(x) for x in line.split()]\n lines.append( [ (array[0], array[1]), (array[2], array[3]) ])\n\n p1 = [array[0],array[1]]\n p2 = [array[2],array[3]]\n\n mx = 0.5*(p1[0]+p2[0])\n my = 0.5*(p1[1]+p2[1])\n\n vx = -(p2[1]-p1[1])\n vy = p2[0]-p1[0]\n\n vx = vx/np.sqrt( vx*vx+vy*vy)\n vy = vy/np.sqrt( vx*vx+vy*vy)\n print(vx, vy)\n pointsx.append( array[0] )\n pointsx.append( array[2] )\n pointsy.append( array[1] )\n pointsy.append( array[3] )\n\n\n lines.append( [ (mx,my), (mx+vx,my+vy) ] );\n\n\nlc = mc.LineCollection(lines, linewidths=2)\nfig, ax = pl.subplots()\nax.add_collection(lc)\nax.autoscale()\nax.margins(0.1)\npl.xlabel('x')\npl.ylabel('y')\n\npl.plot(pointsx, pointsy, 'o')\npl.show()\n"
] |
[
[
"matplotlib.collections.LineCollection",
"numpy.sqrt"
]
] |
Boltuzamaki/30-Days-AI-Projects
|
[
"04c48a3951ccdd7e1bd5776cde419d0e77d9822b"
] |
[
"8th day - Real Time twitter sentiment analysis/Python code/Scraping and performing sentiment analysis of tweets in real time .py"
] |
[
"\n# coding: utf-8\n\n# # Importing libraries\n\n# In[14]:\n\n\nfrom contraction import CONTRACTION_MAP\nimport re\nimport pickle\nimport math\nimport re\nimport time\nimport tensorflow as tf\nfrom tensorflow import keras\nimport pandas as pd\nimport nltk\nfrom contraction import CONTRACTION_MAP # Its a py file contain expanded word of all short words like I'm\nfrom bs4 import BeautifulSoup\nfrom tweepy import Stream\nfrom tweepy import StreamListener\nimport json\nimport re\nimport csv\nimport tweepy\nget_ipython().run_line_magic('matplotlib', 'notebook')\nimport matplotlib.pyplot as plt\nimport matplotlib.animation\nimport numpy as np\n\n\n# # Data cleaning functions\n\n# In[15]:\n\n\ndef remove_htmltags(text): # Remove HTML tags\n from bs4 import BeautifulSoup\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text()\n return stripped_text\n\ndef remove_accented_chars(text): # Normalizing accented charaters like ΓΌ\n import unicodedata\n text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n return text\n\ndef expand_contractions(text, contraction_mapping=CONTRACTION_MAP): # Expanding short words iike I've --> I have\n from contraction import CONTRACTION_MAP\n import contraction\n import re\n contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())), \n flags=re.IGNORECASE|re.DOTALL)\n def expand_match(contraction):\n match = contraction.group(0)\n first_char = match[0]\n expanded_contraction = contraction_mapping.get(match) if contraction_mapping.get(match) else contraction_mapping.get(match.lower()) \n expanded_contraction = first_char+expanded_contraction[1:]\n return expanded_contraction\n \n expanded_text = contractions_pattern.sub(expand_match, text)\n expanded_text = re.sub(\"'\", \"\", expanded_text)\n return expanded_text\n\ndef remove_special_characters(text, remove_digits=False): # Remove special characters\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text\n\ndef simple_stemmer(text): # Stemming the words\n import nltk\n ps = nltk.porter.PorterStemmer()\n text = ' '.join([ps.stem(word) for word in text.split()])\n return text\n\ndef simple_lemmatize(text): # lammetizing the words\n from nltk.stem import WordNetLemmatizer\n lemmatizer = WordNetLemmatizer() \n text = ' '.join([lemmatizer.lemmatize(word) for word in text.split()])\n return text\n\ndef remove_stopwords(text, is_lower_case=False): # Remove stopwords\n from nltk.corpus import stopwords\n from nltk.tokenize import WordPunctTokenizer\n tokenizer = WordPunctTokenizer()\n stopword_list =stopwords.words('english')\n tokens = tokenizer.tokenize(text)\n tokens = [token.strip() for token in tokens]\n if is_lower_case:\n filtered_tokens = [token for token in tokens if token not in stopword_list]\n else:\n filtered_tokens = [token for token in tokens if token.lower() not in stopword_list]\n filtered_text = ' '.join(filtered_tokens) \n return filtered_text\n\ndef remove_link(text): # Remove https\n text = re.sub(r'http\\S+', '', text)\n return text\n \ndef remove_hash_attherate(text): # Remove @ and # tags\n text = re.sub(\"#\\w*\", \"\",text)\n text = re.sub(\"@\\w*\", \"\",text)\n text = re.sub(\"\\s+\", \" \", text)\n return text\n\n# Compiling all text cleaning function\n\ndef noramalize_text(text,htmltags = True, accented_chars = True, contractions_exp = True,\n text_lower_case = True,special_characters = True, stemmer_text = True, \n lemmatize_text = True, 
stopwords_remove = False, remove_hash = True, remove_linkadd = True):\n if htmltags:\n text = remove_htmltags(text)\n \n if accented_chars:\n text = remove_accented_chars(text)\n \n if contractions_exp:\n text = expand_contractions(text)\n \n if text_lower_case:\n text = text.lower()\n \n if remove_linkadd:\n text = remove_link(text)\n # remove extra line\n text = re.sub(r'[\\r|\\n|\\r\\n]+', ' ',text)\n \n if remove_hash:\n text = remove_hash_attherate(text)\n \n if special_characters:\n text = remove_special_characters(text)\n \n if stemmer_text:\n text = simple_stemmer(text)\n \n if lemmatize_text:\n text = simple_lemmatize(text)\n \n # remove extra whitespace\n text = re.sub(' +', ' ', text) \n \n if stopwords_remove:\n text = remove_stopwords(text) \n \n return text\n\n\n# # Loading the pretrained tokenizer and model \n\n# In[16]:\n\n\n# loading\nwith open('tokenizer.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\nMAX_LEN = 50 \n\nfrom keras.models import load_model\nmod = load_model('model.h5')\n\n\n# In[17]:\n\n\nsequences_test = tokenizer.texts_to_sequences(['This is good'])\ntest = tf.keras.preprocessing.sequence.pad_sequences(sequences_test,value = 0,padding = 'post', maxlen = MAX_LEN)\npred = mod.predict(test)\n\n\n# # Get it from twitter developer dashboard\n\n# In[18]:\n\n\n# inputs\nconsumer_key = \"\"\nconsumer_secret = \"\"\naccess_token = \"\"\naccess_token_secret = \"\"\n\n\n# In[20]:\n\n\nsentiment = 0\n\n\n# # Creating a function to write csv file of results to use it in plotting graph\n\n# In[21]:\n\n\ndef csv_creator(sentiment_list):\n dictionary = { \"sentiment\" : sentiment_list\n }\n data = pd.DataFrame(dictionary, index = None)\n data.to_csv(\"real_time.csv\", index = None)\n\nimport time \n\n\n# # Getting the tweets and predicting function\n\n# In[22]:\n\n\ntext = []\nclass Listener(StreamListener):\n def __init__(self):\n self.sentiment = 0\n self.list = []\n def on_data(self, data):\n raw_tweets = json.loads(data)\n try:\n if not raw_tweets['text'].startswith('RT'): # \"RT\" to remove retweets\n text.append(noramalize_text(raw_tweets['text']))\n sequences_test = tokenizer.texts_to_sequences(text)\n test = tf.keras.preprocessing.sequence.pad_sequences(sequences_test,value = 0,padding = 'post', maxlen = MAX_LEN)\n pred = mod.predict(test)\n if pred < 0.5:\n self.sentiment = self.sentiment - 1\n if pred >= 0.5:\n self.sentiment = self.sentiment + 1\n self.list.append(self.sentiment) \n csv_creator(self.list) # Passing predicted list to csv_creator function\n time.sleep(2)\n print(self.sentiment)\n print(noramalize_text(raw_tweets['text']))\n text.pop()\n \n except:\n print(\"Error got\")\n def on_error(self, status):\n print(status)\n\n\n# # Put your authentication details here \n\n# In[23]:\n\n\nauth = tweepy.OAuthHandler(consumer_key ,consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n\n# # Start real time tweet collecting steam \n\n# In[ ]:\n\n\ntwitter_stream = Stream(auth, Listener())\ntwitter_stream.filter(languages = [\"en\"], track = ['China'])\n\n"
] |
[
[
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"pandas.DataFrame"
]
] |
ZeyadOsama/apollo
|
[
"89e7d7b264b78ace7ef4239899e2dab2568174fa"
] |
[
"apollo/engine/genre_classification/tagger.py"
] |
[
"import argparse\n\nimport numpy as np\n\nfrom apollo.engine.genre_classification.extractor import extractor\n\n\ndef top_tags(file_name, model='MSD_musicnn', topN=5, input_length=10, input_overlap=False, print_tags=True,\n save_tags=False):\n taggram, tags = extractor(file_name, model=model, input_length=input_length, input_overlap=input_overlap,\n extract_features=False)\n tags_likelihood_mean = np.mean(taggram, axis=0)\n\n if print_tags:\n print('[' + file_name + '] Top' + str(topN) + ' tags: ')\n\n if save_tags:\n to = open(save_tags, 'a')\n to.write(\n file_name + ',' + model + ',input_length=' + str(input_length) + ',input_overlap=' + str(input_overlap))\n\n topN_tags = []\n for tag_index in tags_likelihood_mean.argsort()[-topN:][::-1]:\n topN_tags.append(tags[tag_index])\n\n if print_tags:\n print(' - ' + tags[tag_index])\n\n if save_tags:\n to.write(',' + tags[tag_index])\n\n if save_tags:\n to.write('\\n')\n to.close()\n\n return topN_tags\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Predict the topN tags of the music-clip in file_name with the selected model')\n\n parser.add_argument('file_name',\n type=str,\n help='audio file to process')\n\n parser.add_argument('-mod', '--model', metavar='',\n type=str,\n default='MTT_musicnn',\n help='select the music audio tagging model to employ (python -m musicnn.tagger music.mp3 --model MTT_musicnn)',\n required=False)\n\n parser.add_argument('-n', '--topN', metavar='',\n type=int,\n default=3,\n help='extract N most likely tags according to the selected model (python -m musicnn.tagger music.mp3 --topN 10)',\n required=False)\n\n parser.add_argument('-len', '--length', metavar='',\n type=float,\n default=3.0,\n help='length (in seconds) of the input spectrogram patches (python -m musicnn.tagger music.mp3 -len 3.1)',\n required=False)\n\n parser.add_argument('-ov', '--overlap', metavar='',\n type=float,\n default=False,\n help='ammount of overlap (in seconds) of the input spectrogram patches (python -m musicnn.tagger music.mp3 -ov 1.0)',\n required=False)\n\n parser.add_argument('-p', '--print',\n default=False,\n action='store_true',\n help='employ --print flag for printing the tags (python -m musicnn.tagger music.mp3 --print)',\n required=False)\n\n parser.add_argument('-s', '--save', metavar='',\n type=str,\n default=False,\n help='path where to store/save the tags (python -m musicnn.tagger music.mp3 --save out.tags)',\n required=False)\n\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == '__main__':\n # read parameters from command line\n params = parse_args()\n\n # predict topN tags\n topN_tags = top_tags(params.file_name,\n model=params.model,\n topN=params.topN,\n input_length=params.length,\n input_overlap=params.overlap,\n print_tags=params.print,\n save_tags=params.save)\n"
] |
[
[
"numpy.mean"
]
] |
janusassetallocation/CsvUploader
|
[
"051abca8658ceda2df035327a59ea7c0f499f31e"
] |
[
"tests/test_headercsv.py"
] |
[
"import py\nfrom csvuploader import HeaderCsv\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nfrom StringIO import StringIO\n\n\ndef test_load_file(request):\n test_dir = py.path.local(request.module.__file__)\n with test_dir.dirpath('data', 'simple.csv').open('r') as f:\n text = f.read()\n assert text == 'A,B\\n1,2'\n h = HeaderCsv.load(f)\n assert h.metadata is None\n assert_frame_equal(h.df, pd.DataFrame([[1, 2]], columns=['A', 'B']).set_index('A'))\n\n\ndef test_load_file_with_header(request):\n test_dir = py.path.local(request.module.__file__)\n with test_dir.dirpath('data', 'simple_with_header.csv').open('r') as f:\n h = HeaderCsv.load(f)\n assert h.metadata == { 'name': 'Simple with header' }\n assert_frame_equal(h.df, pd.DataFrame([[1, 2]], columns=['A', 'B']).set_index('A'))\n\n\ndef test_roundtrip():\n stream = StringIO()\n h1 = HeaderCsv(None, pd.DataFrame([[1, 2]], columns=['A', 'B']))\n h1.dump(stream)\n h2 = HeaderCsv.load(stream)\n assert h2.metadata is None\n assert_frame_equal(h2.df, pd.DataFrame([[1, 2]], columns=['A', 'B']))"
] |
[
[
"pandas.DataFrame"
]
] |
zenitheesc/report-maker
|
[
"ab932c2bf71549e9015be240dfed8a9fdd1b0069"
] |
[
"MAIN/statistics.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport folium as fl\nfrom time import sleep\nimport selenium.webdriver\nimport os\n\n# --------------------------------------------------------------------------------------------\n\n# Uses the 1.5xIQR method to identify outliers and substitute them with a NaN\n\ndef outliers_clean (data, sorted_data):\n\n Q1 = np.percentile(sorted_data, 25)\n Q3 = np.percentile(sorted_data, 75)\n IQR = Q3 - Q1\n lower_range = Q1 - (1.5 * IQR)\n upper_range = Q3 + (1.5 * IQR)\n\n for c, sample in enumerate(data):\n if sample > upper_range or sample < lower_range:\n data[c] = np.NaN\n\n return (data)\n\n# --------------------------------------------------------------------------------------------\n\n# Generic function to generate line plot graphics of a dataType in function of the another dataType\n# Examples: Temperature x Time; Pressure x Time\n\ndef generate_data_x_data (data, data2, dataType, dataType2, path):\n # print(dataType, len(data),dataType2, len(data2))\n if pd.Series(data).is_unique == True and False:\n pass\n # return -1 \n\n else:\n # Cleaning outliers\n # data = outliers_clean (data , np.sort(data))\n # data2 = outliers_clean (data2, np.sort(data2))\n\n df = pd.DataFrame(list(zip(data, data2)), columns= [f'{dataType}', f'{dataType2}'])\n \n # Set a graph design\n plt.style.use(u'seaborn-pastel')\n\n # Create graph\n plt.plot(df[f'{dataType2}'], df[f'{dataType}'], marker='', color='blue', linewidth=1, alpha=0.7)\n\n # Add titles\n plt.title(f\"{dataType} x {dataType2}\", loc='left', fontsize=12, fontweight=0, color='black')\n plt.xlabel(f'{dataType2}')\n plt.ylabel(f\"{dataType}\")\n\n # Save graph\n if (os.path.isdir(path + \"/IMAGES\") == False):\n os.makedirs(path + \"/IMAGES\")\n plt.savefig(f'{path}/IMAGES/graph_{dataType}_x_{dataType2}.png', dpi=4*96, bbox_inches='tight')\n plt.clf()\n return 1\n\n# ----------------------------------------------------------------------------------------------\n\n# Generic function to generate line plot graphics comparing 3 dataType, which are correlated through time\n# Examples: Compare temp_int, temp_ext and temp_geral through time\n\ndef generate_compare_graph (data1, data2, data3, time, dataType1, dataType2, dataType3, comparing_data, path):\n # # Cleaning outliers\n # data = outliers_clean (data , np.sort(data))\n # data2 = outliers_clean (data2, np.sort(data2))\n # data3 = outliers_clean (data3, np.sort(data3))\n\n df = pd.DataFrame(list(zip(data1, data2, data3, time)), columns= [f'{dataType1}', f'{dataType2}', f'{dataType3}', 'Time'])\n\n # Set a graph design\n plt.style.use(u'seaborn-pastel')\n\n # Create graph\n plt.plot(df['Time'], df[f'{dataType1}'], marker='', color='red', linewidth=1.5, alpha=0.7, label= f'{dataType1}')\n plt.plot(df['Time'], df[f'{dataType2}'], marker='', color='blue', linewidth=1.5, alpha=0.7, label= f'{dataType2}')\n plt.plot(df['Time'], df[f'{dataType3}'], marker='', color='green', linewidth=1.5, alpha=0.7, label= f'{dataType3}')\n\n # Add legend (Acertar)\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n\n # Add titles\n plt.title(f\"Comparing {comparing_data}\", loc='left', fontsize=12, fontweight=0, color='black')\n plt.xlabel(\"Time (s)\") # Podemos entrar como parΓ’metro as unidades de medida\n plt.ylabel(f\"{comparing_data}\")\n\n # Save graph\n if (os.path.isdir(path + \"/IMAGES\") == False):\n os.makedirs(path + \"/IMAGES\")\n 
plt.savefig(f'{path}/IMAGES/graph_compare_{comparing_data}.png', dpi=4*96, bbox_inches='tight')\n    plt.clf()\n\n# ----------------------------------------------------------------------------------------------\n\n# Generates a map using latitude and longitude data\n\ndef generate_map (latitude, longitude, path):\n\n    df = pd.DataFrame(list(zip(latitude, longitude)), columns= ['Latitude', 'Longitude'])\n\n    # Create a map\n    map = fl.Map(location=[-21.9808416, -47.7506511], tiles=\"OpenStreetMap\", zoom_start=9)\n\n    # Mark all coordinates\n    for row in range(0,len(df)):\n        fl.CircleMarker((df.loc[row, 'Latitude'], df.loc[row, 'Longitude']), radius=7, weight=5, color='red', fill_color='red', fill_opacity=.5).add_to(map)\n\n    # Save the map as an html \n    if (os.path.isdir(path + \"/IMAGES\") == False):\n        os.makedirs(path + \"/IMAGES\") \n    map.save(f'{path}/IMAGES/Map.html')\n\n    # Open a browser window to display the html file and screenshot the map\n\n    driver = selenium.webdriver.Chrome(os.path.join(os.path.dirname(__file__), \"DEPENDENCES/chromedriver.exe\"))\n    driver.set_window_size(4000, 3000) \n    driver.get('file://' + os.path.abspath(f'{path}/IMAGES/Map.html'))  # driver.get() expects a URL, not a bare filesystem path\n    sleep(5)\n\n    driver.save_screenshot(f'{path}/IMAGES/map.png')\n    driver.quit()\n\n# ----------------------------------------------------------------------------------------------\n\ndef generate_scatter_plot (data, data2, dataType, dataType2):\n\n    df = pd.DataFrame(list(zip(data, data2)), columns= [f'{dataType}', f'{dataType2}'])\n\n    sns.regplot(x=df[f'{dataType2}'], y=df[f'{dataType}'], line_kws={\"color\":\"r\",\"alpha\":0.5,\"lw\":4}, scatter_kws={\"color\":\"blue\",\"alpha\":0.3, \"s\":10})\n\n    # Add titles\n    plt.title(f\"{dataType} x {dataType2}\", loc='left', fontsize=12, fontweight=0, color='black')\n    plt.xlabel(f'{dataType2}')\n    plt.ylabel(f\"{dataType}\")\n\n    # Save graph\n    plt.savefig(f'scatterplot_{dataType}_x_{dataType2}.png', dpi=96, bbox_inches='tight')\n    plt.clf()\n\n# ----------------------------------------------------------------------------------------------\n\n"
] |
[
[
"numpy.percentile",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.clf",
"pandas.Series"
]
] |
kristinakupf/Histo_StrengthInDiversity
|
[
"1c7f6156f5a7504c03f66881301bbca264789f7e"
] |
[
"SS_pretrain/utils.py"
] |
[
"import time\nimport torch\nimport pickle\nimport torchvision.transforms as t\nimport torch.utils.data as data\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nimport os\nimport h5py\nimport random\nfrom shutil import copyfile\nimport io\nfrom PIL import Image\nimport math\nimport matplotlib.pyplot as plt\nimport Dataset_Combinations\nimport cv2\n\nclass TextLogger():\n def __init__(self, title, save_path, append=False):\n file_state = 'wb'\n if append:\n file_state = 'ab'\n self.file = open(save_path, file_state, 0)\n self.log(title)\n\n def log(self, strdata):\n outstr = strdata + '\\n'\n outstr = outstr.encode(\"utf-8\")\n self.file.write(outstr)\n\n def __del__(self):\n self.file.close()\n\njigsaw_image_transform = t.Compose([\n # t.Resize(96, Image.BILINEAR),\n # t.CenterCrop(90),\n t.Resize(600, Image.BILINEAR),\n t.RandomHorizontalFlip(0.5),\n t.RandomRotation([0, 360]),\n t.RandomCrop(300),\n t.ColorJitter(\n hue= 0.01,\n saturation=0.01,\n brightness=0.01,\n contrast=0.01),\n t.ToTensor(),\n ])\n\nrotation_image_transform = t.Compose([\n t.Resize(150, Image.BILINEAR),\n t.RandomCrop(96),\n t.ColorJitter(\n hue= 0.01,\n saturation=0.01,\n brightness=0.01,\n contrast=0.01),\n t.ToTensor(),\n ])\n\ntile_transform = t.Compose([\n t.Resize((100, 100)),\n t.RandomCrop(96),\n t.ColorJitter(\n hue= 0.01,\n saturation=0.01,\n brightness=0.01,\n contrast=0.01),\n t.ToTensor(),\n ])\n\nclass ImageDataset(data.Dataset):\n def __init__(self, dataset_path, train, is_test, dataset, num_classes, ss_task):\n\n self.train = train\n self.is_test = is_test\n self.dataset=dataset\n self.dataset_path = dataset_path\n self.num_classes=num_classes\n self.ss_task = ss_task\n target = dataset_path\n\n #If single dataset will return single, if combo# will return multiple datasets\n self.dataset_list = Dataset_Combinations.dataset_list_convert(self.dataset)\n print(self.dataset_list)\n self.h5_list=[np.zeros(len(self.dataset_list))]\n\n self.pil2tensor = t.ToTensor()\n self.tensor2pil = t.ToPILImage()\n\n\n for dataset_idx in range(len(self.dataset_list)):\n\n #Specify paths to dataset\n train_path = self.dataset_list[dataset_idx] + '_train.h5'\n valid_path = self.dataset_list[dataset_idx] + '_valid.h5'\n test_path = self.dataset_list[dataset_idx] + '_test.h5'\n\n #If using combo of multiple datasets\n if len(self.dataset_list) != 1:\n target_mod = target.replace(self.dataset, self.dataset_list[dataset_idx])\n else:\n target_mod = target\n\n\n if self.train == True:\n #Training\n if ss_task == 'jigsaw':\n self.transform =jigsaw_image_transform\n if ss_task == 'rotation':\n self.transform = rotation_image_transform\n\n self.h5_file = target_mod + train_path\n else:\n #Validation\n if ss_task == 'jigsaw':\n self.transform = jigsaw_image_transform\n if ss_task == 'rotation':\n self.transform = rotation_image_transform\n\n if self.is_test==False:\n self.h5_file = target_mod + valid_path\n else:\n #Testing\n self.h5_file = target_mod + test_path\n\n #Save that specific h5 file to index in list\n self.h5_list = (h5py.File(self.h5_file, 'r'))['x']\n\n # randomly sample 4000 images from entire dataset (4000/#of datasets)\n num_samples =int(4000/len(self.dataset_list))\n if self.train==True:\n print('subsampling {} samples from dataset of length {}'.format(num_samples, len(self.h5_list)))\n random_idx=np.sort(random.sample(range(0,len(self.h5_list)), num_samples))\n self.h5_list = self.h5_list[list(random_idx)]\n else:\n random_idx = np.sort(random.sample(range(0, len(self.h5_list)), 
int(num_samples/4)))\n self.h5_list = self.h5_list[list(random_idx)]\n\n if dataset_idx == 0 :\n self.data = self.h5_list\n else:\n self.data = torch.utils.data.ConcatDataset([self.data, self.h5_list])\n\n self.data_length = len(self.data)\n\n self.random_ixs = list(range(self.data_length))\n random.shuffle(self.random_ixs)\n\n if not os.path.exists('data/'+dataset):\n os.makedirs('data/'+dataset)\n\n if not os.path.exists('data/'+dataset+'/mean_std.pt'):\n mean_std = {}\n mean_std['mean'] = [0,0,0]\n mean_std['std'] = [0,0,0]\n\n print('Calculating mean and std')\n for ix in tqdm(range(len(self.random_ixs))):\n np_dat = self.data[ix]\n img = self.pil2tensor(Image.fromarray(np_dat))\n for cix in range(3):\n mean_std['mean'][cix] += img[cix,:,:].mean()\n mean_std['std'][cix] += img[cix,:,:].std()\n\n for cix in range(3):\n mean_std['mean'][cix] /= self.data_length\n mean_std['std'][cix] /= self.data_length\n\n torch.save(mean_std, 'data/'+dataset+'/mean_std.pt')\n\n else:\n mean_std = torch.load('data/'+dataset+'/mean_std.pt')\n\n normalize = t.Normalize(mean=mean_std['mean'], std=mean_std['std'])\n # normalize = t.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n # normalize = t.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n self.transform.transforms.append(normalize)\n\n def split_tiles(self, img):\n img = self.transform(img)\n img = self.tensor2pil(img)\n\n tiles = [None] * 9\n for n in range(9):\n\n #Crop each tile\n tile_h, tile_w = img.size\n left = tile_h/3*(n%3)\n right = tile_h/3*((n%3)+1)\n top = tile_h/3*(math.floor(n/3))\n bottom = tile_h/3*(math.ceil((n+1)/3))\n\n tile=(img.crop((left,top,right,bottom)))\n\n #Normalize individual tile\n norm_tile = Image.fromarray(cv2.normalize(np.uint8(tile), np.uint8(tile), alpha=0, beta=255, norm_type=cv2.NORM_MINMAX))\n\n #Apply individual tile transform\n tile = tile_transform(norm_tile)\n tiles[n] = tile\n\n return tiles\n\n def jigsaw_task(self, img):\n tiles = self.split_tiles(img)\n\n #Load pre-calculated permutations of jigsaw puzzle\n all_perm = np.load('./jigsaw_setup/permutations_%d.npy' % (self.num_classes))\n\n #Randomly select a permutation index to apply\n permute_index = np.random.randint(self.num_classes)\n order = all_perm[permute_index]\n\n data = [tiles[order[t]] for t in range(9)]\n\n data = torch.stack(data, 0)\n\n return data, int(permute_index)\n\n def rotation_task(self, img):\n #Define possible rotation classes\n transform_val = ([0,90,180,270])\n\n rand_idx = random.randrange(len(transform_val))\n img_rot = t.functional.rotate(img, transform_val[rand_idx])\n\n img_rot = self.transform(img_rot)\n\n return img_rot, rand_idx\n\n def __getitem__(self, index):\n\n img = Image.fromarray(self.data[self.random_ixs[index]])\n\n #Split image into tiles and permute them according to one of the classes\n if self.ss_task == 'jigsaw':\n data, labels = self.jigsaw_task(img)\n\n if self.ss_task == 'rotation':\n data, labels = self.rotation_task(img)\n\n img = self.transform(img)\n\n return data, labels, img\n\n\n def __len__(self):\n return int(self.data_length)\n\n\nif __name__ == '__main__':\n train_logger = TextLogger('Train loss', 'train_loss.log')\n for ix in range(30):\n # print(ix)\n train_logger.log('%s, %s' % (str(torch.rand(1)[0]), str(torch.rand(1)[0])))\n time.sleep(1)\n\n\n"
] |
[
[
"torch.utils.data.ConcatDataset",
"torch.rand",
"numpy.uint8",
"torch.stack",
"torch.save",
"numpy.load",
"numpy.random.randint",
"torch.load"
]
] |
fac2003/attention-is-all-you-need-pytorch
|
[
"892e7b6de7d92c8acb78800a9da33e2b00918374"
] |
[
"src/org/campagnelab/dl/funnel/transformer/train_transformer.py"
] |
[
"import time\n\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom src.org.campagnelab.dl.CheckGradients import register_hooks\nfrom src.org.campagnelab.dl.funnel.transformer.Batching import Batch\nfrom src.org.campagnelab.dl.funnel.transformer.FunnelTransformerModel import make_funnel_model\nfrom src.org.campagnelab.dl.funnel.transformer.LabelSmoothing import LabelSmoothing\nfrom src.org.campagnelab.dl.funnel.transformer.Masking import subsequent_mask\nfrom src.org.campagnelab.dl.funnel.transformer.Optimizing import NoamOpt\nfrom src.org.campagnelab.dl.funnel.transformer.TransformerModel import make_model\n\n\nclass SimpleLossCompute:\n \"A simple loss compute and train function.\"\n\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n\n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)),\n y.contiguous().view(-1)) / norm\n return loss\n # loss.backward()\n # if self.opt is not None:\n # self.opt.step()\n # self.opt.optimizer.zero_grad()\n # return loss.data[0] * norm\n\n\ndef run_epoch(epoch, data_iter, model, loss_compute):\n \"Standard Training and Logging Function\"\n start = time.time()\n total_tokens = 0\n total_loss = 0\n tokens = 0\n\n for i, batch in enumerate(data_iter):\n\n out = model.forward(batch.src, batch.trg,\n batch.src_mask, batch.trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\n loss.backward()\n total_loss += loss\n total_tokens += batch.ntokens\n tokens += batch.ntokens\n if i % 50 == 1:\n elapsed = time.time() - start\n print(\"Epoch %d Step: %d Loss: %f Tokens per Sec: %f\" %\n (epoch, i, loss / batch.ntokens, tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss / total_tokens\n\n\ndef data_gen(V, batch, nbatches):\n \"Generate random data for a src-tgt copy task.\"\n for i in range(nbatches):\n data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))\n data[:, 0] = 1\n src = Variable(data, requires_grad=False)\n tgt = Variable(data, requires_grad=False)\n yield Batch(src, tgt, 0)\n\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory,src_mask = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len - 1):\n out = model.decode(memory, src_mask,\n Variable(ys),\n Variable(subsequent_mask(ys.size(1))\n .type_as(src.data)))\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data[0]\n ys = torch.cat([ys,\n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)\n return ys\n\n\nV = 11\ncriterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)\nmodel = make_funnel_model(V, V, N=2, d_model=32, d_ff=64, max_length=20)\n#print(model)\nmodel_opt = NoamOpt(model.src_embed[0].d_model, 1, 400,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\nfor epoch in range(100):\n model.train()\n run_epoch(epoch, data_gen(V, 30, 20), model,\n SimpleLossCompute(model.generator, criterion, model_opt))\n model.eval()\n print(run_epoch(epoch, data_gen(V, 30, 5), model,\n SimpleLossCompute(model.generator, criterion, None)))\n model.eval()\n src = Variable(torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]))\n src_mask = Variable(torch.ones(1, 1, 10))\n print(greedy_decode(model, src, src_mask, max_len=10, start_symbol=1))\n for layer in model.encoder_src.layers:\n print(\"compression rate: 
{}\".format(layer.compressor.compression_rate.data[0]))\n"
] |
[
[
"torch.max",
"torch.autograd.Variable",
"torch.ones",
"numpy.random.randint",
"torch.LongTensor"
]
] |
jinumohan173/tensorflow
|
[
"c967084e6af90b560b47435ff4d3292677353bfe"
] |
[
"tensorflow/python/ops/image_grad_test.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Python ops defined in image_grad.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass ResizeNearestNeighborOpTest(tf.test.TestCase):\n\n TYPES = [np.float32, np.float64]\n\n def testShapeIsCorrectAfterOp(self):\n in_shape = [1, 2, 2, 1]\n out_shape = [1, 4, 6, 1]\n\n for nptype in self.TYPES:\n x = np.arange(0, 4).reshape(in_shape).astype(nptype)\n\n with self.test_session() as sess:\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_nearest_neighbor(input_tensor,\n out_shape[1:3])\n self.assertEqual(out_shape, list(resize_out.get_shape()))\n\n resize_out = sess.run(resize_out)\n self.assertEqual(out_shape, list(resize_out.shape))\n\n def testGradFromResizeToLargerInBothDims(self):\n in_shape = [1, 2, 3, 1]\n out_shape = [1, 4, 6, 1]\n\n for nptype in self.TYPES:\n x = np.arange(0, 6).reshape(in_shape).astype(nptype)\n\n with self.test_session():\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_nearest_neighbor(input_tensor,\n out_shape[1:3])\n err = tf.test.compute_gradient_error(input_tensor,\n in_shape,\n resize_out,\n out_shape,\n x_init_value=x)\n self.assertLess(err, 1e-3)\n\n def testGradFromResizeToSmallerInBothDims(self):\n in_shape = [1, 4, 6, 1]\n out_shape = [1, 2, 3, 1]\n\n for nptype in self.TYPES:\n x = np.arange(0, 24).reshape(in_shape).astype(nptype)\n\n with self.test_session():\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_nearest_neighbor(input_tensor,\n out_shape[1:3])\n err = tf.test.compute_gradient_error(input_tensor,\n in_shape,\n resize_out,\n out_shape,\n x_init_value=x)\n self.assertLess(err, 1e-3)\n\n def testCompareGpuVsCpu(self):\n in_shape = [1, 4, 6, 3]\n out_shape = [1, 8, 16, 3]\n\n for nptype in self.TYPES:\n x = np.arange(0, np.prod(in_shape)).reshape(in_shape).astype(nptype)\n for align_corners in [True, False]:\n with self.test_session(use_gpu=False):\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_nearest_neighbor(input_tensor,\n out_shape[1:3],\n align_corners=align_corners)\n grad_cpu = tf.test.compute_gradient(input_tensor,\n in_shape,\n resize_out,\n out_shape,\n x_init_value=x)\n\n with self.test_session(use_gpu=True):\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_nearest_neighbor(input_tensor,\n out_shape[1:3],\n align_corners=align_corners)\n grad_gpu = tf.test.compute_gradient(input_tensor,\n in_shape,\n resize_out,\n out_shape,\n x_init_value=x)\n self.assertAllClose(grad_cpu, grad_gpu, rtol=1e-5, atol=1e-5)\n\nclass ResizeBilinearOpTest(tf.test.TestCase):\n\n def testShapeIsCorrectAfterOp(self):\n in_shape = [1, 2, 2, 1]\n out_shape = [1, 4, 6, 1]\n\n x = 
np.arange(0, 4).reshape(in_shape).astype(np.float32)\n\n with self.test_session() as sess:\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_bilinear(input_tensor,\n out_shape[1:3])\n self.assertEqual(out_shape, list(resize_out.get_shape()))\n\n resize_out = sess.run(resize_out)\n self.assertEqual(out_shape, list(resize_out.shape))\n\n def testGradFromResizeToLargerInBothDims(self):\n in_shape = [1, 2, 3, 1]\n out_shape = [1, 4, 6, 1]\n\n x = np.arange(0, 6).reshape(in_shape).astype(np.float32)\n\n with self.test_session():\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_bilinear(input_tensor,\n out_shape[1:3])\n err = tf.test.compute_gradient_error(input_tensor,\n in_shape,\n resize_out,\n out_shape,\n x_init_value=x)\n self.assertLess(err, 1e-3)\n\n def testGradFromResizeToSmallerInBothDims(self):\n in_shape = [1, 4, 6, 1]\n out_shape = [1, 2, 3, 1]\n\n x = np.arange(0, 24).reshape(in_shape).astype(np.float32)\n\n with self.test_session():\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_bilinear(input_tensor,\n out_shape[1:3])\n err = tf.test.compute_gradient_error(input_tensor,\n in_shape,\n resize_out,\n out_shape,\n x_init_value=x)\n self.assertLess(err, 1e-3)\n\n def testGradOnUnsupportedType(self):\n in_shape = [1, 4, 6, 1]\n out_shape = [1, 2, 3, 1]\n\n x = np.arange(0, 24).reshape(in_shape).astype(np.uint8)\n\n with self.test_session():\n input_tensor = tf.constant(x, shape=in_shape)\n resize_out = tf.image.resize_bilinear(input_tensor, out_shape[1:3])\n grad = tf.gradients(input_tensor, [resize_out])\n self.assertEqual([None], grad)\n\n def testCompareGpuVsCpu(self):\n in_shape = [2, 4, 6, 3]\n out_shape = [2, 8, 16, 3]\n\n size = np.prod(in_shape)\n x = 1.0 / size * np.arange(0, size).reshape(in_shape).astype(np.float32)\n for align_corners in [True, False]:\n grad = {}\n for use_gpu in [False, True]:\n with self.test_session(use_gpu=use_gpu):\n input_tensor = tf.constant(x, shape=in_shape)\n resized_tensor = tf.image.resize_bilinear(input_tensor,\n out_shape[1:3],\n align_corners=align_corners)\n grad[use_gpu] = tf.test.compute_gradient(input_tensor,\n in_shape,\n resized_tensor,\n out_shape,\n x_init_value=x)\n\n self.assertAllClose(grad[False], grad[True], rtol=1e-4, atol=1e-4)\n\n\nclass CropAndResizeOpTest(tf.test.TestCase):\n\n def testShapeIsCorrectAfterOp(self):\n batch = 2\n image_height = 3\n image_width = 4\n crop_height = 4\n crop_width = 5\n depth = 2\n num_boxes = 2\n\n image_shape = [batch, image_height, image_width, depth]\n crop_size = [crop_height, crop_width]\n crops_shape = [num_boxes, crop_height, crop_width, depth]\n\n image = np.arange(0, batch * image_height * image_width *\n depth).reshape(image_shape).astype(np.float32)\n boxes = np.array([[0, 0, 1, 1], [.1, .2, .7, .8]], dtype=np.float32)\n box_ind = np.array([0, 1], dtype=np.int32)\n\n with self.test_session() as sess:\n crops = tf.image.crop_and_resize(\n tf.constant(image, shape=image_shape),\n tf.constant(boxes, shape=[num_boxes, 4]),\n tf.constant(box_ind, shape=[num_boxes]),\n tf.constant(crop_size, shape=[2]))\n self.assertEqual(crops_shape, list(crops.get_shape()))\n crops = sess.run(crops)\n self.assertEqual(crops_shape, list(crops.shape))\n\n def _randomUniformAvoidAnchors(self, low, high, anchors, radius, num_samples):\n \"\"\"Generate samples that are far enough from a set of anchor points.\n\n We generate uniform samples in [low, high], then reject those that are less\n than radius away from any 
point in anchors. We stop after we have accepted\n num_samples samples.\n\n Args:\n low: The lower end of the interval.\n high: The upper end of the interval.\n anchors: A list of length num_crops with anchor points to avoid.\n radius: Distance threshold for the samples from the anchors.\n num_samples: How many samples to produce.\n\n Returns:\n samples: A list of length num_samples with the accepted samples.\n \"\"\"\n self.assertTrue(low < high)\n self.assertTrue(radius >= 0)\n num_anchors = len(anchors)\n # Make sure that at least half of the interval is not forbidden.\n self.assertTrue(2 * radius * num_anchors < 0.5 * (high - low))\n anchors = np.reshape(anchors, num_anchors)\n samples = []\n while len(samples) < num_samples:\n sample = np.random.uniform(low, high)\n if np.all(np.fabs(sample - anchors) > radius):\n samples.append(sample)\n return samples\n\n def testGradRandomBoxes(self):\n \"\"\"Test that the gradient is correct for randomly generated boxes.\n\n The mapping is piecewise differentiable with respect to the box coordinates.\n The points where the function is not differentiable are those which are\n mapped to image pixels, i.e., the normalized y coordinates in\n np.linspace(0, 1, image_height) and normalized x coordinates in\n np.linspace(0, 1, image_width). Make sure that the box coordinates are\n sufficiently far away from those rectangular grid centers that are points of\n discontinuity, so that the finite difference Jacobian is close to the\n computed one.\n \"\"\"\n np.random.seed(1) # Make it reproducible.\n delta = 1e-3\n radius = 2 * delta\n low, high = -0.5, 1.5 # Also covers the case of extrapolation.\n\n image_height = 4\n for image_width in range(1, 3):\n for crop_height in range(1, 3):\n for crop_width in range(2, 4):\n for depth in range(1, 3):\n for num_boxes in range(1, 3):\n\n batch = num_boxes\n image_shape = [batch, image_height, image_width, depth]\n crop_size = [crop_height, crop_width]\n crops_shape = [num_boxes, crop_height, crop_width, depth]\n boxes_shape = [num_boxes, 4]\n\n image = np.arange(0, batch * image_height * image_width *\n depth).reshape(image_shape).astype(np.float32)\n boxes = []\n for _ in range(num_boxes):\n # pylint: disable=unbalanced-tuple-unpacking\n y1, y2 = self._randomUniformAvoidAnchors(\n low, high, np.linspace(0, 1, image_height), radius, 2)\n x1, x2 = self._randomUniformAvoidAnchors(\n low, high, np.linspace(0, 1, image_width), radius, 2)\n # pylint: enable=unbalanced-tuple-unpacking\n boxes.append([y1, x1, y2, x2])\n\n boxes = np.array(boxes, dtype=np.float32)\n box_ind = np.arange(batch, dtype=np.int32)\n\n with self.test_session():\n image_tensor = tf.constant(image, shape=image_shape)\n boxes_tensor = tf.constant(boxes, shape=[num_boxes, 4])\n box_ind_tensor = tf.constant(box_ind, shape=[num_boxes])\n crops = tf.image.crop_and_resize(\n image_tensor,\n boxes_tensor,\n box_ind_tensor,\n tf.constant(crop_size, shape=[2]))\n\n err = tf.test.compute_gradient_error(\n [image_tensor, boxes_tensor], [image_shape, boxes_shape],\n crops,\n crops_shape,\n delta=delta,\n x_init_value=[image, boxes])\n\n self.assertLess(err, 2e-3)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.image.resize_bilinear",
"numpy.array",
"numpy.reshape",
"numpy.random.seed",
"tensorflow.test.compute_gradient_error",
"tensorflow.test.compute_gradient",
"tensorflow.gradients",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.constant",
"numpy.fabs",
"numpy.prod",
"numpy.random.uniform",
"numpy.arange",
"tensorflow.test.main",
"numpy.linspace"
]
] |
awslabs/adatune
|
[
"87d106de0433c6437c2b0b1d436c9f3aded7e134"
] |
[
"adatune/mu_adam.py"
] |
[
"# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport math\n\nimport numpy as np\nimport torch.optim as optim\n\nfrom adatune.utils import *\n\n\nclass MuAdam(object):\n\n def __init__(self, optimizer, hyper_lr, grad_clipping, first_order, mu, alpha, device):\n self.optimizer = optimizer\n self.lr = self.optimizer.param_groups[0]['lr']\n self.beta1 = self.optimizer.param_groups[0]['betas'][0]\n self.beta2 = self.optimizer.param_groups[0]['betas'][1]\n self.eps = self.optimizer.param_groups[0]['eps']\n self.hyper_lr = hyper_lr\n self.hyper_lr_tensor = torch.tensor(self.lr, requires_grad=True, device=device)\n self.hyper_optim = optim.SGD([self.hyper_lr_tensor], lr=self.hyper_lr)\n self.grad_clipping = grad_clipping\n self.first_order = first_order\n self.device = device\n self.mu = mu\n self.mu_mode = 'auto' if self.mu < 0.0 else 'manual'\n self.alpha = alpha\n self.z_0 = None\n self.z_1 = None\n self.z_2 = None\n self.step = 0\n self.b = None\n self.c = 0.0\n self.state_init = False\n\n def flatten_state(self, net):\n return (torch.cat([self.optimizer.state[v]['exp_avg'].view(-1) for v in net.parameters()]),\n torch.cat([self.optimizer.state[v]['exp_avg_sq'].view(-1) for v in net.parameters()]))\n\n def clip_grad(self, net):\n if self.grad_clipping:\n for params in net:\n params.clamp_(-self.grad_clipping, self.grad_clipping)\n\n def compute_hg(self, net, first_grad):\n # Adam needs at least one update to initialize the gradient and sqauared-gradient buffers\n if not self.state_init:\n self.state_init = True\n self.step += 1\n return\n\n self.clip_grad(first_grad)\n grad_flatten = torch.cat([g.view(-1) for g in first_grad]).requires_grad_(True)\n\n coeff = (math.sqrt(1.0 - self.beta2 ** self.step)) / (1.0 - self.beta1 ** self.step)\n\n if self.first_order or self.z_2 is None:\n m_t, v_t = self.flatten_state(net)\n self.z_0 = torch.zeros_like(grad_flatten)\n self.z_1 = torch.zeros_like(grad_flatten)\n self.z_2 = torch.neg(coeff * (m_t / torch.sqrt(v_t + self.eps)))\n else:\n hvp = ag.grad(grad_flatten @ self.z_2, net.parameters())\n self.clip_grad(hvp)\n grad_flatten = grad_flatten.detach()\n hvp_flatten = torch.cat([h.view(-1) for h in hvp])\n\n m_t, v_t = self.flatten_state(net)\n\n a_31 = -self.lr * coeff * self.beta1 * torch.reciprocal(torch.sqrt(v_t + self.eps))\n a_32 = self.lr * coeff * 0.5 * self.beta2 * (m_t / torch.pow(v_t + self.eps, 1.5))\n a_33_inner_1 = (1.0 - self.beta1) * torch.reciprocal(torch.sqrt(v_t + self.eps))\n a_33_inner_2 = (1.0 - self.beta2) * ((m_t * grad_flatten) / torch.pow(v_t + self.eps, 1.5))\n a_33 = (1.0 - self.lr * coeff) * (a_33_inner_1 - a_33_inner_2) * hvp_flatten\n\n self.z_2 = self.mu * (a_31 * self.z_0 + a_32 * self.z_1 + a_33)\n self.z_2 = self.z_2 + torch.neg(coeff * (m_t / torch.sqrt(v_t + self.eps)))\n\n self.z_0 = self.mu * (self.beta1 * self.z_0 + (1.0 - self.beta1) * hvp_flatten)\n self.z_1 = self.mu * (self.beta2 * self.z_1 + 2.0 * (1.0 - self.beta2) * grad_flatten * hvp_flatten)\n\n self.step += 1\n\n self.z_0 = 
self.z_0.detach()\n self.z_1 = self.z_1.detach()\n self.z_2 = self.z_2.detach()\n\n self.b = grad_flatten.detach()\n\n def hyper_step(self, val_grad):\n if self.z_2 is None:\n return\n\n self.clip_grad(val_grad)\n val_grad_flatten = torch.cat([f.view(-1) for f in val_grad])\n\n mat_mul = val_grad_flatten @ self.z_2\n hyper_grad = mat_mul.item()\n\n self.hyper_lr_tensor.grad = torch.tensor(hyper_grad, device=self.device)\n self.hyper_optim.step()\n new_lr = self.hyper_lr_tensor.data.item()\n\n # Update LR\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = new_lr\n\n # # Update hyper-LR\n for param_group in self.hyper_optim.param_groups:\n param_group['lr'] = np.max([param_group['lr'] + self.alpha * hyper_grad * new_lr, 0.0])\n\n # Update mu\n if self.mu_mode == 'auto':\n grad_mult = (val_grad_flatten @ self.b).item()\n q_norm = new_lr / grad_mult\n z = np.maximum(np.minimum(q_norm, 1.), 0.)\n self.c = self.c * np.sign(self.mu) + self.mu\n self.mu = np.power(z, 1. / (self.c + 1.))\n"
] |
[
[
"numpy.max",
"numpy.minimum",
"torch.optim.SGD",
"numpy.sign",
"numpy.power"
]
] |
mahdi-darvish/GANs-augmented-pet-classifier
|
[
"399e86c022c007c66d48055a977f82ce498c51fc"
] |
[
"landmark_detection/pet/animal_data_generator.py"
] |
[
"\nimport pet.utils.general\nimport pet.utils.image\nimport numpy as np\nimport keras\nimport pet.utils.sampling\nfrom keras.applications import mobilenet_v2\nimport os\n\nclass animal_generator(keras.utils.Sequence):\n def __init__(self, path, output_type, include_landmarks=False, batch_size=64, shuffle=True,\n flip_horizontal=False, rotate=False, rotate_90=False, rotate_n=0,\n crop=False, crop_scale_balanced_black=False, crop_scale_balanced=False,\n sampling_method_rotate='random', sampling_method_resize='random',\n crop_landmarks_margin=0.1, crop_landmarks_random_margin=0.1):\n self.path = path\n self.output_type = output_type\n self.include_landmarks = include_landmarks\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.flip_horizontal = flip_horizontal\n self.rotate = rotate\n self.rotate_90 = rotate_90\n self.rotate_n = rotate_n\n self.crop = crop\n self.crop_scale_balanced_black = crop_scale_balanced_black\n self.crop_scale_balanced = crop_scale_balanced\n self.sampling_method_rotate = sampling_method_rotate\n self.sampling_method_resize = sampling_method_resize\n self.crop_landmarks_margin = crop_landmarks_margin\n self.crop_landmarks_random_margin = crop_landmarks_random_margin\n if self.output_type == 'bbox':\n self.output_dim = 4\n if self.include_landmarks:\n self.output_dim += 10\n else:\n self.output_dim = 10\n self.files = [os.path.join(path, f) for f in os.listdir(path) if f[-4:] in ('.jpg', '.bmp', '.gif', '.png')]\n self.indexes = np.arange(len(self.files))\n\n self.on_epoch_end()\n\n def __getitem__(self, index):\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n x = np.zeros((len(indexes),) + pet.utils.general.IMG_SHAPE)\n y = np.zeros((len(indexes), self.output_dim))\n for i, idx in enumerate(indexes):\n img, landmarks = pet.utils.image.load(self.files[idx])\n img, landmarks = self._augment(img, landmarks)\n img, landmarks = pet.utils.image.resize(img, landmarks, sampling_method=self.sampling_method_resize)\n landmarks = np.round(landmarks).astype('int')\n if self.output_type == 'bbox':\n bounding_box = pet.utils.image.get_bounding_box(landmarks)\n if self.include_landmarks:\n y[i] = np.concatenate((bounding_box, landmarks.flatten()))\n else:\n y[i] = bounding_box\n else:\n y[i] = landmarks.flatten()\n x[i] = np.asarray(img)\n x = mobilenet_v2.preprocess_input(x)\n return x, y\n\n def _augment(self, img, landmarks):\n if self.rotate:\n angle = 360 * np.random.random_sample()\n img, landmarks = pet.utils.image.rotate(img, landmarks, angle, sampling_method=self.sampling_method_rotate)\n if self.rotate_90:\n angle = np.random.choice([0, 90, 180, 270])\n img, landmarks = pet.utils.image.rotate(img, landmarks, angle, sampling_method=self.sampling_method_rotate)\n if self.rotate_n > 0:\n angle = self.rotate_n * (2. 
* np.random.random_sample() - 1.)\n img, landmarks = pet.utils.image.rotate(img, landmarks, angle, sampling_method=self.sampling_method_rotate)\n if self.output_type == 'bbox':\n if self.crop:\n bb_crop = pet.utils.sampling.sample_bounding_box(img.size, landmarks)\n img, landmarks = pet.utils.image.crop(img, landmarks, bb_crop)\n if self.crop_scale_balanced_black:\n bb_crop = pet.utils.sampling.sample_bounding_box_scale_balanced_black(landmarks)\n img, landmarks = pet.utils.image.crop(img, landmarks, bb_crop)\n if self.crop_scale_balanced:\n bb_crop = pet.utils.sampling.sample_bounding_box_scale_balanced(img.size, landmarks)\n img, landmarks = pet.utils.image.crop(img, landmarks, bb_crop)\n else:\n if self.crop:\n bb_crop = pet.utils.sampling.sample_bounding_box_landmarks(landmarks, self.crop_landmarks_margin, self.crop_landmarks_random_margin)\n img, landmarks = pet.utils.image.crop(img, landmarks, bb_crop)\n if self.flip_horizontal and np.random.random_sample() > 0.5:\n img, landmarks = pet.utils.image.flip(img, landmarks)\n return img, landmarks\n\n def __len__(self):\n return int(np.ceil(len(self.files) / self.batch_size))\n\n def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.indexes)\n"
] |
[
[
"numpy.random.choice",
"numpy.asarray",
"numpy.round",
"numpy.random.shuffle",
"numpy.random.random_sample"
]
] |
nknezek/MAC_waves
|
[
"2bf87fb6d8a24d0c046b19ff7011dd81ce36f285"
] |
[
"src/macmodel_bdiv.py"
] |
[
"import macmodel\nimport numpy as np\nfrom numpy import sin, cos, tan\n\nclass Model(macmodel.Model):\n\n def make_A(self):\n Nk = self.Nk\n Nl = self.Nl\n E = self.E\n Pm = self.Pm\n N = self.N\n th = self.th\n Br = self.Br\n Bth = self.Bth\n Bph = self.Bph\n '''\n Creates the A matrix (M*l*x = A*x)\n m: azimuthal fourier mode to compute\n '''\n\n ################################\n # Momentum Equation ############\n ################################\n # R-momentum\n self.add_gov_equation('rmom', 'ur')\n self.rmom.add_drP('p', C= -1)\n self.rmom.add_term('r_disp', -N**2)\n # self.rmom.add_term('uph', 2.0*sin(th))\n self.rmom.add_d2_b0('ur', C= E)\n self.rmom.add_d2r_th('uth', C= E)\n self.rmom.add_d2r_ph('uph', C= E)\n # self.rmom.add_dr_b0('br', C= E/Pm*Br)\n # self.rmom.add_dr_ccb0('bth', C= -E/Pm*Bth)\n # self.rmom.add_dr_ccb0('bph', C= -E/Pm*Bph)\n # self.rmom.add_dth('br', C= E/Pm*Bth)\n # self.rmom.add_dth('bth', C= E/Pm*Br)\n # self.rmom.add_dph('bph', C= E/Pm*Br)\n # self.rmom.add_dph('br', C= E/Pm*Bph)\n self.A_rows = self.rmom.rows\n self.A_cols = self.rmom.cols\n self.A_vals = self.rmom.vals\n del self.rmom\n\n # Theta-Momentum\n self.add_gov_equation('tmom', 'uth')\n self.tmom.add_dthP('p', C= -1)\n self.tmom.add_term('uph', 2.0*cos(th))\n self.tmom.add_d2_bd0('uth', C= E)\n self.tmom.add_d2th_r('ur', C= E)\n self.tmom.add_d2th_ph('uph', C= E)\n self.tmom.add_dr_ccb0('bth', C= E/Pm*Br)\n # self.tmom.add_dr_bd0('br', C= -E/Pm*Bth)\n # self.tmom.add_dth('bth', C= E/Pm*Bth)\n self.tmom.add_dth('br', C= -E/Pm*Br)\n # self.tmom.add_dth('bph', C= -E/Pm*Bph)\n # self.tmom.add_dph('bph', C= E/Pm*Bth)\n # self.tmom.add_dph('bth', C= E/Pm*Bph)\n self.A_rows += self.tmom.rows\n self.A_cols += self.tmom.cols\n self.A_vals += self.tmom.vals\n del self.tmom\n\n # Phi-Momentum\n self.add_gov_equation('pmom', 'uph')\n self.pmom.add_dphP('p', C= -1)\n self.pmom.add_term('uth', -2.0*cos(th))\n # self.pmom.add_term('ur', 2.0*sin(th))\n self.pmom.add_d2_bd0('uph', C= E)\n self.pmom.add_d2ph_r('ur', C= E)\n self.pmom.add_d2ph_th('uth', C= E)\n self.pmom.add_dr_ccb0('bph', C= E/Pm*Br)\n # self.pmom.add_dr_bd0('br', C= -E/Pm*Bph)\n # self.pmom.add_dth('bph', C= E/Pm*Bth)\n # self.pmom.add_dth('bth', C= E/Pm*Bph)\n # self.pmom.add_dph('bph', C= E/Pm*Bph)\n self.pmom.add_dph('br', C= -E/Pm*Br)\n # self.pmom.add_dph('bth', C= -E/Pm*Bth)\n self.A_rows += self.pmom.rows\n self.A_cols += self.pmom.cols\n self.A_vals += self.pmom.vals\n del self.pmom\n\n ################################\n # Lorentz Equation ##########\n ################################\n\n # B-divergence replaces r-lorentz\n self.add_gov_equation('bdiv', 'br')\n self.bdiv.add_dr_bd0('br')\n self.bdiv.add_dth('bth')\n self.bdiv.add_dph('bph')\n self.A_rows += self.bdiv.rows\n self.A_cols += self.bdiv.cols\n self.A_vals += self.bdiv.vals\n del self.bdiv\n\n # theta-Lorentz\n self.add_gov_equation('thlorentz', 'bth')\n self.thlorentz.add_dr_bd0('uth', C= Br)\n # self.thlorentz.add_dr_b0('ur', C= -Bth)\n # self.thlorentz.add_dph('uth', C= Bph)\n # self.thlorentz.add_dph('uph', C= -Bth)\n self.thlorentz.add_d2_ccb0('bth', C= E/Pm)\n self.thlorentz.add_d2th_r('br', C= E/Pm)\n self.thlorentz.add_d2th_ph('bph', C= E/Pm)\n self.A_rows += self.thlorentz.rows\n self.A_cols += self.thlorentz.cols\n self.A_vals += self.thlorentz.vals\n del self.thlorentz\n\n # phi-Lorentz\n self.add_gov_equation('phlorentz', 'bph')\n self.phlorentz.add_dr_bd0('uph', C= Br)\n # self.phlorentz.add_dr_b0('ur', C= -Bph)\n # self.phlorentz.add_dth('uph', C= Bth)\n # 
self.phlorentz.add_dth('uth', C= -Bph)\n self.phlorentz.add_d2_ccb0('bph', C= E/Pm)\n self.phlorentz.add_d2ph_r('br', C= E/Pm)\n self.phlorentz.add_d2ph_th('bth', C= E/Pm)\n self.A_rows += self.phlorentz.rows\n self.A_cols += self.phlorentz.cols\n self.A_vals += self.phlorentz.vals\n del self.phlorentz\n\n # Divergence (Mass Conservation) #########\n self.add_gov_equation('div', 'p')\n self.div.add_dr_b0('ur')\n self.div.add_dth('uth')\n self.div.add_dph('uph')\n self.A_rows += self.div.rows\n self.A_cols += self.div.cols\n self.A_vals += self.div.vals\n del self.div\n\n # Displacement Equation #########\n self.add_gov_equation('rdisp', 'r_disp')\n self.rdisp.add_term('ur', np.ones((Nk,Nl)))\n self.A_rows += self.rdisp.rows\n self.A_cols += self.rdisp.cols\n self.A_vals += self.rdisp.vals\n del self.rdisp\n\n self.A = macmodel.coo_matrix((self.A_vals, (self.A_rows, self.A_cols)),\n shape=(self.SizeM, self.SizeM))\n del self.A_vals, self.A_rows, self.A_cols\n return self.A\n\n def make_B(self):\n '''\n Creates the B matrix (B*l*x = A*x)\n m: azimuthal fourier mode to compute\n '''\n ones = np.ones((self.Nk, self.Nl))\n self.B_rows = []\n self.B_cols = []\n self.B_vals = []\n\n self.add_gov_equation('B_uth', 'uth')\n self.B_uth.add_term('uth', ones)\n self.B_rows = self.B_uth.rows\n self.B_cols = self.B_uth.cols\n self.B_vals = self.B_uth.vals\n del self.B_uth\n\n self.add_gov_equation('B_uph', 'uph')\n self.B_uph.add_term('uph', ones)\n self.B_rows += self.B_uph.rows\n self.B_cols += self.B_uph.cols\n self.B_vals += self.B_uph.vals\n del self.B_uph\n\n self.add_gov_equation('B_thlorentz', 'bth')\n self.B_thlorentz.add_term('bth', ones)\n self.B_rows += self.B_thlorentz.rows\n self.B_cols += self.B_thlorentz.cols\n self.B_vals += self.B_thlorentz.vals\n del self.B_thlorentz\n\n self.add_gov_equation('B_phlorentz', 'bph')\n self.B_phlorentz.add_term('bph', ones)\n self.B_rows += self.B_phlorentz.rows\n self.B_cols += self.B_phlorentz.cols\n self.B_vals += self.B_phlorentz.vals\n del self.B_phlorentz\n\n self.add_gov_equation('B_rdisp', 'r_disp')\n self.B_rdisp.add_term('r_disp', ones)\n self.B_rows += self.B_rdisp.rows\n self.B_cols += self.B_rdisp.cols\n self.B_vals += self.B_rdisp.vals\n del self.B_rdisp\n self.B = macmodel.coo_matrix((self.B_vals, (self.B_rows, self.B_cols)),\n shape=(self.SizeM, self.SizeM))\n del self.B_vals, self.B_rows, self.B_cols\n return self.B"
] |
[
[
"numpy.ones",
"numpy.cos"
]
] |
argonne-lcf/PINO
|
[
"95f830bdaafb2c03f7153df9e59e4832223a6108"
] |
[
"solver/legacy_solver.py"
] |
[
"import torch\nimport math\nimport scipy.io\nfrom timeit import default_timer\nfrom tqdm import tqdm\n\n\nclass GaussianRF(object):\n\n def __init__(self, dim, size, alpha=2, tau=3, sigma=None, boundary=\"periodic\", device=None):\n\n self.dim = dim\n self.device = device\n\n if sigma is None:\n sigma = tau**(0.5*(2*alpha - self.dim))\n\n k_max = size//2\n\n if dim == 1:\n k = torch.cat((torch.arange(start=0, end=k_max, step=1, device=device), \\\n torch.arange(start=-k_max, end=0, step=1, device=device)), 0)\n\n self.sqrt_eig = size*math.sqrt(2.0)*sigma*((4*(math.pi**2)*(k**2) + tau**2)**(-alpha/2.0))\n self.sqrt_eig[0] = 0.0\n\n elif dim == 2:\n wavenumers = torch.cat((torch.arange(start=0, end=k_max, step=1, device=device), \\\n torch.arange(start=-k_max, end=0, step=1, device=device)), 0).repeat(size,1)\n\n k_x = wavenumers.transpose(0,1)\n k_y = wavenumers\n\n self.sqrt_eig = (size**2)*math.sqrt(2.0)*sigma*((4*(math.pi**2)*(k_x**2 + k_y**2) + tau**2)**(-alpha/2.0))\n self.sqrt_eig[0,0] = 0.0\n\n elif dim == 3:\n wavenumers = torch.cat((torch.arange(start=0, end=k_max, step=1, device=device), \\\n torch.arange(start=-k_max, end=0, step=1, device=device)), 0).repeat(size,size,1)\n\n k_x = wavenumers.transpose(1,2)\n k_y = wavenumers\n k_z = wavenumers.transpose(0,2)\n\n self.sqrt_eig = (size**3)*math.sqrt(2.0)*sigma*((4*(math.pi**2)*(k_x**2 + k_y**2 + k_z**2) + tau**2)**(-alpha/2.0))\n self.sqrt_eig[0,0,0] = 0.0\n\n self.size = []\n for j in range(self.dim):\n self.size.append(size)\n\n self.size = tuple(self.size)\n\n def sample(self, N):\n\n coeff = torch.randn(N, *self.size, 2, device=self.device)\n\n coeff[...,0] = self.sqrt_eig*coeff[...,0]\n coeff[...,1] = self.sqrt_eig*coeff[...,1]\n\n u = torch.ifft(coeff, self.dim, normalized=False)\n u = u[...,0]\n\n return u\n\n#w0: initial vorticity\n#f: forcing term\n#visc: viscosity (1/Re)\n#T: final time\n#delta_t: internal time-step for solve (descrease if blow-up)\n#record_steps: number of in-time snapshots to record\ndef navier_stokes_2d(w0, f, visc, T, delta_t=1e-4, record_steps=1):\n\n #Grid size - must be power of 2\n N = w0.size()[-1]\n\n #Maximum frequency\n k_max = math.floor(N/2.0)\n\n #Number of steps to final time\n steps = math.ceil(T/delta_t)\n\n #Initial vorticity to Fourier space\n w_h = torch.rfft(w0, 2, normalized=False, onesided=False)\n\n #Forcing to Fourier space\n f_h = torch.rfft(f, 2, normalized=False, onesided=False)\n\n #If same forcing for the whole batch\n if len(f_h.size()) < len(w_h.size()):\n f_h = torch.unsqueeze(f_h, 0)\n\n #Record solution every this number of steps\n record_time = math.floor(steps/record_steps)\n\n #Wavenumbers in y-direction\n k_y = torch.cat((torch.arange(start=0, end=k_max, step=1, device=w0.device), torch.arange(start=-k_max, end=0, step=1, device=w0.device)), 0).repeat(N,1)\n #Wavenumbers in x-direction\n k_x = k_y.transpose(0,1)\n #Negative Laplacian in Fourier space\n lap = 4*(math.pi**2)*(k_x**2 + k_y**2)\n lap[0,0] = 1.0\n #Dealiasing mask\n dealias = torch.unsqueeze(torch.logical_and(torch.abs(k_y) <= (2.0/3.0)*k_max, torch.abs(k_x) <= (2.0/3.0)*k_max).float(), 0)\n\n #Saving solution and time\n sol = torch.zeros(*w0.size(), record_steps, device=w0.device)\n sol_t = torch.zeros(record_steps, device=w0.device)\n\n #Record counter\n c = 0\n #Physical time\n t = 0.0\n for j in tqdm(range(steps)):\n #Stream function in Fourier space: solve Poisson equation\n psi_h = w_h.clone()\n psi_h[...,0] = psi_h[...,0]/lap\n psi_h[...,1] = psi_h[...,1]/lap\n\n #Velocity field in x-direction = 
psi_y\n q = psi_h.clone()\n temp = q[...,0].clone()\n q[...,0] = -2*math.pi*k_y*q[...,1]\n q[...,1] = 2*math.pi*k_y*temp\n q = torch.irfft(q, 2, normalized=False, onesided=False, signal_sizes=(N,N))\n\n #Velocity field in y-direction = -psi_x\n v = psi_h.clone()\n temp = v[...,0].clone()\n v[...,0] = 2*math.pi*k_x*v[...,1]\n v[...,1] = -2*math.pi*k_x*temp\n v = torch.irfft(v, 2, normalized=False, onesided=False, signal_sizes=(N,N))\n\n #Partial x of vorticity\n w_x = w_h.clone()\n temp = w_x[...,0].clone()\n w_x[...,0] = -2*math.pi*k_x*w_x[...,1]\n w_x[...,1] = 2*math.pi*k_x*temp\n w_x = torch.irfft(w_x, 2, normalized=False, onesided=False, signal_sizes=(N,N))\n\n #Partial y of vorticity\n w_y = w_h.clone()\n temp = w_y[...,0].clone()\n w_y[...,0] = -2*math.pi*k_y*w_y[...,1]\n w_y[...,1] = 2*math.pi*k_y*temp\n w_y = torch.irfft(w_y, 2, normalized=False, onesided=False, signal_sizes=(N,N))\n\n #Non-linear term (u.grad(w)): compute in physical space then back to Fourier space\n F_h = torch.rfft(q*w_x + v*w_y, 2, normalized=False, onesided=False)\n\n #Dealias\n F_h[...,0] = dealias* F_h[...,0]\n F_h[...,1] = dealias* F_h[...,1]\n\n #Cranck-Nicholson update\n w_h[...,0] = (-delta_t*F_h[...,0] + delta_t*f_h[...,0] + (1.0 - 0.5*delta_t*visc*lap)*w_h[...,0])/(1.0 + 0.5*delta_t*visc*lap)\n w_h[...,1] = (-delta_t*F_h[...,1] + delta_t*f_h[...,1] + (1.0 - 0.5*delta_t*visc*lap)*w_h[...,1])/(1.0 + 0.5*delta_t*visc*lap)\n\n #Update real time (used only for recording)\n t += delta_t\n\n if (j+1) % record_time == 0:\n #Solution in physical space\n w = torch.irfft(w_h, 2, normalized=False, onesided=False, signal_sizes=(N,N))\n\n #Record solution and time\n sol[...,c] = w\n sol_t[c] = t\n\n c += 1\n return sol, sol_t\n\n\nif __name__ == '__main__':\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n # Resolution\n # s = 2048\n # sub = 1\n #\n # # Number of solutions to generate\n # N = 10\n #\n # # Set up 2d GRF with covariance parameters\n # GRF = GaussianRF(2, s, alpha=2.5, tau=7, device=device)\n #\n # # Forcing function: 0.1*(sin(2pi(x+y)) + cos(2pi(x+y)))\n # t = torch.linspace(0, 1, s + 1, device=device)\n # t = t[0:-1]\n #\n # X, Y = torch.meshgrid(t, t)\n # f = 0.1 * (torch.sin(2 * math.pi * (X + Y)) + torch.cos(2 * math.pi * (X + Y)))\n #\n # # Number of snapshots from solution\n # record_steps = 200\n #\n # # Inputs\n # a = torch.zeros(N, s, s)\n # # Solutions\n # u = torch.zeros(N, s, s, record_steps)\n #\n # # Solve equations in batches (order of magnitude speed-up)\n #\n # # Batch size\n # bsize = 10\n #\n # c = 0\n # t0 = default_timer()\n # for j in range(N // bsize):\n # # Sample random feilds\n # w0 = GRF.sample(bsize)\n #\n # # Solve NS\n # sol, sol_t = navier_stokes_2d(w0, f, 1e-3, 50.0, 1e-4, record_steps)\n #\n # a[c:(c + bsize), ...] = w0\n # u[c:(c + bsize), ...] = sol\n #\n # c += bsize\n # t1 = default_timer()\n # print(j, c, t1 - t0)\n # torch.save(\n # {\n # 'a': a.cpu(),\n # 'u': u.cpu(),\n # 't': sol_t.cpu()\n # },\n # 'data/ns_data.pt'\n # )\n # scipy.io.savemat('data/ns_data.mat', mdict={'a': a.cpu().numpy(), 'u': u.cpu().numpy(), 't': sol_t.cpu().numpy()})"
] |
[
[
"torch.rfft",
"torch.zeros",
"torch.arange",
"torch.irfft",
"torch.unsqueeze",
"torch.abs",
"torch.cuda.is_available",
"torch.ifft",
"torch.randn"
]
] |
TheCacophonyProject/python-api
|
[
"49b9e1c9aa16b0f47344c3e57dbe27b232372719"
] |
[
"examples/example0.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Example of querying the Cacophony Project Servers via a REST API using a Python Client.\"\"\"\n# #### Configuration\n\nimport logging, sys, os\n\nlogging.basicConfig(\n format=\"%(asctime)s : %(module)s :%(levelname)s : %(message)s\",\n level=logging.DEBUG,\n stream=sys.stdout,\n)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogging.info(\n \"Logging Started ---------------- LEVEL: {} -------------\".format(\n logging.getLevelName(logger.level)\n )\n)\n\n\n# %%\nimport json\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\n\n# %%\ntest = \"\"\nstr(test)\n# %%\nfrom cacophonyapi.user import UserAPI\nfrom cacophonyapi.config import Config\n\n\n# # %%\nconfig = Config().load_config(\n config_file=os.path.join(os.getcwd(), \".env\", \"defaultconfig.json\")\n)\n\n# %%\n\n# %%\n\ncp_client = UserAPI(\n baseurl=config.api_url,\n username=config.admin_username,\n password=config.admin_password,\n)\ncp_client.version\n\n# %% [markdown]\n# ### SHOW devices and groups\n\nprint(pd.DataFrame(cp_client.get_devices_as_json()))\n\n\nprint(pd.DataFrame(cp_client.get_groups_as_json()[\"groups\"]))\n\n# %% [markdown]\n#\n# ## Define some helper functions\n#\n# strToSqlDateTime : Generates a datatime object from a string\n#\n# recordingSubset : Retrieve a subset of dataframe by recording datetime and columns\n#\n# pandas_df_to_markdown_table : Displays a markdown table from a dataframe\n#\n# pandas_df_to_markdown_table : Generates a string representing the markdown table\n#\n#\n\n# %%\nstrToSqlDateTime = lambda x: datetime.strptime(x, \"%Y-%m-%d %H:%M:%S\")\n\n\ndef recordingSubset(dataframe=None, startDateUTC=None, endDateUTC=None, fields=None):\n \"\"\"Generate a DataFrame from a subset of recording time points of an existing Dataframe.\n\n Returns a Pandas Dataframe subset of recordingings by date and Time inclusive of the lower and upper limit. 
NB date ranges must be in GMT\n\n :param startDateUTC: lower value for recordings to select\n :param endDateUTC: upper value (inclusive) of recordings to select\n :param fields: a list of columns to include in the subset of recordings\n \"\"\"\n # fields = [\"recordingDateTime_DT_local\",'recordingURL','comment']\n return dataframe[\n (df.recordingDateTime >= startDateUTC) & (df.recordingDateTime <= endDateUTC)\n ].loc[:, fields]\n\n\ndef pandas_df_to_markdown_table(df, *args, **kwargs):\n \"\"\"Generate a Markdown Display (IPython) of the Pandas Dataframe.\n\n Displays in a IPython environment such as Jupyter Notebook\n\n :param columns: [Optional, default 'list(df.columns)'] A list of columns to display\n \"\"\"\n df[\"index\"] = df.index\n if \"columns\" not in kwargs:\n columns = list(df.columns)\n else:\n columns = [\"index\"] + kwargs[\"columns\"]\n\n from IPython.display import Markdown, display\n\n fmt = [\"---\" for i in range(len(df.columns))]\n df_fmt = pd.DataFrame([fmt], columns=df.columns)\n df_formatted = pd.concat([df_fmt, df])\n display(Markdown(df_formatted.loc[:, columns].to_csv(sep=\"|\", index=False)))\n\n\ndef pandas_df_to_markdown_table_string(df, *args, **kwargs):\n \"\"\"Generate a Markdown string representing the Pandas Dataframe.\n\n Returns a string representing the dataframe in markdown syntax\n\n :param columns: [Optional, default 'list(df.columns)'] A list of columns to display\n \"\"\"\n df[\"index\"] = df.index\n if \"columns\" not in kwargs:\n columns = df.columns.to_list()\n else:\n columns = [\"index\"] + kwargs[\"columns\"]\n from IPython.display import Markdown, display\n\n headers = [\"- {} -\".format(col) for i, col in enumerate(df.columns)]\n fmt = [\"---\" for i in range(len(df.columns))]\n df_headers = pd.DataFrame([headers], columns=df.columns)\n df_fmt = pd.DataFrame([fmt], columns=df.columns)\n df_formatted = pd.concat([df_fmt, df])\n return df_formatted.loc[:, columns].to_csv(sep=\"|\", index=False)\n\n\n# %% [markdown]\n#\n# ## Query for first 300 recordings\n#\n#\n\n# %%\nqueryResponse = cp_client.query(\n endDate=strToSqlDateTime(\"2019-11-06 06:30:00\"),\n startDate=strToSqlDateTime(\"2019-11-01 19:00:00\"),\n limit=300,\n offset=0,\n tagmode=\"any\",\n)\ndf = pd.DataFrame(queryResponse)\n\n\n# %%\n\ndf.columns\n\n\n# %%\npd.options.display.html.table_schema = True\npd.options.display.max_rows = None\nfrom IPython.display import HTML\n\n# %% [markdown]\n# ### Format the Data\n\n# %%\ndf[\"recordingDateTime_DT\"] = pd.to_datetime(df[\"recordingDateTime\"])\ndf[\"Date\"] = df[\"recordingDateTime_DT\"].dt.date # TODO: Check where we are using this\ndf[\"recordingDateTime_DT_local\"] = (\n df[\"recordingDateTime_DT\"]\n .dt.tz_localize(\"Pacific/Auckland\")\n .dt.strftime(\"%Y/%m/%d %H:%M:%S\")\n)\ndf[\"recordingURL\"] = df[\"id\"].apply(\n lambda id: '<a href=\"https://browse.cacophony.org.nz/recording/{id}\">{id}</a>'.format(\n id=id\n )\n)\ndf[\"recordingURLmd\"] = df[\"id\"].apply(\n lambda id: \"[{id}](https://browse.cacophony.org.nz/recording/{id})\".format(id=id)\n)\n\n\ndf[\"metric_recordingCount\"] = 1\ndf[\"metric_gainIssueTrue\"] = (df.comment.str.contains(\"[G|g]ain\") == True).apply(\n lambda x: 1 if x else 0\n)\ndf[\"metric_gainIssueTrue\"] = (df.comment.str.contains(\"(?i)[G|g]ain\") == True).apply(\n lambda x: 1 if x else 0\n)\n\ndf[\"metric_gainIssueFalse\"] = (\n (df.comment.str.contains(\"[G|g]ain\") != True) | (df.comment.isnull())\n).apply(lambda x: 1 if x else 0)\ndf[\"metric_otherComment\"] = (\n 
(df.comment.str.contains(\"[G|g]ain\") != True) & (~df.comment.isnull())\n).apply(lambda x: 1 if x else 0)\n\n# %% [markdown]\n# # EXAMPLES\n#\n# exaples of various queries\n#\n\n# %%\nHTML(\n recordingSubset(\n df,\n \"2019-11-04T06:00Z\",\n \"2019-11-04T21:00Z\",\n [\"recordingDateTime_DT_local\", \"recordingURL\", \"comment\"],\n ).to_html(escape=False)\n)\n\n\n# %%\ndoi = \"2019-11-05\"\npandas_df_to_markdown_table(\n recordingSubset(\n df[df.metric_gainIssueTrue == 1],\n \"{}T06:00Z\".format(doi),\n \"{}T21:00Z\".format(doi),\n [\"recordingDateTime_DT_local\", \"recordingURLmd\", \"comment\"],\n )\n)\n\n\n# %%\n# df.groupby(by=df['recordingDateTime_DT'].dt.date).sum()\ndfp = pd.pivot_table(\n df,\n index=[\"Date\"],\n values=[\n \"metric_recordingCount\",\n \"metric_gainIssueTrue\",\n \"metric_gainIssueFalse\",\n \"metric_otherComment\",\n ],\n aggfunc=np.sum,\n)\n# dfp\n\ndfp[\"percentGainIssueTrue\"] = (\n dfp.metric_gainIssueTrue / dfp.metric_recordingCount * 100.0\n).map(\"{0:,.2f}%\".format)\ndfp.loc[\n :,\n [\n \"percentGainIssueTrue\",\n \"metric_recordingCount\",\n \"metric_gainIssueTrue\",\n \"metric_gainIssueFalse\",\n \"metric_otherComment\",\n ],\n]\npandas_df_to_markdown_table(\n dfp,\n columns=[\n \"percentGainIssueTrue\",\n \"metric_recordingCount\",\n \"metric_gainIssueTrue\",\n \"metric_gainIssueFalse\",\n \"metric_otherComment\",\n ],\n)\n\n\n# %%\nprint(\n pandas_df_to_markdown_table_string(\n dfp,\n columns=[\n \"percentGainIssueTrue\",\n \"metric_recordingCount\",\n \"metric_gainIssueTrue\",\n \"metric_gainIssueFalse\",\n \"metric_otherComment\",\n ],\n )\n)\n\n\n# %%\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame",
"pandas.pivot_table",
"pandas.concat"
]
] |
roychowdhuryresearch/HFO-Classification
|
[
"c672852257410664b0351a3f9505ca6ad0d3c315"
] |
[
"data_preprocessing/create_data_90min/label_filter_90.py"
] |
[
"from utilities import dump_pickle\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\ndef fetch_hfo_add_info(stats, csv_dir ,pt_name):\n \"\"\"\n Get soz and removed, spike information from csv\n \"\"\"\n df = pd.DataFrame() \n df['channel_name'] = np.char.upper(np.squeeze(stats[\"info\"]))\n start_end = np.squeeze(stats[\"start_end\"])\n df['start'] = start_end[:, 0]\n df['end'] = start_end[:, 1]\n fn = f\"{pt_name}.csv\"\n hfo_df = pd.read_csv(os.path.join(csv_dir, fn))\n hfo_df['channel_names'] = np.char.upper(np.array(list(hfo_df['channel_names'].values)))\n new_df = pd.merge(df, hfo_df, how='left', left_on=['channel_name','start', 'end'], right_on = ['channel_names','start', 'end'])\n dff = new_df[[\"channel_name\",'start', 'end', 'predictions_a']]\n artifacts_label = new_df['predictions_a'].values\n return artifacts_label\n #print(new_df.head)\n\ndef add_prediction(in_folder, csv_dir):\n valid_patients = os.listdir(\"/media/yipeng/data/HFO_clasification/HFO_wise_stats_90\")\n valid = set()\n for pp in valid_patients:\n valid.add(pp.split(\".\")[0])\n for pn in os.listdir(in_folder):\n if pn not in valid:\n continue\n folder = os.path.join(in_folder, pn)\n loaded = np.load(os.path.join(folder,\"data_flzoomin.npz\"), allow_pickle=True) \n artifacts_label = fetch_hfo_add_info(loaded, csv_dir ,pn)\n dump_pickle(os.path.join(folder,\"artifacts.pkl\"),artifacts_label)\n\nif __name__ == \"__main__\":\n in_folder = \"/media/yipeng/data/HFO_clasification/HFO_classification_training_data_spike_90_10_500\"\n add_prediction(in_folder)"
] |
[
[
"pandas.DataFrame",
"numpy.squeeze",
"pandas.merge"
]
] |
Zamony/qlearning4k
|
[
"8d7e54e97c00e0e066567adf8246e94e125b35ae"
] |
[
"qlearning4k/games/snake.py"
] |
[
"__author__ = \"Fariz Rahman\"\r\n\r\nimport numpy as np\r\nfrom .game import Game\r\n\r\n\r\nactions = {0:'left', 1:'right', 2:'up', 3:'down', 4:'idle'}\r\nforbidden_moves = [(0, 1), (1, 0), (2, 3), (3, 2)]\r\n\r\nclass Snake(Game):\r\n\r\n def __init__(self, grid_size=10, snake_length=3):\r\n self.grid_size = grid_size\r\n self.snake_length = snake_length\r\n self.reset()\r\n self.state_changed = True\r\n\r\n @property\r\n def name(self):\r\n return \"Snake\"\r\n @property\r\n def nb_actions(self):\r\n return 5\r\n\r\n def play(self, action):\r\n assert action in range(5), \"Invalid action.\"\r\n self.scored = False\r\n self.move_snake(action)\r\n if self.fruit == self.snake[0]:\r\n self.scored = True\r\n self.grow()\r\n self.drop_fruit()\r\n elif self.self_bite() or self.hit_border():\r\n self.game_over = True\r\n\r\n def grow(self):\r\n end = self.snake[-1]\r\n seg = self.snake[-2] # segment just before end\r\n if end[0] == seg[0] - 1:\r\n # grow to left\r\n p = (end[0] - 1, end[1])\r\n elif end[0] == seg[0] + 1:\r\n # grow to rght\r\n p = (end[0] + 1, end[1])\r\n elif end[1] == seg[1] - 1:\r\n # grow up\r\n p = (end[0], end[1] - 1)\r\n else:\r\n p = (end[0], end[1] + 1)\r\n self.snake.append(p)\r\n\r\n def drop_fruit(self):\r\n if len(self.snake) >= (self.grid_size - 2) ** 2:\r\n self.fruit = (-1, -1)\r\n pass\r\n while True:\r\n fruit = np.random.randint(1, self.grid_size - 1, 2)\r\n fruit = (fruit[0], fruit[1])\r\n if fruit in self.snake:\r\n continue\r\n else:\r\n self.fruit = fruit\r\n break\r\n\r\n def move_snake(self, action):\r\n if action == 4 or (action, self.previous_action) in forbidden_moves:\r\n action = self.previous_action\r\n else:\r\n self.previous_action = action\r\n head = self.snake[0]\r\n if action == 0:\r\n p = (head[0] - 1, head[1])\r\n elif action == 1:\r\n p = (head[0] + 1, head[1])\r\n elif action == 2:\r\n p = (head[0], head[1] - 1)\r\n elif action == 3:\r\n p = (head[0], head[1] + 1)\r\n self.snake.insert(0, p)\r\n self.snake.pop()\r\n\r\n def get_state(self):\r\n canvas = np.ones((self.grid_size, ) * 2)\r\n canvas[1:-1, 1:-1] = 0.\r\n for seg in self.snake:\r\n canvas[seg[0], seg[1]] = 1.\r\n canvas[self.fruit[0], self.fruit[1]] = .5\r\n return canvas\r\n\r\n def get_score(self):\r\n if self.game_over:\r\n score = -1\r\n elif self.scored:\r\n score = len(self.snake)\r\n else:\r\n score = 0\r\n return score\r\n\r\n def reset(self):\r\n grid_size = self.grid_size\r\n snake_length = self.snake_length\r\n head_x = (grid_size - snake_length) // 2\r\n self.snake = [(x, grid_size // 2) for x in range (head_x, head_x + snake_length)]\r\n self.game_over = False\r\n self.scored = False\r\n self.drop_fruit()\r\n if np.random.randint(2) == 0:\r\n self.previous_action = 0\r\n else:\r\n self.previous_action = 1\r\n self.snake.reverse()\r\n self.border = []\r\n for z in range(grid_size):\r\n self.border += [(z, 0), (z, grid_size - 1), (0, z), (grid_size - 1, z)]\r\n\r\n def left(self):\r\n self.play(0)\r\n\r\n def right(self):\r\n self.play(1)\r\n\r\n def up(self):\r\n self.play(2)\r\n\r\n def down(self):\r\n self.play(3)\r\n\r\n def idle(self):\r\n self.play(4)\r\n\r\n def self_bite(self):\r\n return len(self.snake) > len(set(self.snake))\r\n\r\n def hit_border(self):\r\n return self.snake[0] in self.border or self.snake[-1] in self.border\r\n\r\n def is_over(self):\r\n return self.self_bite() or self.hit_border()\r\n\r\n def is_won(self):\r\n return len(self.snake) > self.snake_length\r\n"
] |
[
[
"numpy.ones",
"numpy.random.randint"
]
] |
prorevizor/noc
|
[
"37e44b8afc64318b10699c06a1138eee9e7d6a4e"
] |
[
"core/text.py"
] |
[
"# ---------------------------------------------------------------------\n# Various text-processing utilities\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2020 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\nfrom itertools import zip_longest\n\n# Third-party modules\nfrom numpy import array\nfrom typing import List, Union, Iterable\n\nrx_header_start = re.compile(r\"^\\s*[-=]+[\\s\\+]+[-=]+\")\nrx_col = re.compile(r\"^([\\s\\+]*)([\\-]+|[=]+)\")\n\n\ndef default_line_wrapper(p_line):\n return p_line.expandtabs()\n\n\ndef parse_table(\n s,\n allow_wrap=False,\n allow_extend=False,\n expand_columns=False,\n max_width=0,\n footer=None,\n n_row_delim=\"\",\n line_wrapper=default_line_wrapper,\n row_wrapper=None,\n):\n \"\"\"\n Parse string containing table an return a list of table rows.\n Each row is a list of cells.\n Columns are determined by a sequences of ---- or ==== which are\n determines rows bounds.\n Examples:\n First Second Third\n ----- ------ -----\n a b c\n ddd eee fff\n Will be parsed down to the [[\"a\",\"b\",\"c\"],[\"ddd\",\"eee\",\"fff\"]]\n\n :param s: Table for parsing\n :type s: str\n :param allow_wrap: Union if cell contins multiple line\n :type allow_wrap: bool\n :param allow_extend: Check if column on row longest then column width, enlarge it and shift rest of columns\n :type allow_extend: bool\n :param expand_columns: Expand columns covering all available width\n :type expand_columns: bool\n :param max_width: Max table width, if table width < max_width extend length, else - nothing\n :type max_width: int\n :param footer: stop iteration if match expression footer\n :type footer: string\n :param n_row_delim: Append delimiter to next cell line\n :type n_row_delim: string\n :param line_wrapper: Call line_wrapper with line argument\n :type line_wrapper: callable\n :param row_wrapper: Call row_wrapper with row argument\n :type row_wrapper: callable\n \"\"\"\n r = []\n columns = []\n if footer is not None:\n rx_footer = re.compile(footer)\n if line_wrapper and not callable(line_wrapper):\n line_wrapper = None\n if row_wrapper and not callable(row_wrapper):\n row_wrapper = None\n for line in s.splitlines():\n if line_wrapper:\n # Replace tabs with spaces with step 8\n line = line_wrapper(line)\n if not line.strip() and footer is None:\n columns = []\n continue\n if footer is not None and rx_footer.search(line):\n break # Footer reached, stop\n if not columns and rx_header_start.match(line):\n # Column delimiters found. 
try to determine column's width\n columns = []\n x = 0\n while line:\n match = rx_col.match(line)\n if not match:\n break\n spaces = len(match.group(1))\n dashes = len(match.group(2))\n columns += [(x + spaces, x + spaces + dashes)]\n x += match.end()\n line = line[match.end() :]\n if max_width and columns[-1][-1] < max_width:\n columns[-1] = (columns[-1][0], max_width)\n if expand_columns:\n columns = [(cc[0], nc[0] - 1) for cc, nc in zip(columns, columns[1:])] + [\n columns[-1]\n ]\n elif columns: # Fetch cells\n if allow_extend:\n # Find which spaces between column not empty\n ll = len(line)\n for i, (f, t) in enumerate(columns):\n if t < ll and line[t].strip():\n # If spaces not empty - shift column width equal size row\n shift = len(line[f:].split()[0]) - (t - f)\n # Enlarge column\n columns[i] = (f, t + shift)\n # Shift rest\n columns[i + 1 :] = [(v[0] + shift, v[1] + shift) for v in columns[i + 1 :]]\n break\n if allow_wrap:\n row = [line[f:t] for f, t in columns]\n if r and not row[0].strip():\n # first column is empty\n for i, x in enumerate(row):\n if (\n x.strip()\n and not r[-1][i].endswith(n_row_delim)\n and not x.startswith(n_row_delim)\n ):\n r[-1][i] += \"%s%s\" % (n_row_delim, row_wrapper(x) if row_wrapper else x)\n else:\n r[-1][i] += row_wrapper(x) if row_wrapper else x\n else:\n r += [row]\n else:\n r += [\n [\n row_wrapper(line[f:t]).strip() if row_wrapper else line[f:t].strip()\n for f, t in columns\n ]\n ]\n if allow_wrap:\n return [[x.strip() for x in rr] for rr in r]\n else:\n return r\n\n\n#\n# Convert HTML to plain text\n#\nrx_html_tags = re.compile(\"</?[^>+]+>\", re.MULTILINE | re.DOTALL)\n\n\ndef strip_html_tags(s):\n t = rx_html_tags.sub(\"\", s)\n for k, v in [(\" \", \" \"), (\"<\", \"<\"), (\">\", \">\"), (\"&\", \"&\")]:\n t = t.replace(k, v)\n return t\n\n\n#\n# Convert XML to list of elements\n#\ndef xml_to_table(s, root, row):\n # pylint: disable=line-too-long\n \"\"\"\n >>> xml_to_table('<?xml version=\"1.0\" encoding=\"UTF-8\" ?><response><action><row><a>1</a><b>2</b></row><row><a>3</a><b>4</b></row></action></response>','action','row') # noqa\n [{'a': '1', 'b': '2'}, {'a': '3', 'b': '4'}]\n \"\"\"\n # Detect root element\n match = re.search(r\"<%s>(.*)</%s>\" % (root, root), s, re.DOTALL | re.IGNORECASE)\n if not match:\n return []\n s = match.group(1)\n row_re = re.compile(r\"<%s>(.*?)</%s>\" % (row, row), re.DOTALL | re.IGNORECASE)\n item_re = re.compile(r\"<([^\\]+])>(.*?)</\\1>\", re.DOTALL | re.IGNORECASE)\n r = []\n for m in [x for x in row_re.split(s) if x]:\n data = item_re.findall(m)\n if data:\n r += [dict(data)]\n return r\n\n\n#\n# Convert list of values to string of ranges\n#\ndef list_to_ranges(s):\n \"\"\"\n >>> list_to_ranges([])\n ''\n >>> list_to_ranges([1])\n '1'\n >>> list_to_ranges([1,2])\n '1-2'\n >>> list_to_ranges([1,2,3])\n '1-3'\n >>> list_to_ranges([1,2,3,5])\n '1-3,5'\n >>> list_to_ranges([1,2,3,5,6,7])\n '1-3,5-7'\n >>> list_to_ranges(range(1,4001))\n '1-4000'\n \"\"\"\n\n def f():\n if last_start == last_end:\n return str(last_start)\n else:\n return \"%d-%d\" % (last_start, last_end)\n\n last_start = None\n last_end = None\n r = []\n for i in sorted(s):\n if last_end is not None and i == last_end + 1:\n last_end += 1\n else:\n if last_start is not None:\n r += [f()]\n last_start = i\n last_end = i\n if last_start is not None:\n r += [f()]\n return \",\".join(r)\n\n\n#\n# Convert range string to a list of integers\n#\nrx_range = re.compile(r\"^(\\d+)\\s*-\\s*(\\d+)$\")\n\n\ndef ranges_to_list(s, splitter=\",\"):\n 
\"\"\"\n >>> ranges_to_list(\"1\")\n [1]\n >>> ranges_to_list(\"1, 2\")\n [1, 2]\n >>> ranges_to_list(\"1, 10-12\")\n [1, 10, 11, 12]\n >>> ranges_to_list(\"1, 10-12, 15, 17-19\")\n [1, 10, 11, 12, 15, 17, 18, 19]\n \"\"\"\n r = []\n if \"to\" in s:\n s = s.replace(\" to \", \"-\")\n for p in s.split(splitter):\n p = p.strip()\n try:\n r += [int(p)]\n continue\n except ValueError:\n pass\n match = rx_range.match(p)\n if not match:\n raise SyntaxError\n f, t = [int(x) for x in match.groups()]\n if f >= t:\n raise SyntaxError\n for i in range(f, t + 1):\n r += [i]\n return sorted(r)\n\n\ndef replace_re_group(expr, group, pattern):\n if isinstance(expr, bytes):\n return _replace_re_group_binary(expr, group, pattern)\n return _replace_re_group_text(expr, group, pattern)\n\n\ndef _replace_re_group_text(expr: str, group: str, pattern: str) -> str:\n \"\"\"\n Replace regular expression group with pattern\n\n >>> replace_re_group(\"nothing\",\"(?P<groupname>\",\"groupvalue\")\n 'nothing'\n >>> replace_re_group(\"the (?P<groupname>simple) test\",\"(?P<groupname>\",\"groupvalue\")\n 'the groupvalue test'\n >>> replace_re_group(\"the (?P<groupname> nested (test)>)\",\"(?P<groupname>\",\"groupvalue\")\n 'the groupvalue'\n \"\"\"\n r = []\n lg = len(group)\n while expr:\n idx = expr.find(group)\n if idx == -1:\n break\n r += [expr[:idx]]\n expr = expr[idx + lg :]\n level = 1 # Level of parenthesis nesting\n while expr:\n c = expr[0]\n expr = expr[1:]\n if c == \"\\\\\":\n # Skip quoted character\n expr = expr[1:]\n continue\n elif c == \"(\":\n # Increase nesting level\n level += 1\n continue\n elif c == \")\":\n # Decrease nesting level\n level -= 1\n if level == 0:\n # Replace with pattern and search for next\n r += [pattern]\n break\n r += [expr]\n return \"\".join(r)\n\n\ndef _replace_re_group_binary(expr: bytes, group: bytes, pattern: bytes) -> bytes:\n \"\"\"\n Replace regular expression group with pattern\n\n >>> replace_re_group(\"nothing\",\"(?P<groupname>\",\"groupvalue\")\n 'nothing'\n >>> replace_re_group(\"the (?P<groupname>simple) test\",\"(?P<groupname>\",\"groupvalue\")\n 'the groupvalue test'\n >>> replace_re_group(\"the (?P<groupname> nested (test)>)\",\"(?P<groupname>\",\"groupvalue\")\n 'the groupvalue'\n \"\"\"\n r = []\n lg = len(group)\n while expr:\n idx = expr.find(group)\n if idx == -1:\n break\n r += [expr[:idx]]\n expr = expr[idx + lg :]\n level = 1 # Level of parenthesis nesting\n while expr:\n c = expr[0]\n expr = expr[1:]\n if c == 0x5C: # \"\\\\\"\n # Skip quoted character\n expr = expr[1:]\n continue\n elif c == 0x28: # \"(\"\n # Increase nesting level\n level += 1\n continue\n elif c == 0x29: # \")\"\n # Decrease nesting level\n level -= 1\n if level == 0:\n # Replace with pattern and search for next\n r += [pattern]\n break\n r += [expr]\n return b\"\".join(r)\n\n\ndef indent(text, n=4):\n \"\"\"\n Indent each line of text with spaces\n\n :param text: text\n :param n: amount of spaces to ident\n\n >>> indent(\"\")\n ''\n >>> indent(\"the quick brown fox\\\\njumped over an lazy dog\\\\nend\")\n ' the quick brown fox\\\\n jumped over an lazy dog\\\\n end'\n \"\"\"\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)\n\n\nrx_split_alnum = re.compile(r\"(\\d+|[^0-9]+)\")\n\n\ndef _iter_split_alnum(s: str) -> Iterable[str]:\n \"\"\"\n Iterator yielding alphabetic and numeric sections if string\n\n :param s:\n :return:\n \"\"\"\n for match in rx_split_alnum.finditer(s):\n yield match.group(0)\n\n\ndef split_alnum(s: str) -> 
List[Union[str, int]]:\n \"\"\"\n Split line to a sequence of iterating alpha and digit strings\n\n :param s:\n :type s: str\n :return: list\n :rtype: list\n\n >>> split_alnum(\"Fa 0/1\")\n ['Fa ', 0, '/', 1]\n >>> split_alnum(\"Fa 0/1.15\")\n ['Fa ', 0, '/', 1, '.', 15]\n >>> split_alnum(\"ge-1/0/1\")\n ['ge-', 1, '/', 0, '/', 1]\n >>> split_alnum(\"ge-1/0/1.15\")\n ['ge-', 1, '/', 0, '/', 1, '.', 15]\n \"\"\"\n\n def maybe_int(v: str) -> Union[str, int]:\n try:\n return int(v)\n except ValueError:\n return v\n\n return [maybe_int(x) for x in _iter_split_alnum(s)]\n\n\ndef alnum_key(s: str) -> str:\n \"\"\"\n Comparable alpha-numeric key\n :param s:\n :return:\n \"\"\"\n\n def maybe_formatted_int(v: str) -> str:\n try:\n return \"%012d\" % int(v)\n except ValueError:\n return v\n\n return \"\".join(maybe_formatted_int(x) for x in _iter_split_alnum(s))\n\n\nrx_notspace = re.compile(r\"^\\S+\")\n\n\ndef find_indented(s):\n \"\"\"\n Parses following text structure:\n\n section 1 header\n line 1\n line 2\n section 2 header\n line 1\n line 2\n\n >>> find_idented(\"section0\\\\nsection 1\\\\n line 1-1\\\\n line 1-2\\\\n\\\\n\"\\\n \"section 2\\\\n line 2-1\\\\n line 2-2\")\n ['section 1\\n line 1-1\\n line 1-2', 'section 2\\n line 2-1\\n line 2-2']\n :param s:\n :return:\n \"\"\"\n r = []\n cr = []\n for line in s.splitlines():\n if rx_notspace.match(line):\n if len(cr) > 1:\n r += [\"\\n\".join(cr)]\n cr = [line]\n continue\n elif line:\n cr += [line]\n if len(cr) > 1:\n r += [\"\\n\".join(cr)]\n return r\n\n\ndef parse_kv(kmap, data, sep=\":\"):\n \"\"\"\n :param kmap: text -> dict mapping\n :param data:\n :return: dict\n \"\"\"\n r = {}\n for line in data.splitlines():\n if sep not in line:\n continue\n k, v = line.strip().split(sep, 1)\n k = k.strip().lower()\n if k in kmap:\n r[kmap[k]] = v.strip()\n return r\n\n\ndef str_dict(d):\n \"\"\"\n Convert dict to key=value, key=value, .... 
string\n :type d: dict\n :rtype: str\n \"\"\"\n return \", \".join(\"%s=%s\" % (k, d[k]) for k in d)\n\n\nrx_safe_path = re.compile(r\"[^a-z0-9\\-\\+]+\", re.IGNORECASE)\n\n\ndef quote_safe_path(d):\n return rx_safe_path.sub(\"_\", d)\n\n\ndef to_seconds(v):\n \"\"\"\n Convert string value to seconds.\n Available acronyms are h, d, w, m, y\n \"\"\"\n m = 1\n if v.endswith(\"h\"):\n v = v[:-1]\n m = 3600\n elif v.endswith(\"d\"):\n v = v[:-1]\n m = 24 * 3600\n elif v.endswith(\"w\"):\n v = v[:-1]\n m = 7 * 24 * 3600\n elif v.endswith(\"m\"):\n v = v[:-1]\n m = 30 * 24 * 3600\n elif v.endswith(\"y\"):\n v = v[:-1]\n m = 365 * 24 * 3600\n try:\n v = int(v)\n except ValueError:\n raise \"Invalid time: %s\" % v\n return v * m\n\n\ndef format_table(widths, data, sep=\" \", hsep=\" \"):\n \"\"\"\n Print formatted table.\n :param widths: list of minimal column widths\n :param data: list of rows, first row is the header\n :param sep: column separator\n :param hsep: header line separator\n \"\"\"\n # Calculate column widths\n widths = list(widths)\n for row in data:\n widths = [max(x, len(y)) for x, y in zip(widths, row)]\n # Build print mask\n mask = sep.join(\"%%-%ds\" % w for w in widths)\n out = [\n # Header line\n mask % tuple(data[0]),\n # Header separator\n hsep.join(\"-\" * w for w in widths),\n ]\n out += [mask % tuple(row) for row in data[1:]]\n return \"\\n\".join(out)\n\n\nrx_non_numbers = re.compile(\"[^0-9]+\")\n\n\ndef clean_number(n):\n \"\"\"\n Remove all non-number occurences\n :param n:\n :return:\n \"\"\"\n return rx_non_numbers.sub(\"\", n)\n\n\ndef safe_shadow(text):\n \"\"\"\n Shadow string to first and last char\n :param text:\n :return:\n\n >>> safe_shadow(None)\n 'None'\n >>>safe_shadow(\"s\")\n '******'\n >>>safe_shadow(\"sssssss\")\n 's******s'\n >>> safe_shadow(1)\n '******'\n >>> safe_shadow([1, 2])\n '******'\n \"\"\"\n if not text:\n return \"None\"\n elif not isinstance(text, str):\n return \"******\"\n elif len(text) > 2:\n return \"%s******%s\" % (text[0], text[-1])\n else:\n return \"******\"\n\n\ndef ch_escape(s):\n return s.replace(\"\\n\", \"\\\\n\").replace(\"\\t\", \"\\\\t\").replace(\"\\\\\", \"\\\\\\\\\")\n\n\nESC_REPLACEMENTS = {re.escape(\"\\n\"): \" \", re.escape(\"\\t\"): \" \"}\n\nrx_escape = re.compile(\"|\".join(ESC_REPLACEMENTS))\n\n\ndef tsv_escape(text):\n return rx_escape.sub(lambda match: ESC_REPLACEMENTS[re.escape(match.group(0))], text)\n\n\ndef parse_table_header(v):\n \"\"\"\n Parse header structured multiline format:\n Config Current Agg Min Ld Share Flags Ld Share Agg Link Link Up\n Master Master Control Active Algorithm Group Mbr State Transitions\n :param v:\n :return: Dictionary {start column position: header}\n {10: 'Config Master', 18: 'Current Master', 26: 'Agg Control', 33: 'Min Active',\n 43: 'Ld Share Algorithm', 49: 'Flags ', 59: 'Ld Share Group', 63: 'Agg Mbr', 69: 'Link State'}\n \"\"\"\n head = []\n empty_header = None\n header = {}\n for num, lines in enumerate(zip_longest(*v, fillvalue=\"-\")):\n #\n if empty_header is None:\n empty_header = (\" \",) * len(lines)\n head += [lines]\n continue\n if set(head[-1]) == {\" \"} and lines != empty_header:\n head = array(head)\n # Transpone list header string\n header[num] = \" \".join([\"\".join(s).strip() for s in head.transpose().tolist()])\n header[num] = header[num].strip()\n head = []\n head += [lines]\n else:\n # last column\n head = array(head)\n header[num] = \" \".join([\"\".join(s).strip(\" -\") for s in head.transpose().tolist()])\n header[num] = header[num].strip()\n 
return header\n"
] |
[
[
"numpy.array"
]
] |
sohailalam2/qstrader
|
[
"e6d86a3ac3dc507b26e27b1f20c2949a69438ef7"
] |
[
"examples/moving_average_cross_backtest.py"
] |
[
"from collections import deque\nimport datetime\n\nimport numpy as np\n\nfrom qstrader import settings\nfrom qstrader.strategy.base import AbstractStrategy\nfrom qstrader.event import SignalEvent, EventType\nfrom qstrader.compat import queue\nfrom qstrader.trading_session import TradingSession\n\n\nclass MovingAverageCrossStrategy(AbstractStrategy):\n \"\"\"\n Requires:\n ticker - The ticker symbol being used for moving averages\n events_queue - A handle to the system events queue\n short_window - Lookback period for short moving average\n long_window - Lookback period for long moving average\n \"\"\"\n def __init__(\n self, ticker,\n events_queue,\n short_window=100,\n long_window=300,\n base_quantity=100\n ):\n self.ticker = ticker\n self.events_queue = events_queue\n self.short_window = short_window\n self.long_window = long_window\n self.base_quantity = base_quantity\n self.bars = 0\n self.invested = False\n self.sw_bars = deque(maxlen=self.short_window)\n self.lw_bars = deque(maxlen=self.long_window)\n\n def calculate_signals(self, event):\n if (\n event.type == EventType.BAR and\n event.ticker == self.ticker\n ):\n # Add latest adjusted closing price to the\n # short and long window bars\n self.lw_bars.append(event.adj_close_price)\n if self.bars > self.long_window - self.short_window:\n self.sw_bars.append(event.adj_close_price)\n\n # Enough bars are present for trading\n if self.bars > self.long_window:\n # Calculate the simple moving averages\n short_sma = np.mean(self.sw_bars)\n long_sma = np.mean(self.lw_bars)\n # Trading signals based on moving average cross\n if short_sma > long_sma and not self.invested:\n print(\"LONG %s: %s\" % (self.ticker, event.time))\n signal = SignalEvent(\n self.ticker, \"BOT\",\n suggested_quantity=self.base_quantity\n )\n self.events_queue.put(signal)\n self.invested = True\n elif short_sma < long_sma and self.invested:\n print(\"SHORT %s: %s\" % (self.ticker, event.time))\n signal = SignalEvent(\n self.ticker, \"SLD\",\n suggested_quantity=self.base_quantity\n )\n self.events_queue.put(signal)\n self.invested = False\n self.bars += 1\n\n\ndef run(config, testing, tickers, filename):\n # Backtest information\n title = ['Moving Average Crossover Example on AAPL: 100x300']\n initial_equity = 10000.0\n start_date = datetime.datetime(2000, 1, 1)\n end_date = datetime.datetime(2014, 1, 1)\n\n # Use the MAC Strategy\n events_queue = queue.Queue()\n strategy = MovingAverageCrossStrategy(\n tickers[0], events_queue,\n short_window=100,\n long_window=300\n )\n\n # Set up the backtest\n backtest = TradingSession(\n config, strategy, tickers,\n initial_equity, start_date, end_date,\n events_queue, title=title,\n benchmark=tickers[1],\n )\n results = backtest.start_trading(testing=testing)\n return results\n\n\nif __name__ == \"__main__\":\n # Configuration data\n testing = False\n config = settings.from_file(\n settings.DEFAULT_CONFIG_FILENAME, testing\n )\n tickers = [\"AAPL\", \"SPY\"]\n filename = None\n run(config, testing, tickers, filename)\n"
] |
[
[
"numpy.mean"
]
] |
joschout/pyIDS
|
[
"959353fc9eca1e8a327d360120cfad375d5918ab"
] |
[
"pyids/test/test_ids_classifier.py"
] |
[
"import unittest\nimport pandas as pd\nimport random\n\nfrom pyids.ids_classifier import IDS, mine_CARs\nfrom pyarc.qcba.data_structures import QuantitativeDataFrame\n\nclass TestIDSClassifier(unittest.TestCase):\n\n def test_model_fitting(self):\n df = pd.read_csv(\"C:/code/python/interpretable_decision_sets/data/titanic.csv\")\n cars = mine_CARs(df, rule_cutoff=40)\n\n quant_df = QuantitativeDataFrame(df)\n ids = IDS()\n ids.fit(quant_df, cars, debug=False)\n auc = ids.score_auc(quant_df)\n\n def test_sls_algorithm(self):\n df = pd.read_csv(\"C:/code/python/interpretable_decision_sets/data/titanic.csv\")\n cars = mine_CARs(df, rule_cutoff=40)\n\n quant_df = QuantitativeDataFrame(df)\n ids = IDS()\n ids.fit(quant_df, cars, algorithm=\"SLS\", debug=False)\n auc = ids.score_auc(quant_df)\n\n def test_dls_algorithm(self):\n df = pd.read_csv(\"C:/code/python/interpretable_decision_sets/data/titanic.csv\")\n cars = mine_CARs(df, rule_cutoff=40)\n\n quant_df = QuantitativeDataFrame(df)\n ids = IDS()\n ids.fit(quant_df, cars, algorithm=\"DLS\", debug=False)\n auc = ids.score_auc(quant_df)\n"
] |
[
[
"pandas.read_csv"
]
] |
wojtek11530/PKD-for-BERT-Model-Compression
|
[
"c8a005e8127645a4d1609e9c08646b8013a8c62b",
"c8a005e8127645a4d1609e9c08646b8013a8c62b"
] |
[
"BERT/examples/run_gpt2.py",
"BERT/examples/lm_finetuning/finetune_on_pregenerated.py"
] |
[
"#!/usr/bin/env python3\n\nimport argparse\nimport logging\nfrom tqdm import trange\n\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef top_k_logits(logits, k):\n \"\"\"\n Masks everything but the k top entries as -infinity (1e10).\n Used to mask logits such that e^-infinity -> 0 won't contribute to the\n sum of the denominator.\n \"\"\"\n if k == 0:\n return logits\n else:\n values = torch.topk(logits, k)[0]\n batch_mins = values[:, -1].view(-1, 1).expand_as(logits)\n return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)\n\ndef sample_sequence(model, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, device='cuda', sample=True):\n if start_token is None:\n assert context is not None, 'Specify exactly one of start_token and context!'\n context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)\n else:\n assert context is None, 'Specify exactly one of start_token and context!'\n context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long)\n prev = context\n output = context\n past = None\n with torch.no_grad():\n for i in trange(length):\n logits, past = model(prev, past=past)\n logits = logits[:, -1, :] / temperature\n logits = top_k_logits(logits, k=top_k)\n log_probs = F.softmax(logits, dim=-1)\n if sample:\n prev = torch.multinomial(log_probs, num_samples=1)\n else:\n _, prev = torch.topk(log_probs, k=1, dim=-1)\n output = torch.cat((output, prev), dim=1)\n return output\n\ndef run_model():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--nsamples\", type=int, default=1)\n parser.add_argument(\"--batch_size\", type=int, default=-1)\n parser.add_argument(\"--length\", type=int, default=-1)\n parser.add_argument(\"--temperature\", type=int, default=1)\n parser.add_argument(\"--top_k\", type=int, default=0)\n parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')\n args = parser.parse_args()\n print(args)\n\n if args.batch_size == -1:\n args.batch_size = 1\n assert args.nsamples % args.batch_size == 0\n\n np.random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)\n model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)\n model.to(device)\n model.eval()\n\n if args.length == -1:\n args.length = model.config.n_ctx // 2\n elif args.length > model.config.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % model.config.n_ctx)\n\n while not args.unconditional:\n if not args.unconditional:\n raw_text = input(\"Model prompt >>> \")\n while not raw_text:\n print('Prompt should not be empty!')\n raw_text = input(\"Model prompt >>> \")\n context_tokens = enc.encode(raw_text)\n generated = 0\n for _ in range(args.nsamples // args.batch_size):\n out = sample_sequence(\n model=model, length=args.length,\n context=context_tokens if not args.unconditional 
else None,\n start_token=enc.encoder['<|endoftext|>'] if args.unconditional else None,\n batch_size=args.batch_size,\n temperature=args.temperature, top_k=args.top_k, device=device\n )\n out = out[:, len(context_tokens):].tolist()\n for i in range(args.batch_size):\n generated += 1\n text = enc.decode(out[i])\n print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n print(text)\n print(\"=\" * 80)\n\nif __name__ == '__main__':\n run_model()\n\n",
"from argparse import ArgumentParser\nfrom pathlib import Path\nimport torch\nimport logging\nimport json\nimport random\nimport numpy as np\nfrom collections import namedtuple\nfrom tempfile import TemporaryDirectory\n\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\n\nfrom pytorch_pretrained_bert.modeling import BertForPreTraining\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam, warmup_linear\n\nInputFeatures = namedtuple(\"InputFeatures\", \"input_ids input_mask segment_ids lm_label_ids is_next\")\n\nlog_format = '%(asctime)-10s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\n\n\ndef convert_example_to_features(example, tokenizer, max_seq_length):\n tokens = example[\"tokens\"]\n segment_ids = example[\"segment_ids\"]\n is_random_next = example[\"is_random_next\"]\n masked_lm_positions = example[\"masked_lm_positions\"]\n masked_lm_labels = example[\"masked_lm_labels\"]\n\n assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)\n\n input_array = np.zeros(max_seq_length, dtype=np.int)\n input_array[:len(input_ids)] = input_ids\n\n mask_array = np.zeros(max_seq_length, dtype=np.bool)\n mask_array[:len(input_ids)] = 1\n\n segment_array = np.zeros(max_seq_length, dtype=np.bool)\n segment_array[:len(segment_ids)] = segment_ids\n\n lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)\n lm_label_array[masked_lm_positions] = masked_label_ids\n\n features = InputFeatures(input_ids=input_array,\n input_mask=mask_array,\n segment_ids=segment_array,\n lm_label_ids=lm_label_array,\n is_next=is_random_next)\n return features\n\n\nclass PregeneratedDataset(Dataset):\n def __init__(self, training_path, epoch, tokenizer, num_data_epochs, reduce_memory=False):\n self.vocab = tokenizer.vocab\n self.tokenizer = tokenizer\n self.epoch = epoch\n self.data_epoch = epoch % num_data_epochs\n data_file = training_path / f\"epoch_{self.data_epoch}.json\"\n metrics_file = training_path / f\"epoch_{self.data_epoch}_metrics.json\"\n assert data_file.is_file() and metrics_file.is_file()\n metrics = json.loads(metrics_file.read_text())\n num_samples = metrics['num_training_examples']\n seq_len = metrics['max_seq_len']\n self.temp_dir = None\n self.working_dir = None\n if reduce_memory:\n self.temp_dir = TemporaryDirectory()\n self.working_dir = Path(self.temp_dir.name)\n input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',\n mode='w+', dtype=np.int32, shape=(num_samples, seq_len))\n input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n segment_ids = np.memmap(filename=self.working_dir/'input_masks.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.int32)\n lm_label_ids[:] = -1\n is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap',\n shape=(num_samples,), mode='w+', dtype=np.bool)\n else:\n input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)\n input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n segment_ids = np.zeros(shape=(num_samples, seq_len), 
dtype=np.bool)\n lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)\n is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool)\n logging.info(f\"Loading training examples for epoch {epoch}\")\n with data_file.open() as f:\n for i, line in enumerate(tqdm(f, total=num_samples, desc=\"Training examples\")):\n line = line.strip()\n example = json.loads(line)\n features = convert_example_to_features(example, tokenizer, seq_len)\n input_ids[i] = features.input_ids\n segment_ids[i] = features.segment_ids\n input_masks[i] = features.input_mask\n lm_label_ids[i] = features.lm_label_ids\n is_nexts[i] = features.is_next\n assert i == num_samples - 1 # Assert that the sample count metric was true\n logging.info(\"Loading complete!\")\n self.num_samples = num_samples\n self.seq_len = seq_len\n self.input_ids = input_ids\n self.input_masks = input_masks\n self.segment_ids = segment_ids\n self.lm_label_ids = lm_label_ids\n self.is_nexts = is_nexts\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, item):\n return (torch.tensor(self.input_ids[item].astype(np.int64)),\n torch.tensor(self.input_masks[item].astype(np.int64)),\n torch.tensor(self.segment_ids[item].astype(np.int64)),\n torch.tensor(self.lm_label_ids[item].astype(np.int64)),\n torch.tensor(self.is_nexts[item].astype(np.int64)))\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--pregenerated_data', type=Path, required=True)\n parser.add_argument('--output_dir', type=Path, required=True)\n parser.add_argument(\"--bert_model\", type=str, required=True,\n choices=[\"bert-base-uncased\", \"bert-large-uncased\", \"bert-base-cased\",\n \"bert-base-multilingual\", \"bert-base-chinese\"])\n parser.add_argument(\"--do_lower_case\", action=\"store_true\")\n parser.add_argument(\"--reduce_memory\", action=\"store_true\",\n help=\"Store training data as on-disc memmaps to massively reduce memory usage\")\n\n parser.add_argument(\"--epochs\", type=int, default=3, help=\"Number of epochs to train for\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--learning_rate\",\n default=3e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n args = parser.parse_args()\n\n assert args.pregenerated_data.is_dir(), \\\n \"--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!\"\n\n samples_per_epoch = []\n for i in range(args.epochs):\n epoch_file = args.pregenerated_data / f\"epoch_{i}.json\"\n metrics_file = args.pregenerated_data / f\"epoch_{i}_metrics.json\"\n if epoch_file.is_file() and metrics_file.is_file():\n metrics = json.loads(metrics_file.read_text())\n samples_per_epoch.append(metrics['num_training_examples'])\n else:\n if i == 0:\n exit(\"No training data was found!\")\n print(f\"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).\")\n print(\"This script will loop over the available data, but training diversity may be negatively impacted.\")\n num_data_epochs = i\n break\n else:\n num_data_epochs = args.epochs\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logging.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if args.output_dir.is_dir() and list(args.output_dir.iterdir()):\n logging.warning(f\"Output directory ({args.output_dir}) already exists and is not empty!\")\n args.output_dir.mkdir(parents=True, exist_ok=True)\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n total_train_examples = 0\n for i in range(args.epochs):\n # The modulo takes into account the fact that we may loop over limited epochs of data\n total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]\n\n num_train_optimization_steps = int(\n total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)\n if args.local_rank != -1:\n num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n # Prepare model\n model = BertForPreTraining.from_pretrained(args.bert_model)\n if args.fp16:\n model.half()\n model.to(device)\n if args.local_rank != -1:\n try:\n from apex.parallel import DistributedDataParallel as DDP\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n model = DDP(model)\n elif n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n 
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n if args.fp16:\n try:\n from apex.optimizers import FP16_Optimizer\n from apex.optimizers import FusedAdam\n except ImportError:\n raise ImportError(\n \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n optimizer = FusedAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n bias_correction=False,\n max_grad_norm=1.0)\n if args.loss_scale == 0:\n optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n else:\n optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n\n else:\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n global_step = 0\n logging.info(\"***** Running training *****\")\n logging.info(f\" Num examples = {total_train_examples}\")\n logging.info(\" Batch size = %d\", args.train_batch_size)\n logging.info(\" Num steps = %d\", num_train_optimization_steps)\n model.train()\n for epoch in range(args.epochs):\n epoch_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data, tokenizer=tokenizer,\n num_data_epochs=num_data_epochs)\n if args.local_rank == -1:\n train_sampler = RandomSampler(epoch_dataset)\n else:\n train_sampler = DistributedSampler(epoch_dataset)\n train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n with tqdm(total=len(train_dataloader), desc=f\"Epoch {epoch}\") as pbar:\n for step, batch in enumerate(train_dataloader):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch\n loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n pbar.update(1)\n mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps\n pbar.set_postfix_str(f\"Loss: {mean_loss:.5f}\")\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps,\n args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n # Save a trained model\n logging.info(\"** ** * Saving fine-tuned model ** ** * \")\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n output_model_file = args.output_dir / \"pytorch_model.bin\"\n torch.save(model_to_save.state_dict(), str(output_model_file))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.cat",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.no_grad",
"torch.random.manual_seed",
"torch.multinomial",
"torch.full",
"torch.cuda.is_available",
"torch.tensor",
"torch.ones_like",
"torch.nn.functional.softmax",
"torch.topk"
],
[
"numpy.full",
"torch.device",
"torch.distributed.get_world_size",
"torch.cuda.manual_seed_all",
"torch.utils.data.RandomSampler",
"numpy.zeros",
"numpy.random.seed",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device",
"numpy.memmap",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.nn.DataParallel"
]
] |
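Note on the training script above: in its fp16 branch the learning rate is rescaled by hand with warmup_linear. A minimal sketch of that schedule, with warmup_linear written out locally (it mirrors the helper the script imports from the pytorch-pretrained-bert optimization module; step counts below are illustrative, not from the source):

def warmup_linear(x, warmup=0.002):
    # linear ramp up to `warmup`, then linear decay to zero at x == 1
    if x < warmup:
        return x / warmup
    return 1.0 - x

learning_rate = 3e-5
num_train_optimization_steps = 1000
for global_step in (0, 50, 100, 500, 900):
    lr = learning_rate * warmup_linear(global_step / num_train_optimization_steps, 0.1)
    print(global_step, f"{lr:.2e}")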
pcampeti/plancklens
|
[
"44b8a932551cb6534965892cd7c72b0a9cd8c3a5"
] |
[
"plancklens/nhl.py"
] |
[
"\"\"\"Calculation of semi-analytical noise biases module.\n\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport pickle as pk\nimport numpy as np\nimport healpy as hp\n\nfrom plancklens import qresp, utils, utils_spin as uspin\nfrom plancklens.helpers import mpi, sql\n\n\ndef get_nhl(qe_key1, qe_key2, cls_weights, cls_ivfs, lmax_ivf1, lmax_ivf2,\n lmax_out=None, lmax_ivf12=None, lmax_ivf22=None, cls_ivfs_bb=None, cls_ivfs_ab=None):\n \"\"\"(Semi-)Analytical noise level calculation for the cross-spectrum of two QE keys.\n\n Args:\n qe_key1: QE key 1\n qe_key2: QE key 2\n cls_weights: dictionary with the CMB spectra entering the QE weights.\n (expected are 'tt', 'te', 'ee' when/if relevant)\n cls_ivfs: dictionary with the inverse-variance filtered CMB spectra.\n (expected are 'tt', 'te', 'ee', 'bb', 'tb', 'eb' when/if relevant)\n lmax_ivf1: QE 1 uses CMB multipoles down to lmax_ivf1.\n lmax_ivf2: QE 2 uses CMB multipoles down to lmax_ivf2.\n lmax_out(optional): outputs are calculated down to lmax_out. Defaults to lmax_ivf1 + lmax_ivf2\n\n Outputs:\n 4-tuple of gradient (G) and curl (C) mode Gaussian noise co-variances GG, CC, GC, CG.\n\n \"\"\"\n if lmax_ivf12 is None: lmax_ivf12 = lmax_ivf1\n if lmax_ivf22 is None: lmax_ivf22 = lmax_ivf2\n qes1 = qresp.get_qes(qe_key1, lmax_ivf1, cls_weights, lmax2=lmax_ivf12)\n qes2 = qresp.get_qes(qe_key2, lmax_ivf2, cls_weights, lmax2=lmax_ivf22)\n if lmax_out is None:\n lmax_out = max(lmax_ivf1, lmax_ivf12) + max(lmax_ivf2, lmax_ivf22)\n return _get_nhl(qes1, qes2, cls_ivfs, lmax_out, cls_ivfs_bb=cls_ivfs_bb, cls_ivfs_ab=cls_ivfs_ab)\n\ndef _get_nhl(qes1, qes2, cls_ivfs, lmax_out, cls_ivfs_bb=None, cls_ivfs_ab=None, ret_terms=False):\n GG_N0 = np.zeros(lmax_out + 1, dtype=float)\n CC_N0 = np.zeros(lmax_out + 1, dtype=float)\n GC_N0 = np.zeros(lmax_out + 1, dtype=float)\n CG_N0 = np.zeros(lmax_out + 1, dtype=float)\n\n cls_ivfs_aa = cls_ivfs\n cls_ivfs_bb = cls_ivfs if cls_ivfs_bb is None else cls_ivfs_bb\n cls_ivfs_ab = cls_ivfs if cls_ivfs_ab is None else cls_ivfs_ab\n cls_ivfs_ba = cls_ivfs_ab\n if ret_terms:\n terms = []\n for qe1 in qes1:\n cL1 = qe1.cL(np.arange(lmax_out + 1))\n for qe2 in qes2:\n cL2 = qe2.cL(np.arange(lmax_out + 1))\n si, ti, ui, vi = (qe1.leg_a.spin_in, qe1.leg_b.spin_in, qe2.leg_a.spin_in, qe2.leg_b.spin_in)\n so, to, uo, vo = (qe1.leg_a.spin_ou, qe1.leg_b.spin_ou, qe2.leg_a.spin_ou, qe2.leg_b.spin_ou)\n assert so + to >= 0 and uo + vo >= 0, (so, to, uo, vo)\n\n clsu = utils.joincls([qe1.leg_a.cl, qe2.leg_a.cl.conj(), uspin.spin_cls(si, ui, cls_ivfs_aa)])\n cltv = utils.joincls([qe1.leg_b.cl, qe2.leg_b.cl.conj(), uspin.spin_cls(ti, vi, cls_ivfs_bb)])\n R_sutv = utils.joincls([uspin.wignerc(clsu, cltv, so, uo, to, vo, lmax_out=lmax_out), cL1, cL2])\n\n clsv = utils.joincls([qe1.leg_a.cl, qe2.leg_b.cl.conj(), uspin.spin_cls(si, vi, cls_ivfs_ab)])\n cltu = utils.joincls([qe1.leg_b.cl, qe2.leg_a.cl.conj(), uspin.spin_cls(ti, ui, cls_ivfs_ba)])\n R_sutv = R_sutv + utils.joincls([uspin.wignerc(clsv, cltu, so, vo, to, uo, lmax_out=lmax_out), cL1, cL2])\n\n # we now need -s-t uv\n sgnms = (-1) ** (si + so)\n sgnmt = (-1) ** (ti + to)\n clsu = utils.joincls([sgnms * qe1.leg_a.cl.conj(), qe2.leg_a.cl.conj(), uspin.spin_cls(-si, ui, cls_ivfs_aa)])\n cltv = utils.joincls([sgnmt * qe1.leg_b.cl.conj(), qe2.leg_b.cl.conj(), uspin.spin_cls(-ti, vi, cls_ivfs_bb)])\n R_msmtuv = utils.joincls([uspin.wignerc(clsu, cltv, -so, uo, -to, vo, lmax_out=lmax_out), cL1, cL2])\n\n clsv = utils.joincls([sgnms * qe1.leg_a.cl.conj(), 
qe2.leg_b.cl.conj(), uspin.spin_cls(-si, vi, cls_ivfs_ab)])\n cltu = utils.joincls([sgnmt * qe1.leg_b.cl.conj(), qe2.leg_a.cl.conj(), uspin.spin_cls(-ti, ui, cls_ivfs_ba)])\n R_msmtuv = R_msmtuv + utils.joincls([uspin.wignerc(clsv, cltu, -so, vo, -to, uo, lmax_out=lmax_out), cL1, cL2])\n\n GG_N0 += 0.5 * R_sutv.real\n GG_N0 += 0.5 * (-1) ** (to + so) * R_msmtuv.real\n\n CC_N0 += 0.5 * R_sutv.real\n CC_N0 -= 0.5 * (-1) ** (to + so) * R_msmtuv.real\n\n GC_N0 -= 0.5 * R_sutv.imag\n GC_N0 -= 0.5 * (-1) ** (to + so) * R_msmtuv.imag\n\n CG_N0 += 0.5 * R_sutv.imag\n CG_N0 -= 0.5 * (-1) ** (to + so) * R_msmtuv.imag\n if ret_terms:\n terms += [0.5 * R_sutv, 0.5 * (-1) ** (to + so) * R_msmtuv]\n return (GG_N0, CC_N0, GC_N0, CG_N0) if not ret_terms else (GG_N0, CC_N0, GC_N0, CG_N0, terms)\n\n\nclass nhl_lib_simple:\n \"\"\"Semi-analytical unnormalized N0 library.\n\n This version only for 4 identical legs, and with simple 1/fsky spectrum estimator.\n\n Args:\n lib_dir: outputs will be cached there\n ivfs: inverse-variance filtering library\n cls_weight(dict): fiducial spectra entering the QE weights (numerator in Eq. 2 of https://arxiv.org/abs/1807.06210)\n lmax_qlm: noise (co-)variances are calculated up to multipole lmax_qlm\n resplib: only relevant for bias hardened estimators\n\n \"\"\"\n def __init__(self, lib_dir, ivfs, cls_weight, lmax_qlm, resplib=None):\n self.lmax_qlm = lmax_qlm\n self.cls_weight = cls_weight\n self.ivfs = ivfs\n fn_hash = os.path.join(lib_dir, 'nhl_hash.pk')\n if mpi.rank == 0:\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n if not os.path.exists(fn_hash):\n pk.dump(self.hashdict(), open(fn_hash, 'wb'), protocol=2)\n mpi.barrier()\n utils.hash_check(pk.load(open(fn_hash, 'rb')), self.hashdict())\n\n self.lib_dir = lib_dir\n self.npdb = sql.npdb(os.path.join(lib_dir, 'npdb.db'))\n self.fsky = np.mean(self.ivfs.get_fmask())\n self.resplib = resplib\n\n def hashdict(self):\n ret = {k: utils.clhash(self.cls_weight[k]) for k in self.cls_weight.keys()}\n ret['ivfs'] = self.ivfs.hashdict()\n ret['lmax_qlm'] = self.lmax_qlm\n return ret\n\n def _get_qe_derived(self, k):\n if '_bh_' in k:\n kQE, ksource = k.split('_bh_')\n assert len(ksource) == 1.\n wL = self.resplib.get_response(kQE, ksource) * utils.cli(self.resplib.get_response(ksource + kQE[1:], ksource))\n return [(kQE, 1.), (ksource + kQE[1:], -wL)]\n else:\n return [(k, 1.)]\n\n def get_sim_nhl(self, idx, k1, k2, recache=False):\n \"\"\"\n Args:\n idx: simulation index\n k1: QE key 1\n k2: QE key 2\n \"\"\"\n assert idx == -1 or idx >= 0, idx\n k1sw = self._get_qe_derived(k1)\n k2sw = self._get_qe_derived(k2)\n ret = np.zeros(self.lmax_qlm + 1)\n for k1, w1 in k1sw:\n for k2, w2 in k2sw:\n s1, GC1, s1ins, ksp1 = qresp.qe_spin_data(k1)\n s2, GC2, s2ins, ksp2 = qresp.qe_spin_data(k2)\n fn = 'anhl_qe_' + ksp1 + k1[1:] + '_qe_' + ksp2 + k2[1:] + GC1 + GC2\n suf = ('sim%04d'%idx) * (int(idx) >= 0) + 'dat' * (idx == -1)\n if self.npdb.get(fn + suf) is None or recache:\n assert s1 >= 0 and s2 >= 0, (s1, s2)\n cls_ivfs, lmax_ivf = self._get_cls(idx, np.unique(np.concatenate([s1ins, s2ins])))\n GG, CC, GC, CG = get_nhl(k1, k2, self.cls_weight, cls_ivfs, lmax_ivf, lmax_ivf, lmax_out=self.lmax_qlm)\n fns = [('G', 'G', GG) ] + [('C', 'G', CG)] * (s1 > 0) + [('G', 'C', GC)] * (s2 > 0) + [('C', 'C', CC)] * (s1 > 0) * (s2 > 0)\n if recache and self.npdb.get(fn + suf) is not None:\n for GC1, GC2, N0 in fns:\n self.npdb.remove('anhl_qe_' + ksp1 + k1[1:] + '_qe_'+ ksp2 + k2[1:] + GC1 + GC2 + suf)\n for GC1, GC2, N0 in fns:\n 
self.npdb.add('anhl_qe_' + ksp1 + k1[1:] + '_qe_' + ksp2 + k2[1:] + GC1 + GC2 + suf, N0)\n ret += w1 * w2 * self.npdb.get(fn + suf)\n return ret\n\n def _get_cls(self, idx, spins):\n assert np.all(spins >= 0), spins\n ret = {}\n if 0 in spins:\n ret['tt'] = hp.alm2cl(self.ivfs.get_sim_tlm(idx)) / self.fsky\n if 2 in spins:\n ret['ee'] = hp.alm2cl(self.ivfs.get_sim_elm(idx)) / self.fsky\n ret['bb'] = hp.alm2cl(self.ivfs.get_sim_blm(idx)) / self.fsky\n ret['eb'] = hp.alm2cl(self.ivfs.get_sim_elm(idx), alms2=self.ivfs.get_sim_blm(idx)) / self.fsky\n if 0 in spins and 2 in spins:\n ret['te'] = hp.alm2cl(self.ivfs.get_sim_tlm(idx), alms2=self.ivfs.get_sim_elm(idx)) / self.fsky\n ret['tb'] = hp.alm2cl(self.ivfs.get_sim_tlm(idx), alms2=self.ivfs.get_sim_blm(idx)) / self.fsky\n lmaxs = [len(cl) for cl in ret.values()]\n assert len(np.unique(lmaxs)) == 1, lmaxs\n return ret, lmaxs[0]"
] |
[
[
"numpy.concatenate",
"numpy.zeros",
"numpy.arange",
"numpy.all",
"numpy.unique"
]
] |
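A hedged sketch of driving get_nhl from plancklens/nhl.py above: the QE key 'ptt' (temperature-only lensing estimator) and the flat unit spectra are placeholders, not values from the source; real use would pass fiducial and filtered CMB spectra.

import numpy as np
from plancklens.nhl import get_nhl

lmax = 2048
cls_weights = {'tt': np.ones(lmax + 1)}  # fiducial spectra entering the QE weights
cls_ivfs = {'tt': np.ones(lmax + 1)}     # inverse-variance filtered spectra
GG, CC, GC, CG = get_nhl('ptt', 'ptt', cls_weights, cls_ivfs, lmax, lmax,
                         lmax_out=2 * lmax)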
clems4ever/tensorstream
|
[
"61bff14f65f71bdd4ab58aefbd6eda79ec5863cb",
"61bff14f65f71bdd4ab58aefbd6eda79ec5863cb"
] |
[
"tensorstream/meta/factory_spec.py",
"tensorstream/finance/momentum.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n\nfrom tensorstream.streamable import Streamable\nfrom tensorstream.meta.factory import Factory\nfrom tensorstream.tests import TestCase\n\nclass MultiplyBy(Streamable):\n def __init__(self, nb):\n super().__init__()\n self.nb = nb\n\n def step(self, x, prev_x=None):\n if prev_x is None:\n prev_x = tf.constant(0.0)\n return prev_x * self.nb, x, prev_x\n\nclass FactorySpec(TestCase):\n def setUp(self):\n self.sheets = self.read_ods(\n self.from_test_res('factory.ods', __file__))\n\n def test_factory_simple(self):\n sheet = self.sheets['Sheet1']\n factory = Factory(MultiplyBy, ([3], [5], [10]))\n x = tf.placeholder(tf.float32)\n factory_ts, _, _ = factory((x, x, x))\n \n with tf.Session() as sess:\n output = sess.run(factory_ts, { x: sheet['x'] })\n\n np.testing.assert_almost_equal(output[0],\n sheet['Mul 3'].values, decimal=3)\n np.testing.assert_almost_equal(output[1],\n sheet['Mul 5'].values, decimal=3)\n np.testing.assert_almost_equal(output[2],\n sheet['Mul 10'].values, decimal=3)\n",
"import tensorflow as tf\n\nfrom tensorstream.common.common import Sub, Fork, Identity\nfrom tensorstream.common.lag import Lag\nfrom tensorstream.common.set_during import SetDuring\nfrom tensorstream.meta.compose import Compose\nfrom tensorstream.meta.join import Join\n\ndef Momentum(period):\n return Compose(\n SetDuring(tf.constant(0.0), period),\n Sub(),\n Join(Identity(), Lag(period)),\n Fork(2),\n )\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"tensorflow.constant",
"tensorflow.Session",
"tensorflow.placeholder"
],
[
"tensorflow.constant"
]
] |
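Outside the test harness, the Factory pattern exercised in factory_spec.py above can be driven directly. This sketch reuses the spec's MultiplyBy operator and its exact call convention (it assumes Factory and MultiplyBy are imported as in the spec; input values are illustrative):

import tensorflow as tf
factory = Factory(MultiplyBy, ([3], [5], [10]))  # one operator per parameter tuple
x = tf.placeholder(tf.float32)
factory_ts, _, _ = factory((x, x, x))
with tf.Session() as sess:
    print(sess.run(factory_ts, {x: [1.0, 2.0, 4.0]}))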
danenigma/Traditional-Computer-Vision
|
[
"04e66c8d6bdb3e5eb5d1ab05674e4a5ea3b9f823"
] |
[
"Scene-Classification-With-Spatial-Pyramid-Matching/network_layers.py"
] |
[
"import numpy as np\r\nimport scipy.ndimage\r\nfrom skimage.measure import block_reduce\r\nimport util\r\nimport os,time\r\n#for testing \r\nimport torch.nn as nn\r\nimport torch\r\nimport deep_recog\r\ndef extract_deep_feature(x,vgg16_weights):\r\n\t'''\r\n\tExtracts deep features from the given VGG-16 weights.\r\n\r\n\t[input]\r\n\t* x: numpy.ndarray of shape (H,W,3)\r\n\t* vgg16_weights: numpy.ndarray of shape (L,3)\r\n\r\n\t[output]\r\n\t* feat: numpy.ndarray of shape (K)\r\n\t'''\r\n\t#-------TODO---------\r\n\r\n\tfor index, vgg_weights in enumerate(vgg16_weights[:-1]):\r\n\t\t\r\n\t\t#print(index, vgg_weights[0])\r\n\t\r\n\t\t\r\n\t\tif vgg_weights[0] == \"conv2d\":\r\n\t\t\tx = multichannel_conv2d(x, vgg_weights[1], vgg_weights[2])\r\n\r\n\t\tif vgg_weights[0] == \"relu\":\r\n\t\t\tx = relu(x)\r\n\r\n\t\tif vgg_weights[0] == \"maxpool2d\":\r\n\t\t\tx = max_pool2d(x, vgg_weights[1])\r\n\r\n\t\tif vgg_weights[0] == \"linear\":\r\n\r\n\t\t\tx = linear(x, vgg_weights[1], vgg_weights[2])\r\n\r\n\t\tif index == 30:#after conv flatten \r\n\t\t\tx = x.flatten()\r\n\r\n\t\t#print('Done computing layer: [' + str(index) + '] ' + vgg_weights[0])\t\r\n\t\r\n\treturn x\r\n\t\r\ndef multichannel_conv2d(x,weight,bias):\r\n\t'''\r\n\tPerforms multi-channel 2D convolution.\r\n\r\n\t[input]\r\n\t* x: numpy.ndarray of shape (H, W, input_dim)\r\n\t* weight: numpy.ndarray of shape (output_dim,input_dim,kernel_size,kernel_size)\r\n\t* bias: numpy.ndarray of shape (output_dim)\r\n\r\n\t[output]\r\n\t* feat: numpy.ndarray of shape (H,W,output_dim)\r\n\t'''\r\n\t\r\n\t(H,W,input_dim) = x.shape\r\n\t(output_dim,input_dim,kernel_size,kernel_size) = weight.shape\r\n\t\r\n\tfeat = np.zeros((H,W,output_dim))\r\n\r\n\tfor i in range(output_dim):#for each filter\r\n\t\tfor ch in range(input_dim):#for each channel\r\n\t\t\r\n\t\t\tw = np.flipud(np.fliplr(weight[i, ch, :, :]))\r\n\t\t\t\r\n\t\t\tfeat[:,:,i] += scipy.ndimage.convolve(x[:, :, ch],\r\n\t\t\t\t\t\t w, \r\n\t\t\t\t\t\t mode='constant', \r\n\t\t\t\t\t\t cval=0.0)\r\n\t\t\t\r\n\t\t\t\"\"\"\r\n\t\t\t#using correlation\r\n\t\t\tfeat[:,:,i] += scipy.ndimage.correlate(x[:, :, ch],\r\n\t\t\t\t\t\t weight[i, ch, :, :], \r\n\t\t\t\t\t\t mode='constant', \r\n\t\t\t\t\t\t cval=0.0) \r\n\t\t\t\"\"\"\r\n\t\t\t\t\t \r\n\t\tfeat[:,:,i] += bias[i]\r\n\t\t\r\n\treturn feat\r\n\r\ndef relu(x):\r\n\t'''\r\n\tRectified linear unit.\r\n\r\n\t[input]\r\n\t* x: numpy.ndarray\r\n\r\n\t[output]\r\n\t* y: numpy.ndarray\r\n\t'''\r\n\treturn np.maximum(0, x)\r\n\t\r\n\t\r\ndef max_pool2d(x,size):\r\n\t'''\r\n\t2D max pooling operation.\r\n\r\n\t[input]\r\n\t* x: numpy.ndarray of shape (H,W,input_dim)\r\n\t* size: pooling receptive field\r\n\r\n\t[output]\r\n\t* y: numpy.ndarray of shape (H/size,W/size,input_dim)\r\n\t'''\r\n\tout = block_reduce(x, block_size=(size, size, 1), func=np.max)\r\n\treturn out\r\n\r\ndef linear(x,W,b):\r\n\t'''\r\n\tFully-connected layer.\r\n\r\n\t[input]\r\n\t* x: numpy.ndarray of shape (input_dim)\r\n\t* weight: numpy.ndarray of shape (output_dim,input_dim)\r\n\t* bias: numpy.ndarray of shape (output_dim)\r\n\r\n\t[output]\r\n\t* y: numpy.ndarray of shape (output_dim)\r\n\t'''\r\n\tout = np.matmul(x, W.T) + b\r\n\treturn out\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n\t\r\n\timage_np = np.random.randn(224, 224, 3)\r\n\t\r\n\t\"\"\"\r\n\timage = deep_recog.preprocess_image(image_np)\r\n\t\r\n\tconv = nn.Conv2d(in_channels=3, out_channels= 10,\r\n\t\t kernel_size =(5,5), padding= (2,2)).double()\r\n\trelu_torch = nn.ReLU()\r\n\tmax_torch = 
nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1), ceil_mode=False)\r\n\tlinear_torch = nn.Linear(in_features=25088, out_features=4096, bias=True)\r\n\t\r\n\twl, bl = linear_torch.weight.data.numpy(), linear_torch.bias.data.numpy()\r\n\t\r\n\tprint('wl shape: ', wl.shape)\r\n\tw, b = conv.weight.data.numpy(), conv.bias.data.numpy()\r\n\r\n\r\n\tout = conv(torch.autograd.Variable(image))\r\n\tmy_out = multichannel_conv2d(image_np, w, b)\r\n\tmy_out_tensor = deep_recog.preprocess_image(my_out)\r\n\tprint('Conv: ', np.allclose(out.data.numpy(), my_out_tensor.numpy()))\r\n\t\r\n\tmy_out = relu(my_out)\r\n\tout = relu_torch(out)\r\n\tmy_out_tensor = deep_recog.preprocess_image(my_out)\t\r\n\tprint('Relu: ', np.allclose(out.data.numpy(), my_out_tensor.numpy()))\r\n\t\r\n\tmy_out = max_pool2d(my_out, 2)\r\n\tout = max_torch(out)\r\n\tout_np = out.permute(0,2,3,1).squeeze(0)\r\n\tprint('Max_pool2d: ', np.allclose(out_np.data.numpy(), my_out))\r\n\t\r\n\tx = np.random.randn(25088).astype('float')\r\n\tx_torch = torch.from_numpy(x).float()\r\n\t\r\n\tmy_lin_out = linear(x,wl,bl)\r\n\ttorch_lin_out = linear_torch(torch.autograd.Variable(x_torch))\r\n\tprint(np.min(my_lin_out), np.max(my_lin_out), np.mean(my_lin_out), np.std(my_lin_out))\r\n\tprint('Linear: ', np.allclose(my_lin_out, torch_lin_out.data.numpy(), atol=2e-06))\r\n\t\r\n\t\r\n\t\r\n\tvgg16_weights = util.get_VGG16_weights()\r\n\r\n\textract_deep_feature(image_np, vgg16_weights)\r\n\t\"\"\"\r\n\r\n\t\r\n"
] |
[
[
"numpy.matmul",
"numpy.zeros",
"numpy.random.randn",
"numpy.maximum",
"numpy.fliplr"
]
] |
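A quick shape sanity check of the layers in network_layers.py above on random data, in the spirit of the commented-out test block (assumes the module's functions are imported; no claim about matching real VGG-16 weights):

import numpy as np
x = np.random.randn(8, 8, 3)        # (H, W, input_dim)
w = np.random.randn(4, 3, 3, 3)     # (output_dim, input_dim, kernel, kernel)
b = np.random.randn(4)
out = relu(multichannel_conv2d(x, w, b))
out = max_pool2d(out, 2)
print(out.shape)                    # (4, 4, 4): spatial dims halved, 4 channels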
Lkiraa/Contiki-ng
|
[
"87b55a9233d5588b454f6f5ec580ee9af1ae88f8"
] |
[
"arch/cpu/arm/CMSIS/CMSIS/DSP/Testing/PatternGeneration/Interpolate.py"
] |
[
"import os.path\nimport numpy as np\nimport itertools\nimport Tools\nfrom scipy.interpolate import interp1d,interp2d\n\n# Those patterns are used for tests and benchmarks.\n# For tests, there is the need to add tests for saturation\n\n# Get lists of points in row order for use in CMSIS function\ndef getLinearPoints(x,y):\n return(np.array([[p[1],p[0]] for p in np.array(np.meshgrid(y,x)).T.reshape(-1,2)]))\n\ndef writeTests(config,format):\n # Linear interpolation test\n NBSAMPLES=40\n\n x = np.linspace(0, NBSAMPLES, num=NBSAMPLES+1, endpoint=True)\n y = np.cos(-x**2/(NBSAMPLES - 1))\n f = interp1d(x, y)\n data=x+0.5\n data=data[:-1]\n z = f(data)\n\n if format != 0:\n data = data / 2.0**11\n if format != 0:\n config.writeInputQ31(1, data,\"Input\")\n else:\n config.writeInput(1, data)\n config.writeInput(1, y,\"YVals\")\n \n ref = z\n config.writeReference(1, ref)\n\n # Bilinear interpolation test\n x = np.arange(-3.14, 3.14, 1.0)\n y = np.arange(-3.14, 3.14, 0.8)\n xx, yy = np.meshgrid(x, y)\n z = np.sin(xx**2+yy**2)\n f = interp2d(x, y, z, kind='linear')\n\n\n # Configuration for the test (to initialize the bilinear structure)\n matrixSize=[np.size(x),np.size(y)]\n\n # Generate reference value for bilinear instance\n # getLinearPoints ensure they are in row order\n samples = getLinearPoints(x,y)\n # We recompute the value of the function on the samples in row\n # order\n yvals = np.array([np.sin(i[0]**2+i[1]**2) for i in samples])\n\n\n # Now we generate other points. The points where we want to evaluate\n # the function.\n # In Python they must be rescale between -3.14 and tghe max x or max y defined above.\n # In CMSIS they will be between 1 and numRow-1 or numCols-1.\n # Since we add 0.5 to be sure we are between grid point, we use\n # numCols-2 as bound to be sured we are <= numCols-1\n numCols = np.size(x)\n numRows = np.size(y)\n\n NBX = 10\n NBY = 15\n\n # The CMSIS indexes\n ix = np.linspace(0, numCols-3, num=NBX, endpoint=True)+0.5\n iy = np.linspace(0, numRows-3, num=NBY, endpoint=True)+0.5\n\n\n # The corresponding Python values\n ixVal = ((ix ) / (numCols-1)) * (x[-1] + 3.14) - 3.14\n iyVal = ((iy ) / (numRows-1)) * (y[-1] + 3.14) - 3.14\n \n # Input samples for CMSIS.\n inputSamples = getLinearPoints(ix,iy)\n \n # We compute the Python interpolated function on the values\n inputVals = getLinearPoints(ixVal,iyVal)\n ref=np.array([f(i[0],i[1]) for i in inputVals])\n\n\n if format != 0:\n inputSamples = inputSamples / 2.0**11\n data = inputSamples.reshape(np.size(inputSamples))\n if format != 0:\n config.writeInputQ31(2, data,\"Input\")\n else:\n config.writeInput(2, data)\n\n config.writeInput(2, yvals.reshape(np.size(yvals)),\"YVals\")\n config.writeReference(2, ref.reshape(np.size(ref)))\n config.writeInputS16(2, matrixSize,\"Config\")\n\n\n \n \n\n\n\n\ndef generatePatterns():\n PATTERNDIR = os.path.join(\"Patterns\",\"DSP\",\"Interpolation\",\"Interpolation\")\n PARAMDIR = os.path.join(\"Parameters\",\"DSP\",\"Interpolation\",\"Interpolation\")\n \n configf32=Tools.Config(PATTERNDIR,PARAMDIR,\"f32\")\n configq31=Tools.Config(PATTERNDIR,PARAMDIR,\"q31\")\n configq15=Tools.Config(PATTERNDIR,PARAMDIR,\"q15\")\n configq7=Tools.Config(PATTERNDIR,PARAMDIR,\"q7\")\n \n writeTests(configf32,0)\n writeTests(configq31,31)\n writeTests(configq15,15)\n writeTests(configq7,7)\n\n\nif __name__ == '__main__':\n generatePatterns()\n"
] |
[
[
"scipy.interpolate.interp1d",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"scipy.interpolate.interp2d",
"numpy.size",
"numpy.linspace",
"numpy.meshgrid"
]
] |
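getLinearPoints in Interpolate.py above flattens a grid into [x, y] pairs with the y coordinate varying slowest, which is the row order the CMSIS bilinear kernel expects. A tiny standalone check (assuming the function is imported):

import numpy as np
x = np.array([0.0, 1.0])
y = np.array([10.0, 20.0])
print(getLinearPoints(x, y))
# rows: [0. 10.], [1. 10.], [0. 20.], [1. 20.]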
ebucheli/TC1002S
|
[
"ff4647845a8e87bdb002d977501311fed96accd4"
] |
[
"k_means_solution.py"
] |
[
"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom io_utilities import load_data\nfrom visualizations import show_clusters_centroids\n\ndef distance(a,b):\n \"\"\"\n Compute Euclidean Distance Between Two Points\n Input:\n a (list): an n-dimensional list or array\n b (list): an n-dimensional list or array\n Output:\n The Euclidean Distance between vectors a and b\n \"\"\"\n return np.sqrt(np.sum((np.array(b)-np.array(a))**2))\n\ndef get_clusters(points,centroids):\n \"\"\"\n Returns a list of clusters given all the points in the dataset and\n the current centroids.\n Input:\n points (list of lists): A list with every point in the dataset\n centroids (list of lists): A list with the current centroids\n Output:\n clusters (list of lists of lists): A List of Clusters. Each cluster\n is also a list of points in the cluster.\n \"\"\"\n clusters = [[] for f in centroids]\n\n for i, point in enumerate(points):\n point_to_centroids = []\n for j, centroid in enumerate(centroids):\n point_to_centroids.append(distance(point,centroid))\n closest_idx = np.argmin(point_to_centroids)\n clusters[closest_idx].append(point)\n\n return clusters\n\ndef update_centroids(clusters):\n \"\"\"\n Given a list of clusters (as prepared by get_clusters) get the new centroids\n Input:\n clusters (list of lists of lists): A List of Clusters. Each cluster\n is also a list of points in the cluster.\n Output:\n A (list of lists): The new centroids.\n \"\"\"\n new_centroids = []\n\n for cluster in clusters:\n new_centroids.append(np.mean(cluster,axis = 0))\n return new_centroids\n\n\n\ndef k_means(points, k, iterations=10):\n \"\"\"\n K Means Unsupervised ML Algorithm Implementation with Forgy Initialization\n Input:\n points (numpy array): a 2D Array with the data to cluster.\n k (int): The number of clusters to find\n \"\"\"\n idx = np.random.randint(len(points),size=k)\n\n centroids = points[idx,:]\n clusters = get_clusters(points,centroids)\n\n for i in range(iterations):\n\n if i % 1 == 0:\n if i == 0:\n title = \"Initialization\"\n else:\n title = \"Iteration {}\".format(i+1)\n\n show_clusters_centroids(\n clusters,\n centroids,\n title,\n )\n\n clusters = get_clusters(points,centroids)\n centroids = update_centroids(clusters)\n\n return clusters,centroids\n\n\nif __name__ == \"__main__\":\n data = load_data('./data/iris.data')\n k = 3\n\n X = np.array([f[:-1] for f in data])\n y = np.array([f[-1] for f in data])\n\n clusters,centroids = k_means(X,3)\n\n show_clusters_centroids(clusters,centroids,\"Result\", keep=True)\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show",
"numpy.array",
"numpy.mean",
"numpy.argmin"
]
] |
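The k_means solution above also runs on synthetic data without the iris file; note that k_means calls show_clusters_centroids on every iteration, so a display (or a non-interactive matplotlib backend) is assumed:

import numpy as np
np.random.seed(0)
points = np.vstack([np.random.randn(50, 2),
                    np.random.randn(50, 2) + 5.0])   # two well-separated blobs
clusters, centroids = k_means(points, 2, iterations=5)
print([len(c) for c in clusters])                     # roughly [50, 50]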
microprediction/nevergrad
|
[
"5e4c00d74e84dfb0283ab3d35dd85fde0bb49c29"
] |
[
"nevergrad/functions/base.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport inspect\nfrom pathlib import Path\nimport numbers\nimport numpy as np\nimport nevergrad.common.typing as tp\nfrom nevergrad.common import errors\nfrom nevergrad.common.errors import ( # pylint: disable=unused-import\n UnsupportedExperiment as UnsupportedExperiment,\n)\nfrom nevergrad.parametrization import parameter as p\nfrom nevergrad.optimization import multiobjective as mobj\n\nEF = tp.TypeVar(\"EF\", bound=\"ExperimentFunction\")\nME = tp.TypeVar(\"ME\", bound=\"MultiExperiment\")\n\n\ndef _reset_copy(obj: p.Parameter) -> p.Parameter:\n \"\"\"Copy a parameter and resets its random state to obtain variability\"\"\"\n out = obj.copy()\n out.random_state = None\n return out\n\n\n# pylint: disable=too-many-instance-attributes\nclass ExperimentFunction:\n \"\"\"Combines a function and its parametrization for running experiments (see benchmark subpackage)\n\n Parameters\n ----------\n function: callable\n the callable to convert\n parametrization: Parameter\n the parametrization of the function\n Notes\n -----\n - you can redefine custom \"evaluation_function\" and \"compute_pseudotime\" for custom behaviors in experiments\n - the bool/int/str/float init arguments are added as descriptors for the experiment which will serve in\n definining test cases. You can add more through \"add_descriptors\".\n - Makes sure you the \"copy()\" methods works (provides a new copy of the function *and* its parametrization)\n if you subclass ExperimentFunction since it is intensively used in benchmarks.\n By default, this will create a new instance using the same init arguments as your current instance\n (they were recorded through \"__new__\"'s magic) and apply the additional descriptors you may have added,\n as well as propagate the new parametrization *if it has a different name as the current one*.\n \"\"\"\n\n def __new__(cls: tp.Type[EF], *args: tp.Any, **kwargs: tp.Any) -> EF:\n \"\"\"Identifies initialization parameters during initialization and store them\"\"\"\n inst = object.__new__(cls)\n sig = inspect.signature(cls.__init__)\n callargs: tp.Dict[str, tp.Any] = {}\n try:\n boundargs = sig.bind(inst, *args, **kwargs)\n except TypeError:\n pass # either a problem which will be caught later or a unpickling\n else:\n boundargs.apply_defaults() # make sure we get the default non-provided arguments\n callargs = dict(boundargs.arguments)\n callargs.pop(\"self\")\n inst._auto_init = callargs\n inst._descriptors = {\n x: y for x, y in callargs.items() if isinstance(y, (str, tuple, int, float, bool))\n }\n inst._descriptors[\"function_class\"] = cls.__name__\n return inst # type: ignore\n\n def __init__(\n self: EF,\n function: tp.Callable[..., tp.Loss],\n parametrization: p.Parameter,\n ) -> None:\n assert callable(function)\n assert not hasattr(\n self, \"_initialization_kwargs\"\n ), '\"register_initialization\" was called before super().__init__'\n self._auto_init: tp.Dict[str, tp.Any] # filled by __new__\n self._descriptors: tp.Dict[str, tp.Any] # filled by __new__\n self._parametrization: p.Parameter\n self.parametrization = parametrization\n # force random state initialization\n self.multiobjective_upper_bounds: tp.Optional[np.ndarray] = None\n self.__function = function # __ to prevent overrides\n # if this is not a function bound to this very instance, add the function/callable name to the 
descriptors\n if not hasattr(function, \"__self__\") or function.__self__ != self: # type: ignore\n name = function.__name__ if hasattr(function, \"__name__\") else function.__class__.__name__\n self._descriptors.update(name=name)\n if len(self.parametrization.name) > 24:\n raise RuntimeError(\n f\"For the sake of benchmarking, please rename the current parametrization:\\n{self.parametrization!r}\\n\"\n \"to a shorter name. This way it will be more readable in the experiments.\\n\"\n 'Eg: parametrization.set_name(\"\") to just ignore it\\n'\n \"CAUTION: Make sure you set different names for different parametrization configurations if you want it \"\n \"to be used in order to differentiate between benchmarks cases.\"\n )\n\n @property\n def dimension(self) -> int:\n return self._parametrization.dimension\n\n @property\n def parametrization(self) -> p.Parameter:\n return self._parametrization\n\n @parametrization.setter\n def parametrization(self, parametrization: p.Parameter) -> None:\n self._parametrization = parametrization\n self._parametrization.freeze()\n # pylint: disable=pointless-statement\n self._parametrization.random_state # force initialization for synchronization of random state\n # # TODO investigate why this synchronization is needed\n\n @property\n def function(self) -> tp.Callable[..., tp.Loss]:\n return self.__function\n\n def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Loss:\n \"\"\"Call the function directly (equivaluent to parametrized_function.function(*args, **kwargs))\"\"\"\n return self.function(*args, **kwargs)\n\n @property\n def descriptors(self) -> tp.Dict[str, tp.Any]:\n \"\"\"Description of the function parameterization, as a dict. This base class implementation provides function_class,\n noise_level, transform and dimension\n \"\"\"\n desc = dict(self._descriptors) # Avoid external modification\n desc.update(parametrization=self.parametrization.name, dimension=self.dimension)\n return desc\n\n def add_descriptors(self, **kwargs: tp.Optional[tp.Hashable]) -> None:\n self._descriptors.update(kwargs)\n\n def __repr__(self) -> str:\n \"\"\"Shows the function name and its summary\"\"\"\n params = [f\"{x}={repr(y)}\" for x, y in sorted(self._descriptors.items())]\n return \"Instance of {}({})\".format(self.__class__.__name__, \", \".join(params))\n\n def equivalent_to(self, other: tp.Any) -> bool:\n \"\"\"Check that two instances where initialized with same settings.\n This is not meant to be used to check if functions are exactly equal\n (initialization may hold some randomness)\n This is only useful for unit testing.\n (may need to be overloaded to make faster if tests are getting slow)\n \"\"\"\n if other.__class__ != self.__class__:\n return False\n return (\n bool(self._descriptors == other._descriptors)\n and self.parametrization.name == other.parametrization.name\n )\n\n def _internal_copy(self: EF) -> EF:\n \"\"\"This is \"black magic\" which creates a new instance using the same init parameters\n that you provided and which were recorded through the __new__ method of ExperimentFunction\n \"\"\"\n # auto_init is automatically filled by __new__, aka when creating the instance\n output: EF = self.__class__(\n **{x: _reset_copy(y) if isinstance(y, p.Parameter) else y for x, y in self._auto_init.items()}\n )\n return output\n\n def copy(self: EF) -> EF:\n \"\"\"Provides a new equivalent instance of the class, possibly with\n different random initialization, to provide different equivalent test cases\n when using different seeds.\n This also checks 
that parametrization and descriptors are correct.\n You should preferably override _internal_copy\n \"\"\"\n # add descriptors present in self but not added by initialization\n # (they must have been added manually)\n output = self._internal_copy()\n keys = set(output.descriptors)\n output.add_descriptors(**{x: y for x, y in self.descriptors.items() if x not in keys})\n # parametrization may have been overriden, so let's always update it\n # Caution: only if names differ!\n if output.parametrization.name != self.parametrization.name:\n output.parametrization = _reset_copy(self.parametrization)\n # then if there are still differences, something went wrong\n if not output.equivalent_to(self):\n raise errors.ExperimentFunctionCopyError(\n f\"Copy of\\n{self}\\nwith descriptors:\\n{self._descriptors}\\nreturned non-equivalent\\n\"\n f\"{output}\\nwith descriptors\\n{output._descriptors}.\\n\\n\"\n \"This means that the auto-copy behavior of ExperimentFunction does not work.\\n\"\n \"You may want to implement your own copy method, or check implementation of \"\n \"ExperimentFunction.__new__ and copy to better understand what happens\"\n )\n # propagate other useful information # TODO a bit hacky\n output.parametrization._constraint_checkers = self.parametrization._constraint_checkers\n output.multiobjective_upper_bounds = (\n self.multiobjective_upper_bounds\n ) # TODO not sure why this is needed\n return output\n\n def compute_pseudotime( # pylint: disable=unused-argument\n self, input_parameter: tp.Any, loss: tp.Loss\n ) -> float:\n \"\"\"Computes a pseudotime used during benchmarks for mocking parallelization in a reproducible way.\n By default, each call takes 1 unit of pseudotime, but this can be modified by overriding this\n function and the pseudo time can be a function of the function inputs and output.\n\n Note: This replaces get_postponing_delay which has been aggressively deprecated\n\n Parameters\n ----------\n input_parameter: Any\n the input that was provided to the actual function\n value: float\n the output of the actual function\n\n Returns\n -------\n float\n the pseudo computation time of the call to the actual function\n \"\"\"\n return 1.0\n\n def evaluation_function(self, *recommendations: p.Parameter) -> float:\n \"\"\"Provides the evaluation crieterion for the experiment.\n In case of mono-objective, it defers to evaluation_function\n Otherwise, it uses the hypervolume.\n This function can be overriden to provide custom behaviors.\n\n Parameters\n ----------\n *pareto: Parameter\n pareto front provided by the optimizer\n \"\"\"\n\n if self.multiobjective_upper_bounds is None: # monoobjective case\n assert len(recommendations) == 1\n output = self.function(*recommendations[0].args, **recommendations[0].kwargs)\n assert isinstance(\n output, numbers.Number\n ), f\"evaluation_function can only be called on monoobjective experiments (output={output}) function={self.function}.\"\n return output\n # multiobjective case\n hypervolume = mobj.HypervolumePareto(\n upper_bounds=self.multiobjective_upper_bounds, seed=self.parametrization.random_state\n )\n for candidate in recommendations:\n hypervolume.add(candidate)\n return -hypervolume.best_volume\n\n\ndef update_leaderboard(identifier: str, loss: float, array: np.ndarray, verbose: bool = True) -> None:\n \"\"\"Handy function for storing best results for challenging functions (eg.: Photonics)\n The best results are kept in a file that is automatically updated with new data.\n This may require installing nevergrad in dev 
mode.\n\n Parameters\n ----------\n identifier: str\n the identifier of the problem\n loss: float\n the new loss, if better than the one in the file, the file will be updated\n array: np.ndarray\n the array corresponding to the loss\n verbose: bool\n whether to also print a message if the leaderboard was updated\n \"\"\"\n # pylint: disable=import-outside-toplevel\n import pandas as pd # lazzy to avoid requiring pandas for using an ExperimentFunction\n\n loss = np.round(loss, decimals=12) # this is probably already too precise for the machine\n filepath = Path(__file__).with_name(\"leaderboard.csv\")\n bests = pd.DataFrame(columns=[\"loss\", \"array\"])\n if filepath.exists():\n bests = pd.read_csv(filepath, index_col=0)\n if identifier not in bests.index:\n bests.loc[identifier, :] = (float(\"inf\"), \"\")\n try:\n if not bests.loc[identifier, \"loss\"] < loss: # works for nan\n bests.loc[identifier, \"loss\"] = loss\n string = \"[\" + \",\".join(str(x) for x in array.ravel()) + \"]\"\n bests.loc[identifier, \"array\"] = string\n bests = bests.loc[sorted(x for x in bests.index), :]\n bests.to_csv(filepath)\n if verbose:\n print(f\"New best value for {identifier}: {loss}\\nwith: {string[:80]}\")\n except Exception: # pylint: disable=broad-except\n pass # better avoir bugs for this\n\n\nclass MultiExperiment(ExperimentFunction):\n \"\"\"Pack several mono-objective experiments into a multiobjective experiment\n\n\n Parameters\n ----------\n experiments: iterable of ExperimentFunction\n\n Notes\n -----\n - packing of multiobjective experiments is not supported.\n - parametrization must match between all functions (only their name is checked as initialization)\n - there is no descriptor for the packed functions, except the name (concatenetion of packed function names).\n \"\"\"\n\n def __init__(\n self,\n experiments: tp.Iterable[ExperimentFunction],\n upper_bounds: tp.ArrayLike,\n ) -> None:\n xps = list(experiments)\n assert xps\n assert len(xps) == len({id(xp) for xp in xps}), \"All experiments must be different instances\"\n assert all(\n xp.multiobjective_upper_bounds is None for xp in xps\n ), \"Packing multiobjective xps is not supported.\"\n assert all(\n xps[0].parametrization.name == xp.parametrization.name for xp in xps[1:]\n ), \"Parametrization do not match\"\n super().__init__(self._multi_func, xps[0].parametrization)\n self.multiobjective_upper_bounds = np.array(upper_bounds)\n self._descriptors.update(name=\",\".join(xp._descriptors.get(\"name\", \"#unknown#\") for xp in xps))\n self._experiments = xps\n\n def _multi_func(self, *args: tp.Any, **kwargs: tp.Any) -> np.ndarray:\n outputs = [f(*args, **kwargs) for f in self._experiments]\n return np.array(outputs)\n\n def _internal_copy(self) -> \"MultiExperiment\":\n assert self.multiobjective_upper_bounds is not None\n return MultiExperiment([f.copy() for f in self._experiments], self.multiobjective_upper_bounds)\n"
] |
[
[
"numpy.round",
"pandas.DataFrame",
"pandas.read_csv",
"numpy.array"
]
] |
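A hedged sketch of subclassing ExperimentFunction from nevergrad/functions/base.py above; the Sphere function and its parametrization are illustrative, relying only on the contract shown (init args recorded as descriptors by __new__, short parametrization names enforced by __init__):

import numpy as np
import nevergrad as ng
from nevergrad.functions.base import ExperimentFunction

class Sphere(ExperimentFunction):
    def __init__(self, dimension: int = 2):
        param = ng.p.Array(shape=(dimension,)).set_name("sph")
        super().__init__(self._evaluate, param)

    def _evaluate(self, x: np.ndarray) -> float:
        return float(np.sum(x ** 2))

func = Sphere(3)
print(func.descriptors)   # includes dimension=3, recorded by __new__
clone = func.copy()       # equivalent instance with fresh random state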
roma-goodok/scannet_experiments
|
[
"8039fade13f9bd5664633f6a567f6785a5f9e3d2"
] |
[
"scannet_kekas/callbacks/csv_logger.py"
] |
[
 "\"A `Callback` that saves tracked metrics into a persistent file.\"\n# Contribution from devforfu: https://nbviewer.jupyter.org/gist/devforfu/ea0b3fcfe194dad323c3762492b05cae\n\nimport pandas as pd\nfrom fastai.basic_train import Learner, LearnerCallback\n\nfrom fastai_sparse.core import Any\n\n__all__ = ['CSVLoggerIouByClass']\n\n\nclass CSVLoggerIouByClass(LearnerCallback):\n \"A `LearnerCallback` that saves the per-class IoU history into CSV `filename`.\"\n def __init__(self, learn: Learner, cb_iou_mean, class_names=None, filename: str = 'iou_by_class'):\n super().__init__(learn)\n self.filename = filename\n self.path = self.learn.path / f'{filename}.csv'\n self.cb_iou_mean = cb_iou_mean\n self.class_names = class_names\n\n if self.class_names is None:\n self.class_names = [str(i) for i in range(cb_iou_mean.n_categories)]\n\n def read_logged_file(self):\n \"Read the content of the saved file.\"\n return pd.read_csv(self.path)\n\n def on_train_begin(self, **kwargs: Any) -> None:\n \"Prepare the file with metric names.\"\n self.path.parent.mkdir(parents=True, exist_ok=True)\n self.file = self.path.open('w')\n columns = ['epoch', 'datatype', 'mean_iou'] + self.class_names\n self.file.write(','.join(columns) + '\\n')\n self.file.flush()\n\n def on_epoch_end(self, epoch: int, **kwargs: Any) -> None:\n \"Add a line with the `epoch` number, mean IoU and per-class IoU.\"\n cb = self.cb_iou_mean\n for datatype in ['train', 'valid']:\n d = cb._d[datatype]\n stats = [str(epoch), datatype, f\"{d['iou']:.6f}\"]\n\n iou_per_class = d['iou_per_class']\n stats += [f'{value:.6f}' for value in iou_per_class]\n\n str_stats = ','.join(stats)\n self.file.write(str_stats + '\\n')\n self.file.flush()\n\n def on_train_end(self, **kwargs: Any) -> None:\n \"Close the file.\"\n self.file.close()\n"
] |
[
[
"pandas.read_csv"
]
] |
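Hypothetical wiring of the callback above into a training run; `learn` and `cb_iou` stand in for a fastai Learner and an IoU-tracking callback built elsewhere, and neither comes from the source:

class_names = ['wall', 'floor', 'chair']
iou_logger = CSVLoggerIouByClass(learn, cb_iou, class_names=class_names)
learn.fit(10, callbacks=[iou_logger])
print(iou_logger.read_logged_file().head())  # columns: epoch, datatype, mean_iou, per-class IoUs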
ilyakz/alpha-game2
|
[
"fe492a59cd99b99b74979f81d407156dc7d0a08d"
] |
[
"cube_tic_tac_toe_3d/CubeTicTacToeGame.py"
] |
[
 "from __future__ import print_function\nfrom cube_tic_tac_toe_3d.CubeTicTacToeLogic import Board\nfrom framework.Game import Game\nimport numpy as np\nimport pygame\nimport math\n\n\nclass CubeTicTacToeGame(Game):\n \"\"\"\n Game class for 3D cubic tic-tac-toe (crosses and noughts). Subclass of Game.\n \"\"\"\n def __init__(self, n, players=(), ui=False):\n self.score = [0, 0]\n self.n = n\n self.tile_size = 40\n self.circle = self.load_picture(\"./cube_tic_tac_toe_3d/images/circle.bmp\")\n self.cross = self.load_picture(\"./cube_tic_tac_toe_3d/images/cross.bmp\")\n self.gray = self.load_picture(\"./cube_tic_tac_toe_3d/images/empty.png\")\n self.players = players\n if ui:\n pygame.init()\n self.ui = ui\n self.surf = pygame.display.set_mode((self.ui['width'], self.ui['height']))\n self.surf.fill((255, 255, 255))\n\n def getInitBoard(self):\n \"\"\"\n Returns the initial board, converted to a numpy array.\n \"\"\"\n b = Board(self.n)\n #print(b.pieces)\n return np.array(b.pieces)\n\n def getBoardSize(self):\n \"\"\"\n Returns a tuple (z, y, x) with the board dimensions.\n \"\"\"\n return (self.n, self.n, self.n)\n \n\n def getActionSize(self):\n \"\"\"\n Returns the number of possible 
actions.\n \"\"\"\n return self.n * self.n * self.n + 1\n\n def getNextState(self, board, player, action):\n \"\"\"\n Returns the next board state.\n\n :param board: The current board.\n :param player: The current player.\n :param action:\n The action to apply.\n The action must be a legal 
move.\n :return: the next board state after the action is performed.\n \"\"\"\n if action == self.n * self.n * self.n:\n return (board, -player)\n b = Board(self.n)\n b.pieces = np.copy(board)\n move = (int((action / self.n) / self.n), int((action / self.n) % self.n), int((action % self.n) % self.n))\n #print(\"MOVE:\", move)\n b.execute_move(move, player)\n return (b.pieces, -player)\n\n def getValidMoves(self, board, player):\n \"\"\"\n Returns a numpy array with the legal 
moves for the board state\n board and the current player player.\n\n :param board: The current board state.\n :param player: The current player.\n \"\"\"\n valids = [0] * self.getActionSize()\n b = Board(self.n)\n b.pieces = np.copy(board)\n legal_moves = b.get_legal_moves(player)\n if len(legal_moves) == 0:\n valids[-1] = 1\n return np.array(valids)\n for z, y, x in legal_moves:\n valids[self.n * self.n * z + self.n * y + x] = 1\n return np.array(valids)\n \n def getEstimatePos(self, board, player):\n \"\"\"\n\t\t\t1. sum along a row \n\t\t\t2. sum along a column \n\t\t\t3. sum along the depth \n\t\t\t4. anti-diagonal\n\t\t\t5. main diagonal\n\t\t\t6. diagonal downwards from the top row\n\t\t\t7. diagonal downwards from the bottom row\n\t\t\t8. depth diagonal from the top left to the bottom right within one row\n\t\t\t9. diagonal from the top right to the bottom left within one row, along the depth\n\t\t\t10. diagonal from the top left to the bottom right in depth and in width\n\t\t\t11. diagonal from the top right to the bottom left in depth and in width\n\t\t\t12. diagonal from the bottom left (in the top layer) to the top right (in the bottom layer)\n 13. diagonal from the bottom right (in the top layer) to the top left (in the bottom layer)\n \"\"\"\t\n b = Board(self.n)\n b.pieces = np.copy(board)\n n = self.n\n\n \n valid_moves_pl1 = [0]*self.getActionSize()\n valid_moves_pl2 = [0]*self.getActionSize()\n #print(player)\n for z in range(n):\n for y in range(n):\n for x in range(n):\n if b.pieces[z][y][x] == 0:\n valid_moves_pl1[z * n * n + y * n + x ] = +(sum([b.pieces[z][j][x] == player for j in range(n)]) + \\\n sum([b.pieces[z][y][i] == player for i in range(n)]) + \\\n sum([b.pieces[k][y][x] == player for k in range(n)]) + \\\n ((n - 1 - y) == x) * sum([b.pieces[z][(n - 1 - i)][i] == player for i in range(n)]) +\\\n (y == x) * sum([b.pieces[z][i][i] == player for i in range(n)]) + \\\n (z == y) * sum([b.pieces[k][k][x] == player for k in range(n)]) + \\\n (z == (n - 1 - y)) * sum([b.pieces[k][(n - 1 - k)][x] == player for k in range(n)]) + \\\n (z == x) * sum([b.pieces[k][y][k] == player for k in range(n)]) + \\\n (z == (n - 1 - x)) * sum([b.pieces[k][y][n - 1 - k] == player for k in range(n)]) + \\\n (z == y == x) * sum([b.pieces[k][k][k] == player for k in range(n)]) + \\\n (z == y == (n - 1 - x)) * sum([b.pieces[k][k][n - 1 - k] == player for k in range(n)]) + \\\n (z == (n - 1 - y) == x) * sum([b.pieces[k][(n - 1 - k)][k] == player for k in range(n)]) + \\\n (z == (n - 1 - y) == (n - 1 - x)) * sum([b.pieces[k][(n - 1 - k)][n - 1 - k] == player for k in range(n)]))\n valid_moves_pl2[z * n * n + y * n + x ] = +(sum([b.pieces[z][j][x] == -player for j in range(n)]) + \\\n sum([b.pieces[z][y][i] == -player for i in range(n)]) + \\\n sum([b.pieces[k][y][x] == -player for k in range(n)]) + \\\n ((n - 1 - y) == x) * sum([b.pieces[z][(n - 1 - i)][i] == -player for i in range(n)]) +\\\n (y == x) * sum([b.pieces[z][i][i] == -player for i in range(n)]) + \\\n (z == y) * sum([b.pieces[k][k][x] == -player for k in range(n)]) + \\\n (z == (n - 1 - y)) * sum([b.pieces[k][(n - 1 - k)][x] == -player for k in range(n)]) + \\\n (z == x) * sum([b.pieces[k][y][k] == -player for k in range(n)]) + \\\n (z == (n - 1 - x)) * sum([b.pieces[k][y][n - 1 - k] == -player for k in range(n)]) + \\\n (z == y == x) * sum([b.pieces[k][k][k] == -player for k in range(n)]) + \\\n (z == y == (n - 1 - x)) * sum([b.pieces[k][k][n - 1 - k] == -player for k in range(n)]) + \\\n (z == (n - 1 - y) == x) * sum([b.pieces[k][(n - 1 - k)][k] == -player for k in range(n)]) + \\\n (z == (n - 1 - y) == (n - 1 - x)) * sum([b.pieces[k][(n - 1 - k)][n - 1 - k] == -player for k in range(n)]))\n else:\n continue\n \n #print(valid_moves_pl2)\n #v = [1] * len(board)\n #return [(board,v)]\n return (valid_moves_pl1, valid_moves_pl2)\n \"\"\"\n def getGameEnded(self, board, player):\n # return 0 if not ended, 1 if player 1 won, -1 if player 1 lost\n # player = 1\n b = Board(self.n)\n b.pieces = np.copy(board)\n #print(b.pieces)\n if b.is_win(player):\n return 1\n if b.is_win(-player):\n return -1\n if b.has_legal_moves():\n return 0\n # draw has a very little value \n return 1e-4\n \n \"\"\"\n def getGameEnded(self, board, player):\n\n# here we have the actual win rules\n # needs further refinement\n \n b = Board(self.n)\n b.pieces = np.copy(board)\n status = 1e-4\n n = self.n\n if any([any([sum([b.pieces[k][j][i] for i in range(n)]) == self.n for j in range(n)]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][j][i] for j in range(n)]) == self.n for i in range(n)]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][j][i]for k in range(n)]) == self.n for i in range(n)]) == 1 for j in range(n)]) or \\\n any([any([sum([b.pieces[k][i][i] for i in
range(n)]) == self.n]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][(n - 1 - i)][i] for i in range(n)]) == self.n]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][k][i] for k in range(n)]) == self.n]) == 1 for i in range(n)]) or \\\n any([any([sum([b.pieces[k][(n - 1 - k)][i] for k in range(n)]) == self.n]) == 1 for i in range(n)]) or \\\n any([any([sum([b.pieces[k][i][k] for k in range(n)]) == self.n]) == 1 for i in range(n)]) or \\\n any([any([sum([b.pieces[k][i][(n - 1 - k)] for k in range(n)]) == self.n]) == 1 for i in range(n)]) or \\\n sum([b.pieces[k][k][k] for k in range(n)]) == self.n or \\\n sum([b.pieces[k][k][(n - 1 - k)] for k in range(n)]) == self.n or \\\n sum([b.pieces[k][(n - 1 - k)][k] for k in range(n)]) == self.n or \\\n sum([b.pieces[k][(n - 1 - k)][(n - 1 - k)] for k in range(n)]) == self.n :\n status = 1\n elif any([any([sum([b.pieces[k][j][i] for i in range(n)]) == -self.n for j in range(n)]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][j][i] for j in range(n)]) == -self.n for i in range(n)]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][j][i] for k in range(n)]) == -self.n for i in range(n)]) == 1 for j in range(n)]) or \\\n any([any([sum([b.pieces[k][i][i] for i in range(n)]) == -self.n]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][(n - 1 - i)][i] for i in range(n)]) == -self.n]) == 1 for k in range(n)]) or \\\n any([any([sum([b.pieces[k][k][i] for k in range(n)]) == -self.n]) == 1 for i in range(n)]) or \\\n any([any([sum([b.pieces[k][(n - 1 - k)][i] for k in range(n)]) == -self.n]) == 1 for i in range(n)]) or \\\n any([any([sum([b.pieces[k][i][k] for k in range(n)]) == -self.n]) == 1 for i in range(n)]) or \\\n any([any([sum([b.pieces[k][i][(n - 1 - k)] for k in range(n)]) == -self.n]) == 1 for i in range(n)]) or \\\n sum([b.pieces[k][k][k] for k in range(n)]) == -self.n or \\\n sum([b.pieces[k][k][(n - 1 - k)] for k in range(n)]) == -self.n or \\\n sum([b.pieces[k][(n - 1 - k)][k] for k in range(n)]) == -self.n or \\\n sum([b.pieces[k][(n - 1 - k)][(n - 1 - k)] for k in range(n)]) == -self.n :\n status = -1\n if status != 1e-4:\n return status\n if any([b.pieces[k][i][j] == 0 for k in range(self.n) for i in range(self.n) for j in range(self.n)]):\n status = 0\n return status\n \n \n \n def getCanonicalForm(self, board, player):\n #print(board)\n return player * board\n\n def getSymmetries(self, board, pi):\n assert(len(pi) == self.getActionSize()) \n pi = np.copy(pi)\n #l = [(board, pi)]\n #return l\n #return [(board, pi), (board[:,::-1,], pi[::-1])]\n \n pi_board = np.reshape(pi[:-1], (self.n,self.n, self.n))\n # print(pi)\n li = []\n rot1 = {(1,0),(2,0)}\n\n for i in range(1, 5):\n for m in range(1, 5):\n for j in [True, False]:\n newB = np.rot90(np.rot90(board, m, (1,-1)), i, (0, 2))\n newPi = np.rot90(np.rot90(pi_board, m, (1,-1)), i, (0,2))\n if j:\n newB = np.fliplr(newB)\n newPi = np.fliplr(newPi)\n li += [(newB, list(newPi.ravel()) + [pi[-1]])]\n for i in {1,3}:\n for m in range(1, 5):\n for j in [True, False]:\n newB = np.rot90(np.rot90(board, m, (1,-1)), i, (0, 1))\n newPi = np.rot90(np.rot90(pi_board, m, (1,-1)), i, (0, 1))\n if j:\n newB = np.fliplr(newB)\n newPi = np.fliplr(newPi)\n li += [(newB, list(newPi.ravel()) + [pi[-1]])]\n \n \n #print(li)\n \n return li\n\n\n def stringRepresentation(self, board):\n #print(board)\n return board.tostring()\n\n def getScore(self, board, player):\n b = Board(self.n)\n b.pieces = np.copy(board)\n return b.count_diff(player)\n\n def 
getCount(self, board, player):\n b = Board(self.n)\n b.pieces = np.copy(board)\n return b.get_count(player)\n\n def load_picture(self, filepath):\n \"\"\"\n :param filepath: path to file\n :return: picture: picture with size TILESIZE\n \"\"\"\n picture = pygame.image.load(filepath)\n picture = pygame.transform.scale(picture, (self.tile_size, self.tile_size))\n return picture\n\n def displays(self, board):\n #n = board.shape[0]\n n = self.n\n self.surf.fill((255, 255, 255))\n TILESIZE = self.tile_size\n for x in range(n):\n for y in range(n):\n for z in range(n):\t\t\t\t\n piece = board[z][y][x]\n \n if piece == 1:\n picture = self.cross\n elif piece == -1:\n picture = self.circle\n else:\n picture = self.gray\n rect = pygame.Rect(x * TILESIZE, z * TILESIZE * n + y * TILESIZE, TILESIZE, TILESIZE)\n \n self.surf.blit(picture, rect)\n pygame.draw.rect(self.surf, (240, 240, 240), rect, 1)\n \n if z != 0:\n pygame.draw.line(self.surf, (255, 160, 0), (0, TILESIZE * n * z ), (TILESIZE * n,TILESIZE * n * z), 5)\n\t\t\t\t\t\t\n draw_msg(self.surf, \"Crosses: \" + str(self.getCount(board, 1)),\n (6 * self.tile_size, 0 * self.tile_size))\n draw_msg(self.surf, \"Noughts: \" + str(self.getCount(board, -1)),\n (6 * self.tile_size, int(1 * self.tile_size)))\n draw_msg(self.surf, \"Score \" + self.score[0].__str__() + \" : \" + self.score[1].__str__(),\n (6 * self.tile_size, int(2 * self.tile_size)))\n draw_msg(self.surf, \"Player 1: \" + \"Noughts\" if self.players[0] == -1 else \"Player 1: \" + \"Crosses\",\n (6 * self.tile_size, int(4 * self.tile_size)))\n draw_msg(self.surf, \"Player 2: \" + \"Noughts\" if self.players[1] == -1 else \"Player 2: \" + \"Crosses\",\n (6 * self.tile_size, int(5 * self.tile_size)))\n pygame.display.update()\n\n\ndef display(board):\n print(board)\n\n\ndef draw_msg(surf, text, pos, fontsize=40, color_param=(0, 0, 0), bold=False, erase=False):\n myfont = pygame.font.SysFont(\"arial\", fontsize, bold)\n if erase:\n surf.fill(pygame.Color(\"white\"), (pos[0], pos[1], surf.get_width(), pos[1] + len(text) * fontsize))\n label = myfont.render(text, 1, color_param)\n surf.blit(label, pos)\n"
] |
[
[
"numpy.rot90",
"numpy.array",
"numpy.reshape",
"numpy.copy",
"numpy.fliplr"
]
] |
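The action encoding used by getNextState/getValidMoves above packs a cell (z, y, x) into a single index; the decode below mirrors the integer arithmetic in getNextState:

n = 4
z, y, x = 2, 1, 3
action = n * n * z + n * y + x
move = (int((action / n) / n), int((action / n) % n), int((action % n) % n))
print(action, move)   # 39 (2, 1, 3): the decode recovers (z, y, x)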
sophiaalexander/GANs-for-High-Schoolers
|
[
"b33c36a399f49f5b48d76dd8141859990aaba285"
] |
[
"netdissect/renormalize.py"
] |
[
"import numpy, torch, PIL\nfrom torchvision import transforms\n\ndef as_tensor(data, source=None, mode='zc'):\n renorm = renormalizer(source=source, mode=mode)\n return renorm(data)\n\ndef as_image(data, source='zc', mode='byte'):\n assert len(data.shape) == 3\n renorm = renormalizer(source=source, mode=mode)\n return PIL.Image.fromarray(renorm(data).\n permute(1,2,0).cpu().numpy())\n\ndef renormalizer(source=None, mode='zc'):\n '''\n Returns a function that imposes a standard normalization on\n the image data. The returned renormalizer operates on either\n 3d tensor (single image) or 4d tensor (image batch) data.\n The normalization mode choices are:\n\n zc (default) - zero centered [-1..1]\n pt - pytorch [0..1]\n imagenet - zero mean, unit stdev imagenet stats (approx [-2.1...2.6])\n byte - as from an image file, [0..255]\n\n If a source is provided (a dataset or transform), then, the renormalizer\n first reverses any normalization found in the data source before\n imposing the specified normalization. When no source is provided,\n the input data is assumed to be pytorch-normalized (range [0..1]).\n '''\n if isinstance(source, str):\n oldoffset, oldscale = OFFSET_SCALE[source]\n else:\n normalizer = find_normalizer(source)\n oldoffset, oldscale = (\n (normalizer.mean, normalizer.std) if normalizer is not None\n else OFFSET_SCALE['pt'])\n newoffset, newscale = (mode if isinstance(mode, tuple)\n else OFFSET_SCALE[mode])\n return Renormalizer(oldoffset, oldscale, newoffset, newscale,\n tobyte=(mode == 'byte'))\n\n# The three commonly-seed image normalization schemes.\nOFFSET_SCALE=dict(\n pt=([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),\n zc=([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n imagenet=([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n byte=([0.0, 0.0, 0.0], [1.0/255, 1.0/255, 1.0/255]))\n\nNORMALIZER={k: transforms.Normalize(*OFFSET_SCALE[k]) for k in OFFSET_SCALE}\n\ndef find_normalizer(source=None):\n '''\n Crawl around the transforms attached to a dataset looking for a\n Normalize transform to return.\n '''\n if source is None:\n return None\n if isinstance(source, (transforms.Normalize, Renormalizer)):\n return source\n t = getattr(source, 'transform', None)\n if t is not None:\n return find_normalizer(t)\n ts = getattr(source, 'transforms', None)\n if ts is not None:\n for t in reversed(ts):\n result = find_normalizer(t)\n if result is not None:\n return result\n return None\n\nclass Renormalizer:\n def __init__(self, oldoffset, oldscale, newoffset, newscale, tobyte=False):\n self.mul = torch.from_numpy(\n numpy.array(oldscale) / numpy.array(newscale))\n self.add = torch.from_numpy(\n (numpy.array(oldoffset) - numpy.array(newoffset))\n / numpy.array(newscale))\n self.tobyte = tobyte\n # Store these away to allow the data to be renormalized again\n self.mean = newoffset\n self.std = newscale\n\n def __call__(self, data):\n mul, add = [d.to(data.device, data.dtype) for d in [self.mul, self.add]]\n if data.ndimension() == 3:\n mul, add = [d[:, None, None] for d in [mul, add]]\n elif data.ndimension() == 4:\n mul, add = [d[None, :, None, None] for d in [mul, add]]\n result = data.mul(mul).add_(add)\n if self.tobyte:\n result = result.clamp(0, 255).byte()\n return result\n"
] |
[
[
"numpy.array"
]
] |
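A short usage sketch for the `renormalizer()` factory defined in this entry's source, assuming the module is importable as `netdissect.renormalize` (per the `file_path` column) and that torchvision's dependencies are installed:

```python
import torch
from netdissect import renormalize  # import path assumed from file_path

# Convert imagenet-normalized activations back to byte-range pixel values.
renorm = renormalize.renormalizer(source='imagenet', mode='byte')
batch = torch.randn(4, 3, 64, 64)    # stand-in for imagenet-normalized images
pixels = renorm(batch)               # uint8 tensor, clamped to [0, 255]
print(pixels.dtype, pixels.shape)    # torch.uint8 torch.Size([4, 3, 64, 64])
```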
conscienceli/node21_detection_baseline
|
[
"d9eadaf1b0a81320b5c38d8b8a74eec86aa0d80a"
] |
[
"training_utils/scouter/tools/prepare_things.py"
] |
[
"import torch\nimport os\nimport torch.distributed as dist\nfrom collections import defaultdict, deque\nfrom torch.utils.data import DataLoader\nfrom prefetch_generator import BackgroundGenerator\n\n\ndef init_distributed_mode(args):\n if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n elif 'SLURM_PROCID' in os.environ:\n args.rank = int(os.environ['SLURM_PROCID'])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print('Not using distributed mode')\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = 'nccl'\n print('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\n torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)\n\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median,\n avg=self.avg,\n global_avg=self.global_avg,\n max=self.max,\n value=self.value)\n\n\nclass DataLoaderX(DataLoader):\n def __iter__(self):\n return BackgroundGenerator(super().__iter__())\n\n\ndef get_name(root, mode_folder=True):\n for root, dirs, file in os.walk(root):\n if mode_folder:\n return sorted(dirs)\n else:\n return sorted(file)"
] |
[
[
"torch.distributed.get_world_size",
"torch.distributed.is_available",
"torch.distributed.init_process_group",
"torch.save",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.is_initialized",
"torch.tensor",
"torch.distributed.all_reduce",
"torch.distributed.get_rank",
"torch.distributed.barrier"
]
] |
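The `SmoothedValue` tracker in this entry is usable stand-alone in a single process (no distributed synchronization is exercised). A minimal sketch, assuming the module imports as `tools.prepare_things` per the `file_path` column:

```python
from tools.prepare_things import SmoothedValue  # import path assumed

meter = SmoothedValue(window_size=20)
for loss in [0.9, 0.7, 0.65, 0.6]:
    meter.update(loss)

# Default fmt is "{median:.4f} ({global_avg:.4f})".
print(str(meter))  # -> "0.6500 (0.7125)"
```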
GeneKong/wavy
|
[
"94598d65477c7479ae509b59ef5f64c1976c0589"
] |
[
"test/unit_tests/test_wave_file.py"
] |
[
"import numpy\nimport pytest\nfrom re import escape as esc\nfrom wavy import *\n\n\[email protected]('sample_width, data, channels, tags', [\n (8, numpy.array([1, 2], dtype=numpy.uint8), 1, None),\n (16, numpy.array([1, 2], dtype=numpy.int16), 1, None),\n (24, numpy.array([1, 2], dtype=numpy.int32), 1, None),\n (32, numpy.array([1, 2], dtype=numpy.int32), 1, None),\n (32, numpy.array([1, 2], dtype=numpy.float32), 1, None),\n (64, numpy.array([1, 2], dtype=numpy.float64), 1, None),\n (32, numpy.array([1, 2], dtype=numpy.int32), 1, None),\n (32, numpy.array([1, 2, 3, 4], dtype=numpy.int32).reshape(-1, 2), 2, Tags()),\n])\ndef test_wave_file(sample_width, data, channels, tags):\n \"\"\"\n Test that WaveFile is created correctly.\n \"\"\"\n wav_file = WaveFile(sample_width, 100, data, tags)\n\n assert wav_file.sample_width == sample_width\n assert wav_file.framerate == 100\n assert wav_file.n_channels == channels\n assert wav_file.n_frames == 2\n assert numpy.array_equal(wav_file.data, data)\n assert wav_file.tags == tags\n\n\[email protected]('sample_width, data, tags, error', [\n (None, [], None, \"Argument 'data' must be of type numpy.ndarray.\"),\n (8, numpy.array([], dtype=numpy.uint8), None, \"Data array cannot be empty.\"),\n (16, numpy.array([1], dtype=numpy.float16), None, \"Data array dtype 'float16' is not supported.\"),\n (16, numpy.array([1], dtype=numpy.uint8), None, \"Sample width of '16' is not supported for dtype 'uint8'.\"),\n (8, numpy.array([1, 2], dtype=numpy.uint8), 'Tags', \"Argument 'tags' must be of type 'wavy.Tags'.\"),\n])\ndef test_wave_file_invalid_args(sample_width, data, tags, error):\n \"\"\"\n Test that WaveFile is created correctly.\n \"\"\"\n with pytest.raises(wavy.WaveValueError, match=esc(error)):\n WaveFile(sample_width, 100, data, tags)\n"
] |
[
[
"numpy.array",
"numpy.array_equal"
]
] |
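The parametrized tests above pin down the constructor order `WaveFile(sample_width, framerate, data, tags)`. A minimal sketch of the same call, assuming the `wavy` package is installed:

```python
import numpy
from wavy import WaveFile

data = numpy.array([0, 64, 128, 255], dtype=numpy.uint8)  # 4 mono frames
wav = WaveFile(8, 44100, data, None)                      # 8-bit, 44.1 kHz
print(wav.n_channels, wav.n_frames, wav.framerate)        # 1 4 44100
```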
H-B-P/censorwork
|
[
"65731d2360d4ac78a413781e130972c75aa8ee35"
] |
[
"gamma/k_and_t/gen_data.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nimport random\n\nc_k = 4\nm_k = 0.5\n\nc_t = 6\nm_t = 0.5\n\ncensor_mean = 30\ncensor_sig = 1\n\npossible_xes = [0,1,2,3,4,5,6,7,10]\n\nnrows = 10000\n\nx = np.random.choice(possible_xes, nrows)\n\n\nk = c_k + m_k*x\nt = c_t + m_t*x\n\ntrue_y = np.random.gamma(k,t,nrows)\ncensor_y = np.random.normal(censor_mean, censor_sig, nrows)\n\ndfDict = {'x':x, 'true_y':true_y, 'censor_y': censor_y}\n\ndf = pd.DataFrame(dfDict)\n\ndf['y'] = df[['true_y','censor_y']].max(axis=1)\n\ndf['censored'] = df['censor_y']>df['true_y']\n\ndf.to_csv('gamma.csv')\n"
] |
[
[
"numpy.random.gamma",
"numpy.random.normal",
"numpy.random.choice",
"pandas.DataFrame"
]
] |
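The censoring rule in this script reports `y = max(true_y, censor_y)` and flags a row as censored when the censor draw exceeds the true draw. A tiny deterministic rerun of just that step:

```python
import pandas as pd

# Two hand-picked rows instead of the script's random gamma/normal draws.
df = pd.DataFrame({'true_y': [10.0, 40.0], 'censor_y': [30.0, 30.0]})
df['y'] = df[['true_y', 'censor_y']].max(axis=1)
df['censored'] = df['censor_y'] > df['true_y']
print(df)  # row 0 is censored at 30.0; row 1 keeps its true value 40.0
```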
ericmjl/openfold
|
[
"3ea45f90dbd314095d56bcf033e515555092dd15"
] |
[
"openfold/np/residue_constants.py"
] |
[
"# Copyright 2021 AlQuraishi Laboratory\n# Copyright 2021 DeepMind Technologies Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Constants used in AlphaFold.\"\"\"\n\nimport collections\nimport functools\nfrom typing import Mapping, List, Tuple\nfrom importlib import resources\n\nimport numpy as np\nimport tree\n\n# Internal import (35fd).\n\n\n# Distance from one CA to next CA [trans configuration: omega = 180].\nca_ca = 3.80209737096\n\n# Format: The list for each AA type contains chi1, chi2, chi3, chi4 in\n# this order (or a relevant subset from chi1 onwards). ALA and GLY don't have\n# chi angles so their chi angle lists are empty.\nchi_angles_atoms = {\n \"ALA\": [],\n # Chi5 in arginine is always 0 +- 5 degrees, so ignore it.\n \"ARG\": [\n [\"N\", \"CA\", \"CB\", \"CG\"],\n [\"CA\", \"CB\", \"CG\", \"CD\"],\n [\"CB\", \"CG\", \"CD\", \"NE\"],\n [\"CG\", \"CD\", \"NE\", \"CZ\"],\n ],\n \"ASN\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"OD1\"]],\n \"ASP\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"OD1\"]],\n \"CYS\": [[\"N\", \"CA\", \"CB\", \"SG\"]],\n \"GLN\": [\n [\"N\", \"CA\", \"CB\", \"CG\"],\n [\"CA\", \"CB\", \"CG\", \"CD\"],\n [\"CB\", \"CG\", \"CD\", \"OE1\"],\n ],\n \"GLU\": [\n [\"N\", \"CA\", \"CB\", \"CG\"],\n [\"CA\", \"CB\", \"CG\", \"CD\"],\n [\"CB\", \"CG\", \"CD\", \"OE1\"],\n ],\n \"GLY\": [],\n \"HIS\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"ND1\"]],\n \"ILE\": [[\"N\", \"CA\", \"CB\", \"CG1\"], [\"CA\", \"CB\", \"CG1\", \"CD1\"]],\n \"LEU\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"CD1\"]],\n \"LYS\": [\n [\"N\", \"CA\", \"CB\", \"CG\"],\n [\"CA\", \"CB\", \"CG\", \"CD\"],\n [\"CB\", \"CG\", \"CD\", \"CE\"],\n [\"CG\", \"CD\", \"CE\", \"NZ\"],\n ],\n \"MET\": [\n [\"N\", \"CA\", \"CB\", \"CG\"],\n [\"CA\", \"CB\", \"CG\", \"SD\"],\n [\"CB\", \"CG\", \"SD\", \"CE\"],\n ],\n \"PHE\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"CD1\"]],\n \"PRO\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"CD\"]],\n \"SER\": [[\"N\", \"CA\", \"CB\", \"OG\"]],\n \"THR\": [[\"N\", \"CA\", \"CB\", \"OG1\"]],\n \"TRP\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"CD1\"]],\n \"TYR\": [[\"N\", \"CA\", \"CB\", \"CG\"], [\"CA\", \"CB\", \"CG\", \"CD1\"]],\n \"VAL\": [[\"N\", \"CA\", \"CB\", \"CG1\"]],\n}\n\n# If chi angles given in fixed-length array, this matrix determines how to mask\n# them for each AA type. 
The order is as per restype_order (see below).\nchi_angles_mask = [\n [0.0, 0.0, 0.0, 0.0], # ALA\n [1.0, 1.0, 1.0, 1.0], # ARG\n [1.0, 1.0, 0.0, 0.0], # ASN\n [1.0, 1.0, 0.0, 0.0], # ASP\n [1.0, 0.0, 0.0, 0.0], # CYS\n [1.0, 1.0, 1.0, 0.0], # GLN\n [1.0, 1.0, 1.0, 0.0], # GLU\n [0.0, 0.0, 0.0, 0.0], # GLY\n [1.0, 1.0, 0.0, 0.0], # HIS\n [1.0, 1.0, 0.0, 0.0], # ILE\n [1.0, 1.0, 0.0, 0.0], # LEU\n [1.0, 1.0, 1.0, 1.0], # LYS\n [1.0, 1.0, 1.0, 0.0], # MET\n [1.0, 1.0, 0.0, 0.0], # PHE\n [1.0, 1.0, 0.0, 0.0], # PRO\n [1.0, 0.0, 0.0, 0.0], # SER\n [1.0, 0.0, 0.0, 0.0], # THR\n [1.0, 1.0, 0.0, 0.0], # TRP\n [1.0, 1.0, 0.0, 0.0], # TYR\n [1.0, 0.0, 0.0, 0.0], # VAL\n]\n\n# The following chi angles are pi periodic: they can be rotated by a multiple\n# of pi without affecting the structure.\nchi_pi_periodic = [\n [0.0, 0.0, 0.0, 0.0], # ALA\n [0.0, 0.0, 0.0, 0.0], # ARG\n [0.0, 0.0, 0.0, 0.0], # ASN\n [0.0, 1.0, 0.0, 0.0], # ASP\n [0.0, 0.0, 0.0, 0.0], # CYS\n [0.0, 0.0, 0.0, 0.0], # GLN\n [0.0, 0.0, 1.0, 0.0], # GLU\n [0.0, 0.0, 0.0, 0.0], # GLY\n [0.0, 0.0, 0.0, 0.0], # HIS\n [0.0, 0.0, 0.0, 0.0], # ILE\n [0.0, 0.0, 0.0, 0.0], # LEU\n [0.0, 0.0, 0.0, 0.0], # LYS\n [0.0, 0.0, 0.0, 0.0], # MET\n [0.0, 1.0, 0.0, 0.0], # PHE\n [0.0, 0.0, 0.0, 0.0], # PRO\n [0.0, 0.0, 0.0, 0.0], # SER\n [0.0, 0.0, 0.0, 0.0], # THR\n [0.0, 0.0, 0.0, 0.0], # TRP\n [0.0, 1.0, 0.0, 0.0], # TYR\n [0.0, 0.0, 0.0, 0.0], # VAL\n [0.0, 0.0, 0.0, 0.0], # UNK\n]\n\n# Atoms positions relative to the 8 rigid groups, defined by the pre-omega, phi,\n# psi and chi angles:\n# 0: 'backbone group',\n# 1: 'pre-omega-group', (empty)\n# 2: 'phi-group', (currently empty, because it defines only hydrogens)\n# 3: 'psi-group',\n# 4,5,6,7: 'chi1,2,3,4-group'\n# The atom positions are relative to the axis-end-atom of the corresponding\n# rotation axis. 
The x-axis is in direction of the rotation axis, and the y-axis\n# is defined such that the dihedral-angle-definiting atom (the last entry in\n# chi_angles_atoms above) is in the xy-plane (with a positive y-coordinate).\n# format: [atomname, group_idx, rel_position]\nrigid_group_atom_positions = {\n \"ALA\": [\n [\"N\", 0, (-0.525, 1.363, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.526, -0.000, -0.000)],\n [\"CB\", 0, (-0.529, -0.774, -1.205)],\n [\"O\", 3, (0.627, 1.062, 0.000)],\n ],\n \"ARG\": [\n [\"N\", 0, (-0.524, 1.362, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.525, -0.000, -0.000)],\n [\"CB\", 0, (-0.524, -0.778, -1.209)],\n [\"O\", 3, (0.626, 1.062, 0.000)],\n [\"CG\", 4, (0.616, 1.390, -0.000)],\n [\"CD\", 5, (0.564, 1.414, 0.000)],\n [\"NE\", 6, (0.539, 1.357, -0.000)],\n [\"NH1\", 7, (0.206, 2.301, 0.000)],\n [\"NH2\", 7, (2.078, 0.978, -0.000)],\n [\"CZ\", 7, (0.758, 1.093, -0.000)],\n ],\n \"ASN\": [\n [\"N\", 0, (-0.536, 1.357, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.526, -0.000, -0.000)],\n [\"CB\", 0, (-0.531, -0.787, -1.200)],\n [\"O\", 3, (0.625, 1.062, 0.000)],\n [\"CG\", 4, (0.584, 1.399, 0.000)],\n [\"ND2\", 5, (0.593, -1.188, 0.001)],\n [\"OD1\", 5, (0.633, 1.059, 0.000)],\n ],\n \"ASP\": [\n [\"N\", 0, (-0.525, 1.362, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.527, 0.000, -0.000)],\n [\"CB\", 0, (-0.526, -0.778, -1.208)],\n [\"O\", 3, (0.626, 1.062, -0.000)],\n [\"CG\", 4, (0.593, 1.398, -0.000)],\n [\"OD1\", 5, (0.610, 1.091, 0.000)],\n [\"OD2\", 5, (0.592, -1.101, -0.003)],\n ],\n \"CYS\": [\n [\"N\", 0, (-0.522, 1.362, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.524, 0.000, 0.000)],\n [\"CB\", 0, (-0.519, -0.773, -1.212)],\n [\"O\", 3, (0.625, 1.062, -0.000)],\n [\"SG\", 4, (0.728, 1.653, 0.000)],\n ],\n \"GLN\": [\n [\"N\", 0, (-0.526, 1.361, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.526, 0.000, 0.000)],\n [\"CB\", 0, (-0.525, -0.779, -1.207)],\n [\"O\", 3, (0.626, 1.062, -0.000)],\n [\"CG\", 4, (0.615, 1.393, 0.000)],\n [\"CD\", 5, (0.587, 1.399, -0.000)],\n [\"NE2\", 6, (0.593, -1.189, -0.001)],\n [\"OE1\", 6, (0.634, 1.060, 0.000)],\n ],\n \"GLU\": [\n [\"N\", 0, (-0.528, 1.361, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.526, -0.000, -0.000)],\n [\"CB\", 0, (-0.526, -0.781, -1.207)],\n [\"O\", 3, (0.626, 1.062, 0.000)],\n [\"CG\", 4, (0.615, 1.392, 0.000)],\n [\"CD\", 5, (0.600, 1.397, 0.000)],\n [\"OE1\", 6, (0.607, 1.095, -0.000)],\n [\"OE2\", 6, (0.589, -1.104, -0.001)],\n ],\n \"GLY\": [\n [\"N\", 0, (-0.572, 1.337, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.517, -0.000, -0.000)],\n [\"O\", 3, (0.626, 1.062, -0.000)],\n ],\n \"HIS\": [\n [\"N\", 0, (-0.527, 1.360, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.525, 0.000, 0.000)],\n [\"CB\", 0, (-0.525, -0.778, -1.208)],\n [\"O\", 3, (0.625, 1.063, 0.000)],\n [\"CG\", 4, (0.600, 1.370, -0.000)],\n [\"CD2\", 5, (0.889, -1.021, 0.003)],\n [\"ND1\", 5, (0.744, 1.160, -0.000)],\n [\"CE1\", 5, (2.030, 0.851, 0.002)],\n [\"NE2\", 5, (2.145, -0.466, 0.004)],\n ],\n \"ILE\": [\n [\"N\", 0, (-0.493, 1.373, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.527, -0.000, -0.000)],\n [\"CB\", 0, (-0.536, -0.793, -1.213)],\n [\"O\", 3, (0.627, 1.062, -0.000)],\n [\"CG1\", 4, (0.534, 1.437, -0.000)],\n [\"CG2\", 4, (0.540, -0.785, -1.199)],\n [\"CD1\", 5, (0.619, 1.391, 0.000)],\n ],\n \"LEU\": [\n [\"N\", 0, (-0.520, 1.363, 
0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.525, -0.000, -0.000)],\n [\"CB\", 0, (-0.522, -0.773, -1.214)],\n [\"O\", 3, (0.625, 1.063, -0.000)],\n [\"CG\", 4, (0.678, 1.371, 0.000)],\n [\"CD1\", 5, (0.530, 1.430, -0.000)],\n [\"CD2\", 5, (0.535, -0.774, 1.200)],\n ],\n \"LYS\": [\n [\"N\", 0, (-0.526, 1.362, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.526, 0.000, 0.000)],\n [\"CB\", 0, (-0.524, -0.778, -1.208)],\n [\"O\", 3, (0.626, 1.062, -0.000)],\n [\"CG\", 4, (0.619, 1.390, 0.000)],\n [\"CD\", 5, (0.559, 1.417, 0.000)],\n [\"CE\", 6, (0.560, 1.416, 0.000)],\n [\"NZ\", 7, (0.554, 1.387, 0.000)],\n ],\n \"MET\": [\n [\"N\", 0, (-0.521, 1.364, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.525, 0.000, 0.000)],\n [\"CB\", 0, (-0.523, -0.776, -1.210)],\n [\"O\", 3, (0.625, 1.062, -0.000)],\n [\"CG\", 4, (0.613, 1.391, -0.000)],\n [\"SD\", 5, (0.703, 1.695, 0.000)],\n [\"CE\", 6, (0.320, 1.786, -0.000)],\n ],\n \"PHE\": [\n [\"N\", 0, (-0.518, 1.363, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.524, 0.000, -0.000)],\n [\"CB\", 0, (-0.525, -0.776, -1.212)],\n [\"O\", 3, (0.626, 1.062, -0.000)],\n [\"CG\", 4, (0.607, 1.377, 0.000)],\n [\"CD1\", 5, (0.709, 1.195, -0.000)],\n [\"CD2\", 5, (0.706, -1.196, 0.000)],\n [\"CE1\", 5, (2.102, 1.198, -0.000)],\n [\"CE2\", 5, (2.098, -1.201, -0.000)],\n [\"CZ\", 5, (2.794, -0.003, -0.001)],\n ],\n \"PRO\": [\n [\"N\", 0, (-0.566, 1.351, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.527, -0.000, 0.000)],\n [\"CB\", 0, (-0.546, -0.611, -1.293)],\n [\"O\", 3, (0.621, 1.066, 0.000)],\n [\"CG\", 4, (0.382, 1.445, 0.0)],\n # ['CD', 5, (0.427, 1.440, 0.0)],\n [\"CD\", 5, (0.477, 1.424, 0.0)], # manually made angle 2 degrees larger\n ],\n \"SER\": [\n [\"N\", 0, (-0.529, 1.360, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.525, -0.000, -0.000)],\n [\"CB\", 0, (-0.518, -0.777, -1.211)],\n [\"O\", 3, (0.626, 1.062, -0.000)],\n [\"OG\", 4, (0.503, 1.325, 0.000)],\n ],\n \"THR\": [\n [\"N\", 0, (-0.517, 1.364, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.526, 0.000, -0.000)],\n [\"CB\", 0, (-0.516, -0.793, -1.215)],\n [\"O\", 3, (0.626, 1.062, 0.000)],\n [\"CG2\", 4, (0.550, -0.718, -1.228)],\n [\"OG1\", 4, (0.472, 1.353, 0.000)],\n ],\n \"TRP\": [\n [\"N\", 0, (-0.521, 1.363, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.525, -0.000, 0.000)],\n [\"CB\", 0, (-0.523, -0.776, -1.212)],\n [\"O\", 3, (0.627, 1.062, 0.000)],\n [\"CG\", 4, (0.609, 1.370, -0.000)],\n [\"CD1\", 5, (0.824, 1.091, 0.000)],\n [\"CD2\", 5, (0.854, -1.148, -0.005)],\n [\"CE2\", 5, (2.186, -0.678, -0.007)],\n [\"CE3\", 5, (0.622, -2.530, -0.007)],\n [\"NE1\", 5, (2.140, 0.690, -0.004)],\n [\"CH2\", 5, (3.028, -2.890, -0.013)],\n [\"CZ2\", 5, (3.283, -1.543, -0.011)],\n [\"CZ3\", 5, (1.715, -3.389, -0.011)],\n ],\n \"TYR\": [\n [\"N\", 0, (-0.522, 1.362, 0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.524, -0.000, -0.000)],\n [\"CB\", 0, (-0.522, -0.776, -1.213)],\n [\"O\", 3, (0.627, 1.062, -0.000)],\n [\"CG\", 4, (0.607, 1.382, -0.000)],\n [\"CD1\", 5, (0.716, 1.195, -0.000)],\n [\"CD2\", 5, (0.713, -1.194, -0.001)],\n [\"CE1\", 5, (2.107, 1.200, -0.002)],\n [\"CE2\", 5, (2.104, -1.201, -0.003)],\n [\"OH\", 5, (4.168, -0.002, -0.005)],\n [\"CZ\", 5, (2.791, -0.001, -0.003)],\n ],\n \"VAL\": [\n [\"N\", 0, (-0.494, 1.373, -0.000)],\n [\"CA\", 0, (0.000, 0.000, 0.000)],\n [\"C\", 0, (1.527, -0.000, -0.000)],\n [\"CB\", 0, (-0.533, -0.795, 
-1.213)],\n [\"O\", 3, (0.627, 1.062, -0.000)],\n [\"CG1\", 4, (0.540, 1.429, -0.000)],\n [\"CG2\", 4, (0.533, -0.776, 1.203)],\n ],\n}\n\n# A list of atoms (excluding hydrogen) for each AA type. PDB naming convention.\nresidue_atoms = {\n \"ALA\": [\"C\", \"CA\", \"CB\", \"N\", \"O\"],\n \"ARG\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD\", \"CZ\", \"N\", \"NE\", \"O\", \"NH1\", \"NH2\"],\n \"ASP\": [\"C\", \"CA\", \"CB\", \"CG\", \"N\", \"O\", \"OD1\", \"OD2\"],\n \"ASN\": [\"C\", \"CA\", \"CB\", \"CG\", \"N\", \"ND2\", \"O\", \"OD1\"],\n \"CYS\": [\"C\", \"CA\", \"CB\", \"N\", \"O\", \"SG\"],\n \"GLU\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD\", \"N\", \"O\", \"OE1\", \"OE2\"],\n \"GLN\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD\", \"N\", \"NE2\", \"O\", \"OE1\"],\n \"GLY\": [\"C\", \"CA\", \"N\", \"O\"],\n \"HIS\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD2\", \"CE1\", \"N\", \"ND1\", \"NE2\", \"O\"],\n \"ILE\": [\"C\", \"CA\", \"CB\", \"CG1\", \"CG2\", \"CD1\", \"N\", \"O\"],\n \"LEU\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD1\", \"CD2\", \"N\", \"O\"],\n \"LYS\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD\", \"CE\", \"N\", \"NZ\", \"O\"],\n \"MET\": [\"C\", \"CA\", \"CB\", \"CG\", \"CE\", \"N\", \"O\", \"SD\"],\n \"PHE\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD1\", \"CD2\", \"CE1\", \"CE2\", \"CZ\", \"N\", \"O\"],\n \"PRO\": [\"C\", \"CA\", \"CB\", \"CG\", \"CD\", \"N\", \"O\"],\n \"SER\": [\"C\", \"CA\", \"CB\", \"N\", \"O\", \"OG\"],\n \"THR\": [\"C\", \"CA\", \"CB\", \"CG2\", \"N\", \"O\", \"OG1\"],\n \"TRP\": [\n \"C\",\n \"CA\",\n \"CB\",\n \"CG\",\n \"CD1\",\n \"CD2\",\n \"CE2\",\n \"CE3\",\n \"CZ2\",\n \"CZ3\",\n \"CH2\",\n \"N\",\n \"NE1\",\n \"O\",\n ],\n \"TYR\": [\n \"C\",\n \"CA\",\n \"CB\",\n \"CG\",\n \"CD1\",\n \"CD2\",\n \"CE1\",\n \"CE2\",\n \"CZ\",\n \"N\",\n \"O\",\n \"OH\",\n ],\n \"VAL\": [\"C\", \"CA\", \"CB\", \"CG1\", \"CG2\", \"N\", \"O\"],\n}\n\n# Naming swaps for ambiguous atom names.\n# Due to symmetries in the amino acids the naming of atoms is ambiguous in\n# 4 of the 20 amino acids.\n# (The LDDT paper lists 7 amino acids as ambiguous, but the naming ambiguities\n# in LEU, VAL and ARG can be resolved by using the 3d constellations of\n# the 'ambiguous' atoms and their neighbours)\n# TODO: ^ interpret this\nresidue_atom_renaming_swaps = {\n \"ASP\": {\"OD1\": \"OD2\"},\n \"GLU\": {\"OE1\": \"OE2\"},\n \"PHE\": {\"CD1\": \"CD2\", \"CE1\": \"CE2\"},\n \"TYR\": {\"CD1\": \"CD2\", \"CE1\": \"CE2\"},\n}\n\n# Van der Waals radii [Angstroem] of the atoms (from Wikipedia)\nvan_der_waals_radius = {\n \"C\": 1.7,\n \"N\": 1.55,\n \"O\": 1.52,\n \"S\": 1.8,\n}\n\nBond = collections.namedtuple(\n \"Bond\", [\"atom1_name\", \"atom2_name\", \"length\", \"stddev\"]\n)\nBondAngle = collections.namedtuple(\n \"BondAngle\",\n [\"atom1_name\", \"atom2_name\", \"atom3name\", \"angle_rad\", \"stddev\"],\n)\n\n\[email protected]_cache(maxsize=None)\ndef load_stereo_chemical_props() -> Tuple[\n Mapping[str, List[Bond]],\n Mapping[str, List[Bond]],\n Mapping[str, List[BondAngle]],\n]:\n \"\"\"Load stereo_chemical_props.txt into a nice structure.\n\n Load literature values for bond lengths and bond angles and translate\n bond angles into the length of the opposite edge of the triangle\n (\"residue_virtual_bonds\").\n\n Returns:\n residue_bonds: dict that maps resname --> list of Bond tuples\n residue_virtual_bonds: dict that maps resname --> list of Bond tuples\n residue_bond_angles: dict that maps resname --> list of BondAngle tuples\n \"\"\"\n # TODO: this file should be downloaded in a setup script\n 
stereo_chemical_props = resources.read_text(\"openfold.resources\", \"stereo_chemical_props.txt\")\n\n lines_iter = iter(stereo_chemical_props.splitlines())\n # Load bond lengths.\n residue_bonds = {}\n next(lines_iter) # Skip header line.\n for line in lines_iter:\n if line.strip() == \"-\":\n break\n bond, resname, length, stddev = line.split()\n atom1, atom2 = bond.split(\"-\")\n if resname not in residue_bonds:\n residue_bonds[resname] = []\n residue_bonds[resname].append(\n Bond(atom1, atom2, float(length), float(stddev))\n )\n residue_bonds[\"UNK\"] = []\n\n # Load bond angles.\n residue_bond_angles = {}\n next(lines_iter) # Skip empty line.\n next(lines_iter) # Skip header line.\n for line in lines_iter:\n if line.strip() == \"-\":\n break\n bond, resname, angle_degree, stddev_degree = line.split()\n atom1, atom2, atom3 = bond.split(\"-\")\n if resname not in residue_bond_angles:\n residue_bond_angles[resname] = []\n residue_bond_angles[resname].append(\n BondAngle(\n atom1,\n atom2,\n atom3,\n float(angle_degree) / 180.0 * np.pi,\n float(stddev_degree) / 180.0 * np.pi,\n )\n )\n residue_bond_angles[\"UNK\"] = []\n\n def make_bond_key(atom1_name, atom2_name):\n \"\"\"Unique key to lookup bonds.\"\"\"\n return \"-\".join(sorted([atom1_name, atom2_name]))\n\n # Translate bond angles into distances (\"virtual bonds\").\n residue_virtual_bonds = {}\n for resname, bond_angles in residue_bond_angles.items():\n # Create a fast lookup dict for bond lengths.\n bond_cache = {}\n for b in residue_bonds[resname]:\n bond_cache[make_bond_key(b.atom1_name, b.atom2_name)] = b\n residue_virtual_bonds[resname] = []\n for ba in bond_angles:\n bond1 = bond_cache[make_bond_key(ba.atom1_name, ba.atom2_name)]\n bond2 = bond_cache[make_bond_key(ba.atom2_name, ba.atom3name)]\n\n # Compute distance between atom1 and atom3 using the law of cosines\n # c^2 = a^2 + b^2 - 2ab*cos(gamma).\n gamma = ba.angle_rad\n length = np.sqrt(\n bond1.length ** 2\n + bond2.length ** 2\n - 2 * bond1.length * bond2.length * np.cos(gamma)\n )\n\n # Propagation of uncertainty assuming uncorrelated errors.\n dl_outer = 0.5 / length\n dl_dgamma = (\n 2 * bond1.length * bond2.length * np.sin(gamma)\n ) * dl_outer\n dl_db1 = (\n 2 * bond1.length - 2 * bond2.length * np.cos(gamma)\n ) * dl_outer\n dl_db2 = (\n 2 * bond2.length - 2 * bond1.length * np.cos(gamma)\n ) * dl_outer\n stddev = np.sqrt(\n (dl_dgamma * ba.stddev) ** 2\n + (dl_db1 * bond1.stddev) ** 2\n + (dl_db2 * bond2.stddev) ** 2\n )\n residue_virtual_bonds[resname].append(\n Bond(ba.atom1_name, ba.atom3name, length, stddev)\n )\n\n return (residue_bonds, residue_virtual_bonds, residue_bond_angles)\n\n\n# Between-residue bond lengths for general bonds (first element) and for Proline\n# (second element).\nbetween_res_bond_length_c_n = [1.329, 1.341]\nbetween_res_bond_length_stddev_c_n = [0.014, 0.016]\n\n# Between-residue cos_angles.\nbetween_res_cos_angles_c_n_ca = [-0.5203, 0.0353] # degrees: 121.352 +- 2.315\nbetween_res_cos_angles_ca_c_n = [-0.4473, 0.0311] # degrees: 116.568 +- 1.995\n\n# This mapping is used when we need to store atom data in a format that requires\n# fixed atom data size for every residue (e.g. 
a numpy array).\natom_types = [\n \"N\",\n \"CA\",\n \"C\",\n \"CB\",\n \"O\",\n \"CG\",\n \"CG1\",\n \"CG2\",\n \"OG\",\n \"OG1\",\n \"SG\",\n \"CD\",\n \"CD1\",\n \"CD2\",\n \"ND1\",\n \"ND2\",\n \"OD1\",\n \"OD2\",\n \"SD\",\n \"CE\",\n \"CE1\",\n \"CE2\",\n \"CE3\",\n \"NE\",\n \"NE1\",\n \"NE2\",\n \"OE1\",\n \"OE2\",\n \"CH2\",\n \"NH1\",\n \"NH2\",\n \"OH\",\n \"CZ\",\n \"CZ2\",\n \"CZ3\",\n \"NZ\",\n \"OXT\",\n]\natom_order = {atom_type: i for i, atom_type in enumerate(atom_types)}\natom_type_num = len(atom_types) # := 37.\n\n# A compact atom encoding with 14 columns\n# pylint: disable=line-too-long\n# pylint: disable=bad-whitespace\nrestype_name_to_atom14_names = {\n \"ALA\": [\"N\", \"CA\", \"C\", \"O\", \"CB\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"ARG\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD\",\n \"NE\",\n \"CZ\",\n \"NH1\",\n \"NH2\",\n \"\",\n \"\",\n \"\",\n ],\n \"ASN\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"OD1\",\n \"ND2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"ASP\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"OD1\",\n \"OD2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"CYS\": [\"N\", \"CA\", \"C\", \"O\", \"CB\", \"SG\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"GLN\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD\",\n \"OE1\",\n \"NE2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"GLU\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD\",\n \"OE1\",\n \"OE2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"GLY\": [\"N\", \"CA\", \"C\", \"O\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"HIS\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"ND1\",\n \"CD2\",\n \"CE1\",\n \"NE2\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"ILE\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG1\",\n \"CG2\",\n \"CD1\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"LEU\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD1\",\n \"CD2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"LYS\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD\",\n \"CE\",\n \"NZ\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"MET\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"SD\",\n \"CE\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"PHE\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD1\",\n \"CD2\",\n \"CE1\",\n \"CE2\",\n \"CZ\",\n \"\",\n \"\",\n \"\",\n ],\n \"PRO\": [\"N\", \"CA\", \"C\", \"O\", \"CB\", \"CG\", \"CD\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"SER\": [\"N\", \"CA\", \"C\", \"O\", \"CB\", \"OG\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n \"THR\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"OG1\",\n \"CG2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"TRP\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD1\",\n \"CD2\",\n \"NE1\",\n \"CE2\",\n \"CE3\",\n \"CZ2\",\n \"CZ3\",\n \"CH2\",\n ],\n \"TYR\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG\",\n \"CD1\",\n \"CD2\",\n \"CE1\",\n \"CE2\",\n \"CZ\",\n \"OH\",\n \"\",\n \"\",\n ],\n \"VAL\": [\n \"N\",\n \"CA\",\n \"C\",\n \"O\",\n \"CB\",\n \"CG1\",\n \"CG2\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ],\n \"UNK\": [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n}\n# pylint: 
enable=line-too-long\n# pylint: enable=bad-whitespace\n\n\n# This is the standard residue order when coding AA type as a number.\n# Reproduce it by taking 3-letter AA codes and sorting them alphabetically.\nrestypes = [\n \"A\",\n \"R\",\n \"N\",\n \"D\",\n \"C\",\n \"Q\",\n \"E\",\n \"G\",\n \"H\",\n \"I\",\n \"L\",\n \"K\",\n \"M\",\n \"F\",\n \"P\",\n \"S\",\n \"T\",\n \"W\",\n \"Y\",\n \"V\",\n]\nrestype_order = {restype: i for i, restype in enumerate(restypes)}\nrestype_num = len(restypes) # := 20.\nunk_restype_index = restype_num # Catch-all index for unknown restypes.\n\nrestypes_with_x = restypes + [\"X\"]\nrestype_order_with_x = {restype: i for i, restype in enumerate(restypes_with_x)}\n\n\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\n \"\"\"Maps the given sequence into a one-hot encoded matrix.\n\n Args:\n sequence: An amino acid sequence.\n mapping: A dictionary mapping amino acids to integers.\n map_unknown_to_x: If True, any amino acid that is not in the mapping will be\n mapped to the unknown amino acid 'X'. If the mapping doesn't contain\n amino acid 'X', an error will be thrown. If False, any amino acid not in\n the mapping will throw an error.\n\n Returns:\n A numpy array of shape (seq_len, num_unique_aas) with one-hot encoding of\n the sequence.\n\n Raises:\n ValueError: If the mapping doesn't contain values from 0 to\n num_unique_aas - 1 without any gaps.\n \"\"\"\n num_entries = max(mapping.values()) + 1\n\n if sorted(set(mapping.values())) != list(range(num_entries)):\n raise ValueError(\n \"The mapping must have values from 0 to num_unique_aas-1 \"\n \"without any gaps. Got: %s\" % sorted(mapping.values())\n )\n\n one_hot_arr = np.zeros((len(sequence), num_entries), dtype=np.int32)\n\n for aa_index, aa_type in enumerate(sequence):\n if map_unknown_to_x:\n if aa_type.isalpha() and aa_type.isupper():\n aa_id = mapping.get(aa_type, mapping[\"X\"])\n else:\n raise ValueError(\n f\"Invalid character in the sequence: {aa_type}\"\n )\n else:\n aa_id = mapping[aa_type]\n one_hot_arr[aa_index, aa_id] = 1\n\n return one_hot_arr\n\n\nrestype_1to3 = {\n \"A\": \"ALA\",\n \"R\": \"ARG\",\n \"N\": \"ASN\",\n \"D\": \"ASP\",\n \"C\": \"CYS\",\n \"Q\": \"GLN\",\n \"E\": \"GLU\",\n \"G\": \"GLY\",\n \"H\": \"HIS\",\n \"I\": \"ILE\",\n \"L\": \"LEU\",\n \"K\": \"LYS\",\n \"M\": \"MET\",\n \"F\": \"PHE\",\n \"P\": \"PRO\",\n \"S\": \"SER\",\n \"T\": \"THR\",\n \"W\": \"TRP\",\n \"Y\": \"TYR\",\n \"V\": \"VAL\",\n}\n\n\n# NB: restype_3to1 differs from Bio.PDB.protein_letters_3to1 by being a simple\n# 1-to-1 mapping of 3 letter names to one letter names. The latter contains\n# many more, and less common, three letter names as keys and maps many of these\n# to the same one letter name (including 'X' and 'U' which we don't use here).\nrestype_3to1 = {v: k for k, v in restype_1to3.items()}\n\n# Define a restype name for all unknown residues.\nunk_restype = \"UNK\"\n\nresnames = [restype_1to3[r] for r in restypes] + [unk_restype]\nresname_to_idx = {resname: i for i, resname in enumerate(resnames)}\n\n\n# The mapping here uses hhblits convention, so that B is mapped to D, J and O\n# are mapped to X, U is mapped to C, and Z is mapped to E. Other than that the\n# remaining 20 amino acids are kept in alphabetical order.\n# There are 2 non-amino acid codes, X (representing any amino acid) and\n# \"-\" representing a missing amino acid in an alignment. 
The id for these\n# codes is put at the end (20 and 21) so that they can easily be ignored if\n# desired.\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\n\n# Partial inversion of HHBLITS_AA_TO_ID.\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\n\nrestypes_with_x_and_gap = restypes + [\"X\", \"-\"]\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\n\n\ndef _make_standard_atom_mask() -> np.ndarray:\n \"\"\"Returns [num_res_types, num_atom_types] mask array.\"\"\"\n # +1 to account for unknown (all 0s).\n mask = np.zeros([restype_num + 1, atom_type_num], dtype=np.int32)\n for restype, restype_letter in enumerate(restypes):\n restype_name = restype_1to3[restype_letter]\n atom_names = residue_atoms[restype_name]\n for atom_name in atom_names:\n atom_type = atom_order[atom_name]\n mask[restype, atom_type] = 1\n return mask\n\n\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()\n\n\n# A one hot representation for the first and second atoms defining the axis\n# of rotation for each chi-angle in each residue.\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\n \"\"\"Define chi-angle rigid groups via one-hot representations.\"\"\"\n chi_angles_index = {}\n one_hots = []\n\n for k, v in chi_angles_atoms.items():\n indices = [atom_types.index(s[atom_index]) for s in v]\n indices.extend([-1] * (4 - len(indices)))\n chi_angles_index[k] = indices\n\n for r in restypes:\n res3 = restype_1to3[r]\n one_hot = np.eye(atom_type_num)[chi_angles_index[res3]]\n one_hots.append(one_hot)\n\n one_hots.append(np.zeros([4, atom_type_num])) # Add zeros for residue `X`.\n one_hot = np.stack(one_hots, axis=0)\n one_hot = np.transpose(one_hot, [0, 2, 1])\n\n return one_hot\n\n\nchi_atom_1_one_hot = chi_angle_atom(1)\nchi_atom_2_one_hot = chi_angle_atom(2)\n\n# An array like chi_angles_atoms but using indices rather than names.\nchi_angles_atom_indices = [chi_angles_atoms[restype_1to3[r]] for r in restypes]\nchi_angles_atom_indices = tree.map_structure(\n lambda atom_name: atom_order[atom_name], chi_angles_atom_indices\n)\nchi_angles_atom_indices = np.array(\n [\n chi_atoms + ([[0, 0, 0, 0]] * (4 - len(chi_atoms)))\n for chi_atoms in chi_angles_atom_indices\n ]\n)\n\n# Mapping from (res_name, atom_name) pairs to the atom's chi group index\n# and atom index within that group.\nchi_groups_for_atom = collections.defaultdict(list)\nfor res_name, chi_angle_atoms_for_res in chi_angles_atoms.items():\n for chi_group_i, chi_group in enumerate(chi_angle_atoms_for_res):\n for atom_i, atom in enumerate(chi_group):\n chi_groups_for_atom[(res_name, atom)].append((chi_group_i, atom_i))\nchi_groups_for_atom = dict(chi_groups_for_atom)\n\n\ndef _make_rigid_transformation_4x4(ex, ey, translation):\n \"\"\"Create a rigid 4x4 transformation matrix from two axes and transl.\"\"\"\n # Normalize ex.\n ex_normalized = ex / 
np.linalg.norm(ex)\n\n # make ey perpendicular to ex\n ey_normalized = ey - np.dot(ey, ex_normalized) * ex_normalized\n ey_normalized /= np.linalg.norm(ey_normalized)\n\n # compute ez as cross product\n eznorm = np.cross(ex_normalized, ey_normalized)\n m = np.stack(\n [ex_normalized, ey_normalized, eznorm, translation]\n ).transpose()\n m = np.concatenate([m, [[0.0, 0.0, 0.0, 1.0]]], axis=0)\n return m\n\n\n# create an array with (restype, atomtype) --> rigid_group_idx\n# and an array with (restype, atomtype, coord) for the atom positions\n# and compute affine transformation matrices (4,4) from one rigid group to the\n# previous group\nrestype_atom37_to_rigid_group = np.zeros([21, 37], dtype=np.int)\nrestype_atom37_mask = np.zeros([21, 37], dtype=np.float32)\nrestype_atom37_rigid_group_positions = np.zeros([21, 37, 3], dtype=np.float32)\nrestype_atom14_to_rigid_group = np.zeros([21, 14], dtype=np.int)\nrestype_atom14_mask = np.zeros([21, 14], dtype=np.float32)\nrestype_atom14_rigid_group_positions = np.zeros([21, 14, 3], dtype=np.float32)\nrestype_rigid_group_default_frame = np.zeros([21, 8, 4, 4], dtype=np.float32)\n\n\ndef _make_rigid_group_constants():\n \"\"\"Fill the arrays above.\"\"\"\n for restype, restype_letter in enumerate(restypes):\n resname = restype_1to3[restype_letter]\n for atomname, group_idx, atom_position in rigid_group_atom_positions[\n resname\n ]:\n atomtype = atom_order[atomname]\n restype_atom37_to_rigid_group[restype, atomtype] = group_idx\n restype_atom37_mask[restype, atomtype] = 1\n restype_atom37_rigid_group_positions[\n restype, atomtype, :\n ] = atom_position\n\n atom14idx = restype_name_to_atom14_names[resname].index(atomname)\n restype_atom14_to_rigid_group[restype, atom14idx] = group_idx\n restype_atom14_mask[restype, atom14idx] = 1\n restype_atom14_rigid_group_positions[\n restype, atom14idx, :\n ] = atom_position\n\n for restype, restype_letter in enumerate(restypes):\n resname = restype_1to3[restype_letter]\n atom_positions = {\n name: np.array(pos)\n for name, _, pos in rigid_group_atom_positions[resname]\n }\n\n # backbone to backbone is the identity transform\n restype_rigid_group_default_frame[restype, 0, :, :] = np.eye(4)\n\n # pre-omega-frame to backbone (currently dummy identity matrix)\n restype_rigid_group_default_frame[restype, 1, :, :] = np.eye(4)\n\n # phi-frame to backbone\n mat = _make_rigid_transformation_4x4(\n ex=atom_positions[\"N\"] - atom_positions[\"CA\"],\n ey=np.array([1.0, 0.0, 0.0]),\n translation=atom_positions[\"N\"],\n )\n restype_rigid_group_default_frame[restype, 2, :, :] = mat\n\n # psi-frame to backbone\n mat = _make_rigid_transformation_4x4(\n ex=atom_positions[\"C\"] - atom_positions[\"CA\"],\n ey=atom_positions[\"CA\"] - atom_positions[\"N\"],\n translation=atom_positions[\"C\"],\n )\n restype_rigid_group_default_frame[restype, 3, :, :] = mat\n\n # chi1-frame to backbone\n if chi_angles_mask[restype][0]:\n base_atom_names = chi_angles_atoms[resname][0]\n base_atom_positions = [\n atom_positions[name] for name in base_atom_names\n ]\n mat = _make_rigid_transformation_4x4(\n ex=base_atom_positions[2] - base_atom_positions[1],\n ey=base_atom_positions[0] - base_atom_positions[1],\n translation=base_atom_positions[2],\n )\n restype_rigid_group_default_frame[restype, 4, :, :] = mat\n\n # chi2-frame to chi1-frame\n # chi3-frame to chi2-frame\n # chi4-frame to chi3-frame\n # luckily all rotation axes for the next frame start at (0,0,0) of the\n # previous frame\n for chi_idx in range(1, 4):\n if 
chi_angles_mask[restype][chi_idx]:\n axis_end_atom_name = chi_angles_atoms[resname][chi_idx][2]\n axis_end_atom_position = atom_positions[axis_end_atom_name]\n mat = _make_rigid_transformation_4x4(\n ex=axis_end_atom_position,\n ey=np.array([-1.0, 0.0, 0.0]),\n translation=axis_end_atom_position,\n )\n restype_rigid_group_default_frame[\n restype, 4 + chi_idx, :, :\n ] = mat\n\n\n_make_rigid_group_constants()\n\n\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, bond_length_tolerance_factor=15\n):\n \"\"\"compute upper and lower bounds for bonds to assess violations.\"\"\"\n restype_atom14_bond_lower_bound = np.zeros([21, 14, 14], np.float32)\n restype_atom14_bond_upper_bound = np.zeros([21, 14, 14], np.float32)\n restype_atom14_bond_stddev = np.zeros([21, 14, 14], np.float32)\n residue_bonds, residue_virtual_bonds, _ = load_stereo_chemical_props()\n for restype, restype_letter in enumerate(restypes):\n resname = restype_1to3[restype_letter]\n atom_list = restype_name_to_atom14_names[resname]\n\n # create lower and upper bounds for clashes\n for atom1_idx, atom1_name in enumerate(atom_list):\n if not atom1_name:\n continue\n atom1_radius = van_der_waals_radius[atom1_name[0]]\n for atom2_idx, atom2_name in enumerate(atom_list):\n if (not atom2_name) or atom1_idx == atom2_idx:\n continue\n atom2_radius = van_der_waals_radius[atom2_name[0]]\n lower = atom1_radius + atom2_radius - overlap_tolerance\n upper = 1e10\n restype_atom14_bond_lower_bound[\n restype, atom1_idx, atom2_idx\n ] = lower\n restype_atom14_bond_lower_bound[\n restype, atom2_idx, atom1_idx\n ] = lower\n restype_atom14_bond_upper_bound[\n restype, atom1_idx, atom2_idx\n ] = upper\n restype_atom14_bond_upper_bound[\n restype, atom2_idx, atom1_idx\n ] = upper\n\n # overwrite lower and upper bounds for bonds and angles\n for b in residue_bonds[resname] + residue_virtual_bonds[resname]:\n atom1_idx = atom_list.index(b.atom1_name)\n atom2_idx = atom_list.index(b.atom2_name)\n lower = b.length - bond_length_tolerance_factor * b.stddev\n upper = b.length + bond_length_tolerance_factor * b.stddev\n restype_atom14_bond_lower_bound[\n restype, atom1_idx, atom2_idx\n ] = lower\n restype_atom14_bond_lower_bound[\n restype, atom2_idx, atom1_idx\n ] = lower\n restype_atom14_bond_upper_bound[\n restype, atom1_idx, atom2_idx\n ] = upper\n restype_atom14_bond_upper_bound[\n restype, atom2_idx, atom1_idx\n ] = upper\n restype_atom14_bond_stddev[restype, atom1_idx, atom2_idx] = b.stddev\n restype_atom14_bond_stddev[restype, atom2_idx, atom1_idx] = b.stddev\n return {\n \"lower_bound\": restype_atom14_bond_lower_bound, # shape (21,14,14)\n \"upper_bound\": restype_atom14_bond_upper_bound, # shape (21,14,14)\n \"stddev\": restype_atom14_bond_stddev, # shape (21,14,14)\n }\n\n\nrestype_atom14_ambiguous_atoms = np.zeros((21, 14), dtype=np.float32)\nrestype_atom14_ambiguous_atoms_swap_idx = np.tile(\n np.arange(14, dtype=np.int), (21, 1)\n)\n\n\ndef _make_atom14_ambiguity_feats():\n for res, pairs in residue_atom_renaming_swaps.items():\n res_idx = restype_order[restype_3to1[res]]\n for atom1, atom2 in pairs.items():\n atom1_idx = restype_name_to_atom14_names[res].index(atom1)\n atom2_idx = restype_name_to_atom14_names[res].index(atom2)\n restype_atom14_ambiguous_atoms[res_idx, atom1_idx] = 1\n restype_atom14_ambiguous_atoms[res_idx, atom2_idx] = 1\n restype_atom14_ambiguous_atoms_swap_idx[\n res_idx, atom1_idx\n ] = atom2_idx\n restype_atom14_ambiguous_atoms_swap_idx[\n res_idx, atom2_idx\n ] = atom1_idx\n\n\n_make_atom14_ambiguity_feats()\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.sin",
"numpy.zeros",
"numpy.eye",
"numpy.stack",
"numpy.transpose",
"numpy.arange",
"numpy.sqrt",
"numpy.cos",
"numpy.cross"
]
] |
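A usage sketch for `sequence_to_onehot()` defined in this entry's source, assuming the module imports as `openfold.np.residue_constants` (per the `file_path` column):

```python
from openfold.np import residue_constants as rc  # import path assumed

one_hot = rc.sequence_to_onehot(
    sequence="ACDZ",                  # 'Z' is not one of the 20 restypes
    mapping=rc.restype_order_with_x,  # 21 classes; 'X' is index 20
    map_unknown_to_x=True,            # so 'Z' maps to the 'X' column
)
print(one_hot.shape)  # (4, 21)
```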
shub1095/elasticsearch-py
|
[
"778c7e4ac000b51ced7c9a1a588200ec395e40ca"
] |
[
"test_elasticsearch/test_serializer.py"
] |
[
"# -*- coding: utf-8 -*-\n# Licensed to Elasticsearch B.V. under one or more contributor\n# license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright\n# ownership. Elasticsearch B.V. licenses this file to you under\n# the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# \thttp://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport sys\nimport uuid\n\nfrom datetime import datetime\nfrom decimal import Decimal\n\nimport numpy as np\nimport pandas as pd\n\nfrom elasticsearch.serializer import (\n JSONSerializer,\n Deserializer,\n DEFAULT_SERIALIZERS,\n TextSerializer,\n)\nfrom elasticsearch.exceptions import SerializationError, ImproperlyConfigured\n\nfrom .test_cases import TestCase, SkipTest\n\n\nclass TestJSONSerializer(TestCase):\n def test_datetime_serialization(self):\n self.assertEqual(\n '{\"d\":\"2010-10-01T02:30:00\"}',\n JSONSerializer().dumps({\"d\": datetime(2010, 10, 1, 2, 30)}),\n )\n\n def test_decimal_serialization(self):\n if sys.version_info[:2] == (2, 6):\n raise SkipTest(\"Float rounding is broken in 2.6.\")\n self.assertEqual('{\"d\":3.8}', JSONSerializer().dumps({\"d\": Decimal(\"3.8\")}))\n\n def test_uuid_serialization(self):\n self.assertEqual(\n '{\"d\":\"00000000-0000-0000-0000-000000000003\"}',\n JSONSerializer().dumps(\n {\"d\": uuid.UUID(\"00000000-0000-0000-0000-000000000003\")}\n ),\n )\n\n def test_serializes_numpy_bool(self):\n self.assertEqual('{\"d\":true}', JSONSerializer().dumps({\"d\": np.bool_(True)}))\n\n def test_serializes_numpy_integers(self):\n ser = JSONSerializer()\n for np_type in (\n np.int_,\n np.int8,\n np.int16,\n np.int32,\n np.int64,\n ):\n self.assertEqual(ser.dumps({\"d\": np_type(-1)}), '{\"d\":-1}')\n\n for np_type in (\n np.uint8,\n np.uint16,\n np.uint32,\n np.uint64,\n ):\n self.assertEqual(ser.dumps({\"d\": np_type(1)}), '{\"d\":1}')\n\n def test_serializes_numpy_floats(self):\n ser = JSONSerializer()\n for np_type in (\n np.float_,\n np.float32,\n np.float64,\n ):\n self.assertRegexpMatches(\n ser.dumps({\"d\": np_type(1.2)}), r'^\\{\"d\":1\\.2[\\d]*}$'\n )\n\n def test_serializes_numpy_datetime(self):\n self.assertEqual(\n '{\"d\":\"2010-10-01T02:30:00\"}',\n JSONSerializer().dumps({\"d\": np.datetime64(\"2010-10-01T02:30:00\")}),\n )\n\n def test_serializes_numpy_ndarray(self):\n self.assertEqual(\n '{\"d\":[0,0,0,0,0]}',\n JSONSerializer().dumps({\"d\": np.zeros((5,), dtype=np.uint8)}),\n )\n # This isn't useful for Elasticsearch, just want to make sure it works.\n self.assertEqual(\n '{\"d\":[[0,0],[0,0]]}',\n JSONSerializer().dumps({\"d\": np.zeros((2, 2), dtype=np.uint8)}),\n )\n\n def test_serializes_numpy_nan_to_nan(self):\n self.assertEqual(\n '{\"d\":NaN}', JSONSerializer().dumps({\"d\": np.nan}),\n )\n\n def test_serializes_pandas_timestamp(self):\n self.assertEqual(\n '{\"d\":\"2010-10-01T02:30:00\"}',\n JSONSerializer().dumps({\"d\": pd.Timestamp(\"2010-10-01T02:30:00\")}),\n )\n\n def test_serializes_pandas_series(self):\n self.assertEqual(\n '{\"d\":[\"a\",\"b\",\"c\",\"d\"]}',\n JSONSerializer().dumps({\"d\": pd.Series([\"a\", 
\"b\", \"c\", \"d\"])}),\n )\n\n def test_serializes_pandas_na(self):\n if not hasattr(pd, \"NA\"): # pandas.NA added in v1\n raise SkipTest(\"pandas.NA required\")\n self.assertEqual(\n '{\"d\":null}', JSONSerializer().dumps({\"d\": pd.NA}),\n )\n\n def test_raises_serialization_error_pandas_nat(self):\n if not hasattr(pd, \"NaT\"):\n raise SkipTest(\"pandas.NaT required\")\n self.assertRaises(SerializationError, JSONSerializer().dumps, {\"d\": pd.NaT})\n\n def test_serializes_pandas_category(self):\n cat = pd.Categorical([\"a\", \"c\", \"b\", \"a\"], categories=[\"a\", \"b\", \"c\"])\n self.assertEqual(\n '{\"d\":[\"a\",\"c\",\"b\",\"a\"]}', JSONSerializer().dumps({\"d\": cat}),\n )\n\n cat = pd.Categorical([1, 2, 3], categories=[1, 2, 3])\n self.assertEqual(\n '{\"d\":[1,2,3]}', JSONSerializer().dumps({\"d\": cat}),\n )\n\n def test_raises_serialization_error_on_dump_error(self):\n self.assertRaises(SerializationError, JSONSerializer().dumps, object())\n\n def test_raises_serialization_error_on_load_error(self):\n self.assertRaises(SerializationError, JSONSerializer().loads, object())\n self.assertRaises(SerializationError, JSONSerializer().loads, \"\")\n self.assertRaises(SerializationError, JSONSerializer().loads, \"{{\")\n\n def test_strings_are_left_untouched(self):\n self.assertEqual(\"δ½ ε₯½\", JSONSerializer().dumps(\"δ½ ε₯½\"))\n\n\nclass TestTextSerializer(TestCase):\n def test_strings_are_left_untouched(self):\n self.assertEqual(\"δ½ ε₯½\", TextSerializer().dumps(\"δ½ ε₯½\"))\n\n def test_raises_serialization_error_on_dump_error(self):\n self.assertRaises(SerializationError, TextSerializer().dumps, {})\n\n\nclass TestDeserializer(TestCase):\n def setup_method(self, _):\n self.de = Deserializer(DEFAULT_SERIALIZERS)\n\n def test_deserializes_json_by_default(self):\n self.assertEqual({\"some\": \"data\"}, self.de.loads('{\"some\":\"data\"}'))\n\n def test_deserializes_text_with_correct_ct(self):\n self.assertEqual(\n '{\"some\":\"data\"}', self.de.loads('{\"some\":\"data\"}', \"text/plain\")\n )\n self.assertEqual(\n '{\"some\":\"data\"}',\n self.de.loads('{\"some\":\"data\"}', \"text/plain; charset=whatever\"),\n )\n\n def test_raises_serialization_error_on_unknown_mimetype(self):\n self.assertRaises(SerializationError, self.de.loads, \"{}\", \"text/html\")\n\n def test_raises_improperly_configured_when_default_mimetype_cannot_be_deserialized(\n self,\n ):\n self.assertRaises(ImproperlyConfigured, Deserializer, {})\n"
] |
[
[
"numpy.zeros",
"pandas.Timestamp",
"pandas.Categorical",
"numpy.bool_",
"pandas.Series",
"numpy.datetime64"
]
] |
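The tests in this entry document that `JSONSerializer` coerces numpy and pandas scalars to plain JSON. A one-liner confirming the same behavior, assuming the `elasticsearch` package is installed:

```python
import numpy as np
from elasticsearch.serializer import JSONSerializer

ser = JSONSerializer()
print(ser.dumps({"count": np.int64(3), "flag": np.bool_(True)}))
# -> {"count":3,"flag":true}
```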
BMI203-2022/project3
|
[
"3a1935f98b47ef425f8c0551436e16e9e846f396"
] |
[
"test/test_mst.py"
] |
[
"# write tests for bfs\nimport pytest\nimport numpy as np\nfrom mst import Graph\nfrom sklearn.metrics import pairwise_distances\n\n\ndef check_mst(adj_mat: np.ndarray, \n mst: np.ndarray, \n expected_weight: int, \n allowed_error: float = 0.0001):\n \"\"\" Helper function to check the correctness of the adjacency matrix encoding an MST.\n Note that because the MST of a graph is not guaranteed to be unique, we cannot \n simply check for equality against a known MST of a graph. \n\n Arguments:\n adj_mat: Adjacency matrix of full graph\n mst: Adjacency matrix of proposed minimum spanning tree\n expected_weight: weight of the minimum spanning tree of the full graph\n allowed_error: Allowed difference between proposed MST weight and `expected_weight`\n\n TODO: \n Add additional assertions to ensure the correctness of your MST implementation\n For example, how many edges should a minimum spanning tree have? Are minimum spanning trees\n always connected? What else can you think of?\n \"\"\"\n def approx_equal(a, b):\n return abs(a - b) < allowed_error\n\n total = 0\n for i in range(mst.shape[0]):\n for j in range(i+1):\n total += mst[i, j]\n assert approx_equal(total, expected_weight), 'Proposed MST has incorrect expected weight'\n\n\ndef test_mst_small():\n \"\"\" Unit test for the construction of a minimum spanning tree on a small graph \"\"\"\n file_path = './data/small.csv'\n g = Graph(file_path)\n g.construct_mst()\n check_mst(g.adj_mat, g.mst, 8)\n\n\ndef test_mst_single_cell_data():\n \"\"\" Unit test for the construction of a minimum spanning tree using \n single cell data, taken from the Slingshot R package \n (https://bioconductor.org/packages/release/bioc/html/slingshot.html)\n \"\"\"\n file_path = './data/slingshot_example.txt'\n # load coordinates of single cells in low-dimensional subspace\n coords = np.loadtxt(file_path)\n # compute pairwise distances for all 140 cells to form an undirected weighted graph\n dist_mat = pairwise_distances(coords)\n g = Graph(dist_mat)\n g.construct_mst()\n check_mst(g.adj_mat, g.mst, 57.263561605571695)\n\n\ndef test_mst_student():\n \"\"\" TODO: Write at least one unit test for MST construction \"\"\"\n pass\n"
] |
[
[
"sklearn.metrics.pairwise_distances",
"numpy.loadtxt"
]
] |
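The TODO in `check_mst` above asks what extra assertions an MST warrants; one standard check is that a spanning tree on V vertices has exactly V - 1 edges. A sketch against the symmetric adjacency-matrix encoding used above (assumes all true edge weights are nonzero, as with pairwise distances of distinct points):

```python
import numpy as np

def assert_spanning_edge_count(mst: np.ndarray):
    """One possible extra check: an MST must have exactly V - 1 edges."""
    n_vertices = mst.shape[0]
    n_edges = int(np.count_nonzero(np.triu(mst)))  # count each edge once
    assert n_edges == n_vertices - 1, 'Proposed MST has wrong edge count'
```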
theLongLab/phx-nn
|
[
"81e0f87faa82d6995b37095815655224cb5bf438"
] |
[
"data_split.py"
] |
[
"# data_split.py\n\nfrom pathlib import Path\nimport sys\nfrom typing import Optional\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndef main(test_size: float, seed: Optional[int]) -> None:\n \"\"\"\n Splits the data based on test set proportion.\n \"\"\"\n processed_dpath: Path = Path(\"data\", \"processed\")\n raw_data: pd.DataFrame = pd.read_csv(Path(\"data\", \"raw\", \"raw_sum_stats.csv\"))\n\n processed_train_data: pd.DataFrame\n processed_test_data: pd.DataFrame\n processed_train_data, processed_test_data = train_test_split(\n raw_data, test_size=test_size, random_state=seed\n )\n\n processed_train_data.to_csv(Path(processed_dpath, \"processed_sum_stats_train.csv\"), index=False)\n processed_test_data.to_csv(Path(processed_dpath, \"processed_sum_stats_test.csv\"), index=False)\n\n\nif __name__ == \"__main__\":\n test_size: float = float(sys.argv[1])\n seed: Optional[int] = None\n try:\n seed = int(sys.argv[2])\n except IndexError:\n pass\n\n main(test_size=test_size, seed=seed)\n"
] |
[
[
"sklearn.model_selection.train_test_split"
]
] |
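`data_split.py` takes the test fraction from `sys.argv[1]` and an optional seed from `sys.argv[2]`; calling `main` directly is equivalent and assumes the `data/raw` and `data/processed` directories from the script exist:

```python
from data_split import main

# Same effect as running `python data_split.py 0.2 42` from the shell:
# an 80/20 train/test split of raw_sum_stats.csv with a fixed seed.
main(test_size=0.2, seed=42)
```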
johannah/balloon-learning-environment
|
[
"cdb2e582f2b03c41f037bf76142d31611f5e0316"
] |
[
"balloon_learning_environment/env/balloon/solar.py"
] |
[
"# coding=utf-8\n# Copyright 2022 The Balloon Learning Environment Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for modeling sun-related variables.\n\nThis includes solar power and also sunrise/sunset related time calculations.\n\"\"\"\n\nimport collections\nimport datetime as dt\nimport functools\nimport math\nimport operator\nfrom typing import Callable, Tuple, Union\n\nfrom balloon_learning_environment.utils import constants\nfrom balloon_learning_environment.utils import units\nimport numpy as np\n\nimport s2sphere as s2\n\n# We define minimum solar elevation based on Earth's radius and mean balloon\n# altitude. Balloons typically fly from 15km-20km, so we assume 17.5km.\n# If solar elevation is below min solar horizon we can't see the sun anymore.\n# NOTE: Angle is below horizon so we need a negative sign.\nMIN_SOLAR_EL_DEG = -4.242\n_SEARCH_TIME_DELTA = dt.timedelta(minutes=3)\n\n\n# TODO(joshgreaves): Use s2.S1Angle throughout.\ndef solar_calculator(latlng: s2.LatLng,\n time: dt.datetime) -> Tuple[float, float, float]:\n \"\"\"Computes solar elevation, azimuth, and flux given latitude/longitude/time.\n\n Based on NOAA Solar Calculator described at:\n http://www.esrl.noaa.gov/gmd/grad/solcalc/index.html\n\n Args:\n latlng: The latitude and longitude.\n time: Datetime object.\n\n Returns:\n el_deg: Solar elevation in degrees.\n az_deg: Solar azimuth in degrees.\n flux: Solar flux in W/m^2.\n \"\"\"\n # Check if latitude is within expected range.\n if not latlng.is_valid:\n raise ValueError(f'solar_calculator: latlng is invalid: {latlng}.')\n if time.tzinfo is None:\n raise ValueError('time parameter needs timezone. 
Try UTC.')\n\n # Compute fraction_of_day from time.\n fraction_of_day = (\n int(time.timestamp()) %\n constants.NUM_SECONDS_PER_DAY) / constants.NUM_SECONDS_PER_DAY\n\n # Compute Julian day number from Gregorian calendar.\n julian_day_number = (367.0 * time.year - np.floor(7.0 * (time.year + np.floor(\n (time.month + 9.0) / 12.0)) / 4.0) - np.floor(3.0 * (np.floor(\n (time.year + (time.month - 9.0) / 7.0) / 100.0) + 1.0) / 4.0) +\n np.floor(275.0 * time.month / 9.0) + time.day +\n 1721028.5)\n\n # Compute Julian time (in days and in centuries).\n julian_time = julian_day_number + fraction_of_day\n julian_century = (julian_time - 2451545.0) / 36525.0\n\n # Compute solar parameters.\n geometric_mean_long_sun = math.radians(\n 280.46646 + julian_century * (36000.76983 + julian_century * 0.0003032))\n sin2l0 = np.sin(2.0 * geometric_mean_long_sun)\n cos2l0 = np.cos(2.0 * geometric_mean_long_sun)\n sin4l0 = np.sin(4.0 * geometric_mean_long_sun)\n\n geometric_mean_anomaly_sun = math.radians(\n 357.52911 + julian_century * (35999.05029 - 0.0001537 * julian_century))\n sinm0 = np.sin(geometric_mean_anomaly_sun)\n sin2m0 = np.sin(2.0 * geometric_mean_anomaly_sun)\n sin3m0 = np.sin(3.0 * geometric_mean_anomaly_sun)\n\n mean_obliquity_of_ecliptic = math.radians(23.0 + (26.0 + (\n (21.448 - julian_century *\n (46.815 + julian_century *\n (0.00059 - julian_century * 0.001813)))) / 60.0) / 60.0)\n\n obliquity_correction = mean_obliquity_of_ecliptic + math.radians(\n 0.00256 * np.cos(math.radians(125.04 - 1934.136 * julian_century)))\n\n var_y = np.tan(obliquity_correction / 2.0)**2\n\n eccentricity_earth = 0.016708634 - julian_century * (\n 0.000042037 + 0.0000001267 * julian_century)\n\n equation_of_time = (4.0 *\n (var_y * sin2l0 - 2.0 * eccentricity_earth * sinm0 +\n 4.0 * eccentricity_earth * var_y * sinm0 * cos2l0 -\n 0.5 * var_y * var_y * sin4l0 -\n 1.25 * eccentricity_earth * eccentricity_earth * sin2m0))\n\n hour_angle = math.radians(\n math.fmod(\n 1440.0 * fraction_of_day + math.degrees(equation_of_time) +\n 4.0 * latlng.lng().degrees, 1440.0)) / 4.0\n if hour_angle < 0:\n hour_angle += math.pi\n else:\n hour_angle -= math.pi\n\n eq_of_center_sun = math.radians(sinm0 *\n (1.914602 - julian_century *\n (0.004817 + 0.000014 * julian_century)) +\n sin2m0 *\n (0.019993 - 0.000101 * julian_century) +\n sin3m0 * 0.000289)\n true_long_sun = geometric_mean_long_sun + eq_of_center_sun\n apparent_long_sun = true_long_sun - math.radians(\n 0.00569 -\n 0.00478 * np.sin(math.radians(125.04 - 1934.136 * julian_century)))\n declination_sun = np.arcsin(\n np.sin(obliquity_correction) * np.sin(apparent_long_sun))\n\n zenith_angle = np.arccos(\n np.sin(latlng.lat().radians) * np.sin(declination_sun) +\n np.cos(latlng.lat().radians) * np.cos(declination_sun) *\n np.cos(hour_angle))\n\n # Compute solar elevation. 
Correct for atmospheric refraction.\n el_uncorrected_deg = 90.0 - math.degrees(zenith_angle)\n\n if el_uncorrected_deg > 85.0:\n atmospheric_refraction = 0\n elif el_uncorrected_deg > 5.0:\n tan_seu = np.tan(math.radians(el_uncorrected_deg))\n atmospheric_refraction = (58.1 / tan_seu - 0.07 / (tan_seu**3) + 0.000086 /\n (tan_seu**5))\n elif el_uncorrected_deg > -0.575:\n atmospheric_refraction = (1735.0 + el_uncorrected_deg *\n (-518.2 + el_uncorrected_deg *\n (103.4 + el_uncorrected_deg *\n (-12.79 + el_uncorrected_deg * 0.711))))\n else:\n atmospheric_refraction = -20.772 / np.tan(math.radians(el_uncorrected_deg))\n\n el_deg = el_uncorrected_deg + atmospheric_refraction / 3600.0\n\n # Compute solar azimuth. Make sure cos_azimuth is in the range [-1, 1].\n cos_az = ((np.sin(latlng.lat().radians) * np.cos(zenith_angle) -\n np.sin(declination_sun)) /\n (np.cos(latlng.lat().radians) * np.sin(zenith_angle)))\n az_unwrapped = np.arccos(np.clip(cos_az, -1.0, 1.0))\n if hour_angle > 0:\n az_deg = math.degrees(az_unwrapped) + 180.0\n else:\n az_deg = 180.0 - math.degrees(az_unwrapped)\n\n # Compute solar flux in W/m^2.\n flux = 1366.0 * (1 + 0.5 * (\n ((1 + eccentricity_earth) /\n (1 - eccentricity_earth))**2 - 1) * np.cos(geometric_mean_anomaly_sun))\n\n return el_deg, az_deg, flux\n\n\ndef solar_atmospheric_attenuation(el_deg: float,\n pressure_altitude_pa: float) -> float:\n \"\"\"Computes atmospheric attenuation of incoming solar radiation.\n\n Args:\n el_deg: Solar elevation in degrees.\n pressure_altitude_pa: Balloon's pressure altitude in Pascals.\n\n Returns:\n attenuation_factor: Solar atmospheric attenuation factor in range [0, 1].\n \"\"\"\n\n # Check if solar elevation is within range [-90, 90] deg.\n if el_deg > 90.0 or el_deg < -90.0:\n raise ValueError('solar_atmospheric_attenuation: '\n 'Solar elevation out of expected range [-90, 90] deg.')\n\n # Check if pressure altitude [Pa] is within range [0, 101325] Pa.\n if pressure_altitude_pa > 101325.0 or pressure_altitude_pa < 0.0:\n raise ValueError('solar_atmospheric_attenuation: '\n 'Pressure altitude out of expected range [0, 101325] Pa.')\n\n # If solar elevation is below min solar horizon return 0.\n if el_deg < MIN_SOLAR_EL_DEG:\n return 0.0\n\n # Compute airmass.\n tmp_sin_elev = 614.0 * np.sin(math.radians(el_deg))\n airmass = (0.34764 * (pressure_altitude_pa / 101325.0) *\n (math.sqrt(1229.0 + tmp_sin_elev * tmp_sin_elev) - tmp_sin_elev))\n\n # Compute atmospheric attenuation factor.\n return 0.5 * (np.exp(-0.65 * airmass) + np.exp(-0.95 * airmass))\n\n\ndef balloon_shadow(el_deg: float, panel_height_below_balloon_m: float) -> float:\n \"\"\"Computes shadowing factor on solar panels due to balloon film.\n\n Args:\n el_deg: Solar elevation in degrees.\n panel_height_below_balloon_m: Panel location below balloon in meters.\n\n Returns:\n shadow_factor: Balloon shadowing factor in range [0, 1].\n \"\"\"\n balloon_radius = 8.69275\n balloon_height = 10.41603\n\n shadow_el_deg = math.degrees(\n np.arctan2(\n math.sqrt(panel_height_below_balloon_m *\n (balloon_height + panel_height_below_balloon_m)),\n balloon_radius))\n\n if el_deg >= shadow_el_deg:\n # Shadowing applies. 
Use a balloon shadow factor of 0.4392.\n return 0.4392\n else:\n # No shadow.\n return 1.0\n\n\ndef is_solar_afternoon(latlng: s2.LatLng, time: dt.datetime) -> bool:\n \"\"\"Returns whether it is the solar afternoon.\n\n That is, returns whether midnight will happen before noon (chronologically).\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n time: Datetime at which to calculate.\n\n Returns:\n True if midnight is coming before noon.\n \"\"\"\n now_elevation, _, _ = solar_calculator(latlng, time)\n then_elevation, _, _ = solar_calculator(\n latlng, time + dt.timedelta(seconds=1))\n\n return then_elevation < now_elevation\n\n\ndef _find_solar_elevation(\n latlng: s2.LatLng,\n min_time: dt.datetime,\n max_time: dt.datetime,\n target: Union[str, float],\n time_delta=_SEARCH_TIME_DELTA) -> Tuple[dt.datetime, float]:\n \"\"\"A user-friendly wrapper around _find_solar_elevation_binary_search.\n\n See caveats in the comments to that function.\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n min_time: Earliest time in the time interval to consider.\n max_time: Latest time in the time interval to consider.\n target: One of 'minimum', 'maximum', or a specific elevation to be located.\n time_delta: Resolution of search process. If None, use 3 minutes.\n\n Returns:\n time: time at which the next midnight (or noon) will occur.\n elevation: solar elevation at that time.\n \"\"\"\n if target == 'minimum':\n return _find_solar_elevation_binary_search(\n latlng, min_time, max_time, operator.pos, time_delta)\n elif target == 'maximum':\n return _find_solar_elevation_binary_search(\n latlng, min_time, max_time, operator.neg, time_delta)\n else:\n try:\n # Turn the numerical value into an absolute loss function.\n target_numeric = float(target)\n return _find_solar_elevation_binary_search(\n latlng, min_time, max_time,\n lambda x: abs(x - target_numeric), time_delta)\n except ValueError:\n raise ValueError('Unknown target type: {}'.format(target))\n\n\ndef _find_solar_elevation_binary_search(\n latlng: s2.LatLng,\n min_time: dt.datetime,\n max_time: dt.datetime,\n transfer_function: Callable[[float], float],\n time_delta=_SEARCH_TIME_DELTA) -> Tuple[dt.datetime, float]:\n \"\"\"Finds the next solar midnight or noon in the given time interval.\n\n This method assumes that on the given interval, the transfer_function results\n in a convex objective that can be minimized.\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n min_time: Earliest time in the time interval to consider.\n max_time: Latest time in the time interval to consider.\n transfer_function: Transfer function to be applied to the elevation.\n time_delta: Resolution of search process. 
If None, use 3 minutes.\n\n Returns:\n time: time at which the next midnight (or noon) will occur.\n elevation: solar elevation at that time.\n \"\"\"\n if max_time < min_time:\n raise ValueError('Time interval must have positive extent.')\n\n max_steps = int((max_time - min_time) / time_delta)\n assert max_steps > 0\n\n # This calculates the solar elevation at a fixed timestep in the future, as\n # an objective to be minimized.\n # idx is the number of timesteps in the future.\n # Assumes that all timesteps have the same length.\n def _objective_function(idx: int) -> float:\n time = min_time + time_delta * idx\n el_degree, _, _ = solar_calculator(latlng, time)\n\n # If looking for noon, we negate the elevation curve to minimize it.\n return transfer_function(el_degree)\n\n # TODO(bellemare): Move this somewhere?\n class _LazySequence(collections.abc.Sequence):\n \"\"\"A Sequence that calculates its values on the fly.\"\"\"\n\n def __init__(self, length: int, fn: Callable[[int], float]):\n self._len = length\n self._fn = fn\n\n @functools.lru_cache(maxsize=200)\n def __getitem__(self, idx: int):\n return self._fn(idx)\n\n def __len__(self) -> int:\n return self._len\n\n # Perform binary search on the interval. The transfer_function transforms\n # solar elevation into a convex objective.\n objective = _LazySequence(max_steps, _objective_function)\n\n low = 0\n high = max_steps\n\n # Binary search the function for its minimum.\n while high > low + 1:\n midpoint = low + (high - low) / 2\n if objective[low] < objective[high]:\n # This trick works when the function is symmetric around its minimum.\n high = math.ceil(midpoint) # Ceil/floor is a bit more conservative.\n else:\n low = math.floor(midpoint)\n\n # If all went well, the minimum is either high or low.\n if objective[low] < objective[high]:\n min_index = low\n else:\n min_index = high\n\n time = min_time + time_delta * min_index\n\n elevation, _, _ = solar_calculator(latlng, time)\n return time, elevation\n\n\ndef get_next_solar_midnight(\n latlng: s2.LatLng, time: dt.datetime,\n time_delta=_SEARCH_TIME_DELTA) -> Tuple[dt.datetime, float]:\n \"\"\"Determines the next time at which solar midnight will occur.\n\n We call solar midnight the time at which the sun is at its lowest elevation.\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n time: Datetime at which to calculate.\n time_delta: Resolution at which to determine time.\n\n Returns:\n time: time at which the next midnight will occur.\n elevation: solar elevation at that midnight.\n \"\"\"\n if is_solar_afternoon(latlng, time):\n # Midnight is in the next 12 hours.\n return _find_solar_elevation(\n latlng, time, time + dt.timedelta(hours=12), 'minimum', time_delta)\n else:\n # Midnight is 12 to 24 hours away.\n return _find_solar_elevation(\n latlng, time + dt.timedelta(hours=12),\n time + dt.timedelta(hours=24), 'minimum', time_delta)\n\n\ndef get_next_solar_noon(\n latlng: s2.LatLng, time: dt.datetime,\n time_delta=_SEARCH_TIME_DELTA) -> Tuple[dt.datetime, float]:\n \"\"\"Determines the next time at which solar noon will occur.\n\n This is the same as get_next_solar_midnight, but with some bits flipped.\n We call solar noon the time at which the sun is at its highest elevation.\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n time: Datetime at which to calculate.\n time_delta: Resolution at which to determine time.\n\n Returns:\n time: time at which the next midnight will occur.\n elevation: solar elevation at that midnight.\n \"\"\"\n if 
is_solar_afternoon(latlng, time):\n # Noon is in the next 12 to 24 hours.\n return _find_solar_elevation(\n latlng, time + dt.timedelta(hours=12),\n time + dt.timedelta(hours=24), 'maximum', time_delta)\n else:\n return _find_solar_elevation(\n latlng, time, time + dt.timedelta(hours=12), 'maximum', time_delta)\n\n\ndef get_next_sunrise_sunset(\n latlng: s2.LatLng,\n time: dt.datetime,\n time_delta=_SEARCH_TIME_DELTA) -> Tuple[dt.datetime, dt.datetime]:\n \"\"\"Determines the next sunrise and sunset times.\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n time: Datetime at which to calculate.\n time_delta: Resolution at which to determine time.\n\n Returns:\n sunrise: Time of next sunrise.\n sunset: Time of next sunset.\n \"\"\"\n # This avoids dealing with polar day/night.\n # TODO(joshgreaves): Decide if we want to deal with the polar cases.\n assert abs(latlng.lat().degrees) < 60.0, 'High latitudes not supported.'\n\n next_noon, _ = get_next_solar_noon(latlng, time, time_delta)\n next_midnight, _ = get_next_solar_midnight(latlng, time, time_delta)\n\n # There are four cases here (four quadrant of the solar day). The next\n # lines pick out two of these quadrants. They might return a time before\n # the current time, in which case we know the relevant sunrise or sunset\n # is one day ahead.\n if is_solar_afternoon(latlng, time):\n # 'next_noon' is tomorrow.\n sunrise = _find_solar_elevation(\n latlng, next_midnight, next_noon,\n MIN_SOLAR_EL_DEG, time_delta)[0]\n sunset = _find_solar_elevation(\n latlng,\n next_noon - dt.timedelta(days=1), next_midnight,\n MIN_SOLAR_EL_DEG, time_delta)[0]\n else:\n # 'next_noon' is today.\n sunrise = _find_solar_elevation(\n latlng,\n next_midnight - dt.timedelta(days=1), next_noon,\n MIN_SOLAR_EL_DEG, time_delta)[0]\n sunset = _find_solar_elevation(\n latlng, next_noon, next_midnight,\n MIN_SOLAR_EL_DEG, time_delta)[0]\n\n # Handle the post-sunrise and post-sunset quadrants.\n if sunrise < time:\n sunrise += dt.timedelta(days=1)\n if sunset < time:\n sunset += dt.timedelta(days=1)\n\n return sunrise, sunset # Swiftly flow the days.\n\n\ndef calculate_steps_to_sunrise(latlng: s2.LatLng,\n time: dt.datetime,\n time_delta=_SEARCH_TIME_DELTA) -> int:\n \"\"\"Calculates the number of steps to next sunrise.\n\n When the number of steps is fractional, this is rounded up.\n\n Args:\n latlng: Latitude/longitude at which to calculate.\n time: Datetime at which to calculate.\n time_delta: The amount of time between each action.\n\n Returns:\n The number of time steps of length time_delta until sunrise. If the\n sun is up, returns 0.\n \"\"\"\n # It's currently day, 0 steps.\n now_elevation, _, _ = solar_calculator(latlng, time)\n if now_elevation >= MIN_SOLAR_EL_DEG:\n return 0\n\n sunrise, _ = get_next_sunrise_sunset(latlng, time, time_delta)\n\n elapsed_time = sunrise - time\n elapsed_time_in_steps = math.ceil(elapsed_time / time_delta)\n\n return int(elapsed_time_in_steps)\n\n\ndef solar_power(el_deg: float, pressure_altitude_pa: float) -> units.Power:\n \"\"\"Computes solar power produced by panels on the balloon.\n\n Args:\n el_deg: Solar elevation in degrees.\n pressure_altitude_pa: Balloon's pressure altitude in Pascals.\n\n Returns:\n solar_power: Solar power from panels on the balloon [W].\n \"\"\"\n\n # Get atmospheric attenuation factor.\n attenuation = solar_atmospheric_attenuation(el_deg, pressure_altitude_pa)\n\n # Loon balloons have 4 main solar panels mounted at 35deg and hanging at 3.3m\n # below the balloon. 
There are an additional 2 panels mounted at 65deg\n # hanging at 2.7m below the balloon. All panels have a max power of 210 W.\n power = 210.0 * attenuation * (\n 4 * np.cos(math.radians(el_deg - 35)) * balloon_shadow(el_deg, 3.3) +\n 2 * np.cos(math.radians(el_deg - 65)) * balloon_shadow(el_deg, 2.7))\n\n return units.Power(watts=power)\n"
] |
[
[
"numpy.sin",
"numpy.tan",
"numpy.exp",
"numpy.cos",
"numpy.clip",
"numpy.floor"
]
] |
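Editor's sketch for the record above: the `solar_atmospheric_attenuation` routine reduces to an airmass approximation scaled by pressure altitude. A minimal standalone version follows; the function name `attenuation` is illustrative, not the library's API.

import math

def attenuation(el_deg, pressure_pa):
    # Airmass approximation scaled by pressure altitude, as in the record above.
    s = 614.0 * math.sin(math.radians(el_deg))
    airmass = 0.34764 * (pressure_pa / 101325.0) * (math.sqrt(1229.0 + s * s) - s)
    # Attenuation is the mean of two exponential extinction terms.
    return 0.5 * (math.exp(-0.65 * airmass) + math.exp(-0.95 * airmass))

print(attenuation(45.0, 5000.0))  # close to 1.0 at typical balloon pressure altitudes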
mashrikt/pyro
|
[
"0097b3008327d87a27506b1da2528bf989080495"
] |
[
"tests/distributions/test_shapes.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nimport torch\n\nimport pyro.distributions as dist\n\n\ndef test_categorical_shape():\n probs = torch.ones(3, 2) / 2\n d = dist.Categorical(probs)\n assert d.batch_shape == (3,)\n assert d.event_shape == ()\n assert d.shape() == (3,)\n assert d.sample().size() == d.shape()\n\n\ndef test_one_hot_categorical_shape():\n probs = torch.ones(3, 2) / 2\n d = dist.OneHotCategorical(probs)\n assert d.batch_shape == (3,)\n assert d.event_shape == (2,)\n assert d.shape() == (3, 2)\n assert d.sample().size() == d.shape()\n\n\ndef test_normal_shape():\n loc = torch.zeros(3, 2)\n scale = torch.ones(3, 2)\n d = dist.Normal(loc, scale)\n assert d.batch_shape == (3, 2)\n assert d.event_shape == ()\n assert d.shape() == (3, 2)\n assert d.sample().size() == d.shape()\n\n\ndef test_dirichlet_shape():\n alpha = torch.ones(3, 2) / 2\n d = dist.Dirichlet(alpha)\n assert d.batch_shape == (3,)\n assert d.event_shape == (2,)\n assert d.shape() == (3, 2)\n assert d.sample().size() == d.shape()\n\n\ndef test_bernoulli_log_prob_shape():\n probs = torch.ones(3, 2)\n x = torch.ones(3, 2)\n d = dist.Bernoulli(probs)\n assert d.log_prob(x).size() == (3, 2)\n\n\ndef test_categorical_log_prob_shape():\n probs = torch.ones(3, 2, 4) / 4\n x = torch.zeros(3, 2)\n d = dist.Categorical(probs)\n assert d.log_prob(x).size() == (3, 2)\n\n\ndef test_one_hot_categorical_log_prob_shape():\n probs = torch.ones(3, 2, 4) / 4\n x = torch.zeros(3, 2, 4)\n x[:, :, 0] = 1\n d = dist.OneHotCategorical(probs)\n assert d.log_prob(x).size() == (3, 2)\n\n\ndef test_normal_log_prob_shape():\n loc = torch.zeros(3, 2)\n scale = torch.ones(3, 2)\n x = torch.zeros(3, 2)\n d = dist.Normal(loc, scale)\n assert d.log_prob(x).size() == (3, 2)\n\n\ndef test_diag_normal_log_prob_shape():\n loc1 = torch.zeros(2, 3)\n loc2 = torch.zeros(2, 4)\n scale = torch.ones(2, 1)\n d1 = dist.Normal(loc1, scale.expand_as(loc1)).independent(1)\n d2 = dist.Normal(loc2, scale.expand_as(loc2)).independent(1)\n x1 = d1.sample()\n x2 = d2.sample()\n assert d1.log_prob(x1).size() == (2,)\n assert d2.log_prob(x2).size() == (2,)\n"
] |
[
[
"torch.zeros",
"torch.ones"
]
] |
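Editor's sketch for the record above: the shape assertions follow the torch.distributions convention that pyro's distributions wrap, namely that a sample has shape batch_shape + event_shape. A small check using torch directly:

import torch
from torch.distributions import Dirichlet

d = Dirichlet(torch.ones(3, 2) / 2)
assert d.batch_shape == torch.Size([3])  # leading dims index independent distributions
assert d.event_shape == torch.Size([2])  # trailing dims belong to one sample
assert d.sample().shape == d.batch_shape + d.event_shape  # torch.Size([3, 2])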
unc-optimization/FedDR
|
[
"4097eb447a99c7180388527a2d05974906b77eb1"
] |
[
"FedDR/main.py"
] |
[
"import numpy as np\r\nimport argparse\r\nimport importlib\r\nimport random\r\nimport os, time\r\nimport tensorflow as tf\r\nfrom flearn.utils.model_utils import read_data\r\n\r\n# GLOBAL PARAMETERS\r\nOPTIMIZERS = ['fedavg', 'fedprox', 'feddr', 'fedpd']\r\nDATASETS = ['FEMNIST', 'synthetic_iid', 'synthetic_0_0', 'synthetic_0.5_0.5', 'synthetic_1_1']\r\nREG_TYPE = ['none','l1_norm','l2_norm_squared','l2_norm','linf_norm']\r\n\r\nMODEL_PARAMS = {\r\n 'FEMNIST.ann': (26,), # num_classes\r\n 'synthetic.ann': (10, ) # num_classes\r\n}\r\n\r\n\r\ndef read_options():\r\n ''' Parse command line arguments or load defaults '''\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--optimizer',\r\n help='name of optimizer;',\r\n type=str,\r\n choices=OPTIMIZERS,\r\n default='fedavg')\r\n parser.add_argument('--dataset',\r\n help='name of dataset;',\r\n type=str,\r\n choices=DATASETS,\r\n default='nist')\r\n parser.add_argument('--model',\r\n help='name of model;',\r\n type=str,\r\n default='stacked_lstm.py')\r\n parser.add_argument('--num_rounds',\r\n help='number of rounds to simulate;',\r\n type=int,\r\n default=-1)\r\n parser.add_argument('--eval_every',\r\n help='evaluate every ____ rounds;',\r\n type=int,\r\n default=-1)\r\n parser.add_argument('--clients_per_round',\r\n help='number of clients trained per round;',\r\n type=int,\r\n default=-1)\r\n parser.add_argument('--batch_size',\r\n help='batch size when clients train on data;',\r\n type=int,\r\n default=10)\r\n parser.add_argument('--num_epochs', \r\n help='number of epochs when clients train on data;',\r\n type=int,\r\n default=1)\r\n parser.add_argument('--num_iters',\r\n help='number of iterations when clients train on data;',\r\n type=int,\r\n default=1)\r\n parser.add_argument('--learning_rate',\r\n help='learning rate for inner solver;',\r\n type=float,\r\n default=0.003)\r\n parser.add_argument('--mu',\r\n help='constant for prox;',\r\n type=float,\r\n default=0)\r\n parser.add_argument('--eta',\r\n help='constant for feddr;',\r\n type=float,\r\n default=1.0)\r\n parser.add_argument('--alpha',\r\n help='constant for feddr;',\r\n type=float,\r\n default=0.9)\r\n parser.add_argument('--seed',\r\n help='seed for randomness;',\r\n type=int,\r\n default=0)\r\n parser.add_argument('--drop_percent',\r\n help='percentage of slow devices',\r\n type=float,\r\n default=0.1)\r\n parser.add_argument('--reg_type',\r\n help='type of regularizer',\r\n type=str,\r\n choices=REG_TYPE,\r\n default='none')\r\n parser.add_argument('--reg_coeff',\r\n help='regularization parameter',\r\n type=float,\r\n default=0.01)\r\n parser.add_argument('--exp_id',\r\n help='experiment ID',\r\n type=str,\r\n default='')\r\n parser.add_argument('--log_suffix',\r\n help='string to append to file name',\r\n type=str,\r\n default='')\r\n\r\n try: parsed = vars(parser.parse_args())\r\n except IOError as msg: parser.error(str(msg))\r\n\r\n # Set seeds\r\n random.seed(1 + parsed['seed'])\r\n np.random.seed(12 + parsed['seed'])\r\n tf.set_random_seed(123 + parsed['seed'])\r\n\r\n # load selected model\r\n if parsed['dataset'].startswith(\"synthetic\"): # all synthetic datasets use the same model\r\n model_path = '%s.%s.%s.%s' % ('flearn', 'models', 'synthetic', parsed['model'])\r\n else:\r\n model_path = '%s.%s.%s.%s' % ('flearn', 'models', parsed['dataset'], parsed['model'])\r\n\r\n mod = importlib.import_module(model_path)\r\n learner = getattr(mod, 'Model')\r\n\r\n # load selected trainer\r\n opt_path = 'flearn.trainers.%s' % parsed['optimizer']\r\n mod = 
importlib.import_module(opt_path)\r\n optimizer = getattr(mod, 'Server')\r\n\r\n # add selected model parameter\r\n parsed['model_params'] = MODEL_PARAMS['.'.join(model_path.split('.')[2:])]\r\n\r\n # print and return\r\n maxLen = max([len(ii) for ii in parsed.keys()]);\r\n fmtString = '\\t%' + str(maxLen) + 's : %s';\r\n print('Arguments:')\r\n for keyPair in sorted(parsed.items()): print(fmtString % keyPair)\r\n\r\n return parsed, learner, optimizer\r\n\r\ndef main():\r\n # suppress tf warnings\r\n tf.logging.set_verbosity(tf.logging.ERROR)\r\n \r\n # parse command line arguments\r\n options, learner, optimizer = read_options()\r\n\r\n # read data\r\n train_path = os.path.join('data', options['dataset'], 'data', 'train')\r\n test_path = os.path.join('data', options['dataset'], 'data', 'test')\r\n dataset = read_data(train_path, test_path)\r\n\r\n users, groups, train_data, test_data = dataset\r\n\r\n # call appropriate trainer\r\n t = optimizer(options, learner, dataset)\r\n start = time.time()\r\n history = t.train()\r\n end= time.time()\r\n print('Total Training Time: {:.2f} s'.format(end - start))\r\n\r\n alg_name = options['optimizer']\r\n if len(options['log_suffix']) > 0:\r\n name_list = [ alg_name,options['dataset'],options['log_suffix']]\r\n else:\r\n name_list = [ alg_name,options['dataset']]\r\n \r\n file_name = '_'.join(name_list)\r\n log_folder = 'logs'\r\n if options['exp_id'] is None or len(options['exp_id']) < 1:\r\n exp_id = 'test_' + options['dataset']\r\n log_folder = os.path.join(log_folder,exp_id)\r\n else:\r\n log_folder = os.path.join(log_folder,options['exp_id'])\r\n\r\n save_df(history, log_folder, alg_name, file_name)\r\n\r\ndef save_df(df, log_folder, alg_name, file_name):\r\n \r\n\r\n if not os.path.isdir(log_folder):\r\n os.mkdir(log_folder)\r\n \r\n df.to_csv(os.path.join(log_folder, file_name +'.csv'), index=False) \r\n \r\nif __name__ == '__main__':\r\n main()\r\n"
] |
[
[
"numpy.random.seed",
"tensorflow.logging.set_verbosity",
"tensorflow.set_random_seed"
]
] |
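Editor's sketch for the record above: `read_options` resolves both the model and the trainer at runtime from string names. The pattern is importlib plus getattr; `load_server` below is an illustrative name, and the module path is the one used in the record.

import importlib

def load_server(optimizer_name):
    # e.g. 'feddr' -> flearn.trainers.feddr; each trainer module exposes a Server class.
    module = importlib.import_module('flearn.trainers.%s' % optimizer_name)
    return getattr(module, 'Server')

# Server = load_server('feddr'); then Server(options, learner, dataset).train()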
NOWUM/EnSysMod
|
[
"18c8a2198db3510e667c1f0298d00a3dfcb0aab7"
] |
[
"ensysmod/crud/energy_transmission_distance.py"
] |
[
"from typing import List\n\nimport pandas as pd\nfrom sqlalchemy.orm import Session\n\nfrom ensysmod import crud\nfrom ensysmod.crud.base import CRUDBase\nfrom ensysmod.model import EnergyTransmissionDistance\nfrom ensysmod.schemas import EnergyTransmissionDistanceCreate, EnergyTransmissionDistanceUpdate\n\n\n# noinspection PyMethodMayBeStatic,PyArgumentList\nclass CRUDEnergyTransmissionDistance(CRUDBase[EnergyTransmissionDistance,\n EnergyTransmissionDistanceCreate,\n EnergyTransmissionDistanceUpdate]):\n \"\"\"\n CRUD operations for EnergyTransmissionDistance\n \"\"\"\n\n def remove_by_component(self, db: Session, component_id: int):\n \"\"\"\n Removes all EnergyTransmissionDistance entries for a given component.\n\n :param db: Database session\n :param component_id: ID of the component\n \"\"\"\n db.query(EnergyTransmissionDistance).filter(EnergyTransmissionDistance.ref_component == component_id).delete()\n\n def create(self, db: Session, obj_in: EnergyTransmissionDistanceCreate) -> EnergyTransmissionDistance:\n \"\"\"\n Creates a new energy transmission distance entry between two regions.\n\n :param db: Database session\n :param obj_in: Input data\n :return: New energy transmission distance entry\n \"\"\"\n\n if obj_in.ref_component is None and obj_in.component is None:\n raise ValueError(\"Component must be specified. Provide reference id or component name.\")\n\n if obj_in.ref_region_from is None and obj_in.region_from is None:\n raise ValueError(\"Region from must be specified. Provide reference id or region name.\")\n\n if obj_in.ref_component is not None:\n transmission = crud.energy_transmission.get(db, obj_in.ref_component)\n else:\n transmission = crud.energy_transmission.get_by_dataset_and_name(db, dataset_id=obj_in.ref_dataset,\n name=obj_in.component)\n\n if transmission is None or transmission.component.ref_dataset != obj_in.ref_dataset:\n raise ValueError(\"Component not found or from different dataset.\")\n obj_in.ref_component = transmission.ref_component\n\n if obj_in.ref_region_from is not None:\n region_from = crud.region.get(db, obj_in.ref_region_from)\n else:\n region_from = crud.region.get_by_dataset_and_name(db, dataset_id=obj_in.ref_dataset,\n name=obj_in.region_from)\n\n if region_from is None or region_from.ref_dataset != obj_in.ref_dataset:\n raise ValueError(\"Region from not found or from different dataset.\")\n obj_in.ref_region_from = region_from.id\n\n if obj_in.ref_region_to is not None:\n region_to = crud.region.get(db, obj_in.ref_region_to)\n else:\n region_to = crud.region.get_by_dataset_and_name(db, dataset_id=obj_in.ref_dataset, name=obj_in.region_to)\n\n if region_to is None or region_to.ref_dataset != obj_in.ref_dataset:\n raise ValueError(\"Region to not found or from different dataset.\")\n obj_in.ref_region_to = region_to.id\n\n return super().create(db=db, obj_in=obj_in)\n\n def get_dataframe(self, db: Session, component_id: int, region_ids: List[int]) -> pd.DataFrame:\n \"\"\"\n Returns the distances for the provided regions as matrix.\n \"\"\"\n data = db.query(self.model) \\\n .filter(self.model.ref_component == component_id) \\\n .filter(self.model.ref_region_from.in_(region_ids)) \\\n .filter(self.model.ref_region_to.in_(region_ids)) \\\n .all()\n\n region_names = [crud.region.get(db, id=r_id).name for r_id in region_ids]\n df = pd.DataFrame(0.0, index=region_names, columns=region_names)\n for d in data:\n df[d.region_to.name][d.region_from.name] = d.distance\n return df\n\n\nenergy_transmission_distance = 
CRUDEnergyTransmissionDistance(EnergyTransmissionDistance)\n"
] |
[
[
"pandas.DataFrame"
]
] |
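Editor's sketch for the record above: `get_dataframe` assembles a region-by-region distance matrix. The version below uses plain tuples in place of the ORM rows, and `.loc[row, col]` instead of the original's chained `df[col][row]` assignment, which can write to a temporary copy on some pandas versions.

import pandas as pd

regions = ['north', 'south', 'east']  # illustrative region names
rows = [('north', 'south', 120.0), ('south', 'east', 80.0)]  # (from, to, distance)

df = pd.DataFrame(0.0, index=regions, columns=regions)
for region_from, region_to, distance in rows:
    df.loc[region_from, region_to] = distance  # row = from, column = to
print(df)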
queirozfcom/auto-tagger
|
[
"d9c0339648562ceca2d7cd10a02aaf56d353ae7b"
] |
[
"social-tags/src/helpers/embeddings.py"
] |
[
"import os\nimport numpy as np\n\n\ndef read_glove_wiki_weighted(d, weight_index, glove_dir = None):\n\n if glove_dir is None:\n glove_dir = \"/media/felipe/SAMSUNG/GloVe\"\n\n supported_dimensions = [50, 100, 200, 300]\n\n if d not in supported_dimensions:\n raise ValueError(\"argument d must be one of {0}\".format(\",\".join(supported_dimensions)))\n\n\n embeddings_index = {}\n\n matches = 0\n overall = 0\n\n with open(os.path.join(glove_dir, \"glove.6B.{0}d.txt\".format(d)), 'r') as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n\n maybe_weight = weight_index.get(word)\n\n if maybe_weight is None:\n weight = 1.0\n else:\n weight = maybe_weight\n matches += 1\n\n overall += 1\n embeddings_index[word] = coefs * weight\n\n print(\"overall, {0} out of {1} embeddings were weighted. Total available embeddings: {2}\".format(matches,\n len(weight_index),\n overall))\n\n return embeddings_index\n"
] |
[
[
"numpy.asarray"
]
] |
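Editor's sketch for the record above: the per-line GloVe parsing in `read_glove_wiki_weighted` reduces to the snippet below. (Note that the record's own error path would raise, since str.join expects strings rather than the ints in supported_dimensions.) The vector here is a made-up 3-d row; real GloVe files carry 50 to 300 components.

import numpy as np

line = 'the 0.418 0.24968 -0.41242'  # made-up 3-d GloVe row
values = line.split()
word, coefs = values[0], np.asarray(values[1:], dtype='float32')
weight = {'the': 0.5}.get(word, 1.0)  # unweighted words fall back to 1.0
embedding = coefs * weight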
seattleboy/allennlp
|
[
"6c87ff59c478c47ba03d4ac0ea693ac7b0fd80b6"
] |
[
"allennlp/tests/modules/elmo_test.py"
] |
[
"# pylint: disable=no-self-use,invalid-name,protected-access\nimport os\nimport json\nimport warnings\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n import h5py\nimport numpy\nimport torch\n\nfrom allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer\nfrom allennlp.data import Token, Vocabulary, Instance\nfrom allennlp.data.dataset import Batch\nfrom allennlp.data.iterators import BasicIterator\nfrom allennlp.modules.elmo import _ElmoBiLm, Elmo, _ElmoCharacterEncoder\nfrom allennlp.modules.token_embedders import ElmoTokenEmbedder\nfrom allennlp.data.fields import TextField\nfrom allennlp.nn.util import remove_sentence_boundaries\n\n\nclass ElmoTestCase(AllenNlpTestCase):\n def setUp(self):\n super(ElmoTestCase, self).setUp()\n self.elmo_fixtures_path = self.FIXTURES_ROOT / 'elmo'\n self.options_file = str(self.elmo_fixtures_path / 'options.json')\n self.weight_file = str(self.elmo_fixtures_path / 'lm_weights.hdf5')\n self.sentences_json_file = str(self.elmo_fixtures_path / 'sentences.json')\n self.sentences_txt_file = str(self.elmo_fixtures_path / 'sentences.txt')\n\n def _load_sentences_embeddings(self):\n \"\"\"\n Load the test sentences and the expected LM embeddings.\n\n These files loaded in this method were created with a batch-size of 3.\n Due to idiosyncrasies with TensorFlow, the 30 sentences in sentences.json are split into 3 files in which\n the k-th sentence in each is from batch k.\n\n This method returns a (sentences, embeddings) pair where each is a list of length batch_size.\n Each list contains a sublist with total_sentence_count / batch_size elements. As with the original files,\n the k-th element in the sublist is in batch k.\n \"\"\"\n with open(self.sentences_json_file) as fin:\n sentences = json.load(fin)\n\n # the expected embeddings\n expected_lm_embeddings = []\n for k in range(len(sentences)):\n embed_fname = os.path.join(\n self.elmo_fixtures_path, 'lm_embeddings_{}.hdf5'.format(k)\n )\n expected_lm_embeddings.append([])\n with h5py.File(embed_fname, 'r') as fin:\n for i in range(10):\n sent_embeds = fin['%s' % i][...]\n sent_embeds_concat = numpy.concatenate(\n (sent_embeds[0, :, :], sent_embeds[1, :, :]),\n axis=-1\n )\n expected_lm_embeddings[-1].append(sent_embeds_concat)\n\n return sentences, expected_lm_embeddings\n\n\nclass TestElmoBiLm(ElmoTestCase):\n def test_elmo_bilm(self):\n # get the raw data\n sentences, expected_lm_embeddings = self._load_sentences_embeddings()\n\n # load the test model\n elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)\n\n # Deal with the data.\n indexer = ELMoTokenCharactersIndexer()\n\n # For each sentence, first create a TextField, then create an instance\n instances = []\n for batch in zip(*sentences):\n for sentence in batch:\n tokens = [Token(token) for token in sentence.split()]\n field = TextField(tokens, {'character_ids': indexer})\n instance = Instance({\"elmo\": field})\n instances.append(instance)\n\n vocab = Vocabulary()\n\n # Now finally we can iterate through batches.\n iterator = BasicIterator(3)\n iterator.index_with(vocab)\n for i, batch in enumerate(iterator(instances, num_epochs=1, shuffle=False)):\n lm_embeddings = elmo_bilm(batch['elmo']['character_ids'])\n top_layer_embeddings, mask = remove_sentence_boundaries(\n lm_embeddings['activations'][2],\n lm_embeddings['mask']\n )\n\n # check the mask lengths\n lengths = mask.data.numpy().sum(axis=1)\n batch_sentences = 
[sentences[k][i] for k in range(3)]\n expected_lengths = [\n len(sentence.split()) for sentence in batch_sentences\n ]\n self.assertEqual(lengths.tolist(), expected_lengths)\n\n # get the expected embeddings and compare!\n expected_top_layer = [expected_lm_embeddings[k][i] for k in range(3)]\n for k in range(3):\n self.assertTrue(\n numpy.allclose(\n top_layer_embeddings[k, :lengths[k], :].data.numpy(),\n expected_top_layer[k],\n atol=1.0e-6\n )\n )\n\n\nclass TestElmo(ElmoTestCase):\n def setUp(self):\n super(TestElmo, self).setUp()\n\n self.elmo = Elmo(self.options_file, self.weight_file, 2, dropout=0.0)\n\n def _sentences_to_ids(self, sentences):\n indexer = ELMoTokenCharactersIndexer()\n\n # For each sentence, first create a TextField, then create an instance\n instances = []\n for sentence in sentences:\n tokens = [Token(token) for token in sentence]\n field = TextField(tokens, {'character_ids': indexer})\n instance = Instance({'elmo': field})\n instances.append(instance)\n\n dataset = Batch(instances)\n vocab = Vocabulary()\n dataset.index_instances(vocab)\n return dataset.as_tensor_dict()['elmo']['character_ids']\n\n def test_elmo(self):\n # Correctness checks are in ElmoBiLm and ScalarMix, here we just add a shallow test\n # to ensure things execute.\n sentences = [['The', 'sentence', '.'],\n ['ELMo', 'helps', 'disambiguate', 'ELMo', 'from', 'Elmo', '.']]\n\n character_ids = self._sentences_to_ids(sentences)\n output = self.elmo(character_ids)\n elmo_representations = output['elmo_representations']\n mask = output['mask']\n\n assert len(elmo_representations) == 2\n assert list(elmo_representations[0].size()) == [2, 7, 32]\n assert list(elmo_representations[1].size()) == [2, 7, 32]\n assert list(mask.size()) == [2, 7]\n\n def test_elmo_4D_input(self):\n sentences = [[['The', 'sentence', '.'],\n ['ELMo', 'helps', 'disambiguate', 'ELMo', 'from', 'Elmo', '.']],\n [['1', '2'], ['1', '2', '3', '4', '5', '6', '7']],\n [['1', '2', '3', '4', '50', '60', '70'], ['The']]]\n\n all_character_ids = []\n for batch_sentences in sentences:\n all_character_ids.append(self._sentences_to_ids(batch_sentences))\n\n # (2, 3, 7, 50)\n character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)\n embeddings_4d = self.elmo(character_ids)\n\n # Run the individual batches.\n embeddings_3d = []\n for char_ids in all_character_ids:\n self.elmo._elmo_lstm._elmo_lstm.reset_states()\n embeddings_3d.append(self.elmo(char_ids))\n\n for k in range(3):\n numpy.testing.assert_array_almost_equal(\n embeddings_4d['elmo_representations'][0][:, k, :, :].data.numpy(),\n embeddings_3d[k]['elmo_representations'][0].data.numpy()\n )\n\n def test_elmo_with_module(self):\n # We will create the _ElmoBilm class and pass it in as a module.\n sentences = [['The', 'sentence', '.'],\n ['ELMo', 'helps', 'disambiguate', 'ELMo', 'from', 'Elmo', '.']]\n\n character_ids = self._sentences_to_ids(sentences)\n elmo_bilm = _ElmoBiLm(self.options_file, self.weight_file)\n elmo = Elmo(None, None, 2, dropout=0.0, module=elmo_bilm)\n output = elmo(character_ids)\n elmo_representations = output['elmo_representations']\n\n assert len(elmo_representations) == 2\n for k in range(2):\n assert list(elmo_representations[k].size()) == [2, 7, 32]\n\n\nclass TestElmoRequiresGrad(ElmoTestCase):\n def _run_test(self, requires_grad):\n embedder = ElmoTokenEmbedder(self.options_file, self.weight_file, requires_grad=requires_grad)\n batch_size = 3\n seq_len = 4\n char_ids = torch.from_numpy(numpy.random.randint(0, 262, (batch_size, seq_len, 
50)))\n embeddings = embedder(char_ids)\n loss = embeddings.sum()\n loss.backward()\n\n elmo_grads = [param.grad for name, param in embedder.named_parameters() if '_elmo_lstm' in name]\n if requires_grad:\n # None of the elmo grads should be None.\n assert all([grad is not None for grad in elmo_grads])\n else:\n # All of the elmo grads should be None.\n assert all([grad is None for grad in elmo_grads])\n\n def test_elmo_requires_grad(self):\n self._run_test(True)\n\n def test_elmo_does_not_require_grad(self):\n self._run_test(False)\n\n\nclass TestElmoTokenRepresentation(ElmoTestCase):\n def test_elmo_token_representation(self):\n # Load the test words and convert to char ids\n with open(os.path.join(self.elmo_fixtures_path, 'vocab_test.txt'), 'r') as fin:\n tokens = fin.read().strip().split('\\n')\n\n indexer = ELMoTokenCharactersIndexer()\n indices = [indexer.token_to_indices(Token(token), Vocabulary()) for token in tokens]\n # There are 457 tokens. Reshape into 10 batches of 50 tokens.\n sentences = []\n for k in range(10):\n sentences.append(\n indexer.pad_token_sequence(\n indices[(k * 50):((k + 1) * 50)], desired_num_tokens=50, padding_lengths={}\n )\n )\n batch = torch.from_numpy(numpy.array(sentences))\n\n elmo_token_embedder = _ElmoCharacterEncoder(self.options_file, self.weight_file)\n elmo_token_embedder_output = elmo_token_embedder(batch)\n\n # Reshape back to a list of words and compare with ground truth. Need to also\n # remove <S>, </S>\n actual_embeddings = remove_sentence_boundaries(\n elmo_token_embedder_output['token_embedding'],\n elmo_token_embedder_output['mask']\n )[0].data.numpy()\n actual_embeddings = actual_embeddings.reshape(-1, actual_embeddings.shape[-1])\n\n embedding_file = os.path.join(self.elmo_fixtures_path, 'elmo_token_embeddings.hdf5')\n with h5py.File(embedding_file, 'r') as fin:\n expected_embeddings = fin['embedding'][...]\n\n assert numpy.allclose(actual_embeddings[:len(tokens)], expected_embeddings, atol=1e-6)\n\n def test_elmo_token_representation_bos_eos(self):\n # The additional <S> and </S> embeddings added by the embedder should be as expected.\n indexer = ELMoTokenCharactersIndexer()\n\n elmo_token_embedder = _ElmoCharacterEncoder(self.options_file, self.weight_file)\n\n for correct_index, token in [[0, '<S>'], [2, '</S>']]:\n indices = indexer.token_to_indices(Token(token), Vocabulary())\n indices = torch.from_numpy(numpy.array(indices)).view(1, 1, -1)\n embeddings = elmo_token_embedder(indices)['token_embedding']\n assert numpy.allclose(embeddings[0, correct_index, :].data.numpy(), embeddings[0, 1, :].data.numpy())\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.random.randint"
]
] |
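Editor's sketch for the record above: the ELMo tests always slice each sentence's embeddings to its true token count before calling numpy.allclose, since everything past the mask is padding. A synthetic illustration of that pattern:

import numpy as np

lengths = [3, 7]                       # true token counts per batch row
embeddings = np.random.rand(2, 7, 32)  # (batch, max_len, dim), stand-in data
expected = [embeddings[k, :lengths[k], :] for k in range(2)]

for k in range(2):
    assert np.allclose(embeddings[k, :lengths[k], :], expected[k], atol=1e-6)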
cculianu/Bitcoin-Static
|
[
"8b19e8a83990d20bc25ef138a5050b23591fbb80"
] |
[
"test/functional/bchn-txbroadcastinterval.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) 2020 The Bitcoin Static developers\n# Author matricz\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"\nTest that inv messages are sent according to\nan exponential distribution with scale -txbroadcastinterval\nThe outbound interval should be half of the inbound\n\"\"\"\nimport time\n\nfrom test_framework.mininode import P2PInterface, mininode_lock\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import wait_until, connect_nodes, disconnect_nodes\nfrom scipy import stats\n\n\nclass InvReceiver(P2PInterface):\n\n def __init__(self):\n super().__init__()\n self.invTimes = []\n self.invDelays = []\n\n def on_inv(self, message):\n\n timeArrived = time.time()\n # If an inv contains more then one transaction, then the number of invs (==samplesize)\n # will be non-deterministic. This would be an error.\n assert(len(message.inv) == 1)\n self.invTimes.append(timeArrived)\n if len(self.invTimes) > 1:\n timediff = self.invTimes[-1] - self.invTimes[-2]\n self.invDelays.append(timediff)\n\n\nclass TxBroadcastIntervalTest(BitcoinTestFramework):\n\n # This test will have a node create a number of transactions and relay them\n # to the mininode InvReceivers (one inbound and one outbound)\n # according to test parameters.\n # A third disconnected node is used only to create signed transactions\n\n # The nodes are configured with \"-txbroadcastrate=1\" and\n # \"-excessiveblocksize=2000000\" so that they relay at most one tx per inv\n # It's convenient, because we can now define the exact number of invs\n # (== sample size -1) that we want to send\n # This holds true only for interval values <= 500 ms\n\n # The mininode InvReceiver just listens and registers the delays between invs\n # and constructs a sample array from these delays\n # This sample is tested against a reference exponential distribution\n # density with the same parameters with scipy.stats.kstest\n # (See https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test)\n # The test is accepted if the delays sample resembles the reference\n # distribution -- or, more specifically, if the probability that the\n # observed distribution would have occurred as a sampling of the theoretical\n # exponential distribution with a probability of at least alpha\n # (pvalue > alpha, default 0.001)\n\n # There is one mininode that connects directly to the node that generates transactions.\n # This tests the *inbound* connection interval.\n # The first node creates an outbound connection to the second node,\n # which relays the transactions instantly (-txbroadcastinterval=1)\n # to the second mininode, which tests the *outbound* connection interval (= 1/2 of the inbound).\n # (but is less reliable for small values of the -txbroadcastinterval)\n\n def skip_test_if_missing_module(self):\n self.skip_if_no_wallet()\n\n def add_options(self, parser):\n parser.add_argument(\"--interval\", dest=\"interval\", type=int, default=500,\n help=\"Set the average send interval in ms\")\n parser.add_argument(\"--samplesize\", dest=\"samplesize\", type=int, default=100,\n help=\"Set the samplesize (number of inv message delays) for testing\")\n parser.add_argument(\"--testoutbound\", dest=\"testoutbound\", action=\"store_true\",\n help=\"Set whether to test outbound (along inbound) connection interval\")\n parser.add_argument(\"--alpha\", dest=\"alpha\", type=float, default=\"0.001\",\n help=\"Set a 
confidence threshold for the kstest\")\n\n def set_test_params(self):\n self.scale = self.options.interval / 1000\n self.num_nodes = 3\n args = [\n [\"-txbroadcastinterval={}\".format(self.options.interval),\n \"-txbroadcastrate=1\", \"-excessiveblocksize=2000000\",\n \"-limitancestorcount={}\".format(self.options.samplesize+1),\n \"-limitdescendantcount={}\".format(self.options.samplesize+1)],\n [\"-txbroadcastinterval=1\",\n \"-txbroadcastrate=1\", \"-excessiveblocksize=2000000\",\n \"-limitancestorcount={}\".format(self.options.samplesize+1),\n \"-limitdescendantcount={}\".format(self.options.samplesize+1)],\n [\"-limitancestorcount={}\".format(self.options.samplesize+1),\n \"-limitdescendantcount={}\".format(self.options.samplesize+1)]\n ]\n self.extra_args = args\n\n def setup_network(self):\n self.setup_nodes()\n connect_nodes(self.nodes[0], self.nodes[1])\n connect_nodes(self.nodes[1], self.nodes[2])\n # Generate enough coins on the spending nodes\n self.nodes[2].generate(20 + 100)\n self.sync_all()\n\n # Disconnect node 3 so that it doesn't broadcast the txs it creates\n disconnect_nodes(self.nodes[1], self.nodes[2])\n self.signedtxs = []\n to = self.nodes[2].getnewaddress()\n for i in range(self.options.samplesize):\n txid = self.nodes[2].sendtoaddress(to, \"0.00001\", \"comment\", \"comment_to\", False, 2)\n self.signedtxs.append(self.nodes[2].gettransaction(txid)['hex'])\n\n def run_test(self):\n inboundReceiver, outboundReceiver = InvReceiver(), InvReceiver()\n self.nodes[0].add_p2p_connection(inboundReceiver)\n self.nodes[1].add_p2p_connection(outboundReceiver)\n\n for signextx in self.signedtxs:\n self.nodes[0].sendrawtransaction(signextx, True)\n\n wait_until(\n lambda: len(inboundReceiver.invTimes) == self.options.samplesize,\n lock=mininode_lock,\n timeout=self.options.samplesize * self.options.interval / 1000 * 2)\n wait_until(\n lambda: len(outboundReceiver.invTimes) == self.options.samplesize,\n lock=mininode_lock,\n timeout=self.options.samplesize * self.options.interval / 1000)\n\n inboundkstestresult = stats.kstest(inboundReceiver.invDelays, stats.expon(scale=self.scale).cdf)\n outboundkstestresult = stats.kstest(outboundReceiver.invDelays, stats.expon(scale=self.scale / 2).cdf)\n self.log.info(\"kstestresults for interval {}: inbound {}, outbound {}\".format(\n self.options.interval,\n inboundkstestresult,\n outboundkstestresult))\n assert(inboundkstestresult.pvalue > self.options.alpha), inboundReceiver.invDelays\n if self.options.testoutbound:\n assert(outboundkstestresult.pvalue > self.options.alpha), outboundReceiver.invDelays\n\n\nif __name__ == '__main__':\n TxBroadcastIntervalTest().main()\n"
] |
[
[
"scipy.stats.expon"
]
] |
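Editor's sketch for the record above: the acceptance rule is a one-sample Kolmogorov-Smirnov test of the observed inv delays against the reference exponential CDF. A self-contained version with synthetic delays follows; a print replaces the test's assert, since even a sample drawn from the true distribution fails at rate alpha.

from scipy import stats

scale = 0.5                                      # mean inter-inv delay, seconds
delays = stats.expon(scale=scale).rvs(size=100)  # stand-in for measured delays
result = stats.kstest(delays, stats.expon(scale=scale).cdf)
print(result.pvalue > 0.001)  # the test accepts when pvalue exceeds alpha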
fred3m/astropyp
|
[
"414c9e6d84da2604c6466b2046827d8b1988edab"
] |
[
"astropyp/phot/phot.py"
] |
[
"from __future__ import division\nimport logging\nimport warnings\nfrom collections import OrderedDict\nimport numpy as np\nimport astropy.units as apu\n\nimport astropyp.catalog\nfrom astropyp.phot import detect\n\ntry:\n import six\nexcept ImportError:\n try:\n from astropy.extern import six\n except:\n raise Exception(\"Unable to import six module\")\n\nlogger = logging.getLogger('astropyp.calibrate.phot')\n\nclass SingleImage:\n \"\"\"\n Collection of Groups and PSFs for an entire image\n \n Parameters\n ----------\n catalog: `~astropyp.catalog.Catalog` or `~astropy.table.Table`\n Catalog of sources with mappings to standard catalog fields.\n See `~astropyp.catalog.Catalog` for more on Catalogs.\n If an `~astropy.table.Table` is passed instead it will be\n converted to an Catalog with the standard mapping\n to x,y,ra,dec,etc.\n \"\"\"\n def __init__(self, header=None, img=None, dqmask=None, wtmap=None,\n wcs=None, separation=None, gain=None, exposure=None, \n exptime=None, catalog=None, psf=None,\n subsampling=5, aper_radius=None, bkg=None,\n cluster_method='dbscan', mask_group=True,\n show_plots=False, groups=None, indices=None):\n \n self.header = header\n self.img = img\n self.dqmask = dqmask\n self.wtmap = wtmap\n self.wcs = wcs\n self.separation = separation\n self.gain = gain\n self.catalog = catalog\n self.exposure = exposure\n self.exptime = exptime\n self.psf = psf\n self.subsampling = subsampling\n self.aper_radius = aper_radius\n self.bkg = bkg\n self.cluster_method = cluster_method\n self.mask_group = mask_group\n self.show_plots = show_plots\n if groups is None:\n groups = []\n self.groups = groups\n if indices is None:\n indices = OrderedDict()\n self.indices = indices\n self.kd_tree = None\n \n if not isinstance(self.catalog, astropyp.catalog.Catalog):\n self.catalog = astropyp.catalog.Catalog(self.catalog)\n # If the image is part of an focal array from a larger exposure,\n # use the exposure settings to set parameters\n if self.exptime is None and self.exposure is not None:\n self.exptime = exposure.exptime\n \n self.group_ids = range(len(self.groups))\n \n def detect_sources(self, sex_params={}, aper_radius=None, \n subtract_bkg=False, gain=None, wcs=None, exptime=None,\n windowed=True, edge_val=1, transform='wcs'):\n # Set optional parameters\n if aper_radius is None:\n aper_radius = self.aper_radius\n elif self.aper_radius is None:\n self.aper_radius = aper_radius\n if gain is None:\n gain = self.gain\n elif self.gain is None:\n self.gain = gain\n if wcs is None:\n wcs = self.wcs\n elif self.wcs is None:\n self.wcs = wcs\n if exptime is None:\n exptime = self.exptime\n elif self.exptime is None:\n self.exptime = exptime\n \n result = detect.get_sources(self.img, self.dqmask, self.wtmap, \n exptime, sex_params, None, subtract_bkg, gain, \n wcs, aper_radius, windowed, edge_val, transform)\n sources, self.bkg = result\n self.catalog = astropyp.catalog.Catalog(\n sources, a='a', b='b', peak='peak')\n \n if hasattr(self.exposure, 'airmass'):\n self.catalog.sources['airmass'] = self.exposure.airmass\n self.catalog.update_static_column('airmass', 'airmass')\n return self.catalog, self.bkg\n \n def select_psf_sources(self,\n min_flux=None, min_amplitude=None, min_dist=None, max_ratio=None, \n edge_dist=None, verbose=True, aper_radius=None,\n units='deg', badpix_flags=['flags'], flag_max=0, psf_idx='psf'):\n \"\"\"\n Select sources with a minimum flux, amplitude, separation,\n and circular shape to use to build the psf.\n \"\"\"\n if aper_radius is None:\n aper_radius = 
self.aper_radius\n elif self.aper_radius is None:\n self.aper_radius = aper_radius\n \n result = astropyp.phot.psf.select_psf_sources(self.img, \n self.catalog, aper_radius, min_flux, min_amplitude, min_dist, \n max_ratio, edge_dist, verbose, \n badpix_flags=badpix_flags, flag_max=flag_max)\n self.indices[psf_idx], self.flags = result\n return result\n \n def create_psf(self, psf_sources=None, psf_radius=None, \n combine_mode='median', offset_buffer=3):\n if psf_sources is None:\n psf_sources = 'psf'\n if psf_radius is None:\n psf_radius = self.aper_radius\n \n if isinstance(psf_sources, six.string_types):\n psf_sources = self.catalog.sources[self.indices[psf_sources]]\n \n psf_array = astropyp.phot.psf.build_psf(\n self.img, psf_radius, psf_sources, \n subsampling=self.subsampling, combine_mode=combine_mode, \n offset_buffer=offset_buffer)\n self.psf = astropyp.phot.psf.SinglePSF(psf_array, 1., 0, 0, \n self.subsampling)\n return psf_array\n \n def show_psf(self):\n import matplotlib\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d.axes3d import Axes3D\n\n X = np.arange(0, self.psf._psf_array.shape[1], 1)\n Y = np.arange(0, self.psf._psf_array.shape[0], 1)\n X, Y = np.meshgrid(X, Y)\n fig = plt.figure(figsize=(10, 10))\n ax=fig.add_subplot(1,1,1, projection='3d')\n ax.plot_wireframe(X, Y, self.psf._psf_array)#, rstride=5, cstride=5)\n plt.show()\n \n def perform_psf_photometry(self, method='neighbors',\n separation=None, \n verbose=False, fit_position=True, \n pos_range=0, indices=None, pool_size=None):\n \"\"\"\n Perform PSF photometry on all of the sources in the catalog,\n or if indices is specified, a subset of sources.\n \n Parameters\n ----------\n separation: float, optional\n Separation (in pixels) for members to be considered\n part of the same group *Default=psf width*\n method: string\n Method to use for performing photometry. This must\n be one of the following: \n 'neighbors', which searches\n for sources within ``separation`` and includes them\n in the fit for the source in the center;\n 'single', which only fits one source at a time,\n even if other sources have overlapping apertures; or\n 'group', which groups clusters of sources together\n and fits all of them simultaneously. *The default\n (and recommended method is 'nearest')*\n verbose: bool\n Whether or not to show info about the fit progress.\n *Default=False*\n fit_position: bool\n Whether or not to fit the position along with the\n amplitude of each source. *Default=True*\n pos_range: int\n Maximum number of pixels (in image pixels) that\n a sources position can be changed. If ``pos_range=0``\n no bounds will be set. 
*Defaul=0*\n indices: `~numpy.ndarray`\n Indices for sources to calculate PSF photometry.\n It is often advantageous to remove sources with\n bad pixels and sublinear flux to save processing time.\n All sources not included in indices will have their\n psf flux set to NaN.\n \"\"\"\n from astropyp.phot.psf import perform_psf_photometry\n if indices is not None:\n if isinstance(indices, six.string_types):\n indices = self.indices[indices]\n if separation is None and (method=='neighbors' or method=='group'):\n separation = self.psf._width\n \n if method=='neighbors' or method=='single':\n self.catalog, self.src_psfs, kd_tree = perform_psf_photometry(\n self.img, self.catalog, self.psf, \n separation=separation, \n verbose=verbose, fit_position=fit_position, \n pos_range=pos_range, indices=indices,\n kd_tree=self.kd_tree, exptime=self.exptime,\n pool_size=pool_size)\n if self.kd_tree is None and kd_tree is not None:\n self.kd_tree = kd_tree\n elif method=='group':\n # By default the sources are grouped before performing PSF\n # photometry\n if group_sources:\n self.create_psf_groups(separation=separation, verbose=verbose)\n \n if indices is not None:\n if isinstance(indices, six.string_types):\n indices = self.indices[indices]\n groups = np.unique(self.indices['group'][indices])\n else:\n groups = self.groups\n \n # Fit PSF for each group or isolated source\n psf_flux = np.zeros(self.catalog.shape[0])\n psf_flux[:] = np.nan\n positions = np.array(zip(self.catalog.x, self.catalog.y))\n group_indices = self.indices['group']\n data = self.img\n \n for group_id in groups:\n group = self.groups[group_id]\n if verbose:\n #level = logger.getEffectiveLevel()\n logger.setLevel(logging.INFO)\n logger.info(\"Fitting {0}\".format(group_id))\n #logger.setLevel(level)\n if isinstance(group, astropyp.phot.psf.SinglePSF):\n amplitude = group.fit(data, fit_position, pos_range)\n flux = self.psf.get_flux(amplitude)\n elif isinstance(group, astropyp.phot.psf.GroupPSF):\n group_idx = (group_indices==group.group_id)\n amplitudes = np.array(group.fit(data))\n flux = self.psf.get_flux(amplitude)\n else:\n raise Exception(\"PSF photometry is currently only\"\n \"supported for the SinglePSF and GroupPSF classes\")\n psf_flux[group_indices==group_id] = np.array(flux)\n # Caluclate the error in the PSF flux and magnitude\n # In the future a better method may be to look at the\n # residual left over after the PSF is subtracted from\n # the background\n if self.gain is not None:\n psf_flux_err = 1.0857*np.sqrt(\n 2*np.pi*self.psf._radius**2*self.bkg.globalrms**2+\n psf_flux/self.gain)\n else:\n psf_flux_err = 1.0857*np.sqrt(\n 2*np.pi*self.psf._radius**2*self.bkg.globalrms**2\n )\n # Save the psf derived quantities in the catalog\n # Ignore divide by zero errors that occur when sources\n # have zero psf flux (i.e. bad sources)\n np_err = np.geterr()\n np.seterr(divide='ignore')\n psf_mag = -2.5*np.log10(psf_flux/self.exptime)\n self.catalog.sources['psf_flux'] = psf_flux\n self.catalog.sources['psf_flux_err'] = psf_flux_err\n self.catalog.sources['psf_mag'] = psf_mag\n self.catalog.sources['psf_mag_err'] = psf_flux_err/psf_flux\n np.seterr(**np_err)\n else:\n raise Exception(\"PSF method not found\")\n return self.catalog.sources['psf_flux']\n def create_psf_groups(self, separation=None, cluster_method='dbscan',\n verbose=False):\n \"\"\"\n Group sources with overlapping PSF's. 
This method uses the\n        GroupPSF class and is not as efficient as using the\n        \"nearest\" method to perform PSF photometry on\n        crowded fields.\n        \"\"\"
\n        \n        if separation is None:\n            separation = self.psf._width\n        \n        if cluster_method=='dbscan':
\n            # If user has sklearn installed, use DBSCAN to cluster the objects\n            # in groups with overlapping PSF's\n            try:
\n                from sklearn.cluster import DBSCAN\n            except ImportError:\n                raise Exception(\n                    \"You must install sklearn to use 'dbscan' clustering\")
\n            \n            # list() keeps this working under Python 3, where zip is lazy\n            positions = np.array(list(zip(self.catalog.x, self.catalog.y)))\n            # Compute DBSCAN
\n            db = DBSCAN(eps=separation, min_samples=1).fit(positions)\n            self.db = db\n            self.groups = OrderedDict()\n            self.group_ids = np.unique(db.labels_)\n            group_indices = db.labels_\n            self.indices['group'] = group_indices
\n        else:\n            raise Exception(\n                \"cluster_method {0} is not currently supported\".format(\n                    cluster_method))
\n        \n        # If a 'peak' field has not been defined in the catalog,\n        # use the pixel value at each point's position as the \n        # initial amplitude for the fit\n        amplitudes = self.catalog.peak
\n        if amplitudes is None:\n            amplitudes = self.img[\n                self.catalog.y.astype(int),\n                self.catalog.x.astype(int)]
\n        # Add a SinglePSF object for each source without any neighbors\n        # and a group object for each source with neighbors that might\n        # affect its flux\n        for group_id in self.group_ids:
\n            group_idx = (group_id==group_indices)\n            group_count = positions[group_idx].shape[0]\n            if group_count==1:
\n                group_psf = astropyp.phot.psf.SinglePSF(\n                    self.psf._psf_array,\n                    amplitudes[group_idx][0],\n                    positions[group_idx][0][0],\n                    positions[group_idx][0][1],\n                    self.psf._subsampling,\n                    self.psf.fix_com\n                )
\n            else:\n                # Create PSF object for the entire group\n                group_psf = astropyp.phot.psf.GroupPSF(\n                    group_id, self.psf, positions[group_idx], \n                    amplitudes[group_idx], mask_img=self.mask_group,\n                    show_plots=self.show_plots)
\n            self.groups[group_id] = group_psf\n        if self.show_plots or verbose:\n            try:\n                import matplotlib\n                import matplotlib.pyplot as plt\n            except ImportError:\n                raise Exception(\n                    \"You must have matplotlib installed to create plots\")
\n            fig, ax = plt.subplots()\n            x = positions[:,0]\n            y = positions[:,1]\n            for group in self.group_ids:\n                ax.plot(\n                    x[group_indices==group], \n                    y[group_indices==group], 'o')\n            plt.show()\n        return self.groups
\n\nclass Exposure:\n    \"\"\"\n    Container for a focal array of SingleImages, for example a camera image\n    with multiple CCD images.\n    \n    Parameters\n    ----------\n    exp_info: dict\n        Dictionary of parameters for the exposure. At a minimum this should\n        include the exposure time and airmass of the exposure\n    \"\"\"
\n    def __init__(self, exp_info, \n            img_filename=None, dqmask_filename=None, wtmap_filename=None,\n            img=None, dqmask=None, wtmap=None, wcs=None, \n            detect_sources=True, frames=None, memmap=True,\n            aper_radius=None, subsampling=5, psf=None, sex_params={}, \n            gain=None):
\n        # Set exposure info as attributes of the class\n        for k,v in exp_info.items():\n            setattr(self, k, v)
\n        # Load the image data (if necessary)\n        if img is None:\n            img = fits.open(img_filename, memmap=memmap)\n        if dqmask is None and dqmask_filename is not None:\n            dqmask = fits.open(dqmask_filename, memmap=memmap)\n        if wtmap is None and wtmap_filename is not None:\n            wtmap = fits.open(wtmap_filename, memmap=memmap)
\n        self.img = img\n        self.dqmask = dqmask\n        self.wtmap = wtmap\n        self.aper_radius = aper_radius\n        self.sex_params = sex_params\n        self.ccd_dict = OrderedDict()\n        self.gain = gain
\n        \n        # If the user didn't specify which frames to include, use all of them\n        if frames is None:\n            frames = range(1,len(img))
\n        \n        # Add the CCD's to the exposure\n        for frame in frames:\n            img_data = img[frame].data.byteswap(True).newbyteorder()\n            if dqmask is not None:\n                dqmask_data = dqmask[frame].data.byteswap(True).newbyteorder()\n            else:\n                dqmask_data = None
\n            if wtmap is not None:\n                wtmap_data = wtmap[frame].data.byteswap(True).newbyteorder()\n            else:\n                wtmap_data = None
\n\n            self.ccd_dict[frame] = DecamCCD(self, img[frame].header, img_data,\n                dqmask_data, wtmap_data, aper_radius, subsampling, wcs)
\n        \n        if detect_sources:\n            for frame in frames:\n                self.ccd_dict[frame].detect_sources(self.sex_params, \n                    gain=self.gain, aper_radius=self.aper_radius)
\n\n########################################################\n# The following functions are deprecated and will be\n# removed in a future release\n########################################################
\n\ndef calculate_magnitude(x, zero, color, extinct):\n    \"\"\"\n    x[0] = reference in instrument band\n    x[1] = reference in other band\n    x[2] = airmass\n    \"\"\"\n    #return (x[0]-zero+color*x[1]-extinct*x[2])/(1+color)\n    return x[0] + zero + color*(x[0]-x[1]) + extinct*x[2]
\n\ndef calibrate_standard(sources, mag_name, ref1_name, ref2_name, \n        mag_err_name=None, \n        ref1_err_name=None, ref2_err_name=None, init_zero=-25, init_color=-.1, \n        init_extinction=.1, fit_package='scipy', airmass_name='airmass'):
\n    \"\"\"\n    Calibrate a standard field with a set of reference fields\n    \n    Parameters\n    ----------\n    sources: `astropy.table.QTable`\n        Catalog of observations\n    mag_name: str\n        Name of the magnitude column in ``sources``\n    ref1_name: str\n        Name of the reference column in ``sources`` in the same filter as \n        ``mag_name``
\n    ref2_name: str\n        Name of the reference column in ``sources`` to use for the color \n        correction coefficient\n    mag_err_name: str\n        Name of the magnitude error column\n    ref1_err_name: str\n        Name of the error column for reference 1\n    ref2_err_name: str\n        Name of the error column for reference 2
\n    init_zero: float\n        Initial guess for the zero point\n    init_color: float\n        Initial guess for the color correction coefficient\n    init_extinction: float\n        Initial guess for the extinction coefficient\n    \"\"\"
\n    good_sources = sources\n    init_params = [init_zero, init_color, init_extinction]\n    instr_mag = good_sources[mag_name]\n    ref_mag1 = good_sources[ref1_name]\n    ref_mag2 = good_sources[ref2_name]\n    airmass = good_sources[airmass_name]
\n    \n    # Add weights if the user specified any weights\n    if (mag_err_name is not None or \n            ref1_err_name is not None or \n            ref2_err_name is not None):
\n        weights = np.zeros((len(good_sources),))\n        if mag_err_name is not None:\n            weights += good_sources[mag_err_name]**2\n        if ref1_err_name is not None:\n            weights += good_sources[ref1_err_name]**2\n        if ref2_err_name is not None:\n            weights += good_sources[ref2_err_name]**2\n        weights = 1/weights\n    else:\n        weights = None
\n    \n    if fit_package=='scipy':\n        from scipy.optimize import curve_fit\n        x = [ref_mag1,ref_mag2,airmass]\n        results = curve_fit(calculate_magnitude, x, instr_mag, init_params)
\n    elif fit_package=='statsmodels':\n        import statsmodels.formula.api as smf\n        good_sources['diff'] = good_sources[mag_name] - good_sources[ref1_name]\n        good_sources['color'] = good_sources[ref1_name] - \\\n            good_sources[ref2_name]
\n        if weights is None:\n            result = smf.OLS.from_formula(formula='diff ~ color + airmass', \n                data=good_sources).fit()\n        else:\n            result = smf.WLS.from_formula(formula='diff ~ color + airmass', \n                data=good_sources, weights=weights).fit()
\n        results = [result.params.Intercept, result.params.color, \n            result.params.airmass],result\n    else:\n        raise Exception(\n            \"fit_package must be either 'statsmodels' or 'scipy'(default)\")
\n    logger.debug(\n        \"Zero point: {0}\\nColor Correction: {1}\\nExtinction: {2}\\n\".format\n        (*results[0]))\n    return results
\n\ndef calibrate_2band(instr1, instr2, airmass1, airmass2, coeff1, coeff2,\n        zero_key='zero', color_key='color', extinct_key='extinction'):
\n    \"\"\"\n    This solves the set of equations:\n        i_0 = i + A_i + C_i(i-z) + k_i X\n        z_0 = z + A_z + C_z(z-i) + k_z X\n    where i_0 and z_0 are the instrumental magnitudes, A_i and A_z are the \n    zero points, C_i and C_z are the color terms, k_i and k_z are the \n    atmospheric coefficients, and X is the airmass.
\n    \n    The solution is of the form:\n        (1+C_i)i = b_i + C_i z\n        (1+C_z)z = b_z + C_z i\n    where\n        b_i = i_0 - A_i - k_i X\n        b_z = z_0 - A_z - k_z X\n    so that\n        i = (C_i b_z + C_z b_i + b_i) / d\n        z = (C_z b_i + C_i b_z + b_z) / d\n    where\n        d = (1+C_i+C_z)
\n    \n    Parameters\n    ----------\n    instr1: array-like\n        Instrumental magnitudes of filter 1\n    instr2: array-like\n        Instrumental magnitudes of filter 2\n    airmass1: array-like\n        Airmass for each observation in filter 1\n    airmass2: array-like\n        Airmass for each observation in filter 2
\n    coeff1: array-like\n        List of coefficients for calibrating instrumental magnitudes for \n        instrument 1.\n            * coeff1[0]: zeropoint\n            * coeff1[1]: color coefficient\n            * coeff1[2]: extinction coefficient\n    coeff2: array-like\n        List of coefficients for calibrating instrumental magnitudes for \n        instrument 2
\n    \n    Returns\n    -------\n    mag1: array-like\n        Calibrated magnitude 1\n    mag2: array-like\n        Calibrated magnitude 2\n    \"\"\"
\n    b1 = instr1 - coeff1[zero_key] - coeff1[extinct_key]*airmass1\n    b2 = instr2 - coeff2[zero_key] - coeff2[extinct_key]*airmass2\n    d = 1 + coeff1[color_key] + coeff2[color_key]\n    mag1 = (coeff1[color_key]*b2 + b1*(1+coeff2[color_key])) / d\n    mag2 = (coeff2[color_key]*b1 + b2*(1+coeff1[color_key])) / d\n    return (mag1,mag2)
\n\ndef calibrate_1band(instr, airmass, coeff, color_band=None, zero_key='zero',\n        color_key='color', extinct_key='extinction'):
\n    \"\"\"\n    Given a solution for z from calibrate_iz, this returns a Y magnitude using:\n        Y_0 = Y + A_Y + C_Y(Y-z) + k_Y X\n    where Y_0 is the instrumental magnitude, A_Y is the zero point, C_Y is the \n    color coefficient, k_Y is the extinction coefficient, and X is the airmass\n    \"\"\"
\n    if color_band is not None:\n        mag = (instr - coeff[zero_key] + coeff[color_key]*color_band - \n            coeff[extinct_key]*airmass)/(1+coeff[color_key])
\n    else:\n        # no color term available: only zero point and extinction apply\n        mag = instr - coeff[zero_key] - coeff[extinct_key]*airmass\n    return mag"
] |
[
[
"numpy.array",
"scipy.optimize.curve_fit",
"numpy.zeros",
"numpy.geterr",
"numpy.seterr",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"sklearn.cluster.DBSCAN",
"numpy.arange",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.log10",
"numpy.meshgrid",
"numpy.unique"
]
] |
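
A note on the grouping step in the astropyp code above: because DBSCAN is run with min_samples=1, every catalog source is a core point, so nothing is labeled as noise and db.labels_ cleanly partitions the catalog into PSF groups (singletons become SinglePSF objects, the rest GroupPSF). A minimal sketch of that one step, with made-up coordinates standing in for catalog.x/catalog.y:

import numpy as np
from sklearn.cluster import DBSCAN

positions = np.array([[10.0, 10.0], [12.0, 11.0], [50.0, 52.0]])  # toy source positions
db = DBSCAN(eps=3.0, min_samples=1).fit(positions)  # eps plays the role of `separation`
# db.labels_ == [0, 0, 1]: the first two sources blend into one group, the third stands alone
groups = {gid: positions[db.labels_ == gid] for gid in np.unique(db.labels_)}
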
WestXu/JAQS
|
[
"3c9389afab518f188b8628af72297d750c07dfb1"
] |
[
"jaqs/trade/strategy.py"
] |
[
"# encoding: utf-8\n\"\"\"\nClasses defined in strategy module\n\"\"\"\n\nfrom __future__ import print_function\nimport abc\nfrom abc import abstractmethod\nfrom six import with_metaclass\n\nimport numpy as np\nimport pandas as pd\n\nfrom jaqs.data.basic import GoalPosition\nfrom jaqs.util.sequence import SequenceGenerator\nfrom jaqs.data.basic import Bar, Quote\n# import jaqs.util as jutil\n\nfrom jaqs.trade import model\nfrom jaqs.trade import common\n\n\nclass Strategy(with_metaclass(abc.ABCMeta)):\n \"\"\"\n Abstract base class for strategies.\n\n Attributes\n ----------\n ctx : Context object\n Used to store relevant context of the strategy.\n run_mode : int\n Whether the trategy is under back-testing or live trading.\n pm : trade.PortfolioManger\n Responsible for managing orders, trades and positions.\n store : dict\n A dictionary to store variables that will be automatically saved.\n\n Methods\n -------\n\n \"\"\"\n \n def __init__(self):\n super(Strategy, self).__init__()\n self.ctx = None\n # self.run_mode = common.RUN_MODE.BACKTEST\n \n # self.ctx.pm = PortfolioManager(strategy=self)\n # self.pm = self.ctx.pm\n\n # self.task_id_map = defaultdict(list)\n self.seq_gen = SequenceGenerator()\n\n self.init_balance = 0.0\n \n def init_from_config(self, props):\n pass\n \n def initialize(self):\n pass\n \n def _get_next_num(self, key):\n \"\"\"used to generate id for orders and trades.\"\"\"\n return str(np.int64(self.ctx.trade_date) * 10000 + self.seq_gen.get_next(key))\n \n '''\n # -------------------------------------------------------------------------------------------\n # Order\n\n def place_order(self, symbol, action, price, size, algo=\"\", algo_param=None):\n \"\"\"\n Send a request with an order to the system. Execution algorithm will be automatically chosen.\n Returns task_id which can be used to query execution and orders of this task.\n\n Parameters\n ----------\n symbol : str\n the symbol of symbol to be ordered, eg. \"000001.SZ\".\n action : str\n price : float.\n The price to be ordered at.\n size : int\n The quantity to be ordered at.\n algo : str, optional\n The algorithm to be used. If None then use default algorithm.\n algo_param : dict, optional\n Parameters of the algorithm. 
Default {}.\n\n Returns\n -------\n res : str\n msg : str.\n if res is None, message contains error information\n\n \"\"\"\n pass\n\n def cancel_order(self, task_id):\n \"\"\"Cancel all uncome orders of a task according to its task ID.\n\n Parameters\n ----------\n task_id : str\n ID of the task.\n NOTE we CANNOT cancel order by entrust_no because this may break the execution of algorithm.\n\n Returns\n -------\n result : str\n Indicate whether the cancel succeed.\n err_msg : str\n\n \"\"\"\n pass\n\n # -------------------------------------------------------------------------------------------\n # Query\n\n def query_account(self):\n \"\"\"\n \n Returns\n -------\n pd.DataFrame\n\n \"\"\"\n pass\n\n def query_universe(self):\n \"\"\"\n \n Returns\n -------\n pd.DataFrame\n\n \"\"\"\n pass\n\n def query_position(self, mode=\"all\", symbols=\"\"):\n \"\"\"\n Parameters\n ----------\n mode : str, optional\n symbols : str, optional\n Separated by ,\n \n Returns\n -------\n pd.DataFrame\n \n \"\"\"\n pass\n\n def query_portfolio(self):\n \"\"\"\n Return net positions of all securities in the strategy universe (including zero positions).\n\n Returns\n --------\n pd.DataFrame\n Current position of the strategy.\n\n \"\"\"\n pass\n\n def query_task(self, task_id=-1):\n \"\"\"\n Query order information of current day.\n\n Parameters\n ----------\n task_id : int, optional\n ID of the task. -1 by default (return all orders of the day; else return orders of this task).\n\n Returns\n -------\n pd.DataFrame\n\n \"\"\"\n\n def query_order(self, task_id=-1):\n \"\"\"\n Query order information of current day.\n\n Parameters\n ----------\n task_id : int\n ID of the task. -1 by default (return all orders of the day; else return orders of this task).\n\n Returns\n -------\n pd.DataFrame\n\n \"\"\"\n pass\n\n def query_trade(self, task_id=-1):\n \"\"\"\n Query trade information of current day.\n\n Parameters\n -----------\n task_id : int\n ID of the task. -1 by default (return all orders of the day; else return orders of this task).\n\n Returns\n --------\n pd.DataFrame\n\n \"\"\"\n pass\n\n # -------------------------------------------------------------------------------------------\n # Portfolio Order\n\n def goal_portfolio(self, positions, algo=\"\", algo_param=None):\n \"\"\"\n Let the system automatically generate orders according to portfolio positions goal.\n If there are uncome orders of any symbol in the strategy universe, this order will be rejected. #TODO not impl\n\n Parameters\n -----------\n positions : list of GoalPosition\n This must include positions of all securities in the strategy universe.\n Use former value if there is no change.\n algo : str, optional\n The algorithm to be used. If None then use default algorithm.\n algo_param : dict, optional\n Parameters of the algorithm. Default {}.\n\n Returns\n --------\n result : bool\n Whether this command is accepted. True means the system's acceptance, instead of positions have changed.\n err_msg : str\n\n \"\"\"\n pass\n\n def stop_portfolio(self):\n \"\"\"\n Returns\n -------\n result : str\n message : str\n If result is None, message contains error information\n \n \"\"\"\n pass\n\n def place_batch_order(self, orders, algo=\"\", algo_param=None):\n \"\"\"Send a batch of orders to the system together.\n\n Parameters\n -----------\n orders : list\n a list of trade.model.Order objects.\n algo : str, optional\n The algorithm to be used. If None then use default algorithm.\n algo_param : dict, optional\n Parameters of the algorithm. 
Default {}.\n\n Returns\n -------\n task_id : str\n Task ID generated by entrust_order.\n err_msg : str.\n\n \"\"\"\n pass\n\n def basket_order(self, orders, algo=\"\", algo_param=None):\n \"\"\"\n Parameters\n ----------\n orders : list of dict\n [ {\"security\": \"000001.SZ\", \"ref_price\": 10.0, \"inc_size\" : 100}, ...]\n algo : str, optional\n algo_param : dict or None, optional\n \n Returns\n -------\n result : str\n message : str\n If result is None, message contains error information\n \n \"\"\"\n pass\n '''\n \n # -------------------------------------------------------------------------------------------\n # Callback Indications & Responses\n \n def on_trade(self, ind):\n \"\"\"\n\n Parameters\n ----------\n ind : TradeInd\n\n Returns\n -------\n\n \"\"\"\n pass\n\n def on_order_status(self, ind):\n \"\"\"\n\n Parameters\n ----------\n ind : OrderStatusInd\n\n Returns\n -------\n\n \"\"\"\n pass\n \n def on_order_rsp(self, rsp):\n \"\"\"\n \n Parameters\n ----------\n rsp\n\n \"\"\"\n pass\n\n def on_task_rsp(self, rsp):\n \"\"\"\n \n Parameters\n ----------\n rsp\n\n \"\"\"\n pass\n\n def on_task_status(self, ind):\n \"\"\"\n \n Parameters\n ----------\n rsp\n\n \"\"\"\n pass\n\n\nclass AlphaStrategy(Strategy, model.FuncRegisterable):\n \"\"\"\n Alpha strategy class.\n\n Attributes\n ----------\n period : str\n Interval between current and next. {'day', 'week', 'month'}\n days_delay : int\n n'th business day after next period.\n weights : np.array with the same shape with self.context.universe\n benchmark : str\n The benchmark symbol.\n risk_model : model.RiskModel\n signal_model : model.ReturnModel\n cost_model : model.CostModel\n\n Methods\n -------\n\n \"\"\"\n # TODO register context\n def __init__(self, signal_model=None, stock_selector=None,\n cost_model=None, risk_model=None,\n pc_method=\"equal_weight\",\n match_method=\"vwap\",\n fc_selector=None,\n fc_constructor=None,\n fc_options=None\n ):\n super(AlphaStrategy, self).__init__()\n \n self.period = \"\"\n self.n_periods = 1\n self.days_delay = 0\n self.cash = 0\n self.position_ratio = 0.98\n self.single_symbol_weight_limit = 1.0\n \n self.risk_model = risk_model\n self.signal_model = signal_model\n self.cost_model = cost_model\n self.stock_selector = stock_selector\n \n self.weights = None\n \n self.pc_method = pc_method\n\n self.goal_positions = None\n self.match_method = match_method\n\n self.portfolio_construction = self.forecast_portfolio_construction if pc_method==\"forecast\" else self.default_portfolio_construction\n self._fc_selector = fc_selector if fc_selector else AlphaStrategy.default_forecast_selector\n self._fc_constructor = fc_constructor if fc_constructor else AlphaStrategy.default_forecast_constructor\n self._fc_options = fc_options\n\n def init_from_config(self, props):\n Strategy.init_from_config(self, props)\n \n self.cash = props.get('init_balance', 100000000)\n self.period = props.get('period', 'month')\n self.days_delay = props.get('days_delay', 0)\n self.n_periods = props.get('n_periods', 1)\n self.position_ratio = props.get('position_ratio', 0.98)\n self.single_symbol_weight_limit = props.get('single_symbol_weight_limit', 1.0)\n\n self.use_pc_method(name='industry_neutral_equal_weight', func=self.industry_neutral_equal_weight, options=None)\n self.use_pc_method(name='industry_neutral_index_weight', func=self.industry_neutral_index_weight, options=None)\n self.use_pc_method(name='equal_weight', func=self.equal_weight, options=None)\n self.use_pc_method(name='mc', func=self.optimize_mc, 
options={'util_func': self.util_net_signal,\n 'constraints': None,\n 'initial_value': None})\n self.use_pc_method(name='factor_value_weight', func=self.factor_value_weight, options=None)\n self.use_pc_method(name='index_weight', func=self.index_weight, options=None)\n self.use_pc_method(name='market_value_weight', func=self.market_value_weight, options=None)\n self.use_pc_method(name='market_value_sqrt_weight', func=self.market_value_weight, options={'sqrt': True})\n self.use_pc_method(name='equal_index_weight', func=self.equal_index_weight, options=None)\n\n self._validate_parameters()\n print(\"AlphaStrategy Initialized.\")\n \n def _validate_parameters(self):\n if self.pc_method in ['mc', 'quad_opt']:\n if self.signal_model is None and self.cost_model is None and self.risk_model is None:\n raise ValueError(\"At least one model of signal, cost and risk must be provided.\")\n elif self.pc_method in ['factor_value_weight']:\n if self.signal_model is None:\n raise ValueError(\"signal_model must be provided when pc_method = 'factor_value_weight'\")\n elif self.pc_method in ['equal_weight',\n 'index_weight',\n 'equal_index_weight',\n 'market_value_weight',\n 'market_value_sqrt_weight',\n 'industry_neutral_index_weight',\n 'industry_neutral_equal_weight']:\n pass\n elif self.pc_method in ['forecast']:\n pass\n else:\n raise NotImplementedError(\"pc_method = {:s}\".format(self.pc_method))\n \n def on_trade(self, ind):\n \"\"\"\n\n Parameters\n ----------\n ind : TradeInd\n\n Returns\n -------\n\n \"\"\"\n pass\n \n def use_pc_method(self, name, func, options=None):\n self._register_func(name, func, options)\n \n def _get_weights_last(self):\n current_positions = self.query_portfolio()\n univ_pos_dic = {p.symbol: p.current_size for p in current_positions}\n for sec in self.ctx.universe:\n if sec not in univ_pos_dic:\n univ_pos_dic[sec] = 0\n return univ_pos_dic\n\n def util_net_signal(self, weights_target):\n \"\"\"\n util = net_signal = signal - all costs.\n \n Parameters\n ----------\n weights_target : dict\n \n \"\"\"\n weights_last = self._get_weights_last()\n \n signal = self.signal_model.forecast_signal(weights_target)\n cost = self.cost_model.calc_cost(weights_last, weights_target)\n # liquid = self.liquid_model.calc_liquid(weight_now)\n risk = self.risk_model.calc_risk(weights_target)\n \n risk_coef = 1.0\n cost_coef = 1.0\n net_signal = signal - risk_coef * risk - cost_coef * cost # - liquid * liq_factor\n return net_signal\n\n\n def default_portfolio_construction(self, universe_list=None):\n \"\"\"\n Calculate target weights of each symbol in the strategy universe.\n User should not modify this function arbitrarily.\n \n Attributes\n ----------\n universe_list : list of str\n Symbols that should be considered during this re-balance.\n\n Returns\n -------\n self.weights : weights / GoalPosition (without rounding)\n Weights of each symbol.\n\n \"\"\"\n # Step.1 filter and narrow down universe to sub-universe\n if self.stock_selector is not None:\n selected_list = self.stock_selector.get_selection()\n if type(selected_list) is pd.DataFrame:\n self.ctx.forecast_selected_list = selected_list\n universe_list = [s for s in universe_list if s in selected_list['symbol']]\n else:\n universe_list = [s for s in universe_list if s in selected_list]\n\n sub_univ = sorted(universe_list)\n\n self.ctx.snapshot_sub = self.ctx.snapshot.loc[sub_univ, :]\n\n # Step.2 pick the registered portfolio construction method\n\n rf = self.func_table[self.pc_method]\n func, options = rf.func, rf.options\n\n # Step.3 
use the registered method to calculate weights and get weights for all symbols in universe\n        weights_sub_universe, msg = func(**options)\n\n        # portfolio balance check\n        weights_all_universe = {symbol: weights_sub_universe.get(symbol, 0.0) for symbol in self.ctx.universe}\n        if msg:\n            print(msg)
\n\n        # if nan assign zero\n        weights_all_universe = {k: 0.0 if np.isnan(v) else v for k, v in weights_all_universe.items()}\n        \n        # normalize\n        w_sum = np.sum(np.abs(list(weights_all_universe.values())))\n        if w_sum > 1e-8:  # else all zeros weights\n            weights_all_universe = {k: v / w_sum for k, v in weights_all_universe.items()}
\n        \n        # single symbol weight limit process\n        if self.single_symbol_weight_limit < 1:\n            weights_all_universe = {k: v if v < self.single_symbol_weight_limit else self.single_symbol_weight_limit\n                                    for k, v in weights_all_universe.items()}\n\n        self.weights = weights_all_universe
\n\n    def forecast_portfolio_construction(self, universe_list=None):\n\n        assert callable(self._fc_selector), \"fc_selector should be a function\"\n        assert callable(self._fc_constructor), \"fc_constructor should be a function\"\n\n        forecast_list = self._fc_selector(self, universe=universe_list)\n\n        options = {}\n        if self._fc_options:\n            options.update(self._fc_options)
\n\n        self.weights = self._fc_constructor(self,\n                                            self.weights.copy() if self.weights else [],\n                                            forecast_list,\n                                            **options)
\n\n    @staticmethod\n    def default_forecast_selector(self, forecast_field='close_adj', universe=None):\n        forecast = self.ctx.dataview.get_snapshot(self.ctx.trade_date)[[forecast_field]].copy()\n        forecast = forecast.rename(columns={forecast_field: \"forecast\"})\n        forecast['symbol'] = forecast.index\n        return forecast
\n\n    @staticmethod\n    def default_forecast_constructor(self, cur_weights, forecast,\n                                     max_turnover=1,\n                                     alpha_threshold=0.0005,\n                                     turnover_cost_rate=0.001,\n                                     init_size=20):
\n\n        forecast = forecast.sort_values(['forecast'], ascending=False)\n        forecast.index = forecast['symbol']\n        if not cur_weights:\n            new_weights = forecast[:init_size].copy()\n            new_weights.loc[:, 'weight'] = 1.0 / len(forecast)\n            new_weights.index = new_weights['symbol']\n            return new_weights[['weight']].T.to_dict(orient='records')[0]
\n\n        cur_weights = pd.DataFrame({'symbol': list(cur_weights.keys()), 'weight': list(cur_weights.values())})\n        zero_weights = cur_weights[cur_weights['weight'] == 0].copy()\n        cur_weights = cur_weights[cur_weights['weight'] > 0].copy()\n        cur_weights.index = cur_weights['symbol']
\n\n        cur_weights['forecast'] = forecast['forecast']\n        cur_weights['forecast'] = cur_weights['forecast'].fillna(0.0)\n        cur_weights = cur_weights.sort_values(['forecast'])\n        cur_weights['handled'] = False\n\n        turnover = 0.0\n        new_weights = []
\n        for i in range(len(forecast)):\n            fc = forecast.iloc[i]\n            replaced = False\n            if fc['symbol'] not in cur_weights['symbol']:\n                for k in range(len(cur_weights)):\n                    tmp = cur_weights.iloc[k]\n                    if fc['forecast'] > tmp['forecast'] + turnover_cost_rate + alpha_threshold and \\\n                            tmp['weight'] + turnover <= max_turnover:
\n                        new_weights.append({'symbol': fc['symbol'], 'weight': tmp['weight']})\n                        new_weights.append({'symbol': tmp['symbol'], 'weight': 0})\n                        # mark the holding that was just swapped out as handled\n                        cur_weights.loc[tmp['symbol'], 'handled'] = True\n                        turnover += tmp['weight']\n                        replaced = True\n                        break\n                if not replaced:\n                    break
\n            else:\n                tmp = cur_weights.loc[fc['symbol']]\n                cur_weights.loc[fc['symbol'], 'handled'] = True\n                new_weights.append({'symbol': fc['symbol'], 'weight': tmp['weight']})\n\n            cur_weights = cur_weights[cur_weights['handled'] != True].copy()\n            if cur_weights.empty or turnover >= max_turnover:\n                break
\n\n        if not cur_weights.empty:\n            for i in range(len(cur_weights)):\n                tmp = cur_weights.iloc[i]\n                new_weights.append({'symbol': tmp['symbol'], 'weight': tmp['weight']})\n\n        new_weights = pd.DataFrame(new_weights)\n        w_sum = new_weights['weight'].sum()\n        if w_sum > 1e-8:  # else all zeros weights\n            new_weights.loc[:, 'weight'] /= w_sum
\n\n        # Keep all removed stocks in the weights, so that a stock can still be\n        # sold once it becomes tradable again after a suspension.\n        tmp = zero_weights[ ~zero_weights['symbol'].isin(new_weights['symbol']) ]\n        if not tmp.empty:\n            new_weights = pd.concat([new_weights, tmp])\n\n        new_weights.index = new_weights['symbol']\n        del new_weights['symbol']\n\n        return new_weights.T.to_dict(orient='records')[0]
\n\n    def equal_weight(self):\n        # discrete\n        weights = {k: 1.0 for k in self.ctx.snapshot_sub.index.values}\n        return weights, ''
\n\n    def industry_neutral_equal_weight(self):\n        snap = self.ctx.snapshot_sub\n        snap['symbol'] = snap.index\n\n        # calculate weight distribution of all industry\n        # df_weight = self.ctx.dataview.get_snapshot(self.ctx.trade_date)[['total_mv', 'index_member', 'sw1']]\n        # df_weight = df_weight[df_weight['index_member'] == 1]\n        # df_weight['weight'] = df_weight['total_mv']/df_weight['total_mv'].sum()\n        df_weight = self.ctx.dataview.get_snapshot(self.ctx.trade_date)[['index_weight', 'sw1']]\n        df_weight.columns = ['weight', 'sw1']\n\n        df_industry_weight = df_weight.groupby('sw1')['weight'].sum()
\n\n        # industries in portfolio\n        industry_list = list(set(snap['sw1'].values.flatten()))\n        df_industry_weight_sub = df_industry_weight.loc[industry_list]\n        df_industry_weight_sub = pd.DataFrame(df_industry_weight_sub)\n        df_industry_weight_sub.columns = ['weight']\n        df_industry_weight_sub['equal_weight'] = pd.DataFrame(snap.groupby('sw1')['sw1'].count()/len(snap))\n\n        df_industry_weight_sub['dif'] = df_industry_weight_sub['equal_weight'] - df_industry_weight_sub['weight']
\n\n        # normalize industry weights over the industries present in the portfolio\n        df_industry_weight_sub['norm_weight'] = df_industry_weight_sub['weight']/df_industry_weight_sub['weight'].sum()\n\n        df_industry_weight_sub = df_industry_weight_sub.reset_index()\n\n        count_industry = pd.DataFrame(snap.groupby('sw1')['close'].count()).reset_index()\n        count_industry.columns = ['sw1', 'count']\n        count_industry['internal_weight'] = 1.0/count_industry['count']
\n\n        snap = pd.merge(left = snap, right = count_industry[['sw1', 'internal_weight']], how = 'left', on = 'sw1')\n        snap = pd.merge(left = snap, right = df_industry_weight_sub[['sw1', 'norm_weight']], how = 'left', on = 'sw1')\n        snap['weight'] = snap['internal_weight'] * snap['norm_weight']\n        df_weight = snap[['symbol','weight']]\n\n        df_weight = df_weight.set_index('symbol')\n        weights = df_weight['weight'].to_dict()\n\n        return weights, ''
\n\n    def industry_neutral_index_weight(self):\n        snap = self.ctx.snapshot_sub\n        snap['symbol'] = snap.index\n\n        # calculate weight distribution of all industry\n        df_weight = self.ctx.dataview.get_snapshot(self.ctx.trade_date)[['total_mv', 'index_member', 'sw1']]\n        df_weight = df_weight[df_weight['index_member'] == 1]\n        df_weight['weight'] = df_weight['total_mv']/df_weight['total_mv'].sum()\n        df_industry_weight = df_weight.groupby('sw1')['weight'].sum()
\n\n        # industries in portfolio\n        industry_list = list(set(snap['sw1'].values.flatten()))\n        df_industry_weight_sub = df_industry_weight.loc[industry_list]\n        df_industry_weight_sub = pd.DataFrame(df_industry_weight_sub)\n        df_industry_weight_sub.columns = ['weight']\n        df_industry_weight_sub['norm_weight'] = df_industry_weight_sub['weight']/df_industry_weight_sub['weight'].sum()\n        df_industry_weight_sub = df_industry_weight_sub.reset_index()
\n\n        count_industry = pd.DataFrame(snap.groupby('sw1')['index_weight'].sum()).reset_index()\n        count_industry.columns = ['sw1', 'agg_index_weight']\n\n        snap = pd.merge(left = snap, right = count_industry[['sw1', 'agg_index_weight']], how = 'left', on = 'sw1')\n        snap['internal_weight'] = snap['index_weight']/snap['agg_index_weight']\n        snap = pd.merge(left = snap, right = df_industry_weight_sub[['sw1', 'norm_weight']], how = 'left', on = 'sw1')\n        snap['weight'] = snap['internal_weight'] * snap['norm_weight']\n        df_weight = snap[['symbol','weight']]
\n\n        df_weight = df_weight.set_index('symbol')\n        weights = df_weight['weight'].to_dict()\n\n        return weights, ''
\n\n    def market_value_weight(self, sqrt=False):\n        snap = self.ctx.snapshot_sub\n        # TODO: pass options, instead of hard-code 'total_mv', 'float_mv'\n        if 'total_mv' in snap.columns:\n            mv = snap['total_mv']\n        elif 'float_mv' in snap.columns:\n            mv = snap['float_mv']\n        else:\n            raise ValueError(\"market_value_weight is chosen, \"\n                             \"while no [float_mv] or [total_mv] field was found in the dataview.\")\n        mv = mv.fillna(0.0)\n        if sqrt:\n            print('sqrt')\n            mv = np.sqrt(mv)\n        weights = mv.to_dict()\n        return weights, \"\"
\n\n    def index_weight(self):\n        snap = self.ctx.snapshot_sub\n        if 'index_weight' not in snap.columns:\n            raise ValueError(\"index_weight is chosen, \"\n                             \"while no [index_weight] field was found in the dataview.\")\n        ser_index_weight = snap['index_weight']\n        ser_index_weight.fillna(0.0, inplace=True)\n        weights = ser_index_weight.to_dict()\n        return weights, \"\"
\n\n    def equal_index_weight(self):\n        snap = self.ctx.snapshot_sub\n        snap.fillna(0.0, inplace=True)\n\n        wt_equal = snap['index_member'] / sum(snap['index_member'])\n        wt_index = snap['index_weight'] / sum(snap['index_weight'])\n\n        wt_final = (wt_equal + wt_index) / 2\n\n        return wt_final.to_dict(), \"\"
\n\n    def factor_value_weight(self):\n        def long_only_weight_adjust(w):\n            \"\"\"\n            Adjust weights for long only constraints.\n            \n            Parameters\n            ----------\n            w : dict\n            \n            Returns\n            -------\n            res : dict\n            \n            \"\"\"\n            # TODO: we should not add a const\n            if not len(w):\n                return w\n            w_min = np.min(list(w.values()))\n            if w_min < 0:\n                delta = 2 * abs(w_min)\n                # if nan assign zero; else add const\n                w = {k: v + delta for k, v in w.items()}\n            return w
\n        \n        dic_forecasts = self.signal_model.make_forecast()\n        weights = {k: 0.0 if (np.isnan(v) or np.isinf(v)) else v for k, v in dic_forecasts.items()}\n        weights = long_only_weight_adjust(weights)\n        return weights, \"\"
\n    \n    def optimize_mc(self, util_func):\n        \"\"\"\n        Use naive search (Monte Carlo) to find the weights that maximize util_func.\n        \n        Parameters\n        ----------\n        util_func : callable\n            Takes the weight variables, returns the value of the utility function.\n\n        Returns\n        -------\n        min_weights : dict\n            Best weights found.\n        msg : str\n            error message.\n\n        \"\"\"
\n        n_exp = 5  # number of Monte Carlo experiments\n        sub_univ = self.ctx.snapshot_sub.index.values\n        n_var = len(sub_univ)\n        \n        weights_mat = np.random.rand(n_exp, n_var)\n        weights_mat = weights_mat / weights_mat.sum(axis=1).reshape(-1, 1)
\n        \n        min_f = 1e30\n        min_weights = None\n        for i in range(n_exp):\n            weights = {sub_univ[j]: weights_mat[i, j] for j in range(n_var)}\n            f = -util_func(weights)\n            if f < min_f:\n                min_weights = weights\n                min_f = f
\n        \n        if min_weights is None:\n            msg = \"No weights can make f > {:.2e} found in this search\".format(min_f)\n        else:\n            msg = \"\"\n        return min_weights, msg
\n\n    def re_weight_suspension(self, suspensions=None):\n        \"\"\"\n        How we deal with weights when there are suspended securities.\n\n        Parameters\n        ----------\n        suspensions : list of securities\n            None if no suspension.\n\n        \"\"\"\n        # TODO this can be refined: consider whether we increase or decrease shares on a suspended symbol.\n        if not suspensions:\n            return
\n        \n        if len(suspensions) == len(self.ctx.universe):\n            raise ValueError(\"All suspended\")  # TODO custom error\n        \n        weights = {sec: w if sec not in suspensions else 0.0 for sec, w in self.weights.items()}\n        weights_sum = np.sum(np.abs(list(weights.values())))\n        if weights_sum > 0.0:\n            weights = {sec: w / weights_sum for sec, w in weights.items()}\n        \n        self.weights = weights
\n    \n    def on_after_rebalance(self, total):\n        print(\"Before {} re-balance: available cash all = {:9.4e}\".format(self.ctx.trade_date, total))  # DEBUG\n        pass\n    \n    def send_bullets(self):\n        # self.ctx.trade_api.goal_portfolio_by_batch_order(self.goal_positions)\n        self.ctx.trade_api.goal_portfolio(self.goal_positions, algo=self.match_method)
\n    \n    def generate_weights_order(self, weights_dic, turnover, prices, suspensions=None):\n        \"\"\"\n        Send orders subject to the total turnover and the weights of different securities.\n\n        Parameters\n        ----------\n        weights_dic : dict of {symbol: weight}\n            Weight of each symbol.\n        turnover : float\n            Total turnover goal of all securities. (cash quota)\n        prices : dict of {str: float}\n            {symbol: price}\n        suspensions : list of str\n\n        Returns\n        -------\n        goals : list of GoalPosition\n        cash_left : float\n\n        \"\"\"
\n        # cash_left = 0.0\n        cash_used = 0.0\n        goals = []\n        for sec, w in weights_dic.items():\n            goal_pos = dict()\n            goal_pos['symbol'] = sec\n            \n            if sec in suspensions:\n                current_pos = self.ctx.pm.get_position(sec)\n                goal_pos['size'] = current_pos.current_size if current_pos is not None else 0\n            elif abs(w) < 1e-8:\n                # order.entrust_size = 0\n                goal_pos['size'] = 0
\n            else:\n                price = prices[sec]\n                if not (np.isfinite(price) and np.isfinite(w)):\n                    raise ValueError(\"NaN or Inf encountered! \\n\"\n                                     \"trade_date={}, symbol={}, price={}, weight={}\".format(self.ctx.trade_date,\n                                                                                            sec, price, w))\n                shares_raw = w * turnover / price\n                # shares unit 100\n                shares = int(round(shares_raw / 100., 0)) * 100  # TODO cash may be not enough\n                # shares_left = shares_raw - shares * 100  # may be negative\n                # cash_left += shares_left * price\n                cash_used += shares * price\n                goal_pos['size'] = shares
\n            \n            goals.append(goal_pos)\n        \n        cash_left = turnover - cash_used\n        return goals, cash_left
\n    \n    def query_portfolio(self):\n        positions = []\n        for sec in self.ctx.pm.holding_securities:\n            positions.append(self.ctx.pm.get_position(sec))\n        return positions
\n\n\nclass EventDrivenStrategy(Strategy):\n    def __init__(self):\n        \n        super(EventDrivenStrategy, self).__init__()\n    \n    def on_bar(self, quote):\n        pass\n    \n    def on_tick(self, quote):\n        pass\n    \n    def on_cycle(self):\n        pass\n    \n    def initialize(self):\n        pass
\n    \n    def buy_or_sell_with_bar(self, action, bar, size, slippage=0.0):\n        \"\"\"\n        Send a limit order at bar.close +/- slippage.\n        \n        Parameters\n        ----------\n        action : {'Buy', 'Sell'}\n        bar : Bar\n        size : int or float\n            Should be positive.\n        slippage : float, optional\n            Should be non-negative\n\n        \"\"\"\n        if not isinstance(bar, Bar):\n            raise TypeError(\"quote must be Bar type. You may have passed a Quote.\")
\n        \n        if action == common.ORDER_ACTION.SELL:\n            slippage *= -1\n        entrust_price = bar.close + slippage\n        task_id, msg = self.ctx.trade_api.place_order(bar.symbol,\n                                                      action,\n                                                      entrust_price,\n                                                      size)\n        if (task_id is None) or (task_id == 0):\n            print(\"place_order FAILED! 
msg = {}\".format(msg))\n \n def buy(self, bar, size=1, slippage=0.0):\n \"\"\"\n Send a limit Buy order with bar.close + slippage.\n \n Parameters\n ----------\n bar : Bar\n size : int or float\n slippage : float\n\n \"\"\"\n self.buy_or_sell_with_bar(common.ORDER_ACTION.BUY, bar, size, slippage)\n\n def sell(self, bar, size=1, slippage=0.0):\n \"\"\"\n Send a limit Sell order with bar.close + slippage.\n \n Parameters\n ----------\n bar : Bar\n size : int or float\n slippage : float\n\n \"\"\"\n self.buy_or_sell_with_bar(common.ORDER_ACTION.SELL, bar, size, slippage)\n\n def cancel_all_orders(self):\n for task_id, task in self.ctx.pm.tasks.items():\n if task.trade_date == self.ctx.trade_date:\n if not task.is_finished:\n self.ctx.trade_api.cancel_order(task_id)\n\n def liquidate(self, quote, n, tick_size=1.0, pos=0):\n self.cancel_all_orders()\n if pos == 0:\n return\n \n ref_price = quote.close\n if pos < 0:\n action = common.ORDER_ACTION.BUY\n price = ref_price + n * tick_size\n else:\n action = common.ORDER_ACTION.SELL\n price = ref_price - n * tick_size\n self.ctx.trade_api.place_order(quote.symbol, action, price, abs(pos))\n"
] |
[
[
"numpy.isinf",
"numpy.isnan",
"numpy.random.rand",
"pandas.merge",
"pandas.DataFrame",
"numpy.sqrt",
"pandas.concat",
"numpy.isfinite",
"numpy.int64"
]
] |
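
The optimize_mc method in the JAQS strategy above is a plain Monte Carlo search: draw n_exp random long-only weight vectors, normalize each to sum to one, and keep the candidate with the highest utility. A standalone sketch of that loop; the function name and the quadratic utility below are illustrative stand-ins, not the strategy's actual signal/cost/risk combination:

import numpy as np

def mc_search(symbols, util_func, n_exp=5, seed=0):
    rng = np.random.default_rng(seed)
    w = rng.random((n_exp, len(symbols)))
    w /= w.sum(axis=1, keepdims=True)  # every candidate allocates 100% of capital
    candidates = [dict(zip(symbols, row)) for row in w]
    return max(candidates, key=util_func)  # highest utility wins

best = mc_search(["000001.SZ", "600000.SH"], lambda d: -sum(v * v for v in d.values()))
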
cascino/poke-gan
|
[
"da0e18da78cbbe4cda73deacdfbf5254eccedfba"
] |
[
"main.py"
] |
[
"import torch\nimport os\nfrom PIL import Image\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torchvision import transforms, utils\n\n'''Vanilla Generative Adversarial Network'''\n\n# disciminator network\nclass D(nn.Module):\n def __init__(self):\n super(D, self).__init__()\n self.ndf = 32\n self.main = nn.Sequential(\n nn.Conv2d(3, self.ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(self.ndf, self.ndf*2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(self.ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(self.ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1),\n nn.BatchNorm2d(self.ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Conv2d(self.ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n \n def forward(self, x):\n return self.main(x)\n\n# generator network \nclass G(nn.Module):\n def __init__(self, latent):\n super(G, self).__init__()\n self.ngf = 32\n self.latent = latent\n self.main = nn.Sequential(\n nn.ConvTranspose2d(self.latent, self.ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(self.ngf * 8),\n nn.ReLU(inplace=True),\n\n nn.ConvTranspose2d(self.ngf * 8, self.ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(self.ngf * 4),\n nn.ReLU(inplace=True),\n\n nn.ConvTranspose2d(self.ngf * 4, self.ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(self.ngf * 2),\n nn.ReLU(inplace=True),\n\n nn.ConvTranspose2d(self.ngf * 2, self.ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(self.ngf),\n nn.ReLU(inplace=True),\n\n nn.ConvTranspose2d(self.ngf, 3, 4, 2, 1, bias=False),\n nn.Tanh()\n )\n\n def forward(self, x):\n return self.main(x)\n\n# custom pytorch dataset\nclass PokeDataset(Dataset):\n def __init__(self, root_dir):\n self.root = root_dir\n self.tform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))\n ])\n\n def __len__(self):\n return len(os.listdir(self.root))\n\n def __getitem__(self, idx):\n file = os.path.dirname(__file__)\n working_dir = os.path.join(file, self.root)\n imname = str(idx).zfill(3) + '.jpg'\n impath = os.path.join(working_dir, imname)\n return self.tform(Image.open(impath))\n\n\n# hyperparameters\nepochs = 1000\nlr = 0.0003\ntorch.manual_seed(1)\nbatch_size = 64\nuse_cuda = torch.cuda.is_available()\nim_samples = 50\nlatent_size = 100\n\ndataset = PokeDataset('./data64')\ndataloader = DataLoader(dataset, batch_size, shuffle=True, num_workers=2)\n\ndiscriminator = D()\ngenerator = G(latent_size)\n\n# loss(o, t) = - 1/n \\sum_i (t[i] log(o[i]) + (1 - t[i]) log(1 - o[i]))\nloss = nn.BCELoss(size_average=True)\n\nif use_cuda:\n print('CUDA device found and active')\n discriminator.cuda()\n generator.cuda()\n loss.cuda()\n\n# optimizers\noptimD = optim.Adam(discriminator.parameters(), lr, betas=(0.5, 0.999))\noptimG = optim.Adam(generator.parameters(), lr, betas=(0.5, 0.999))\n\ntest_noise = torch.Tensor(batch_size, latent_size, 1, 1).normal_(0, 1)\nif use_cuda:\n test_noise = test_noise.cuda()\n\ntest_noiseV = Variable(test_noise)\n\nfor i in range(epochs):\n for j, data in enumerate(dataloader):\n latent = torch.Tensor(data.size(0), latent_size, 1, 1)\n label = torch.Tensor(data.size(0), 1, 1, 1)\n\n if use_cuda:\n latent = latent.cuda()\n label = label.cuda()\n data = data.cuda()\n\n # train discriminator \n # train on real\n # input an image, 0|1 if fake|real \n optimD.zero_grad()\n real_label = 
Variable(label.fill_(1), requires_grad=False)\n real_im = Variable(data, requires_grad=False)\n\n out = discriminator(real_im)\n loss_real = loss(out, real_label)\n loss_real.backward()\n\n # train D on fake\n noise = Variable(latent.normal_(0, 1), requires_grad=False)\n fake_label = Variable(label.fill_(0), requires_grad=False)\n\n fake = generator(noise)\n out = discriminator(fake.detach())\n loss_fake = loss(out, fake_label)\n loss_fake.backward()\n optimD.step()\n\n # train generator\n fake_real_label = Variable(label.fill_(1), requires_grad=False) \n optimG.zero_grad()\n out = discriminator(fake)\n loss_gen = loss(out, fake_real_label)\n loss_gen.backward()\n optimG.step()\n\n print('epoch [{}]/[{}] batch {} lossD {:.5f} lossG {:.5f}'.format(\n i, epochs, j, (loss_real.cpu().data[0] + loss_fake.cpu().data[0]), \n loss_gen.cpu().data[0]))\n\n if j % im_samples == 0:\n out = generator(test_noiseV).cpu().data\n utils.save_image(out, './fake.jpg', normalize=True)\n torch.save(discriminator, 'dis.pt')\n torch.save(generator, 'gen.pt')\n\ntorch.save(discriminator, 'dis.pt')\ntorch.save(generator, 'gen.pt')\n"
] |
[
[
"torch.nn.Sigmoid",
"torch.autograd.Variable",
"torch.nn.Tanh",
"torch.save",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.Tensor"
]
] |
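
One detail of the training loop above that is easy to miss: the discriminator step scores discriminator(fake.detach()), and detaching is what keeps the D update from back-propagating into the generator. A toy demonstration of that effect (the modules and shapes here are made up; only the detach pattern matches the code above):

import torch
from torch import nn

gen = nn.Linear(4, 4)                                 # stand-in generator
disc = nn.Sequential(nn.Linear(4, 1), nn.Sigmoid())   # stand-in discriminator
fake = gen(torch.randn(2, 4))
d_loss = nn.functional.binary_cross_entropy(disc(fake.detach()), torch.zeros(2, 1))
d_loss.backward()
assert all(p.grad is None for p in gen.parameters())  # no gradient reached the generator
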
ASethi77/StateOfTheMedia
|
[
"98512ba5e5242d7d9c2bdb0f6725d4ae0170ca8b"
] |
[
"src/model/linear_regression_model.py"
] |
[
"from .regression_model import RegressionModel\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\n\nclass LinearRegressionModel(RegressionModel):\n def __init__(self, train_data):\n RegressionModel.__init__(self, train_data)\n self.model = LinearRegression()\n\n def train(self, x=None, y=None):\n x = x if x is not None else self.train_x\n y = y if y is not None else self.train_y\n\n self.model.fit(x, y)\n\n def predict(self, x_in):\n return self.model.predict(x_in)\n\n def evaluate(self, x_in, y_out):\n predicted = self.predict(x_in)\n return mean_squared_error(y_out, predicted)\n\n"
] |
[
[
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression"
]
] |
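
Stripped of the RegressionModel plumbing, the wrapper above is the standard sklearn fit/predict/score flow. A self-contained equivalent on synthetic data (the array shapes and coefficients are illustrative, not taken from the project):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

x = np.random.rand(100, 3)
y = x @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(100)  # noisy linear target
model = LinearRegression().fit(x, y)           # what train() does
mse = mean_squared_error(y, model.predict(x))  # what evaluate() returns
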
ArtesiaWater/observations
|
[
"f04193dc3bfafc0bd9c465e30b16194ca411f266"
] |
[
"tests/test_002_obs_objects.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Created on Fri Jan 31 13:26:04 2020.\n\n@author: oebbe\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom hydropandas import obs_collection as oc\nfrom hydropandas import observation as obs\n\n# import sys\n# sys.path.insert(1, \"..\")\n\n\n# TEST_DIR = os.path.dirname(os.path.abspath(__file__))\n# PROJECT_DIR = os.path.abspath(os.path.join(TEST_DIR, os.pardir))\n# sys.path.insert(0, PROJECT_DIR)\n# os.chdir(TEST_DIR)\n\n\ndef test_groundwater_obs(name=\"grondwaterobs_001\", filternr=2):\n df = pd.DataFrame(\n index=pd.date_range(\"2020-1-1\", \"2020-1-10\"),\n data={\"Stand_m_tov_NAP\": np.random.rand(10)},\n )\n maaiveld = np.random.random()\n x = np.random.randint(0, 10000)\n y = np.random.randint(10000, 20000)\n gwo = obs.GroundwaterObs(\n df,\n name=name,\n locatie=name.split(\"_\")[0],\n x=x,\n y=y,\n maaiveld=maaiveld,\n meetpunt=maaiveld - 0.2,\n onderkant_filter=maaiveld - 10.0,\n bovenkant_filter=maaiveld - 9.0,\n metadata_available=True,\n filternr=filternr,\n filename=\"\",\n meta={\"info\": \"in deze dictionary \" \"kan je extra informatie kwijt\"},\n )\n return gwo\n\n\ndef test_waterlvl_obs():\n df = pd.DataFrame(\n index=pd.date_range(\"2020-1-1\", \"2020-1-10\"),\n data={\"Stand_m_tov_NAP\": np.random.rand(10)},\n )\n x = np.random.randint(0, 10000)\n y = np.random.randint(10000, 20000)\n wlvl = obs.WaterlvlObs(\n df,\n name=\"waterlvl_obs1\",\n locatie=\"obs1\",\n x=x,\n y=y,\n filename=\"\",\n meta={\"info\": \"in deze dictionary \" \"kan je extra informatie kwijt\"},\n )\n return wlvl\n\n\ndef test_groundwater_quality_obs():\n df = pd.DataFrame(\n index=pd.date_range(\"2020-1-1\", \"2020-1-10\"), data={\"pH\": np.random.rand(10)}\n )\n gwq = obs.WaterlvlObs(\n df,\n name=\"waterquality_obs1\",\n locatie=\"waterquality\",\n x=3,\n y=4,\n filename=\"\",\n meta={\"info\": \"in deze dictionary \" \"kan je extra informatie kwijt\"},\n )\n return gwq\n\n\ndef test_obscollection_from_list():\n o_list = []\n for i in range(10):\n o_list.append(test_groundwater_obs(name=f\"grondwaterobs_00{i}\", filternr=i))\n\n obs_col = oc.ObsCollection.from_list(o_list)\n\n return obs_col\n"
] |
[
[
"numpy.random.random",
"pandas.date_range",
"numpy.random.randint",
"numpy.random.rand"
]
] |
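
The hydropandas fixtures above draw unseeded random coordinates and heads, so a failing run is hard to reproduce. A seeded variant of the same observation frame (a sketch, not part of the test suite):

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)  # fixed seed keeps the test data reproducible
df = pd.DataFrame(
    index=pd.date_range("2020-1-1", "2020-1-10"),
    data={"Stand_m_tov_NAP": rng.random(10)},
)
x, y = rng.integers(0, 10000), rng.integers(10000, 20000)
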
ajabri/MONAI
|
[
"49e6bb8235ef7a9745d144d773f6377c9b1e5b71"
] |
[
"tests/test_convert_to_multi_channel.py"
] |
[
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import ConvertToMultiChannelBasedOnBratsClasses\n\nTEST_CASE = [\n np.array([[0, 1, 2], [1, 2, 4], [0, 1, 4]]),\n np.array([[[0, 1, 0], [1, 0, 1], [0, 1, 1]], [[0, 1, 1], [1, 1, 1], [0, 1, 1]], [[0, 0, 0], [0, 0, 1], [0, 0, 1]]]),\n]\n\n\nclass TestConvertToMultiChannel(unittest.TestCase):\n @parameterized.expand([TEST_CASE])\n def test_type_shape(self, data, expected_result):\n result = ConvertToMultiChannelBasedOnBratsClasses()(data)\n np.testing.assert_equal(result, expected_result)\n self.assertEqual(f\"{result.dtype}\", \"bool\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.array",
"numpy.testing.assert_equal"
]
] |
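
The expected output in TEST_CASE above pins down the BraTS label mapping exactly: channel 0 marks labels 1 and 4, channel 1 marks labels 1, 2 and 4, and channel 2 marks label 4 alone. The same mapping written out with plain numpy, checked against the test's own arrays:

import numpy as np

label = np.array([[0, 1, 2], [1, 2, 4], [0, 1, 4]])
multi = np.stack([np.isin(label, [1, 4]),     # channel 0: labels 1 and 4
                  np.isin(label, [1, 2, 4]),  # channel 1: labels 1, 2 and 4
                  label == 4])                # channel 2: label 4 only
# multi matches the expected array in TEST_CASE element for element
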
gaguilar/nlp
|
[
"a05ef16695fd8cdfc23cf07eba48d5e1b2d843e5"
] |
[
"tests/test_arrow_dataset.py"
] |
[
"import os\nimport pickle\nimport tempfile\nfrom unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nfrom nlp import concatenate_datasets\nfrom nlp.arrow_dataset import Dataset\nfrom nlp.features import ClassLabel, Features, Sequence, Value\nfrom nlp.info import DatasetInfo\n\nfrom .utils import require_tf, require_torch\n\n\nclass Unpicklable:\n def __getstate__(self):\n raise pickle.PicklingError()\n\n\nclass BaseDatasetTest(TestCase):\n def _create_dummy_dataset(self, multiple_columns=False):\n if multiple_columns:\n data = {\"col_1\": [3, 2, 1, 0], \"col_2\": [\"a\", \"b\", \"c\", \"d\"]}\n dset = Dataset.from_dict(data)\n else:\n dset = Dataset.from_dict({\"filename\": [\"my_name-train\" + \"_\" + str(x) for x in np.arange(30).tolist()]})\n return dset\n\n def test_dummy_dataset(self):\n dset = self._create_dummy_dataset()\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n dset = self._create_dummy_dataset(multiple_columns=True)\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n self.assertEqual(dset[0][\"col_1\"], 3)\n self.assertEqual(dset[\"col_1\"][0], 3)\n\n def test_dummy_dataset_pickle(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"dset.pt\")\n\n dset = self._create_dummy_dataset().select(range(10))\n\n with open(tmp_file, \"wb\") as f:\n pickle.dump(dset, f)\n\n with open(tmp_file, \"rb\") as f:\n dset = pickle.load(f)\n\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n def test_dummy_dataset_pickle_memory_mapped(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"dset.pt\")\n\n dset = (\n self._create_dummy_dataset().map(cache_file_name=os.path.join(tmp_dir, \"test.arrow\")).select(range(10))\n )\n dset._data = Unpicklable() # check that we don't pickle the entire table\n\n with open(tmp_file, \"wb\") as f:\n pickle.dump(dset, f)\n\n with open(tmp_file, \"rb\") as f:\n dset = pickle.load(f)\n\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"dset.pt\")\n\n dset = (\n self._create_dummy_dataset()\n .map(cache_file_name=os.path.join(tmp_dir, \"test.arrow\"))\n .select(range(10), indices_cache_file_name=os.path.join(tmp_dir, \"ind.arrow\"))\n )\n dset._data = Unpicklable()\n dset._indices = Unpicklable()\n\n with open(tmp_file, \"wb\") as f:\n pickle.dump(dset, f)\n\n with open(tmp_file, \"rb\") as f:\n dset = pickle.load(f)\n\n self.assertEqual(len(dset), 10)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset[\"filename\"][0], \"my_name-train_0\")\n\n def test_from_pandas(self):\n data = {\"col_1\": [3, 2, 1, 0], \"col_2\": [\"a\", \"b\", \"c\", \"d\"]}\n df = pd.DataFrame.from_dict(data)\n dset = Dataset.from_pandas(df)\n 
self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n dset = Dataset.from_pandas(df, features=features)\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n dset = Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features))\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"string\")})\n self.assertRaises(pa.ArrowTypeError, Dataset.from_pandas, df, features=features)\n\n def test_from_dict(self):\n data = {\"col_1\": [3, 2, 1, 0], \"col_2\": [\"a\", \"b\", \"c\", \"d\"]}\n dset = Dataset.from_dict(data)\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n dset = Dataset.from_dict(data, features=features)\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")})\n dset = Dataset.from_dict(data, features=features, info=DatasetInfo(features=features))\n self.assertListEqual(dset[\"col_1\"], data[\"col_1\"])\n self.assertListEqual(dset[\"col_2\"], data[\"col_2\"])\n self.assertListEqual(list(dset.features.keys()), [\"col_1\", \"col_2\"])\n self.assertDictEqual(dset.features, Features({\"col_1\": Value(\"int64\"), \"col_2\": Value(\"string\")}))\n\n features = Features({\"col_1\": Value(\"string\"), \"col_2\": Value(\"string\")})\n self.assertRaises(pa.ArrowTypeError, Dataset.from_dict, data, features=features)\n\n def test_set_format_numpy_multiple_columns(self):\n dset = self._create_dummy_dataset(multiple_columns=True)\n fingerprint = dset._fingerprint\n dset.set_format(type=\"numpy\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], np.ndarray)\n self.assertListEqual(list(dset[0][\"col_1\"].shape), [])\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n self.assertIsInstance(dset[\"col_1\"], np.ndarray)\n self.assertListEqual(list(dset[\"col_1\"].shape), [4])\n np.testing.assert_array_equal(dset[\"col_1\"], np.array([3, 2, 1, 0]))\n self.assertNotEqual(dset._fingerprint, fingerprint)\n\n dset.reset_format()\n with 
dset.formatted_as(type=\"numpy\", columns=[\"col_1\"]):\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], np.ndarray)\n self.assertListEqual(list(dset[0][\"col_1\"].shape), [])\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n self.assertIsInstance(dset[\"col_1\"], np.ndarray)\n self.assertListEqual(list(dset[\"col_1\"].shape), [4])\n np.testing.assert_array_equal(dset[\"col_1\"], np.array([3, 2, 1, 0]))\n\n self.assertEqual(dset.format[\"type\"], None)\n self.assertEqual(dset.format[\"format_kwargs\"], {})\n self.assertEqual(dset.format[\"columns\"], dset.column_names)\n self.assertEqual(dset.format[\"output_all_columns\"], False)\n\n dset.set_format(type=\"numpy\", columns=[\"col_1\"], output_all_columns=True)\n self.assertEqual(len(dset[0]), 2)\n self.assertIsInstance(dset[0][\"col_2\"], str)\n self.assertEqual(dset[0][\"col_2\"], \"a\")\n\n dset.set_format(type=\"numpy\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0]), 2)\n self.assertEqual(dset[0][\"col_2\"].item(), \"a\")\n\n @require_torch\n def test_set_format_torch(self):\n import torch\n\n dset = self._create_dummy_dataset(multiple_columns=True)\n dset.set_format(type=\"torch\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], torch.Tensor)\n self.assertIsInstance(dset[\"col_1\"], torch.Tensor)\n self.assertListEqual(list(dset[0][\"col_1\"].shape), [])\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n\n dset.set_format(type=\"torch\", columns=[\"col_1\"], output_all_columns=True)\n self.assertEqual(len(dset[0]), 2)\n self.assertIsInstance(dset[0][\"col_2\"], str)\n self.assertEqual(dset[0][\"col_2\"], \"a\")\n\n dset.set_format(type=\"torch\", columns=[\"col_1\", \"col_2\"])\n with self.assertRaises(TypeError):\n dset[0]\n\n @require_tf\n def test_set_format_tf(self):\n import tensorflow as tf\n\n dset = self._create_dummy_dataset(multiple_columns=True)\n dset.set_format(type=\"tensorflow\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0]), 1)\n self.assertIsInstance(dset[0][\"col_1\"], tf.Tensor)\n self.assertListEqual(list(dset[0][\"col_1\"].shape), [])\n self.assertEqual(dset[0][\"col_1\"].numpy().item(), 3)\n\n dset.set_format(type=\"tensorflow\", columns=[\"col_1\"], output_all_columns=True)\n self.assertEqual(len(dset[0]), 2)\n self.assertIsInstance(dset[0][\"col_2\"], str)\n self.assertEqual(dset[0][\"col_2\"], \"a\")\n\n dset.set_format(type=\"tensorflow\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0]), 2)\n self.assertEqual(dset[0][\"col_2\"].numpy().decode(\"utf-8\"), \"a\")\n\n def test_set_format_pandas(self):\n dset = self._create_dummy_dataset(multiple_columns=True)\n dset.set_format(type=\"pandas\", columns=[\"col_1\"])\n self.assertEqual(len(dset[0].columns), 1)\n self.assertIsInstance(dset[0], pd.DataFrame)\n self.assertListEqual(list(dset[0].shape), [1, 1])\n self.assertEqual(dset[0][\"col_1\"].item(), 3)\n\n dset.set_format(type=\"pandas\", columns=[\"col_1\", \"col_2\"])\n self.assertEqual(len(dset[0].columns), 2)\n self.assertEqual(dset[0][\"col_2\"].item(), \"a\")\n\n def test_cast_(self):\n dset = self._create_dummy_dataset(multiple_columns=True)\n features = dset.features\n features[\"col_1\"] = Value(\"float64\")\n fingerprint = dset._fingerprint\n dset.cast_(features)\n self.assertEqual(dset.num_columns, 2)\n self.assertEqual(dset.features[\"col_1\"], Value(\"float64\"))\n self.assertIsInstance(dset[0][\"col_1\"], float)\n self.assertNotEqual(dset._fingerprint, fingerprint)\n\n def 
test_remove_columns_(self):\n dset = self._create_dummy_dataset(multiple_columns=True)\n fingerprint = dset._fingerprint\n dset.remove_columns_(column_names=\"col_1\")\n self.assertEqual(dset.num_columns, 1)\n self.assertListEqual(list(dset.column_names), [\"col_2\"])\n\n dset = self._create_dummy_dataset(multiple_columns=True)\n dset.remove_columns_(column_names=[\"col_1\", \"col_2\"])\n self.assertEqual(dset.num_columns, 0)\n self.assertNotEqual(dset._fingerprint, fingerprint)\n\n def test_rename_column_(self):\n dset = self._create_dummy_dataset(multiple_columns=True)\n fingerprint = dset._fingerprint\n dset.rename_column_(original_column_name=\"col_1\", new_column_name=\"new_name\")\n self.assertEqual(dset.num_columns, 2)\n self.assertListEqual(list(dset.column_names), [\"new_name\", \"col_2\"])\n self.assertNotEqual(dset._fingerprint, fingerprint)\n\n def test_concatenate_from_memory(self):\n data1, data2, data3 = {\"id\": [0, 1, 2]}, {\"id\": [3, 4, 5]}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n\n dset_concat = concatenate_datasets([dset1, dset2, dset3])\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(len(dset_concat._data_files), 0)\n self.assertEqual(len(dset_concat._indices_data_files), 0)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n def test_concatenate_from_disk(self):\n data1, data2, data3 = {\"id\": [0, 1, 2]}, {\"id\": [3, 4, 5]}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1).map(cache_file_name=os.path.join(tmp_dir, \"d1.arrow\")),\n Dataset.from_dict(data2, info=info2).map(cache_file_name=os.path.join(tmp_dir, \"d2.arrow\")),\n Dataset.from_dict(data3),\n )\n with self.assertRaises(ValueError):\n dset_concat = concatenate_datasets([dset1, dset2, dset3])\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1).map(cache_file_name=os.path.join(tmp_dir, \"d1.arrow\")),\n Dataset.from_dict(data2, info=info2).map(cache_file_name=os.path.join(tmp_dir, \"d2.arrow\")),\n Dataset.from_dict(data3).map(cache_file_name=os.path.join(tmp_dir, \"d3.arrow\")),\n )\n dset_concat = concatenate_datasets([dset1, dset2, dset3])\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(len(dset_concat._data_files), 3)\n self.assertEqual(len(dset_concat._indices_data_files), 0)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n def test_concatenate_with_indices(self):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1).select([0, 1, 2]),\n Dataset.from_dict(data2, info=info2).select([0, 1, 2]),\n Dataset.from_dict(data3),\n )\n\n 
dset_concat = concatenate_datasets([dset1, dset2, dset3])\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(len(dset_concat._data_files), 0)\n self.assertEqual(len(dset_concat._indices_data_files), 0)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1).select(\n [0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, \"i.arrow\")\n ),\n Dataset.from_dict(data2, info=info2).select([0, 1, 2]),\n Dataset.from_dict(data3),\n )\n with self.assertRaises(ValueError):\n dset_concat = concatenate_datasets([dset1, dset2, dset3])\n\n def test_concatenate_with_indices_from_disk(self):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1).select(\n [0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\")\n ),\n Dataset.from_dict(data2, info=info2).select(\n [0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\")\n ),\n Dataset.from_dict(data3).select([0, 1], indices_cache_file_name=os.path.join(tmp_dir, \"i3.arrow\")),\n )\n\n dset_concat = concatenate_datasets([dset1, dset2, dset3])\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(len(dset_concat._data_files), 0)\n self.assertEqual(len(dset_concat._indices_data_files), 3)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n def test_concatenate_pickle_with_history(self):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7], \"foo\": [\"bar\", \"bar\"]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1)\n .select([0, 1, 2])\n .map(cache_file_name=os.path.join(tmp_dir, \"d1.arrow\")),\n Dataset.from_dict(data2, info=info2)\n .select([0, 1, 2])\n .map(cache_file_name=os.path.join(tmp_dir, \"d2.arrow\")),\n Dataset.from_dict(data3).map(cache_file_name=os.path.join(tmp_dir, \"d3.arrow\")),\n )\n dset3.remove_columns_(\"foo\")\n dset1._data, dset2._data, dset3._data = Unpicklable(), Unpicklable(), Unpicklable()\n dset1, dset2, dset3 = [pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3)]\n dset_concat = concatenate_datasets([dset1, dset2, dset3])\n dset_concat._data = Unpicklable()\n dset_concat = pickle.loads(pickle.dumps(dset_concat))\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [0, 1, 2, 3, 4, 5, 6, 7])\n self.assertEqual(len(dset_concat._data_files), 3)\n self.assertEqual(len(dset_concat._inplace_history), 3)\n self.assertEqual(len(dset_concat._indices_data_files), 0)\n self.assertEqual(dset_concat.info.description, \"Dataset1\\n\\nDataset2\\n\\n\")\n\n def test_flatten(self):\n dset = Dataset.from_dict(\n 
{\"a\": [{\"b\": {\"c\": [\"text\"]}}] * 10, \"foo\": [1] * 10},\n features=Features({\"a\": {\"b\": Sequence({\"c\": Value(\"string\")})}, \"foo\": Value(\"int64\")}),\n )\n fingerprint = dset._fingerprint\n dset.flatten_()\n self.assertListEqual(dset.column_names, [\"a.b.c\", \"foo\"])\n self.assertListEqual(list(dset.features.keys()), [\"a.b.c\", \"foo\"])\n self.assertDictEqual(dset.features, Features({\"a.b.c\": Sequence(Value(\"string\")), \"foo\": Value(\"int64\")}))\n self.assertNotEqual(dset._fingerprint, fingerprint)\n\n def test_map_not_cached(self):\n dset = self._create_dummy_dataset()\n\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n fingerprint = dset._fingerprint\n dset_test = dset.map(\n lambda x: {\"name\": x[\"filename\"][:-2], \"id\": int(x[\"filename\"][-1])}, cache_file_name=None\n )\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n dset_test = dset.map(lambda x: None, cache_file_name=None)\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\")}),\n )\n\n def test_map_cached(self):\n dset = self._create_dummy_dataset()\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n fingerprint = dset._fingerprint\n dset_test = dset.map(\n lambda x: {\"name\": x[\"filename\"][:-2], \"id\": int(x[\"filename\"][-1])}, cache_file_name=tmp_file\n )\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test_with_indices = dset.map(\n lambda x, i: {\"name\": x[\"filename\"][:-2], \"id\": i}, with_indices=True, cache_file_name=tmp_file\n )\n self.assertEqual(len(dset_test_with_indices), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_with_indices.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n\n def func(x, i):\n if i == 4:\n raise KeyboardInterrupt()\n return {\"name\": x[\"filename\"][:-2], \"id\": i}\n\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n self.assertRaises(\n KeyboardInterrupt,\n dset.map,\n function=func,\n with_indices=True,\n cache_file_name=tmp_file,\n writer_batch_size=2,\n )\n self.assertFalse(os.path.exists(tmp_file))\n dset_test_with_indices = dset.map(\n lambda x, i: {\"name\": x[\"filename\"][:-2], \"id\": i},\n with_indices=True,\n cache_file_name=tmp_file,\n writer_batch_size=2,\n )\n self.assertTrue(os.path.exists(tmp_file))\n self.assertEqual(len(dset_test_with_indices), 30)\n 
self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_with_indices.features,\n Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")}),\n )\n\n def test_new_features(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n features = Features({\"filename\": Value(\"string\"), \"label\": ClassLabel(names=[\"positive\", \"negative\"])})\n dset_test_with_indices = dset.map(\n lambda x, i: {\"label\": i % 2}, with_indices=True, cache_file_name=tmp_file, features=features\n )\n self.assertEqual(len(dset_test_with_indices), 30)\n self.assertDictEqual(\n dset_test_with_indices.features,\n features,\n )\n\n def test_map_batched(self):\n dset = self._create_dummy_dataset()\n\n def map_batched(example):\n return {\"filename_new\": [x + \"_extension\" for x in example[\"filename\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test_batched = dset.map(map_batched, batched=True, cache_file_name=tmp_file)\n self.assertEqual(len(dset_test_batched), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_batched.features, Features({\"filename\": Value(\"string\"), \"filename_new\": Value(\"string\")})\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n with dset.formatted_as(\"numpy\", columns=[\"filename\"]):\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test_batched = dset.map(map_batched, batched=True, cache_file_name=tmp_file)\n self.assertEqual(len(dset_test_batched), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_batched.features,\n Features({\"filename\": Value(\"string\"), \"filename_new\": Value(\"string\")}),\n )\n\n def map_batched_with_indices(example, idx):\n return {\"filename_new\": [x + \"_extension_\" + str(idx) for x in example[\"filename\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test_with_indices_batched = dset.map(\n map_batched_with_indices, batched=True, with_indices=True, cache_file_name=tmp_file\n )\n self.assertEqual(len(dset_test_with_indices_batched), 30)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(\n dset_test_with_indices_batched.features,\n Features({\"filename\": Value(\"string\"), \"filename_new\": Value(\"string\")}),\n )\n\n @require_torch\n def test_map_torch(self):\n import torch\n\n dset = self._create_dummy_dataset()\n\n def func(example):\n return {\"tensor\": torch.Tensor([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test = dset.map(func, cache_file_name=tmp_file)\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features, Features({\"filename\": Value(\"string\"), \"tensor\": Sequence(Value(\"float64\"))})\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n @require_tf\n def test_map_tf(self):\n import tensorflow as tf\n\n dset = self._create_dummy_dataset()\n\n def func(example):\n return {\"tensor\": tf.constant([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test = dset.map(func, cache_file_name=tmp_file)\n 
self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features, Features({\"filename\": Value(\"string\"), \"tensor\": Sequence(Value(\"float64\"))})\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n def test_map_numpy(self):\n dset = self._create_dummy_dataset()\n\n def func(example):\n return {\"tensor\": np.array([1.0, 2, 3])}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset_test = dset.map(func, cache_file_name=tmp_file)\n self.assertEqual(len(dset_test), 30)\n self.assertDictEqual(\n dset_test.features, Features({\"filename\": Value(\"string\"), \"tensor\": Sequence(Value(\"float64\"))})\n )\n self.assertListEqual(dset_test[0][\"tensor\"], [1, 2, 3])\n\n def test_map_remove_colums(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.map(\n lambda x, i: {\"name\": x[\"filename\"][:-2], \"id\": i}, with_indices=True, cache_file_name=tmp_file\n )\n self.assertTrue(\"id\" in dset[0])\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\"), \"id\": Value(\"int64\")})\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.map(lambda x: x, remove_columns=[\"id\"], cache_file_name=tmp_file)\n self.assertTrue(\"id\" not in dset[0])\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\"), \"name\": Value(\"string\")}))\n\n def test_filter(self):\n dset = self._create_dummy_dataset()\n # keep only first five examples\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n fingerprint = dset._fingerprint\n dset_filter_first_five = dset.filter(lambda x, i: i < 5, with_indices=True, cache_file_name=tmp_file)\n self.assertEqual(len(dset_filter_first_five), 5)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_filter_first_five.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint)\n\n # filter filenames with even id at the end\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n fingerprint = dset._fingerprint\n dset_filter_even_num = dset.filter(lambda x: (int(x[\"filename\"][-1]) % 2 == 0), cache_file_name=tmp_file)\n self.assertEqual(len(dset_filter_even_num), 15)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_filter_even_num.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint)\n\n def test_keep_features_after_transform_specified(self):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n dset = Dataset.from_dict({\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features)\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file, features=features)\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def 
test_keep_features_after_transform_unspecified(self):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n dset = Dataset.from_dict({\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features)\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file)\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def test_keep_features_after_transform_from_file(self):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n dset = Dataset.from_dict({\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features)\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset.map(invert_labels, cache_file_name=tmp_file)\n inverted_dset = Dataset.from_file(tmp_file)\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def test_keep_features_after_transform_in_memory(self):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n dset = Dataset.from_dict({\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features)\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n inverted_dset = dset.map(invert_labels, keep_in_memory=True)\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def test_keep_features_after_loading_from_cache(self):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n dset = Dataset.from_dict({\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features)\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]]}\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file)\n inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file)\n self.assertGreater(len(inverted_dset.cache_files), 0)\n self.assertEqual(inverted_dset.features.type, features.type)\n self.assertDictEqual(inverted_dset.features, features)\n\n def test_keep_features_with_new_features(self):\n features = Features(\n {\"tokens\": Sequence(Value(\"string\")), \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"]))}\n )\n dset = Dataset.from_dict({\"tokens\": [[\"foo\"] * 5] * 10, \"labels\": [[1] * 5] * 10}, features=features)\n\n def invert_labels(x):\n return {\"labels\": [(1 - label) for label in x[\"labels\"]], \"labels2\": x[\"labels\"]}\n\n expected_features = Features(\n {\n \"tokens\": Sequence(Value(\"string\")),\n \"labels\": Sequence(ClassLabel(names=[\"negative\", \"positive\"])),\n \"labels2\": Sequence(Value(\"int64\")),\n }\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n inverted_dset = 
dset.map(invert_labels, cache_file_name=tmp_file)\n self.assertEqual(inverted_dset.features.type, expected_features.type)\n self.assertDictEqual(inverted_dset.features, expected_features)\n\n def test_select(self):\n dset = self._create_dummy_dataset()\n # select every two example\n indices = list(range(0, len(dset), 2))\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n fingerprint = dset._fingerprint\n dset_select_even = dset.select(indices, indices_cache_file_name=tmp_file)\n self.assertEqual(len(dset_select_even), 15)\n for row in dset_select_even:\n self.assertEqual(int(row[\"filename\"][-1]) % 2, 0)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_select_even.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_select_even._fingerprint, fingerprint)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n bad_indices = list(range(5))\n bad_indices[3] = \"foo\"\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n self.assertRaises(\n Exception,\n dset.select,\n indices=bad_indices,\n indices_cache_file_name=tmp_file,\n writer_batch_size=2,\n )\n self.assertFalse(os.path.exists(tmp_file))\n dset_select_five = dset.select(\n range(5),\n indices_cache_file_name=tmp_file,\n writer_batch_size=2,\n )\n self.assertTrue(os.path.exists(tmp_file))\n self.assertEqual(len(dset_select_five), 5)\n for i, row in enumerate(dset_select_five):\n self.assertEqual(int(row[\"filename\"][-1]), i)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_select_even.features, Features({\"filename\": Value(\"string\")}))\n\n def test_select_then_map(self):\n dset = self._create_dummy_dataset()\n\n d1 = dset.select([0])\n d2 = dset.select([1])\n d1 = d1.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n d2 = d2.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n self.assertEqual(d1[0][\"id\"], 0)\n self.assertEqual(d2[0][\"id\"], 1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n d1 = dset.select([0], indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\"))\n d2 = dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\"))\n d1 = d1.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n d2 = d2.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n self.assertEqual(d1[0][\"id\"], 0)\n self.assertEqual(d2[0][\"id\"], 1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n dataset = dset.map(cache_file_name=os.path.join(tmp_dir, \"test.arrow\"))\n d1 = dataset.select([0])\n d2 = dataset.select([1])\n d1 = d1.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n d2 = d2.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n self.assertEqual(d1[0][\"id\"], 0)\n self.assertEqual(d2[0][\"id\"], 1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n dataset = dset.map(cache_file_name=os.path.join(tmp_dir, \"test.arrow\"))\n d1 = dataset.select([0], indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\"))\n d2 = dataset.select([1], indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\"))\n d1 = d1.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n d2 = d2.map(lambda x: {\"id\": int(x[\"filename\"][-1])})\n self.assertEqual(d1[0][\"id\"], 0)\n self.assertEqual(d2[0][\"id\"], 1)\n\n def test_pickle_after_many_transforms_on_disk(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset = dset.map(cache_file_name=os.path.join(tmp_dir, \"d.arrow\"))\n 
self.assertEqual(len(dset._data_files), 1)\n dset.rename_column_(\"filename\", \"file\")\n self.assertListEqual(dset.column_names, [\"file\"])\n dset = dset.select(range(5))\n self.assertEqual(len(dset), 5)\n dset = dset.map(lambda x: {\"id\": int(x[\"file\"][-1])})\n self.assertListEqual(sorted(dset.column_names), [\"file\", \"id\"])\n dset.rename_column_(\"id\", \"number\")\n self.assertListEqual(sorted(dset.column_names), [\"file\", \"number\"])\n dset = dset.select([1])\n self.assertEqual(dset[0][\"file\"], \"my_name-train_1\")\n self.assertEqual(dset[0][\"number\"], 1)\n\n self.assertEqual(dset._indices[\"indices\"].to_pylist(), [1])\n self.assertEqual(dset._inplace_history, [{\"transforms\": [(\"rename_column_\", (\"id\", \"number\"), {})]}])\n dset._data = Unpicklable() # check that we don't pickle the entire table\n\n pickled = pickle.dumps(dset)\n loaded = pickle.loads(pickled)\n self.assertEqual(loaded[0][\"file\"], \"my_name-train_1\")\n self.assertEqual(loaded[0][\"number\"], 1)\n\n def test_pickle_after_many_transforms_in_memory(self):\n dset = self._create_dummy_dataset()\n\n self.assertEqual(len(dset._data_files), 0)\n dset.rename_column_(\"filename\", \"file\")\n self.assertListEqual(dset.column_names, [\"file\"])\n dset = dset.select(range(5))\n self.assertEqual(len(dset), 5)\n dset = dset.map(lambda x: {\"id\": int(x[\"file\"][-1])})\n self.assertListEqual(sorted(dset.column_names), [\"file\", \"id\"])\n dset.rename_column_(\"id\", \"number\")\n self.assertListEqual(sorted(dset.column_names), [\"file\", \"number\"])\n dset = dset.select([1])\n self.assertEqual(dset[0][\"file\"], \"my_name-train_1\")\n self.assertEqual(dset[0][\"number\"], 1)\n\n self.assertEqual(dset._indices[\"indices\"].to_pylist(), [1])\n self.assertEqual(dset._inplace_history, [])\n\n pickled = pickle.dumps(dset)\n loaded = pickle.loads(pickled)\n self.assertEqual(loaded[0][\"file\"], \"my_name-train_1\")\n self.assertEqual(loaded[0][\"number\"], 1)\n\n def test_shuffle(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n fingerprint = dset._fingerprint\n dset_shuffled = dset.shuffle(seed=1234, indices_cache_file_name=tmp_file)\n self.assertEqual(len(dset_shuffled), 30)\n self.assertEqual(dset_shuffled[0][\"filename\"], \"my_name-train_28\")\n self.assertEqual(dset_shuffled[2][\"filename\"], \"my_name-train_10\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_shuffled.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_shuffled._fingerprint, fingerprint)\n\n # Reproducibility\n tmp_file = os.path.join(tmp_dir, \"test_2.arrow\")\n dset_shuffled_2 = dset.shuffle(seed=1234, indices_cache_file_name=tmp_file)\n self.assertListEqual(dset_shuffled[\"filename\"], dset_shuffled_2[\"filename\"])\n\n def test_sort(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Keep only 10 examples\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.select(range(10), indices_cache_file_name=tmp_file)\n tmp_file = os.path.join(tmp_dir, \"test_2.arrow\")\n dset = dset.shuffle(seed=1234, indices_cache_file_name=tmp_file)\n self.assertEqual(len(dset), 10)\n self.assertEqual(dset[0][\"filename\"], \"my_name-train_8\")\n self.assertEqual(dset[1][\"filename\"], \"my_name-train_9\")\n # Sort\n tmp_file = os.path.join(tmp_dir, \"test_3.arrow\")\n fingerprint = dset._fingerprint\n 
dset_sorted = dset.sort(\"filename\", indices_cache_file_name=tmp_file)\n for i, row in enumerate(dset_sorted):\n self.assertEqual(int(row[\"filename\"][-1]), i)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sorted.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_sorted._fingerprint, fingerprint)\n # Sort reversed\n tmp_file = os.path.join(tmp_dir, \"test_4.arrow\")\n fingerprint = dset._fingerprint\n dset_sorted = dset.sort(\"filename\", indices_cache_file_name=tmp_file, reverse=True)\n for i, row in enumerate(dset_sorted):\n self.assertEqual(int(row[\"filename\"][-1]), len(dset_sorted) - 1 - i)\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sorted.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_sorted._fingerprint, fingerprint)\n\n def test_export(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n # Export the data\n tfrecord_path = os.path.join(tmp_dir, \"test.tfrecord\")\n dset = dset.map(\n lambda ex, i: {\n \"id\": i,\n \"question\": f\"Question {i}\",\n \"answers\": {\"text\": [f\"Answer {i}-0\", f\"Answer {i}-1\"], \"answer_start\": [0, 1]},\n },\n with_indices=True,\n remove_columns=[\"filename\"],\n cache_file_name=tmp_file,\n )\n dset.flatten_()\n dset.set_format(\"numpy\")\n dset.export(filename=tfrecord_path, format=\"tfrecord\")\n\n # Import the data\n import tensorflow as tf\n\n tf_dset = tf.data.TFRecordDataset([tfrecord_path])\n feature_description = {\n \"id\": tf.io.FixedLenFeature([], tf.int64),\n \"question\": tf.io.FixedLenFeature([], tf.string),\n \"answers.text\": tf.io.VarLenFeature(tf.string),\n \"answers.answer_start\": tf.io.VarLenFeature(tf.int64),\n }\n tf_parsed_dset = tf_dset.map(\n lambda example_proto: tf.io.parse_single_example(example_proto, feature_description)\n )\n # Test that keys match original dataset\n for i, ex in enumerate(tf_parsed_dset):\n self.assertEqual(ex.keys(), dset[i].keys())\n # Test for equal number of elements\n self.assertEqual(i, len(dset) - 1)\n\n def test_train_test_split(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n tmp_file_2 = os.path.join(tmp_dir, \"test_2.arrow\")\n fingerprint = dset._fingerprint\n dset_dict = dset.train_test_split(\n test_size=10,\n shuffle=False,\n train_indices_cache_file_name=tmp_file,\n test_indices_cache_file_name=tmp_file_2,\n )\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 20)\n self.assertEqual(len(dset_test), 10)\n self.assertEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset_train[-1][\"filename\"], \"my_name-train_19\")\n self.assertEqual(dset_test[0][\"filename\"], \"my_name-train_20\")\n self.assertEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_train._fingerprint, fingerprint)\n self.assertNotEqual(dset_test._fingerprint, fingerprint)\n 
self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint)\n\n tmp_file = os.path.join(tmp_dir, \"test_3.arrow\")\n tmp_file_2 = os.path.join(tmp_dir, \"test_4.arrow\")\n dset_dict = dset.train_test_split(\n test_size=0.5,\n shuffle=False,\n train_indices_cache_file_name=tmp_file,\n test_indices_cache_file_name=tmp_file_2,\n )\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 15)\n self.assertEqual(len(dset_test), 15)\n self.assertEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset_train[-1][\"filename\"], \"my_name-train_14\")\n self.assertEqual(dset_test[0][\"filename\"], \"my_name-train_15\")\n self.assertEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n\n tmp_file = os.path.join(tmp_dir, \"test_5.arrow\")\n tmp_file_2 = os.path.join(tmp_dir, \"test_6.arrow\")\n dset_dict = dset.train_test_split(\n train_size=10,\n shuffle=False,\n train_indices_cache_file_name=tmp_file,\n test_indices_cache_file_name=tmp_file_2,\n )\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 10)\n self.assertEqual(len(dset_test), 20)\n self.assertEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertEqual(dset_train[-1][\"filename\"], \"my_name-train_9\")\n self.assertEqual(dset_test[0][\"filename\"], \"my_name-train_10\")\n self.assertEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n\n tmp_file = os.path.join(tmp_dir, \"test_7.arrow\")\n tmp_file_2 = os.path.join(tmp_dir, \"test_8.arrow\")\n dset_dict = dset.train_test_split(\n train_size=10, train_indices_cache_file_name=tmp_file, test_indices_cache_file_name=tmp_file_2, seed=42\n )\n self.assertListEqual(list(dset_dict.keys()), [\"train\", \"test\"])\n dset_train = dset_dict[\"train\"]\n dset_test = dset_dict[\"test\"]\n\n self.assertEqual(len(dset_train), 10)\n self.assertEqual(len(dset_test), 20)\n self.assertNotEqual(dset_train[0][\"filename\"], \"my_name-train_0\")\n self.assertNotEqual(dset_train[-1][\"filename\"], \"my_name-train_9\")\n self.assertNotEqual(dset_test[0][\"filename\"], \"my_name-train_10\")\n self.assertNotEqual(dset_test[-1][\"filename\"], \"my_name-train_29\")\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_train.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_test.features, Features({\"filename\": Value(\"string\")}))\n\n def test_shard(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.select(range(10), indices_cache_file_name=tmp_file)\n self.assertEqual(len(dset), 10)\n # Shard\n tmp_file_1 = os.path.join(tmp_dir, \"test_1.arrow\")\n fingerprint = dset._fingerprint\n dset_sharded = 
dset.shard(num_shards=8, index=1, indices_cache_file_name=tmp_file_1)\n self.assertEqual(2, len(dset_sharded))\n self.assertEqual([\"my_name-train_1\", \"my_name-train_9\"], dset_sharded[\"filename\"])\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sharded.features, Features({\"filename\": Value(\"string\")}))\n self.assertNotEqual(dset_sharded._fingerprint, fingerprint)\n # Shard contiguous\n tmp_file_2 = os.path.join(tmp_dir, \"test_2.arrow\")\n dset_sharded_contiguous = dset.shard(\n num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2\n )\n self.assertEqual([f\"my_name-train_{i}\" for i in (0, 1, 2, 3)], dset_sharded_contiguous[\"filename\"])\n self.assertDictEqual(dset.features, Features({\"filename\": Value(\"string\")}))\n self.assertDictEqual(dset_sharded_contiguous.features, Features({\"filename\": Value(\"string\")}))\n # Test lengths of sharded contiguous\n self.assertEqual(\n [4, 3, 3],\n [len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2)) for i in range(3)],\n )\n\n def test_flatten_indices(self):\n dset = self._create_dummy_dataset()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.assertEqual(dset._indices, None)\n\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.select(range(10), indices_cache_file_name=tmp_file)\n self.assertEqual(len(dset), 10)\n\n self.assertNotEqual(dset._indices, None)\n\n # Test unique fail\n with self.assertRaises(ValueError):\n dset.unique(dset.column_names[0])\n\n tmp_file_2 = os.path.join(tmp_dir, \"test_2.arrow\")\n fingerprint = dset._fingerprint\n dset = dset.flatten_indices(cache_file_name=tmp_file_2)\n\n self.assertEqual(len(dset), 10)\n self.assertEqual(dset._indices, None)\n self.assertNotEqual(dset._fingerprint, fingerprint)\n # Test unique works\n dset.unique(dset.column_names[0])\n\n def test_format_vectors(self):\n dset = self._create_dummy_dataset()\n import numpy as np\n import tensorflow as tf\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.map(lambda ex, i: {\"vec\": np.ones(3) * i}, with_indices=True, cache_file_name=tmp_file)\n columns = dset.column_names\n\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (str, list))\n self.assertIsInstance(dset[:2][col], list)\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"vec\": Sequence(Value(\"float64\"))})\n )\n\n dset.set_format(\"tensorflow\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))\n self.assertEqual(tuple(dset[:2][\"vec\"].shape), (2, None))\n self.assertEqual(tuple(dset[\"vec\"][:2].shape), (2, None))\n\n dset.set_format(\"numpy\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], np.ndarray)\n self.assertIsInstance(dset[:2][col], np.ndarray)\n self.assertIsInstance(dset[col], np.ndarray)\n self.assertEqual(dset[:2][\"vec\"].shape, (2, 3))\n self.assertEqual(dset[\"vec\"][:2].shape, (2, 3))\n\n dset.set_format(\"torch\", columns=[\"vec\"])\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n # torch.Tensor is only for 
numerical columns\n self.assertIsInstance(dset[0][\"vec\"], torch.Tensor)\n self.assertIsInstance(dset[:2][\"vec\"], torch.Tensor)\n self.assertIsInstance(dset[\"vec\"][:2], torch.Tensor)\n self.assertEqual(dset[:2][\"vec\"].shape, (2, 3))\n self.assertEqual(dset[\"vec\"][:2].shape, (2, 3))\n\n def test_format_ragged_vectors(self):\n dset = self._create_dummy_dataset()\n import numpy as np\n import tensorflow as tf\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.map(lambda ex, i: {\"vec\": np.ones(3 + i) * i}, with_indices=True, cache_file_name=tmp_file)\n columns = dset.column_names\n\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (str, list))\n self.assertIsInstance(dset[:2][col], list)\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"vec\": Sequence(Value(\"float64\"))})\n )\n\n dset.set_format(\"tensorflow\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))\n # dim is None for ragged vectors in tensorflow\n self.assertListEqual(dset[:2][\"vec\"].shape.as_list(), [2, None])\n self.assertListEqual(dset[\"vec\"][:2].shape.as_list(), [2, None])\n\n dset.set_format(\"numpy\")\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n for col in columns:\n self.assertIsInstance(dset[0][col], np.ndarray)\n self.assertIsInstance(dset[:2][col], np.ndarray)\n self.assertIsInstance(dset[col], np.ndarray)\n # array is flat for raged vectors in numpy\n self.assertEqual(dset[:2][\"vec\"].shape, (2,))\n self.assertEqual(dset[\"vec\"][:2].shape, (2,))\n\n dset.set_format(\"torch\", columns=[\"vec\"])\n self.assertIsNotNone(dset[0])\n self.assertIsNotNone(dset[:2])\n # torch.Tensor is only for numerical columns\n self.assertIsInstance(dset[0][\"vec\"], torch.Tensor)\n self.assertIsInstance(dset[:2][\"vec\"][0], torch.Tensor)\n self.assertIsInstance(dset[\"vec\"][0], torch.Tensor)\n # pytorch doesn't support ragged tensors, so we should have lists\n self.assertIsInstance(dset[:2][\"vec\"], list)\n self.assertIsInstance(dset[\"vec\"][:2], list)\n\n def test_format_nested(self):\n dset = self._create_dummy_dataset()\n import numpy as np\n import tensorflow as tf\n import torch\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, \"test.arrow\")\n dset = dset.map(\n lambda ex: {\"nested\": [{\"foo\": np.ones(3)}] * len(ex[\"filename\"])},\n cache_file_name=tmp_file,\n batched=True,\n )\n self.assertDictEqual(\n dset.features, Features({\"filename\": Value(\"string\"), \"nested\": {\"foo\": Sequence(Value(\"float64\"))}})\n )\n\n dset.set_format(\"tensorflow\")\n self.assertIsNotNone(dset[0])\n self.assertIsInstance(dset[0][\"nested\"][\"foo\"], (tf.Tensor, tf.RaggedTensor))\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[:2][\"nested\"][0][\"foo\"], (tf.Tensor, tf.RaggedTensor))\n self.assertIsInstance(dset[\"nested\"][0][\"foo\"], (tf.Tensor, tf.RaggedTensor))\n\n dset.set_format(\"numpy\")\n self.assertIsNotNone(dset[0])\n self.assertIsInstance(dset[0][\"nested\"][\"foo\"], np.ndarray)\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[:2][\"nested\"][0][\"foo\"], np.ndarray)\n 
self.assertIsInstance(dset[\"nested\"][0][\"foo\"], np.ndarray)\n\n dset.set_format(\"torch\", columns=\"nested\")\n self.assertIsNotNone(dset[0])\n self.assertIsInstance(dset[0][\"nested\"][\"foo\"], torch.Tensor)\n self.assertIsNotNone(dset[:2])\n self.assertIsInstance(dset[:2][\"nested\"][0][\"foo\"], torch.Tensor)\n self.assertIsInstance(dset[\"nested\"][0][\"foo\"], torch.Tensor)\n\n def test_format_pandas(self):\n dset = self._create_dummy_dataset(multiple_columns=True)\n import pandas as pd\n\n dset.set_format(\"pandas\")\n self.assertIsInstance(dset[0], pd.DataFrame)\n self.assertIsInstance(dset[:2], pd.DataFrame)\n self.assertIsInstance(dset[\"col_1\"], pd.Series)\n"
] |
[
[
"numpy.array",
"tensorflow.data.TFRecordDataset",
"pandas.DataFrame.from_dict",
"tensorflow.io.FixedLenFeature",
"numpy.ones",
"tensorflow.constant",
"tensorflow.io.parse_single_example",
"numpy.arange",
"tensorflow.io.VarLenFeature",
"torch.Tensor"
]
] |
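
The row above captures the Hugging Face `datasets` test suite exercising concatenation, mapping, filtering, selection and splitting. As a reading aid, a minimal sketch of the public API those tests cover (assuming the `datasets` package is installed; every name below is illustrative and not part of the stored row):

from datasets import Dataset, concatenate_datasets

dset1 = Dataset.from_dict({"id": [0, 1, 2]})
dset2 = Dataset.from_dict({"id": [3, 4, 5]})

# Schemas must match; the result is simply the rows of both datasets.
dset = concatenate_datasets([dset1, dset2])
assert dset["id"] == [0, 1, 2, 3, 4, 5]

# map() returns a new dataset; with_indices=True also passes the row index.
doubled = dset.map(lambda x, i: {"double": 2 * x["id"], "idx": i}, with_indices=True)

# filter()/select() keep a subset of rows.
evens = dset.filter(lambda x: x["id"] % 2 == 0)
head = dset.select(range(3))

# train_test_split() returns a DatasetDict with "train" and "test" splits.
splits = dset.train_test_split(test_size=2, shuffle=False)
assert len(splits["test"]) == 2
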
rohankumardubey/manim
|
[
"b3b7d214adff2088f6c75cbc814cbbf8f1a9b4e4"
] |
[
"manimlib/mobject/types/dot_cloud.py"
] |
[
"import numpy as np\nimport moderngl\n\nfrom manimlib.constants import GREY_C\nfrom manimlib.constants import YELLOW\nfrom manimlib.constants import ORIGIN\nfrom manimlib.mobject.types.point_cloud_mobject import PMobject\nfrom manimlib.utils.iterables import resize_preserving_order\n\n\nDEFAULT_DOT_RADIUS = 0.05\nDEFAULT_GLOW_DOT_RADIUS = 0.2\nDEFAULT_GRID_HEIGHT = 6\nDEFAULT_BUFF_RATIO = 0.5\n\n\nclass DotCloud(PMobject):\n CONFIG = {\n \"color\": GREY_C,\n \"opacity\": 1,\n \"radius\": DEFAULT_DOT_RADIUS,\n \"glow_factor\": 0,\n \"shader_folder\": \"true_dot\",\n \"render_primitive\": moderngl.POINTS,\n \"shader_dtype\": [\n ('point', np.float32, (3,)),\n ('radius', np.float32, (1,)),\n ('color', np.float32, (4,)),\n ],\n }\n\n def __init__(self, points=None, **kwargs):\n super().__init__(**kwargs)\n if points is not None:\n self.set_points(points)\n\n def init_data(self):\n super().init_data()\n self.data[\"radii\"] = np.zeros((1, 1))\n self.set_radius(self.radius)\n\n def init_uniforms(self):\n super().init_uniforms()\n self.uniforms[\"glow_factor\"] = self.glow_factor\n\n def to_grid(self, n_rows, n_cols, n_layers=1,\n buff_ratio=None,\n h_buff_ratio=1.0,\n v_buff_ratio=1.0,\n d_buff_ratio=1.0,\n height=DEFAULT_GRID_HEIGHT,\n ):\n n_points = n_rows * n_cols * n_layers\n points = np.repeat(range(n_points), 3, axis=0).reshape((n_points, 3))\n points[:, 0] = points[:, 0] % n_cols\n points[:, 1] = (points[:, 1] // n_cols) % n_rows\n points[:, 2] = points[:, 2] // (n_rows * n_cols)\n self.set_points(points.astype(float))\n\n if buff_ratio is not None:\n v_buff_ratio = buff_ratio\n h_buff_ratio = buff_ratio\n d_buff_ratio = buff_ratio\n\n radius = self.get_radius()\n ns = [n_cols, n_rows, n_layers]\n brs = [h_buff_ratio, v_buff_ratio, d_buff_ratio]\n self.set_radius(0)\n for n, br, dim in zip(ns, brs, range(3)):\n self.rescale_to_fit(2 * radius * (1 + br) * (n - 1), dim, stretch=True)\n self.set_radius(radius)\n if height is not None:\n self.set_height(height)\n self.center()\n return self\n\n def set_radii(self, radii):\n n_points = len(self.get_points())\n radii = np.array(radii).reshape((len(radii), 1))\n self.data[\"radii\"] = resize_preserving_order(radii, n_points)\n self.refresh_bounding_box()\n return self\n\n def get_radii(self):\n return self.data[\"radii\"]\n\n def set_radius(self, radius):\n self.data[\"radii\"][:] = radius\n self.refresh_bounding_box()\n return self\n\n def get_radius(self):\n return self.get_radii().max()\n\n def set_glow_factor(self, glow_factor):\n self.uniforms[\"glow_factor\"] = glow_factor\n\n def get_glow_factor(self):\n return self.uniforms[\"glow_factor\"]\n\n def compute_bounding_box(self):\n bb = super().compute_bounding_box()\n radius = self.get_radius()\n bb[0] += np.full((3,), -radius)\n bb[2] += np.full((3,), radius)\n return bb\n\n def scale(self, scale_factor, scale_radii=True, **kwargs):\n super().scale(scale_factor, **kwargs)\n if scale_radii:\n self.set_radii(scale_factor * self.get_radii())\n return self\n\n def make_3d(self, reflectiveness=0.5, shadow=0.2):\n self.set_reflectiveness(reflectiveness)\n self.set_shadow(shadow)\n self.apply_depth_test()\n return self\n\n def get_shader_data(self):\n shader_data = super().get_shader_data()\n self.read_data_to_shader(shader_data, \"radius\", \"radii\")\n self.read_data_to_shader(shader_data, \"color\", \"rgbas\")\n return shader_data\n\n\nclass TrueDot(DotCloud):\n def __init__(self, center=ORIGIN, **kwargs):\n super().__init__(points=[center], **kwargs)\n\n\nclass GlowDot(TrueDot):\n CONFIG = 
{\n \"glow_factor\": 2,\n \"radius\": DEFAULT_GLOW_DOT_RADIUS,\n \"color\": YELLOW,\n }\n"
] |
[
[
"numpy.full",
"numpy.array",
"numpy.zeros"
]
] |
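
`DotCloud.to_grid` in the row above lays out its points with a repeat/reshape indexing trick before rescaling. A standalone numpy sketch of just that trick, with made-up grid sizes:

import numpy as np

n_rows, n_cols, n_layers = 2, 3, 1   # made-up example sizes
n_points = n_rows * n_cols * n_layers

# Repeat each flat index three times, then decode it into integer
# (column, row, layer) coordinates, exactly as to_grid() does above.
points = np.repeat(range(n_points), 3, axis=0).reshape((n_points, 3))
points[:, 0] = points[:, 0] % n_cols
points[:, 1] = (points[:, 1] // n_cols) % n_rows
points[:, 2] = points[:, 2] // (n_rows * n_cols)
print(points.astype(float))   # one (x, y, z) lattice position per dot
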
AxelGard/university-projects
|
[
"0c9a6e785f1918c6ed0fd365b2d419c9f52edb50"
] |
[
"tsks24-sig/lab3/preamble.py"
] |
[
"import numpy as np\nfrom scipy import signal, misc\nimport imageio\nfrom matplotlib import pyplot as plt\nplt.rcParams['image.interpolation'] = 'nearest'\n\nclass Error:\n def __init__(self, original, altered):\n self.mse = np.mean((original-altered)**2)\n self.psnr = 10*np.log10(255**2/self.mse)\n"
] |
[
[
"numpy.log10",
"numpy.mean"
]
] |
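
The `Error` helper in the row above bundles MSE and PSNR for image comparison. A small usage sketch; the images are synthetic, and the cast to float matters because raw uint8 subtraction would wrap around:

import numpy as np

rng = np.random.default_rng(0)
original = rng.integers(0, 256, size=(64, 64)).astype(float)  # fake 8-bit image
altered = np.clip(original + rng.normal(0.0, 5.0, size=(64, 64)), 0, 255)

mse = np.mean((original - altered) ** 2)      # mean squared error
psnr = 10 * np.log10(255 ** 2 / mse)          # peak SNR in dB for 8-bit data
print(f"MSE={mse:.2f}  PSNR={psnr:.2f} dB")
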
jungin500/pytorch-lightning
|
[
"b5f2b3e12c099282d475c8ec4d058064c433d7d5"
] |
[
"pytorch_lightning/strategies/horovod.py"
] |
[
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom contextlib import ExitStack\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO\nfrom pytorch_lightning.plugins.precision import PrecisionPlugin\nfrom pytorch_lightning.strategies.parallel import ParallelStrategy\nfrom pytorch_lightning.utilities.distributed import distributed_available\nfrom pytorch_lightning.utilities.distributed import group as dist_group\nfrom pytorch_lightning.utilities.distributed import ReduceOp\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.imports import _HOROVOD_AVAILABLE\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_only\n\nif _HOROVOD_AVAILABLE:\n import horovod.torch as hvd\n\n\nclass HorovodStrategy(ParallelStrategy):\n \"\"\"Plugin for Horovod distributed training integration.\"\"\"\n\n strategy_name = \"horovod\"\n\n def __init__(\n self,\n accelerator: Optional[\"pl.accelerators.accelerator.Accelerator\"] = None,\n parallel_devices: Optional[List[Any]] = None,\n checkpoint_io: Optional[CheckpointIO] = None,\n precision_plugin: Optional[PrecisionPlugin] = None,\n ):\n super().__init__(\n accelerator=accelerator,\n parallel_devices=parallel_devices,\n cluster_environment=None,\n checkpoint_io=checkpoint_io,\n precision_plugin=precision_plugin,\n )\n rank_zero_only.rank = self.global_rank\n self._exit_stack: Optional[ExitStack] = None\n\n @property\n def global_rank(self) -> int:\n return hvd.rank()\n\n @property\n def local_rank(self) -> int:\n return hvd.local_rank()\n\n @property\n def world_size(self) -> int:\n return hvd.size()\n\n @property\n def root_device(self):\n root_device = self.parallel_devices[self.local_rank]\n if isinstance(root_device, int):\n return torch.device(f'cuda:{root_device}')\n else:\n return root_device\n\n @property\n def distributed_sampler_kwargs(self):\n distributed_sampler_kwargs = dict(num_replicas=self.world_size, rank=self.global_rank)\n return distributed_sampler_kwargs\n\n @property\n def handles_gradient_accumulation(self) -> bool:\n \"\"\"Whether the plugin handles gradient accumulation internally.\"\"\"\n return True\n\n def setup(self, trainer: \"pl.Trainer\") -> None:\n self.model_to_device()\n\n super().setup(trainer)\n\n self._exit_stack = ExitStack()\n self._exit_stack.__enter__()\n\n if not trainer.training:\n # no need to setup optimizers\n return\n\n def _unpack_lightning_optimizer(opt):\n return opt._optimizer if isinstance(opt, LightningOptimizer) else opt\n\n optimizers = self.optimizers\n optimizers = [_unpack_lightning_optimizer(opt) for opt in optimizers]\n\n # Horovod: scale the learning rate by the number of workers to account for\n # increased total batch size\n for optimizer in 
optimizers:\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] *= self.world_size\n\n # Horovod: adjust base LR used by schedulers to match scaled optimizer initial LR\n lr_scheduler_configs = self.lr_scheduler_configs\n for config in lr_scheduler_configs:\n scheduler = config.scheduler\n scheduler.base_lrs = [lr * self.world_size for lr in scheduler.base_lrs]\n\n # Horovod: broadcast parameters & optimizer state to ensure consistent initialization\n hvd.broadcast_parameters(self.lightning_module.state_dict(), root_rank=0)\n for optimizer in optimizers:\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n accumulation_scheduler = trainer.accumulation_scheduler\n if accumulation_scheduler.epochs != [0]:\n raise MisconfigurationException(\n \"Horovod currently does not support different `accumulate_grad_batches` at different epochs.\"\n )\n\n self.optimizers = self._wrap_optimizers(optimizers, trainer.accumulate_grad_batches)\n for optimizer in self.optimizers:\n # Synchronization will be performed explicitly following backward()\n self._exit_stack.enter_context(optimizer.skip_synchronize())\n\n def barrier(self, *args, **kwargs):\n if distributed_available():\n self.join()\n\n def broadcast(self, obj: object, src: int = 0) -> object:\n obj = hvd.broadcast_object(obj, src)\n return obj\n\n def model_to_device(self):\n if self.root_device.type == \"cuda\":\n # this can potentially be removed after #8312. Not done due to lack of horovod testing\n torch.cuda.set_device(self.root_device)\n self.model.to(self.root_device)\n\n def join(self):\n if self.root_device.type == \"cuda\":\n hvd.join(self.local_rank)\n else:\n hvd.join()\n\n def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Optional[Union[ReduceOp, str]] = \"mean\"):\n \"\"\"Reduces a tensor from several distributed processes to one aggregated tensor.\n\n Args:\n tensor: the tensor to sync and reduce\n group: the process group to gather results from. Defaults to all processes (world)\n reduce_op: the reduction operation. Defaults to 'mean'/'avg'.\n Can also be a string 'sum' to calculate the sum during reduction.\n\n Return:\n reduced value, except when the input was not a tensor the output remains is unchanged\n \"\"\"\n if group is not None:\n raise ValueError(\"Horovod does not support allreduce using a subcommunicator at this time. Unset `group`.\")\n\n if reduce_op in (None, \"avg\", \"mean\"):\n reduce_op = hvd.Average\n elif reduce_op in (\"sum\", ReduceOp.SUM):\n reduce_op = hvd.Sum\n else:\n raise ValueError(f\"unrecognized `reduce_op`: {reduce_op}\")\n\n # sync all processes before reduction\n self.join()\n return hvd.allreduce(tensor, op=reduce_op)\n\n def all_gather(\n self, result: torch.Tensor, group: Optional[Any] = dist_group.WORLD, sync_grads: bool = False\n ) -> torch.Tensor:\n if group is not None and group != dist_group.WORLD:\n raise ValueError(\"Horovod does not support allgather using a subcommunicator at this time. 
Unset `group`.\")\n\n if len(result.shape) == 0:\n # Convert scalars to single dimension tensors\n result = result.reshape(1)\n\n # sync and gather all\n self.join()\n return hvd.allgather(result)\n\n def post_backward(self, closure_loss: torch.Tensor) -> None:\n # synchronize all horovod optimizers.\n for optimizer in self.optimizers:\n optimizer.synchronize()\n\n def _wrap_optimizers(\n self, optimizers: List[Optimizer], accumulate_grad_batches: int\n ) -> List[\"hvd.DistributedOptimizer\"]:\n \"\"\"Wraps optimizers to perform gradient aggregation via allreduce.\"\"\"\n return [\n hvd.DistributedOptimizer(\n opt,\n backward_passes_per_step=accumulate_grad_batches,\n named_parameters=self._filter_named_parameters(self.lightning_module, opt),\n )\n if \"horovod\" not in str(opt.__class__)\n else opt\n for opt in optimizers\n ]\n\n @staticmethod\n def _filter_named_parameters(model: nn.Module, optimizer: Optimizer) -> List[Tuple[str, nn.Parameter]]:\n opt_params = {p for group in optimizer.param_groups for p in group.get(\"params\", [])}\n return [(name, p) for name, p in model.named_parameters() if p in opt_params]\n\n @classmethod\n def register_strategies(cls, strategy_registry: Dict) -> None:\n strategy_registry.register(\n cls.strategy_name,\n cls,\n description=f\"{cls.__class__.__name__}\",\n )\n\n def teardown(self) -> None:\n super().teardown()\n # teardown may be called before `_exit_stack` is set\n if self._exit_stack:\n self._exit_stack.__exit__(None, None, None)\n self._exit_stack = None\n # Make sure all workers have finished training before returning to the user\n self.join()\n if self.root_device.type == \"cuda\":\n # GPU teardown\n self.lightning_module.cpu()\n # clean up memory\n torch.cuda.empty_cache()\n"
] |
[
[
"torch.device",
"torch.cuda.empty_cache",
"torch.cuda.set_device"
]
] |
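
`HorovodStrategy.setup()` in the row above automates the standard Horovod recipe: scale the learning rate by the worker count, broadcast rank 0's state, and wrap the optimizer. A hedged standalone sketch of that recipe, assuming `horovod` is installed and using a throwaway linear model (launch under `horovodrun` for more than one worker):

import torch
import horovod.torch as hvd

hvd.init()
model = torch.nn.Linear(10, 1)

# Scale the base LR by the number of workers (larger effective batch size).
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3 * hvd.size())

# Start every worker from rank 0's parameters and optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

# Wrap the optimizer so step() allreduces gradients across workers.
optimizer = hvd.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters()
)
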
linupi/silx
|
[
"51bc5cbc696880e7cf13feb3ff2476a5c32422d4"
] |
[
"silx/gui/data/NXdataWidgets.py"
] |
[
"# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2017-2020 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"This module defines widgets used by _NXdataView.\n\"\"\"\n__authors__ = [\"P. Knobel\"]\n__license__ = \"MIT\"\n__date__ = \"12/11/2018\"\n\nimport logging\nimport numpy\n\nfrom silx.gui import qt\nfrom silx.gui.data.NumpyAxesSelector import NumpyAxesSelector\nfrom silx.gui.plot import Plot1D, Plot2D, StackView, ScatterView\nfrom silx.gui.plot.ComplexImageView import ComplexImageView\nfrom silx.gui.colors import Colormap\nfrom silx.gui.widgets.FrameBrowser import HorizontalSliderWithBrowser\n\nfrom silx.math.calibration import ArrayCalibration, NoCalibration, LinearCalibration\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass ArrayCurvePlot(qt.QWidget):\n \"\"\"\n Widget for plotting a curve from a multi-dimensional signal array\n and a 1D axis array.\n\n The signal array can have an arbitrary number of dimensions, the only\n limitation being that the last dimension must have the same length as\n the axis array.\n\n The widget provides sliders to select indices on the first (n - 1)\n dimensions of the signal array, and buttons to add/replace selected\n curves to the plot.\n\n This widget also handles simple 2D or 3D scatter plots (third dimension\n displayed as colour of points).\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n\n :param parent: Parent QWidget\n \"\"\"\n super(ArrayCurvePlot, self).__init__(parent)\n\n self.__signals = None\n self.__signals_names = None\n self.__signal_errors = None\n self.__axis = None\n self.__axis_name = None\n self.__x_axis_errors = None\n self.__values = None\n\n self._plot = Plot1D(self)\n\n self._selector = NumpyAxesSelector(self)\n self._selector.setNamedAxesSelectorVisibility(False)\n self.__selector_is_connected = False\n\n self._plot.sigActiveCurveChanged.connect(self._setYLabelFromActiveLegend)\n\n layout = qt.QVBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self._plot)\n layout.addWidget(self._selector)\n\n self.setLayout(layout)\n\n def getPlot(self):\n \"\"\"Returns the plot used for the display\n\n :rtype: Plot1D\n \"\"\"\n return self._plot\n\n def setCurvesData(self, ys, x=None,\n yerror=None, xerror=None,\n ylabels=None, xlabel=None, title=None):\n \"\"\"\n\n :param List[ndarray] ys: List of 
arrays to be represented by the y (vertical) axis.\n It can be multiple n-D array whose last dimension must\n have the same length as x (and values must be None)\n :param ndarray x: 1-D dataset used as the curve's x values. If provided,\n its lengths must be equal to the length of the last dimension of\n ``y`` (and equal to the length of ``value``, for a scatter plot).\n :param ndarray yerror: Single array of errors for y (same shape), or None.\n There can only be one array, and it applies to the first/main y\n (no y errors for auxiliary_signals curves).\n :param ndarray xerror: 1-D dataset of errors for x, or None\n :param str ylabels: Labels for each curve's Y axis\n :param str xlabel: Label for X axis\n :param str title: Graph title\n \"\"\"\n self.__signals = ys\n self.__signals_names = ylabels or ([\"Y\"] * len(ys))\n self.__signal_errors = yerror\n self.__axis = x\n self.__axis_name = xlabel\n self.__x_axis_errors = xerror\n\n if self.__selector_is_connected:\n self._selector.selectionChanged.disconnect(self._updateCurve)\n self.__selector_is_connected = False\n self._selector.setData(ys[0])\n self._selector.setAxisNames([\"Y\"])\n\n if len(ys[0].shape) < 2:\n self._selector.hide()\n else:\n self._selector.show()\n\n self._plot.setGraphTitle(title or \"\")\n self._updateCurve()\n\n if not self.__selector_is_connected:\n self._selector.selectionChanged.connect(self._updateCurve)\n self.__selector_is_connected = True\n\n def _updateCurve(self):\n selection = self._selector.selection()\n ys = [sig[selection] for sig in self.__signals]\n y0 = ys[0]\n len_y = len(y0)\n x = self.__axis\n if x is None:\n x = numpy.arange(len_y)\n elif numpy.isscalar(x) or len(x) == 1:\n # constant axis\n x = x * numpy.ones_like(y0)\n elif len(x) == 2 and len_y != 2:\n # linear calibration a + b * x\n x = x[0] + x[1] * numpy.arange(len_y)\n\n self._plot.remove(kind=(\"curve\",))\n\n for i in range(len(self.__signals)):\n legend = self.__signals_names[i]\n\n # errors only supported for primary signal in NXdata\n y_errors = None\n if i == 0 and self.__signal_errors is not None:\n y_errors = self.__signal_errors[self._selector.selection()]\n self._plot.addCurve(x, ys[i], legend=legend,\n xerror=self.__x_axis_errors,\n yerror=y_errors)\n if i == 0:\n self._plot.setActiveCurve(legend)\n\n self._plot.resetZoom()\n self._plot.getXAxis().setLabel(self.__axis_name)\n self._plot.getYAxis().setLabel(self.__signals_names[0])\n\n def _setYLabelFromActiveLegend(self, previous_legend, new_legend):\n for ylabel in self.__signals_names:\n if new_legend is not None and new_legend == ylabel:\n self._plot.getYAxis().setLabel(ylabel)\n break\n\n def clear(self):\n old = self._selector.blockSignals(True)\n self._selector.clear()\n self._selector.blockSignals(old)\n self._plot.clear()\n\n\nclass XYVScatterPlot(qt.QWidget):\n \"\"\"\n Widget for plotting one or more scatters\n (with identical x, y coordinates).\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n\n :param parent: Parent QWidget\n \"\"\"\n super(XYVScatterPlot, self).__init__(parent)\n\n self.__y_axis = None\n \"\"\"1D array\"\"\"\n self.__y_axis_name = None\n self.__values = None\n \"\"\"List of 1D arrays (for multiple scatters with identical\n x, y coordinates)\"\"\"\n\n self.__x_axis = None\n self.__x_axis_name = None\n self.__x_axis_errors = None\n self.__y_axis = None\n self.__y_axis_name = None\n self.__y_axis_errors = None\n\n self._plot = ScatterView(self)\n self._plot.setColormap(Colormap(name=\"viridis\",\n vmin=None, vmax=None,\n 
normalization=Colormap.LINEAR))\n\n self._slider = HorizontalSliderWithBrowser(parent=self)\n self._slider.setMinimum(0)\n self._slider.setValue(0)\n self._slider.valueChanged[int].connect(self._sliderIdxChanged)\n self._slider.setToolTip(\"Select auxiliary signals\")\n\n layout = qt.QGridLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self._plot, 0, 0)\n layout.addWidget(self._slider, 1, 0)\n\n self.setLayout(layout)\n\n def _sliderIdxChanged(self, value):\n self._updateScatter()\n\n def getPlot(self):\n \"\"\"Returns the plot used for the display\n\n :rtype: PlotWidget\n \"\"\"\n return self._plot.getPlotWidget()\n\n def setScattersData(self, y, x, values,\n yerror=None, xerror=None,\n ylabel=None, xlabel=None,\n title=\"\", scatter_titles=None):\n \"\"\"\n\n :param ndarray y: 1D array for y (vertical) coordinates.\n :param ndarray x: 1D array for x coordinates.\n :param List[ndarray] values: List of 1D arrays of values.\n This will be used to compute the color map and assign colors\n to the points. There should be as many arrays in the list as\n scatters to be represented.\n :param ndarray yerror: 1D array of errors for y (same shape), or None.\n :param ndarray xerror: 1D array of errors for x, or None\n :param str ylabel: Label for Y axis\n :param str xlabel: Label for X axis\n :param str title: Main graph title\n :param List[str] scatter_titles: Subtitles (one per scatter)\n \"\"\"\n self.__y_axis = y\n self.__x_axis = x\n self.__x_axis_name = xlabel or \"X\"\n self.__y_axis_name = ylabel or \"Y\"\n self.__x_axis_errors = xerror\n self.__y_axis_errors = yerror\n self.__values = values\n\n self.__graph_title = title or \"\"\n self.__scatter_titles = scatter_titles\n\n self._slider.valueChanged[int].disconnect(self._sliderIdxChanged)\n self._slider.setMaximum(len(values) - 1)\n if len(values) > 1:\n self._slider.show()\n else:\n self._slider.hide()\n self._slider.setValue(0)\n self._slider.valueChanged[int].connect(self._sliderIdxChanged)\n\n self._updateScatter()\n\n def _updateScatter(self):\n x = self.__x_axis\n y = self.__y_axis\n\n idx = self._slider.value()\n\n if self.__graph_title:\n title = self.__graph_title # main NXdata @title\n if len(self.__scatter_titles) > 1:\n # Append dataset name only when there is many datasets\n title += '\\n' + self.__scatter_titles[idx]\n else:\n title = self.__scatter_titles[idx] # scatter dataset name\n\n self._plot.setGraphTitle(title)\n self._plot.setData(x, y, self.__values[idx],\n xerror=self.__x_axis_errors,\n yerror=self.__y_axis_errors)\n self._plot.resetZoom()\n self._plot.getXAxis().setLabel(self.__x_axis_name)\n self._plot.getYAxis().setLabel(self.__y_axis_name)\n\n def clear(self):\n self._plot.getPlotWidget().clear()\n\n\nclass ArrayImagePlot(qt.QWidget):\n \"\"\"\n Widget for plotting an image from a multi-dimensional signal array\n and two 1D axes array.\n\n The signal array can have an arbitrary number of dimensions, the only\n limitation being that the last two dimensions must have the same length as\n the axes arrays.\n\n Sliders are provided to select indices on the first (n - 2) dimensions of\n the signal array, and the plot is updated to show the image corresponding\n to the selection.\n\n If one or both of the axes does not have regularly spaced values, the\n the image is plotted as a coloured scatter plot.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n\n :param parent: Parent QWidget\n \"\"\"\n super(ArrayImagePlot, self).__init__(parent)\n\n self.__signals = None\n self.__signals_names = None\n 
self.__x_axis = None\n self.__x_axis_name = None\n self.__y_axis = None\n self.__y_axis_name = None\n\n self._plot = Plot2D(self)\n self._plot.setDefaultColormap(Colormap(name=\"viridis\",\n vmin=None, vmax=None,\n normalization=Colormap.LINEAR))\n self._plot.getIntensityHistogramAction().setVisible(True)\n\n # not closable\n self._selector = NumpyAxesSelector(self)\n self._selector.setNamedAxesSelectorVisibility(False)\n self._selector.selectionChanged.connect(self._updateImage)\n\n self._auxSigSlider = HorizontalSliderWithBrowser(parent=self)\n self._auxSigSlider.setMinimum(0)\n self._auxSigSlider.setValue(0)\n self._auxSigSlider.valueChanged[int].connect(self._sliderIdxChanged)\n self._auxSigSlider.setToolTip(\"Select auxiliary signals\")\n\n layout = qt.QVBoxLayout()\n layout.addWidget(self._plot)\n layout.addWidget(self._selector)\n layout.addWidget(self._auxSigSlider)\n\n self.setLayout(layout)\n\n def _sliderIdxChanged(self, value):\n self._updateImage()\n\n def getPlot(self):\n \"\"\"Returns the plot used for the display\n\n :rtype: Plot2D\n \"\"\"\n return self._plot\n\n def setImageData(self, signals,\n x_axis=None, y_axis=None,\n signals_names=None,\n xlabel=None, ylabel=None,\n title=None, isRgba=False):\n \"\"\"\n\n :param signals: list of n-D datasets, whose last 2 dimensions are used as the\n image's values, or list of 3D datasets interpreted as RGBA image.\n :param x_axis: 1-D dataset used as the image's x coordinates. If\n provided, its lengths must be equal to the length of the last\n dimension of ``signal``.\n :param y_axis: 1-D dataset used as the image's y. If provided,\n its lengths must be equal to the length of the 2nd to last\n dimension of ``signal``.\n :param signals_names: Names for each image, used as subtitle and legend.\n :param xlabel: Label for X axis\n :param ylabel: Label for Y axis\n :param title: Graph title\n :param isRgba: True if data is a 3D RGBA image\n \"\"\"\n self._selector.selectionChanged.disconnect(self._updateImage)\n self._auxSigSlider.valueChanged.disconnect(self._sliderIdxChanged)\n\n self.__signals = signals\n self.__signals_names = signals_names\n self.__x_axis = x_axis\n self.__x_axis_name = xlabel\n self.__y_axis = y_axis\n self.__y_axis_name = ylabel\n self.__title = title\n\n self._selector.clear()\n if not isRgba:\n self._selector.setAxisNames([\"Y\", \"X\"])\n img_ndim = 2\n else:\n self._selector.setAxisNames([\"Y\", \"X\", \"RGB(A) channel\"])\n img_ndim = 3\n self._selector.setData(signals[0])\n\n if len(signals[0].shape) <= img_ndim:\n self._selector.hide()\n else:\n self._selector.show()\n\n self._auxSigSlider.setMaximum(len(signals) - 1)\n if len(signals) > 1:\n self._auxSigSlider.show()\n else:\n self._auxSigSlider.hide()\n self._auxSigSlider.setValue(0)\n\n self._updateImage()\n self._plot.resetZoom()\n\n self._selector.selectionChanged.connect(self._updateImage)\n self._auxSigSlider.valueChanged.connect(self._sliderIdxChanged)\n\n def _updateImage(self):\n selection = self._selector.selection()\n auxSigIdx = self._auxSigSlider.value()\n\n legend = self.__signals_names[auxSigIdx]\n\n images = [img[selection] for img in self.__signals]\n image = images[auxSigIdx]\n\n x_axis = self.__x_axis\n y_axis = self.__y_axis\n\n if x_axis is None and y_axis is None:\n xcalib = NoCalibration()\n ycalib = NoCalibration()\n else:\n if x_axis is None:\n # no calibration\n x_axis = numpy.arange(image.shape[1])\n elif numpy.isscalar(x_axis) or len(x_axis) == 1:\n # constant axis\n x_axis = x_axis * numpy.ones((image.shape[1], ))\n elif 
len(x_axis) == 2:\n # linear calibration\n x_axis = x_axis[0] * numpy.arange(image.shape[1]) + x_axis[1]\n\n if y_axis is None:\n y_axis = numpy.arange(image.shape[0])\n elif numpy.isscalar(y_axis) or len(y_axis) == 1:\n y_axis = y_axis * numpy.ones((image.shape[0], ))\n elif len(y_axis) == 2:\n y_axis = y_axis[0] * numpy.arange(image.shape[0]) + y_axis[1]\n\n xcalib = ArrayCalibration(x_axis)\n ycalib = ArrayCalibration(y_axis)\n\n self._plot.remove(kind=(\"scatter\", \"image\",))\n if xcalib.is_affine() and ycalib.is_affine():\n # regular image\n xorigin, xscale = xcalib(0), xcalib.get_slope()\n yorigin, yscale = ycalib(0), ycalib.get_slope()\n origin = (xorigin, yorigin)\n scale = (xscale, yscale)\n\n self._plot.addImage(image, legend=legend,\n origin=origin, scale=scale,\n replace=True)\n else:\n scatterx, scattery = numpy.meshgrid(x_axis, y_axis)\n # fixme: i don't think this can handle \"irregular\" RGBA images\n self._plot.addScatter(numpy.ravel(scatterx),\n numpy.ravel(scattery),\n numpy.ravel(image),\n legend=legend)\n\n if self.__title:\n title = self.__title\n if len(self.__signals_names) > 1:\n # Append dataset name only when there is many datasets\n title += '\\n' + self.__signals_names[auxSigIdx]\n else:\n title = self.__signals_names[auxSigIdx]\n self._plot.setGraphTitle(title)\n self._plot.getXAxis().setLabel(self.__x_axis_name)\n self._plot.getYAxis().setLabel(self.__y_axis_name)\n\n def clear(self):\n old = self._selector.blockSignals(True)\n self._selector.clear()\n self._selector.blockSignals(old)\n self._plot.clear()\n\n\nclass ArrayComplexImagePlot(qt.QWidget):\n \"\"\"\n Widget for plotting an image of complex from a multi-dimensional signal array\n and two 1D axes array.\n\n The signal array can have an arbitrary number of dimensions, the only\n limitation being that the last two dimensions must have the same length as\n the axes arrays.\n\n Sliders are provided to select indices on the first (n - 2) dimensions of\n the signal array, and the plot is updated to show the image corresponding\n to the selection.\n\n If one or both of the axes does not have regularly spaced values, the\n the image is plotted as a coloured scatter plot.\n \"\"\"\n def __init__(self, parent=None, colormap=None):\n \"\"\"\n\n :param parent: Parent QWidget\n \"\"\"\n super(ArrayComplexImagePlot, self).__init__(parent)\n\n self.__signals = None\n self.__signals_names = None\n self.__x_axis = None\n self.__x_axis_name = None\n self.__y_axis = None\n self.__y_axis_name = None\n\n self._plot = ComplexImageView(self)\n if colormap is not None:\n for mode in (ComplexImageView.ComplexMode.ABSOLUTE,\n ComplexImageView.ComplexMode.SQUARE_AMPLITUDE,\n ComplexImageView.ComplexMode.REAL,\n ComplexImageView.ComplexMode.IMAGINARY):\n self._plot.setColormap(colormap, mode)\n\n self._plot.getPlot().getIntensityHistogramAction().setVisible(True)\n self._plot.setKeepDataAspectRatio(True)\n\n # not closable\n self._selector = NumpyAxesSelector(self)\n self._selector.setNamedAxesSelectorVisibility(False)\n self._selector.selectionChanged.connect(self._updateImage)\n\n self._auxSigSlider = HorizontalSliderWithBrowser(parent=self)\n self._auxSigSlider.setMinimum(0)\n self._auxSigSlider.setValue(0)\n self._auxSigSlider.valueChanged[int].connect(self._sliderIdxChanged)\n self._auxSigSlider.setToolTip(\"Select auxiliary signals\")\n\n layout = qt.QVBoxLayout()\n layout.addWidget(self._plot)\n layout.addWidget(self._selector)\n layout.addWidget(self._auxSigSlider)\n\n self.setLayout(layout)\n\n def 
_sliderIdxChanged(self, value):\n self._updateImage()\n\n def getPlot(self):\n \"\"\"Returns the plot used for the display\n\n :rtype: PlotWidget\n \"\"\"\n return self._plot.getPlot()\n\n def setImageData(self, signals,\n x_axis=None, y_axis=None,\n signals_names=None,\n xlabel=None, ylabel=None,\n title=None):\n \"\"\"\n\n :param signals: list of n-D datasets, whose last 2 dimensions are used as the\n image's values, or list of 3D datasets interpreted as RGBA image.\n :param x_axis: 1-D dataset used as the image's x coordinates. If\n provided, its lengths must be equal to the length of the last\n dimension of ``signal``.\n :param y_axis: 1-D dataset used as the image's y. If provided,\n its lengths must be equal to the length of the 2nd to last\n dimension of ``signal``.\n :param signals_names: Names for each image, used as subtitle and legend.\n :param xlabel: Label for X axis\n :param ylabel: Label for Y axis\n :param title: Graph title\n \"\"\"\n self._selector.selectionChanged.disconnect(self._updateImage)\n self._auxSigSlider.valueChanged.disconnect(self._sliderIdxChanged)\n\n self.__signals = signals\n self.__signals_names = signals_names\n self.__x_axis = x_axis\n self.__x_axis_name = xlabel\n self.__y_axis = y_axis\n self.__y_axis_name = ylabel\n self.__title = title\n\n self._selector.clear()\n self._selector.setAxisNames([\"Y\", \"X\"])\n self._selector.setData(signals[0])\n\n if len(signals[0].shape) <= 2:\n self._selector.hide()\n else:\n self._selector.show()\n\n self._auxSigSlider.setMaximum(len(signals) - 1)\n if len(signals) > 1:\n self._auxSigSlider.show()\n else:\n self._auxSigSlider.hide()\n self._auxSigSlider.setValue(0)\n\n self._updateImage()\n self._plot.getPlot().resetZoom()\n\n self._selector.selectionChanged.connect(self._updateImage)\n self._auxSigSlider.valueChanged.connect(self._sliderIdxChanged)\n\n def _updateImage(self):\n selection = self._selector.selection()\n auxSigIdx = self._auxSigSlider.value()\n\n images = [img[selection] for img in self.__signals]\n image = images[auxSigIdx]\n\n x_axis = self.__x_axis\n y_axis = self.__y_axis\n\n if x_axis is None and y_axis is None:\n xcalib = NoCalibration()\n ycalib = NoCalibration()\n else:\n if x_axis is None:\n # no calibration\n x_axis = numpy.arange(image.shape[1])\n elif numpy.isscalar(x_axis) or len(x_axis) == 1:\n # constant axis\n x_axis = x_axis * numpy.ones((image.shape[1], ))\n elif len(x_axis) == 2:\n # linear calibration\n x_axis = x_axis[0] * numpy.arange(image.shape[1]) + x_axis[1]\n\n if y_axis is None:\n y_axis = numpy.arange(image.shape[0])\n elif numpy.isscalar(y_axis) or len(y_axis) == 1:\n y_axis = y_axis * numpy.ones((image.shape[0], ))\n elif len(y_axis) == 2:\n y_axis = y_axis[0] * numpy.arange(image.shape[0]) + y_axis[1]\n\n xcalib = ArrayCalibration(x_axis)\n ycalib = ArrayCalibration(y_axis)\n\n self._plot.setData(image)\n if xcalib.is_affine():\n xorigin, xscale = xcalib(0), xcalib.get_slope()\n else:\n _logger.warning(\"Unsupported complex image X axis calibration\")\n xorigin, xscale = 0., 1.\n\n if ycalib.is_affine():\n yorigin, yscale = ycalib(0), ycalib.get_slope()\n else:\n _logger.warning(\"Unsupported complex image Y axis calibration\")\n yorigin, yscale = 0., 1.\n\n self._plot.setOrigin((xorigin, yorigin))\n self._plot.setScale((xscale, yscale))\n\n if self.__title:\n title = self.__title\n if len(self.__signals_names) > 1:\n # Append dataset name only when there is many datasets\n title += '\\n' + self.__signals_names[auxSigIdx]\n else:\n title = 
self.__signals_names[auxSigIdx]\n self._plot.setGraphTitle(title)\n self._plot.getXAxis().setLabel(self.__x_axis_name)\n self._plot.getYAxis().setLabel(self.__y_axis_name)\n\n def clear(self):\n old = self._selector.blockSignals(True)\n self._selector.clear()\n self._selector.blockSignals(old)\n self._plot.setData(None)\n\n\nclass ArrayStackPlot(qt.QWidget):\n \"\"\"\n Widget for plotting a n-D array (n >= 3) as a stack of images.\n Three axis arrays can be provided to calibrate the axes.\n\n The signal array can have an arbitrary number of dimensions, the only\n limitation being that the last 3 dimensions must have the same length as\n the axes arrays.\n\n Sliders are provided to select indices on the first (n - 3) dimensions of\n the signal array, and the plot is updated to load the stack corresponding\n to the selection.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n\n :param parent: Parent QWidget\n \"\"\"\n super(ArrayStackPlot, self).__init__(parent)\n\n self.__signal = None\n self.__signal_name = None\n # the Z, Y, X axes apply to the last three dimensions of the signal\n # (in that order)\n self.__z_axis = None\n self.__z_axis_name = None\n self.__y_axis = None\n self.__y_axis_name = None\n self.__x_axis = None\n self.__x_axis_name = None\n\n self._stack_view = StackView(self)\n self._hline = qt.QFrame(self)\n self._hline.setFrameStyle(qt.QFrame.HLine)\n self._hline.setFrameShadow(qt.QFrame.Sunken)\n self._legend = qt.QLabel(self)\n self._selector = NumpyAxesSelector(self)\n self._selector.setNamedAxesSelectorVisibility(False)\n self.__selector_is_connected = False\n\n layout = qt.QVBoxLayout()\n layout.addWidget(self._stack_view)\n layout.addWidget(self._hline)\n layout.addWidget(self._legend)\n layout.addWidget(self._selector)\n\n self.setLayout(layout)\n\n def getStackView(self):\n \"\"\"Returns the plot used for the display\n\n :rtype: StackView\n \"\"\"\n return self._stack_view\n\n def setStackData(self, signal,\n x_axis=None, y_axis=None, z_axis=None,\n signal_name=None,\n xlabel=None, ylabel=None, zlabel=None,\n title=None):\n \"\"\"\n\n :param signal: n-D dataset, whose last 3 dimensions are used as the\n 3D stack values.\n :param x_axis: 1-D dataset used as the image's x coordinates. If\n provided, its lengths must be equal to the length of the last\n dimension of ``signal``.\n :param y_axis: 1-D dataset used as the image's y. If provided,\n its lengths must be equal to the length of the 2nd to last\n dimension of ``signal``.\n :param z_axis: 1-D dataset used as the image's z. 
If provided,\n its lengths must be equal to the length of the 3rd to last\n dimension of ``signal``.\n :param signal_name: Label used in the legend\n :param xlabel: Label for X axis\n :param ylabel: Label for Y axis\n :param zlabel: Label for Z axis\n :param title: Graph title\n \"\"\"\n if self.__selector_is_connected:\n self._selector.selectionChanged.disconnect(self._updateStack)\n self.__selector_is_connected = False\n\n self.__signal = signal\n self.__signal_name = signal_name or \"\"\n self.__x_axis = x_axis\n self.__x_axis_name = xlabel\n self.__y_axis = y_axis\n self.__y_axis_name = ylabel\n self.__z_axis = z_axis\n self.__z_axis_name = zlabel\n\n self._selector.setData(signal)\n self._selector.setAxisNames([\"Y\", \"X\", \"Z\"])\n\n self._stack_view.setGraphTitle(title or \"\")\n # by default, the z axis is the image position (dimension not plotted)\n self._stack_view.getPlotWidget().getXAxis().setLabel(self.__x_axis_name or \"X\")\n self._stack_view.getPlotWidget().getYAxis().setLabel(self.__y_axis_name or \"Y\")\n\n self._updateStack()\n\n ndims = len(signal.shape)\n self._stack_view.setFirstStackDimension(ndims - 3)\n\n # the legend label shows the selection slice producing the volume\n # (only interesting for ndim > 3)\n if ndims > 3:\n self._selector.setVisible(True)\n self._legend.setVisible(True)\n self._hline.setVisible(True)\n else:\n self._selector.setVisible(False)\n self._legend.setVisible(False)\n self._hline.setVisible(False)\n\n if not self.__selector_is_connected:\n self._selector.selectionChanged.connect(self._updateStack)\n self.__selector_is_connected = True\n\n @staticmethod\n def _get_origin_scale(axis):\n \"\"\"Assuming axis is a regularly spaced 1D array,\n return a tuple (origin, scale) where:\n - origin = axis[0]\n - scale = (axis[n-1] - axis[0]) / (n -1)\n :param axis: 1D numpy array\n :return: Tuple (axis[0], (axis[-1] - axis[0]) / (len(axis) - 1))\n \"\"\"\n return axis[0], (axis[-1] - axis[0]) / (len(axis) - 1)\n\n def _updateStack(self):\n \"\"\"Update displayed stack according to the current axes selector\n data.\"\"\"\n stk = self._selector.selectedData()\n x_axis = self.__x_axis\n y_axis = self.__y_axis\n z_axis = self.__z_axis\n\n calibrations = []\n for axis in [z_axis, y_axis, x_axis]:\n\n if axis is None:\n calibrations.append(NoCalibration())\n elif len(axis) == 2:\n calibrations.append(\n LinearCalibration(y_intercept=axis[0],\n slope=axis[1]))\n else:\n calibrations.append(ArrayCalibration(axis))\n\n legend = self.__signal_name + \"[\"\n for sl in self._selector.selection():\n if sl == slice(None):\n legend += \":, \"\n else:\n legend += str(sl) + \", \"\n legend = legend[:-2] + \"]\"\n self._legend.setText(\"Displayed data: \" + legend)\n\n self._stack_view.setStack(stk, calibrations=calibrations)\n self._stack_view.setLabels(\n labels=[self.__z_axis_name,\n self.__y_axis_name,\n self.__x_axis_name])\n\n def clear(self):\n old = self._selector.blockSignals(True)\n self._selector.clear()\n self._selector.blockSignals(old)\n self._stack_view.clear()\n\n\nclass ArrayVolumePlot(qt.QWidget):\n \"\"\"\n Widget for plotting a n-D array (n >= 3) as a 3D scalar field.\n Three axis arrays can be provided to calibrate the axes.\n\n The signal array can have an arbitrary number of dimensions, the only\n limitation being that the last 3 dimensions must have the same length as\n the axes arrays.\n\n Sliders are provided to select indices on the first (n - 3) dimensions of\n the signal array, and the plot is updated to load the stack corresponding\n to 
the selection.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n\n :param parent: Parent QWidget\n \"\"\"\n super(ArrayVolumePlot, self).__init__(parent)\n\n self.__signal = None\n self.__signal_name = None\n # the Z, Y, X axes apply to the last three dimensions of the signal\n # (in that order)\n self.__z_axis = None\n self.__z_axis_name = None\n self.__y_axis = None\n self.__y_axis_name = None\n self.__x_axis = None\n self.__x_axis_name = None\n\n from ._VolumeWindow import VolumeWindow\n\n self._view = VolumeWindow(self)\n\n self._hline = qt.QFrame(self)\n self._hline.setFrameStyle(qt.QFrame.HLine)\n self._hline.setFrameShadow(qt.QFrame.Sunken)\n self._legend = qt.QLabel(self)\n self._selector = NumpyAxesSelector(self)\n self._selector.setNamedAxesSelectorVisibility(False)\n self.__selector_is_connected = False\n\n layout = qt.QVBoxLayout()\n layout.addWidget(self._view)\n layout.addWidget(self._hline)\n layout.addWidget(self._legend)\n layout.addWidget(self._selector)\n\n self.setLayout(layout)\n\n def getVolumeView(self):\n \"\"\"Returns the plot used for the display\n\n :rtype: SceneWindow\n \"\"\"\n return self._view\n\n def setData(self, signal,\n x_axis=None, y_axis=None, z_axis=None,\n signal_name=None,\n xlabel=None, ylabel=None, zlabel=None,\n title=None):\n \"\"\"\n\n :param signal: n-D dataset, whose last 3 dimensions are used as the\n 3D stack values.\n :param x_axis: 1-D dataset used as the image's x coordinates. If\n provided, its lengths must be equal to the length of the last\n dimension of ``signal``.\n :param y_axis: 1-D dataset used as the image's y. If provided,\n its lengths must be equal to the length of the 2nd to last\n dimension of ``signal``.\n :param z_axis: 1-D dataset used as the image's z. If provided,\n its lengths must be equal to the length of the 3rd to last\n dimension of ``signal``.\n :param signal_name: Label used in the legend\n :param xlabel: Label for X axis\n :param ylabel: Label for Y axis\n :param zlabel: Label for Z axis\n :param title: Graph title\n \"\"\"\n if self.__selector_is_connected:\n self._selector.selectionChanged.disconnect(self._updateVolume)\n self.__selector_is_connected = False\n\n self.__signal = signal\n self.__signal_name = signal_name or \"\"\n self.__x_axis = x_axis\n self.__x_axis_name = xlabel\n self.__y_axis = y_axis\n self.__y_axis_name = ylabel\n self.__z_axis = z_axis\n self.__z_axis_name = zlabel\n\n self._selector.setData(signal)\n self._selector.setAxisNames([\"Y\", \"X\", \"Z\"])\n\n self._updateVolume()\n\n # the legend label shows the selection slice producing the volume\n # (only interesting for ndim > 3)\n if signal.ndim > 3:\n self._selector.setVisible(True)\n self._legend.setVisible(True)\n self._hline.setVisible(True)\n else:\n self._selector.setVisible(False)\n self._legend.setVisible(False)\n self._hline.setVisible(False)\n\n if not self.__selector_is_connected:\n self._selector.selectionChanged.connect(self._updateVolume)\n self.__selector_is_connected = True\n\n def _updateVolume(self):\n \"\"\"Update displayed stack according to the current axes selector\n data.\"\"\"\n x_axis = self.__x_axis\n y_axis = self.__y_axis\n z_axis = self.__z_axis\n\n offset = []\n scale = []\n for axis in [x_axis, y_axis, z_axis]:\n if axis is None:\n calibration = NoCalibration()\n elif len(axis) == 2:\n calibration = LinearCalibration(\n y_intercept=axis[0], slope=axis[1])\n else:\n calibration = ArrayCalibration(axis)\n if not calibration.is_affine():\n _logger.warning(\"Axis has not linear values, ignored\")\n 
offset.append(0.)\n scale.append(1.)\n else:\n offset.append(calibration(0))\n scale.append(calibration.get_slope())\n\n legend = self.__signal_name + \"[\"\n for sl in self._selector.selection():\n if sl == slice(None):\n legend += \":, \"\n else:\n legend += str(sl) + \", \"\n legend = legend[:-2] + \"]\"\n self._legend.setText(\"Displayed data: \" + legend)\n\n # Update SceneWidget\n data = self._selector.selectedData()\n\n volumeView = self.getVolumeView()\n volumeView.setData(data, offset=offset, scale=scale)\n volumeView.setAxesLabels(\n self.__x_axis_name, self.__y_axis_name, self.__z_axis_name)\n\n def clear(self):\n old = self._selector.blockSignals(True)\n self._selector.clear()\n self._selector.blockSignals(old)\n self.getVolumeView().clear()\n"
] |
[
[
"numpy.ones_like",
"numpy.ones",
"numpy.ravel",
"numpy.arange",
"numpy.isscalar",
"numpy.meshgrid"
]
] |
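The silx NXdata widgets in the record above share one axis convention: a length-2 axis dataset is read as a linear calibration, and a regularly spaced axis is reduced to an (origin, scale) pair, as in ArrayStackPlot._get_origin_scale. A minimal numpy-only sketch of that reduction (the function name and example axis values are illustrative, not from the dataset):

```python
import numpy as np

def get_origin_scale(axis):
    # Mirrors ArrayStackPlot._get_origin_scale: assumes `axis` is a
    # regularly spaced 1D array and returns (origin, scale).
    return axis[0], (axis[-1] - axis[0]) / (len(axis) - 1)

axis = np.arange(2.0, 7.0, 0.5)   # origin 2.0, step 0.5, 10 samples
origin, scale = get_origin_scale(axis)
assert (origin, scale) == (2.0, 0.5)
```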
ruclion/AutoVC_one_hot_ls
|
[
"db400a224b2e4544a480e0ab3f62b51b570d378c"
] |
[
"data_loader_hujk17.py"
] |
[
"import os\nimport torch\nimport numpy as np\nimport pickle as pkl\nfrom torch.utils import data\n\n \n\nspeaker_id_dict_path = '/ceph/home/hujk17/AutoVC_hujk17/full_106_spmel_nosli/speaker_seen_unseen.txt'\n\n\ndef text2list(file):\n f = open(file, 'r').readlines()\n file_list = [i.strip() for i in f]\n return file_list\n\n\ndef text2dict(file):\n speaker_id_dict = {}\n f = open(file, 'r').readlines()\n for i, name in enumerate(f):\n name = name.strip().split('|')[0]\n speaker_id_dict[name] = i\n # print(speaker_id_dict)\n return speaker_id_dict\n\n\ndef get_mel_data(fpath):\n # print('mel-path:', fpath)\n mel = np.load(fpath)\n return mel\n\n\n\nclass Utterances(data.Dataset):\n \"\"\"Dataset class for the Utterances dataset.\"\"\"\n\n def __init__(self, root_dir, meta_path, max_len):\n \"\"\"Initialize and preprocess the Utterances dataset.\"\"\"\n self.root_dir = root_dir\n self.max_len = max_len\n self.file_list = text2list(file=meta_path)\n self.speaker_id_dict = text2dict(speaker_id_dict_path)\n \n \n def __getitem__(self, index):\n now = self.file_list[index].split('|')\n speaker_id = self.speaker_id_dict[now[1]]\n mel = get_mel_data(os.path.join(self.root_dir, now[0]))\n\n\n if mel.shape[0] < self.max_len:\n len_pad = self.max_len - mel.shape[0]\n mel_fix = np.pad(mel, ((0,len_pad),(0,0)), 'constant')\n elif mel.shape[0] > self.max_len:\n left = np.random.randint(mel.shape[0]-self.max_len + 1)\n assert left + self.max_len <= mel.shape[0]\n mel_fix = mel[left:left+self.max_len, :]\n else:\n mel_fix = mel\n \n return mel_fix, speaker_id\n \n\n def __len__(self):\n return len(self.file_list)\n \n \n \n\ndef get_loader(root_dir, meta_path, batch_size=16, max_len=128, shuffle=True, drop_last = False, num_workers=0):\n \"\"\"Build and return a data loader.\"\"\"\n \n dataset = Utterances(root_dir, meta_path, max_len)\n \n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n drop_last=drop_last)\n return data_loader\n\n\n\n\n\n\n"
] |
[
[
"numpy.pad",
"numpy.random.randint",
"numpy.load",
"torch.utils.data.DataLoader"
]
] |
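`Utterances.__getitem__` in the record above either zero-pads a short mel spectrogram or randomly crops a long one so that every item has exactly `max_len` frames. A standalone sketch of that pad-or-crop step (the toy mel shapes and helper name are made up for illustration):

```python
import numpy as np

def fix_length(mel, max_len):
    # Zero-pad short mels at the end; randomly crop a max_len window
    # from long ones, following the logic of Utterances.__getitem__.
    if mel.shape[0] < max_len:
        return np.pad(mel, ((0, max_len - mel.shape[0]), (0, 0)), 'constant')
    if mel.shape[0] > max_len:
        left = np.random.randint(mel.shape[0] - max_len + 1)
        return mel[left:left + max_len, :]
    return mel

assert fix_length(np.ones((100, 80)), 128).shape == (128, 80)  # padded
assert fix_length(np.ones((300, 80)), 128).shape == (128, 80)  # cropped
```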
mjjjjm/helmholtz
|
[
"8ebe8e28fc4752c4da1d97b0ab3f25ae9debc44e"
] |
[
"ex1d_pointSource.py"
] |
[
"import os, sys, time\nimport numpy as np\nimport dolfin as df\n\nfrom HelmholtzSolver import *\n\n## =====================================================================================\n\n## -------------------------------------------------------------------------------------\ndef waveSpeed(x):\n return 1.0\n## -------------------------------------------------------------------------------------\ndef damping(x):\n return 0.0\n## -------------------------------------------------------------------------------------\ndef density(x):\n return 1.0\n## -------------------------------------------------------------------------------------\n\n## Define options\n## ==============\n\nomega = 2*np.pi*7\n\nmeshOpt = {'nXElem':80,\\\n 'polynomialOrder':1,\\\n\t 'stretchMesh':False,\\\n\t }\n\nbcOpt = {'left':{'DBC':True,\\\n 'real':df.Constant(0.0),\\\n 'imag':df.Constant(0.0),},\\\n 'right':{'DBC':True,\\\n 'real':df.Constant(0.0),\\\n 'imag':df.Constant(0.0),},\\\n\t }\n\nsourceOpt = {'real':{'choice':'pointSource',\\\n\t 'pointSourceLoc':np.array([0.5]),\\\n\t\t 'pointSourceMag':1.0},\n 'imag':{'choice':'none'},\\\n\t }\n\n\nWN = WaveNumber(omega,waveSpeed,damping)\nmaterialOpt = {'waveNumber':WN.evalWaveNumber,\\\n 'density':density,\\\n\t }\n\npmlOpt = {'left':True,\\\n 'right':True,\\\n\t 'exponent':2,\\\n\t 'sigmaMax':5000,\\\n\t 'numPmlWaveLengths':1,\\\n\t 'pmlWaveNumber':omega,\\\n\t }\n\n## Instantiate Helmoltz solver class with the options\n## ==================================================\nHS = HelmholtzSolver(1,meshOpt,bcOpt,sourceOpt,materialOpt,pmlOpt)\n\n## Write numerical solution to vtk file\n## ====================================\nfile = df.File(\"realNumericalSoln.pvd\")\nfile << HS.uSolnReal\nfile = df.File(\"imagNumericalSoln.pvd\")\nfile << HS.uSolnImag\n\n## Plot the numerical soltion\n## ==========================\n'''\ndf.plot(HS.domains,title=\"domain partitioned\")\ndf.plot(HS.uSolnReal,title=\"real(soln)\")\ndf.plot(HS.uSolnImag,title=\"imag(soln)\")\n'''\n\n## Determine analytical solution with exact solution u = i*exp(ik|x|)/2k\n## =====================================================================\n'''\nclass AnalyticalSoln(df.Expression):\n def __init__(self,sourceLoc,waveNumber,returnReal=True, **kwargs):\n self.sourceLoc = sourceLoc\n\tself.waveNumber = waveNumber\n\tself.returnReal = returnReal\n def eval(self,value,x):\n z = np.abs(x[0]-self.sourceLoc[0])\n uExact = 1j*np.exp(1j*self.waveNumber*z)/(2.0*self.waveNumber)\n if self.returnReal:\n value[0] = np.real(uExact)\n else:\n value[0] = np.imag(uExact)\n\nASolnReal = AnalyticalSoln(sourceOpt['real']['pointSourceLoc'],omega,\\\n degree=HS.meshOpt['polynomialOrder'])\nASolnImag = AnalyticalSoln(sourceOpt['real']['pointSourceLoc'],omega,\\\n returnReal=False,degree=HS.meshOpt['polynomialOrder'])\n\npASolnReal = df.project(ASolnReal,HS.VReal)\npASolnImag = df.project(ASolnImag,HS.VImag)\n\ndf.plot(pASolnReal,title=\"real(analytical soln)\")\ndf.plot(pASolnImag,title=\"imag(analytical soln)\")\ndf.interactive()\n'''\n"
] |
[
[
"numpy.array"
]
] |
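The commented-out verification block in ex1d_pointSource.py above compares the numerical result against the exact 1D point-source solution u = i*exp(ik|x - x0|)/(2k). A dolfin-free numpy sketch of that reference solution (the evaluation grid is illustrative; k matches omega in the script):

```python
import numpy as np

k = 2 * np.pi * 7              # wave number, same omega as in the script
x0 = 0.5                       # point-source location ('pointSourceLoc')
x = np.linspace(0.0, 1.0, 201)

# Exact 1D Helmholtz point-source solution: u = 1j*exp(1j*k*|x - x0|) / (2k)
u = 1j * np.exp(1j * k * np.abs(x - x0)) / (2.0 * k)
u_real, u_imag = np.real(u), np.imag(u)
```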
ashawkey/FocalLoss.pytorch
|
[
"5e9fcc6ad28302900e4fb41cf543524b435eab84"
] |
[
"focalloss.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\n\nclass FocalLoss(nn.Module):\n '''Multi-class Focal loss implementation'''\n def __init__(self, gamma=2, weight=None):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.weight = weight\n\n def forward(self, input, target):\n \"\"\"\n input: [N, C]\n target: [N, ]\n \"\"\"\n logpt = F.log_softmax(input, dim=1)\n pt = torch.exp(logpt)\n logpt = (1-pt)**self.gamma * logpt\n loss = F.nll_loss(logpt, target, self.weight)\n return loss\n"
] |
[
[
"torch.nn.functional.nll_loss",
"torch.exp",
"torch.nn.functional.log_softmax"
]
] |
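Since the FocalLoss module above subclasses nn.Module and expects raw logits of shape [N, C] with integer class targets of shape [N], it drops in wherever CrossEntropyLoss would. A minimal usage sketch (batch size and class count are arbitrary):

```python
import torch

logits = torch.randn(8, 5, requires_grad=True)   # [N, C] raw scores
targets = torch.randint(0, 5, (8,))              # [N] class indices

criterion = FocalLoss(gamma=2)
loss = criterion(logits, targets)
loss.backward()   # behaves like any other nn.Module loss
```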
Lhumd/reactive_planners-1
|
[
"5d8bd04da3d06fb2f968aa23a0c6713dcd773f44"
] |
[
"python/reactive_planners/demos/bolt_step_adjustment.py"
] |
[
"\"\"\" @namespace Demos of Bolt step adjustment\n@file\n@copyright Copyright (c) 2017-2019,\n New York University and Max Planck Gesellschaft,\n License BSD-3-Clause\n@example\n\"\"\"\nimport numpy as np\nimport pybullet as p\nfrom matplotlib import pyplot as plt\nfrom robot_properties_bolt.config import BoltConfig\nfrom robot_properties_bolt.bolt_wrapper import BoltRobot\nfrom mim_control.robot_centroidal_controller import RobotCentroidalController\nfrom mim_control.robot_impedance_controller import RobotImpedanceController\nfrom mim_control.qp_solver import quadprog_solve_qp\nfrom reactive_planners.lipm_simulator import LipmSimpulator\nfrom reactive_planners_cpp import DcmReactiveStepper\nimport pinocchio as se3\nfrom pinocchio import RobotWrapper\nfrom pinocchio.utils import zero, eye\nfrom scipy.spatial.transform import Rotation as R\nfrom numpy.linalg import inv, pinv\nfrom math import sqrt\nfrom random import random\nfrom bullet_utils.env import BulletEnvWithGround\n\n\ndef zero_cnt_gain(kp, cnt_array):\n gain = np.array(kp).copy()\n for i, v in enumerate(cnt_array):\n if v == 1:\n gain[3 * i : 3 * (i + 1)] = 0.0\n return gain\n\n\ndef joint_controller(q, desired_q, qdot, desired_qdot, kp, kd, cnt_array):\n torque = np.zeros((6, 1))\n number_of_joints_per_leg = 3\n for i in range(7, len(q)):\n torque[i - 7] = cnt_array[int((i - 7) / number_of_joints_per_leg)] * (\n kp[i - 7] * (desired_q[i] - q[i])\n ) + cnt_array[int((i - 7) / number_of_joints_per_leg)] * (\n kd[i - 7] * (desired_qdot[i - 1] - qdot[i - 1])\n )\n return torque\n\n\nclass OperationalSpaceDynamics(object):\n def __init__(self, model, endeff_frame_names):\n def getFrameId(name):\n idx = model.getFrameId(name)\n if idx == len(model.frames):\n raise Exception(\"Unknown frame name: {}\".format(name))\n return idx\n\n self.robot = RobotWrapper(model)\n self.model = model\n self.data = self.robot.data\n self.mass = sum([i.mass for i in self.robot.model.inertias[1:]])\n print(self.robot.model.inertias)\n self.base_id = self.robot.model.getFrameId(\"base_link\")\n self.endeff_frame_names = endeff_frame_names\n self.endeff_ids = [getFrameId(name) for name in endeff_frame_names]\n self.is_init_time = 1\n\n self.ne = len(self.endeff_ids)\n self.nv = self.model.nv\n\n self.last_q = None\n self.last_dq = None\n self.p = eye(self.model.nv)\n self.swing_id = self.endeff_ids[1]\n self.stance_id = self.endeff_ids[0]\n self.last_J_c = self.get_world_oriented_frame_jacobian(\n q, self.stance_id\n )[:3]\n self.xdot = self.get_world_oriented_frame_jacobian(q, self.swing_id)[\n :3\n ].dot(qdot)[:3]\n self.last_xdot = self.xdot\n self.mu = 0.5\n\n # Allocate space for the jacobian and desired velocities.\n # Using two entires for the linear and angular velocity of the base.\n # (self.nv - 6) is the number of jointss for posture regularization\n # self.ne * 3 is the components of foot impulse\n self.J = np.zeros(\n ((self.ne + 2) * 3 + (self.nv - 6) + (self.ne * 3), self.nv)\n )\n self.vel_des = np.zeros(\n ((self.ne + 2) * 3 + (self.nv - 6) + (self.ne * 3), 1)\n )\n\n # full robot get_jacobian\n self.jacobian = np.zeros((self.ne * 3, self.nv))\n\n def foot_mass_matrix(self, q):\n foot_mass = np.zeros((self.ne * 3, 3))\n mass_matrix = se3.crba(self.model, self.data, q)\n for i, idx in enumerate(self.endeff_ids):\n self.jacobian[\n 3 * i : 3 * (i + 1), :\n ] = self.get_world_oriented_frame_jacobian(q, idx)[:3]\n M = inv(\n self.jacobian[3 * i : 3 * (i + 1), :]\n .dot(inv(mass_matrix))\n .dot(self.jacobian[3 * i : 3 * (i + 1), :].T)\n )\n 
foot_mass[3 * i : 3 * (i + 1), :] = M\n return foot_mass\n\n def update_null_space_projection(self, q):\n J_c = self.get_world_oriented_frame_jacobian(q, self.stance_id)[:3]\n self.p = np.matrix(eye(self.nv)) - np.matrix(pinv(J_c)) * J_c\n self.pdot = (self.p - self.last_p) * 1000\n\n def update_constraint_consistent_inertia(self, q):\n mass_matrix = se3.crba(self.model, self.data, q)\n self.m_c = self.p * mass_matrix + eye(self.nv) - self.p\n\n def update_constrained_swing_foot_inertia(self, q):\n self.update_null_space_projection(q)\n self.update_constraint_consistent_inertia(q)\n J = self.get_world_oriented_frame_jacobian(q, self.swing_id)[:3]\n self.lambda_c = inv(J * inv(self.m_c) * self.p * J.T)\n\n def constrained_swing_foot_inertia(self, q):\n self.update_constrained_swing_foot_inertia(q)\n return self.lambda_c\n\n def update_c(self, freq=1000):\n J_c = np.matrix(\n self.get_world_oriented_frame_jacobian(q, self.stance_id)[:3]\n )\n self.J_dot = (J_c - self.last_J_c) * freq\n self.c = -pinv(J_c) * self.J_dot\n\n def projected_nonlinear_terms_h(self, q, qdot):\n self.update_constrained_swing_foot_inertia(q)\n J = np.matrix(\n self.get_world_oriented_frame_jacobian(q, self.swing_id)[:3]\n )\n h = np.matrix(\n se3.nonLinearEffects(self.model, self.data, q, qdot)\n ).transpose()\n return self.lambda_c * J * inv(self.m_c) * self.p * h\n\n def projected_gravity(self, q, qdot):\n self.update_constrained_swing_foot_inertia(q)\n J = np.matrix(\n self.get_world_oriented_frame_jacobian(q, self.swing_id)[:3]\n )\n g = np.matrix(\n se3.computeGeneralizedGravity(self.model, self.data, q)\n ).transpose()\n return self.lambda_c * J * inv(self.m_c) * self.p * g\n\n def xddot(self, q, qdot):\n self.xdot = self.get_world_oriented_frame_jacobian(q, self.swing_id)[\n :3\n ].dot(qdot)[:3]\n return (self.xdot - self.last_xdot) * 1000\n\n def projected_nonlinear_terms_v(self, q, qdot):\n self.update_constrained_swing_foot_inertia(q)\n self.update_c()\n J = self.get_world_oriented_frame_jacobian(q, self.swing_id)[:3]\n return (\n -self.lambda_c * (self.J_dot + J * inv(self.m_c) * self.c) * qdot\n )\n\n def rotate_J(self, jac, index):\n world_R_joint = se3.SE3(self.data.oMf[index].rotation, zero(3))\n return world_R_joint.action.dot(jac)\n\n def get_world_oriented_frame_jacobian(self, q, index):\n return self.rotate_J(\n se3.getFrameJacobian(\n self.model, self.data, index, se3.ReferenceFrame.LOCAL\n ),\n index,\n )\n\n def swing_force_boundaries(self, h, B, t, q, qdot): # equation 38\n I = np.matrix(eye(self.nv))\n M = np.matrix(se3.crba(self.robot.model, self.robot.data, q))\n J_c = np.matrix(\n self.get_world_oriented_frame_jacobian(q, self.stance_id)[:3]\n )\n J_X = np.matrix(\n self.get_world_oriented_frame_jacobian(q, self.swing_id)[:3]\n )\n\n eta = (\n -pinv(J_c.T) * (I - self.p) * (I - M * inv(self.m_c) * self.p) * B\n )\n rho = (\n pinv(J_c.T)\n * (I - self.p)\n * (\n (I - (M * inv(self.m_c) * self.p)) * h\n + M * inv(self.m_c) * self.pdot * qdot\n )\n )\n Q = np.eye(12) * 0.000001\n # p = np.zeros(12)\n p = np.array([1.0 for i in range(12)])\n G = np.zeros((4 + 2 * 12 + 1, 12))\n h = np.zeros(4 + 2 * 12 + 1)\n A = np.zeros((6, 12))\n b = np.zeros(6)\n A[0, 0] = 1\n A[1, 1] = 1\n A[2, 2] = 1\n A[3, 3] = 1\n A[4, 4] = 1\n A[5, 5] = 1\n\n for i in range(12):\n G[0, i] = (\n sqrt(2) / 2 * self.mu * eta[2, i]\n ) # -sqrt(2) / 2 * (eta_z + rho_z) <= eta_x + rho_x\n G[0, i] -= eta[0, i]\n\n G[1, i] = (\n sqrt(2) / 2 * self.mu * eta[2, i]\n ) # eta_x + rho_x <= sqrt(2) / 2 * (eta_z + rho_z)\n G[1, i] += eta[0, i]\n\n G[2, i] = 
(\n sqrt(2) / 2 * self.mu * eta[2, i]\n ) # -sqrt(2) / 2 * (eta_z + rho_z) <= eta_y + rho_y\n G[2, i] += -eta[1, i]\n\n G[3, i] = (\n sqrt(2) / 2 * self.mu * eta[2, i]\n ) # eta_y + rho_y <= sqrt(2) / 2 * (eta_z + rho_z)\n G[3, i] += eta[1, i]\n\n for i in range(12):\n G[4 + i, i] = -1\n h[4 + i] = t\n G[4 + 12 + i, i] = 1\n h[4 + 12 + i] = t\n\n G[4 + 12 * 2, :] = eta[2, :]\n h[4 + 12 * 2] = -rho[2]\n\n h[0] = rho[0] - sqrt(2) / 2 * self.mu * rho[2]\n h[1] = -rho[0] - sqrt(2) / 2 * self.mu * rho[2]\n h[2] = rho[1] - sqrt(2) / 2 * self.mu * rho[2]\n h[3] = -rho[1] - sqrt(2) / 2 * self.mu * rho[2]\n\n alpha = self.lambda_c * J_X * inv(self.m_c) * self.p * B\n\n result = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n for nb in range(3):\n for i in range(12):\n p[i] = -alpha[nb, i]\n try:\n solution = quadprog_solve_qp(Q, p, G, h, A, b)\n solution = np.array([solution])\n result[0, nb] = (alpha * solution.T)[nb, 0]\n except:\n pass\n\n for nb in range(3):\n for i in range(12):\n p[i] = alpha[nb, i]\n try:\n solution = quadprog_solve_qp(Q, p, G, h, A, b)\n solution = np.array([solution])\n result[0, nb + 3] = (alpha * solution.T)[nb, 0]\n except:\n pass\n\n return result\n\n def evaluate_swing_force_boundaries(self, q, qdot):\n self.update_constrained_swing_foot_inertia(q)\n self.update_c()\n J = np.matrix(\n self.get_world_oriented_frame_jacobian(q, self.swing_id)[:3]\n )\n h = np.matrix(\n se3.nonLinearEffects(self.model, self.data, q, qdot)\n ).transpose()\n B = np.zeros((self.nv, self.nv))\n B[6:, 6:] = eye(self.nv - 6)\n t = 2\n tau = self.swing_force_boundaries(h, B, t, q, qdot)\n tau = tau.T\n return tau\n\n def forward_robot(self, q, dq):\n # Update the pinocchio model.\n self.last_J_c = self.get_world_oriented_frame_jacobian(\n q, self.stance_id\n )[:3]\n self.last_xdot = self.xdot\n self.last_p = self.p\n self.robot.forwardKinematics(q, dq)\n self.robot.computeJointJacobians(q)\n self.robot.framesForwardKinematics(q)\n self.robot.centroidalMomentum(q, dq)\n\n self.last_q = q.copy()\n self.last_dq = dq.copy()\n\n\ndef yaw(q):\n return np.array(\n R.from_quat([np.array(q)[3:7]]).as_euler(\"xyz\", degrees=False)\n )[0, 2]\n\n\ndef plot(f):\n if is_left_leg_in_contact:\n M = [[0.045, -0.0, 0.0], [-0.0, 0.045, -0.0], [0.0, -0.0, 0.09]]\n else:\n M = [[0.045, 0.0, 0.0], [0.0, 0.045, 0.0], [0.0, 0.0, 0.09]]\n M_inv = inv(M)\n x2 = []\n x3 = []\n v = []\n time = 0.010\n A = np.matrix(\n [\n [1.0, time, 0.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, time, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0, time],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],\n ]\n )\n B = np.matrix(\n [\n [time * time / 2, 0.0, 0.0],\n [time, 0.0, 0.0],\n [0.0, time * time / 2, 0.0],\n [0.0, time, 0.0],\n [0.0, 0.0, time * time / 2],\n [0.0, 0.0, time],\n ]\n )\n x0 = pos_for_plotter\n v0 = vel_for_plotter\n x2.append(x0)\n x3.append(x0)\n h = np.array([-0.4, 0.0, 0.8])\n for i in range(len(f) / 3):\n x2.append(\n 0.5 * (f[i * 3 : i * 3 + 3] - h).dot(M_inv) * time * time\n + x0\n + v0 * time\n )\n sum = pos_for_plotter + vel_for_plotter * (i + 1) * time\n final = B\n for k in range(i + 1):\n sum[:] += np.array(\n final\n * np.matrix(\n f[(i - k) * 3 : (i - k) * 3 + 3].dot(M_inv)\n ).transpose()\n )[::2, 0]\n sum[:] += np.array(final * np.matrix(-h.dot(M_inv)).transpose())[\n ::2, 0\n ]\n final = A * final\n x3.append(sum)\n x0 = (\n 0.5 * (f[i * 3 : i * 3 + 3] - h).dot(M_inv) * time * time\n + x0\n + v0 * time\n )\n v0 = v0 + (f[i * 3 : i * 3 + 3] - h).dot(M_inv) * time\n 
v.append(v0)\n plt.plot(x2, label=\"x2\")\n plt.plot(x3, label=\"x3\")\n plt.axhline(\n y=dcm_reactive_stepper.get_next_support_foot_position()[0],\n linestyle=\"-\",\n )\n plt.axhline(\n y=dcm_reactive_stepper.get_next_support_foot_position()[1],\n linestyle=\"-\",\n )\n plt.axhline(\n y=dcm_reactive_stepper.get_next_support_foot_position()[2],\n linestyle=\"-\",\n )\n # plt.plot(v, label=\"v\")\n plt.legend()\n plt.grid()\n plt.show()\n\n\ndef dist(a, b):\n return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 + (a[2] - b[2]) ** 2)\n\n\ndef closed_loop():\n global open_loop\n open_loop = False\n if dcm_reactive_stepper.get_is_left_leg_in_contact():\n dcm_reactive_stepper.set_right_foot_position(right_foot_location)\n dcm_reactive_stepper.set_right_foot_velocity(right_foot_vel)\n else:\n dcm_reactive_stepper.set_left_foot_position(left_foot_location)\n dcm_reactive_stepper.set_left_foot_velocity(left_foot_vel)\n\n\ndef detect_contact():\n for contact in p.getContactPoints():\n if dist(left_foot_location, contact[5]) < 0.02 and dist(\n right_foot_location, contact[5]\n ) > dist(left_foot_location, contact[5]):\n contact_array[0] = 1\n if dist(right_foot_location, contact[5]) < 0.02 and dist(\n left_foot_location, contact[5]\n ) > dist(right_foot_location, contact[5]):\n contact_array[1] = 1\n\n\ndef create_box(\n halfExtents, collisionFramePosition, collisionFrameOrientation=[0, 0, 0, 1]\n):\n cuid = p.createCollisionShape(\n p.GEOM_BOX,\n halfExtents=halfExtents,\n collisionFramePosition=collisionFramePosition,\n collisionFrameOrientation=collisionFrameOrientation,\n )\n mass = 0 # static box\n p.createMultiBody(mass, cuid)\n p.changeDynamics(\n cuid,\n -1,\n linearDamping=0.04,\n angularDamping=0.04,\n restitution=0.0,\n lateralFriction=2.0,\n )\n\n\ndef plot_all_contact_points():\n plt_next_support_foot_position = []\n for j in range(100):\n if j / 100.0 + t_min >= t_max:\n break\n dcm_reactive_stepper.dcm_vrp_planner_initialization(\n l_min,\n l_max,\n w_min,\n w_max,\n t_min + j / 100.0,\n t_max,\n l_p,\n com_height,\n weight,\n )\n dcm_reactive_stepper.run(\n time,\n [left_foot_location[0], left_foot_location[1], 0],\n [right_foot_location[0], right_foot_location[1], 0],\n x_com,\n xd_com,\n yaw(q),\n contact_array,\n )\n plt_next_support_foot_position.append(\n dcm_reactive_stepper.get_next_support_foot_position().copy()\n )\n plt.figure(\"dcm\")\n plt.plot(np.array(plt_next_support_foot_position)[:, 0], label=\"x\")\n plt.plot(np.array(plt_next_support_foot_position)[:, 1], label=\"y\")\n plt.legend()\n plt.show()\n dcm_reactive_stepper.dcm_vrp_planner_initialization(\n l_min, l_max, w_min, w_max, t_min, t_max, l_p, com_height, weight\n )\n\n\ndef external_force(com):\n force = np.array(\n [\n (random() - 0.5) * 7000,\n (random() - 0.5) * 7000,\n (random() - 0.5) * 2500,\n ]\n )\n p.applyExternalForce(\n objectUniqueId=robot.robotId,\n linkIndex=-1,\n forceObj=force,\n posObj=[com[0], com[1], com[2]],\n flags=p.WORLD_FRAME,\n )\n\n\nif __name__ == \"__main__\":\n # Create a robot instance. 
This initializes the simulator as well.\n env = BulletEnvWithGround()\n robot = env.add_robot(BoltRobot())\n tau = np.zeros(6)\n p.resetDebugVisualizerCamera(1.6, 50, -35, (0.0, 0.0, 0.0))\n p.setTimeStep(0.0001)\n p.setRealTimeSimulation(0)\n for ji in range(8):\n p.changeDynamics(\n robot.robotId,\n ji,\n linearDamping=0.04,\n angularDamping=0.04,\n restitution=0.0,\n lateralFriction=4.0,\n spinningFriction=5.6,\n )\n\n MM = np.matrix(\n [[0.045, -0.002, 0.037], [-0.002, 0.042, 0.0], [0.037, 0.0, 0.065]]\n )\n MM = np.matrix(\n [\n [0.045, 0.005, 0.043],\n [\n 0.005,\n 0.045,\n 0.01,\n ],\n [0.043, 0.01, 0.09],\n ]\n )\n\n M = np.matrix([[0.045, 0.0, 0.0], [0.0, 0.045, 0.0], [0.0, 0.0, 0.09]])\n\n # p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, \"new_traj_obj_fall_2.mp4\")\n q = np.matrix(BoltConfig.initial_configuration).T\n qdot = np.matrix(BoltConfig.initial_velocity).T\n robot.reset_state(q, qdot)\n total_mass = sum([i.mass for i in robot.pin_robot.model.inertias[1:]])\n warmup = 5\n kp = np.array([150.0, 150.0, 150.0, 150.0, 150.0, 150.0])\n kd = [5.0, 5.0, 5.0, 5.0, 5.0, 5.0]\n x_ori = [0.0, 0.0, 0.0, 1.0]\n x_angvel = [0.0, 0.0, 0]\n robot_config = BoltConfig()\n config_file = robot_config.ctrl_path\n bolt_leg_ctrl = RobotImpedanceController(robot, config_file)\n centr_controller = RobotCentroidalController(\n robot_config,\n mu=1,\n kc=[0, 0, 100],\n dc=[0, 0, 10],\n kb=[100, 100, 100],\n db=[10.0, 10, 10],\n qp_penalty_lin=[1, 1, 1e6],\n qp_penalty_ang=[1e6, 1e6, 1],\n )\n\n is_left_leg_in_contact = True\n l_min = -0.1\n l_max = 0.1\n w_min = -0.08\n w_max = 0.2\n t_min = 0.1\n t_max = 0.8\n l_p = 0.1035 * 1\n com_height = 0.36487417\n weight = [1, 1, 5, 1000, 1000, 100000, 100000, 100000, 100000]\n mid_air_foot_height = 0.05\n control_period = 0.001\n planner_loop = 0.010\n x_des_local = [\n q[0].item(),\n q[1].item() + 0.02,\n 0.0,\n q[0].item(),\n q[1].item() - 0.02,\n 0.0,\n ]\n past_x = [\n q[0].item(),\n q[1].item() + 0.02,\n 0.0,\n q[0].item(),\n q[1].item() - 0.02,\n 0.0,\n ]\n v_des = [0.0, 0.0, 0.0]\n sim = LipmSimpulator(com_height)\n dcm_reactive_stepper = DcmReactiveStepper()\n dcm_reactive_stepper.initialize(\n is_left_leg_in_contact,\n l_min,\n l_max,\n w_min,\n w_max,\n t_min,\n t_max,\n l_p,\n com_height,\n weight,\n mid_air_foot_height,\n control_period,\n planner_loop,\n x_des_local[:3],\n x_des_local[3:],\n )\n\n dcm_reactive_stepper.set_desired_com_velocity(v_des)\n\n x_com = np.zeros((3, 1))\n x_com[:] = [[0.0], [0.0], [com_height]]\n xd_com = np.zeros((3, 1))\n x_com_cent = x_com.copy()\n xd_com_cent = xd_com.copy()\n omega = np.sqrt(9.8 / com_height)\n cnt_array = [1, 1]\n time = 0\n control_time = 0\n open_loop = True\n\n # plot\n plt_time = []\n plt_control_time = []\n plt_time_r = []\n plt_time_l = []\n plt_time_all = []\n plt_x_com = []\n plt_xd_com = []\n plt_right_foot_position = []\n plt_right_foot_velocity = []\n plt_right_foot_acceleration = []\n plt_left_foot_position = []\n plt_left_foot_velocity = []\n plt_left_foot_acceleration = []\n plt_time_from_last_step_touchdown = []\n # plt_duration_before_step_landing = []\n plt_current_support_foot = []\n plt_step_time = []\n plt_dcm_local = []\n plt_left_eef_real_pos = []\n plt_right_eef_real_pos = []\n plt_r = []\n plt_is_left_in_contact = []\n plt_pos_des_local = []\n plt_q_com = []\n plt_qdot_com = []\n plt_F = []\n plt_x = []\n plt_q = []\n plt_qdot = []\n plt_qdot2 = []\n plt_tau = []\n plt_euler_angles = []\n plt_dcm = []\n plt_next_step_location = []\n plt_foot_mass_r = []\n plt_foot_mass_l 
= []\n plt_eq_11_r = []\n plt_eq_11_l = []\n plt_eq_h = []\n plt_eq_g = []\n plt_eq_qdot = []\n plt_eq_qddot = []\n plt_F_M_new = []\n plt_eq_fifteen = []\n plt_F_M = []\n dcm_force = [0.0, 0.0, 0.0]\n offset = 0.025\n dcm_reactive_stepper.start()\n inv_kin = OperationalSpaceDynamics(\n robot.pin_robot.model, robot.end_effector_names\n )\n\n for i in range(5005):\n last_qdot = qdot\n q, qdot = robot.get_state()\n robot.pin_robot.com(q, qdot)\n x_com = robot.pin_robot.com(q, qdot)[0]\n xd_com = robot.pin_robot.com(q, qdot)[1]\n # robot.forward_robot(q, qdot)\n # if i > 600 and i < 670:\n # print(\"External Force\")\n # force = np.array([0, -138, 0])\n # p.applyExternalForce(objectUniqueId=robot.robotId, linkIndex=-1, forceObj=force,\n # posObj=[q[0], q[1], q[2]], flags=p.WORLD_FRAME)\n\n if warmup <= i:\n ###### mass matrix\n m_q = np.matrix(q).transpose()\n m_qdot = np.matrix(qdot).transpose()\n if (\n i > 10\n and 0.010\n < dcm_reactive_stepper.get_step_duration()\n - dcm_reactive_stepper.get_time_from_last_step_touchdown()\n ):\n force_flag = False\n\n #### test\n # if i > 10 and t_min + 0.001 > dcm_reactive_stepper.get_time_from_last_step_touchdown() \\\n # and t_min < dcm_reactive_stepper.get_time_from_last_step_touchdown() and not force_flag:\n # # additional_time = random() * 0.15\n # external_force(x_com)\n # force_flag = True\n # # dcm_reactive_stepper.dcm_vrp_planner_initialization(l_min, l_max, w_min, w_max, t_min + additional_time,\n # # t_max, l_p, com_height, weight)\n\n # if dcm_reactive_stepper.get_is_left_leg_in_contact() == 1: #inv_kin\n # # if inv_kin.endeff_ids[1] != inv_kin.swing_id:\n # # print(i)\n # inv_kin.swing_id = inv_kin.endeff_ids[1]\n # inv_kin.stance_id = inv_kin.endeff_ids[0]\n # else:\n # # if inv_kin.endeff_ids[0] != inv_kin.swing_id:\n # # print(i)\n # inv_kin.swing_id = inv_kin.endeff_ids[0]\n # inv_kin.stance_id = inv_kin.endeff_ids[1]\n # inv_kin.forward_robot(m_q, m_qdot)\n # # print(inv_kin.foot_mass_matrix(m_q))\n # if dcm_reactive_stepper.get_is_left_leg_in_contact() == 1:\n # plt_foot_mass_r.append(inv_kin.foot_mass_matrix(m_q)[3:6])\n # plt_eq_11_r.append(inv_kin.constrained_swing_foot_inertia(m_q))\n # plt_time_r.append(dcm_reactive_stepper.get_time_from_last_step_touchdown())\n # else:\n # plt_foot_mass_l.append(inv_kin.foot_mass_matrix(m_q)[:3])\n # plt_eq_11_l.append(inv_kin.constrained_swing_foot_inertia(m_q))\n # plt_time_l.append(dcm_reactive_stepper.get_time_from_last_step_touchdown())\n # # print(inv_kin.constrained_swing_foot_inertia(m_q).dot(inv_kin.xddot(m_q, m_qdot)))\n #\n # contact_array = [0, 0]\n # left = bolt_leg_ctrl.imps[0]\n # right = bolt_leg_ctrl.imps[1]\n # left_foot_location = np.array(left.pin_robot.data.oMf[left.frame_end_idx].translation).reshape(-1)\n # right_foot_location = np.array(right.pin_robot.data.oMf[right.frame_end_idx].translation).reshape(-1)\n # detect_contact()\n # if 0.003 < dcm_reactive_stepper.get_time_from_last_step_touchdown() and\\\n # contact_array[dcm_reactive_stepper.get_is_left_leg_in_contact()] == 0:\n # plt_eq_h.append(inv_kin.projected_nonlinear_terms_h(m_q, m_qdot))\n # plt_eq_g.append(inv_kin.projected_gravity(m_q, m_qdot))\n # plt_eq_qdot.append(inv_kin.projected_nonlinear_terms_v(m_q, m_qdot))\n # plt_eq_qddot.append(inv_kin.constrained_swing_foot_inertia(m_q).dot(inv_kin.xddot(m_q, m_qdot)))\n # plt_F_M.append(MM.dot(inv_kin.xddot(m_q, m_qdot)))\n # plt_F_M_new.append(M.dot(inv_kin.xddot(m_q, m_qdot)))\n # plt_time_all.append(dcm_reactive_stepper.get_time_from_last_step_touchdown())#if 
you uncomment this line, you should comment below similar line\n # else:\n # inv_kin.projected_nonlinear_terms_h(m_q, m_qdot)\n # inv_kin.projected_nonlinear_terms_v(m_q, m_qdot)\n # inv_kin.constrained_swing_foot_inertia(m_q).dot(inv_kin.xddot(m_q, m_qdot))\n # dcm_reactive_stepper.get_time_from_last_step_touchdown()\n #\n # contact_array = [0, 0]\n # s = inv_kin.evaluate_swing_force_boundaries(m_q, m_qdot)\n # if s[0].item() > 0.00001:\n # plt_eq_fifteen.append(inv_kin.evaluate_swing_force_boundaries(m_q, m_qdot))\n # # plt_time_all.append(dcm_reactive_stepper.get_time_from_last_step_touchdown())#if you uncomment this line, you should comment above similar line\n #\n # # if(len(inv_kin.evaluate_swing_force_boundaries(m_q, m_qdot) != 6)):\n # # print(inv_kin.evaluate_swing_force_boundaries(m_q, m_qdot))\n # ##### mass matrix done\n\n left = bolt_leg_ctrl.imp_ctrl_array[0]\n right = bolt_leg_ctrl.imp_ctrl_array[1]\n left_foot_location = np.array(\n left.pin_robot.data.oMf[left.frame_end_idx].translation\n ).reshape(-1)\n right_foot_location = np.array(\n right.pin_robot.data.oMf[right.frame_end_idx].translation\n ).reshape(-1)\n left_foot_vel = np.array(\n se3.SE3(\n left.pin_robot.data.oMf[left.frame_end_idx].rotation,\n np.zeros((3, 1)),\n )\n * se3.computeFrameJacobian(\n robot.pin_robot.model,\n robot.pin_robot.data,\n q,\n left.frame_end_idx,\n ).dot(qdot)[0:3]\n )\n right_foot_vel = np.array(\n se3.SE3(\n right.pin_robot.data.oMf[right.frame_end_idx].rotation,\n np.zeros((3, 1)),\n )\n * se3.computeFrameJacobian(\n robot.pin_robot.model,\n robot.pin_robot.data,\n q,\n right.frame_end_idx,\n ).dot(qdot)[0:3]\n )\n\n # closed_loop()\n contact_array = [0, 0]\n # detect_contact()\n\n # if i % 100 == 0:\n # plot_all_contact_points()\n if open_loop:\n if dcm_reactive_stepper.get_is_left_leg_in_contact():\n pos_for_plotter = (\n dcm_reactive_stepper.get_right_foot_position().copy()\n )\n vel_for_plotter = (\n dcm_reactive_stepper.get_right_foot_velocity().copy()\n )\n else:\n pos_for_plotter = (\n dcm_reactive_stepper.get_left_foot_position().copy()\n )\n vel_for_plotter = (\n dcm_reactive_stepper.get_left_foot_velocity().copy()\n )\n else:\n if dcm_reactive_stepper.get_is_left_leg_in_contact():\n pos_for_plotter = [\n right_foot_location[0],\n right_foot_location[1],\n right_foot_location[2] - offset,\n ]\n vel_for_plotter = right_foot_vel\n else:\n pos_for_plotter = [\n left_foot_location[0],\n left_foot_location[1],\n left_foot_location[2] - offset,\n ]\n vel_for_plotter = left_foot_vel\n\n dcm_reactive_stepper.run(\n time,\n [\n left_foot_location[0],\n left_foot_location[1],\n left_foot_location[2] - offset,\n ],\n [\n right_foot_location[0],\n right_foot_location[1],\n right_foot_location[2] - offset,\n ],\n left_foot_vel,\n right_foot_vel,\n x_com,\n xd_com,\n yaw(q),\n not open_loop,\n )\n dcm_force = (\n dcm_reactive_stepper.get_forces().copy()\n ) # feed forward\n # if (i + 5) % 1 == 0 and i > 85:# and int(dcm_reactive_stepper.get_time_from_last_step_touchdown() * 1000) == 0:\n # d = dcm_reactive_stepper.get_forces().copy()\n # plot(d)\n # if dcm_reactive_stepper.time_from_last_step_touchdown == 0:\n # desired_q = np.array(q.copy())[:, 0]\n # else:\n # dcm_reactive_stepper.run(time, dcm_reactive_stepper.flying_foot_position, x_com.copy(), xd_com.copy(), 0) # q[5])\n\n x_des_local = []\n x_des_local.extend(\n dcm_reactive_stepper.get_left_foot_position().copy()\n )\n x_des_local.extend(\n dcm_reactive_stepper.get_right_foot_position().copy()\n )\n\n if open_loop:\n x_des_local[2] 
+= offset\n x_des_local[5] += offset\n\n if dcm_reactive_stepper.get_is_left_leg_in_contact():\n cnt_array = [1, 0]\n else:\n cnt_array = [0, 1]\n\n plt_time.append(time)\n plt_right_foot_position.append(x_des_local[3:6])\n plt_right_foot_velocity.append(\n dcm_reactive_stepper.get_right_foot_velocity().copy()\n )\n plt_right_foot_acceleration.append(\n dcm_reactive_stepper.get_right_foot_acceleration().copy()\n )\n plt_left_foot_position.append(x_des_local[:3])\n plt_left_foot_velocity.append(\n dcm_reactive_stepper.get_left_foot_velocity().copy()\n )\n plt_left_foot_acceleration.append(\n dcm_reactive_stepper.get_left_foot_acceleration().copy()\n )\n plt_time_from_last_step_touchdown.append(\n dcm_reactive_stepper.get_time_from_last_step_touchdown()\n )\n # plt_duration_before_step_landing.append(dcm_reactive_stepper.duration_before_step_landing)\n plt_current_support_foot.append(\n dcm_reactive_stepper.get_current_support_foot_position().copy()\n )\n # plt_dcm.append(dcm_reactive_stepper.dcm_vrp_planner.get_dcm_local().copy())\n plt_is_left_in_contact.append(\n dcm_reactive_stepper.get_is_left_leg_in_contact()\n )\n plt_next_step_location.append(\n dcm_reactive_stepper.get_next_support_foot_position().copy()\n )\n plt_dcm_local.append(x_com + xd_com / omega)\n\n if dcm_reactive_stepper.get_time_from_last_step_touchdown() == 0:\n plt_step_time.append(int(i) - warmup)\n time += 0.001\n\n for j in range(2):\n imp = bolt_leg_ctrl.imp_ctrl_array[j]\n x_des_local[3 * j : 3 * (j + 1)] -= imp.pin_robot.data.oMf[\n imp.frame_root_idx\n ].translation\n if j == 0:\n plt_left_eef_real_pos.append(\n np.array(\n imp.pin_robot.data.oMf[imp.frame_end_idx].translation\n ).reshape(-1)\n )\n else:\n plt_right_eef_real_pos.append(\n np.array(\n imp.pin_robot.data.oMf[imp.frame_end_idx].translation\n ).reshape(-1)\n )\n w_com = centr_controller.compute_com_wrench(\n q.copy(),\n qdot.copy(),\n [0.0, 0.0, com_height],\n [0.0, 0.0, 0.0],\n [0, 0.0, 0, 1.0],\n [0.0, 0.0, 0.0],\n )\n w_com[0] = 0.0\n w_com[1] = 0.0\n # w_com[2] += total_mass * 9.81\n\n F = centr_controller.compute_force_qp(q, qdot, cnt_array, w_com)\n\n # torque = joint_controller(q, desired_q, qdot, desired_qdot, kp_joint, kd_joint, cnt_array)\n\n des_vel = np.concatenate(\n (\n dcm_reactive_stepper.get_left_foot_velocity()\n - [qdot[0].item(), qdot[1].item(), qdot[2].item()],\n dcm_reactive_stepper.get_right_foot_velocity()\n - [qdot[0].item(), qdot[1].item(), qdot[2].item()],\n )\n )\n\n if cnt_array[0] == 1 and cnt_array[1] == 0:\n F[3:] = -dcm_force[:3]\n elif cnt_array[0] == 0 and cnt_array[1] == 1:\n F[:3] = -dcm_force[:3]\n tau = bolt_leg_ctrl.return_joint_torques(\n q.copy(),\n qdot.copy(),\n zero_cnt_gain(kp, cnt_array),\n zero_cnt_gain(kd, cnt_array),\n x_des_local,\n des_vel,\n F,\n )\n control_time += 0.001\n if warmup <= i:\n plt_control_time.append(control_time)\n # plt_r.append(r)\n plt_F.append(F)\n plt_x.append(x_des_local)\n plt_tau.append(tau)\n plt_xd_com.append(xd_com.copy())\n plt_qdot_com.append(qdot)\n plt_x_com.append(x_com.copy())\n plt_pos_des_local.append([x_des_local[1], x_des_local[4]])\n # plt_euler_angles.append(np.array(R.from_quat([np.array(q)[3:7, 0]]).as_euler('xyz', degrees=False))[0, :])\n plt_q.append(q[:].copy())\n # plt_qdot.append(inv_kin.xddot(q, qdot))\n # plt_qdot2.append(MM.dot(inv_kin.xddot(q, qdot)))\n # plt_q_com.append(np.array(R.from_quat([np.array(q)[3:7, 0]]).as_euler('xyz', degrees=False))[0, :])\n # plt_desired_q.append(desired_q[7:].copy())\n\n for j in range(10):\n 
robot.send_joint_command(tau)\n p.stepSimulation()\n\n dcm_reactive_stepper.stop()\n #\n # FIGSIZE = 3.7\n # FONT_SIZE = 8\n # FONT_WEIGHT = \"normal\"\n # # set the parameters\n # font = {'family' : 'normal',\n # 'weight' : FONT_WEIGHT,\n # 'size' : FONT_SIZE}\n # plt.rc('font', **font)\n # FIGURE_SIZE = ( FIGSIZE , FIGSIZE * 9.0/16.0)\n\n # p.stopStateLogging()\n\n # np.savetxt('plt_time_all.txt', np.array(plt_time_all))\n # np.savetxt('plt_eq_fifteen0.txt', np.array(plt_eq_fifteen)[:, 0])\n # np.savetxt('plt_eq_fifteen1.txt', np.array(plt_eq_fifteen)[:, 1])\n # np.savetxt('plt_eq_fifteen2.txt', np.array(plt_eq_fifteen)[:, 2])\n # np.savetxt('plt_eq_fifteen3.txt', np.array(plt_eq_fifteen)[:, 3])\n # np.savetxt('plt_eq_fifteen4.txt', np.array(plt_eq_fifteen)[:, 4])\n # np.savetxt('plt_eq_fifteen5.txt', np.array(plt_eq_fifteen)[:, 5])\n\n # fig, ax = plt.subplots(2, 3)\n # ax[0][0].plot(plt_time_r, np.array(plt_foot_mass_r)[:,0,0], 'o', markersize=1, label ='0,0')\n # ax[0][0].legend()\n # ax[0][1].plot(plt_time_r, np.array(plt_foot_mass_r)[:,0,1], 'o', markersize=1, label ='0,1')\n # ax[0][1].legend()\n # ax[0][2].plot(plt_time_r, np.array(plt_foot_mass_r)[:,0,2], 'o', markersize=1, label ='0,2')\n # ax[0][2].legend()\n # ax[1][0].plot(plt_time_r, np.array(plt_foot_mass_r)[:,1,1], 'o', markersize=1, label = '1,1')\n # ax[1][0].legend()\n # ax[1][1].plot(plt_time_r, np.array(plt_foot_mass_r)[:,1,2], 'o', markersize=1, label = '1,2')\n # ax[1][1].legend()\n # ax[1][2].plot(plt_time_r, np.array(plt_foot_mass_r)[:,2,2], 'o', markersize=1, label = '2,2')\n # ax[1][2].legend()\n # fig, ax = plt.subplots(2, 3)\n # ax[0][0].plot(plt_time_l, np.array(plt_foot_mass_l)[:, 0, 0], 'o', markersize=1, label='0,0')\n # ax[0][0].legend()\n # ax[0][1].plot(plt_time_l, np.array(plt_foot_mass_l)[:, 0, 1], 'o', markersize=1, label='0,1')\n # ax[0][1].legend()\n # ax[0][2].plot(plt_time_l, np.array(plt_foot_mass_l)[:, 0, 2], 'o', markersize=1, label='0,2')\n # ax[0][2].legend()\n # ax[1][0].plot(plt_time_l, np.array(plt_foot_mass_l)[:, 1, 1], 'o', markersize=1, label='1,1')\n # ax[1][0].legend()\n # ax[1][1].plot(plt_time_l, np.array(plt_foot_mass_l)[:, 1, 2], 'o', markersize=1, label='1,2')\n # ax[1][1].legend()\n # ax[1][2].plot(plt_time_l, np.array(plt_foot_mass_l)[:, 2, 2], 'o', markersize=1, label='2,2')\n # ax[1][2].legend()\n # fig, ax = plt.subplots(2, 3)\n # ax[0][0].plot(plt_time_r, np.array(plt_eq_11_r)[:, 0, 0], 'o', markersize=1, label='0,0')\n # ax[0][0].legend()\n # ax[0][1].plot(plt_time_r, np.array(plt_eq_11_r)[:, 0, 1], 'o', markersize=1, label='0,1')\n # ax[0][1].legend()\n # ax[0][2].plot(plt_time_r, np.array(plt_eq_11_r)[:, 0, 2], 'o', markersize=1, label='0,2')\n # ax[0][2].legend()\n # ax[1][0].plot(plt_time_r, np.array(plt_eq_11_r)[:, 1, 1], 'o', markersize=1, label='1,1')\n # ax[1][0].legend()\n # ax[1][1].plot(plt_time_r, np.array(plt_eq_11_r)[:, 1, 2], 'o', markersize=1, label='1,2')\n # ax[1][1].legend()\n # ax[1][2].plot(plt_time_r, np.array(plt_eq_11_r)[:, 2, 2], 'o', markersize=1, label='2,2')\n # ax[1][2].legend()\n # fig, ax = plt.subplots(2, 3)\n # ax[0][0].plot(plt_time_l, np.array(plt_eq_11_l)[:, 0, 0], 'o', markersize=1, label='0,0')\n # ax[0][0].legend()\n # ax[0][1].plot(plt_time_l, np.array(plt_eq_11_l)[:, 0, 1], 'o', markersize=1, label='0,1')\n # ax[0][1].legend()\n # ax[0][2].plot(plt_time_l, np.array(plt_eq_11_l)[:, 0, 2], 'o', markersize=1, label='0,2')\n # ax[0][2].legend()\n # ax[1][0].plot(plt_time_l, np.array(plt_eq_11_l)[:, 1, 1], 'o', markersize=1, 
label='1,1')\n # ax[1][0].legend()\n # ax[1][1].plot(plt_time_l, np.array(plt_eq_11_l)[:, 1, 2], 'o', markersize=1, label='1,2')\n # ax[1][1].legend()\n # ax[1][2].plot(plt_time_l, np.array(plt_eq_11_l)[:, 2, 2], 'o', markersize=1, label='2,2')\n # ax[1][2].legend()\n # np.savetxt('plt_time_l.txt', np.array(plt_time_l))\n # np.savetxt('plt_eq_11_l0.txt', np.array(plt_eq_11_l)[:, 0, 0])\n # np.savetxt('plt_eq_11_l1.txt', np.array(plt_eq_11_l)[:, 0, 1])\n # np.savetxt('plt_eq_11_l2.txt', np.array(plt_eq_11_l)[:, 0, 2])\n # np.savetxt('plt_eq_11_l3.txt', np.array(plt_eq_11_l)[:, 1, 1])\n # np.savetxt('plt_eq_11_l4.txt', np.array(plt_eq_11_l)[:, 1, 2])\n # np.savetxt('plt_eq_11_l5.txt', np.array(plt_eq_11_l)[:, 2, 2])\n # fig, ax = plt.subplots(3, 1)\n # ax[0].plot(plt_time_all, np.array(plt_eq_h)[:, 0], 'o', markersize=1, label='h')\n # ax[0].plot(plt_time_all, np.array(plt_eq_g)[:, 0], 'o', markersize=1, label='g')\n # ax[0].plot(plt_time_all, np.array(plt_eq_qdot)[:, 0], 'o', markersize=1, label='0')\n # ax[0].legend()\n # ax[1].plot(plt_time_all, np.array(plt_eq_h)[:, 1], 'o', markersize=1, label='h')\n # ax[1].plot(plt_time_all, np.array(plt_eq_g)[:, 1], 'o', markersize=1, label='g')\n # ax[1].plot(plt_time_all, np.array(plt_eq_qdot)[:, 1], 'o', markersize=1, label='1')\n # ax[1].legend()\n # ax[2].plot(plt_time_all, np.array(plt_eq_h)[:, 2], 'o', markersize=1, label='h')\n # ax[2].plot(plt_time_all, np.array(plt_eq_g)[:, 2], 'o', markersize=1, label='g')\n # ax[2].plot(plt_time_all, np.array(plt_eq_qdot)[:, 2], 'o', markersize=1, label='2')\n # ax[2].legend()\n # fig, ax = plt.subplots(3, 1)\n # ax[0].plot(plt_time, np.array(plt_eq_qdot)[:, 0], 'o', markersize=1, label='0')\n # ax[0].legend()\n # ax[1].plot(plt_time, np.array(plt_eq_qdot)[:, 1], 'o', markersize=1, label='1')\n # ax[1].legend()\n # ax[2].plot(plt_time, np.array(plt_eq_qdot)[:, 2], 'o', markersize=1, label='2')\n # ax[2].legend()\n # fig, ax = plt.subplots(3, 1)\n # ax[0].plot(plt_time_all, np.array(plt_eq_qddot)[:, 0], 'o', color = 'red', label='Inertia')\n # ax[0].plot(plt_time_all, np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0], 'o', label='Nonlinear Terms')\n # ax[0].legend()\n # ax[1].plot(plt_time_all, np.array(plt_eq_qddot)[:, 1], 'o', color = 'red', label='Inertia')\n # ax[1].plot(plt_time_all, np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1], 'o', label='Nonlinear Terms')\n # ax[1].legend()\n # ax[2].plot(plt_time_all, np.array(plt_eq_qddot)[:, 2], 'o', color = 'red', label='Inertia')\n # ax[2].plot(plt_time_all, np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2], 'o', label='Nonlinear Terms')\n # ax[2].legend()\n\n # fig, ax = plt.subplots(3, 1)\n # ax[0].plot(plt_time_all, np.array(plt_F_M_new)[:, 0] - (np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0]), 'o', markersize=1 , color = 'red', label='Diagonal')\n # ax[0].plot(plt_time_all, np.array(plt_F_M)[:, 0] - (np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0]), 'o', markersize=1 , label='Non-diagonal')\n # ax[0].plot(plt_time_all, np.array(plt_F_M_new)[:, 0] - 0.4 - (np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0]), 'og', markersize=1 , label='Non-diagonal')\n # ax[0].legend()\n # ax[1].plot(plt_time_all, np.array(plt_F_M_new)[:, 1] - (np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1]), 'o', markersize=1 , color = 'red', label='Diagonal')\n # ax[1].plot(plt_time_all, np.array(plt_F_M)[:, 1] - 
(np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1]), 'o', markersize=1 , label='Non-diagonal')\n # ax[1].plot(plt_time_all, np.array(plt_F_M_new)[:, 1] + 0 - (np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1]), 'og', markersize=1 , label='Non-diagonal')\n # ax[1].legend()\n # ax[2].plot(plt_time_all, np.array(plt_F_M_new)[:, 2] - (np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2]), 'o', markersize=1 , color = 'red', label='Diagonal')\n # ax[2].plot(plt_time_all, np.array(plt_F_M)[:, 2] - (np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2]), 'o', markersize=1 , label='Non-diagonal')\n # ax[2].plot(plt_time_all, np.array(plt_F_M_new)[:, 2] + 0.8 - (np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2]), 'og', markersize=1 , label='Non-diagonal')\n # ax[2].legend()\n #\n #\n # np.savetxt('plt_D_error0.txt', np.array(plt_F_M_new)[:, 0] - (np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0]))\n # np.savetxt('plt_D_error1.txt', np.array(plt_F_M_new)[:, 1] - (np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1]))\n # np.savetxt('plt_D_error2.txt', np.array(plt_F_M_new)[:, 2] - (np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2]))\n # np.savetxt('plt_ND_error0.txt', np.array(plt_F_M)[:, 0] - (np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0]))\n # np.savetxt('plt_ND_error1.txt', np.array(plt_F_M)[:, 1] - (np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1]))\n # np.savetxt('plt_ND_error2.txt', np.array(plt_F_M)[:, 2] - (np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2]))\n # np.savetxt('plt_time_all_D_ND.txt', np.array(plt_time_all))\n #\n # np.savetxt('plt_F0.txt', np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0])\n # np.savetxt('plt_F1.txt', np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1])\n # np.savetxt('plt_F2.txt', np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2])\n # np.savetxt('plt_non_linear0.txt', np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0])\n # np.savetxt('plt_non_linear1.txt', np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1])\n # np.savetxt('plt_non_linear2.txt', np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2])\n # np.savetxt('plt_qddot_MassMatrix0.txt', np.array(plt_eq_qddot)[:, 0])\n # np.savetxt('plt_qddot_MassMatrix1.txt', np.array(plt_eq_qddot)[:, 1])\n # np.savetxt('plt_qddot_MassMatrix2.txt', np.array(plt_eq_qddot)[:, 2])\n # np.savetxt('plt_Error_D_M0.txt', np.array(plt_F_M_new)[:, 0] - np.array(plt_eq_qddot)[:, 0])\n # np.savetxt('plt_Error_D_M1.txt', np.array(plt_F_M_new)[:, 1] - np.array(plt_eq_qddot)[:, 1])\n # np.savetxt('plt_Error_D_M2.txt', np.array(plt_F_M_new)[:, 2] - np.array(plt_eq_qddot)[:, 2])\n # np.savetxt('plt_Error_ND_M0.txt', np.array(plt_F_M)[:, 0] - np.array(plt_eq_qddot)[:, 0])\n # np.savetxt('plt_Error_ND_M1.txt', np.array(plt_F_M)[:, 1] - np.array(plt_eq_qddot)[:, 1])\n # np.savetxt('plt_Error_ND_M2.txt', np.array(plt_F_M)[:, 2] - np.array(plt_eq_qddot)[:, 2])\n # np.savetxt('plt_h0.txt', np.array(plt_eq_h)[:, 0])\n # np.savetxt('plt_h1.txt', np.array(plt_eq_h)[:, 1])\n # np.savetxt('plt_h2.txt', np.array(plt_eq_h)[:, 2])\n # 
np.savetxt('plt_qdot0.txt', np.array(plt_eq_qdot)[:, 0])\n # np.savetxt('plt_qdot1.txt', np.array(plt_eq_qdot)[:, 1])\n # np.savetxt('plt_qdot2.txt', np.array(plt_eq_qdot)[:, 2])\n # np.savetxt('plt_eq_g0.txt', np.array(plt_eq_g)[:, 0])\n # np.savetxt('plt_eq_g1.txt', np.array(plt_eq_g)[:, 1])\n # np.savetxt('plt_eq_g2.txt', np.array(plt_eq_g)[:, 2])\n\n # ax[0].set_ylabel(\"Force [N]\")\n # ax[1].set_ylabel(\"Force [N]\")\n # ax[2].set_ylabel(\"Force [N]\")\n # ax[2].set_xlabel(\"Time [ms]\")\n # plt.tight_layout()\n # plt.savefig(\"eq11\" + \".pdf\")\n #\n # np.savetxt('plt_F_M0WF.txt', np.array(plt_F_M)[:, 0])\n # np.savetxt('plt_F_M1WF.txt', np.array(plt_F_M)[:, 1])\n # np.savetxt('plt_F_M2WF.txt', np.array(plt_F_M)[:, 2])\n #\n # np.savetxt('plt_F_M20WF.txt', np.array(plt_eq_qddot)[:, 0] + np.array(plt_eq_qdot)[:, 0] + np.array(plt_eq_h)[:, 0])\n # np.savetxt('plt_F_M21WF.txt', np.array(plt_eq_qddot)[:, 1] + np.array(plt_eq_qdot)[:, 1] + np.array(plt_eq_h)[:, 1])\n # np.savetxt('plt_F_M22WF.txt', np.array(plt_eq_qddot)[:, 2] + np.array(plt_eq_qdot)[:, 2] + np.array(plt_eq_h)[:, 2])\n #\n # np.savetxt('plt_time_all_F_MWF.txt', np.array(plt_time_all))\n\n # print(shape(plt_eq_fifteen))\n\n # print(plt_eq_fifteen)\n\n # plt.figure(\"com\")\n # plt.plot(plt_time, np.array(plt_x_com)[:,0])\n # plt.plot(plt_time, np.array(plt_x_com)[:,1])\n # plt.plot(plt_time, np.array(plt_x_com)[:,2])\n\n # plt.figure(\"xd\")\n # plt.plot(plt_time, np.array(plt_xd_com)[:,0])\n # plt.plot(plt_time, np.array(plt_xd_com)[:,1])\n # plt.plot(plt_time, np.array(plt_xd_com)[:,2])\n\n # plt.figure(\"right_foot_pos\")\n # plt.plot(plt_time, np.array(plt_right_foot_position)[:,0])\n # plt.plot(plt_time, np.array(plt_right_foot_position)[:,1])\n # plt.plot(plt_time, np.array(plt_right_foot_position)[:,2])\n\n # plt.plot(plt_time, np.array(plt_left_foot_position)[:,0])\n # plt.plot(plt_time, np.array(plt_left_foot_position)[:,1])\n # plt.plot(plt_time, np.array(plt_left_foot_position)[:,2])\n\n # plt.figure(\"feet_pos_y\")\n # plt.plot(plt_time, np.array(plt_right_foot_position)[:,2])\n # plt.plot(plt_time, np.array(plt_left_foot_position)[:,2])\n\n plt.figure(\"y\")\n plt.plot(plt_time, np.array(plt_left_foot_position)[:, 1], label=\"left\")\n plt.plot(plt_time, np.array(plt_right_foot_position)[:, 1], label=\"right\")\n plt.plot(plt_control_time, np.array(plt_x_com)[warmup:, 1], label=\"com\")\n plt.plot(\n plt_control_time, np.array(plt_xd_com)[warmup:, 1], label=\"xd_com\"\n )\n plt.plot(plt_time, np.array(plt_dcm_local)[:, 1], label=\"dcm_local\")\n plt.plot(\n plt_time,\n np.array(plt_next_step_location)[:, 1],\n label=\"next_step_location\",\n )\n # plt.plot(plt_time, np.array(plt_dcm)[:, 1], label=\"dcm\")\n plt.plot(\n plt_time,\n np.array(plt_left_eef_real_pos)[warmup:, 1],\n label=\"left_eef_real_pos\",\n )\n plt.plot(\n plt_time,\n np.array(plt_right_eef_real_pos)[warmup:, 1],\n label=\"right_eef_real_pos\",\n )\n # plt.plot(plt_time, np.array(plt_current_support_foot)[:, 1], label=\"current_support_foot\")\n # plt.plot(plt_time, plt_pos_des_local[warmup + 1:], label = \"pos des_local_eef\")\n plt.legend()\n # for time in plt_step_time:\n # plt.axvline(time / 1000)\n\n # plt.figure(\"q\")\n # plt.plot(np.array(plt_qdot)[:, :, 0], label=\"plt_qddot\")\n # plt.plot(np.array(plt_qdot2)[:, :, 0], label=\"plt_M*qddot\")\n # plt.legend()\n # for time in plt_step_time:\n # plt.axvline(time / 1000)\n\n plt.figure(\"x\")\n plt.plot(\n plt_time, np.array(plt_left_foot_position)[:, 0], label=\"des_left\"\n )\n 
plt.plot(\n plt_time, np.array(plt_right_foot_position)[:, 0], label=\"des_right\"\n )\n plt.plot(plt_control_time, np.array(plt_x_com)[warmup:, 0], label=\"com\")\n plt.plot(\n plt_control_time, np.array(plt_xd_com)[warmup:, 0], label=\"xd_com\"\n )\n plt.plot(plt_time, np.array(plt_dcm_local)[:, 0], label=\"dcm_local\")\n plt.plot(\n plt_time,\n np.array(plt_next_step_location)[:, 0],\n label=\"next_step_location\",\n )\n # plt.plot(plt_time, np.array(plt_dcm)[:, 0], label=\"dcm\")\n plt.plot(\n plt_time,\n np.array(plt_left_eef_real_pos)[warmup:, 0],\n label=\"left_eef_real_pos\",\n )\n plt.plot(\n plt_time,\n np.array(plt_right_eef_real_pos)[warmup:, 0],\n label=\"right_eef_real_pos\",\n )\n # plt.plot(plt_time, np.array(plt_current_support_foot)[:, 0], label=\"current_support_foot\")\n # plt.plot(plt_time, np.array(plt_duration_before_step_landing)[:], label=\"plt_duration_before_step_landing\")\n # plt.plot(plt_time[:], plt_is_left_in_contact[:], label=\"is_left_in_contact\")\n plt.legend()\n # for time in plt_step_time:\n # plt.axvline(time / 1000)\n\n plt.figure(\"tau\")\n plt.plot(np.array(plt_tau)[:, :], label=\"tau\")\n plt.legend()\n\n plt.figure(\"z\")\n plt.plot(\n plt_time[:], plt_is_left_in_contact[:], label=\"is_left_in_contact\"\n )\n plt.plot(plt_time, np.array(plt_left_foot_position)[:, 2], label=\"left\")\n plt.plot(plt_time, np.array(plt_right_foot_position)[:, 2], label=\"right\")\n plt.plot(plt_control_time, np.array(plt_x_com)[warmup:, 2], label=\"com\")\n # plt.plot(plt_time, np.array(plt_dcm_local)[:, 2], label=\"dcm_local\")\n plt.plot(\n plt_time,\n np.array(plt_left_eef_real_pos)[warmup:, 2],\n label=\"left_eef_real_pos\",\n )\n plt.plot(\n plt_time,\n np.array(plt_right_eef_real_pos)[warmup:, 2],\n label=\"right_eef_real_pos\",\n )\n plt.legend()\n for time in plt_step_time:\n plt.axvline(time / 1000)\n\n # plt.figure(\"q\")\n # plt.plot(plt_time, np.array(plt_q_com)[:, 3], label=\"x\")\n # plt.plot(plt_time, np.array(plt_q_com)[:, 4], label=\"y\")\n # plt.plot(plt_time, np.array(plt_q_com)[:, 5], label=\"z\")\n # plt.legend()\n # for time in plt_step_time:\n # plt.axvline(time / 1000)\n\n plt.figure(\"F\")\n plt.plot(plt_time, plt_F[warmup:], label=\"F\")\n plt.plot(\n plt_time[:], plt_is_left_in_contact[:], label=\"is_left_in_contact\"\n )\n plt.legend()\n\n new_ = True\n plt.figure(\"Z\")\n plt.plot(\n plt_time[:],\n np.array(plt_left_eef_real_pos)[warmup:, 2],\n label=\"left_z\",\n )\n plt.plot(\n plt_time[:],\n np.array(plt_right_eef_real_pos)[warmup:, 2],\n label=\"right_z\",\n )\n plt.plot(\n plt_time[:], np.array(plt_left_foot_position)[:, 2], label=\"des_left_z\"\n )\n plt.plot(\n plt_time[:],\n np.array(plt_right_foot_position)[:, 2],\n label=\"des_right_z\",\n )\n # plt.plot(plt_time[230:], np.array(plt_next_step_location)[230:, 2], label=\"next_step_location_z\")\n plt.legend()\n # np.savetxt('plt_left_eef_real_posz' + str(new_) +'.txt', np.array(plt_left_eef_real_pos)[warmup:, 2])\n # np.savetxt('plt_right_eef_real_posz' + str(new_) +'.txt', np.array(plt_right_eef_real_pos)[warmup:, 2])\n # np.savetxt('plt_left_foot_positionz' + str(new_) +'.txt', np.array(plt_left_foot_position)[:, 2])\n # np.savetxt('plt_right_foot_positionz' + str(new_) +'.txt', np.array(plt_right_foot_position)[:, 2])\n\n plt.figure(\"xy\")\n plt.plot(\n plt_time[:],\n np.array(plt_left_eef_real_pos)[warmup:, 0],\n label=\"left_x\",\n )\n plt.plot(\n plt_time[:],\n np.array(plt_left_eef_real_pos)[warmup:, 1],\n label=\"left_y\",\n )\n plt.plot(\n plt_time[:],\n 
np.array(plt_right_eef_real_pos)[warmup:, 0],\n label=\"right_x\",\n )\n plt.plot(\n plt_time[:],\n np.array(plt_right_eef_real_pos)[warmup:, 1],\n label=\"right_y\",\n )\n plt.plot(\n plt_time[:], np.array(plt_left_foot_position)[:, 0], label=\"des_left_x\"\n )\n plt.plot(\n plt_time[:], np.array(plt_left_foot_position)[:, 1], label=\"des_lef_y\"\n )\n plt.plot(\n plt_time[:],\n np.array(plt_right_foot_position)[:, 0],\n label=\"des_right_x\",\n )\n plt.plot(\n plt_time[:],\n np.array(plt_right_foot_position)[:, 1],\n label=\"des_right_y\",\n )\n plt.plot(\n plt_time[:],\n np.array(plt_next_step_location)[:, 0],\n label=\"next_step_location_x\",\n )\n plt.plot(\n plt_time[:],\n np.array(plt_next_step_location)[:, 1],\n label=\"next_step_location_y\",\n )\n # plt.plot(plt_time[230:], np.array(plt_next_step_location)[230:, 2], label=\"next_step_location_z\")\n plt.legend()\n # np.savetxt('plt_left_eef_real_posx' + str(new_) +'.txt', np.array(plt_left_eef_real_pos)[warmup:, 0])\n # np.savetxt('plt_left_eef_real_posy' + str(new_) +'.txt', np.array(plt_left_eef_real_pos)[warmup:, 1])\n # np.savetxt('plt_right_eef_real_posx' + str(new_) +'.txt', np.array(plt_right_eef_real_pos)[warmup:, 0])\n # np.savetxt('plt_right_eef_real_posy' + str(new_) +'.txt', np.array(plt_right_eef_real_pos)[warmup:, 1])\n # np.savetxt('plt_left_foot_positionx' + str(new_) +'.txt', np.array(plt_left_foot_position)[:, 0])\n # np.savetxt('plt_left_foot_positiony' + str(new_) +'.txt', np.array(plt_left_foot_position)[:, 1])\n # np.savetxt('plt_right_foot_positionx' + str(new_) +'.txt', np.array(plt_right_foot_position)[:, 0])\n # np.savetxt('plt_right_foot_positiony' + str(new_) +'.txt', np.array(plt_right_foot_position)[:, 1])\n # np.savetxt('plt_next_step_locationx' + str(new_) +'.txt', np.array(plt_next_step_location)[:, 0])\n # np.savetxt('plt_next_step_locationy' + str(new_) +'.txt', np.array(plt_next_step_location)[:, 1])\n # np.savetxt('plt_is_left_in_contact' + str(new_) +'.txt', np.array(plt_is_left_in_contact)[:])\n\n # plt.figure(\"last_step_touchdown\")\n # plt.plot(plt_time, np.array(plt_time_from_last_step_touchdown)[:])\n # plt.plot(plt_time, np.array(plt_duration_before_step_landing)[:])\n\n # plt.figure(\"support_foot\")\n # plt.plot(plt_time, np.array(plt_current_support_foot)[:,0])\n # plt.plot(plt_time, np.array(plt_current_support_foot)[:,1])\n # plt.plot(plt_time, np.array(plt_current_support_foot)[:,2])\n\n # plt.figure(\"warmup2\")\n # # plt.plot(np.array(plt_x)[:, :], label=\"des\")\n # # plt.plot(np.array(plt_r)[warmup:, 2, 0], label = \"real\")\n # plt.plot(np.array(plt_F)[warmup:, :], label = \"Force\")\n # plt.legend()\n #\n # plt.figure(\"warm up\")\n # plt.plot(np.array(plt_left_eef_real_pos)[1:, :], label=\"left_eef_real_pos\")\n # plt.plot(np.array(plt_right_eef_real_pos)[1:, :], label=\"right_eef_real_pos\")\n # plt.legend()\n #\n # plt.figure(\"warm up1\")\n # plt.plot(np.array(plt_x_com)[:, :], label=\"com\")\n # plt.plot(np.array(plt_xd_com)[:, :], label=\"xd_com\")\n # plt.plot(np.array(plt_qdot_com)[:, :6, 0], label=\"qdot\")\n # plt.legend()\n\n # fig, ax = plt.subplots(3, 2)\n # ax[0][0].plot(plt_time_all, np.array(plt_eq_fifteen)[:, 0], 'o', markersize=1, label='$\\max F_x$')\n # ax[0][0].legend()\n # ax[1][0].plot(plt_time_all, np.array(plt_eq_fifteen)[:, 1], 'o', markersize=1, label='$\\max F_y$')\n # ax[1][0].legend()\n # ax[2][0].plot(plt_time_all, np.array(plt_eq_fifteen)[:, 2], 'o', markersize=1, label='$\\max F_z$')\n # ax[2][0].legend()\n # ax[0][1].plot(plt_time_all, 
np.array(plt_eq_fifteen)[:, 3], 'o', markersize=1, label='$\\min F_x$')\n # ax[0][1].legend()\n # ax[1][1].plot(plt_time_all, np.array(plt_eq_fifteen)[:, 4], 'o', markersize=1, label='$\\min F_y$')\n # ax[1][1].legend()\n # ax[2][1].plot(plt_time_all, np.array(plt_eq_fifteen)[:, 5], 'o', markersize=1, label='$\\min F_z$')\n # ax[2][1].legend()\n # plt.savefig(\"min_max\" + \".pdf\")\n\n plt.show()\n"
] |
[
[
"numpy.array",
"numpy.matrix",
"numpy.zeros",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.linalg.pinv",
"matplotlib.pyplot.figure",
"numpy.eye",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.linalg.inv"
]
] |
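The dcm_local quantity logged in the walking script above is the divergent component of motion, x_com + xd_com / omega. A minimal sketch of that computation, assuming the usual linear-inverted-pendulum convention omega = sqrt(g / com_height) (the script itself takes omega as given, so this choice is an assumption):

    import numpy as np

    def dcm(x_com, xd_com, com_height, g=9.81):
        """Divergent component of motion: xi = x + xd / omega."""
        omega = np.sqrt(g / com_height)  # assumed LIP natural frequency
        return np.asarray(x_com) + np.asarray(xd_com) / omega

    # usage: dcm([0.0, 0.01, 0.35], [0.1, 0.0, 0.0], com_height=0.35)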
TanselArif-21/ds_modules_101
|
[
"047516e8aa2086e7c0628e23785f9f3ba14b271c"
] |
[
"ds_modules_101/Data/__init__.py"
] |
[
"import pkg_resources\nimport pandas as pd\n\ntitanic_f = pkg_resources.resource_filename(__name__, 'titanic/titanic.csv')\ntitanic_df = pd.read_csv(titanic_f)\n\nibd_f = pkg_resources.resource_filename(__name__, 'ibd/IBD.csv')\nibd_df = pd.read_csv(ibd_f)\n\nhr_f = pkg_resources.resource_filename(__name__, 'HR/HR.csv')\nhr_df = pd.read_csv(hr_f)"
] |
[
[
"pandas.read_csv"
]
] |
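The pattern in this __init__.py (resolve a CSV shipped inside the package, then read it with pandas) generalizes to any bundled data file. A minimal sketch; the resource path in the usage line is one of the files the module above actually loads, but the helper name is ours:

    import pkg_resources
    import pandas as pd

    def load_packaged_csv(resource_path):
        # Resolve a file bundled with this package to an absolute path,
        # then load it as a DataFrame.
        f = pkg_resources.resource_filename(__name__, resource_path)
        return pd.read_csv(f)

    # e.g. df = load_packaged_csv('titanic/titanic.csv')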
rhett-chen/airobot
|
[
"5c098e675c48a2b47ab931df02731c5e0b256bda"
] |
[
"scripts/ur5e/sim/camera_pitch.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom airobot import Robot\n\n\ndef main():\n \"\"\"\n This function demonstrates how the pitch angle (\n the pitch angle that is defined in robot.setup_camera) changes\n the camera view.\n \"\"\"\n robot = Robot('ur5e', pb_cfg={'gui': True})\n focus_pt = [0, 0, 1] # ([x, y, z])\n robot.arm.go_home()\n img = np.random.rand(480, 640)\n image = plt.imshow(img, interpolation='none',\n animated=True, label=\"cam\")\n ax = plt.gca()\n while True:\n for pitch in range(0, 360, 10):\n robot.cam.setup_camera(focus_pt=focus_pt,\n dist=3,\n yaw=0,\n pitch=pitch,\n roll=0)\n rgb, depth = robot.cam.get_images(get_rgb=True,\n get_depth=True)\n image.set_data(rgb)\n ax.plot([0])\n plt.pause(0.01)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.gca",
"numpy.random.rand",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.imshow"
]
] |
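camera_pitch.py relies on matplotlib's in-place image update (set_data plus pause) instead of re-plotting each frame, which keeps the loop cheap. A standalone sketch of that pattern, with random frames standing in for the simulated camera:

    import matplotlib.pyplot as plt
    import numpy as np

    image = plt.imshow(np.zeros((480, 640, 3)), interpolation='none', animated=True)
    for _ in range(20):
        frame = np.random.rand(480, 640, 3)  # placeholder for robot.cam.get_images()
        image.set_data(frame)                # update pixels without creating a new image
        plt.pause(0.01)                      # let the GUI event loop redraw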
vagrantxiao/pylearn2
|
[
"95dca41d8f4818399d891fed9bd0c4bedca8efc7"
] |
[
"pylearn2/datasets/svhn.py"
] |
[
"\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nimport os\nimport gc\nimport warnings\ntry:\n import tables\nexcept ImportError:\n warnings.warn(\"Couldn't import tables, so far SVHN is \"\n \"only supported with PyTables\")\nimport numpy\nfrom theano.compat.six.moves import xrange\nfrom theano import config\nfrom pylearn2.datasets import dense_design_matrix\nfrom pylearn2.utils.serial import load\nfrom pylearn2.utils.string_utils import preprocess\nfrom pylearn2.utils.rng import make_np_rng\n\n\nclass SVHN(dense_design_matrix.DenseDesignMatrixPyTables):\n\n \"\"\"\n Only for faster access there is a copy of hdf5 file in PYLEARN2_DATA_PATH\n but it mean to be only readable. If you wish to modify the data, you\n should pass a local copy to the path argument.\n\n Parameters\n ----------\n which_set : WRITEME\n path : WRITEME\n center : WRITEME\n scale : WRITEME\n start : WRITEME\n stop : WRITEME\n axes : WRITEME\n preprocessor : WRITEME\n \"\"\"\n\n mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,\n 'splitted_train': 4, 'valid': 5}\n\n data_path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'\n\n def __init__(self, which_set, path=None, center=False, scale=False,\n start=None, stop=None, axes=('b', 0, 1, 'c'),\n preprocessor=None):\n\n assert which_set in self.mapper.keys()\n\n self.__dict__.update(locals())\n del self.self\n\n if path is None:\n path = self.data_path\n mode = 'r'\n else:\n mode = 'r+'\n warnings.warn(\"Because path is not same as PYLEARN2_DATA_PATH \"\n \"be aware that data might have been \"\n \"modified or pre-processed.\")\n\n if mode == 'r' and (scale or\n center or\n (start is not None) or\n (stop is not None)):\n raise ValueError(\"Only for speed there is a copy of hdf5 file in \"\n \"PYLEARN2_DATA_PATH but it meant to be only \"\n \"readable. If you wish to modify the data, you \"\n \"should pass a local copy to the path argument.\")\n\n # load data\n path = preprocess(path)\n file_n = \"{0}_32x32.h5\".format(os.path.join(path, \"h5\", which_set))\n if os.path.isfile(file_n):\n make_new = False\n else:\n make_new = True\n warnings.warn(\"Over riding existing file: {0}\".format(file_n))\n\n # if hdf5 file does not exist make them\n if make_new:\n self.filters = tables.Filters(complib='blosc', complevel=5)\n self.make_data(which_set, path)\n\n self.h5file = tables.openFile(file_n, mode=mode)\n data = self.h5file.getNode('/', \"Data\")\n\n if start is not None or stop is not None:\n if not hasattr(self, 'filters'):\n self.filters = tables.Filters(complib='blosc', complevel=5)\n self.h5file, data = self.resize(self.h5file, start, stop)\n\n # rescale or center if permitted\n if center and scale:\n data.X[:] -= 127.5\n data.X[:] /= 127.5\n elif center:\n data.X[:] -= 127.5\n elif scale:\n data.X[:] /= 255.\n\n view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),\n axes)\n super(SVHN, self).__init__(X=data.X, y=data.y,\n y_labels=numpy.max(data.y) + 1,\n view_converter=view_converter)\n\n if preprocessor:\n if which_set in ['train', 'train_all', 'splitted_train']:\n can_fit = True\n preprocessor.apply(self, can_fit)\n\n self.h5file.flush()\n\n def get_test_set(self):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n return SVHN(which_set='test', path=self.path,\n center=self.center, scale=self.scale,\n start=self.start, stop=self.stop,\n axes=self.axes, preprocessor=self.preprocessor)\n\n def make_data(self, which_set, path, shuffle=True):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n sizes = {'train': 73257, 'test': 26032, 'extra': 531131,\n 'train_all': 604388, 'valid': 6000, 'splitted_train': 598388}\n image_size = 32 * 32 * 3\n h_file_n = \"{0}_32x32.h5\".format(os.path.join(path, \"h5\", which_set))\n # The table size for y is being set to [sizes[which_set], 1] since y\n # contains the labels. If you are using the old one-hot scheme then\n # this needs to be set to 10.\n h5file, node = self.init_hdf5(h_file_n,\n ([sizes[which_set], image_size],\n [sizes[which_set], 1]),\n title=\"SVHN Dataset\",\n y_dtype='int')\n\n # For consistency between experiments better to make new random stream\n rng = make_np_rng(None, 322, which_method=\"shuffle\")\n\n def design_matrix_view(data_x):\n \"\"\"reshape data_x to design matrix view\n \"\"\"\n data_x = numpy.transpose(data_x, axes=[3, 2, 0, 1])\n data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))\n return data_x\n\n def load_data(path):\n \"Loads data from mat files\"\n\n data = load(path)\n data_x = numpy.cast[config.floatX](data['X'])\n data_y = data['y']\n del data\n gc.collect()\n return design_matrix_view(data_x), data_y\n\n def split_train_valid(path, num_valid_train=400,\n num_valid_extra=200):\n \"\"\"\n Extract number of class balanced samples from train and extra\n sets for validation, and regard the remaining as new train set.\n\n Parameters\n ----------\n num_valid_train : int, optional\n Number of samples per class from train\n num_valid_extra : int, optional\n Number of samples per class from extra\n \"\"\"\n\n # load difficult train\n data = load(\"{0}train_32x32.mat\".format(path))\n valid_index = []\n for i in xrange(1, 11):\n index = numpy.nonzero(data['y'] == i)[0]\n index.flags.writeable = 1\n rng.shuffle(index)\n valid_index.append(index[:num_valid_train])\n\n valid_index = set(numpy.concatenate(valid_index))\n train_index = set(numpy.arange(data['X'].shape[3])) - valid_index\n valid_index = list(valid_index)\n train_index = list(train_index)\n\n train_x = data['X'][:, :, :, train_index]\n train_y = data['y'][train_index, :]\n valid_x = data['X'][:, :, :, valid_index]\n valid_y = data['y'][valid_index, :]\n\n train_size = data['X'].shape[3]\n assert train_x.shape[3] == train_size - num_valid_train * 10\n assert train_y.shape[0] == train_size - num_valid_train * 10\n assert valid_x.shape[3] == num_valid_train * 10\n assert valid_y.shape[0] == num_valid_train * 10\n del data\n gc.collect()\n\n # load extra train\n data = load(\"{0}extra_32x32.mat\".format(path))\n valid_index = []\n for i in xrange(1, 11):\n index = numpy.nonzero(data['y'] == i)[0]\n index.flags.writeable = 1\n rng.shuffle(index)\n valid_index.append(index[:num_valid_extra])\n\n valid_index = set(numpy.concatenate(valid_index))\n train_index = set(numpy.arange(data['X'].shape[3])) - valid_index\n valid_index = list(valid_index)\n train_index = list(train_index)\n\n train_x = numpy.concatenate((train_x,\n data['X'][:, :, :, train_index]),\n axis=3)\n train_y = numpy.concatenate((train_y, data['y'][train_index, :]))\n valid_x = numpy.concatenate((valid_x,\n data['X'][:, :, :, valid_index]),\n axis=3)\n valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))\n\n extra_size = data['X'].shape[3]\n sizes['valid'] = (num_valid_train + num_valid_extra) * 10\n sizes['splitted_train'] = train_size + extra_size - sizes['valid']\n assert train_x.shape[3] == sizes['splitted_train']\n assert train_y.shape[0] == sizes['splitted_train']\n assert valid_x.shape[3] == sizes['valid']\n assert valid_y.shape[0] == 
sizes['valid']\n del data\n gc.collect()\n\n train_x = numpy.cast[config.floatX](train_x)\n valid_x = numpy.cast[config.floatX](valid_x)\n\n return (design_matrix_view(train_x), train_y),\\\n (design_matrix_view(valid_x), valid_y)\n\n # The original splits\n if which_set in ['train', 'test']:\n data_x, data_y = load_data(\"{0}{1}_32x32.mat\".format(path,\n which_set))\n\n # Train valid splits\n elif which_set in ['splitted_train', 'valid']:\n train_data, valid_data = split_train_valid(path)\n if which_set == 'splitted_train':\n data_x, data_y = train_data\n else:\n data_x, data_y = valid_data\n del train_data\n\n # extra data\n elif which_set in ['train_all', 'extra']:\n data_x, data_y = load_data(\"{0}extra_32x32.mat\".format(path))\n if which_set == 'train_all':\n train_x, train_y = load_data(\"{0}train_32x32.mat\".format(path))\n data_x = numpy.concatenate((data_x, train_x))\n data_y = numpy.concatenate((data_y, train_y))\n\n assert data_x.shape[0] == sizes[which_set]\n assert data_y.shape[0] == sizes[which_set]\n\n if shuffle:\n index = range(data_x.shape[0])\n rng.shuffle(index)\n data_x = data_x[index, :]\n data_y = data_y[index, :]\n\n # .mat labels for SVHN are in range [1,10]\n # So subtract 1 to map labels to range [0,9]\n # This is consistent with range for MNIST dataset labels\n data_y = data_y - 1\n\n SVHN.fill_hdf5(h5file, data_x, data_y, node)\n h5file.close()\n\n\nclass SVHN_On_Memory(dense_design_matrix.DenseDesignMatrix):\n\n \"\"\"\n A version of SVHN dataset that loads everything into the memory instead of\n using pytables.\n\n Parameters\n ----------\n which_set : WRITEME\n center : WRITEME\n scale : WRITEME\n start : WRITEME\n stop : WRITEME\n axes : WRITEME\n preprocessor : WRITEME\n \"\"\"\n\n mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,\n 'splitted_train': 4, 'valid': 5}\n\n def __init__(self, which_set, center=False, scale=False,\n start=None, stop=None, axes=('b', 0, 1, 'c'),\n preprocessor=None):\n\n assert which_set in self.mapper.keys()\n\n self.__dict__.update(locals())\n del self.self\n\n path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'\n\n # load data\n path = preprocess(path)\n data_x, data_y = self.make_data(which_set, path)\n\n # rescale or center if permitted\n if center and scale:\n data_x -= 127.5\n data_x /= 127.5\n elif center:\n data_x -= 127.5\n elif scale:\n data_x /= 255.\n\n view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),\n axes)\n super(SVHN_On_Memory, self).__init__(X=data_x, y=data_y, y_labels=10,\n view_converter=view_converter)\n\n if preprocessor:\n if which_set in ['train', 'train_all', 'splitted_train']:\n can_fit = True\n else:\n can_fit = False\n preprocessor.apply(self, can_fit)\n\n del data_x, data_y\n gc.collect()\n\n def get_test_set(self):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n return SVHN_On_Memory(which_set='test', path=self.path,\n center=self.center, scale=self.scale,\n start=self.start, stop=self.stop,\n axes=self.axes, preprocessor=self.preprocessor)\n\n def make_data(self, which_set, path, shuffle=True):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n sizes = {'train': 73257, 'test': 26032, 'extra': 531131,\n 'train_all': 604388, 'valid': 6000, 'splitted_train': 598388}\n image_size = 32 * 32 * 3\n\n # For consistency between experiments better to make new random stream\n rng = make_np_rng(None, 322, which_method=\"shuffle\")\n\n def design_matrix_view(data_x):\n \"\"\"reshape data_x to design matrix view\n \"\"\"\n data_x = numpy.transpose(data_x, axes=[3, 2, 0, 1])\n data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))\n return data_x\n\n def load_data(path):\n \"Loads data from mat files\"\n\n data = load(path)\n data_x = numpy.cast[config.floatX](data['X'])\n import ipdb\n ipdb.set_trace()\n data_y = data['y']\n del data\n gc.collect()\n return design_matrix_view(data_x), data_y\n\n def split_train_valid(path, num_valid_train=400,\n num_valid_extra=200):\n \"\"\"\n Extract number of class balanced samples from train and extra\n sets for validation, and regard the remaining as new train set.\n\n Parameters\n ----------\n num_valid_train : int, optional\n Number of samples per class from train\n num_valid_extra : int, optional\n Number of samples per class from extra\n \"\"\"\n\n # load difficult train\n data = load(\"{0}train_32x32.mat\".format(path))\n valid_index = []\n for i in xrange(1, 11):\n index = numpy.nonzero(data['y'] == i)[0]\n index.flags.writeable = 1\n rng.shuffle(index)\n valid_index.append(index[:num_valid_train])\n\n valid_index = set(numpy.concatenate(valid_index))\n train_index = set(numpy.arange(data['X'].shape[3])) - valid_index\n valid_index = list(valid_index)\n train_index = list(train_index)\n\n train_x = data['X'][:, :, :, train_index]\n train_y = data['y'][train_index, :]\n valid_x = data['X'][:, :, :, valid_index]\n valid_y = data['y'][valid_index, :]\n\n train_size = data['X'].shape[3]\n assert train_x.shape[3] == train_size - num_valid_train * 10\n assert train_y.shape[0] == train_size - num_valid_train * 10\n assert valid_x.shape[3] == num_valid_train * 10\n assert valid_y.shape[0] == num_valid_train * 10\n del data\n gc.collect()\n\n # load extra train\n data = load(\"{0}extra_32x32.mat\".format(path))\n valid_index = []\n for i in xrange(1, 11):\n index = numpy.nonzero(data['y'] == i)[0]\n index.flags.writeable = 1\n rng.shuffle(index)\n valid_index.append(index[:num_valid_extra])\n\n valid_index = set(numpy.concatenate(valid_index))\n train_index = set(numpy.arange(data['X'].shape[3])) - valid_index\n valid_index = list(valid_index)\n train_index = list(train_index)\n\n train_x = numpy.concatenate((train_x,\n data['X'][:, :, :, train_index]),\n axis=3)\n train_y = numpy.concatenate((train_y, data['y'][train_index, :]))\n valid_x = numpy.concatenate(\n (valid_x, data['X'][:, :, :, valid_index]),\n axis=3)\n valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))\n\n extra_size = data['X'].shape[3]\n sizes['valid'] = (num_valid_train + num_valid_extra) * 10\n sizes['splitted_train'] = train_size + extra_size - sizes['valid']\n assert train_x.shape[3] == sizes['splitted_train']\n assert train_y.shape[0] == sizes['splitted_train']\n assert valid_x.shape[3] == sizes['valid']\n assert valid_y.shape[0] == sizes['valid']\n del data\n gc.collect()\n\n train_x = numpy.cast[config.floatX](train_x)\n valid_x = numpy.cast[config.floatX](valid_x)\n return design_matrix_view(train_x), train_y,\\\n design_matrix_view(valid_x), valid_y\n\n # The original splits\n if which_set in ['train', 'test']:\n data_x, data_y = load_data(\"{0}{1}_32x32.mat\".format(path,\n 
which_set))\n\n # Train valid splits\n elif which_set in ['splitted_train', 'valid']:\n train_data, valid_data = split_train_valid(path)\n if which_set == 'splitted_train':\n data_x, data_y = train_data\n else:\n data_x, data_y = valid_data\n del train_data\n\n # extra data\n elif which_set in ['train_all', 'extra']:\n data_x, data_y = load_data(\"{0}extra_32x32.mat\".format(path))\n if which_set == 'train_all':\n train_x, train_y = load_data(\"{0}train_32x32.mat\".format(path))\n data_x = numpy.concatenate((data_x, train_x))\n data_y = numpy.concatenate((data_y, train_y))\n\n assert data_x.shape[0] == sizes[which_set]\n assert data_y.shape[0] == sizes[which_set]\n\n if shuffle:\n index = range(data_x.shape[0])\n rng.shuffle(index)\n data_x = data_x[index, :]\n data_y = data_y[index, :]\n\n return data_x, data_y\n"
] |
[
[
"numpy.concatenate",
"numpy.max",
"numpy.nonzero",
"numpy.transpose",
"numpy.arange"
]
] |
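The split_train_valid helper in svhn.py draws a fixed number of class-balanced indices per label before treating the remainder as the new train set. A minimal numpy-only sketch of that selection step, assuming labels in [1, 10] as in the raw SVHN .mat files (the seed 322 matches the source; the helper name is ours):

    import numpy as np

    def balanced_valid_indices(y, per_class, rng, labels=range(1, 11)):
        picked = []
        for c in labels:
            idx = np.nonzero(y == c)[0].copy()  # writable copy before shuffling
            rng.shuffle(idx)
            picked.append(idx[:per_class])
        return np.concatenate(picked)

    rng = np.random.RandomState(322)
    y = rng.randint(1, 11, size=1000)
    valid = balanced_valid_indices(y, per_class=5, rng=rng)
    train = np.setdiff1d(np.arange(len(y)), valid)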
clemenshage/grslra
|
[
"00f61b4ef08208d12e8e803d10f8ebbe16d8614a"
] |
[
"experiments/4_grpca/video/print_frames.py"
] |
[
"from scipy.io import loadmat\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# This script prints selected frames of the stored escalator video sequence\n\ndata = loadmat('escalator_130p.mat')\n\nX = data[\"X\"]\ndimensions = data[\"dimensions\"][0]\n\nframenumbers = [1806, 1813, 1820]\n\nfor framenumber in framenumbers:\n imagename = 'frames/escalator_' + '{:03d}'.format(framenumber) + '.png'\n\n frame = np.reshape(X[:,framenumber+1], dimensions, order='F')\n plt.figure()\n fig=plt.imshow(frame, cmap=\"gray\", clim=(0, 255))\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n plt.tight_layout()\n plt.savefig(imagename, dpi=300, bbox_inches='tight')\n plt.close()"
] |
[
[
"numpy.reshape",
"matplotlib.pyplot.savefig",
"scipy.io.loadmat",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.imshow"
]
] |
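The key detail in print_frames.py is order='F': each video frame was vectorized column-major (MATLAB style), so it must be unflattened the same way. A sketch of the round trip, with made-up frame dimensions:

    import numpy as np

    dimensions = (130, 160)                      # hypothetical frame size
    frame = np.arange(np.prod(dimensions)).reshape(dimensions)
    column = frame.flatten(order='F')            # vectorize column-major
    restored = np.reshape(column, dimensions, order='F')
    assert np.array_equal(frame, restored)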
Sigma-i/traveling-salesman-problem_hybrid
|
[
"dcb0d263282b6ba94323f1d4e30aee7713ad2b90"
] |
[
"traveling_salesman_problem.py"
] |
[
"# Copyright 2020 Sigma-i Co.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy\n\nfrom pyqubo import Array, Placeholder, Constraint\nfrom dwave.system import LeapHybridSampler\n\n\n# --- Problem setting ---\n\n# Define the coordinates of each city and one origin city (at random in this demo)\nN = 9\n\nX_range = Y_range = 500\nx_pos = [np.random.randint(0, X_range) for _ in range(N)]\ny_pos = [np.random.randint(0, Y_range) for _ in range(N)]\npositions = {i: (x_pos[i], y_pos[i]) for i in range(N)} # you can rewrite this line\n\n# Choose origin (and end) city and fix it\norigin = np.random.choice(np.arange(N)) # \norigin_pos = positions[origin]\n\nothers = list(range(N))\nothers.remove(origin)\n\n# Set a graph\nG = nx.Graph()\nG.add_nodes_from(np.arange(N))\nnx.draw(G, positions, node_color=['red' if i == origin else 'blue' for i in range(N)], with_labels=True)\n\n# Calculate the distance between each city\ndistances = np.zeros((N, N))\nfor i in range(N):\n for j in range(i+1, N):\n distances[i][j] = np.sqrt((x_pos[i] - x_pos[j])**2 + (y_pos[i] - y_pos[j])**2)\n distances[j][i] = distances[i][j]\n\n\n# --- Problem formulation ---\n\n# Use pyqubo package\nq = Array.create('q', shape=(N-1, N-1), vartype='BINARY')\n\ndef normalize(exp):\n \"\"\" Normalization function \"\"\"\n qubo, offset = exp.compile().to_qubo()\n \n max_coeff = abs(np.max(list(qubo.values())))\n min_coeff = abs(np.min(list(qubo.values())))\n norm = max_coeff if max_coeff - min_coeff > 0 else min_coeff\n \n return exp / norm\n\n# Cost function\nexp_origin = sum(distances[origin][others[i]]*1*q[i][0] + \n distances[others[i]][origin]*q[i][N-2]*1 for i in range(N-1))\nexp_others = sum(distances[others[i]][others[j]]*q[i][t]*q[j][t+1]\n for i in range(N-1) for j in range(N-1) for t in range(N-2))\nH_cost = normalize(exp_origin + exp_others)\n\n# Constraint\nH_city = Constraint(normalize(sum((sum(q[i][t] for t in range(N-1))-1)**2 for i in range(N-1))), 'city')\nH_time = Constraint(normalize(sum((sum(q[i][t] for i in range(N-1))-1)**2 for t in range(N-1))), 'time')\n\n# Express objective function and compile it to model\nH = H_cost + Placeholder('lam') * (H_city + H_time)\nmodel = H.compile()\n\n\n# --- Solve QUBO ---\n\n# Get the QUBO matrix from the model\nfeed_dict = {'lam':5.0} # the value of constraint\nqubo, offset = model.to_qubo(feed_dict=feed_dict)\n\n# Run QUBO on Leap's Hybrid Solver (hybrid_v1)\nsampler = LeapHybridSampler(token='') \nresponse = sampler.sample_qubo(qubo)\nsample = response.record['sample'][0]\n\n# decode the solution and check if constrains are satisfied\nsample_dict = {idx: sample[i] for i,idx in enumerate(response.variables)}\ndecoded, broken, energy = model.decode_solution(sample_dict, 'BINARY', feed_dict=feed_dict) \nif broken == {}:\n print('The solution is valid')\nelse:\n print('The solution is invalid')\n\n\n# --- Visualize the result ---\n\n# Create an array which shows traveling order 
from the solution\nsolution = sample.reshape(N-1, N-1)\norder = [origin]\n\nfor li in solution.T:\n cities = np.where(li)[0].tolist()\n if cities == []:\n continue\n if len(cities) > 1:\n order.append(others[np.random.choice(cities, 1)[0]])\n else:\n order.append(others[cities[0]])\n\n# Plot the result\nnew = np.append(np.zeros((N-1,1)), solution, axis=1) \nresult_arr = np.insert(new, origin, np.append(1,np.zeros(N-1)), axis=0)\n \nfig = plt.figure(facecolor='w', edgecolor='k')\nax = fig.subplots()\nax.imshow(result_arr)\nax.set_xlabel('Order')\nax.set_ylabel('Cities')\nplt.tight_layout()\nplt.savefig('result.png')\n\n# Draw the route on graph G \nedges = []\nedges += [(order[t], order[t+1]) for t in range(len(order)-1)]\nedges += [(order[-1], origin)]\n \nG_copy = deepcopy(G)\nG_copy.add_edges_from(edges)\n\nplt.figure()\nnx.draw(G_copy, positions, node_color=['red' if i == origin else 'blue' for i in range(N)], with_labels=True)\nplt.savefig('route.png')\n \ndef calc_dis(order_arr):\n \"\"\"Calculate total traveling distance (the value of H_cost) of the solution\"\"\"\n dis = sum(distances[order_arr[t]][order_arr[t+1]] for t in range(len(order_arr)-1))\\\n + distances[order_arr[-1]][origin]\n return dis\nprint(f'distance: {calc_dis(order)}')"
] |
[
[
"numpy.random.choice",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.random.randint",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.sqrt"
]
] |
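The pairwise distance matrix in the TSP script above is filled with two Python loops; an equivalent broadcast-based sketch, which for larger N is also faster:

    import numpy as np

    def distance_matrix(x_pos, y_pos):
        pts = np.stack([x_pos, y_pos], axis=1).astype(float)   # (N, 2)
        diff = pts[:, None, :] - pts[None, :, :]               # (N, N, 2)
        return np.sqrt((diff ** 2).sum(axis=-1))               # symmetric, zero diagonal

    d = distance_matrix([0, 3, 0], [0, 0, 4])   # d[1, 2] == 5.0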
khf118/Unscented-Kalman-Filter
|
[
"8cb35a9c8246f849a26bf3692ed8ecace963b956"
] |
[
"nis_plot.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = \"2 2 6 2 1 2 6 2 0 2 2 0 1 5 2 7 6 4 4 3 2 0 1 0 2 10 0 1 3 2 1 1 0 3 1 2 0 0 0 0 3 0 2 0 0 2 7 2 4 9 4 2 2 1 0 1 3 7 1 1 0 6 6 2 2 6 1 2 3 10 3 5 2 1 0 2 1 2 0 1 4 5 1 1 1 4 0 3 0 0 2 1 1 4 1 1 7 3 2 2 2 4 0 2 0 2 1 2 0 1 3 3 2 2 1 1 0 2 1 2 5 5 1 3 3 2 0 1 1 4 0 0 3 0 1 5 5 3 5 2 3 3 1 0 4 0 0 2 3 0 5 1 7 0 1 1 1 1 2 1 2 3 2 0 4 2 2 4 8 1 0 1 3 1 4 1 2 1 6 2 0 3 1 0 1 3 0 1 1 6 0 11 3 0 1 1 2 1 5 2 2 1 6 8 0 0 3 2 1 0 8 2 0 3 4 1 5 1 2 5 1 5 4 1 0 4 1 5 2 0 0 0 0 7 6 0 1 0 1 0 2 9 0 6 3 2 6 1 1\"\ny = x.split(\" \")\n#y = [2,3,1]\nprint(y)\nplt.plot(y)\nplt.show()\n\nx = \"0 0 0 0 0 2 0 13 3 2 5 0 3 0 0 0 0 3 1 1 2 0 0 2 2 0 0 2 0 0 0 0 0 0 0 1 0 0 1 3 1 1 3 3 3 1 1 2 0 5 0 2 0 1 0 1 2 3 0 0 0 2 3 0 1 0 0 2 0 0 1 1 0 0 1 0 2 0 1 0 0 3 1 0 2 2 0 2 0 0 0 2 1 0 1 1 2 1 6 0 0 2 1 0 1 0 0 4 1 3 0 0 4 2 0 2 0 3 0 1 0 0 2 0 1 0 2 2 0 0 1 0 1 5 3 1 1 2 1 0 0 0 2 1 4 0 1 0 1 0 3 0 0 3 3 3 2 0 1 4 1 1 0 1 5 2 0 1 0 0 2 0 3 1 14 0 1 1 0 1 6 2 0 2 3 5 2 0 0 0 0 1 4 1 5 1 1 0 0 2 1 2 0 0 0 0 1 0 3 2 0 0 1 0 0 4 0 0 2 0 4 3 1 0 0 6 0 2 0 0 1 1 2 0 2 7 0 3 2 2 0 2 1 2 0 1 4 6\"\ny = x.split(\" \")\n#y = [2,3,1]\nprint(y)\nplt.plot(y)\nplt.show()"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
]
] |
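One caveat with nis_plot.py as written: x.split(" ") yields strings, which recent matplotlib plots as categorical values rather than numbers. A sketch of the numeric version, with a 95% chi-square reference line (7.815 for 3 degrees of freedom) added on the assumption that these NIS values come from 3-dimensional radar measurements:

    import matplotlib.pyplot as plt

    x = "2 2 6 2 1 2"                 # stand-in for the full NIS string above
    nis = [float(v) for v in x.split()]
    plt.plot(nis, label='NIS')
    plt.axhline(7.815, color='r', linestyle='--', label='chi-square 95% (3 dof)')
    plt.legend()
    plt.show()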
jplalor/transformers-android-demo
|
[
"926c98ca827319880e25ae18b985e056972c5014"
] |
[
"model_converter/nsmc/jit_compile.py"
] |
[
"import os\nimport sys\nimport argparse\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nimport torch\nimport numpy as np\nfrom transformers import ElectraTokenizer, ElectraConfig\n\nfrom model import ElectraForSequenceClassification\n\n\nparser = argparse.ArgumentParser()\n# NOTE This should be same as the setting of the android!!!\nparser.add_argument(\"--max_seq_len\", default=40, type=int, help=\"Maximum sequence length\")\nargs = parser.parse_args()\n\n# 1. Convert model\ntokenizer = ElectraTokenizer.from_pretrained(\"monologg/koelectra-small-finetuned-sentiment\")\n\nmodel = ElectraForSequenceClassification.from_pretrained(\"monologg/koelectra-small-finetuned-sentiment\", torchscript=True)\nmodel.eval()\n\ninput_ids = torch.tensor([[0] * args.max_seq_len], dtype=torch.long)\nprint(input_ids.size())\ntraced_model = torch.jit.trace(\n model,\n input_ids\n)\ntorch.jit.save(traced_model, \"app/src/main/assets/nsmc_small.pt\")\n\n# 2. Testing...\n# Tokenize input text\ntext = \"μ΄ μν μ μμ§λ μ λ΄?\"\nencode_inputs = tokenizer.encode_plus(\n text,\n return_tensors=\"pt\",\n max_length=args.max_seq_len,\n pad_to_max_length=True\n)\nprint(encode_inputs)\nprint(encode_inputs[\"input_ids\"].size())\n\n# Load model\nloaded_model = torch.jit.load(\"app/src/main/assets/nsmc_small.pt\")\nloaded_model.eval()\nwith torch.no_grad():\n outputs = loaded_model(encode_inputs[\"input_ids\"])\nprint(outputs)\nwith torch.no_grad():\n outputs = model(encode_inputs[\"input_ids\"])\nprint(outputs)\n"
] |
[
[
"torch.no_grad",
"torch.jit.load",
"torch.jit.save",
"torch.tensor",
"torch.jit.trace"
]
] |
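After tracing, it is worth confirming that the saved TorchScript module reproduces the eager model's outputs, which jit_compile.py does by printing both. A minimal sketch of the same check on a toy module (the ELECTRA checkpoint above is not re-downloaded here):

    import torch

    model = torch.nn.Linear(4, 2).eval()
    example = torch.randn(1, 4)
    traced = torch.jit.trace(model, example)
    torch.jit.save(traced, "toy.pt")
    loaded = torch.jit.load("toy.pt")
    with torch.no_grad():
        assert torch.allclose(model(example), loaded(example), atol=1e-6)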
lyq628/NLP-Tutorials
|
[
"7c9d117a3542695e79419c835ba9e98ef80800b8"
] |
[
"pytorch/transformer.py"
] |
[
"import torch.nn as nn\nfrom torch.nn.functional import cross_entropy,softmax, relu\nimport numpy as np\nimport torch\nfrom torch.utils import data\nimport utils\nfrom torch.utils.data import DataLoader\nimport argparse\n\nMAX_LEN = 11\n\nclass MultiHead(nn.Module):\n def __init__(self, n_head, model_dim, drop_rate):\n super().__init__()\n self.head_dim = model_dim // n_head\n self.n_head = n_head\n self.model_dim = model_dim\n self.wq = nn.Linear(model_dim, n_head * self.head_dim)\n self.wk = nn.Linear(model_dim, n_head * self.head_dim)\n self.wv = nn.Linear(model_dim, n_head * self.head_dim)\n\n self.o_dense = nn.Linear(model_dim, model_dim)\n self.o_drop = nn.Dropout(drop_rate)\n self.layer_norm = nn.LayerNorm(model_dim)\n self.attention = None\n\n def forward(self,q,k,v,mask,training):\n # residual connect\n residual = q\n dim_per_head= self.head_dim\n num_heads = self.n_head\n batch_size = q.size(0)\n\n # linear projection\n key = self.wk(k) # [n, step, num_heads * head_dim]\n value = self.wv(v) # [n, step, num_heads * head_dim]\n query = self.wq(q) # [n, step, num_heads * head_dim]\n\n # split by head\n query = self.split_heads(query) # [n, n_head, q_step, h_dim]\n key = self.split_heads(key)\n value = self.split_heads(value) # [n, h, step, h_dim]\n context = self.scaled_dot_product_attention(query,key, value, mask) # [n, q_step, h*dv]\n o = self.o_dense(context) # [n, step, dim]\n o = self.o_drop(o)\n\n o = self.layer_norm(residual+o)\n return o\n\n def split_heads(self, x):\n x = torch.reshape(x,(x.shape[0], x.shape[1], self.n_head, self.head_dim))\n return x.permute(0,2,1,3)\n \n def scaled_dot_product_attention(self, q, k, v, mask=None):\n dk = torch.tensor(k.shape[-1]).type(torch.float)\n score = torch.matmul(q,k.permute(0,1,3,2)) / (torch.sqrt(dk) + 1e-8) # [n, n_head, step, step]\n if mask is not None:\n # change the value at masked position to negative infinity,\n # so the attention score at these positions after softmax will close to 0.\n score = score.masked_fill_(mask,-np.inf)\n self.attention = softmax(score,dim=-1)\n context = torch.matmul(self.attention,v) # [n, num_head, step, head_dim]\n context = context.permute(0,2,1,3) # [n, step, num_head, head_dim]\n context = context.reshape((context.shape[0], context.shape[1],-1)) \n return context # [n, step, model_dim]\n\nclass PositionWiseFFN(nn.Module):\n def __init__(self,model_dim, dropout = 0.0):\n super().__init__()\n dff = model_dim*4\n self.l = nn.Linear(model_dim,dff)\n self.o = nn.Linear(dff,model_dim)\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(model_dim)\n\n def forward(self,x):\n o = relu(self.l(x))\n o = self.o(o)\n o = self.dropout(o)\n\n o = self.layer_norm(x + o)\n return o # [n, step, dim]\n\n\n\nclass EncoderLayer(nn.Module):\n\n def __init__(self, n_head, emb_dim, drop_rate):\n super().__init__()\n self.mh = MultiHead(n_head, emb_dim, drop_rate)\n self.ffn = PositionWiseFFN(emb_dim,drop_rate)\n \n def forward(self, xz, training, mask):\n # xz: [n, step, emb_dim]\n context = self.mh(xz, xz, xz, mask, training) # [n, step, emb_dim]\n o = self.ffn(context)\n return o\n\nclass Encoder(nn.Module):\n def __init__(self, n_head, emb_dim, drop_rate, n_layer):\n super().__init__()\n self.encoder_layers = nn.ModuleList(\n [EncoderLayer(n_head, emb_dim, drop_rate) for _ in range(n_layer)]\n ) \n def forward(self, xz, training, mask):\n\n for encoder in self.encoder_layers:\n xz = encoder(xz,training,mask)\n return xz # [n, step, emb_dim]\n\nclass DecoderLayer(nn.Module):\n def 
__init__(self,n_head,model_dim,drop_rate):\n super().__init__()\n self.mh = nn.ModuleList([MultiHead(n_head, model_dim, drop_rate) for _ in range(2)])\n self.ffn = PositionWiseFFN(model_dim,drop_rate)\n \n def forward(self,yz, xz, training, yz_look_ahead_mask,xz_pad_mask):\n dec_output = self.mh[0](yz, yz, yz, yz_look_ahead_mask, training) # [n, step, model_dim]\n \n dec_output = self.mh[1](dec_output, xz, xz, xz_pad_mask, training) # [n, step, model_dim]\n\n dec_output = self.ffn(dec_output) # [n, step, model_dim]\n\n return dec_output\n \nclass Decoder(nn.Module):\n def __init__(self, n_head, model_dim, drop_rate, n_layer):\n super().__init__()\n\n self.num_layers = n_layer\n\n self.decoder_layers = nn.ModuleList(\n [DecoderLayer(n_head, model_dim, drop_rate) for _ in range(n_layer)]\n )\n \n def forward(self, yz, xz, training, yz_look_ahead_mask, xz_pad_mask):\n for decoder in self.decoder_layers:\n yz = decoder(yz, xz, training, yz_look_ahead_mask, xz_pad_mask)\n return yz # [n, step, model_dim]\n\nclass PositionEmbedding(nn.Module):\n def __init__(self, max_len, emb_dim, n_vocab):\n super().__init__()\n pos = np.expand_dims(np.arange(max_len),1) # [max_len, 1]\n pe = pos / np.power(1000, 2*np.expand_dims(np.arange(emb_dim)//2,0)/emb_dim) # [max_len, emb_dim]\n pe[:, 0::2] = np.sin(pe[:, 0::2])\n pe[:, 1::2] = np.cos(pe[:, 1::2])\n pe = np.expand_dims(pe,0) # [1, max_len, emb_dim]\n self.pe = torch.from_numpy(pe).type(torch.float32)\n self.embeddings = nn.Embedding(n_vocab,emb_dim)\n self.embeddings.weight.data.normal_(0,0.1)\n \n def forward(self, x):\n device = self.embeddings.weight.device\n self.pe = self.pe.to(device) \n x_embed = self.embeddings(x) + self.pe # [n, step, emb_dim]\n return x_embed # [n, step, emb_dim]\n\nclass Transformer(nn.Module):\n def __init__(self, n_vocab, max_len, n_layer = 6, emb_dim=512, n_head = 8, drop_rate=0.1, padding_idx=0):\n super().__init__()\n self.max_len = max_len\n self.padding_idx = torch.tensor(padding_idx)\n self.dec_v_emb = n_vocab \n\n self.embed = PositionEmbedding(max_len, emb_dim, n_vocab)\n self.encoder = Encoder(n_head, emb_dim, drop_rate, n_layer)\n self.decoder = Decoder(n_head, emb_dim, drop_rate, n_layer)\n self.o = nn.Linear(emb_dim,n_vocab)\n self.opt = torch.optim.Adam(self.parameters(),lr=0.002)\n \n def forward(self,x,y,training= None):\n x_embed, y_embed = self.embed(x), self.embed(y) # [n, step, emb_dim] * 2\n pad_mask = self._pad_mask(x) # [n, 1, step, step]\n encoded_z = self.encoder(x_embed,training,pad_mask) # [n, step, emb_dim]\n yz_look_ahead_mask = self._look_ahead_mask(y) # [n, 1, step, step]\n decoded_z = self.decoder(y_embed,encoded_z, training, yz_look_ahead_mask, pad_mask) # [n, step, emb_dim]\n o = self.o(decoded_z) # [n, step, n_vocab]\n return o\n \n def step(self, x, y):\n self.opt.zero_grad()\n logits = self(x,y[:, :-1],training=True)\n pad_mask = ~torch.eq(y[:,1:],self.padding_idx) # [n, seq_len]\n loss = cross_entropy(logits.reshape(-1, self.dec_v_emb),y[:,1:].reshape(-1))\n loss.backward()\n self.opt.step()\n return loss.cpu().data.numpy(), logits\n\n def _pad_bool(self, seqs):\n o = torch.eq(seqs,self.padding_idx) # [n, step]\n return o\n def _pad_mask(self, seqs):\n len_q = seqs.size(1)\n mask = self._pad_bool(seqs).unsqueeze(1).expand(-1,len_q,-1) # [n, len_q, step]\n return mask.unsqueeze(1) # [n, 1, len_q, step]\n \n def _look_ahead_mask(self,seqs):\n device = next(self.parameters()).device\n batch_size, seq_len = seqs.shape\n mask = torch.triu(torch.ones((seq_len,seq_len), dtype=torch.long), 
diagonal=1).to(device) # [seq_len ,seq_len]\n mask = torch.where(self._pad_bool(seqs)[:,None,None,:],1,mask[None,None,:,:]).to(device) # [n, 1, seq_len, seq_len]\n return mask>0 # [n, 1, seq_len, seq_len]\n \n def translate(self, src, v2i, i2v):\n self.eval()\n device = next(self.parameters()).device\n src_pad = src\n # Initialize Decoder input by constructing a matrix M([n, self.max_len+1]) with initial value:\n # M[n,0] = start token id\n # M[n,:] = 0\n target = torch.from_numpy(utils.pad_zero(np.array([[v2i[\"<GO>\"], ] for _ in range(len(src))]), self.max_len+1)).to(device)\n x_embed = self.embed(src_pad)\n encoded_z = self.encoder(x_embed,False,mask=self._pad_mask(src_pad))\n for i in range(0,self.max_len):\n y = target[:,:-1]\n y_embed = self.embed(y)\n decoded_z = self.decoder(y_embed,encoded_z,False,self._look_ahead_mask(y),self._pad_mask(src_pad))\n o = self.o(decoded_z)[:,i,:]\n idx = o.argmax(dim = 1).detach()\n # Update the Decoder input, to predict for the next position.\n target[:,i+1] = idx\n self.train()\n return target\n\n\n\n\ndef train(emb_dim=32,n_layer=3,n_head=4):\n \n dataset = utils.DateData(4000)\n print(\"Chinese time order: yy/mm/dd \",dataset.date_cn[:3],\"\\nEnglish time order: dd/M/yyyy\", dataset.date_en[:3])\n print(\"Vocabularies: \", dataset.vocab)\n print(f\"x index sample: \\n{dataset.idx2str(dataset.x[0])}\\n{dataset.x[0]}\",\n f\"\\ny index sample: \\n{dataset.idx2str(dataset.y[0])}\\n{dataset.y[0]}\")\n loader = DataLoader(dataset,batch_size=32,shuffle=True)\n model = Transformer(n_vocab=dataset.num_word, max_len=MAX_LEN, n_layer = n_layer, emb_dim=emb_dim, n_head = n_head, drop_rate=0.1, padding_idx=0)\n if torch.cuda.is_available():\n print(\"GPU train avaliable\")\n device =torch.device(\"cuda\")\n model = model.cuda()\n else:\n device = torch.device(\"cpu\")\n model = model.cpu()\n for i in range(100):\n for batch_idx , batch in enumerate(loader):\n bx, by, decoder_len = batch\n bx, by = torch.from_numpy(utils.pad_zero(bx,max_len = MAX_LEN)).type(torch.LongTensor).to(device), torch.from_numpy(utils.pad_zero(by,MAX_LEN+1)).type(torch.LongTensor).to(device)\n loss, logits = model.step(bx,by)\n if batch_idx%50 == 0:\n target = dataset.idx2str(by[0, 1:-1].cpu().data.numpy())\n pred = model.translate(bx[0:1],dataset.v2i,dataset.i2v)\n res = dataset.idx2str(pred[0].cpu().data.numpy())\n src = dataset.idx2str(bx[0].cpu().data.numpy())\n print(\n \"Epoch: \",i,\n \"| t: \", batch_idx,\n \"| loss: %.3f\" % loss,\n \"| input: \", src,\n \"| target: \", target,\n \"| inference: \", res,\n )\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--emb_dim\",type=int, help=\"change the model dimension\")\n parser.add_argument(\"--n_layer\",type=int, help=\"change the number of layers in Encoder and Decoder\")\n parser.add_argument(\"--n_head\",type=int, help=\"change the number of heads in MultiHeadAttention\")\n\n args = parser.parse_args()\n args = dict(filter(lambda x: x[1],vars(args).items()))\n train(**args)"
] |
[
[
"torch.nn.Linear",
"torch.ones",
"torch.cuda.is_available",
"numpy.cos",
"torch.reshape",
"numpy.sin",
"torch.nn.LayerNorm",
"torch.sqrt",
"torch.tensor",
"torch.utils.data.DataLoader",
"numpy.arange",
"numpy.expand_dims",
"torch.device",
"torch.nn.functional.softmax",
"torch.matmul",
"torch.nn.Dropout",
"torch.eq",
"torch.from_numpy",
"torch.nn.Embedding"
]
] |
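The decoder's causal masking in _look_ahead_mask above comes down to an upper-triangular matrix built with torch.triu. A sketch of what that mask does for a length-4 sequence:

    import torch

    seq_len = 4
    mask = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)
    # mask[i, j] is True where position i must NOT attend to future position j:
    # [[False,  True,  True,  True],
    #  [False, False,  True,  True],
    #  [False, False, False,  True],
    #  [False, False, False, False]]
    scores = torch.randn(seq_len, seq_len).masked_fill(mask, float('-inf'))
    attn = scores.softmax(dim=-1)   # each row sums to 1 over allowed positions only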