repo_name | hexsha | file_path | code | apis
---|---|---|---|---|
PABannier/sparseglm | ["ec1c6a15786d9fc0cc58ae3d7e28227bbc9077e9"] | ["python/sparseglm/benchmarks/lasso.py"] |
[
"import time\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.linear_model import Lasso as Lasso_sk\nfrom sparseglm.estimators import Lasso\n\nfrom sparseglm.utils import make_correlated_data, compute_alpha_max\n\nn_samples = 300\nn_features = 1000\n\nsnr = 2\ncorr = 0.6\ndensity = 0.5\n\ntol = 1e-9\n\nreg = 0.1\n\nX, y, _ = make_correlated_data(\n n_samples=n_samples, n_features=n_features, corr=corr, snr=snr,\n density=density, random_state=0)\n\nX_sparse = sp.csc_matrix(X * np.random.binomial(1, 0.1, X.shape))\n\n\ndef time_estimator(clf, X, y):\n start = time.time()\n clf.fit(X, y)\n duration = time.time() - start\n return clf.coef_, duration\n\n\nalpha_max = compute_alpha_max(X, y)\n\n\nestimator_sk = Lasso_sk(alpha_max * reg, fit_intercept=False, tol=tol,\n max_iter=10**6)\nestimator_rl = Lasso(alpha_max * reg, tol=tol, verbose=False)\n\nprint(\"Fitting dense matrices...\")\n\nprint(\"=\" * 5 + \" SCIKIT-LEARN \" + 5 * \" \")\ncoef_sk, duration_sk = time_estimator(estimator_sk, X, y)\nprint(\"=\" * 5 + \" SparseGLM \" + 5 * \" \")\ncoef_rl, duration_rl = time_estimator(estimator_rl, X, y)\n\nnp.testing.assert_allclose(coef_sk, coef_rl, atol=1e-5)\n\nprint(\"Fitting sparse matrices...\")\n\nprint(\"=\" * 5 + \" SCIKIT-LEARN \" + 5 * \" \")\ncoef_sk_sparse, duration_sk_sparse = time_estimator(estimator_sk, X_sparse, y)\nprint(\"=\" * 5 + \" SparseGLM \" + 5 * \" \")\ncoef_rl_sparse, duration_rl_sparse = time_estimator(estimator_rl, X_sparse, y)\n\nnp.testing.assert_allclose(coef_sk_sparse, coef_rl_sparse, atol=1e-6)\n\nprint(\"\\n\")\nprint(\"=\" * 5 + \" RESULTS \" + \"=\" * 5)\n\nprint(f\"[DENSE] Scikit-learn :: {duration_sk} s\")\nprint(f\"[DENSE] SparseGLM :: {duration_rl} s\")\nprint(\"--\" * 5)\nprint(f\"[SPARSE] Scikit-learn :: {duration_sk_sparse} s\")\nprint(f\"[SPARSE] SparseGLM :: {duration_rl_sparse} s\")\n"
] |
[["numpy.random.binomial", "numpy.testing.assert_allclose", "sklearn.linear_model.Lasso"]] |
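The benchmark above times `fit` for two Lasso implementations and then asserts coefficient agreement with `numpy.testing.assert_allclose`. A minimal sketch of the same timing-and-checking pattern using only scikit-learn and numpy (no sparseglm dependency; `make_regression`, the 0.1 regularization fraction, and the tolerances below are stand-ins, not values taken from the file):

```python
import time

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso

# Synthetic stand-in for make_correlated_data.
X, y = make_regression(n_samples=300, n_features=1000, random_state=0)

# alpha_max is the smallest penalty at which every coefficient is zero;
# for scikit-learn's Lasso objective it is max|X^T y| / n_samples.
alpha_max = np.max(np.abs(X.T @ y)) / len(y)

clf = Lasso(alpha=0.1 * alpha_max, fit_intercept=False, tol=1e-9, max_iter=10**6)
start = time.time()
clf.fit(X, y)
print(f"fit: {time.time() - start:.3f} s, "
      f"{np.count_nonzero(clf.coef_)} nonzero coefficients")
```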
The-SocialLion/Smart-Attendance-System | ["92d846027bbf367ee6b6ba99a4eb83e74bdc15c7"] | ["4_recognizingPerson.py"] |
[
"import numpy as np\nimport imutils\nimport pickle\nimport time\nimport cv2\n\nembeddingModel = \"openface_nn4.small2.v1.t7\"\n\nembeddingFile = \"output/embeddings.pickle\"\nrecognizerFile = \"output/recognizer.pickle\"\nlabelEncFile = \"output/le.pickle\"\nconf = 0.5\n\nprint(\"Loading face detector...\")\nprototxt = \"model/deploy.prototxt\"\nmodel = \"model/res10_300x300_ssd_iter_140000.caffemodel\"\ndetector = cv2.dnn.readNetFromCaffe(prototxt, model)\n\nprint(\"Loading face recognizer...\")\nembedder = cv2.dnn.readNetFromTorch(embeddingModel)\n\nrecognizer = pickle.loads(open(recognizerFile, \"rb\").read())\nle = pickle.loads(open(labelEncFile, \"rb\").read())\n\nbox = []\nprint(\"Starting video stream...\")\ncam = cv2.VideoCapture(1)\ntime.sleep(2.0)\n\nwhile True:\n _, frame = cam.read()\n frame = imutils.resize(frame, width=600)\n (h, w) = frame.shape[:2]\n imageBlob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300),(104.0, 177.0, 123.0), swapRB=False, crop=False)\n\n detector.setInput(imageBlob)\n detections = detector.forward()\n\n for i in range(0, detections.shape[2]):\n\n confidence = detections[0, 0, i, 2]\n\n if confidence > conf:\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n face = frame[startY:endY, startX:endX]\n (fH, fW) = face.shape[:2]\n\n if fW < 20 or fH < 20:\n continue\n\n faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)\n embedder.setInput(faceBlob)\n vec = embedder.forward()\n\n preds = recognizer.predict_proba(vec)[0]\n j = np.argmax(preds)\n proba = preds[j]\n name = le.classes_[j]\n text = \"{} : {:.2f}%\".format(name, proba * 100)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(frame, (startX, startY), (endX, endY),(0, 0, 255), 2)\n cv2.putText(frame, text, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n if key == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()"
] |
[["numpy.array", "numpy.argmax"]] |
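The recognizer above chains two OpenCV DNN models: a Caffe SSD face detector and a Torch embedding network whose output feeds a pickled classifier. A minimal sketch of just the detection stage on a single still image (the model paths are the ones referenced in the script and must exist locally; `test.jpg` is a hypothetical input):

```python
import cv2
import numpy as np

# Caffe SSD face detector, same files the script above loads.
detector = cv2.dnn.readNetFromCaffe("model/deploy.prototxt",
                                    "model/res10_300x300_ssd_iter_140000.caffemodel")

frame = cv2.imread("test.jpg")  # hypothetical test image
(h, w) = frame.shape[:2]

# (104, 177, 123) are the BGR channel means this SSD was trained with.
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300),
                             (104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(blob)
detections = detector.forward()  # shape (1, 1, N, 7)

for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > 0.5:
        # Columns 3:7 hold box corners as fractions of the frame size.
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        print(f"face {box.astype(int)} @ {confidence:.2f}")
```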
ArgonneCPAC/skysim | ["5bc2abebd7123f29b424efc11c3ef374a51cd6c1"] | ["lightcone_resample/test_v4.py"] |
[
"#!/usr/bin/env python2.7\n\nfrom __future__ import print_function, division\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as clr\nimport dtk \nimport h5py\nimport sys\nimport time\n\n\nif __name__ == \"__main__\":\n param = dtk.Param(sys.argv[1])\n output_fname = param.get_string('output_fname').replace(\"${step}\",\"all\")\n hgp = h5py.File(output_fname,'r')['galaxyProperties']\n print(hgp.keys())\n redshift = hgp['redshiftHubble'].value\n pos_angle = hgp['morphology/positionAngle'].value\n\n ellip = hgp['morphology/totalEllipticity'].value\n ellip1 = hgp['morphology/totalEllipticity1'].value\n ellip2 = hgp['morphology/totalEllipticity2'].value\n\n dellip = hgp['morphology/diskEllipticity'].value\n dellip1 = hgp['morphology/diskEllipticity1'].value\n dellip2 = hgp['morphology/diskEllipticity2'].value\n\n sellip = hgp['morphology/spheroidEllipticity'].value\n sellip1 = hgp['morphology/spheroidEllipticity1'].value\n sellip2 = hgp['morphology/spheroidEllipticity2'].value\n\n mag_g = hgp['SDSS_filters/magnitude:SDSS_g:rest'].value\n mag_r = hgp['SDSS_filters/magnitude:SDSS_r:rest'].value\n mag_i = hgp['SDSS_filters/magnitude:SDSS_i:rest'].value\n \n fnt_mag = np.isfinite(mag_r)\n \n dsize = hgp['morphology/diskHalfLightRadius'].value\n ssize = hgp['morphology/spheroidHalfLightRadius'].value\n\n dsize_as = hgp['morphology/diskHalfLightRadiusArcsec'].value\n ssize_as = hgp['morphology/spheroidHalfLightRadiusArcsec'].value\n\n dsm = hgp['diskMassStellar'].value\n ssm = hgp['diskMassStellar'].value\n \n bhm = hgp['blackHoleMass'].value\n bhar= hgp['blackHoleAccretionRate'].value\n\n h,xbins = np.histogram(pos_angle,bins=100)\n plt.figure()\n plt.plot(dtk.bins_avg(xbins),h)\n plt.xlabel(\"pos_angle\")\n plt.grid()\n \n # print(ellip)\n xbins = np.linspace(0,1,100)\n slct = np.isfinite(ellip)\n print(np.sum(slct)/np.size(slct))\n h,xbins = np.histogram(ellip[slct],bins=xbins)\n plt.figure()\n plt.plot(dtk.bins_avg(xbins),h)\n plt.xlabel(\"ellip\");plt.ylabel('freq')\n plt.grid()\n\n print(np.min(dellip), np.average(dellip), np.max(dellip))\n\n plt.figure()\n h,xbins = np.histogram(dellip,bins=xbins)\n plt.plot(dtk.bins_avg(xbins),h,label='ellipticity')\n h,xbins = np.histogram((1-dellip)/(1+dellip),bins=xbins)\n plt.plot(dtk.bins_avg(xbins),h,label='axis ratio')\n plt.xlabel(\"disk ellip\");plt.ylabel('freq')\n plt.legend(loc='best')\n plt.grid()\n\n slct = np.isfinite(sellip)\n print(np.sum(slct)/np.size(slct))\n print(np.min(sellip[slct]), np.average(sellip[slct]), np.max(sellip[slct]))\n plt.figure()\n h,xbins = np.histogram(sellip[slct],bins=xbins)\n plt.plot(dtk.bins_avg(xbins),h)\n h,xbins = np.histogram((1-sellip[slct])/(1+sellip[slct]),bins=xbins)\n plt.plot(dtk.bins_avg(xbins),h)\n plt.xlabel(\"sphere ellip\");plt.ylabel('freq')\n plt.legend(loc='best')\n plt.grid()\n \n \n # Take too long to plot\n # plt.figure()\n # plt.plot(pos_angle,ellip1,',',alpha=0.3)\n # plt.plot(pos_angle,ellip2,',',alpha=0.3)\n\n # plt.figure()\n # plt.plot(pos_angle,dellip1,',',alpha=0.3)\n # plt.plot(pos_angle,dellip2,',',alpha=0.3) \n\n # plt.figure()\n # plt.plot(pos_angle,sellip1,',',alpha=0.3)\n # plt.plot(pos_angle,sellip2,',',alpha=0.3) \n \n rad_bins = np.logspace(-3,3,100)\n plt.figure()\n h,xbins,ybins = np.histogram2d(mag_r, dsize,bins=(100,rad_bins))\n plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())\n plt.xlabel('mag r');plt.ylabel('disk size [kpc]')\n plt.yscale('log')\n plt.grid()\n \n\n plt.figure()\n h,xbins,ybins = np.histogram2d(mag_r, 
ssize,bins=(100,rad_bins))\n plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())\n plt.xlabel('mag r');plt.ylabel('bulge size [kpc]')\n plt.yscale('log')\n plt.grid()\n\n plt.figure()\n h,xbins,ybins = np.histogram2d(mag_r, dsize_as,bins=(100,rad_bins))\n plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())\n plt.xlabel('mag r');plt.ylabel('disk size [arcsec]')\n plt.yscale('log')\n plt.grid()\n \n plt.figure()\n h,xbins,ybins = np.histogram2d(mag_r, ssize_as,bins=(100,rad_bins))\n plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())\n plt.xlabel('mag r');plt.ylabel('bulge size [arcsec]')\n plt.yscale('log')\n plt.grid()\n\n\n plt.figure()\n logbins = np.logspace(1,13,100)\n h,xbins,ybins = np.histogram2d(ssm,bhm,bins=(logbins,logbins))\n plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())\n plt.xlabel('bulge stellar mass');plt.ylabel('black hole mass')\n plt.xscale('log');plt.yscale('log')\n plt.grid()\n\n plt.figure()\n logbins = np.logspace(1,13,100)\n \n h,xbins,ybins = np.histogram2d(bhm,bhar,bins=(logbins,logbins))\n plt.pcolor(xbins,ybins,h.T,cmap='PuBu',norm=clr.LogNorm())\n plt.xlabel('black hole mass');plt.ylabel('black hole accreation rate [Msun/Gyr]')\n plt.xscale('log');plt.yscale('log')\n plt.grid()\n\n plt.show()\n\n\n \n"
] |
[["matplotlib.pyplot.legend", "numpy.linspace", "numpy.max", "numpy.histogram", "numpy.size", "matplotlib.pyplot.figure", "numpy.min", "numpy.logspace", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.histogram2d", "matplotlib.colors.LogNorm", "numpy.isfinite", "matplotlib.pyplot.yscale", "matplotlib.pyplot.grid", "matplotlib.pyplot.xscale", "numpy.average"]] |
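The script above is a plotting harness: it pulls columns out of an HDF5 `galaxyProperties` group, masks non-finite entries, and histograms them. A minimal sketch of that read-mask-histogram pattern ("catalog.hdf5" is a placeholder for the `output_fname` the script reads; the deprecated `h5py.Dataset.value` accessor used above is replaced by `[:]`):

```python
import h5py
import matplotlib.pyplot as plt
import numpy as np

# "catalog.hdf5" stands in for the output_fname resolved by dtk.Param above.
with h5py.File("catalog.hdf5", "r") as f:
    ellip = f["galaxyProperties/morphology/totalEllipticity"][:]

slct = np.isfinite(ellip)
print("finite fraction:", np.sum(slct) / np.size(slct))

h, xbins = np.histogram(ellip[slct], bins=np.linspace(0, 1, 100))
plt.plot(0.5 * (xbins[:-1] + xbins[1:]), h)  # bin centers, as dtk.bins_avg does
plt.xlabel("ellip")
plt.ylabel("freq")
plt.grid()
plt.show()
```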
camposouza/NMR | ["a55ebf738799b4c1cc14dcd1ebebae3ed57a8d58"] | ["NMR_Pulse_Sequences/PulsoSimples.py"] |
[
"import numpy as np\nfrom numpy import pi, sin, cos\nimport matplotlib.pyplot as plt\n\nT1 = 600\nT2 = 400\nFo = 10\ndf = np.arange(-10, 50, 0.1)\n\ntheta = pi/2.0\nT = 2000\ndT = 1\n\nT2inom = 200\nT22 = 1/((1/T2) + (1/T2inom))\n\nRtheta = np.matrix([[1, 0, 0], [0, cos(theta), sin(theta)], [0, -sin(theta), cos(theta)]])\nE1 = np.exp(-dT/T1)\nE2 = np.exp(-dT/T2)\nE = np.matrix([[E2, 0, 0], [0, E2, 0], [0, 0, E1]])\nB = np.matrix([0, 0, 1 - E1])\n\nN0 = int(T/dT)\nM = np.zeros((N0, 3))\nMs = np.zeros((N0, 3))\n\nM[0] = [0, 0, 1]\n\nfor f in range(0, len(df)):\n phi = 2*pi*(df[f])*dT/1000.0\n Rphi = np.matrix([[cos(phi), -sin(phi), 0], [sin(phi), cos(phi), 0], [0, 0, 1]])\n M[1] = Rtheta @ M[0] + B\n\n for k in range(2, N0):\n M[k] = E @ Rphi @ M[k - 1] + B\n\n g = T22 / ((1 + (df[f] - Fo)**2) * (2*pi*T22/1000)**2)\n\n Ms[:, 0] = g * M[:, 0] + Ms[:, 0]\n Ms[:, 1] = g * M[:, 1] + Ms[:, 1]\n Ms[:, 2] = g * M[:, 2] + Ms[:, 2]\n\nMs = Ms/max(Ms[0])\n\ntempo = np.arange(0, N0).T * dT\nCurvaT2 = [np.exp(-t/float(T2)) for t in tempo]\nCurvaT22 = [np.exp(-t/float(T22)) for t in tempo]\n\n# ===== Graficando Resultados =====\n\nplt.plot(tempo, CurvaT2, 'g')\n# plt.plot(tempo, CurvaT22, 'y')\nplt.plot(tempo, Ms[:, 0], 'b')\nplt.plot(tempo, Ms[:, 1], 'k')\nplt.plot(tempo, Ms[:, 2], 'r')\nplt.grid()\n\nplt.show()\n"
] |
[["numpy.matrix", "numpy.arange", "numpy.cos", "numpy.sin", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "numpy.exp", "numpy.zeros", "matplotlib.pyplot.show"]] |
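The simulation above integrates the Bloch equations with discrete operators: each time step applies a precession rotation R_phi and a relaxation matrix E plus a recovery term B, i.e. M_k = E · R_phi · M_{k-1} + B, then sums the per-frequency signals under a Lorentzian weight. A minimal single-frequency sketch of that propagation loop (parameter values and the initial magnetization are illustrative, using the same ms/Hz conventions):

```python
import matplotlib.pyplot as plt
import numpy as np

T1, T2, dT, df = 600.0, 400.0, 1.0, 10.0  # ms, ms, ms, Hz
N = 2000

phi = 2 * np.pi * df * dT / 1000.0  # precession angle per step
Rphi = np.array([[np.cos(phi), -np.sin(phi), 0.0],
                 [np.sin(phi),  np.cos(phi), 0.0],
                 [0.0,          0.0,         1.0]])
E1, E2 = np.exp(-dT / T1), np.exp(-dT / T2)
E = np.diag([E2, E2, E1])           # transverse/longitudinal decay per step
B = np.array([0.0, 0.0, 1.0 - E1])  # T1 recovery toward equilibrium

M = np.zeros((N, 3))
M[0] = [1.0, 0.0, 0.0]              # magnetization after an ideal 90-degree pulse
for k in range(1, N):
    M[k] = E @ Rphi @ M[k - 1] + B  # M_k = E . R_phi . M_{k-1} + B

t = np.arange(N) * dT
plt.plot(t, M[:, 0], label="Mx")
plt.plot(t, M[:, 2], label="Mz")
plt.xlabel("t [ms]")
plt.legend()
plt.grid()
plt.show()
```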
zomglings/MONAI | ["ba127a2077b1a5d8aed01bc98a6f9550cc191c2b"] | ["monai/transforms/spatial/array.py"] |
[
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA collection of \"vanilla\" transforms for spatial operations\nhttps://github.com/Project-MONAI/MONAI/wiki/MONAI_Design\n\"\"\"\n\nimport warnings\nfrom typing import Any, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import USE_COMPILED, DtypeLike\nfrom monai.data.utils import compute_shape_offset, to_affine_nd, zoom_affine\nfrom monai.networks.layers import AffineTransform, GaussianFilter, grid_pull\nfrom monai.transforms.croppad.array import CenterSpatialCrop\nfrom monai.transforms.transform import RandomizableTransform, Transform\nfrom monai.transforms.utils import (\n create_control_grid,\n create_grid,\n create_rotate,\n create_scale,\n create_shear,\n create_translate,\n map_spatial_axes,\n)\nfrom monai.utils import (\n GridSampleMode,\n GridSamplePadMode,\n InterpolateMode,\n NumpyPadMode,\n ensure_tuple,\n ensure_tuple_rep,\n ensure_tuple_size,\n fall_back_tuple,\n issequenceiterable,\n optional_import,\n)\n\nnib, _ = optional_import(\"nibabel\")\n\n__all__ = [\n \"Spacing\",\n \"Orientation\",\n \"Flip\",\n \"Resize\",\n \"Rotate\",\n \"Zoom\",\n \"Rotate90\",\n \"RandRotate90\",\n \"RandRotate\",\n \"RandFlip\",\n \"RandAxisFlip\",\n \"RandZoom\",\n \"AffineGrid\",\n \"RandAffineGrid\",\n \"RandDeformGrid\",\n \"Resample\",\n \"Affine\",\n \"RandAffine\",\n \"Rand2DElastic\",\n \"Rand3DElastic\",\n]\n\nRandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]]\n\n\nclass Spacing(Transform):\n \"\"\"\n Resample input image into the specified `pixdim`.\n \"\"\"\n\n def __init__(\n self,\n pixdim: Union[Sequence[float], float],\n diagonal: bool = False,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: DtypeLike = np.float64,\n ) -> None:\n \"\"\"\n Args:\n pixdim: output voxel spacing.\n diagonal: whether to resample the input to have a diagonal affine matrix.\n If True, the input data is resampled to the following affine::\n\n np.diag((pixdim_0, pixdim_1, ..., pixdim_n, 1))\n\n This effectively resets the volume to the world coordinate system (RAS+ in nibabel).\n The original orientation, rotation, shearing are not preserved.\n\n If False, this transform preserves the axes orientation, orthogonal rotation and\n translation components from the original affine. This option will not flip/swap axes\n of the original data.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n self.pixdim = np.array(ensure_tuple(pixdim), dtype=np.float64)\n self.diagonal = diagonal\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n self.align_corners = align_corners\n self.dtype = dtype\n\n def __call__(\n self,\n data_array: np.ndarray,\n affine: Optional[np.ndarray] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n align_corners: Optional[bool] = None,\n dtype: DtypeLike = None,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Args:\n data_array: in shape (num_channels, H[, W, ...]).\n affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n dtype: data type for resampling computation. Defaults to ``self.dtype``.\n If None, use the data type of input data. 
To be compatible with other modules,\n                the output data type is always ``np.float32``.\n\n        Raises:\n            ValueError: When ``data_array`` has no spatial dimensions.\n            ValueError: When ``pixdim`` is nonpositive.\n\n        Returns:\n            data_array (resampled into `self.pixdim`), original affine, current affine.\n\n        \"\"\"\n        _dtype = dtype or self.dtype or data_array.dtype\n        sr = data_array.ndim - 1\n        if sr <= 0:\n            raise ValueError(\"data_array must have at least one spatial dimension.\")\n        if affine is None:\n            # default to identity\n            affine = np.eye(sr + 1, dtype=np.float64)\n            affine_ = np.eye(sr + 1, dtype=np.float64)\n        else:\n            affine_ = to_affine_nd(sr, affine)\n        out_d = self.pixdim[:sr]\n        if out_d.size < sr:\n            out_d = np.append(out_d, [1.0] * (sr - out_d.size))\n        if np.any(out_d <= 0):\n            raise ValueError(f\"pixdim must be positive, got {out_d}.\")\n        # compute output affine, shape and offset\n        new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal)\n        output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine)\n        new_affine[:sr, -1] = offset[:sr]\n        transform = np.linalg.inv(affine_) @ new_affine\n        # adapt to the actual rank\n        transform = to_affine_nd(sr, transform)\n\n        # no resampling if it's identity transform\n        if np.allclose(transform, np.diag(np.ones(len(transform))), atol=1e-3):\n            output_data = data_array.copy().astype(np.float32)\n            new_affine = to_affine_nd(affine, new_affine)\n            return output_data, affine, new_affine\n\n        # resample\n        affine_xform = AffineTransform(\n            normalized=False,\n            mode=mode or self.mode,\n            padding_mode=padding_mode or self.padding_mode,\n            align_corners=self.align_corners if align_corners is None else align_corners,\n            reverse_indexing=True,\n        )\n        output_data = affine_xform(\n            # AffineTransform requires a batch dim\n            torch.as_tensor(np.ascontiguousarray(data_array).astype(_dtype)).unsqueeze(0),\n            torch.as_tensor(np.ascontiguousarray(transform).astype(_dtype)),\n            spatial_size=output_shape,\n        )\n        output_data = np.asarray(output_data.squeeze(0).detach().cpu().numpy(), dtype=np.float32)  # type: ignore\n        new_affine = to_affine_nd(affine, new_affine)\n        return output_data, affine, new_affine\n\n\nclass Orientation(Transform):\n    \"\"\"\n    Change the input image's orientation into the specified based on `axcodes`.\n    \"\"\"\n\n    def __init__(\n        self,\n        axcodes: Optional[str] = None,\n        as_closest_canonical: bool = False,\n        labels: Optional[Sequence[Tuple[str, str]]] = tuple(zip(\"LPI\", \"RAS\")),\n    ) -> None:\n        \"\"\"\n        Args:\n            axcodes: N elements sequence for spatial ND input's orientation.\n                e.g. axcodes='RAS' represents 3D orientation:\n                (Left, Right), (Posterior, Anterior), (Inferior, Superior).\n                default orientation labels options are: 'L' and 'R' for the first dimension,\n                'P' and 'A' for the second, 'I' and 'S' for the third.\n            as_closest_canonical: if True, load the image as closest to canonical axis format.\n            labels: optional, None or sequence of (2,) sequences\n                (2,) sequences are labels for (beginning, end) of output axis.\n                Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.\n\n        Raises:\n            ValueError: When ``axcodes=None`` and ``as_closest_canonical=True``. 
Incompatible values.\n\n See Also: `nibabel.orientations.ornt2axcodes`.\n\n \"\"\"\n if axcodes is None and not as_closest_canonical:\n raise ValueError(\"Incompatible values: axcodes=None and as_closest_canonical=True.\")\n if axcodes is not None and as_closest_canonical:\n warnings.warn(\"using as_closest_canonical=True, axcodes ignored.\")\n self.axcodes = axcodes\n self.as_closest_canonical = as_closest_canonical\n self.labels = labels\n\n def __call__(\n self, data_array: np.ndarray, affine: Optional[np.ndarray] = None\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n original orientation of `data_array` is defined by `affine`.\n\n Args:\n data_array: in shape (num_channels, H[, W, ...]).\n affine (matrix): (N+1)x(N+1) original affine matrix for spatially ND `data_array`. Defaults to identity.\n\n Raises:\n ValueError: When ``data_array`` has no spatial dimensions.\n ValueError: When ``axcodes`` spatiality differs from ``data_array``.\n\n Returns:\n data_array (reoriented in `self.axcodes`), original axcodes, current axcodes.\n\n \"\"\"\n sr = data_array.ndim - 1\n if sr <= 0:\n raise ValueError(\"data_array must have at least one spatial dimension.\")\n if affine is None:\n affine = np.eye(sr + 1, dtype=np.float64)\n affine_ = np.eye(sr + 1, dtype=np.float64)\n else:\n affine_ = to_affine_nd(sr, affine)\n src = nib.io_orientation(affine_)\n if self.as_closest_canonical:\n spatial_ornt = src\n else:\n if self.axcodes is None:\n raise AssertionError\n dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)\n if len(dst) < sr:\n raise ValueError(\n f\"axcodes must match data_array spatially, got axcodes={len(self.axcodes)}D data_array={sr}D\"\n )\n spatial_ornt = nib.orientations.ornt_transform(src, dst)\n ornt = spatial_ornt.copy()\n ornt[:, 0] += 1 # skip channel dim\n ornt = np.concatenate([np.array([[0, 1]]), ornt])\n shape = data_array.shape[1:]\n data_array = np.ascontiguousarray(nib.orientations.apply_orientation(data_array, ornt))\n new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, shape)\n new_affine = to_affine_nd(affine, new_affine)\n return data_array, affine, new_affine\n\n\nclass Flip(Transform):\n \"\"\"\n Reverses the order of elements along the given spatial axis. Preserves shape.\n Uses ``np.flip`` in practice. See numpy.flip for additional details:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html.\n\n Args:\n spatial_axis: spatial axes along which to flip over. 
Default is None.\n The default `axis=None` will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n\n \"\"\"\n\n def __init__(self, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:\n self.spatial_axis = spatial_axis\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n\n result: np.ndarray = np.flip(img, map_spatial_axes(img.ndim, self.spatial_axis))\n return result.astype(img.dtype)\n\n\nclass Resize(Transform):\n \"\"\"\n Resize the input image to given spatial size (with scaling, not cropping/padding).\n Implemented using :py:class:`torch.nn.functional.interpolate`.\n\n Args:\n spatial_size: expected shape of spatial dimensions after resize operation.\n if the components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``\"area\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n \"\"\"\n\n def __init__(\n self,\n spatial_size: Union[Sequence[int], int],\n mode: Union[InterpolateMode, str] = InterpolateMode.AREA,\n align_corners: Optional[bool] = None,\n ) -> None:\n self.spatial_size = ensure_tuple(spatial_size)\n self.mode: InterpolateMode = InterpolateMode(mode)\n self.align_corners = align_corners\n\n def __call__(\n self,\n img: np.ndarray,\n mode: Optional[Union[InterpolateMode, str]] = None,\n align_corners: Optional[bool] = None,\n ) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]).\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. 
Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n\n Raises:\n ValueError: When ``self.spatial_size`` length is less than ``img`` spatial dimensions.\n\n \"\"\"\n input_ndim = img.ndim - 1 # spatial ndim\n output_ndim = len(self.spatial_size)\n if output_ndim > input_ndim:\n input_shape = ensure_tuple_size(img.shape, output_ndim + 1, 1)\n img = img.reshape(input_shape)\n elif output_ndim < input_ndim:\n raise ValueError(\n \"len(spatial_size) must be greater or equal to img spatial dimensions, \"\n f\"got spatial_size={output_ndim} img={input_ndim}.\"\n )\n spatial_size = fall_back_tuple(self.spatial_size, img.shape[1:])\n resized = torch.nn.functional.interpolate( # type: ignore\n input=torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0),\n size=spatial_size,\n mode=self.mode.value if mode is None else InterpolateMode(mode).value,\n align_corners=self.align_corners if align_corners is None else align_corners,\n )\n resized = resized.squeeze(0).detach().cpu().numpy()\n return np.asarray(resized)\n\n\nclass Rotate(Transform):\n \"\"\"\n Rotates an input image by given angle using :py:class:`monai.networks.layers.AffineTransform`.\n\n Args:\n angle: Rotation angle(s) in radians. should a float for 2D, three floats for 3D.\n keep_size: If it is True, the output shape is kept the same as the input.\n If it is False, the output shape is adapted so that the\n input array is contained completely in the output. Default is True.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n align_corners: Defaults to False.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n\n def __init__(\n self,\n angle: Union[Sequence[float], float],\n keep_size: bool = True,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: DtypeLike = np.float64,\n ) -> None:\n self.angle = angle\n self.keep_size = keep_size\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n self.align_corners = align_corners\n self.dtype = dtype\n self.rotation_matrix: Optional[np.ndarray] = None\n\n def __call__(\n self,\n img: np.ndarray,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n align_corners: Optional[bool] = None,\n dtype: DtypeLike = None,\n ) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: [chns, H, W] or [chns, H, W, D].\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n            align_corners: Defaults to ``self.align_corners``.\n                See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n            dtype: data type for resampling computation. Defaults to ``self.dtype``.\n                If None, use the data type of input data. To be compatible with other modules,\n                the output data type is always ``np.float32``.\n\n        Raises:\n            ValueError: When ``img`` spatially is not one of [2D, 3D].\n\n        \"\"\"\n        _dtype = dtype or self.dtype or img.dtype\n        im_shape = np.asarray(img.shape[1:])  # spatial dimensions\n        input_ndim = len(im_shape)\n        if input_ndim not in (2, 3):\n            raise ValueError(f\"Unsupported img dimension: {input_ndim}, available options are [2, 3].\")\n        _angle = ensure_tuple_rep(self.angle, 1 if input_ndim == 2 else 3)\n        transform = create_rotate(input_ndim, _angle)\n        shift = create_translate(input_ndim, (im_shape - 1) / 2)\n        if self.keep_size:\n            output_shape = im_shape\n        else:\n            corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing=\"ij\")).reshape(\n                (len(im_shape), -1)\n            )\n            corners = transform[:-1, :-1] @ corners\n            output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)\n        shift_1 = create_translate(input_ndim, -(output_shape - 1) / 2)\n        transform = shift @ transform @ shift_1\n\n        xform = AffineTransform(\n            normalized=False,\n            mode=mode or self.mode,\n            padding_mode=padding_mode or self.padding_mode,\n            align_corners=self.align_corners if align_corners is None else align_corners,\n            reverse_indexing=True,\n        )\n        output = xform(\n            torch.as_tensor(np.ascontiguousarray(img).astype(_dtype)).unsqueeze(0),\n            torch.as_tensor(np.ascontiguousarray(transform).astype(_dtype)),\n            spatial_size=output_shape,\n        )\n        self.rotation_matrix = transform\n        return np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32)\n\n    def get_rotation_matrix(self) -> Optional[np.ndarray]:\n        \"\"\"Get the most recently applied rotation matrix\"\"\"\n        return self.rotation_matrix\n\n\nclass Zoom(Transform):\n    \"\"\"\n    Zooms an ND image using :py:class:`torch.nn.functional.interpolate`.\n    For details, please see https://pytorch.org/docs/stable/nn.functional.html#interpolate.\n\n    Different from :py:class:`monai.transforms.resize`, this transform takes scaling factors\n    as input, and provides an option of preserving the input spatial size.\n\n    Args:\n        zoom: The zoom factor along the spatial axes.\n            If a float, zoom is the same for each spatial axis.\n            If a sequence, zoom should contain one value for each spatial axis.\n        mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n            The interpolation mode. Defaults to ``\"area\"``.\n            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n        padding_mode: {``\"constant\"``, ``\"edge``\", ``\"linear_ramp``\", ``\"maximum``\", ``\"mean``\", `\"median``\",\n            ``\"minimum``\", `\"reflect``\", ``\"symmetric``\", ``\"wrap``\", ``\"empty``\", ``\"<function>``\"}\n            The mode to pad data after zooming.\n            See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n        align_corners: This only has an effect when mode is\n            'linear', 'bilinear', 'bicubic' or 'trilinear'. 
Default: None.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n keep_size: Should keep original size (padding/slicing if needed), default is True.\n \"\"\"\n\n def __init__(\n self,\n zoom: Union[Sequence[float], float],\n mode: Union[InterpolateMode, str] = InterpolateMode.AREA,\n padding_mode: Union[NumpyPadMode, str] = NumpyPadMode.EDGE,\n align_corners: Optional[bool] = None,\n keep_size: bool = True,\n ) -> None:\n self.zoom = zoom\n self.mode: InterpolateMode = InterpolateMode(mode)\n self.padding_mode: NumpyPadMode = NumpyPadMode(padding_mode)\n self.align_corners = align_corners\n self.keep_size = keep_size\n\n def __call__(\n self,\n img: np.ndarray,\n mode: Optional[Union[InterpolateMode, str]] = None,\n padding_mode: Optional[Union[NumpyPadMode, str]] = None,\n align_corners: Optional[bool] = None,\n ):\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]).\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n padding_mode: {``\"constant\"``, ``\"edge``\", ``\"linear_ramp``\", ``\"maximum``\", ``\"mean``\", `\"median``\",\n ``\"minimum``\", `\"reflect``\", ``\"symmetric``\", ``\"wrap``\", ``\"empty``\", ``\"<function>``\"}\n The mode to pad data after zooming, default to ``self.padding_mode``.\n See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n\n \"\"\"\n _zoom = ensure_tuple_rep(self.zoom, img.ndim - 1) # match the spatial image dim\n zoomed = torch.nn.functional.interpolate( # type: ignore\n recompute_scale_factor=True,\n input=torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0),\n scale_factor=list(_zoom),\n mode=self.mode.value if mode is None else InterpolateMode(mode).value,\n align_corners=self.align_corners if align_corners is None else align_corners,\n )\n zoomed = zoomed.squeeze(0).detach().cpu().numpy()\n if not self.keep_size or np.allclose(img.shape, zoomed.shape):\n return zoomed\n\n pad_vec = [[0, 0]] * len(img.shape)\n slice_vec = [slice(None)] * len(img.shape)\n for idx, (od, zd) in enumerate(zip(img.shape, zoomed.shape)):\n diff = od - zd\n half = abs(diff) // 2\n if diff > 0: # need padding\n pad_vec[idx] = [half, diff - half]\n elif diff < 0: # need slicing\n slice_vec[idx] = slice(half, half + od)\n\n padding_mode = self.padding_mode if padding_mode is None else NumpyPadMode(padding_mode)\n zoomed = np.pad(zoomed, pad_vec, mode=padding_mode.value)\n return zoomed[tuple(slice_vec)]\n\n\nclass Rotate90(Transform):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by `axes`.\n See np.rot90 for additional details:\n https://numpy.org/doc/stable/reference/generated/numpy.rot90.html.\n\n \"\"\"\n\n def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None:\n \"\"\"\n Args:\n k: number of times to rotate by 90 degrees.\n spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.\n Default: (0, 1), this is the first two axis in spatial dimensions.\n If axis is negative it counts from the last to the first axis.\n \"\"\"\n self.k = k\n spatial_axes_: Tuple[int, int] = ensure_tuple(spatial_axes) # type: 
ignore\n if len(spatial_axes_) != 2:\n raise ValueError(\"spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.\")\n self.spatial_axes = spatial_axes_\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n\n result: np.ndarray = np.rot90(img, self.k, map_spatial_axes(img.ndim, self.spatial_axes))\n return result.astype(img.dtype)\n\n\nclass RandRotate90(RandomizableTransform):\n \"\"\"\n With probability `prob`, input arrays are rotated by 90 degrees\n in the plane specified by `spatial_axes`.\n \"\"\"\n\n def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1)) -> None:\n \"\"\"\n Args:\n prob: probability of rotating.\n (Default 0.1, with 10% probability it returns a rotated array)\n max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`, (Default 3).\n spatial_axes: 2 int numbers, defines the plane to rotate with 2 spatial axes.\n Default: (0, 1), this is the first two axis in spatial dimensions.\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.max_k = max_k\n self.spatial_axes = spatial_axes\n\n self._rand_k = 0\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self._rand_k = self.R.randint(self.max_k) + 1\n super().randomize(None)\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n rotator = Rotate90(self._rand_k, self.spatial_axes)\n return rotator(img)\n\n\nclass RandRotate(RandomizableTransform):\n \"\"\"\n Randomly rotate the input arrays.\n\n Args:\n range_x: Range of rotation angle in radians in the plane defined by the first and second axes.\n If single number, angle is uniformly sampled from (-range_x, range_x).\n range_y: Range of rotation angle in radians in the plane defined by the first and third axes.\n If single number, angle is uniformly sampled from (-range_y, range_y).\n range_z: Range of rotation angle in radians in the plane defined by the second and third axes.\n If single number, angle is uniformly sampled from (-range_z, range_z).\n prob: Probability of rotation.\n keep_size: If it is False, the output shape is adapted so that the\n input array is contained completely in the output.\n If it is True, the output shape is the same as the input. Default is True.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n align_corners: Defaults to False.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If None, use the data type of input data. 
To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n\n def __init__(\n self,\n range_x: Union[Tuple[float, float], float] = 0.0,\n range_y: Union[Tuple[float, float], float] = 0.0,\n range_z: Union[Tuple[float, float], float] = 0.0,\n prob: float = 0.1,\n keep_size: bool = True,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: DtypeLike = np.float64,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.range_x = ensure_tuple(range_x)\n if len(self.range_x) == 1:\n self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))\n self.range_y = ensure_tuple(range_y)\n if len(self.range_y) == 1:\n self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))\n self.range_z = ensure_tuple(range_z)\n if len(self.range_z) == 1:\n self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))\n\n self.keep_size = keep_size\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n self.align_corners = align_corners\n self.dtype = dtype\n\n self.x = 0.0\n self.y = 0.0\n self.z = 0.0\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])\n self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])\n self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])\n\n def __call__(\n self,\n img: np.ndarray,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n align_corners: Optional[bool] = None,\n dtype: DtypeLike = None,\n ) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n align_corners: Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n dtype: data type for resampling computation. Defaults to ``self.dtype``.\n If None, use the data type of input data. To be compatible with other modules,\n the output data type is always ``np.float32``.\n \"\"\"\n self.randomize()\n if not self._do_transform:\n return img\n rotator = Rotate(\n angle=self.x if img.ndim == 3 else (self.x, self.y, self.z),\n keep_size=self.keep_size,\n mode=mode or self.mode,\n padding_mode=padding_mode or self.padding_mode,\n align_corners=self.align_corners if align_corners is None else align_corners,\n dtype=dtype or self.dtype or img.dtype,\n )\n return np.array(rotator(img))\n\n\nclass RandFlip(RandomizableTransform):\n \"\"\"\n Randomly flips the image along axes. Preserves shape.\n See numpy.flip for additional details.\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html\n\n Args:\n prob: Probability of flipping.\n spatial_axis: Spatial axes along which to flip over. 
Default is None.\n \"\"\"\n\n def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:\n RandomizableTransform.__init__(self, prob)\n self.flipper = Flip(spatial_axis=spatial_axis)\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n self.randomize(None)\n if not self._do_transform:\n return img\n return self.flipper(img)\n\n\nclass RandAxisFlip(RandomizableTransform):\n \"\"\"\n Randomly select a spatial axis and flip along it.\n See numpy.flip for additional details.\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html\n\n Args:\n prob: Probability of flipping.\n\n \"\"\"\n\n def __init__(self, prob: float = 0.1) -> None:\n RandomizableTransform.__init__(self, min(max(prob, 0.0), 1.0))\n self._axis: Optional[int] = None\n\n def randomize(self, data: np.ndarray) -> None:\n super().randomize(None)\n self._axis = self.R.randint(data.ndim - 1)\n\n def __call__(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape: (num_channels, H[, W, ..., ]),\n \"\"\"\n self.randomize(data=img)\n if not self._do_transform:\n return img\n flipper = Flip(spatial_axis=self._axis)\n return flipper(img)\n\n\nclass RandZoom(RandomizableTransform):\n \"\"\"\n Randomly zooms input arrays with given probability within given zoom range.\n\n Args:\n prob: Probability of zooming.\n min_zoom: Min zoom factor. Can be float or sequence same size as image.\n If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims\n to keep the original spatial shape ratio.\n If a sequence, min_zoom should contain one value for each spatial axis.\n If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.\n max_zoom: Max zoom factor. Can be float or sequence same size as image.\n If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims\n to keep the original spatial shape ratio.\n If a sequence, max_zoom should contain one value for each spatial axis.\n If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``\"area\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n padding_mode: {``\"constant\"``, ``\"edge``\", ``\"linear_ramp``\", ``\"maximum``\", ``\"mean``\", `\"median``\",\n ``\"minimum``\", `\"reflect``\", ``\"symmetric``\", ``\"wrap``\", ``\"empty``\", ``\"<function>``\"}\n The mode to pad data after zooming.\n See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. 
Default: None.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n keep_size: Should keep original size (pad if needed), default is True.\n \"\"\"\n\n def __init__(\n self,\n prob: float = 0.1,\n min_zoom: Union[Sequence[float], float] = 0.9,\n max_zoom: Union[Sequence[float], float] = 1.1,\n mode: Union[InterpolateMode, str] = InterpolateMode.AREA,\n padding_mode: Union[NumpyPadMode, str] = NumpyPadMode.EDGE,\n align_corners: Optional[bool] = None,\n keep_size: bool = True,\n ) -> None:\n RandomizableTransform.__init__(self, prob)\n self.min_zoom = ensure_tuple(min_zoom)\n self.max_zoom = ensure_tuple(max_zoom)\n if len(self.min_zoom) != len(self.max_zoom):\n raise AssertionError(\"min_zoom and max_zoom must have same length.\")\n self.mode: InterpolateMode = InterpolateMode(mode)\n self.padding_mode: NumpyPadMode = NumpyPadMode(padding_mode)\n self.align_corners = align_corners\n self.keep_size = keep_size\n\n self._zoom: Sequence[float] = [1.0]\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]\n\n def __call__(\n self,\n img: np.ndarray,\n mode: Optional[Union[InterpolateMode, str]] = None,\n padding_mode: Optional[Union[NumpyPadMode, str]] = None,\n align_corners: Optional[bool] = None,\n ) -> np.ndarray:\n \"\"\"\n Args:\n img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).\n mode: {``\"nearest\"``, ``\"linear\"``, ``\"bilinear\"``, ``\"bicubic\"``, ``\"trilinear\"``, ``\"area\"``}\n The interpolation mode. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n padding_mode: {``\"constant\"``, ``\"edge``\", ``\"linear_ramp``\", ``\"maximum``\", ``\"mean``\", `\"median``\",\n ``\"minimum``\", `\"reflect``\", ``\"symmetric``\", ``\"wrap``\", ``\"empty``\", ``\"<function>``\"}\n The mode to pad data after zooming, default to ``self.padding_mode``.\n See also: https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n align_corners: This only has an effect when mode is\n 'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate\n \"\"\"\n # match the spatial image dim\n self.randomize()\n _dtype = np.float32\n if not self._do_transform:\n return img.astype(_dtype)\n if len(self._zoom) == 1:\n # to keep the spatial shape ratio, use same random zoom factor for all dims\n self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 1)\n elif len(self._zoom) == 2 and img.ndim > 3:\n # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim\n self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1])\n zoomer = Zoom(self._zoom, keep_size=self.keep_size)\n return np.asarray(\n zoomer(\n img,\n mode=mode or self.mode,\n padding_mode=padding_mode or self.padding_mode,\n align_corners=self.align_corners if align_corners is None else align_corners,\n ),\n dtype=_dtype,\n )\n\n\nclass AffineGrid(Transform):\n \"\"\"\n Affine transforms on the coordinates.\n\n Args:\n rotate_params: angle range in radians. rotate_params[0] with be used to generate the 1st rotation\n parameter from `uniform[-rotate_params[0], rotate_params[0])`. 
Similarly, `rotate_params[1]` and\n `rotate_params[2]` are used in 3D affine for the range of 2nd and 3rd axes.\n shear_params: shear_params[0] with be used to generate the 1st shearing parameter from\n `uniform[-shear_params[0], shear_params[0])`. Similarly, `shear_params[1]` to\n `shear_params[N]` controls the range of the uniform distribution used to generate the 2nd to\n N-th parameter.\n translate_params : translate_params[0] with be used to generate the 1st shift parameter from\n `uniform[-translate_params[0], translate_params[0])`. Similarly, `translate_params[1]`\n to `translate_params[N]` controls the range of the uniform distribution used to generate\n the 2nd to N-th parameter.\n scale_params: scale_params[0] with be used to generate the 1st scaling factor from\n `uniform[-scale_params[0], scale_params[0]) + 1.0`. Similarly, `scale_params[1]` to\n `scale_params[N]` controls the range of the uniform distribution used to generate the 2nd to\n N-th parameter.\n as_tensor_output: whether to output tensor instead of numpy array.\n defaults to True.\n device: device to store the output grid data.\n affine: If applied, ignore the params (`rotate_params`, etc.) and use the\n supplied matrix. Should be square with each side = num of image spatial\n dimensions + 1.\n\n \"\"\"\n\n def __init__(\n self,\n rotate_params: Optional[Union[Sequence[float], float]] = None,\n shear_params: Optional[Union[Sequence[float], float]] = None,\n translate_params: Optional[Union[Sequence[float], float]] = None,\n scale_params: Optional[Union[Sequence[float], float]] = None,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n affine: Optional[Union[np.ndarray, torch.Tensor]] = None,\n ) -> None:\n self.rotate_params = rotate_params\n self.shear_params = shear_params\n self.translate_params = translate_params\n self.scale_params = scale_params\n\n self.as_tensor_output = as_tensor_output\n self.device = device\n\n self.affine = affine\n\n def __call__(\n self,\n spatial_size: Optional[Sequence[int]] = None,\n grid: Optional[Union[np.ndarray, torch.Tensor]] = None,\n ) -> Tuple[Union[np.ndarray, torch.Tensor], Union[np.ndarray, torch.Tensor]]:\n \"\"\"\n Args:\n spatial_size: output grid size.\n grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.\n\n Raises:\n ValueError: When ``grid=None`` and ``spatial_size=None``. 
Incompatible values.\n\n \"\"\"\n if grid is None:\n if spatial_size is not None:\n grid = create_grid(spatial_size)\n else:\n raise ValueError(\"Incompatible values: grid=None and spatial_size=None.\")\n\n if self.affine is None:\n spatial_dims = len(grid.shape) - 1\n affine = np.eye(spatial_dims + 1)\n if self.rotate_params:\n affine = affine @ create_rotate(spatial_dims, self.rotate_params)\n if self.shear_params:\n affine = affine @ create_shear(spatial_dims, self.shear_params)\n if self.translate_params:\n affine = affine @ create_translate(spatial_dims, self.translate_params)\n if self.scale_params:\n affine = affine @ create_scale(spatial_dims, self.scale_params)\n else:\n affine = self.affine\n\n if isinstance(affine, np.ndarray):\n affine = torch.as_tensor(np.ascontiguousarray(affine))\n\n grid = torch.tensor(grid) if not isinstance(grid, torch.Tensor) else grid.detach().clone()\n if self.device:\n affine = affine.to(self.device)\n grid = grid.to(self.device)\n grid = (affine.float() @ grid.reshape((grid.shape[0], -1)).float()).reshape([-1] + list(grid.shape[1:]))\n if grid is None or not isinstance(grid, torch.Tensor):\n raise ValueError(\"Unknown grid.\")\n return grid if self.as_tensor_output else np.asarray(grid.cpu().numpy()), affine\n\n\nclass RandAffineGrid(RandomizableTransform):\n \"\"\"\n Generate randomised affine grid.\n \"\"\"\n\n def __init__(\n self,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n rotate_range: angle range in radians. If element `i` is iterable, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can\n be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range\n `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0\n and nothing for the remaining dimensions.\n shear_range: shear_range with format matching `rotate_range`.\n translate_range: translate_range with format matching `rotate_range`.\n scale_range: scaling_range with format matching `rotate_range`. 
A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1).\n as_tensor_output: whether to output tensor instead of numpy array.\n defaults to True.\n device: device to store the output grid data.\n\n See also:\n - :py:meth:`monai.transforms.utils.create_rotate`\n - :py:meth:`monai.transforms.utils.create_shear`\n - :py:meth:`monai.transforms.utils.create_translate`\n - :py:meth:`monai.transforms.utils.create_scale`\n \"\"\"\n self.rotate_range = ensure_tuple(rotate_range)\n self.shear_range = ensure_tuple(shear_range)\n self.translate_range = ensure_tuple(translate_range)\n self.scale_range = ensure_tuple(scale_range)\n\n self.rotate_params: Optional[List[float]] = None\n self.shear_params: Optional[List[float]] = None\n self.translate_params: Optional[List[float]] = None\n self.scale_params: Optional[List[float]] = None\n\n self.as_tensor_output = as_tensor_output\n self.device = device\n self.affine: Optional[Union[np.ndarray, torch.Tensor]] = None\n\n def _get_rand_param(self, param_range, add_scalar: float = 0.0):\n out_param = []\n for f in param_range:\n if issequenceiterable(f):\n if len(f) != 2:\n raise ValueError(\"If giving range as [min,max], should only have two elements per dim.\")\n out_param.append(self.R.uniform(f[0], f[1]) + add_scalar)\n elif f is not None:\n out_param.append(self.R.uniform(-f, f) + add_scalar)\n return out_param\n\n def randomize(self, data: Optional[Any] = None) -> None:\n self.rotate_params = self._get_rand_param(self.rotate_range)\n self.shear_params = self._get_rand_param(self.shear_range)\n self.translate_params = self._get_rand_param(self.translate_range)\n self.scale_params = self._get_rand_param(self.scale_range, 1.0)\n\n def __call__(\n self,\n spatial_size: Optional[Sequence[int]] = None,\n grid: Optional[Union[np.ndarray, torch.Tensor]] = None,\n ) -> Union[np.ndarray, torch.Tensor]:\n \"\"\"\n Args:\n spatial_size: output grid size.\n grid: grid to be transformed. 
Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.\n\n Returns:\n a 2D (3xHxW) or 3D (4xHxWxD) grid.\n \"\"\"\n self.randomize()\n affine_grid = AffineGrid(\n rotate_params=self.rotate_params,\n shear_params=self.shear_params,\n translate_params=self.translate_params,\n scale_params=self.scale_params,\n as_tensor_output=self.as_tensor_output,\n device=self.device,\n )\n grid, self.affine = affine_grid(spatial_size, grid)\n return grid\n\n def get_transformation_matrix(self) -> Optional[Union[np.ndarray, torch.Tensor]]:\n \"\"\"Get the most recently applied transformation matrix\"\"\"\n return self.affine\n\n\nclass RandDeformGrid(RandomizableTransform):\n \"\"\"\n Generate random deformation grid.\n \"\"\"\n\n def __init__(\n self,\n spacing: Union[Sequence[float], float],\n magnitude_range: Tuple[float, float],\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n spacing: spacing of the grid in 2D or 3D.\n e.g., spacing=(1, 1) indicates pixel-wise deformation in 2D,\n spacing=(1, 1, 1) indicates voxel-wise deformation in 3D,\n spacing=(2, 2) indicates deformation field defined on every other pixel in 2D.\n magnitude_range: the random offsets will be generated from\n `uniform[magnitude[0], magnitude[1])`.\n as_tensor_output: whether to output tensor instead of numpy array.\n defaults to True.\n device: device to store the output grid data.\n \"\"\"\n self.spacing = spacing\n self.magnitude = magnitude_range\n\n self.rand_mag = 1.0\n self.as_tensor_output = as_tensor_output\n self.random_offset = 0.0\n self.device = device\n\n def randomize(self, grid_size: Sequence[int]) -> None:\n self.random_offset = self.R.normal(size=([len(grid_size)] + list(grid_size))).astype(np.float32)\n self.rand_mag = self.R.uniform(self.magnitude[0], self.magnitude[1])\n\n def __call__(self, spatial_size: Sequence[int]):\n \"\"\"\n Args:\n spatial_size: spatial size of the grid.\n \"\"\"\n self.spacing = fall_back_tuple(self.spacing, (1.0,) * len(spatial_size))\n control_grid = create_control_grid(spatial_size, self.spacing)\n self.randomize(control_grid.shape[1:])\n control_grid[: len(spatial_size)] += self.rand_mag * self.random_offset\n if self.as_tensor_output:\n control_grid = torch.as_tensor(np.ascontiguousarray(control_grid), device=self.device)\n return control_grid\n\n\nclass Resample(Transform):\n def __init__(\n self,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n as_tensor_output: bool = False,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n computes output image using values from `img`, locations from `grid` using pytorch.\n supports spatially 2D or 3D (num_channels, H, W[, D]).\n\n Args:\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n as_tensor_output: whether to return a torch tensor. 
Defaults to False.\n device: device on which the tensor will be allocated.\n \"\"\"\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n self.as_tensor_output = as_tensor_output\n self.device = device\n\n def __call__(\n self,\n img: Union[np.ndarray, torch.Tensor],\n grid: Optional[Union[np.ndarray, torch.Tensor]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> Union[np.ndarray, torch.Tensor]:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]).\n grid: shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n\n if not isinstance(img, torch.Tensor):\n img = torch.as_tensor(np.ascontiguousarray(img))\n if grid is None:\n raise AssertionError(\"Error, grid argument must be supplied as an ndarray or tensor \")\n grid = torch.tensor(grid) if not isinstance(grid, torch.Tensor) else grid.detach().clone()\n if self.device:\n img = img.to(self.device)\n grid = grid.to(self.device)\n\n if USE_COMPILED:\n for i, dim in enumerate(img.shape[1:]):\n grid[i] += (dim - 1.0) / 2.0\n grid = grid[:-1] / grid[-1:]\n grid = grid.permute(list(range(grid.ndimension()))[1:] + [0])\n _padding_mode = self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value\n if _padding_mode == \"zeros\":\n bound = 7\n elif _padding_mode == \"border\":\n bound = 0\n else:\n bound = 1\n _interp_mode = self.mode.value if mode is None else GridSampleMode(mode).value\n out = grid_pull(\n img.unsqueeze(0).float(),\n grid.unsqueeze(0).float(),\n bound=bound,\n extrapolate=True,\n interpolation=1 if _interp_mode == \"bilinear\" else _interp_mode,\n )[0]\n else:\n for i, dim in enumerate(img.shape[1:]):\n grid[i] = 2.0 * grid[i] / (dim - 1.0)\n grid = grid[:-1] / grid[-1:]\n index_ordering: List[int] = list(range(img.ndimension() - 2, -1, -1))\n grid = grid[index_ordering]\n grid = grid.permute(list(range(grid.ndimension()))[1:] + [0])\n out = torch.nn.functional.grid_sample(\n img.unsqueeze(0).float(),\n grid.unsqueeze(0).float(),\n mode=self.mode.value if mode is None else GridSampleMode(mode).value,\n padding_mode=self.padding_mode.value if padding_mode is None else GridSamplePadMode(padding_mode).value,\n align_corners=True,\n )[0]\n if self.as_tensor_output:\n return torch.as_tensor(out)\n return np.asarray(out.cpu().numpy())\n\n\nclass Affine(Transform):\n \"\"\"\n Transform ``img`` given the affine parameters.\n \"\"\"\n\n def __init__(\n self,\n rotate_params: Optional[Union[Sequence[float], float]] = None,\n shear_params: Optional[Union[Sequence[float], float]] = None,\n translate_params: Optional[Union[Sequence[float], float]] = None,\n scale_params: Optional[Union[Sequence[float], float]] = None,\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n as_tensor_output: bool = False,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n The affine 
transformations are applied in rotate, shear, translate, scale order.\n\n Args:\n rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.\n Defaults to no rotation.\n shear_params: a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.\n translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in\n pixel/voxel relative to the center of the input image. Defaults to no translation.\n scale_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Defaults to no scaling.\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if the components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n as_tensor_output: the computation is implemented using pytorch tensors, this option specifies\n whether to convert it back to numpy arrays.\n device: device on which the tensor will be allocated.\n \"\"\"\n self.affine_grid = AffineGrid(\n rotate_params=rotate_params,\n shear_params=shear_params,\n translate_params=translate_params,\n scale_params=scale_params,\n as_tensor_output=True,\n device=device,\n )\n self.resampler = Resample(as_tensor_output=as_tensor_output, device=device)\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n\n def __call__(\n self,\n img: Union[np.ndarray, torch.Tensor],\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> Tuple[Union[np.ndarray, torch.Tensor], Union[np.ndarray, torch.Tensor]]:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]),\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].\n if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n grid, affine = self.affine_grid(spatial_size=sp_size)\n return (\n self.resampler(img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode),\n affine,\n )\n\n\nclass RandAffine(RandomizableTransform):\n \"\"\"\n Random affine transform.\n \"\"\"\n\n def __init__(\n self,\n prob: float = 0.1,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n as_tensor_output: bool = True,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n prob: probability of returning a randomized affine grid.\n defaults to 0.1, with 10% chance returns a randomized grid.\n rotate_range: angle range in radians. If element `i` is iterable, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can\n be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range\n `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0\n and nothing for the remaining dimensions.\n shear_range: shear_range with format matching `rotate_range`.\n translate_range: translate_range with format matching `rotate_range`.\n scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1).\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if the components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n as_tensor_output: the computation is implemented using pytorch tensors, this option specifies\n whether to convert it back to numpy arrays.\n device: device on which the tensor will be allocated.\n\n See also:\n - :py:class:`RandAffineGrid` for the random affine parameters configurations.\n - :py:class:`Affine` for the affine transformation parameters configurations.\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n\n self.rand_affine_grid = RandAffineGrid(\n rotate_range=rotate_range,\n shear_range=shear_range,\n translate_range=translate_range,\n scale_range=scale_range,\n as_tensor_output=True,\n device=device,\n )\n self.resampler = Resample(as_tensor_output=as_tensor_output, device=device)\n\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"RandAffine\":\n self.rand_affine_grid.set_random_state(seed, state)\n super().set_random_state(seed, state)\n return self\n\n def randomize(self, data: Optional[Any] = None) -> None:\n super().randomize(None)\n self.rand_affine_grid.randomize()\n\n def __call__(\n self,\n img: Union[np.ndarray, torch.Tensor],\n spatial_size: Optional[Union[Sequence[int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> Union[np.ndarray, torch.Tensor]:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W[, D]),\n spatial_size: output image spatial size.\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].\n if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n self.randomize()\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n if self._do_transform:\n grid = self.rand_affine_grid(spatial_size=sp_size)\n else:\n grid = create_grid(spatial_size=sp_size)\n return self.resampler(\n img=img, grid=grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode\n )\n\n\nclass Rand2DElastic(RandomizableTransform):\n \"\"\"\n Random elastic deformation and affine in 2D\n \"\"\"\n\n def __init__(\n self,\n spacing: Union[Tuple[float, float], float],\n magnitude_range: Tuple[float, float],\n prob: float = 0.1,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n spatial_size: Optional[Union[Tuple[int, int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n as_tensor_output: bool = False,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n spacing : distance in between the control points.\n magnitude_range: the random offsets will be generated from ``uniform[magnitude[0], magnitude[1])``.\n prob: probability of returning a randomized elastic transform.\n defaults to 0.1, with 10% chance returns a randomized elastic transform,\n otherwise returns a ``spatial_size`` centered area extracted from the input image.\n rotate_range: angle range in radians. If element `i` is iterable, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can\n be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range\n `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0\n and nothing for the remaining dimensions.\n shear_range: shear_range with format matching `rotate_range`.\n translate_range: translate_range with format matching `rotate_range`.\n scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1).\n spatial_size: specifying output image spatial size [h, w].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if the components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n as_tensor_output: the computation is implemented using pytorch tensors, this option specifies\n whether to convert it back to numpy arrays.\n device: device on which the tensor will be allocated.\n\n See also:\n - :py:class:`RandAffineGrid` for the random affine parameters configurations.\n - :py:class:`Affine` for the affine transformation parameters configurations.\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.deform_grid = RandDeformGrid(\n spacing=spacing, magnitude_range=magnitude_range, as_tensor_output=True, device=device\n )\n self.rand_affine_grid = RandAffineGrid(\n rotate_range=rotate_range,\n shear_range=shear_range,\n translate_range=translate_range,\n scale_range=scale_range,\n as_tensor_output=True,\n device=device,\n )\n self.resampler = Resample(as_tensor_output=as_tensor_output, device=device)\n\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"Rand2DElastic\":\n self.deform_grid.set_random_state(seed, state)\n self.rand_affine_grid.set_random_state(seed, state)\n super().set_random_state(seed, state)\n return self\n\n def randomize(self, spatial_size: Sequence[int]) -> None:\n super().randomize(None)\n self.deform_grid.randomize(spatial_size)\n self.rand_affine_grid.randomize()\n\n def __call__(\n self,\n img: Union[np.ndarray, torch.Tensor],\n spatial_size: Optional[Union[Tuple[int, int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> Union[np.ndarray, torch.Tensor]:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W),\n spatial_size: specifying output image spatial size [h, w].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n self.randomize(spatial_size=sp_size)\n if self._do_transform:\n grid = self.deform_grid(spatial_size=sp_size)\n grid = self.rand_affine_grid(grid=grid)\n grid = torch.nn.functional.interpolate( # type: ignore\n recompute_scale_factor=True,\n input=torch.as_tensor(grid).unsqueeze(0),\n scale_factor=list(ensure_tuple(self.deform_grid.spacing)),\n mode=InterpolateMode.BICUBIC.value,\n align_corners=False,\n )\n grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])\n else:\n grid = create_grid(spatial_size=sp_size)\n return self.resampler(img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode)\n\n\nclass Rand3DElastic(RandomizableTransform):\n \"\"\"\n Random elastic deformation and affine in 3D\n \"\"\"\n\n def __init__(\n self,\n sigma_range: Tuple[float, float],\n magnitude_range: Tuple[float, float],\n prob: float = 0.1,\n rotate_range: RandRange = None,\n shear_range: RandRange = None,\n translate_range: RandRange = None,\n scale_range: RandRange = None,\n spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.REFLECTION,\n as_tensor_output: bool = False,\n device: Optional[torch.device] = None,\n ) -> None:\n \"\"\"\n Args:\n sigma_range: a Gaussian kernel with standard deviation sampled from\n ``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.\n magnitude_range: the random offsets on the grid will be generated from\n ``uniform[magnitude[0], magnitude[1])``.\n prob: probability of returning a randomized elastic transform.\n defaults to 0.1, with 10% chance returns a randomized elastic transform,\n otherwise returns a ``spatial_size`` centered area extracted from the input image.\n rotate_range: angle range in radians. If element `i` is iterable, then\n `uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter\n for the ith dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used. This can\n be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be in range\n `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]` for dim0\n and nothing for the remaining dimensions.\n shear_range: shear_range with format matching `rotate_range`.\n translate_range: translate_range with format matching `rotate_range`.\n scale_range: scaling_range with format matching `rotate_range`. A value of 1.0 is added to the result.\n This allows 0 to correspond to no change (i.e., a scaling of 1).\n spatial_size: specifying output image spatial size [h, w, d].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n if the components of the `spatial_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted\n to `(32, 32, 64)` if the third spatial dimension size of img is `64`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. 
Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``\"reflection\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n as_tensor_output: the computation is implemented using pytorch tensors, this option specifies\n whether to convert it back to numpy arrays.\n device: device on which the tensor will be allocated.\n\n See also:\n - :py:class:`RandAffineGrid` for the random affine parameters configurations.\n - :py:class:`Affine` for the affine transformation parameters configurations.\n \"\"\"\n RandomizableTransform.__init__(self, prob)\n self.rand_affine_grid = RandAffineGrid(rotate_range, shear_range, translate_range, scale_range, True, device)\n self.resampler = Resample(as_tensor_output=as_tensor_output, device=device)\n\n self.sigma_range = sigma_range\n self.magnitude_range = magnitude_range\n self.spatial_size = spatial_size\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n self.device = device\n\n self.rand_offset = None\n self.magnitude = 1.0\n self.sigma = 1.0\n\n def set_random_state(\n self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None\n ) -> \"Rand3DElastic\":\n self.rand_affine_grid.set_random_state(seed, state)\n super().set_random_state(seed, state)\n return self\n\n def randomize(self, grid_size: Sequence[int]) -> None:\n super().randomize(None)\n if self._do_transform:\n self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32)\n self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1])\n self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1])\n self.rand_affine_grid.randomize()\n\n def __call__(\n self,\n img: Union[np.ndarray, torch.Tensor],\n spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,\n mode: Optional[Union[GridSampleMode, str]] = None,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = None,\n ) -> Union[np.ndarray, torch.Tensor]:\n \"\"\"\n Args:\n img: shape must be (num_channels, H, W, D),\n spatial_size: specifying spatial 3D output image spatial size [h, w, d].\n if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,\n the transform will use the spatial size of `img`.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n Interpolation mode to calculate output values. Defaults to ``self.mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. Defaults to ``self.padding_mode``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n sp_size = fall_back_tuple(spatial_size or self.spatial_size, img.shape[1:])\n self.randomize(grid_size=sp_size)\n grid = create_grid(spatial_size=sp_size)\n if self._do_transform:\n if self.rand_offset is None:\n raise AssertionError\n grid = torch.as_tensor(np.ascontiguousarray(grid), device=self.device)\n gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=self.device)\n offset = torch.as_tensor(self.rand_offset, device=self.device).unsqueeze(0)\n grid[:3] += gaussian(offset)[0] * self.magnitude\n grid = self.rand_affine_grid(grid=grid)\n return self.resampler(img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode)\n"
] |
[
[
"numpy.pad",
"numpy.allclose",
"numpy.meshgrid",
"numpy.asarray",
"numpy.linalg.inv",
"numpy.eye",
"numpy.ascontiguousarray",
"torch.tensor",
"numpy.append",
"numpy.any",
"numpy.array",
"torch.as_tensor"
]
] |
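Note on the MONAI record above: the non-USE_COMPILED branch of Resample follows a fixed recipe: build a homogeneous grid centered on the image, rescale each spatial row to [-1, 1], divide out the homogeneous row, reverse the axis order to the (x, y) convention that grid_sample expects, and sample with align_corners=True. Below is a minimal, self-contained 2D sketch of that recipe, assuming only numpy and torch; centered_grid is a stand-in for monai's create_grid, not MONAI code.

import numpy as np
import torch
import torch.nn.functional as F

def centered_grid(h, w):
    # (y, x, 1) coordinates centered on the image, mirroring create_grid.
    ys = np.arange(h, dtype=np.float32) - (h - 1) / 2.0
    xs = np.arange(w, dtype=np.float32) - (w - 1) / 2.0
    gy, gx = np.meshgrid(ys, xs, indexing="ij")
    return torch.as_tensor(np.stack([gy, gx, np.ones_like(gy)]))

def resample2d(img, grid, mode="bilinear"):
    # img: (C, H, W); grid: (3, H', W') homogeneous sampling locations.
    for i, dim in enumerate(img.shape[1:]):
        grid[i] = 2.0 * grid[i] / (dim - 1.0)   # centered pixels -> [-1, 1]
    grid = grid[:-1] / grid[-1:]                # homogeneous divide
    grid = grid[[1, 0]].permute(1, 2, 0)        # (y, x) rows -> (..., 2) in (x, y) order
    return F.grid_sample(img.unsqueeze(0).float(), grid.unsqueeze(0),
                         mode=mode, padding_mode="border", align_corners=True)[0]

img = torch.arange(12, dtype=torch.float32).reshape(1, 3, 4)
print(torch.allclose(resample2d(img, centered_grid(3, 4)), img))  # True: identity grid

An identity grid round-trips the image exactly, which is a useful sanity check before composing affine or deformation grids on top of it.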
JoeyLI17/2019_11_30_AV_System_Integration_ROS
|
[
"32ea70758d1082660dd5be09098bcf7a23f1338e"
] |
[
"ros/src/waypoint_updater/waypoint_updater.py"
] |
[
"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\nfrom std_msgs.msg import Int32\nfrom scipy import spatial\nimport numpy as np\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 100 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = 0.5\nLOOP_RATE = 5\n\nclass WaypointUpdater(object):\n def __init__(self):\n \n #rospy.init_node('waypoint_updater',log_level=rospy.DEBUG)\n rospy.init_node('waypoint_updater')\n\n self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)\n\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.pose = None\n \n self.stopline_idx = -1\n self.obstacle_idx = -1\n self.stop_buffer = 2\n\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb) \n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=10)\n \n # these recieve only a waypoint index that refers back to the self.base_waypoints list\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size=10)\n \n # optional: \n #rospy.Subscriber('/obstacle_waypoint', Int32, self.obstacle_cb)\n\n self.loop()\n \n def loop(self):\n \n rate = rospy.Rate(LOOP_RATE)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints and self.waypoint_tree:\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n #rospy.logerr(\"cwp %s\",closest_waypoint_idx)\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep() \n\n def get_closest_waypoint_idx(self):\n\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n \n if self.waypoint_tree == None:\n rospy.logerr(\"error: get_closest_waypoint_idx - waypoint_tree (kdtree) not assigned\")\n return\n \n closest_idx = self.waypoint_tree.query([x,y],1)[1]\n \n # ahead or behind?\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx-1]\n \n # Equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n \n return closest_idx\n \n def publish_waypoints(self, closest_idx):\n \n #rospy.logerr(\"--- closest: %s: \",closest_idx)\n \n lane = Lane()\n lane.header = self.base_waypoints.header\n \n if self.base_waypoints.waypoints == None:\n rospy.logerr(\"error: self.base_waypoints.waypoints== none!\")\n return\n\n size = len(self.base_waypoints.waypoints)\n sl_idx = self.stopline_idx\n\n lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx+LOOKAHEAD_WPS]\n \n if sl_idx >= 0 and sl_idx < size:\n #rospy.logerr(\"decelerate waypoint\")\n lane.waypoints = self.decelerate_waypoints(closest_idx,lane.waypoints) \n \n self.final_waypoints_pub.publish(lane)\n \n def pose_cb(self, msg): \n 
self.pose = msg\n \n def waypoints_cb(self, waypoints):\n \n #rospy.logerr(\"wp updater: waypoints_cp\") \n size = len(waypoints.waypoints)\n \n # make a copy as they are only sent once\n self.base_waypoints = waypoints\n \n #use scipi KDTree to get closes waypoint\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x,waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = spatial.KDTree(self.waypoints_2d) \n else:\n rospy.logerr(\"self.waypoints_2d already assigned?: %s\",self.waypoints_2d)\n \n def traffic_cb(self, msg):\n self.stopline_idx = msg.data\n #rospy.logerr(\"waypoint_updater: way point index %s \",self.stopline_idx)\n \n # def obstacle_cb(self, msg):\n # self.obstacle_idx = msg\n \n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n def decelerate_waypoints(self,closest_idx,wp_in):\n \n wp_out = []\n\n for i, wp in enumerate(wp_in):\n \n p = Waypoint()\n p.pose = wp.pose\n \n stop_idx = max(self.stopline_idx - closest_idx - self.stop_buffer,0)\n \n dist = self.distance(self.base_waypoints.waypoints, i, stop_idx)\n vel = math.sqrt(2*MAX_DECEL*dist)\n if vel < 1.0:\n vel = 0\n \n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n wp_out.append(p)\n \n return wp_out\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n"
] |
[
[
"numpy.dot",
"numpy.array",
"scipy.spatial.KDTree"
]
] |
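Note on the waypoint_updater record above: decelerate_waypoints is plain constant-deceleration kinematics. From v^2 = 2*a*d, the speed commanded at a waypoint a distance d before the stop line is sqrt(2 * MAX_DECEL * d), zeroed below 1 m/s so the car actually comes to rest, and capped by the waypoint's original target speed. A minimal numpy sketch of that profile; CRUISE is an assumed cruise speed, not a value taken from the node.

import numpy as np

MAX_DECEL = 0.5   # m/s^2, matches the node's constant
CRUISE = 11.0     # m/s, assumed original waypoint speed

def decel_profile(dists_to_stop, cruise=CRUISE, max_decel=MAX_DECEL):
    vel = np.sqrt(2.0 * max_decel * np.maximum(dists_to_stop, 0.0))
    vel[vel < 1.0] = 0.0              # snap crawl speeds to a full stop
    return np.minimum(vel, cruise)    # never exceed the original target speed

d = np.arange(0.0, 60.0, 10.0)        # metres remaining to the stop line
print(decel_profile(d))               # 0 at the line, rising toward cruise

The node obtains d by summing segment lengths between waypoints with its distance() helper; taking the distances directly is the only simplification here.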
dema-software-solutions/paz-1
|
[
"e75ab1ba5b0980ee76ae6bd95545497665d69304"
] |
[
"paz/processors/keypoints.py"
] |
[
"from warnings import warn\n\nimport numpy as np\n\nfrom ..abstract import Processor\nfrom ..backend.keypoints import translate_keypoints\nfrom ..backend.keypoints import arguments_to_image_points2D\nfrom ..backend.keypoints import normalize_keypoints2D\nfrom ..backend.keypoints import denormalize_keypoints2D\nfrom ..backend.keypoints import normalize_keypoints\nfrom ..backend.keypoints import denormalize_keypoints\n\n\nclass ProjectKeypoints(Processor):\n \"\"\"Projects homogenous keypoints (4D) in the camera coordinates system into\n image coordinates using a projective transformation.\n\n # Arguments\n projector: Instance of ''paz.models.Project''.\n keypoints: Numpy array of shape ''(num_keypoints, 3)''\n \"\"\"\n def __init__(self, projector, keypoints):\n self.projector = projector\n self.keypoints = keypoints\n super(ProjectKeypoints, self).__init__()\n\n def call(self, world_to_camera):\n keypoints = np.matmul(self.keypoints, world_to_camera.T)\n keypoints = np.expand_dims(keypoints, 0)\n keypoints = self.projector.project(keypoints)[0]\n return keypoints\n\n\nclass NormalizeKeypoints2D(Processor):\n \"\"\"Transform keypoints in image-size coordinates to normalized coordinates.\n\n # Arguments\n image_size: List of two ints indicating ''(height, width)''\n \"\"\"\n def __init__(self, image_size):\n self.image_size = image_size\n super(NormalizeKeypoints2D, self).__init__()\n\n def call(self, keypoints):\n height, width = self.image_size[0:2]\n keypoints = normalize_keypoints2D(keypoints, height, width)\n return keypoints\n\n\nclass DenormalizeKeypoints2D(Processor):\n \"\"\"Transform normalized keypoints coordinates into image-size coordinates.\n\n # Arguments\n image_size: List of two floats having height and width of image.\n \"\"\"\n def __init__(self):\n super(DenormalizeKeypoints2D, self).__init__()\n\n def call(self, keypoints, image):\n height, width = image.shape[0:2]\n keypoints = denormalize_keypoints2D(keypoints, height, width)\n return keypoints\n\n\nclass NormalizeKeypoints(Processor):\n \"\"\"Transform keypoints in image-size coordinates to normalized coordinates.\n\n # Arguments\n image_size: List of two ints indicating ''(height, width)''\n \"\"\"\n def __init__(self, image_size):\n self.image_size = image_size\n warn('DEPRECATED please use normalize_points2D')\n super(NormalizeKeypoints, self).__init__()\n\n def call(self, keypoints):\n height, width = self.image_size[0:2]\n keypoints = normalize_keypoints(keypoints, height, width)\n return keypoints\n\n\nclass DenormalizeKeypoints(Processor):\n \"\"\"Transform normalized keypoints coordinates into image-size coordinates.\n\n # Arguments\n image_size: List of two floats having height and width of image.\n \"\"\"\n def __init__(self):\n warn('DEPRECATED please use denomarlize_points2D')\n super(DenormalizeKeypoints, self).__init__()\n\n def call(self, keypoints, image):\n height, width = image.shape[0:2]\n keypoints = denormalize_keypoints(keypoints, height, width)\n return keypoints\n\n\nclass RemoveKeypointsDepth(Processor):\n \"\"\"Removes Z component from keypoints.\n \"\"\"\n def __init__(self):\n super(RemoveKeypointsDepth, self).__init__()\n\n def call(self, keypoints):\n return keypoints[:, :2]\n\n\nclass PartitionKeypoints(Processor):\n \"\"\"Partitions keypoints from shape [num_keypoints, 2] into a list of the form\n ((2), (2), ....) 
and length equal to num_of_keypoints.\n \"\"\"\n def __init__(self):\n super(PartitionKeypoints, self).__init__()\n\n def call(self, keypoints):\n keypoints = np.vsplit(keypoints, len(keypoints))\n keypoints = [np.squeeze(keypoint) for keypoint in keypoints]\n partioned_keypoints = []\n for keypoint_arg, keypoint in enumerate(keypoints):\n partioned_keypoints.append(keypoint)\n return np.asarray(partioned_keypoints)\n\n\nclass ChangeKeypointsCoordinateSystem(Processor):\n \"\"\"Changes ``keypoints`` 2D coordinate system using ``box2D`` coordinates\n to locate the new origin at the openCV image origin (top-left).\n \"\"\"\n def __init__(self):\n super(ChangeKeypointsCoordinateSystem, self).__init__()\n\n def call(self, keypoints, box2D):\n x_min, y_min, x_max, y_max = box2D.coordinates\n keypoints[:, 0] = keypoints[:, 0] + x_min\n keypoints[:, 1] = keypoints[:, 1] + y_min\n return keypoints\n\n\nclass TranslateKeypoints(Processor):\n \"\"\"Applies a translation to keypoints.\n The translation is a list of length two indicating the x, y values.\n \"\"\"\n def __init__(self):\n super(TranslateKeypoints, self).__init__()\n\n def call(self, keypoints, translation):\n return translate_keypoints(keypoints, translation)\n\n\nclass ArgumentsToImageKeypoints2D(Processor):\n \"\"\"Convert array arguments into UV coordinates.\n\n Image plane\n\n (0,0)--------> (U)\n |\n |\n |\n v\n\n (V)\n\n # Arguments\n row_args: Array (num_rows).\n col_args: Array (num_cols).\n\n # Returns\n Array (num_cols, num_rows) representing points2D in UV space.\n\n # Notes\n Arguments are row args (V) and col args (U). Image points are in UV\n coordinates; thus, we concatenate them in that order\n i.e. [col_args, row_args]\n \"\"\"\n def __init__(self):\n super(ArgumentsToImageKeypoints2D, self).__init__()\n\n def call(self, row_args, col_args):\n image_points2D = arguments_to_image_points2D(row_args, col_args)\n return image_points2D\n"
] |
[
[
"numpy.asarray",
"numpy.squeeze",
"numpy.expand_dims",
"numpy.matmul"
]
] |
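Note on the paz record above: ChangeKeypointsCoordinateSystem is a pure translation. Keypoints predicted inside a detected crop are moved into full-image (top-left origin) coordinates by adding the box's x_min and y_min. A minimal numpy sketch of the same shift; to_image_coordinates is a hypothetical name.

import numpy as np

def to_image_coordinates(keypoints, box2D):
    # keypoints: (num_keypoints, 2) crop-local (x, y); box2D: (x_min, y_min, x_max, y_max).
    x_min, y_min = box2D[0], box2D[1]
    shifted = keypoints.copy()
    shifted[:, 0] += x_min
    shifted[:, 1] += y_min
    return shifted

kp = np.array([[10.0, 5.0], [0.0, 0.0]])
print(to_image_coordinates(kp, (100, 40, 180, 120)))  # [[110, 45], [100, 40]]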
anukaal/rlax
|
[
"caf976f6a808792e604de348187cda15b091085b"
] |
[
"rlax/_src/base.py"
] |
[
"# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utilities for RLax functions.\"\"\"\n\nfrom typing import Optional, Sequence, Union\nimport chex\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nArray = chex.Array\nNumeric = chex.Numeric\n\n\ndef batched_index(\n values: Array, indices: Array, keepdims: bool = False\n) -> Array:\n \"\"\"Index into the last dimension of a tensor, preserving all others dims.\n\n Args:\n values: a tensor of shape [..., D],\n indices: indices of shape [...].\n keepdims: whether to keep the final dimension.\n\n Returns:\n a tensor of shape [...] or [..., 1].\n \"\"\"\n indexed = jnp.take_along_axis(values, indices[..., None], axis=-1)\n if not keepdims:\n indexed = jnp.squeeze(indexed, axis=-1)\n return indexed\n\n\ndef one_hot(indices, num_classes, dtype=jnp.float32):\n \"\"\"Returns a one-hot version of indices.\n\n Args:\n indices: A tensor of indices.\n num_classes: Number of classes in the one-hot dimension.\n dtype: The dtype.\n\n Returns:\n The one-hot tensor. If indices' shape is [A, B, ...], shape is\n [A, B, ..., num_classes].\n \"\"\"\n labels = jnp.arange(num_classes)\n for _ in range(indices.ndim):\n labels = jnp.expand_dims(labels, axis=0)\n return jnp.array(\n indices[..., jnp.newaxis] == labels, dtype=dtype)\n\n\ndef lhs_broadcast(source, target):\n \"\"\"Ensures that source is compatible with target for broadcasting.\"\"\"\n same_shape = np.array(source.shape) == np.array(target.shape[:source.ndim])\n ones = np.array(source.shape) == np.ones((source.ndim,))\n if np.all(same_shape + ones):\n broadcast_shape = source.shape + (1,) * (target.ndim - source.ndim)\n return jnp.reshape(source, broadcast_shape)\n raise ValueError(\n \"source shape {} is not compatible with target shape {}\".format(\n source.shape, target.shape))\n\n\nclass AllSum:\n \"\"\"Helper for summing over elements in an array and over devices.\"\"\"\n\n def __init__(self, axis_name: Optional[str] = None):\n \"\"\"Sums locally and then over devices with the axis name provided.\"\"\"\n self._axis_name = axis_name\n\n def __call__(\n self, x: Array, axis: Optional[Union[int, Sequence[int]]] = None\n ) -> Numeric:\n s = jnp.sum(x, axis=axis)\n if self._axis_name:\n s = jax.lax.psum(s, axis_name=self._axis_name)\n return s\n"
] |
[
[
"numpy.all",
"numpy.array",
"numpy.ones"
]
] |
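Note on the rlax record above: batched_index is the standard gather-per-row idiom, jnp.take_along_axis with a trailing singleton axis added to the indices, squeezed away again unless keepdims is set. numpy exposes the same function with the same signature, so a framework-free sketch behaves identically; a typical use is selecting Q(s, a) for the action actually taken.

import numpy as np

def batched_index(values, indices, keepdims=False):
    # values: [..., D]; indices: [...] integer indices into the last axis.
    indexed = np.take_along_axis(values, indices[..., None], axis=-1)
    return indexed if keepdims else np.squeeze(indexed, axis=-1)

q = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])     # [batch, num_actions]
a = np.array([2, 0])                # one chosen action per batch element
print(batched_index(q, a))          # [3. 4.]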
songmeixu/snowfall
|
[
"1f79957e9716c3f980c151df5b1d77bc4bb7ce78"
] |
[
"egs/gigaspeech/asr/simple_v1/ctc_decode.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)\n# Copyright (c) 2020-2021 Johns Hopkins University (author: Piotr Żelasko)\n# Apache 2.0\n\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Union\n\nimport k2\nimport torch\nfrom k2 import Fsa, SymbolTable\nfrom kaldialign import edit_distance\n\nfrom lhotse import CutSet\nfrom lhotse.dataset import K2SpeechRecognitionDataset\nfrom lhotse.dataset import SingleCutSampler\nfrom snowfall.common import find_first_disambig_symbol\nfrom snowfall.common import get_phone_symbols\nfrom snowfall.common import get_texts\nfrom snowfall.common import load_checkpoint\nfrom snowfall.common import setup_logger\nfrom snowfall.decoding.graph import compile_HLG\nfrom snowfall.models import AcousticModel\nfrom snowfall.models.tdnn_lstm import TdnnLstm1b\nfrom snowfall.training.ctc_graph import build_ctc_topo\n\n\ndef decode(\n dataloader: torch.utils.data.DataLoader,\n model: AcousticModel,\n device: Union[str, torch.device],\n HLG: Fsa,\n symbols: SymbolTable,\n):\n num_batches = None\n try:\n num_batches = len(dataloader)\n except TypeError:\n pass\n num_cuts = 0\n results = [] # a list of pair (ref_words, hyp_words)\n for batch_idx, batch in enumerate(dataloader):\n feature = batch[\"inputs\"]\n supervisions = batch[\"supervisions\"]\n supervision_segments = torch.stack(\n (\n supervisions[\"sequence_idx\"],\n torch.floor_divide(\n supervisions[\"start_frame\"], model.subsampling_factor\n ),\n torch.floor_divide(\n supervisions[\"num_frames\"], model.subsampling_factor\n ),\n ),\n 1,\n ).to(torch.int32)\n indices = torch.argsort(supervision_segments[:, 2], descending=True)\n supervision_segments = supervision_segments[indices]\n texts = supervisions[\"text\"]\n assert feature.ndim == 3\n\n feature = feature.to(device)\n # at entry, feature is [N, T, C]\n feature = feature.permute(0, 2, 1) # now feature is [N, C, T]\n with torch.no_grad():\n nnet_output = model(feature)\n # nnet_output is [N, C, T]\n nnet_output = nnet_output.permute(0, 2, 1) # now nnet_output is [N, T, C]\n\n blank_bias = -3.0\n nnet_output[:, :, 0] += blank_bias\n\n dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision_segments)\n # assert HLG.is_cuda()\n assert (\n HLG.device == nnet_output.device\n ), f\"Check failed: HLG.device ({HLG.device}) == nnet_output.device ({nnet_output.device})\"\n # TODO(haowen): with a small `beam`, we may get empty `target_graph`,\n # thus `tot_scores` will be `inf`. 
Definitely we need to handle this later.\n lattices = k2.intersect_dense_pruned(HLG, dense_fsa_vec, 20.0, 7.0, 30, 10000)\n\n # lattices = k2.intersect_dense(HLG, dense_fsa_vec, 10.0)\n best_paths = k2.shortest_path(lattices, use_double_scores=True)\n assert best_paths.shape[0] == len(texts)\n hyps = get_texts(best_paths, indices)\n assert len(hyps) == len(texts)\n\n for i in range(len(texts)):\n hyp_words = [symbols.get(x) for x in hyps[i]]\n ref_words = texts[i].split(\" \")\n results.append((ref_words, hyp_words))\n\n if batch_idx % 10 == 0:\n batch_str = f\"{batch_idx}\" if num_batches is None else f\"{batch_idx}/{num_batches}\"\n logging.info(f\"batch {batch_str}, number of cuts processed until now is {num_cuts}\")\n\n num_cuts += len(texts)\n\n return results\n\n\ndef main():\n exp_dir = Path(\"exp-lstm-adam-ctc-musan\")\n setup_logger(\"{}/log/log-decode\".format(exp_dir), log_level=\"debug\")\n\n # load L, G, symbol_table\n lang_dir = Path(\"data/lang_nosp\")\n symbol_table = k2.SymbolTable.from_file(lang_dir / \"words.txt\")\n phone_symbol_table = k2.SymbolTable.from_file(lang_dir / \"phones.txt\")\n phone_ids = get_phone_symbols(phone_symbol_table)\n phone_ids_with_blank = [0] + phone_ids\n ctc_topo = k2.arc_sort(build_ctc_topo(phone_ids_with_blank))\n\n if not os.path.exists(lang_dir / \"HLG.pt\"):\n print(\"Loading L_disambig.fst.txt\")\n with open(lang_dir / \"L_disambig.fst.txt\") as f:\n L = k2.Fsa.from_openfst(f.read(), acceptor=False)\n print(\"Loading G.fst.txt\")\n with open(lang_dir / \"G.fst.txt\") as f:\n G = k2.Fsa.from_openfst(f.read(), acceptor=False)\n first_phone_disambig_id = find_first_disambig_symbol(phone_symbol_table)\n first_word_disambig_id = find_first_disambig_symbol(symbol_table)\n HLG = compile_HLG(\n L=L,\n G=G,\n H=ctc_topo,\n labels_disambig_id_start=first_phone_disambig_id,\n aux_labels_disambig_id_start=first_word_disambig_id,\n )\n torch.save(HLG.as_dict(), lang_dir / \"HLG.pt\")\n else:\n print(\"Loading pre-compiled HLG\")\n d = torch.load(lang_dir / \"HLG.pt\")\n HLG = k2.Fsa.from_dict(d)\n\n # load dataset\n feature_dir = Path(\"exp/data\")\n print(\"About to get test cuts\")\n cuts_test = CutSet.from_file(feature_dir / \"gigaspeech_cuts_TEST.jsonl.gz\")\n\n print(\"About to create test dataset\")\n test = K2SpeechRecognitionDataset(cuts_test)\n sampler = SingleCutSampler(cuts_test, max_frames=100000)\n print(\"About to create test dataloader\")\n test_dl = torch.utils.data.DataLoader(\n test, batch_size=None, sampler=sampler, num_workers=1\n )\n\n # if not torch.cuda.is_available():\n # logging.error('No GPU detected!')\n # sys.exit(-1)\n\n print(\"About to load model\")\n # Note: Use \"export CUDA_VISIBLE_DEVICES=N\" to setup device id to N\n # device = torch.device('cuda', 1)\n device = torch.device(\"cuda\")\n model = TdnnLstm1b(\n num_features=80,\n num_classes=len(phone_ids) + 1, # +1 for the blank symbol\n subsampling_factor=4,\n )\n\n checkpoint = os.path.join(exp_dir, \"epoch-7.pt\")\n load_checkpoint(checkpoint, model)\n model.to(device)\n model.eval()\n\n print(\"convert HLG to device\")\n HLG = HLG.to(device)\n HLG.aux_labels = k2.ragged.remove_values_eq(HLG.aux_labels, 0)\n HLG.requires_grad_(False)\n print(\"About to decode\")\n results = decode(\n dataloader=test_dl, model=model, device=device, HLG=HLG, symbols=symbol_table\n )\n s = \"\"\n for ref, hyp in results:\n s += f\"ref={ref}\\n\"\n s += f\"hyp={hyp}\\n\"\n logging.info(s)\n # compute WER\n dists = [edit_distance(r, h) for r, h in results]\n errors = {\n key: 
sum(dist[key] for dist in dists) for key in [\"sub\", \"ins\", \"del\", \"total\"]\n }\n total_words = sum(len(ref) for ref, _ in results)\n # Print Kaldi-like message:\n # %WER 8.20 [ 4459 / 54402, 695 ins, 427 del, 3337 sub ]\n logging.info(\n f'%WER {errors[\"total\"] / total_words:.2%} '\n f'[{errors[\"total\"]} / {total_words}, {errors[\"ins\"]} ins, {errors[\"del\"]} del, {errors[\"sub\"]} sub ]'\n )\n\n\ntorch.set_num_threads(1)\ntorch.set_num_interop_threads(1)\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.set_num_interop_threads",
"torch.load",
"torch.utils.data.DataLoader",
"torch.floor_divide",
"torch.set_num_threads",
"torch.no_grad",
"torch.device",
"torch.argsort"
]
] |
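Note on the snowfall record above: the WER report at the end of main() sums per-utterance counts from kaldialign.edit_distance and divides total errors by the total number of reference words. A minimal sketch with two toy utterance pairs; it assumes kaldialign is installed, and results is hypothetical data in the same (ref_words, hyp_words) layout the decode loop builds.

from kaldialign import edit_distance  # same helper the script imports

results = [("the cat sat".split(), "the cat sat".split()),
           ("hello world".split(), "hello word there".split())]

dists = [edit_distance(r, h) for r, h in results]
errors = {k: sum(d[k] for d in dists) for k in ["sub", "ins", "del", "total"]}
total_words = sum(len(ref) for ref, _ in results)
print(f"%WER {errors['total'] / total_words:.2%} "
      f"[{errors['total']} / {total_words}, {errors['ins']} ins, "
      f"{errors['del']} del, {errors['sub']} sub]")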
irap-omp/deconv3d
|
[
"0ab5322f99e28a19ce5540d9a27dcbe340542d0a"
] |
[
"tests/run_test.py"
] |
[
"\n# GENERAL PACKAGES ############################################################\nfrom os import pardir, remove\nfrom os.path import abspath, dirname, join, isfile\nimport numpy as np\nimport unittest\nfrom hyperspectral import HyperspectralCube as Cube\n\n# LOCAL PACKAGES ##############################################################\n\nimport sys\nsys.path.append('.')\n\nfrom lib.run import Run\nfrom lib.instruments import MUSE\nfrom lib.line_models import SingleGaussianLineModel\nfrom lib.masks import above_percentile\n\n\n# ACTUAL TESTS ################################################################\n\n\nclass RunTest(unittest.TestCase):\n\n longMessage = True\n\n root_folder = abspath(join(dirname(abspath(__file__)), pardir))\n fits_folder = join(root_folder, 'tests/input')\n\n fits_muse_filename = join(fits_folder, 'test_cube_01.fits')\n\n data_galpak1_filename = join(fits_folder, 'GalPaK_cube_1101_size4.08_flux1e-16_incl60_vmax199_disp80_seeing1.0_PAm50.fits')\n mask_galpak1_filename = join(fits_folder, 'GalPaK_cube_1101_size4.08_flux1e-16_incl60_vmax199_disp80_seeing1.0_PAm50_mask.fits')\n\n data_galpak2_filename = join(fits_folder, 'input_snrx10GalPaK_cube_1101_size4.08_flux1e-16_incl60_vmax199_disp80_seeing1.00_PA_m50_renorm.fits')\n mask_galpak2_filename = join(fits_folder, 'input_snrx10GalPaK_cube_1101_size4.08_flux1e-16_incl60_vmax199_disp80_seeing1.00_PA_m50_mask.fits')\n\n def test_init_with_empty_cube(self):\n cube = Cube()\n inst = MUSE()\n\n self.assertTrue(cube.is_empty())\n\n with self.assertRaises(ValueError):\n run = Run(cube, inst)\n\n def test_init_with_muse_cube(self):\n cube = Cube.from_fits(self.fits_muse_filename)\n inst = MUSE()\n\n self.assertFalse(cube.is_empty(), \"Sanity check.\")\n\n run = Run(cube, instrument=inst, max_iterations=200)\n\n run.save('run_test_init_with_muse_cube', clobber=True)\n\n def test_save(self):\n cube = Cube.from_fits(self.fits_muse_filename)\n inst = MUSE()\n\n run = Run(cube, instrument=inst, max_iterations=2)\n run.save('test', clobber=True)\n\n self.assertTrue(isfile('test_parameters.npy'))\n remove('test_parameters.npy')\n self.assertTrue(isfile('test_images.png'))\n remove('test_images.png')\n self.assertTrue(isfile('test_convolved_cube.fits'))\n remove('test_convolved_cube.fits')\n self.assertTrue(isfile('test_clean_cube.fits'))\n remove('test_clean_cube.fits')\n\n def test_initial_parameters(self):\n cube = Cube.from_fits(self.fits_muse_filename)\n inst = MUSE()\n\n m = SingleGaussianLineModel()\n nump = len(m.parameters())\n minp = np.array(m.min_boundaries(cube))\n maxp = np.array(m.max_boundaries(cube))\n\n # FROM A 1D ARRAY\n p1d = minp + (maxp - minp) * np.random.rand(nump)\n\n run = Run(cube, inst, initial_parameters=p1d, max_iterations=1)\n # Note: p1d is broacasted on each spaxel in this assertion\n self.assertTrue((run.extract_parameters() == p1d).all())\n\n # FROM A 3D ARRAY\n p3d = np.resize(p1d, (cube.shape[1], cube.shape[0], nump))\n run = Run(cube, inst, initial_parameters=p3d, max_iterations=1)\n self.assertTrue((run.extract_parameters() == p3d).all())\n\n # FROM A NPY FILE\n np.save('test.npy', p3d)\n run = Run(cube, inst, initial_parameters='test.npy', max_iterations=1)\n self.assertTrue((run.extract_parameters() == p3d).all())\n remove('test.npy')\n\n def test_plot_chain(self):\n cube = Cube.from_fits(self.fits_muse_filename)\n inst = MUSE()\n\n run = Run(cube, inst, max_iterations=666)\n\n run.plot_chain()\n\n def test_masks(self):\n cube = Cube.from_fits(self.fits_muse_filename)\n inst = MUSE()\n\n run 
= Run(cube, inst,\n mask=above_percentile(cube),\n max_iterations=42)\n\n run.plot_images()\n\n # OTHER TESTS #############################################################\n\n def test_numpy_extrude(self):\n \"\"\"\n Testing extrusion using the `numpy.newaxis` syntax.\n This does not test deconv3d per se, but it's good to know.\n \"\"\"\n a2d = np.array([[0, 1],\n [2, 0]])\n a1d = np.array([1, 2, 3])\n\n b3d = np.array([\n [[0, 1],\n [2, 0]],\n [[0, 2],\n [4, 0]],\n [[0, 3],\n [6, 0]],\n ])\n\n # Not trivial, but fast\n a3d = a2d * a1d[:, np.newaxis][:, np.newaxis]\n\n np.testing.assert_array_almost_equal(a3d, b3d)\n\n # GARBAGE TESTS ###########################################################\n\n # def test_with_galpak1_data(self):\n # cube = Cube.from_fits(self.data_galpak1_filename)\n # inst = MUSE()\n #\n # self.assertFalse(cube.is_empty())\n #\n # run = Run(\n # cube, inst,\n # mask=self.mask_galpak1_filename,\n # max_iterations=42\n # )\n #\n # run.save('run_004', clobber=True)\n #\n # def test_with_galpak2_data(self):\n # # FIX THESE DAMN HEADERS\n # from astropy.io.fits import setval\n # setval(self.data_galpak2_filename, keyword='CDELT1', value=5.5555555555555e-05, ext=0)\n # setval(self.data_galpak2_filename, keyword='CDELT2', value=5.5555555555555e-05, ext=0)\n # setval(self.data_galpak2_filename, keyword='CRVAL1', value=1.0, ext=0)\n # setval(self.data_galpak2_filename, keyword='CRVAL2', value=1.0, ext=0)\n # setval(self.data_galpak2_filename, keyword='CRPIX1', value=1.0, ext=0)\n # setval(self.data_galpak2_filename, keyword='CRPIX2', value=1.0, ext=0)\n # setval(self.data_galpak2_filename, keyword='CUNIT1', value='deg ', ext=0)\n # setval(self.data_galpak2_filename, keyword='CUNIT2', value='deg ', ext=0)\n # setval(self.data_galpak2_filename, keyword='CTYPE1', value='RA---TAN', ext=0)\n # setval(self.data_galpak2_filename, keyword='CTYPE2', value='DEC--TAN', ext=0)\n # ########################\n #\n # cube = Cube.from_fits(self.data_galpak2_filename)\n # inst = MUSE()\n #\n # print(\"HEADER\")\n # print(cube.meta['fits'])\n #\n # self.assertFalse(cube.is_empty())\n #\n # run = Run(\n # cube, inst,\n # mask=self.mask_galpak2_filename,\n # jump_amplitude=2.0,\n # max_iterations=10\n # )\n #\n # run.save('run_galpak_renorm_2', clobber=True)\n\n"
] |
[
[
"numpy.resize",
"numpy.save",
"numpy.random.rand",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
]
] |
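Note on the deconv3d record above: test_numpy_extrude relies on numpy broadcasting. Chaining [:, np.newaxis] twice turns a length-3 vector into shape (3, 1, 1), so multiplying by a (2, 2) image broadcasts to a (3, 2, 2) cube, one scaled copy of the image per vector entry. A minimal sketch making the shapes explicit.

import numpy as np

image = np.array([[0, 1],
                  [2, 0]])            # shape (2, 2)
profile = np.array([1, 2, 3])         # shape (3,)

# Same as profile[:, np.newaxis][:, np.newaxis] in the test above.
column = profile[:, np.newaxis, np.newaxis]   # shape (3, 1, 1)
cube = image * column                          # broadcasts to (3, 2, 2)
print(cube.shape, cube[2].tolist())            # (3, 2, 2) [[0, 3], [6, 0]]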
aayars/py-noisemaker
|
[
"4e27f536632ade583eb0110aaaa9e19c59355ba6"
] |
[
"noisemaker/value.py"
] |
[
"\"\"\"Low-level value noise functions\"\"\"\n\nfrom collections import defaultdict\n\nimport math\nimport random\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom noisemaker.constants import (\n DistanceMetric,\n InterpolationType,\n PointDistribution,\n ValueDistribution,\n ValueMask,\n VoronoiDiagramType,\n)\nfrom noisemaker.effects_registry import effect\nfrom noisemaker.points import point_cloud\n\nimport noisemaker.masks as masks\nimport noisemaker.oklab as oklab\nimport noisemaker.simplex as simplex\n\n\ndef set_seed(seed):\n \"\"\"\n \"\"\"\n\n if seed is not None:\n random.seed(seed)\n\n np.random.seed(seed)\n\n tf.random.set_seed(seed)\n\n simplex._seed = seed\n\n\ndef values(freq, shape, distrib=ValueDistribution.uniform, corners=False, mask=None, mask_inverse=False, mask_static=False,\n spline_order=InterpolationType.bicubic, time=0.0, speed=1.0):\n \"\"\"\n \"\"\"\n\n if isinstance(freq, int):\n freq = freq_for_shape(freq, shape)\n\n initial_shape = freq + [shape[-1]]\n\n if distrib is None:\n distrib = ValueDistribution.uniform\n\n distrib = coerce_enum(distrib, ValueDistribution)\n\n mask = coerce_enum(mask, ValueMask)\n\n if distrib == ValueDistribution.ones:\n tensor = tf.ones(initial_shape)\n\n elif distrib == ValueDistribution.mids:\n tensor = tf.ones(initial_shape) * .5\n\n elif distrib == ValueDistribution.zeros:\n tensor = tf.zeros(initial_shape)\n\n elif distrib == ValueDistribution.column_index:\n tensor = tf.expand_dims(normalize(tf.cast(column_index(initial_shape), tf.float32)), -1) * tf.ones(initial_shape, tf.float32)\n\n elif distrib == ValueDistribution.row_index:\n tensor = tf.expand_dims(normalize(tf.cast(row_index(initial_shape), tf.float32)), -1) * tf.ones(initial_shape, tf.float32)\n\n elif ValueDistribution.is_center_distance(distrib):\n sdf_sides = None\n\n if distrib == ValueDistribution.center_circle:\n metric = DistanceMetric.euclidean\n elif distrib == ValueDistribution.center_triangle:\n metric = DistanceMetric.triangular\n elif distrib == ValueDistribution.center_diamond:\n metric = DistanceMetric.manhattan\n elif distrib == ValueDistribution.center_square:\n metric = DistanceMetric.chebyshev\n elif distrib == ValueDistribution.center_pentagon:\n metric = DistanceMetric.sdf\n sdf_sides = 5\n elif distrib == ValueDistribution.center_hexagon:\n metric = DistanceMetric.hexagram\n elif distrib == ValueDistribution.center_heptagon:\n metric = DistanceMetric.sdf\n sdf_sides = 7\n elif distrib == ValueDistribution.center_octagon:\n metric = DistanceMetric.octagram\n elif distrib == ValueDistribution.center_nonagon:\n metric = DistanceMetric.sdf\n sdf_sides = 9\n elif distrib == ValueDistribution.center_decagon:\n metric = DistanceMetric.sdf\n sdf_sides = 10\n elif distrib == ValueDistribution.center_hendecagon:\n metric = DistanceMetric.sdf\n sdf_sides = 11\n elif distrib == ValueDistribution.center_dodecagon:\n metric = DistanceMetric.sdf\n sdf_sides = 12\n\n # make sure speed doesn't break looping\n if speed > 0:\n rounded_speed = math.floor(1 + speed)\n else:\n rounded_speed = math.ceil(-1 + speed)\n\n tensor = normalized_sine(singularity(None, shape, dist_metric=metric, sdf_sides=sdf_sides) * math.tau * max(freq[0], freq[1])\n - math.tau * time * rounded_speed) * tf.ones(shape)\n\n elif ValueDistribution.is_scan(distrib):\n if distrib in (ValueDistribution.scan_up, ValueDistribution.scan_down):\n scan_distrib = ValueDistribution.column_index\n\n elif distrib in (ValueDistribution.scan_left, ValueDistribution.scan_right):\n scan_distrib = 
ValueDistribution.row_index\n\n tensor = values([shape[0], shape[1]], value_shape(shape), distrib=scan_distrib)\n\n if distrib in (ValueDistribution.scan_up, ValueDistribution.scan_left):\n tensor = 1.0 - tensor\n\n # make sure speed doesn't break looping\n # XXX copied from center distance\n if speed > 0:\n rounded_speed = math.floor(1 + speed)\n else:\n rounded_speed = math.ceil(-1 + speed)\n\n tensor = normalized_sine(tensor * math.tau - math.tau * time * rounded_speed) * tf.ones(shape)\n\n elif ValueDistribution.is_noise(distrib):\n # we need to control the periodic function's visual speed (i.e. scale the time factor), but without breaking loops.\n # to accomplish this, we will use a scaled periodic uniform noise as the time value for periodic noise types.\n # since time values are per-pixel, this has the added bonus of animating different parts of the image at different\n # rates, rather than ping-ponging the entire image back and forth in lockstep. this creates a visual effect which\n # closely resembles higher-dimensional noise.\n\n # get a periodic uniform noise, and scale it to speed:\n scaled_time = periodic_value(time, tf.random.uniform(initial_shape)) * speed\n\n tensor = periodic_value(scaled_time, tf.random.uniform(initial_shape))\n\n if distrib == ValueDistribution.exp:\n tensor = tf.math.pow(tensor, 4)\n\n else:\n raise ValueError(\"%s (%s) is not a ValueDistribution\" % (distrib, type(distrib)))\n\n if mask:\n atlas = masks.get_atlas(mask)\n\n glyph_shape = freq + [1]\n\n mask_values, _ = masks.mask_values(mask, glyph_shape, atlas=atlas, inverse=mask_inverse,\n time=0 if mask_static else time, speed=speed)\n\n # These noise types are generated at full size, resize and pin just the mask.\n if ValueDistribution.is_native_size(distrib):\n mask_values = resample(mask_values, shape, spline_order=spline_order)\n mask_values = pin_corners(mask_values, shape, freq, corners)\n\n if shape[2] == 2:\n tensor = tf.stack([tensor[:, :, 0], tf.stack(mask_values)[:, :, 0]], 2)\n\n elif shape[2] == 4:\n tensor = tf.stack([tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2], tf.stack(mask_values)[:, :, 0]], 2)\n\n else:\n tensor *= mask_values\n\n if not ValueDistribution.is_native_size(distrib):\n tensor = resample(tensor, shape, spline_order=spline_order)\n tensor = pin_corners(tensor, shape, freq, corners)\n\n if distrib not in (ValueDistribution.ones, ValueDistribution.mids, ValueDistribution.zeros):\n # I wish we didn't have to do this, but values out of the 0..1 range screw all kinds of things up\n tensor = normalize(tensor)\n\n return tensor\n\n\ndef distance(a, b, metric=DistanceMetric.euclidean, sdf_sides=5):\n \"\"\"\n Compute the distance from a to b, using the specified metric.\n\n :param Tensor a:\n :param Tensor b:\n :param DistanceMetric|int|str metric: Distance metric\n :return: Tensor\n \"\"\"\n\n metric = coerce_enum(metric, DistanceMetric)\n\n if metric == DistanceMetric.euclidean:\n dist = tf.sqrt(a * a + b * b)\n\n elif metric == DistanceMetric.manhattan:\n dist = tf.abs(a) + tf.abs(b)\n\n elif metric == DistanceMetric.chebyshev:\n dist = tf.maximum(tf.abs(a), tf.abs(b))\n\n elif metric == DistanceMetric.octagram:\n dist = tf.maximum((tf.abs(a) + tf.abs(b)) / math.sqrt(2), tf.maximum(tf.abs(a), tf.abs(b)))\n\n elif metric == DistanceMetric.triangular:\n dist = tf.maximum(tf.abs(a) - b * .5, b)\n\n elif metric == DistanceMetric.hexagram:\n dist = tf.maximum(\n tf.maximum(tf.abs(a) - b * .5, b),\n tf.maximum(tf.abs(a) - b * -.5, b * -1)\n )\n\n elif metric == 
DistanceMetric.sdf:\n # https://thebookofshaders.com/07/\n arctan = tf.math.atan2(a, -b) + math.pi\n r = math.tau / sdf_sides\n\n dist = tf.math.cos(tf.math.floor(.5 + arctan / r) * r - arctan) * tf.sqrt(a * a + b * b)\n\n else:\n raise ValueError(\"{0} isn't a distance metric.\".format(metric))\n\n return dist\n\n\n@effect()\ndef voronoi(tensor, shape, diagram_type=VoronoiDiagramType.range, nth=0,\n dist_metric=DistanceMetric.euclidean, sdf_sides=3, alpha=1.0, with_refract=0.0, inverse=False,\n xy=None, ridges_hint=False, refract_y_from_offset=True, time=0.0, speed=1.0,\n point_freq=3, point_generations=1, point_distrib=PointDistribution.random, point_drift=0.0, point_corners=False):\n \"\"\"\n Create a voronoi diagram, blending with input image Tensor color values.\n\n .. image:: images/voronoi.jpg\n :width: 1024\n :height: 256\n :alt: Noisemaker example output (CC0)\n\n :param Tensor tensor:\n :param list[int] shape:\n :param VoronoiDiagramType|int diagram_type: Diagram type (0=Off, 1=Range, 2=Color Range, 3=Indexed, 4=Color Map, 5=Blended, 6=Flow)\n :param float nth: Plot Nth nearest neighbor, or -Nth farthest\n :param DistanceMetric|int dist_metric: Voronoi distance metric\n :param bool regions: Assign colors to control points (memory intensive)\n :param float alpha: Blend with original tensor (0.0 = Original, 1.0 = Voronoi)\n :param float with_refract: Domain warp input tensor against resulting voronoi\n :param bool inverse: Invert range brightness values (does not affect hue)\n :param (Tensor, Tensor, int) xy: Bring your own x, y, and point count (You shouldn't normally need this)\n :param float ridges_hint: Adjust output colors to match ridged multifractal output (You shouldn't normally need this)\n :return: Tensor\n \"\"\"\n\n diagram_type = coerce_enum(diagram_type, VoronoiDiagramType)\n\n dist_metric = coerce_enum(dist_metric, DistanceMetric)\n\n original_shape = shape\n\n shape = [int(shape[0] * .5), int(shape[1] * .5), shape[2]] # Gotta upsample later, this one devours memory.\n\n height, width, channels = shape\n\n if xy is None:\n if point_freq == 1:\n x, y = point_cloud(point_freq, PointDistribution.square, shape)\n\n else:\n x, y = point_cloud(point_freq, distrib=point_distrib, shape=shape, corners=point_corners, generations=point_generations,\n drift=point_drift, time=time, speed=speed)\n\n point_count = len(x)\n\n else:\n if len(xy) == 2:\n x, y = xy\n point_count = len(x)\n\n else:\n x, y, point_count = xy\n\n x = tf.cast(tf.stack(x), tf.float32) / 2.0\n y = tf.cast(tf.stack(y), tf.float32) / 2.0\n\n vshape = value_shape(shape)\n\n x_index = tf.cast(tf.reshape(row_index(shape), vshape), tf.float32)\n y_index = tf.cast(tf.reshape(column_index(shape), vshape), tf.float32)\n\n is_triangular = dist_metric in (\n DistanceMetric.triangular,\n DistanceMetric.hexagram,\n DistanceMetric.sdf,\n )\n\n if diagram_type in VoronoiDiagramType.flow_members():\n # If we're using flow with a perfectly tiled grid, it just disappears. Perturbing the points seems to prevent this from happening.\n x += tf.random.normal(shape=tf.shape(x), stddev=.0001, dtype=tf.float32)\n y += tf.random.normal(shape=tf.shape(y), stddev=.0001, dtype=tf.float32)\n\n if is_triangular:\n # Keep it visually flipped \"horizontal\"-side-up\n y_sign = -1.0 if inverse else 1.0\n\n dist = distance((x_index - x) / width, (y_index - y) * y_sign / height, dist_metric, sdf_sides=sdf_sides)\n\n else:\n half_width = int(width * .5)\n half_height = int(height * .5)\n\n # Wrapping edges! 
Nearest neighbors might actually be \"wrapped around\", on the opposite side of the image.\n    # Determine which direction is closer, and use the minimum.\n\n    # Subtracting the list of points from the index results in a new shape\n    # [y, x, value] - [point_count] -> [y, x, value, point_count]\n    x0_diff = x_index - x - half_width\n    x1_diff = x_index - x + half_width\n    y0_diff = y_index - y - half_height\n    y1_diff = y_index - y + half_height\n\n    #\n    x_diff = tf.minimum(tf.abs(x0_diff), tf.abs(x1_diff)) / width\n    y_diff = tf.minimum(tf.abs(y0_diff), tf.abs(y1_diff)) / height\n\n    # Not-wrapping edges!\n    # x_diff = (x_index - x) / width\n    # y_diff = (y_index - y) / height\n\n    dist = distance(x_diff, y_diff, dist_metric)\n\n    ###\n    if diagram_type not in VoronoiDiagramType.flow_members():\n    dist, indices = tf.nn.top_k(dist, k=point_count)\n    index = min(nth + 1, point_count - 1) * -1\n\n    ###\n\n    # Seamless alg offsets pixels by half image size. Move results slice back to starting points with `offset`:\n    offset_kwargs = {\n    'x': 0.0 if is_triangular else half_width,\n    'y': 0.0 if is_triangular else half_height,\n    }\n\n    if diagram_type in (VoronoiDiagramType.range, VoronoiDiagramType.color_range, VoronoiDiagramType.range_regions):\n    range_slice = normalize(dist[:, :, index])\n    range_slice = tf.expand_dims(tf.sqrt(range_slice), -1)\n    range_slice = resample(offset(range_slice, shape, **offset_kwargs), original_shape)\n\n    if inverse:\n    range_slice = 1.0 - range_slice\n\n    if diagram_type in (VoronoiDiagramType.regions, VoronoiDiagramType.color_regions, VoronoiDiagramType.range_regions):\n    regions_slice = offset(indices[:, :, index], shape, **offset_kwargs)\n\n    ###\n    if diagram_type == VoronoiDiagramType.range:\n    range_out = range_slice\n\n    if diagram_type in VoronoiDiagramType.flow_members():\n    dist = tf.math.log(dist)\n\n    # Clamp to avoid infinities\n    dist = tf.minimum(10, dist)\n    dist = tf.maximum(-10, dist)\n\n    dist = tf.expand_dims(dist, -1)\n\n    if diagram_type == VoronoiDiagramType.color_flow:\n    colors = tf.gather_nd(tensor, tf.cast(tf.stack([y * 2, x * 2], 1), tf.int32))\n    colors = tf.reshape(colors, [1, 1, point_count, shape[2]])\n    if ridges_hint:\n    colors = tf.abs(colors * 2 - 1)\n\n    # normalize() can make animation twitchy. TODO: figure out a way to do this without normalize\n    range_out = normalize(tf.math.reduce_mean(1.0 - (1.0 - normalize(dist * colors)), 2))\n\n    else:  # flow\n    # This is dicey as hell. 
Try to get range_out into a reasonable range.\n # Difficulty level: Without using normalize()\n range_out = (tf.math.reduce_mean(dist, 2) + 1.75) / 1.45\n # print(tf.reduce_min(range_out))\n # print(tf.reduce_max(range_out))\n\n range_out = resample(offset(range_out, shape, **offset_kwargs), original_shape)\n\n if inverse:\n range_out = 1.0 - range_out\n\n if diagram_type in (VoronoiDiagramType.color_range, VoronoiDiagramType.range_regions):\n # range_out = regions_out * range_slice\n range_out = blend(tensor * range_slice, range_slice, range_slice)\n\n if diagram_type == VoronoiDiagramType.regions:\n regions_out = resample(tf.cast(regions_slice, tf.float32), original_shape, spline_order=InterpolationType.constant)\n\n if diagram_type in (VoronoiDiagramType.color_regions, VoronoiDiagramType.range_regions):\n colors = tf.gather_nd(tensor, tf.cast(tf.stack([y * 2, x * 2], 1), tf.int32))\n\n if ridges_hint:\n colors = tf.abs(colors * 2 - 1)\n\n spline_order = 0 if diagram_type == VoronoiDiagramType.color_regions else 3\n\n regions_out = resample(tf.reshape(tf.gather(colors, regions_slice), shape), original_shape, spline_order=spline_order)\n\n ###\n if diagram_type == VoronoiDiagramType.range_regions:\n out = blend(regions_out, range_out, tf.square(range_out))\n\n elif diagram_type in [VoronoiDiagramType.range, VoronoiDiagramType.color_range] + VoronoiDiagramType.flow_members():\n out = range_out\n\n elif diagram_type in (VoronoiDiagramType.regions, VoronoiDiagramType.color_regions):\n out = regions_out\n\n else:\n raise Exception(f\"Not sure what to do with diagram type {diagram_type}\")\n\n if diagram_type == VoronoiDiagramType.regions:\n out = tf.expand_dims(out, -1) / point_count\n\n if with_refract != 0.0:\n out = refract(tensor, original_shape, displacement=with_refract, reference_x=out,\n y_from_offset=refract_y_from_offset)\n\n if tensor is not None:\n out = blend(tensor, out, alpha)\n\n return out\n\n\ndef periodic_value(time, value):\n \"\"\"\n Coerce the received value to animate smoothly between time values 0 and 1, by applying a sine function and scaling the result.\n\n :param float time:\n :param float|Tensor value:\n \"\"\"\n\n # h/t Etienne Jacob again\n # https://bleuje.github.io/tutorial2/\n return normalized_sine((time - value) * math.tau)\n\n\ndef normalize(tensor, signed_range=False):\n \"\"\"\n Squeeze the given Tensor into a range between 0 and 1.\n\n :param Tensor tensor: An image tensor.\n :param bool signed_range: Use a range between -1 and 1.\n :return: Tensor\n \"\"\"\n\n floor = float(tf.reduce_min(tensor))\n if floor == math.inf or floor == -math.inf or floor == math.nan: # Avoid GIGO\n raise ValueError(f\"Input tensor contains {floor}, check caller for shenanigans\")\n\n ceil = float(tf.reduce_max(tensor))\n if ceil == math.inf or ceil == -math.inf or ceil == math.nan: # Avoid GIGO\n raise ValueError(f\"Input tensor contains {ceil}, check caller for shenanigans\")\n\n if floor == ceil: # Avoid divide by zero\n raise ValueError(f\"Input tensor min and max are each {floor}, check caller for shenanigans\")\n\n delta = ceil - floor\n\n values = (tensor - floor) / delta\n\n if signed_range:\n values = values * 2.0 - 1.0\n\n return values\n\n\ndef _gather_scaled_offset(tensor, input_column_index, input_row_index, output_index):\n \"\"\" Helper function for resample(). Apply index offset to input tensor, return output_index values gathered post-offset. 
\"\"\"\n\n return tf.gather_nd(tf.gather_nd(tensor, tf.stack([input_column_index, input_row_index], 2)), output_index)\n\n\ndef resample(tensor, shape, spline_order=3):\n \"\"\"\n Resize an image tensor to the specified shape.\n\n :param Tensor tensor:\n :param list[int] shape:\n :param int spline_order: Spline point count. 0=Constant, 1=Linear, 2=Cosine, 3=Bicubic\n :return: Tensor\n \"\"\"\n\n spline_order = coerce_enum(spline_order, InterpolationType)\n\n input_shape = tf.shape(tensor)\n\n # Blown up row and column indices. These map into input tensor, producing a big blocky version.\n resized_row_index = tf.cast(row_index(shape), tf.float32) \\\n * (tf.cast(input_shape[1], tf.float32) / tf.cast(shape[1], tf.float32)) # 0, 1, 2, 3, -> 0, 0.5, 1, 1.5A\n\n resized_col_index = tf.cast(column_index(shape), tf.float32) * (tf.cast(input_shape[0], tf.float32) / tf.cast(shape[0], tf.float32))\n\n # Map to input indices as int\n resized_row_index_trunc = tf.floor(resized_row_index)\n resized_col_index_trunc = tf.floor(resized_col_index)\n resized_index_trunc = tf.cast(tf.stack([resized_col_index_trunc, resized_row_index_trunc], 2), tf.int32)\n\n # Resized original\n resized = defaultdict(dict)\n resized[1][1] = tf.gather_nd(tensor, resized_index_trunc)\n\n if spline_order == InterpolationType.constant:\n return resized[1][1]\n\n # Resized neighbors\n input_rows = defaultdict(dict)\n input_columns = defaultdict(dict)\n\n input_rows[1] = row_index(input_shape)\n input_columns[1] = column_index(input_shape)\n\n input_rows[2] = (input_rows[1] + 1) % input_shape[1]\n input_columns[2] = (input_columns[1] + 1) % input_shape[0]\n\n # Create fractional diffs (how much to blend with each neighbor)\n vshape = value_shape(shape)\n resized_row_index_fract = tf.reshape(resized_row_index - resized_row_index_trunc, vshape) # 0, 0.5, 1, 1.5 -> 0, .5, 0, .5\n resized_col_index_fract = tf.reshape(resized_col_index - resized_col_index_trunc, vshape)\n\n for x in range(1, 3):\n for y in range(1, 3):\n if x == 1 and y == 1:\n continue\n\n resized[y][x] = _gather_scaled_offset(tensor, input_columns[y], input_rows[x], resized_index_trunc)\n\n if spline_order == InterpolationType.linear:\n y1 = blend(resized[1][1], resized[1][2], resized_row_index_fract)\n y2 = blend(resized[2][1], resized[2][2], resized_row_index_fract)\n\n return blend(y1, y2, resized_col_index_fract)\n\n if spline_order == InterpolationType.cosine:\n y1 = blend_cosine(resized[1][1], resized[1][2], resized_row_index_fract)\n y2 = blend_cosine(resized[2][1], resized[2][2], resized_row_index_fract)\n\n return blend_cosine(y1, y2, resized_col_index_fract)\n\n if spline_order == InterpolationType.bicubic:\n # Extended neighborhood for bicubic\n points = []\n\n for y in range(0, 4):\n if y not in input_columns:\n input_columns[y] = (input_columns[1] + (y - 1)) % input_shape[0]\n\n for x in range(0, 4):\n if x not in input_rows:\n input_rows[x] = (input_rows[1] + (x - 1)) % input_shape[1]\n\n resized[y][x] = _gather_scaled_offset(tensor, input_columns[y], input_rows[x], resized_index_trunc)\n\n points.append(blend_cubic(resized[y][0], resized[y][1], resized[y][2], resized[y][3], resized_row_index_fract))\n\n args = points + [resized_col_index_fract]\n\n return blend_cubic(*args)\n\n\ndef proportional_downsample(tensor, shape, new_shape):\n \"\"\"\n Given a new shape which is evenly divisible by the old shape, shrink the image by averaging pixel values.\n\n :param Tensor tensor:\n :param list[int] shape:\n :param list[int] new_shape:\n \"\"\"\n\n 
kernel_shape = [int(max(shape[0] / new_shape[0], 1)), int(max(shape[1] / new_shape[1], 1)), shape[2], 1]\n\n kernel = tf.ones(kernel_shape)\n\n out = tf.nn.depthwise_conv2d([tensor], kernel, [1, kernel_shape[0], kernel_shape[1], 1], \"VALID\")[0] / (kernel_shape[0] * kernel_shape[1])\n\n return resample(out, new_shape)\n\n\ndef row_index(shape):\n \"\"\"\n Generate an X index for the given tensor.\n\n .. code-block:: python\n\n [\n [ 0, 1, 2, ... width-1 ],\n [ 0, 1, 2, ... width-1 ],\n ... (x height)\n ]\n\n .. image:: images/row_index.jpg\n :width: 1024\n :height: 256\n :alt: Noisemaker example output (CC0)\n\n :param list[int] shape:\n :return: Tensor of shape (height, width)\n \"\"\"\n\n height = shape[0]\n width = shape[1]\n\n row_identity = tf.cumsum(tf.ones([width], dtype=tf.int32), exclusive=True)\n row_identity = tf.reshape(tf.tile(row_identity, [height]), [height, width])\n\n return row_identity\n\n\ndef column_index(shape):\n \"\"\"\n Generate a Y index for the given tensor.\n\n .. code-block:: python\n\n [\n [ 0, 0, 0, ... ],\n [ 1, 1, 1, ... ],\n [ n, n, n, ... ],\n ...\n [ height-1, height-1, height-1, ... ]\n ]\n\n .. image:: images/column_index.jpg\n :width: 1024\n :height: 256\n :alt: Noisemaker example output (CC0)\n\n :param list[int] shape:\n :return: Tensor of shape (height, width)\n \"\"\"\n\n height = shape[0]\n width = shape[1]\n\n column_identity = tf.ones([width], dtype=tf.int32)\n column_identity = tf.tile(column_identity, [height])\n column_identity = tf.reshape(column_identity, [height, width])\n column_identity = tf.cumsum(column_identity, exclusive=True)\n\n return column_identity\n\n\ndef offset(tensor, shape, x=0, y=0):\n \"\"\"\n \"\"\"\n\n if x == 0 and y == 0:\n return tensor\n\n return tf.gather_nd(tensor, tf.stack([(column_index(shape) + y) % shape[0], (row_index(shape) + x) % shape[1]], 2))\n\n\ndef _linear_components(a, b, g):\n return a * (1 - g), b * g\n\n\ndef blend(a, b, g):\n \"\"\"\n Blend a and b values with linear interpolation.\n\n :param Tensor a:\n :param Tensor b:\n :param float|Tensor g: Blending gradient a to b (0..1)\n :return Tensor:\n \"\"\"\n\n return sum(_linear_components(a, b, g))\n\n\ndef _cosine_components(a, b, g):\n # This guy is great http://paulbourke.net/miscellaneous/interpolation/\n\n g2 = (1 - tf.cos(g * math.pi)) / 2\n\n return a * (1 - g2), b * g2\n\n\ndef blend_cosine(a, b, g):\n \"\"\"\n Blend a and b values with cosine interpolation.\n\n :param Tensor a:\n :param Tensor b:\n :param float|Tensor g: Blending gradient a to b (0..1)\n :return Tensor:\n \"\"\"\n\n return sum(_cosine_components(a, b, g))\n\n\ndef _cubic_components(a, b, c, d, g):\n # This guy is great http://paulbourke.net/miscellaneous/interpolation/\n\n g2 = g * g\n\n a0 = d - c - a + b\n a1 = a - b - a0\n a2 = c - a\n a3 = b\n\n return a0 * g * g2, a1 * g2, a2 * g + a3\n\n\ndef blend_cubic(a, b, c, d, g):\n \"\"\"\n Blend b and c values with bi-cubic interpolation.\n\n :param Tensor a:\n :param Tensor b:\n :param Tensor c:\n :param Tensor d:\n :param float|Tensor g: Blending gradient b to c (0..1)\n :return Tensor:\n \"\"\"\n\n return sum(_cubic_components(a, b, c, d, g))\n\n\ndef freq_for_shape(freq, shape):\n \"\"\"\n Given a base frequency as int, generate noise frequencies for each spatial dimension.\n\n :param int freq: Base frequency\n :param list[int] shape: List of spatial dimensions, e.g. 
[height, width]\n \"\"\"\n\n height = shape[0]\n width = shape[1]\n\n if height == width:\n return [freq, freq]\n\n elif height < width:\n return [freq, int(freq * width / height)]\n\n else:\n return [int(freq * height / width), freq]\n\n\ndef ridge(tensor):\n \"\"\"\n Create a \"ridge\" at midpoint values. 1 - abs(n * 2 - 1)\n\n .. image:: images/crease.jpg\n :width: 1024\n :height: 256\n :alt: Noisemaker example output (CC0)\n\n :param Tensor tensor: An image tensor.\n :return: Tensor\n \"\"\"\n\n return 1.0 - tf.abs(tensor * 2 - 1)\n\n\ndef simple_multires(freq, shape, octaves=1, spline_order=InterpolationType.bicubic, distrib=ValueDistribution.uniform, corners=False,\n ridges=False, mask=None, mask_inverse=False, mask_static=False, time=0.0, speed=1.0):\n \"\"\"Generate multi-octave value noise. Unlike generators.multires, this function is single-channel and does not apply effects.\"\"\"\n\n if isinstance(freq, int):\n freq = freq_for_shape(freq, shape)\n\n tensor = tf.zeros(shape)\n\n for octave in range(1, octaves + 1):\n multiplier = 2 ** octave\n\n base_freq = [int(f * .5 * multiplier) for f in freq]\n\n if all(base_freq[i] > shape[i] for i in range(len(base_freq))):\n break\n\n layer = values(freq=base_freq, shape=shape, spline_order=spline_order, distrib=distrib, corners=corners,\n mask=mask, mask_inverse=mask_inverse, mask_static=mask_static, time=time, speed=speed)\n\n if ridges:\n layer = ridge(layer)\n\n tensor += layer / multiplier\n\n return normalize(tensor)\n\n\ndef value_shape(shape):\n \"\"\"\n \"\"\"\n\n return [shape[0], shape[1], 1]\n\n\ndef normalized_sine(value):\n \"\"\"\n \"\"\"\n\n return (tf.sin(value) + 1.0) * 0.5\n\n\ndef _conform_kernel_to_tensor(kernel, tensor, shape):\n \"\"\"Re-shape a convolution kernel to match the given tensor's color dimensions.\"\"\"\n\n values, _ = masks.mask_values(kernel)\n\n length = len(values)\n\n channels = shape[-1]\n\n temp = np.repeat(values, channels)\n\n temp = tf.reshape(temp, (length, length, channels, 1))\n\n temp = tf.cast(temp, tf.float32)\n\n temp /= tf.maximum(tf.reduce_max(temp), tf.reduce_min(temp) * -1)\n\n return temp\n\n\n@effect()\ndef convolve(tensor, shape, kernel=None, with_normalize=True, alpha=1.0, time=0.0, speed=1.0):\n \"\"\"\n Apply a convolution kernel to an image tensor.\n\n .. 
code-block:: python\n\n    image = convolve(image, shape, ValueMask.conv2d_shadow)\n\n    :param Tensor tensor: An image tensor.\n    :param list[int] shape:\n    :param ValueMask kernel: See conv2d_* members in ValueMask enum\n    :param bool with_normalize: Normalize output (True)\n    :param float alpha: Alpha blending amount\n    :return: Tensor\n\n    \"\"\"\n\n    height, width, channels = shape\n\n    kernel_values = _conform_kernel_to_tensor(kernel, tensor, shape)\n\n    # Give the conv kernel some room to play on the edges\n    half_height = tf.cast(height / 2, tf.int32)\n    half_width = tf.cast(width / 2, tf.int32)\n\n    double_shape = [height * 2, width * 2, channels]\n\n    out = tf.tile(tensor, [2, 2, 1])  # Tile 2x2\n\n    out = offset(out, double_shape, half_width, half_height)\n\n    out = tf.nn.depthwise_conv2d([out], kernel_values, [1, 1, 1, 1], \"VALID\")[0]\n\n    out = tf.image.resize_with_crop_or_pad(out, height, width)\n\n    if with_normalize:\n    out = normalize(out)\n\n    if kernel == ValueMask.conv2d_edges:\n    out = tf.abs(out - .5) * 2\n\n    if alpha == 1.0:\n    return out\n\n    return blend(tensor, out, alpha)\n\n\n@effect()\ndef refract(tensor, shape, displacement=.5, reference_x=None, reference_y=None, warp_freq=None, spline_order=InterpolationType.bicubic,\n    from_derivative=False, signed_range=True, time=0.0, speed=1.0, y_from_offset=False):\n    \"\"\"\n    Apply displacement from pixel values.\n\n    .. image:: images/refract.jpg\n    :width: 1024\n    :height: 256\n    :alt: Noisemaker example output (CC0)\n\n    :param Tensor tensor: An image tensor.\n    :param list[int] shape:\n    :param float displacement:\n    :param Tensor reference_x: An optional horizontal displacement map.\n    :param Tensor reference_y: An optional vertical displacement map.\n    :param list[int] warp_freq: If given, generate new reference_x and reference_y noise with this base frequency.\n    :param int spline_order: Interpolation for warp effect only. 
0=Constant, 1=Linear, 2=Cosine, 3=Bicubic\n :param bool from_derivative: If True, generate X and Y offsets from noise derivatives.\n :param bool signed_range: Scale displacement values from -1..1 instead of 0..1\n :param bool y_from_offset: If True, derive Y offsets from offsetting the image\n :return: Tensor\n \"\"\"\n\n height, width, channels = shape\n\n x0_index = row_index(shape)\n y0_index = column_index(shape)\n\n warp_shape = None\n\n if warp_freq:\n warp_shape = [height, width, 1]\n\n if reference_x is None:\n if from_derivative:\n reference_x = convolve(kernel=ValueMask.conv2d_deriv_x, tensor=tensor, shape=shape, with_normalize=False)\n\n elif warp_freq:\n reference_x = values(freq=warp_freq, shape=warp_shape, distrib=ValueDistribution.uniform,\n time=time, speed=speed, spline_order=spline_order)\n\n else:\n reference_x = tensor\n\n if reference_y is None:\n if from_derivative:\n reference_y = convolve(kernel=ValueMask.conv2d_deriv_y, tensor=tensor, shape=shape, with_normalize=False)\n\n elif warp_freq:\n reference_y = values(freq=warp_freq, shape=warp_shape, distrib=ValueDistribution.uniform,\n time=time, speed=speed, spline_order=spline_order)\n\n else:\n if y_from_offset:\n # \"the old way\"\n y0_index += int(height * .5)\n x0_index += int(width * .5)\n reference_y = tf.gather_nd(reference_x, tf.stack([y0_index % height, x0_index % width], 2))\n else:\n reference_y = reference_x\n reference_x = tf.cos(reference_x * math.tau)\n reference_y = tf.sin(reference_y * math.tau)\n\n quad_directional = signed_range and not from_derivative\n\n # Use extended range so we can refract in 4 directions (-1..1) instead of 2 (0..1).\n # Doesn't work with derivatives (and isn't needed), because derivatives are signed naturally.\n x_offsets = value_map(reference_x, shape, signed_range=quad_directional, with_normalize=False) * displacement * tf.cast(width, tf.float32)\n y_offsets = value_map(reference_y, shape, signed_range=quad_directional, with_normalize=False) * displacement * tf.cast(height, tf.float32)\n # If not using extended range (0..1 instead of -1..1), keep the value range consistent.\n if not quad_directional:\n x_offsets *= 2.0\n y_offsets *= 2.0\n\n # Bilinear interpolation of midpoints\n x0_offsets = (tf.cast(x_offsets, tf.int32) + x0_index) % width\n x1_offsets = (x0_offsets + 1) % width\n y0_offsets = (tf.cast(y_offsets, tf.int32) + y0_index) % height\n y1_offsets = (y0_offsets + 1) % height\n\n x0_y0 = tf.gather_nd(tensor, tf.stack([y0_offsets, x0_offsets], 2))\n x1_y0 = tf.gather_nd(tensor, tf.stack([y0_offsets, x1_offsets], 2))\n x0_y1 = tf.gather_nd(tensor, tf.stack([y1_offsets, x0_offsets], 2))\n x1_y1 = tf.gather_nd(tensor, tf.stack([y1_offsets, x1_offsets], 2))\n\n x_fract = tf.reshape(x_offsets - tf.floor(x_offsets), [height, width, 1])\n y_fract = tf.reshape(y_offsets - tf.floor(y_offsets), [height, width, 1])\n\n x_y0 = blend(x0_y0, x1_y0, x_fract)\n x_y1 = blend(x0_y1, x1_y1, x_fract)\n\n return blend(x_y0, x_y1, y_fract)\n\n\ndef value_map(tensor, shape, keepdims=False, signed_range=False, with_normalize=True):\n \"\"\"\n Create a grayscale value map from the given image Tensor, based on apparent luminance.\n\n Return value ranges between 0 and 1.\n\n :param Tensor tensor:\n :param list[int] shape:\n :param bool keepdims: If True, don't collapse the channel dimension.\n :param bool signed_range: If True, use an extended value range between -1 and 1.\n :return: Tensor of shape (height, width), or (height, width, channels) if keepdims was True.\n \"\"\"\n\n # XXX Why is 
shape sometimes wrong when passed in from refract?\n shape = tf.shape(tensor)\n\n if shape[2] in (1, 2):\n tensor = tensor[:, :, 0]\n\n elif shape[2] == 3:\n tensor = oklab.rgb_to_oklab(clamp01(tensor))[:, :, 0]\n\n elif shape[2] == 4:\n tensor = clamp01(tensor)\n tensor = oklab.rgb_to_oklab(tf.stack([tensor[:, :, 0], tensor[:, :, 1], tensor[:, :, 2]], 2))[:, :, 0]\n\n if keepdims:\n tensor = tf.expand_dims(tensor, -1)\n\n if with_normalize:\n tensor = normalize(tensor, signed_range=signed_range)\n\n elif signed_range:\n tensor = tensor * 2.0 - 1.0\n\n return tensor\n\n\ndef singularity(tensor, shape, diagram_type=VoronoiDiagramType.range, **kwargs):\n \"\"\"\n Return the range diagram for a single voronoi point, approximately centered.\n\n :param Tensor tensor:\n :param list[int] shape:\n :param VoronoiDiagramType|int diagram_type:\n :param DistanceMetric|int dist_metric:\n\n Additional kwargs will be sent to the `voronoi` metric.\n \"\"\"\n\n x, y = point_cloud(1, PointDistribution.square, shape)\n\n return voronoi(tensor, shape, diagram_type=diagram_type, xy=(x, y, 1), **kwargs)\n\n\ndef pin_corners(tensor, shape, freq, corners):\n \"\"\"Pin values to image corners, or align with image center, as per the given \"corners\" arg.\"\"\"\n\n if (not corners and (freq[0] % 2) == 0) or (corners and (freq[0] % 2) == 1):\n tensor = offset(tensor, shape, x=int((shape[1] / freq[1]) * .5), y=int((shape[0] / freq[0]) * .5))\n\n return tensor\n\n\ndef coerce_enum(value, cls):\n \"\"\"Attempt to coerce a given string or int value into an Enum instance.\"\"\"\n\n if isinstance(value, int):\n value = cls(value)\n\n elif isinstance(value, str):\n value = cls[value]\n\n return value\n\n\ndef clamp01(tensor):\n return tf.maximum(tf.minimum(tensor, 1.0), 0.0)\n"
] |
[
[
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.random.set_seed",
"tensorflow.cumsum",
"tensorflow.floor",
"tensorflow.nn.top_k",
"tensorflow.gather",
"tensorflow.square",
"numpy.repeat",
"tensorflow.math.pow",
"tensorflow.tile",
"tensorflow.gather_nd",
"tensorflow.image.resize_with_crop_or_pad",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.math.atan2",
"tensorflow.math.floor",
"tensorflow.reduce_max",
"tensorflow.sin",
"tensorflow.cos",
"numpy.random.seed",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.math.reduce_mean",
"tensorflow.reduce_min",
"tensorflow.sqrt",
"tensorflow.abs"
]
] |
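An aside on the looping machinery in the noisemaker source above: periodic_value offsets the phase by a per-pixel value and sweeps exactly one full sine period as time goes from 0 to 1, which is why rendered animations loop seamlessly. A minimal NumPy sketch of the same idea (standalone illustration, not part of the noisemaker package):

import math
import numpy as np

def normalized_sine(value):
    # Map sine's -1..1 output into 0..1, as in the helper above.
    return (np.sin(value) + 1.0) * 0.5

def periodic_value(time, value):
    # Phase-offset by `value`, then advance one full period per unit time.
    return normalized_sine((time - value) * math.tau)

# The field at time 0.0 matches the field at time 1.0, so frames loop.
v = np.random.uniform(size=(4, 4))
assert np.allclose(periodic_value(0.0, v), periodic_value(1.0, v))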
radiasoft/rssympic
|
[
"14b119267686c64e2d0fcd3be19c365b8d486e22"
] |
[
"rssympim/examples/rz_profile.py"
] |
[
"# # A simple test of SymPIM-rz which can be used for profiling the Python bottlenecks\n# \n# September 26, 2017\n# Nathan Cook\n\nimport numpy as np\n\nfrom rssympim.sympim_rz.data import particle_data, field_data\nfrom rssympim.sympim_rz.integrators import integrator\nfrom rssympim.constants import constants\n\n#from scipy.special import j0, j1, jn_zeros\n#import time\n#import itertools\n\n#import matplotlib\n#matplotlib.use('Agg')\n#import matplotlib.pyplot as plt\n#import matplotlib as mpl\n\n###############################\n#Define some useful parameters\n################################\n\n# species data\ncharge = constants.electron_charge\nmass = constants.electron_mass\nspeed_of_light = constants.c\n\n# plasma properties\nn0 = 1.e18 # cm^-3\nomega_p = np.sqrt(4.*np.pi*n0*charge*charge/mass)\nk_p = omega_p/speed_of_light\n\n# compute the simulation domain volume\nl_r = 4./(k_p/(2.*np.pi)) # cm\nl_z = 2./(k_p/(2.*np.pi)) # cm\nvolume = np.pi*l_r*l_r*l_z\n\n# Domain parameters\nn_electrons = np.round(n0*volume)\n\n# Simulation parameters\nn_macro_ptcls = 1000\nmacro_weight = n_electrons/n_macro_ptcls\nn_r_modes = 10\nn_z_modes = 10\n\n########################################\n#Define initial dictionary of parameters\n########################################\n\n_PD = {\n 'np_mode': 10, #number of particles per mode\n 'n_r': n_r_modes, # number of radial modes\n 'n_z': n_z_modes, # number of longitudinal modes\n 'charge': charge, # 1 esu\n 'mass': mass, # mass in m_e\n 'n_e': n_electrons, #electron density\n 'n_macro': n_macro_ptcls, #total number of macro particles\n 'weight': n_electrons/n_macro_ptcls, #20, # macroparticle weighting\n 'R': l_r, #4./(k_p/(2.*np.pi)), #cm 4., # maximum R value\n 'PR': 0.5, # maximum PR value\n 'Z': l_z, #2./(k_p/(2.*np.pi)), #cm #10., # maximum z value\n 'V': np.pi*l_r*l_r*l_z, #volume\n 'num_steps': 100, # number of steps to perform\n 'n_r_max': 64,# maximum number of radial modes\n 'n_z_max': 64\n }\n \n \nmax_fields = field_data.field_data(_PD['Z'], _PD['R'], _PD['n_r_max'], _PD['n_z_max'])\ndt_max = .1*2.*np.pi/np.amax(max_fields.omega) #set timestep as 1/10 of period for largest mode\n_PD['dt'] = dt_max\n\n######################################## \n#User defined functions\n########################################\n\ndef create_init_conds(particles, fields, PD):\n '''Instantiate particle data'''\n fields.omega_coords = particles.mc[0] * np.ones((fields.n_modes_z, fields.n_modes_r, 2))\n fields.dc_coords = np.zeros((fields.n_modes_z, fields.n_modes_r, 2))\n\n #better to use linspace than arange\n particles.r = np.linspace(0.1*PD['R'],0.9*PD['R'],particles.np) #np.arange(0.1*PD['R'], 0.9*PD['R'], 0.8*PD['R']/particles.np)\n particles.z = np.linspace(0.1*PD['Z'],0.9*PD['Z'],particles.np) #np.arange(0.1*PD['Z'], 0.9*PD['Z'], 0.8*PD['Z']/particles.np)\n particles.pr = -particles.mc * np.arange(0.1, .5, .4 / particles.np)\n particles.ell = particles.weight*constants.electron_mass*constants.c*particles.r\n particles.pz = particles.mc * np.arange(0., 10., 10. / particles.np)\n\n\n\ndef vary_rz_modes(mode_pair, PD, ns=1e2):\n '''\n Simulate ns number of steps of size ss using npart particles and a number of modes\n specified by mode_pair.\n\n Arguments:\n mode_pair (tuple): [n_r,n_z]\n PD (dict): dictionary of other fixed parameters\n ns (Optional[int]) : number of steps to run. 
Defaults to 100.\n\n    Returns:\n    None. (Timing instrumentation is commented out below; use an external profiler.)\n\n    '''\n    num_steps = ns\n    PD['n_r'] = mode_pair[0]\n    PD['n_z'] = mode_pair[1]\n    num_modes = mode_pair[0] + mode_pair[1]\n    PD['n_macro'] = PD['np_mode']*num_modes\n    num_particles = PD['n_macro']\n    PD['weight'] = PD['n_e']/PD['n_macro']\n    \n    \n    # create fields, particles, integrator\n    particles = particle_data.particle_data(PD['n_macro'], PD['charge'], PD['mass'], PD['weight'])\n    fields = field_data.field_data(PD['Z'], PD['R'], PD['n_z'], PD['n_r'])\n    create_init_conds(particles,fields, PD)\n    \n    \n    my_integrator = integrator.integrator(PD['dt'], fields)\n\n    step = 0\n\n    #print \"Running {} total modes and {} particles\".format(num_modes, num_particles)\n    \n    #t0 = time.time()\n    while step < num_steps:\n    my_integrator.second_order_step(particles,fields)\n    \n    step = step + 1\n    \n\n    return\n    \n\n###################\n# Main run sequence\n###################\n\nmode_pair = np.asarray([64,64])\nvary_rz_modes(mode_pair, _PD, ns=1)\n"
] |
[
[
"numpy.amax",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.arange",
"numpy.ones",
"numpy.round",
"numpy.zeros"
]
] |
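One detail worth pulling out of the profiling script above is its timestep rule: dt is set to a tenth of the period of the fastest field mode. A standalone sketch with an illustrative frequency array standing in for max_fields.omega (which the script obtains from field_data at the maximum mode counts):

import numpy as np

# Illustrative mode frequencies in rad/s; purely made-up values here.
omega = np.array([1.0e9, 5.0e9, 2.5e10])

# dt = (1/10) * (2*pi / omega_max): a tenth of the shortest mode period,
# keeping the symplectic integrator well-resolved for every mode.
dt_max = 0.1 * 2. * np.pi / np.amax(omega)
print(dt_max)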
ptrendx/tvm
|
[
"f07fe80aaf110086b651f2850506c803a5688ddb"
] |
[
"tests/python/unittest/test_codegen_c_host.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nimport numpy as np\nfrom tvm.contrib import util\n\ndef test_add():\n nn = 1024\n n = tvm.convert(nn)\n A = tvm.placeholder((n,), name='A')\n B = tvm.placeholder((n,), name='B')\n C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i), name='C')\n s = tvm.create_schedule(C.op)\n\n def check_c():\n mhost = tvm.build(s, [A, B, C], \"c\", name=\"fadd\")\n temp = util.tempdir()\n path_dso = temp.relpath(\"temp.so\")\n mhost.export_library(path_dso)\n m = tvm.module.load(path_dso)\n fadd = m['fadd']\n ctx = tvm.cpu(0)\n # launch the kernel.\n n = nn\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), ctx)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), ctx)\n fadd(a, b, c)\n tvm.testing.assert_allclose(\n c.asnumpy(), a.asnumpy() + b.asnumpy())\n check_c()\n\n\ndef test_add_pipeline():\n nn = 1024\n n = tvm.convert(nn)\n A = tvm.placeholder((n,), name='A')\n B = tvm.placeholder((n,), name='B')\n AA = tvm.compute((n,), lambda *i: A(*i), name='A')\n BB = tvm.compute((n,), lambda *i: B(*i), name='B')\n T = tvm.compute(A.shape, lambda *i: AA(*i) + BB(*i), name='T')\n C = tvm.compute(A.shape, lambda *i: T(*i), name='C')\n s = tvm.create_schedule(C.op)\n xo, xi = s[C].split(C.op.axis[0], factor=4)\n xo1, xo2 = s[C].split(xo, factor=13)\n s[C].parallel(xo2)\n s[C].pragma(xo1, \"parallel_launch_point\")\n s[C].pragma(xo2, \"parallel_stride_pattern\")\n s[C].pragma(xo2, \"parallel_barrier_when_finish\")\n s[C].vectorize(xi)\n\n def check_c():\n if not tvm.module.enabled(\"llvm\"):\n return\n # Specifically allow offset to test codepath when offset is available\n Ab = tvm.decl_buffer(\n A.shape, A.dtype,\n elem_offset=tvm.var('Aoffset'),\n offset_factor=8,\n name='A')\n binds = {A : Ab}\n # BUILD and invoke the kernel.\n f1 = tvm.lower(s, [A,B,C], name=\"fadd_pipeline\")\n fsplits = [x for x in tvm.ir_pass.SplitHostDevice(f1)]\n fsplits[0] = tvm.ir_pass.LowerTVMBuiltin(fsplits[0])\n mhost = tvm.codegen.build_module(fsplits[0], \"c\")\n temp = util.tempdir()\n path_dso = temp.relpath(\"temp.so\")\n mhost.export_library(path_dso)\n m = tvm.module.load(path_dso)\n fadd = m[\"fadd_pipeline\"]\n ctx = tvm.cpu(0)\n # launch the kernel.\n n = nn\n a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), ctx)\n c = tvm.nd.array(np.zeros(n, dtype=C.dtype), ctx)\n fadd(a, b, c)\n tvm.testing.assert_allclose(\n c.asnumpy(), a.asnumpy() + b.asnumpy())\n\n with tvm.build_config(offset_factor=4):\n check_c()\n\n\ndef test_reinterpret():\n nn = 1024\n n = tvm.convert(nn)\n A = tvm.placeholder((n,), name='A', dtype=\"int32\")\n B = tvm.compute(A.shape, lambda *i: 
tvm.call_pure_intrin(\"float32\", \"reinterpret\", A(*i)), name='B')\n s = tvm.create_schedule(B.op)\n\n def check_c():\n mhost = tvm.build(s, [A, B], \"c\", name=\"reinterpret\")\n temp = util.tempdir()\n path_dso = temp.relpath(\"temp.so\")\n mhost.export_library(path_dso)\n m = tvm.module.load(path_dso)\n fadd = m['reinterpret']\n ctx = tvm.cpu(0)\n n = nn\n a = tvm.nd.array(np.random.randint(-2 ** 30, 2 ** 30, size=n).astype(A.dtype), ctx)\n b = tvm.nd.array(np.zeros(n, dtype=B.dtype), ctx)\n fadd(a, b)\n tvm.testing.assert_allclose(\n b.asnumpy(), a.asnumpy().view('float32'))\n check_c()\n\n\nif __name__ == \"__main__\":\n test_add()\n test_add_pipeline()\n test_reinterpret()\n"
] |
[
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.randint"
]
] |
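The expected result in test_reinterpret above can be sanity-checked without TVM at all, since NumPy's view() performs the same bit-level reinterpretation the test asserts against; a minimal sketch:

import numpy as np

# Same style of random int32 payloads the test feeds the generated C kernel.
a = np.random.randint(-2 ** 30, 2 ** 30, size=8).astype("int32")

# view() reuses the underlying bytes as float32 with no numeric conversion,
# matching the semantics of the "reinterpret" intrinsic.
b = a.view("float32")
assert b.dtype == np.float32 and a.nbytes == b.nbytes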
twang5/mne-python
|
[
"a885741c16abc21b5fccba85a3650a03f87e5e8f"
] |
[
"mne/gui/tests/test_ieeg_locate_gui.py"
] |
[
"# Authors: Alex Rockhill <[email protected]>\n#\n# License: BSD-3-clause\n\nimport os.path as op\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nimport pytest\n\nimport mne\nfrom mne.datasets import testing\nfrom mne.utils import requires_nibabel, requires_version\nfrom mne.viz.utils import _fake_click\n\ndata_path = testing.data_path(download=False)\nsubject = 'sample'\nsubjects_dir = op.join(data_path, 'subjects')\nsample_dir = op.join(data_path, 'MEG', subject)\nraw_path = op.join(sample_dir, 'sample_audvis_trunc_raw.fif')\nfname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif')\n\n\n@requires_nibabel()\[email protected]\ndef _fake_CT_coords(skull_size=5, contact_size=2):\n \"\"\"Make somewhat realistic CT data with contacts.\"\"\"\n import nibabel as nib\n brain = nib.load(\n op.join(subjects_dir, subject, 'mri', 'brain.mgz'))\n verts = mne.read_surface(\n op.join(subjects_dir, subject, 'bem', 'outer_skull.surf'))[0]\n verts = mne.transforms.apply_trans(\n np.linalg.inv(brain.header.get_vox2ras_tkr()), verts)\n x, y, z = np.array(brain.shape).astype(int) // 2\n coords = [(x, y - 14, z), (x - 10, y - 15, z),\n (x - 20, y - 16, z + 1), (x - 30, y - 16, z + 1)]\n center = np.array(brain.shape) / 2\n # make image\n np.random.seed(99)\n ct_data = np.random.random(brain.shape).astype(np.float32) * 100\n # make skull\n for vert in verts:\n x, y, z = np.round(vert).astype(int)\n ct_data[slice(x - skull_size, x + skull_size + 1),\n slice(y - skull_size, y + skull_size + 1),\n slice(z - skull_size, z + skull_size + 1)] = 1000\n # add electrode with contacts\n for (x, y, z) in coords:\n # make sure not in skull\n assert np.linalg.norm(center - np.array((x, y, z))) < 50\n ct_data[slice(x - contact_size, x + contact_size + 1),\n slice(y - contact_size, y + contact_size + 1),\n slice(z - contact_size, z + contact_size + 1)] = \\\n 1000 - np.linalg.norm(np.array(np.meshgrid(\n *[range(-contact_size, contact_size + 1)] * 3)), axis=0)\n ct = nib.MGHImage(ct_data, brain.affine)\n coords = mne.transforms.apply_trans(\n ct.header.get_vox2ras_tkr(), np.array(coords))\n return ct, coords\n\n\n@requires_nibabel()\[email protected]\ndef _locate_ieeg(renderer_interactive_pyvistaqt):\n # Use a fixture to create these classes so we can ensure that they\n # are closed at the end of the test\n guis = list()\n\n def fun(*args, **kwargs):\n guis.append(mne.gui.locate_ieeg(*args, **kwargs))\n return guis[-1]\n\n yield fun\n\n for gui in guis:\n try:\n gui.close()\n except Exception:\n pass\n\n\ndef test_ieeg_elec_locate_gui_io(_locate_ieeg):\n \"\"\"Test the input/output of the intracranial location GUI.\"\"\"\n import nibabel as nib\n info = mne.create_info([], 1000)\n aligned_ct = nib.MGHImage(np.zeros((256, 256, 256), dtype=np.float32),\n np.eye(4))\n trans = mne.transforms.Transform('head', 'mri')\n with pytest.raises(ValueError,\n match='No channels found in `info` to locate'):\n _locate_ieeg(info, aligned_ct, subject, subjects_dir)\n info = mne.create_info(['test'], 1000, ['seeg'])\n with pytest.raises(ValueError, match='CT is not aligned to MRI'):\n _locate_ieeg(info, trans, aligned_ct, subject=subject,\n subjects_dir=subjects_dir)\n\n\n@requires_version('sphinx_gallery')\[email protected]_testing_data\ndef test_locate_scraper(_locate_ieeg, _fake_CT_coords, tmp_path):\n \"\"\"Test sphinx-gallery scraping of the GUI.\"\"\"\n raw = mne.io.read_raw_fif(raw_path)\n raw.pick_types(eeg=True)\n ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2',\n 'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 
2'}\n raw.pick_channels(list(ch_dict.keys()))\n raw.rename_channels(ch_dict)\n raw.set_montage(None)\n aligned_ct, _ = _fake_CT_coords\n trans = mne.read_trans(fname_trans)\n with pytest.warns(RuntimeWarning, match='`pial` surface not found'):\n gui = _locate_ieeg(raw.info, trans, aligned_ct,\n subject=subject, subjects_dir=subjects_dir)\n (tmp_path / '_images').mkdir()\n image_path = str(tmp_path / '_images' / 'temp.png')\n gallery_conf = dict(builder_name='html', src_dir=str(tmp_path))\n block_vars = dict(\n example_globals=dict(gui=gui),\n image_path_iterator=iter([image_path]))\n assert not op.isfile(image_path)\n assert not getattr(gui, '_scraped', False)\n mne.gui._LocateScraper()(None, block_vars, gallery_conf)\n assert op.isfile(image_path)\n assert gui._scraped\n\n\[email protected]_testing_data\ndef test_ieeg_elec_locate_gui_display(_locate_ieeg, _fake_CT_coords):\n \"\"\"Test that the intracranial location GUI displays properly.\"\"\"\n raw = mne.io.read_raw_fif(raw_path)\n raw.pick_types(eeg=True)\n ch_dict = {'EEG 001': 'LAMY 1', 'EEG 002': 'LAMY 2',\n 'EEG 003': 'LSTN 1', 'EEG 004': 'LSTN 2'}\n raw.pick_channels(list(ch_dict.keys()))\n raw.rename_channels(ch_dict)\n raw.set_montage(None)\n aligned_ct, coords = _fake_CT_coords\n trans = mne.read_trans(fname_trans)\n with pytest.warns(RuntimeWarning, match='`pial` surface not found'):\n gui = _locate_ieeg(raw.info, trans, aligned_ct,\n subject=subject, subjects_dir=subjects_dir)\n\n gui._ras[:] = coords[0] # start in the right position\n gui._move_cursors_to_pos()\n for coord in coords:\n coord_vox = mne.transforms.apply_trans(gui._ras_vox_t, coord)\n _fake_click(gui._figs[2], gui._figs[2].axes[0],\n coord_vox[:-1], xform='data', kind='release')\n assert_allclose(coord, gui._ras, atol=3) # clicks are a bit off\n\n # test snap to center\n gui._ras[:] = coords[0] # move to first position\n gui._move_cursors_to_pos()\n gui._mark_ch()\n assert_allclose(coords[0], gui._chs['LAMY 1'], atol=0.2)\n gui._snap_button.click()\n assert gui._snap_button.text() == 'Off'\n # now make sure no snap happens\n gui._ras[:] = coords[1] + 1\n gui._mark_ch()\n assert_allclose(coords[1] + 1, gui._chs['LAMY 2'], atol=0.01)\n # check that it turns back on\n gui._snap_button.click()\n assert gui._snap_button.text() == 'On'\n\n # test remove\n gui._ch_index = 1\n gui._update_ch_selection()\n gui._remove_ch()\n assert np.isnan(gui._chs['LAMY 2']).all()\n\n # check that raw object saved\n assert not np.isnan(raw.info['chs'][0]['loc'][:3]).any() # LAMY 1\n assert np.isnan(raw.info['chs'][1]['loc'][:3]).all() # LAMY 2 (removed)\n\n # move sliders\n gui._alpha_slider.setValue(75)\n assert gui._ch_alpha == 0.75\n gui._radius_slider.setValue(5)\n assert gui._radius == 5\n ct_sum_before = np.nansum(gui._images['ct'][0].get_array().data)\n gui._ct_min_slider.setValue(500)\n assert np.nansum(gui._images['ct'][0].get_array().data) < ct_sum_before\n\n # test buttons\n gui._toggle_show_brain()\n assert 'mri' in gui._images\n assert 'local_max' not in gui._images\n gui._toggle_show_max()\n assert 'local_max' in gui._images\n assert 'mip' not in gui._images\n gui._toggle_show_mip()\n assert 'mip' in gui._images\n"
] |
[
[
"numpy.random.random",
"numpy.random.seed",
"numpy.isnan",
"numpy.eye",
"numpy.round",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.zeros"
]
] |
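The fake-contact construction in _fake_CT_coords above writes a small cube whose intensity peaks at the contact center and falls off with voxel distance; the meshgrid/norm expression it uses looks like this in isolation (a sketch, not MNE API):

import numpy as np

contact_size = 2

# Distance of every voxel in a (2k+1)^3 cube from the cube's center voxel.
offsets = np.meshgrid(*[range(-contact_size, contact_size + 1)] * 3)
dist = np.linalg.norm(np.array(offsets), axis=0)

# Subtracting from a bright value gives a peak at the center, so the GUI's
# snap-to-center logic has a well-defined local maximum to find.
intensity = 1000 - dist
assert intensity[contact_size, contact_size, contact_size] == 1000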
YFCodeDream/novelvqa
|
[
"63a6bdd91282d09685d98a7cdf3cc80b3783e048"
] |
[
"core/model/losses.py"
] |
[
"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom math import sqrt\nimport numpy as np\n\n\nclass ContrastProjection(nn.Module):\n def __init__(self, __C):\n super().__init__()\n self.linear1 = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n self.linear2 = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n\n def forward(self, tokens):\n return self.linear2(F.relu(self.linear1(tokens)))\n\n\nclass Losses:\n def __init__(self, __C):\n self.__C = __C\n self.maskval = -1e9\n if __C.USE_GROUNDING:\n self._point_loss = nn.CrossEntropyLoss().cuda()\n else:\n self._point_loss = None\n\n if __C.SKILL_CONT_LOSS:\n self._skill_contrast_proj = ContrastProjection(__C).cuda()\n self._skill_contrast_loss = nn.CrossEntropyLoss().cuda()\n else:\n self._skill_contrast_proj = None\n self._skill_contrast_loss = None\n\n self._skill_pool_method = __C.SKILL_POOL\n\n self._skill_temp = __C.SK_TEMP\n\n self._point_temp = __C.PT_TEMP\n\n def get_pointing_scores(self, tgt, refs, ref_masks, point_mask_tok):\n # tgt: size: batch x sent_len x 512\n # refs[i]: size: batch x sent_len x 512\n # ref_masks[i]: batch x sent_len; indicates the locations where there is padding (1 if the index is padding, 0 otherwise)\n # point_mask_tok: batch x 1; vector indicating where in the target sequence is the masked token used for pointing\n\n batch_size, num_toks, tok_dim = tgt.size()\n n_refs = len(refs)\n\n row_id = torch.from_numpy(np.array(range(batch_size)))\n masked_tok = tgt[row_id.long(), point_mask_tok.squeeze(1)] # batch_size x tok_dim\n\n all_ref_hiddens = torch.cat(refs, dim=1)\n all_ref_masks = torch.cat(ref_masks, dim=-1)\n\n scores = torch.zeros(batch_size, num_toks * n_refs, dtype=tgt.dtype, device=tgt.device)\n\n for i in range(batch_size):\n scores[i, :] = torch.matmul(masked_tok[i], all_ref_hiddens[i].t()) / sqrt(tok_dim)\n\n logits = scores.masked_fill(all_ref_masks, self.maskval) # mask out padding\n\n return logits, F.softmax(logits, dim=-1)\n\n def pointing_loss(self, tgt, refs, ref_masks, point_mask_tok, pos):\n logits, _ = self.get_pointing_scores(tgt, refs, ref_masks, point_mask_tok)\n\n print(f'\\nlogits: {logits}')\n print(f'logits.shape: {logits.shape}')\n print('-------------------------------------------------------------------')\n # logits的shape是[32, 42],32是batch size,那么pos的元素就应该小于42啊\n # pos.squeeze(1):tensor([3132, 3132, 4, 1, 3134, 6267, 5, 6, 2, 6260, 2, 3132,\n # 3131, 5, 5, 2, 3133, 1, 3136, 6260, 6265, 6260, 6261, 3134,\n # 6261, 10, 2, 6, 3, 6263, 2, 6261], device='cuda:0')\n\n # 测试:File \"C:\\Users\\Administrator\\anaconda3\\envs\\CT-py37\\lib\\site-packages\\torch\\nn\\functional.py\", line 2824, in cross_entropy\n # return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)\n # IndexError: Target 3132 is out of bounds.\n print(f'pos: {pos.squeeze(1)}')\n print('-------------------------------------------------------------------')\n\n point_loss_ = self._point_loss(logits, pos.squeeze(1))\n return point_loss_\n\n def skill_contrast_loss(self, tgt_tokens, tgt_mask, all_ref_tokens, ref_masks, ref_labels):\n # tgt_tokens: batch x 1 x dim OR batch x # tokens x dim (if pool_method is given)\n # all_ref_tokens: [batch x 1 x dim OR batch x # tokens x dim] x # refs\n\n if self._skill_pool_method in {'mean', 'max'}:\n tgt_tokens.masked_fill_(tgt_mask.unsqueeze(2), 0.)\n\n if self._skill_pool_method == 'mean':\n tgt_tokens = torch.mean(tgt_tokens, dim=1, keepdim=True)\n elif self._skill_pool_method == 'max':\n tgt_tokens = 
torch.max(tgt_tokens, dim=1, keepdim=True).values  # torch.max with dim returns (values, indices)\n\n    masked_ref_tokens = []\n\n    for rt, rm in zip(all_ref_tokens, ref_masks):\n\n    rt.masked_fill_(rm.unsqueeze(2), 0.)\n\n    if self._skill_pool_method == 'mean':\n    rt = torch.mean(rt, dim=1, keepdim=True)\n    elif self._skill_pool_method == 'max':\n    rt = torch.max(rt, dim=1, keepdim=True).values  # keep values only, as above\n    masked_ref_tokens.append(rt)\n\n    all_ref_tokens = torch.cat(masked_ref_tokens, dim=1)  # batch x # refs x D\n    else:\n    all_ref_tokens = torch.cat(all_ref_tokens, dim=1)  # batch x # refs x D\n\n    tgt_tokens = self._skill_contrast_proj(tgt_tokens)\n    all_ref_tokens = self._skill_contrast_proj(all_ref_tokens)\n\n    norm_tgt_cls = nn.functional.normalize(tgt_tokens, p=2, dim=-1)\n    norm_all_ref_cls = nn.functional.normalize(all_ref_tokens, p=2, dim=-1)\n\n    sims_ = torch.bmm(norm_all_ref_cls, norm_tgt_cls.permute(0, 2, 1)).squeeze(2)\n\n    sims_ = torch.div(sims_, self._skill_temp)\n\n    print(f'\\nsims_: {sims_}')\n    print(\"--------------------------------------------------------------\")\n    print(f'ref_labels: {ref_labels.squeeze(-1)}')\n    print(\"--------------------------------------------------------------\")\n    return self._skill_contrast_loss(sims_, ref_labels.squeeze(-1))\n"
] |
[
[
"torch.nn.functional.normalize",
"torch.div",
"torch.nn.functional.softmax",
"torch.mean",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.nn.Linear"
]
] |
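The skill_contrast_loss above follows the standard temperature-scaled contrastive pattern: cosine similarities between a target embedding and a set of references, pushed through cross-entropy so the softmax over references favors the positive. A minimal PyTorch sketch with made-up sizes (none of the repo's classes or configs are used):

import torch
import torch.nn.functional as F

batch, n_refs, dim, temp = 4, 3, 16, 0.1

# L2-normalizing both sides makes the bmm below a cosine similarity.
tgt = F.normalize(torch.randn(batch, 1, dim), p=2, dim=-1)
refs = F.normalize(torch.randn(batch, n_refs, dim), p=2, dim=-1)

# batch x n_refs similarities; dividing by a small temperature sharpens
# the softmax that cross_entropy applies internally.
sims = torch.bmm(refs, tgt.permute(0, 2, 1)).squeeze(2) / temp

labels = torch.randint(0, n_refs, (batch,))
loss = F.cross_entropy(sims, labels)
print(float(loss))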
AadilLatif/HELICS-Examples
|
[
"2aa7e9e280b6cb96ee70e79802a68905e5002253"
] |
[
"user_guide_examples/fundamental/fundamental_message_comm/endpoints/Battery.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 9/28/2020\n\nThis is a simple battery message federate that models an EV\nbattery as it is being charged. The federate receives a voltage signal\nrepresenting the voltage applied to the charging terminals of the battery\nand based on its internally modeled SOC, calculates the current draw of\nthe battery and sends it back to the EV federate. Note that this SOC should\nbe considered the true SOC of the battery which may be different than the\nSOC modeled by the charger. Each battery ceases charging when its SOC reaches 100%.\n\n@author: Allison M. Campbell\[email protected]\n\"\"\"\n\nimport helics as h\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG)\n\n\ndef destroy_federate(fed):\n '''\n As part of ending a HELICS co-simulation it is good housekeeping to\n formally destroy a federate. Doing so informs the rest of the\n federation that it is no longer a part of the co-simulation and they\n should proceed without it (if applicable). Generally this is done\n when the co-simulation is complete and all federates end execution\n at more or less the same wall-clock time.\n\n :param fed: Federate to be destroyed\n :return: (none)\n '''\n\n # Adding extra time request to clear out any pending messages to avoid\n # annoying errors in the broker log. Any message are tacitly disregarded.\n grantedtime = h.helicsFederateRequestTime(fed, h.HELICS_TIME_MAXTIME)\n status = h.helicsFederateDisconnect(fed)\n h.helicsFederateFree(fed)\n h.helicsCloseLibrary()\n logger.info('Federate finalized')\n\n\ndef get_new_battery(numBattery):\n '''\n Using hard-coded probabilities, a distribution of batteries of\n fixed battery sizes are generated. 
The number of batteries is a user\n provided parameter.\n\n :param numBattery: Number of batteries to generate\n :return\n listOfBatts: List of generated batteries\n\n '''\n\n # Probabilities of a new EV battery having small capacity (sm),\n # medium capacity (med), and large capacity (lg).\n sm = 0.2\n med = 0.2\n lg = 0.6\n\n # Batteries have different sizes:\n # [25,62,100]\n listOfBatts = np.random.choice([25,62,100],numBattery,p=[sm,med,\n lg]).tolist()\n\n return listOfBatts\n\n\nif __name__ == \"__main__\":\n np.random.seed(2622)\n\n ########## Registering federate and configuring from JSON################\n fed = h.helicsCreateMessageFederateFromConfig(\"BatteryConfig.json\")\n federate_name = h.helicsFederateGetName(fed)\n logger.info(f'Created federate {federate_name}')\n\n end_count = h.helicsFederateGetEndpointCount(fed)\n logger.debug(f'\\tNumber of endpoints: {end_count}')\n\n # Diagnostics to confirm JSON config correctly added the required\n # endpoints\n endid = {}\n for i in range(0, end_count):\n endid[i] = h.helicsFederateGetEndpointByIndex(fed, i)\n end_name = h.helicsEndpointGetName(endid[i])\n logger.debug(f'\\tRegistered Endpoint ---> {end_name}')\n\n ############## Entering Execution Mode ##################################\n h.helicsFederateEnterExecutingMode(fed)\n logger.info('Entered HELICS execution mode')\n\n # Define battery physics as empirical values\n socs = np.array([0, 1])\n\n # 8 ohms to 150 ohms\n effective_R = np.array([8, 150])\n\n batt_list = get_new_battery(end_count)\n\n current_soc = {}\n for i in range (0, end_count):\n current_soc[i] = (np.random.randint(0,60))/100\n\n hours = 24 * 1\n total_interval = int(60 * 60 * hours)\n update_interval = int(h.helicsFederateGetTimeProperty(\n fed,\n h.helics_property_time_period))\n update_offset = int(h.helicsFederateGetTimeProperty(\n fed,\n h.helics_property_time_offset))\n grantedtime = 0\n\n # Data collection lists\n time_sim = []\n total_current = []\n soc = {}\n\n # As long as granted time is in the time range to be simulated...\n while grantedtime < total_interval:\n\n # Time request for the next interval to be simulated\n requested_time = (grantedtime+update_interval+update_offset)\n logger.debug(f'Requesting time {requested_time}')\n grantedtime = h.helicsFederateRequestTime(fed, requested_time)\n logger.debug(f'Granted time {grantedtime}')\n\n charging_current = 0;\n # Iterating over endpoints in this case since this example\n # uses only one charging voltage for all five batteries\n for j in range(0,end_count):\n logger.debug(f'Battery {j+1} time {grantedtime}')\n\n # Get the applied charging voltage from the EV\n # Check for messages from Charger\n endpoint_name = h.helicsEndpointGetName(endid[j])\n if h.helicsEndpointHasMessage(endid[j]):\n msg = h.helicsEndpointGetMessage(endid[j])\n charging_voltage = float(h.helicsMessageGetString(msg))\n source = h.helicsMessageGetOriginalSource(msg)\n logger.debug(f'Received message voltage {charging_voltage:.2f}'\n f' at endpoint {endpoint_name}'\n f' from {source}'\n f' at time {grantedtime}')\n\n # Calculate charging current and update SOC\n R = np.interp(current_soc[j], socs, effective_R)\n logger.debug(f'\\tEffective R (ohms): {R:.2f}')\n # If battery is full assume its stops charging on its own\n # and the charging current goes to zero.\n if current_soc[j] >= 1:\n charging_current = 0;\n else:\n charging_current = charging_voltage / R\n logger.debug(f'\\tCharging current (A): {charging_current:.2f}')\n\n added_energy = (charging_current * 
charging_voltage * \\\n    update_interval/3600) / 1000\n    logger.debug(f'\\tAdded energy (kWh): {added_energy:.4f}')\n    current_soc[j] = current_soc[j] + added_energy / batt_list[j]\n    logger.debug(f'\\tSOC: {current_soc[j]:.4f}')\n    else:\n    logger.debug(f'\\tNo messages at endpoint {endpoint_name} '\n    f'received at '\n    f'time {grantedtime}')\n\n\n    # send charging current message\n    # to this endpoint's default destination, \"\"\n    h.helicsEndpointSendBytesTo(endid[j], str(charging_current), \"\")  #\n    logger.debug(f'Sent message {charging_current:.2f}'\n    f' from endpoint {endpoint_name}'\n    f' at time {grantedtime}')\n\n    # Store SOC for later analysis/graphing\n    if endid[j] not in soc:\n    soc[endid[j]] = []\n    soc[endid[j]].append(float(current_soc[j]))\n\n    # Data collection vectors\n    time_sim.append(grantedtime)\n\n    # Cleaning up HELICS stuff once we've finished the co-simulation.\n    destroy_federate(fed)\n    # Printing out final results graphs for comparison/diagnostic purposes.\n    xaxis = np.array(time_sim)/3600\n    y = []\n    for key in soc:\n    y.append(np.array(soc[key]))\n\n    plt.figure()\n\n    fig, axs = plt.subplots(5, sharex=True, sharey=True)\n    fig.suptitle('SOC of each EV Battery')\n\n    axs[0].plot(xaxis, y[0], color='tab:blue', linestyle='-')\n    axs[0].set_yticks(np.arange(0,1.25,0.5))\n    axs[0].set(ylabel='Batt1')\n    axs[0].grid(True)\n\n    axs[1].plot(xaxis, y[1], color='tab:blue', linestyle='-')\n    axs[1].set(ylabel='Batt2')\n    axs[1].grid(True)\n\n    axs[2].plot(xaxis, y[2], color='tab:blue', linestyle='-')\n    axs[2].set(ylabel='Batt3')\n    axs[2].grid(True)\n\n    axs[3].plot(xaxis, y[3], color='tab:blue', linestyle='-')\n    axs[3].set(ylabel='Batt4')\n    axs[3].grid(True)\n\n    axs[4].plot(xaxis, y[4], color='tab:blue', linestyle='-')\n    axs[4].set(ylabel='Batt5')\n    axs[4].grid(True)\n    plt.xlabel('time (hr)')\n    #for ax in axs():\n#    ax.label_outer()\n    plt.savefig('fundamental_endpoints_battery_SOCs.png', format='png')\n\n    plt.show()\n"
] |
[
[
"numpy.random.seed",
"numpy.random.choice",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.random.randint",
"numpy.interp",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
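The per-interval battery update inside the federate's main loop above boils down to three lines of physics; a standalone sketch with illustrative numbers (in the federate these values arrive via HELICS messages and the JSON config):

import numpy as np

socs = np.array([0, 1])
effective_R = np.array([8, 150])    # ohms at empty vs. full, as above

soc, charging_voltage = 0.4, 240.0  # illustrative state and volts
update_interval, capacity = 60, 62  # seconds and kWh

# Effective resistance grows with SOC, so charging current tapers off.
R = np.interp(soc, socs, effective_R)
current = 0.0 if soc >= 1 else charging_voltage / R

# kWh added this interval, folded back into the state of charge.
added_kwh = (current * charging_voltage * update_interval / 3600) / 1000
soc += added_kwh / capacity
print(R, current, soc)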
ChristophorusX/Burer-Monteiro
|
[
"c69b72c3ad5eab901a6df4da6ebf48d4c6ee8e3b"
] |
[
"trigonometric.py"
] |
[
"import numpy as np\nimport burer_monteiro as bm\nimport basin_of_attraction as basin\nimport aux\nfrom scipy import optimize as opt\n\n\ndef trig_objective_function(A, theta):\n \"\"\"\n Returns the function value in trigonometric parameterization.\n \"\"\"\n\n # dim, _ = A.shape\n # T = np.empty((dim, dim))\n # for i in range(dim):\n # T[i, :] = theta[i]\n # T[i, :] = T[i, :] - theta\n # return np.trace(A.dot(np.cos(T)))\n Q = recover_solution(theta)\n return np.trace(A.dot(Q.dot(Q.T)))\n\n\ndef trig_grad(A, theta):\n \"\"\"\n Returns the function gradient in trigonometric parameterization.\n \"\"\"\n\n dim, _ = A.shape\n # T = np.empty((dim, dim))\n # for i in range(dim):\n # T[i, :] = theta[i]\n # T[i, :] = T[i, :] - theta\n # one = np.ones(dim).reshape((-1, 1))\n # hadamard = (A * np.sin(T)).transpose()\n # return hadamard.dot(one).ravel()\n stack = np.tile(theta, (dim, 1))\n T = stack.transpose() - stack\n one = np.ones(dim).reshape((-1, 1))\n hadamard = (A * np.sin(T)).transpose()\n return hadamard.dot(one).ravel()\n \n\ndef trig_hess(A, theta):\n \"\"\"\n Returns the function Hessian in trigonometric parameterization.\n \"\"\"\n\n dim, _ = A.shape\n T = np.empty((dim, dim))\n for i in range(dim):\n T[i, :] = theta[i]\n T[i, :] = T[i, :] - theta\n # one = np.ones(dim).reshape((-1, 1))\n return A * np.cos(T) - np.diag(np.sum(A * np.cos(T), axis=1)) # np.diag((A * np.cos(T)).dot(one))\n\n\ndef trig_hessp(A, theta, p):\n \"\"\"\n Returns the function Hessian in trigonometric parameterization\n as an inner product function.\n \"\"\"\n\n H = trig_hess(A, theta)\n return p.dot(H)\n\n\ndef recover_solution(theta):\n \"\"\"\n Recovers triangular matrix Q from trigonometric parameterization.\n \"\"\"\n\n Q = np.hstack((np.cos(theta).reshape((-1, 1)),\n np.sin(theta).reshape((-1, 1))))\n return Q\n\n\ndef trig_trust_region(A, z):\n \"\"\"\n Returns the optimization result Q under trust region with\n trigonometric parameterization.\n \"\"\"\n\n dim, _ = A.shape\n init = np.random.uniform(-10, 10, size=dim)\n optimizer = bm.minimize_with_trust(fun=lambda theta: -trig_objective_function(A, theta),\n x0=init, n_rows=1, plotting=None, printing=None,\n jac=lambda theta: -trig_grad(A, theta),\n hessp=lambda theta, p: -\n trig_hessp(A, theta, p),\n hess=lambda theta: -trig_hess(A, theta))\n theta = optimizer.x\n Q = recover_solution(theta)\n return Q, theta\n\n\ndef trig_bfgs(A, z, init=None):\n \"\"\"\n Returns the optimization result Q under BFGS with\n trigonometric parameterization.\n \"\"\"\n\n dim, _ = A.shape\n if init is None:\n init = np.random.uniform(-10, 10, size=dim)\n optimizer = opt.minimize(fun=lambda theta: -trig_objective_function(A, theta),\n x0=init, jac=lambda theta: -trig_grad(A, theta),\n method='BFGS')\n theta = optimizer.x\n Q = recover_solution(theta)\n return Q, theta\n\n\nif __name__ == '__main__':\n A, z = basin.get_observation(10, 3, 'sync')\n z = z.reshape((-1, 1))\n # print(trig_objective_function(A, np.ones(10)))\n # print(trig_grad(A, np.ones(10)))\n # print(trig_hess(A, np.ones(10)))\n Q = trig_trust_region(A, z)\n # Q = trig_bfgs(A, z)\n diff_norm = aux.frobenius_distance(z.dot(z.T), Q.dot(Q.T))\n print(\"The Frobenius distance to the ground truth is: {}\".format(diff_norm))\n"
] |
[
[
"numpy.cos",
"numpy.tile",
"numpy.ones",
"numpy.sin",
"numpy.random.uniform",
"numpy.empty"
]
] |
tehZevo/pget
|
[
"f3b5ff85221d15911548f7d18d45b1bec9c68ddb"
] |
[
"pget/agent.py"
] |
[
"import tensorflow as tf\nimport numpy as np\n\nfrom ml_utils.keras import get_states, set_states, apply_regularization\nfrom ml_utils.model_builders import dense_stack\n\nfrom .pget import create_traces, update_traces, step_weights_opt\nfrom .pget import explore_continuous, explore_discrete, explore_multibinary\n\n#TODO: saving/loading?\n#TODO: args/kwargs for get_action/train, maybe accept \"done\" in train\n#TODO: actually support alt_trace_method.... (see pget.update_traces...)\n\nclass Agent():\n \"\"\"Note: requires TF eager\"\"\"\n def __init__(self, model, optimizer=None, action_type=\"continuous\", alt_trace_method=False,\n epsilon=1e-7, advantage_clip=1, gamma=0.99, lambda_=0.9,\n regularization=1e-6, noise=0.1, initial_deviation=1,\n late_squash=True, use_squared_deviation=True):\n self.model = model\n\n #TODO: is this needed?\n self.input_shape = tuple(self.model.input_shape[1:])\n self.output_shape = tuple(self.model.output_shape[1:])\n\n #hyperparameters\n self.eps = epsilon\n self.advantage_clip = advantage_clip\n self.gamma = gamma\n self.lambda_ = lambda_\n self.alt_trace_method = alt_trace_method\n self.regularization = regularization\n self.noise = noise\n self.last_advantage = 0\n self.late_squash = late_squash\n self.optimizer = (optimizer if optimizer is not None else\n tf.keras.optimizers.Adam(1e-3, clipnorm=1.0))\n self.use_squared_deviation = use_squared_deviation\n \n #resolve exploration method/loss function\n self.action_type = action_type.lower()\n\n if self.action_type == \"discrete\":\n self.loss = tf.keras.losses.categorical_crossentropy\n explore_func = explore_discrete\n elif self.action_type == \"multibinary\":\n self.loss = tf.keras.losses.binary_crossentropy\n explore_func = explore_multibinary\n elif self.action_type == \"continuous\":\n #TODO: try huber loss again?\n self.loss = tf.losses.mean_squared_error\n explore_func = explore_continuous\n else:\n raise ValueError(\"Unknown action type '{}'\".format(action_type))\n\n #haha jk.\n if self.late_squash:\n explore_func = explore_continuous\n self.loss = tf.losses.mean_squared_error\n self.squash = (tf.nn.softmax if self.action_type == \"discrete\" else\n tf.nn.sigmoid if self.action_type == \"multibinary\" else None)\n\n self.explore = lambda x: explore_func(x, self.noise)\n\n #initialization\n self.traces = create_traces(self.model)\n self.reward_mean = 0\n self.reward_deviation = initial_deviation\n\n def get_action(self, state):\n #housekeeping\n state = state.astype(\"float32\")\n #save pre-step hidden state\n pre_step_state = get_states(self.model)\n\n #calc action from state\n #action = self.model.predict(np.expand_dims(state, 0))[0]\n #https://github.com/keras-team/keras/issues/13118\n #https://github.com/tensorflow/tensorflow/issues/33009\n action = self.model.predict_on_batch(np.expand_dims(state, 0))[0]\n\n #apply noise to action\n action = self.explore(action)\n\n #TODO: early bail?\n\n #calc gradient for modified action & update traces based on gradient\n update_traces(self.model, pre_step_state, self.traces,\n np.expand_dims(state, 0), np.expand_dims(action, 0), self.loss, lambda_=self.lambda_)\n\n #if discrete/multibinary, then squash\n if self.late_squash and self.action_type != \"continuous\":\n action = self.squash(action)\n #explore with 0 noise just to get 0s/1s\n action = (explore_discrete(action, 0) if self.action_type == \"discrete\" else\n explore_multibinary(action, 0) if self.action_type == \"multibinary\" else action)\n\n return action\n\n def train(self, reward):\n 
#scale/clip reward to calculate advantage\n delta_reward = reward - self.reward_mean\n advantage = delta_reward / (self.reward_deviation + self.eps)\n if self.advantage_clip is not None:\n advantage = np.clip(advantage, -self.advantage_clip, self.advantage_clip)\n\n #update reward mean/deviation\n self.reward_mean += delta_reward * (1 - self.gamma)\n #TODO: experimental square instead of abs\n if self.use_squared_deviation:\n self.reward_deviation += (delta_reward ** 2 - self.reward_deviation) * (1 - self.gamma)\n else:\n self.reward_deviation += (np.abs(delta_reward) - self.reward_deviation) * (1 - self.gamma)\n self.last_advantage = advantage\n\n #step network in direction of trace gradient * advantage\n apply_regularization(self.model, self.regularization)\n step_weights_opt(self.model, self.traces, advantage, self.model.optimizer)\n"
] |
[
[
"tensorflow.keras.optimizers.Adam",
"numpy.expand_dims",
"numpy.abs",
"numpy.clip"
]
] |
yuhengwang1/kalm-fl
|
[
"c713014d85e71bf728f6687a1b41c47ebd626263"
] |
[
"kalmfl/multistanza/models/common/char_model.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pack_sequence, pad_packed_sequence, pack_padded_sequence, PackedSequence\n\nfrom kalmfl.multistanza.models.common.packed_lstm import PackedLSTM\nfrom kalmfl.multistanza.models.common.utils import tensor_unsort, unsort\nfrom kalmfl.multistanza.models.common.dropout import SequenceUnitDropout\nfrom kalmfl.multistanza.models.common.vocab import UNK_ID, CharVocab\n\nclass CharacterModel(nn.Module):\n def __init__(self, args, vocab, pad=False, bidirectional=False, attention=True):\n super().__init__()\n self.args = args\n self.pad = pad\n self.num_dir = 2 if bidirectional else 1\n self.attn = attention\n\n # char embeddings\n self.char_emb = nn.Embedding(len(vocab['char']), self.args['char_emb_dim'], padding_idx=0)\n if self.attn: \n self.char_attn = nn.Linear(self.num_dir * self.args['char_hidden_dim'], 1, bias=False)\n self.char_attn.weight.data.zero_()\n\n # modules\n self.charlstm = PackedLSTM(self.args['char_emb_dim'], self.args['char_hidden_dim'], self.args['char_num_layers'], batch_first=True, \\\n dropout=0 if self.args['char_num_layers'] == 1 else args['dropout'], rec_dropout = self.args['char_rec_dropout'], bidirectional=bidirectional)\n self.charlstm_h_init = nn.Parameter(torch.zeros(self.num_dir * self.args['char_num_layers'], 1, self.args['char_hidden_dim']))\n self.charlstm_c_init = nn.Parameter(torch.zeros(self.num_dir * self.args['char_num_layers'], 1, self.args['char_hidden_dim']))\n\n self.dropout = nn.Dropout(args['dropout'])\n\n def forward(self, chars, chars_mask, word_orig_idx, sentlens, wordlens):\n embs = self.dropout(self.char_emb(chars))\n batch_size = embs.size(0)\n embs = pack_padded_sequence(embs, wordlens, batch_first=True)\n output = self.charlstm(embs, wordlens, hx=(\\\n self.charlstm_h_init.expand(self.num_dir * self.args['char_num_layers'], batch_size, self.args['char_hidden_dim']).contiguous(), \\\n self.charlstm_c_init.expand(self.num_dir * self.args['char_num_layers'], batch_size, self.args['char_hidden_dim']).contiguous()))\n \n # apply attention, otherwise take final states\n if self.attn:\n char_reps = output[0]\n weights = torch.sigmoid(self.char_attn(self.dropout(char_reps.data)))\n char_reps = PackedSequence(char_reps.data * weights, char_reps.batch_sizes)\n char_reps, _ = pad_packed_sequence(char_reps, batch_first=True)\n res = char_reps.sum(1)\n else:\n h, c = output[1]\n res = h[-2:].transpose(0,1).contiguous().view(batch_size, -1)\n\n # recover character order and word separation\n res = tensor_unsort(res, word_orig_idx)\n res = pack_sequence(res.split(sentlens))\n if self.pad:\n res = pad_packed_sequence(res, batch_first=True)[0]\n\n return res\n\nclass CharacterLanguageModel(nn.Module):\n\n def __init__(self, args, vocab, pad=False, is_forward_lm=True):\n super().__init__()\n self.args = args\n self.vocab = vocab\n self.is_forward_lm = is_forward_lm\n self.pad = pad\n self.finetune = True # always finetune unless otherwise specified\n\n # char embeddings\n self.char_emb = nn.Embedding(len(self.vocab['char']), self.args['char_emb_dim'], padding_idx=None) # we use space as padding, so padding_idx is not necessary\n \n # modules\n self.charlstm = PackedLSTM(self.args['char_emb_dim'], self.args['char_hidden_dim'], self.args['char_num_layers'], batch_first=True, \\\n dropout=0 if self.args['char_num_layers'] == 1 else args['char_dropout'], rec_dropout = self.args['char_rec_dropout'], bidirectional=False)\n self.charlstm_h_init = nn.Parameter(torch.zeros(self.args['char_num_layers'], 
1, self.args['char_hidden_dim']))\n self.charlstm_c_init = nn.Parameter(torch.zeros(self.args['char_num_layers'], 1, self.args['char_hidden_dim']))\n\n # decoder\n self.decoder = nn.Linear(self.args['char_hidden_dim'], len(self.vocab['char']))\n self.dropout = nn.Dropout(args['char_dropout'])\n self.char_dropout = SequenceUnitDropout(args.get('char_unit_dropout', 0), UNK_ID)\n\n def forward(self, chars, charlens, hidden=None):\n chars = self.char_dropout(chars)\n embs = self.dropout(self.char_emb(chars))\n batch_size = embs.size(0)\n embs = pack_padded_sequence(embs, charlens, batch_first=True)\n if hidden is None: \n hidden = (self.charlstm_h_init.expand(self.args['char_num_layers'], batch_size, self.args['char_hidden_dim']).contiguous(),\n self.charlstm_c_init.expand(self.args['char_num_layers'], batch_size, self.args['char_hidden_dim']).contiguous())\n output, hidden = self.charlstm(embs, charlens, hx=hidden)\n output = self.dropout(pad_packed_sequence(output, batch_first=True)[0])\n decoded = self.decoder(output)\n return output, hidden, decoded\n\n def get_representation(self, chars, charoffsets, charlens, char_orig_idx):\n with torch.no_grad():\n output, _, _ = self.forward(chars, charlens)\n res = [output[i, offsets] for i, offsets in enumerate(charoffsets)]\n res = unsort(res, char_orig_idx)\n res = pack_sequence(res)\n if self.pad:\n res = pad_packed_sequence(res, batch_first=True)[0]\n return res\n\n def hidden_dim(self):\n return self.args['char_hidden_dim']\n\n def char_vocab(self):\n return self.vocab['char']\n\n def train(self, mode=True):\n \"\"\"\n Override the default train() function, so that when self.finetune == False, the training mode \n won't be impacted by the parent models' status change.\n \"\"\"\n if not mode: # eval() is always allowed, regardless of finetune status\n super().train(mode)\n else:\n if self.finetune: # only set to training mode in finetune status\n super().train(mode)\n\n def save(self, filename):\n state = {\n 'vocab': self.vocab['char'].state_dict(),\n 'args': self.args,\n 'state_dict': self.state_dict(),\n 'pad': self.pad,\n 'is_forward_lm': self.is_forward_lm\n }\n torch.save(state, filename, _use_new_zipfile_serialization=False)\n\n @classmethod\n def load(cls, filename, finetune=False):\n state = torch.load(filename, lambda storage, loc: storage)\n vocab = {'char': CharVocab.load_state_dict(state['vocab'])}\n model = cls(state['args'], vocab, state['pad'], state['is_forward_lm'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.finetune = finetune # set finetune status\n return model\n"
] |
[
[
"torch.nn.Dropout",
"torch.load",
"torch.zeros",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Linear",
"torch.nn.utils.rnn.PackedSequence",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.no_grad",
"torch.nn.utils.rnn.pack_sequence",
"torch.save"
]
] |
jacobchanyeol/style-transfer_v1.0
|
[
"d272cb2aa0f67a8db4f8efe01808d1e480c9d075"
] |
[
"transformer_net.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 4 02:03:13 2019\r\n\r\n@author: jacobchoi\r\n\"\"\"\r\n\r\nimport torch\r\n\r\n\r\nclass TransformerNet(torch.nn.Module):\r\n def __init__(self):\r\n super(TransformerNet, self).__init__()\r\n # Initial convolution layers\r\n self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)\r\n self.in1 = torch.nn.InstanceNorm2d(32, affine=True)\r\n self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)\r\n self.in2 = torch.nn.InstanceNorm2d(64, affine=True)\r\n self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)\r\n self.in3 = torch.nn.InstanceNorm2d(128, affine=True)\r\n # Residual layers\r\n self.res1 = ResidualBlock(128)\r\n self.res2 = ResidualBlock(128)\r\n self.res3 = ResidualBlock(128)\r\n self.res4 = ResidualBlock(128)\r\n self.res5 = ResidualBlock(128)\r\n # Upsampling Layers\r\n self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)\r\n self.in4 = torch.nn.InstanceNorm2d(64, affine=True)\r\n self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)\r\n self.in5 = torch.nn.InstanceNorm2d(32, affine=True)\r\n self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)\r\n # Non-linearities\r\n self.relu = torch.nn.ReLU()\r\n\r\n def forward(self, X):\r\n y = self.relu(self.in1(self.conv1(X)))\r\n y = self.relu(self.in2(self.conv2(y)))\r\n y = self.relu(self.in3(self.conv3(y)))\r\n y = self.res1(y)\r\n y = self.res2(y)\r\n y = self.res3(y)\r\n y = self.res4(y)\r\n y = self.res5(y)\r\n y = self.relu(self.in4(self.deconv1(y)))\r\n y = self.relu(self.in5(self.deconv2(y)))\r\n y = self.deconv3(y)\r\n return y\r\n\r\n\r\nclass ConvLayer(torch.nn.Module):\r\n def __init__(self, in_channels, out_channels, kernel_size, stride):\r\n super(ConvLayer, self).__init__()\r\n reflection_padding = kernel_size // 2\r\n self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)\r\n self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)\r\n\r\n def forward(self, x):\r\n out = self.reflection_pad(x)\r\n out = self.conv2d(out)\r\n return out\r\n\r\n\r\nclass ResidualBlock(torch.nn.Module):\r\n \"\"\"ResidualBlock\r\n introduced in: https://arxiv.org/abs/1512.03385\r\n recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html\r\n \"\"\"\r\n\r\n def __init__(self, channels):\r\n super(ResidualBlock, self).__init__()\r\n self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)\r\n self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)\r\n self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)\r\n self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)\r\n self.relu = torch.nn.ReLU()\r\n\r\n def forward(self, x):\r\n residual = x\r\n out = self.relu(self.in1(self.conv1(x)))\r\n out = self.in2(self.conv2(out))\r\n out = out + residual\r\n return out\r\n\r\n\r\nclass UpsampleConvLayer(torch.nn.Module):\r\n \"\"\"UpsampleConvLayer\r\n Upsamples the input and then does a convolution. 
This method gives better results\r\n compared to ConvTranspose2d.\r\n ref: http://distill.pub/2016/deconv-checkerboard/\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):\r\n super(UpsampleConvLayer, self).__init__()\r\n self.upsample = upsample\r\n reflection_padding = kernel_size // 2\r\n self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)\r\n self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)\r\n\r\n def forward(self, x):\r\n x_in = x\r\n if self.upsample:\r\n x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample)\r\n out = self.reflection_pad(x_in)\r\n out = self.conv2d(out)\r\n return out"
] |
[
[
"torch.nn.ReflectionPad2d",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU"
]
] |
marko4789/Visualizacion-Covid19-MX
|
[
"9194a2590c35a12ddd66f1914c93f1ff869620b0"
] |
[
"Visualizacion_comparativo_nuevos.py"
] |
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndataset_casos = pd.read_csv(\"Datos/comparativo_casos_nuevos.csv\")\ndataset_muertes = pd.read_csv(\"Datos/comparativo_muertes_nuevas.csv\")\n\ndias = np.arange(585)\ncasos = dataset_casos.iloc[:, 1]\ncasos_prom = dataset_casos.iloc[:, 3]\nmuertes = dataset_muertes.iloc[:, 1]\nmuertes_prom = dataset_muertes.iloc[:, 3]\n\nplt.title(\"Covid-19 MX - Comparativo casos-muertes diarias\")\nplt.suptitle(\"13 de Enero - 25 de Septiembre 2021\", fontsize = 10)\nplt.xlabel(\"Días\")\nplt.ylabel(\"Numero de casos-muertes\")\nplt.plot(dias, casos_prom, color = \"green\", label = \"Promedio diario de casos\")\nplt.scatter(dias, casos, color = \"blue\", label = \"Casos nuevos diarios\", alpha=0.5)\nplt.plot(dias, muertes_prom, color = \"orange\", label = \"Promedio diario de muertes\")\nplt.scatter(dias, muertes, color = \"red\", label = \"Muertes nuevas diarias\", alpha=0.5)\nplt.legend()\n#plt.semilogy()\n#plt.grid(True)\nplt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
cuhksz-nlp/SANER
|
[
"dffc7a06cdbff2671a3ca73d2398159d91a4a7db"
] |
[
"fastNLP/io/pipe/summarization.py"
] |
[
"\"\"\"undocumented\"\"\"\nimport os\nimport numpy as np\n\nfrom .pipe import Pipe\nfrom .utils import _drop_empty_instance\nfrom ..loader.summarization import ExtCNNDMLoader\nfrom ..data_bundle import DataBundle\nfrom ...core.const import Const\nfrom ...core.vocabulary import Vocabulary\nfrom ...core._logger import logger\n\n\nWORD_PAD = \"[PAD]\"\nWORD_UNK = \"[UNK]\"\nDOMAIN_UNK = \"X\"\nTAG_UNK = \"X\"\n\n\nclass ExtCNNDMPipe(Pipe):\n \"\"\"\n 对CNN/Daily Mail数据进行适用于extractive summarization task的预处理,预处理之后的数据,具备以下结构:\n \n .. csv-table::\n :header: \"text\", \"summary\", \"label\", \"publication\", \"text_wd\", \"words\", \"seq_len\", \"target\"\n \n \"\"\"\n def __init__(self, vocab_size, sent_max_len, doc_max_timesteps, vocab_path=None, domain=False):\n \"\"\"\n \n :param vocab_size: int, 词表大小\n :param sent_max_len: int, 句子最大长度,不足的句子将padding,超出的将截断\n :param doc_max_timesteps: int, 文章最多句子个数,不足的将padding,超出的将截断\n :param vocab_path: str, 外部词表路径\n :param domain: bool, 是否需要建立domain词表\n \"\"\"\n self.vocab_size = vocab_size\n self.vocab_path = vocab_path\n self.sent_max_len = sent_max_len\n self.doc_max_timesteps = doc_max_timesteps\n self.domain = domain\n\n def process(self, data_bundle: DataBundle):\n \"\"\"\n 传入的DataSet应该具备如下的结构\n\n .. csv-table::\n :header: \"text\", \"summary\", \"label\", \"publication\"\n\n [\"I got new tires from them and... \",\"...\"], [\"The new tires...\",\"...\"], [0, 1], \"cnndm\"\n [\"Don't waste your time. We had two...\",\"...\"], [\"Time is precious\",\"...\"], [1], \"cnndm\"\n [\"...\"], [\"...\"], [], \"cnndm\"\n\n :param data_bundle:\n :return: 处理得到的数据包括\n .. csv-table::\n :header: \"text_wd\", \"words\", \"seq_len\", \"target\"\n\n [[\"I\",\"got\",...\".\"],...,[\"...\"]], [[54,89,...,5],...,[9,43,..,0]], [1,1,...,0], [0,1,...,0]\n [[\"Don't\",\"waste\",...,\".\"],...,[\"...\"]], [[5234,653,...,5],...,[87,234,..,0]], [1,1,...,0], [1,1,...,0]\n [[\"\"],...,[\"\"]], [[],...,[]], [], []\n \"\"\"\n\n if self.vocab_path is None:\n error_msg = 'vocab file is not defined!'\n logger.error(error_msg)\n raise RuntimeError(error_msg)\n data_bundle.apply(lambda x: _lower_text(x['text']), new_field_name='text')\n data_bundle.apply(lambda x: _lower_text(x['summary']), new_field_name='summary')\n data_bundle.apply(lambda x: _split_list(x['text']), new_field_name='text_wd')\n data_bundle.apply(lambda x: _convert_label(x[\"label\"], len(x[\"text\"])), new_field_name=Const.TARGET)\n\n data_bundle.apply(lambda x: _pad_sent(x[\"text_wd\"], self.sent_max_len), new_field_name=Const.INPUT)\n # db.apply(lambda x: _token_mask(x[\"text_wd\"], self.sent_max_len), new_field_name=\"pad_token_mask\")\n\n # pad document\n data_bundle.apply(lambda x: _pad_doc(x[Const.INPUT], self.sent_max_len, self.doc_max_timesteps), new_field_name=Const.INPUT)\n data_bundle.apply(lambda x: _sent_mask(x[Const.INPUT], self.doc_max_timesteps), new_field_name=Const.INPUT_LEN)\n data_bundle.apply(lambda x: _pad_label(x[Const.TARGET], self.doc_max_timesteps), new_field_name=Const.TARGET)\n\n data_bundle = _drop_empty_instance(data_bundle, \"label\")\n\n # set input and target\n data_bundle.set_input(Const.INPUT, Const.INPUT_LEN)\n data_bundle.set_target(Const.TARGET, Const.INPUT_LEN)\n\n # print(\"[INFO] Load existing vocab from %s!\" % self.vocab_path)\n word_list = []\n with open(self.vocab_path, 'r', encoding='utf8') as vocab_f:\n cnt = 2 # pad and unk\n for line in vocab_f:\n pieces = line.split(\"\\t\")\n word_list.append(pieces[0])\n cnt += 1\n if cnt > self.vocab_size:\n break\n vocabs = 
Vocabulary(max_size=self.vocab_size, padding=WORD_PAD, unknown=WORD_UNK)\n vocabs.add_word_lst(word_list)\n vocabs.build_vocab()\n data_bundle.set_vocab(vocabs, \"vocab\")\n\n if self.domain is True:\n domaindict = Vocabulary(padding=None, unknown=DOMAIN_UNK)\n domaindict.from_dataset(data_bundle.get_dataset(\"train\"), field_name=\"publication\")\n data_bundle.set_vocab(domaindict, \"domain\")\n\n return data_bundle\n\n def process_from_file(self, paths=None):\n \"\"\"\n :param paths: dict or string\n :return: DataBundle\n \"\"\"\n loader = ExtCNNDMLoader()\n if self.vocab_path is None:\n if paths is None:\n paths = loader.download()\n if not os.path.isdir(paths):\n error_msg = 'vocab file is not defined!'\n logger.error(error_msg)\n raise RuntimeError(error_msg)\n self.vocab_path = os.path.join(paths, 'vocab')\n db = loader.load(paths=paths)\n db = self.process(db)\n for ds in db.datasets.values():\n db.get_vocab(\"vocab\").index_dataset(ds, field_name=Const.INPUT, new_field_name=Const.INPUT)\n\n return db\n\n\ndef _lower_text(text_list):\n return [text.lower() for text in text_list]\n\n\ndef _split_list(text_list):\n return [text.split() for text in text_list]\n\n\ndef _convert_label(label, sent_len):\n np_label = np.zeros(sent_len, dtype=int)\n if label != []:\n np_label[np.array(label)] = 1\n return np_label.tolist()\n\n\ndef _pad_sent(text_wd, sent_max_len):\n pad_text_wd = []\n for sent_wd in text_wd:\n if len(sent_wd) < sent_max_len:\n pad_num = sent_max_len - len(sent_wd)\n sent_wd.extend([WORD_PAD] * pad_num)\n else:\n sent_wd = sent_wd[:sent_max_len]\n pad_text_wd.append(sent_wd)\n return pad_text_wd\n\n\ndef _token_mask(text_wd, sent_max_len):\n token_mask_list = []\n for sent_wd in text_wd:\n token_num = len(sent_wd)\n if token_num < sent_max_len:\n mask = [1] * token_num + [0] * (sent_max_len - token_num)\n else:\n mask = [1] * sent_max_len\n token_mask_list.append(mask)\n return token_mask_list\n\n\ndef _pad_label(label, doc_max_timesteps):\n text_len = len(label)\n if text_len < doc_max_timesteps:\n pad_label = label + [0] * (doc_max_timesteps - text_len)\n else:\n pad_label = label[:doc_max_timesteps]\n return pad_label\n\n\ndef _pad_doc(text_wd, sent_max_len, doc_max_timesteps):\n text_len = len(text_wd)\n if text_len < doc_max_timesteps:\n padding = [WORD_PAD] * sent_max_len\n pad_text = text_wd + [padding] * (doc_max_timesteps - text_len)\n else:\n pad_text = text_wd[:doc_max_timesteps]\n return pad_text\n\n\ndef _sent_mask(text_wd, doc_max_timesteps):\n text_len = len(text_wd)\n if text_len < doc_max_timesteps:\n sent_mask = [1] * text_len + [0] * (doc_max_timesteps - text_len)\n else:\n sent_mask = [1] * doc_max_timesteps\n return sent_mask\n\n\n"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
alvaro-serra/malib
|
[
"fe2b0736974c2a3ed9e41121b6cf475a3ee0b5a0"
] |
[
"tests/fixtures/fixtures.py"
] |
[
"import gc\nimport unittest\n\nimport tensorflow as tf\n\n# from malib.experiment import deterministic\nfrom malib.logger import logger\nfrom tests.fixtures.logger import NullOutput\n\n\nclass TfTestCase(unittest.TestCase):\n def setUp(self):\n self.sess = tf.Session()\n self.sess.__enter__()\n\n def tearDown(self):\n if tf.get_default_session() is self.sess:\n self.sess.__exit__(None, None, None)\n self.sess.close()\n del self.sess\n gc.collect()\n\n\nclass TfGraphTestCase(unittest.TestCase):\n def setUp(self):\n self.graph = tf.Graph()\n # self.sess = tf.Session(graph=self.graph)\n # self.sess.__enter__()\n logger.add_output(NullOutput())\n # deterministic.set_seed(1)\n\n def tearDown(self):\n logger.remove_all()\n # if tf.get_default_session() is self.sess:\n # self.sess.__exit__(None, None, None)\n # self.sess.close()\n # These del are crucial to prevent ENOMEM in the CI\n # b/c TensorFlow does not release memory explicitly\n # del self.graph\n # del self.sess\n gc.collect()\n"
] |
[
[
"tensorflow.get_default_session",
"tensorflow.Graph",
"tensorflow.Session"
]
] |
minkj1992/Korean_emotion_classification_attention
|
[
"7e09cd8cd9b18c438d7b9032efb8ee37c505a7fa"
] |
[
"ex1_w2v/predict.py"
] |
[
"# 0. 사용할 패키지 불러오기\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nimport numpy as np\nimport keras\nfrom keras.models import model_from_json\nfrom konlpy.tag import Okt as Twitter\nimport re\nimport json\n\n\n# token2index에서는 '0'외에는 token들로 구성되어 있으며, index2vector값에서는 각 token들 index와, '0'의 index값과 마지막 index로 np.zeros(300)이 들어있다. \nclass Predict():\n \n def __init__(self, sent=\"나는 오늘 화가 많이 났다.\"):\n # 1.데이터 준비하기\n self.sentence = sent\n # 2. 모델 불러오기\n json_file = open('model.json','r')\n loaded_model_json = json_file.read()\n json_file.close()\n self.model = model_from_json(loaded_model_json)\n self.model.load_weights('Attention_weight.h5')\n #이거 필요없을것 같은데\n self.index2vec = np.fromfile('index2vec.dat',dtype='float32')\n# self.index2vec = np.fromfile('index2vec.dat')\n self.max_sentence = 100\n #\"토큰당 인덱스가 들어있는파일\"\n with open('token2index.json','r') as dictionary_file:\n self.token2index = json.load(dictionary_file)\n \n def tokenizer(self):\n token = []\n twitter = Twitter()\n self.sentence = re.sub(r\"[^ㄱ-힣a-zA-Z0-9]+\", ' ', self.sentence).strip().split()\n \n token.extend(twitter.pos(str(self.sentence), norm=True, stem=True))\n print(token)\n print(type(token))\n return token\n \n def t2index(self, t_list):\n index_list = []\n for token in t_list:\n if str(token).replace(\" \", \"\") in self.token2index:\n index_list.append(self.token2index[str(token).replace(\" \", \"\")])\n else:\n index_list.append(self.token2index['0'])\n #padding\n if(len(index_list)> self.max_sentence):\n return index_list[0:99]\n else:\n #token2index의key길이==zero가 있는 index이다. index2vec의 마지막 index와 len(token2index)같으면 된다. \n #-1 지금 붙여논 상황\n index_list.extend([len(self.token2index)-1]*(self.max_sentence-len(index_list)))\n print(len(self.token2index))\n print(len(self.index2vec))\n# 100개의 index 토큰으로 분류된 문장 return\n return index_list\n\n\nfrom keras import backend as K\n\nif __name__ == '__main__':\n sentence = input(\"문장을 입력하시오: \")\n predict = Predict(sentence)\n tok = predict.tokenizer()\n #토큰화 시킨 후 토큰화 된 단어를 token2index에 넣을떄, ()의 공백을 없애주어야 한다.\n #1. in word_tmp_dict\n #2. 또한 x_test의 데이터타입과 token2index의 key데이터 타입이 같은지 확인해 주어야한다.\n # for i in token2index.keys(): print(type(i)), tuple(x_test) or str(x_test)\n x_test = predict.t2index(tok)\n #3. token2index파트에서, 전해진 tok 데이터 타입이 int가 되도록 해주어, index2vec의 index데이터 타입과 같도록 만들어준다. \n #4. 넘파이화 시켜주어서, 이후 plot 찍어줄수 있도록한다.\n x_test = np.asarray([x_test])\n# print(x_test)\n# print(x_test.shape)\n# # 3. 모델 사용하기\n# #5. yhat이 [0.4,0.8,0.9.....]이런식으로 나오는데, threshold(0.5)를 넘기면 모두 1이 되도록 만들어주고 그게 아니면 0이되도록 변환시켜준다.\n# yhat = predict.model.predict(x_test,batch_size=1)\n yhat = predict.model.predict(x_test)\n yhat = yhat[0]\n \n #yhat중 1인 녀석 index를 뽑아낸다.\n columns = [\"분노\",\"기대\",\"혐오\",\"두려움\",\"기쁨\",\"사랑\",\"낙관\",\"비관\",\"슬픔\",\"놀라움\",\"믿음\"]\n y_index = list()\n for i,e in enumerate(yhat):\n y_index = [i for i, e in enumerate(yhat) if e > 0.3]\n for i in y_index:\n print(\"'{0}'가{1:.2f}% 발현되었습니다.\".format(str(columns[i]),(yhat[i])*100))\n "
] |
[
[
"numpy.asarray",
"numpy.fromfile"
]
] |
Leevisir/TimeseriesForecastingQA
|
[
"df6402fdc499d28195e88177c2c866787cf1f019"
] |
[
"QUANTAXIS/TSFetch/fetchdata.py"
] |
[
"import QUANTAXIS as QA\nimport pandas as pd\nimport json\nimport datetime\nfrom QUANTAXIS.QAUtil import QASETTING\nfrom QUANTAXIS.TSData.TSRawdata import TSRawdata\n\n\ndef TS_fetch_stock_day_adv(code, start, end):\n #get all history data from tdx\n # date = datetime.date.today()\n # data=QA.QAFetch.QATdx.QA_fetch_get_stock_day('00001','2017-01-01','2019-01-31')\n #get data from local database\n\n data = QA.QA_fetch_stock_day_adv(code=code, start=start, end=end)\n result = data.data\n result = result.sort_index(ascending=True)\n result = result.reset_index(level=1)\n result = result.drop(columns='code')\n result['date'] = result.index\n result = result.rename(columns={'close': 'y'})\n # print(result)\n rawdata = TSRawdata(result)\n # print(rawdata.data)\n return rawdata\n\n#upload to mongodb\n# outcome = rawdata.data\n#\n# client = QASETTING.client\n# database = client['mydatabase']\n# datacol = database[code+str(datetime.date.today())]\n# outcome = date2str(outcome)\n# datacol.insert_many(outcome)\n\ndef getrawfrommongodb(start,end,databaseid,collectionid,client = QASETTING.client):\n database = client[databaseid]\n datacol = database[collectionid]\n cursor = datacol.find()\n outcome = pd.DataFrame(list(cursor))\n outcome = outcome.drop(columns = '_id')\n outcome['datetime'] = pd.to_datetime(outcome['datetime'])\n outcome.set_index('datetime', inplace=True)\n #inplace=True\n outcome = outcome[start:end]\n outcome['datetime'] = outcome.index\n rawdata = TSRawdata(outcome)\n return rawdata\n# rawdatafrommongo = getrawfrommongodb()\n# print(rawdatafrommongo.data)"
] |
[
[
"pandas.to_datetime"
]
] |
campagnola/acq4
|
[
"c77636a76d68ffa1bc7dbd41edc522e523b909b8",
"c77636a76d68ffa1bc7dbd41edc522e523b909b8"
] |
[
"acq4/util/generator/SeqParamSet.py",
"acq4/util/igorpro.py"
] |
[
"from __future__ import print_function\nimport pyqtgraph.units as units\nfrom pyqtgraph.parametertree.parameterTypes import SimpleParameter, GroupParameter\nimport pyqtgraph as pg\nimport numpy as np\nimport acq4.util.functions as fn\nimport sys, collections\n\n\nclass SequenceParamSet(GroupParameter):\n ## top-level parameter in the simple stim generator tree\n def __init__(self):\n GroupParameter.__init__(self, name='SequenceParams', type='group',\n addText='Add Sequence Parameter')\n self.meta = {}\n \n def addNew(self):\n with self.treeChangeBlocker(): ## about to make lots of tree changes;\n ## suppress change signal until we're done.\n ch = self.addChild(SeqParameter())\n \n \n #if type == 'Pulse':\n #ch = self.addChild(PulseParameter())\n #elif type == 'Pulse Train':\n #ch = self.addChild(PulseTrainParameter())\n #else:\n #raise Exception('Unknown type %s' % type)\n \n #for ax in self.meta:\n #self.setMeta(ax, self.meta[ax], ch)\n\n def compile(self):\n params = collections.OrderedDict()\n for ch in self:\n try:\n params[ch.name()] = ch.compile()\n except SeqEvalError as ex:\n #print sys.exc_info()\n raise Exception(\"'%s.%s': %s\" % (ch.name(), ex.name, ex.exc))\n except:\n raise Exception(\"'%s': %s\" % (ch.name(), str(sys.exc_info()[1])))\n \n return params\n \n def setState(self, state):\n with self.treeChangeBlocker():\n self.clearChildren()\n for k in state:\n ch = self.addChild(SeqParameter())\n ch.setName(k)\n ch.setState(state[k])\n \n def getState(self):\n state = collections.OrderedDict()\n for ch in self:\n state[ch.name()] = ch.getState()\n return state\n \n\nclass SeqEvalError(Exception): ## raised when a sequence parameter field fails to evaluate\n def __init__(self, name, exc):\n Exception.__init__(self)\n self.name = name\n self.exc = str(exc)\n\nclass SeqParameter(GroupParameter):\n def __init__(self, **args):\n \n self.evalLocals = units.allUnits.copy()\n exec('from numpy import *', self.evalLocals) ## import all of numpy into the eval namespace\n \n args['renamable'] = True\n args['removable'] = True\n args['name'] = args.get('name', 'Param')\n args['autoIncrementName'] = True\n args['strictNaming'] = True\n \n args['children'] = [\n {'name': 'default', 'type': 'str', 'value': '0'},\n {'name': 'sequence', 'type': 'list', 'value': 'off', 'values': ['off', 'range', 'list', 'eval']},\n {'name': 'start', 'type': 'str', 'value': '0', 'visible': False}, \n {'name': 'stop', 'type': 'str', 'value': '0', 'visible': False}, \n {'name': 'steps', 'type': 'int', 'value': 10, 'visible': False},\n {'name': 'log spacing', 'type': 'bool', 'value': False, 'visible': False}, \n {'name': 'list', 'type': 'str', 'value': '', 'visible': False}, \n {'name': 'randomize', 'type': 'bool', 'value': False, 'visible': False}, \n {'name': 'expression', 'type': 'str', 'visible': False},\n ]\n \n GroupParameter.__init__(self, **args)\n #self.sequence.sigValueChanged.connect(self.seqChanged)\n \n self.visibleParams = { ## list of params to display in each mode\n 'off': ['default', 'sequence'],\n 'range': ['default', 'sequence', 'start', 'stop', 'steps', 'log spacing', 'randomize'],\n 'list': ['default', 'sequence', 'list', 'randomize'],\n 'eval': ['default', 'sequence', 'expression']\n }\n \n \n def treeStateChanged(self, param, changes):\n ## catch changes to 'sequence' so we can hide/show other params.\n ## Note: it would be easier to just catch self.sequence.sigValueChanged,\n ## but this approach allows us to block tree change events so they are all\n ## released as a single update.\n with 
self.treeChangeBlocker():\n ## queue up change \n GroupParameter.treeStateChanged(self, param, changes)\n \n ## if needed, add some more changes before releasing the signal\n for param, change, data in changes:\n ## if the sequence value changes, hide/show other parameters\n if param is self.param('sequence') and change == 'value':\n vis = self.visibleParams[self['sequence']]\n for ch in self:\n if ch.name() in vis:\n ch.show()\n else:\n ch.hide()\n #def seqChanged(self):\n #with self.treeChangeBlocker():\n #vis = self.visibleParams[self['sequence']]\n #for ch in self:\n #if ch.name() in vis:\n #ch.show()\n #else:\n #ch.hide()\n \n def compile(self):\n name = self.name()\n default = self.evalStr('default')\n mode = self['sequence']\n if mode == 'off':\n seq = []\n elif mode == 'range':\n start = self.evalStr('start')\n stop = self.evalStr('stop')\n nPts = self['steps']\n if self['log spacing']:\n seq = fn.logSpace(start, stop, nPts)\n else:\n seq = np.linspace(start, stop, nPts)\n elif mode == 'list':\n if self['list'] == '':\n seq = []\n else:\n seq = list(self.evalStr('list'))\n elif mode == 'eval':\n seq = self.evalStr('expression')\n else:\n raise Exception('Unknown sequence mode %s' % mode)\n \n if self['randomize']:\n np.random.shuffle(seq)\n \n ## sanity check\n try:\n len(seq)\n except:\n raise Exception(\"Parameter %s generated invalid sequence: %s\" % (name, str(seq)))\n \n return default, seq\n\n def evalStr(self, name):\n try:\n s = eval(self[name], self.evalLocals)\n except:\n raise SeqEvalError(name, sys.exc_info()[1])\n return s\n \n def setState(self, state):\n for k in state:\n self[k] = state[k]\n self.param(k).setDefault(state[k])\n \n def getState(self):\n state = collections.OrderedDict()\n for ch in self:\n if not ch.opts['visible']:\n continue\n name = ch.name()\n val = ch.value()\n if val is False:\n continue\n state[name] = val\n return state\n\n\n\n\n\n",
"from __future__ import print_function\nimport sys\nimport win32com.client\nimport pywintypes\nimport pythoncom\nimport numpy as np\nimport subprocess as sp\nimport concurrent.futures\nimport atexit\nimport json\nimport zmq\n\n\nimport os\nfrom six.moves import range\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\nfrom acq4.util import Qt\nfrom pyqtgraph.util.mutex import Mutex\n\n\"\"\"\nThanks to: Jason Yamada-Hanff https://github.com/yamad/igor-mode\n\nMain documentation: Igor Pro Folder\\Miscellaneous\\Windows Automation\\Automation Server.ihf\n\n\n* Use fprintf to return data:\n igor('fprintf 0, \"%d\", 1+3')\n\n* Access waves:\n df = i.app.DataFolder(\"root:MIES:ITCDevices:ITC1600:Device0\")\n wave = df.Wave('OscilloscopeData')\n\n # get data type and array shape\n typ, rows, cols, layers, chunks = wave.GetDimensions()\n dtype = dtypes[typ]\n shape = [rows, cols, layers, chunks]\n ndim = shape.index(0)\n shape = shape[:ndim]\n\n # get [(slope, intercept), ...] scale factors for each axis\n scaling = [wave.GetScaling(ax) for ax in range(len(shape))]\n\n np.array(wave.GetNumericWaveData(typ))\n\n* Access global variables:\n df = i.app.DataFolder(\"root\")\n var = df.Variable(\"myvar\")\n var.GetNumericValue()\n var.GetStringValue()\n\"\"\"\n\ndtypes = { \n 0x02: 'float32',\n 0x04: 'float64',\n 0x08: 'byte',\n 0x10: 'short',\n 0x20: 'long',\n 0x48: 'ubyte',\n 0x50: 'ushort',\n 0x60: 'ulong',\n 0x01: 'complex',\n 0x00: 'str',\n}\n\n\nclass IgorCallError(Exception):\n FAILED = 1\n TIMEDOUT = 2\n def __init__(self, message, errno=1):\n self.errno = errno\n super(IgorCallError, self).__init__(message)\n\n\nclass IgorThread(Qt.QThread):\n\n _newRequest = Qt.Signal(object)\n\n def __init__(self, useZMQ=False):\n Qt.QThread.__init__(self)\n self.moveToThread(self)\n if useZMQ:\n self.igor = ZMQIgorBridge()\n else:\n self.igor = IgorBridge()\n self._newRequest.connect(self._processRequest)\n self.start()\n atexit.register(self.quit)\n\n def __call__(self, *args, **kwds):\n return self._sendRequest('__call__', args, kwds)\n\n def getWave(self, *args, **kwds):\n return self._sendRequest('getWave', args, kwds)\n\n def getVariable(self, *args, **kwds):\n return self._sendRequest('getVariable', args, kwds)\n\n def _sendRequest(self, req, args, kwds):\n if isinstance(self.igor, ZMQIgorBridge):\n return getattr(self.igor, req)(*args)\n else:\n fut = concurrent.futures.Future()\n self._newRequest.emit((fut, req, args, kwds))\n return fut\n\n def _processRequest(self, req):\n fut, method, args, kwds = req\n try:\n result = getattr(self.igor, method)(*args, **kwds)\n fut.set_result(result)\n except Exception as exc:\n fut.set_exception(exc)\n\n def run(self):\n pythoncom.CoInitialize()\n Qt.QThread.run(self)\n\n\nclass IgorBridge(object):\n def __init__(self):\n self.app = None\n\n def tryReconnect(func):\n def _tryReconnect(self, *args, **kwds):\n if self.app is None:\n self.connect()\n try:\n return func(self, *args, **kwds)\n except pywintypes.com_error as exc:\n if exc.args[0] == -2147023174:\n # server unavailable; try reconnecting\n self.connect()\n return func(self, *args, **kwds)\n else:\n raise\n return _tryReconnect\n\n @staticmethod\n def igorProcessExists():\n \"\"\"Return True if an Igor process is currently running.\n \"\"\"\n return 'Igor.exe' in sp.check_output(['wmic', 'process', 'get', 'description,executablepath']) \n\n def connect(self):\n self.app = None\n # Need to check for running process to avoid starting a new one.\n if self.igorProcessExists():\n self.app 
= win32com.client.gencache.EnsureDispatch(\"IgorPro.Application\")\n else:\n raise Exception(\"No Igor process found.\")\n\n @tryReconnect\n def __call__(self, cmd, *args, **kwds):\n \"\"\"Make an Igor function call.\n \n Any keyword arguments are optional parameters.\n \"\"\"\n cmd = self.formatCall(cmd, *args, **kwds)\n err, errmsg, hist, res = self.app.Execute2(1, 0, cmd, 0, \"\", \"\", \"\")\n if err != 0:\n raise RuntimeError(\"Igor call returned error code %d: %s\" % (err, errmsg))\n return res\n\n def formatCall(self, cmd, *args, **kwds):\n for kwd, val in kwds.items():\n if isinstance(val, int):\n args.append(\"{}={:d}\".format(kwd, val))\n elif isinstance(val, float):\n args.append(\"{}={:f}\".format(kwd, val))\n else:\n raise TypeError(\"Invalid value: {}\".format(val))\n return \"{}({})\".format(cmd, \", \".join([\"{}\"]*len(args)).format(*args))\n\n @tryReconnect\n def getWave(self, folder, waveName):\n df = self.app.DataFolder(folder)\n wave = df.Wave(waveName)\n\n # get data type and array shape\n typ, rows, cols, layers, chunks = wave.GetDimensions()\n dtype = dtypes[typ]\n shape = [rows, cols, layers, chunks]\n ndim = shape.index(0)\n shape = shape[:ndim]\n\n # get [(slope, intercept), ...] scale factors for each axis\n # could use this to return a metaarray..\n scaling = [wave.GetScaling(ax) for ax in range(len(shape))]\n\n data = np.array(wave.GetNumericWaveData(typ))\n\n return data, scaling\n\n @tryReconnect\n def getVariable(self, folder, varName):\n df = self.app.DataFolder(folder)\n var = df.Variable(varName)\n typ = var.get_DataType()\n if dtypes[typ] == 'str':\n return var.GetStringValue()\n else:\n r,i = var.getNumericValue()\n if dtypes[typ] == 'complex':\n return complex(r, i)\n else:\n return r\n\n\nclass ZMQIgorBridge(object):\n \"\"\"Bridge to Igor via ZMQ DEALER/ROUTER.\"\"\"\n _context = zmq.Context()\n\n _types = {\"NT_FP32\": np.float32,\n \"NT_FP64\": np.float64}\n\n def __init__(self, host=\"tcp://localhost\", port=5670):\n super(ZMQIgorBridge, self).__init__()\n self._unresolvedFutures = {}\n self._currentMessageID = 0\n self.address = \"{}:{}\".format(host, port)\n self._socket = self._context.socket(zmq.DEALER)\n self._socket.setsockopt(zmq.IDENTITY, \"igorbridge\")\n self._socket.setsockopt(zmq.SNDTIMEO, 1000)\n self._socket.setsockopt(zmq.RCVTIMEO, 0)\n self._socket.connect(self.address)\n self._pollTimer = Qt.QTimer()\n self._pollTimer.timeout.connect(self._checkRecv)\n self._pollTimer.start(100)\n\n def __call__(self, cmd, *args):\n # TODO: Handle optional values whenever they become supported in Igor\n messageID = self._getMessageID()\n future = concurrent.futures.Future()\n call = self.formatCall(cmd, params=args, messageID=messageID)\n try:\n self._socket.send_multipart(call)\n self._unresolvedFutures[messageID] = future\n except zmq.error.Again:\n self._unresolvedFutures.pop(messageID)\n future.set_exception(IgorCallError(\"Send timed out\",\n IgorCallError.TIMEDOUT))\n return future\n\n def _checkRecv(self):\n try:\n reply = json.loads(self._socket.recv_multipart()[-1])\n messageID = reply.get(\"messageID\", None)\n future = self._unresolvedFutures.get(messageID, None)\n if future is None:\n raise RuntimeError(\"No future found for messageID {}\".format(messageID))\n try:\n reply = self.parseReply(reply)\n future.set_result(reply)\n except IgorCallError as e:\n future.set_exception(e)\n except zmq.error.Again:\n pass\n\n def _getMessageID(self):\n mid = self._currentMessageID\n self._currentMessageID += 1\n return str(mid)\n\n def 
formatCall(self, cmd, params, messageID):\n call = {\"version\": 1,\n \"messageID\": messageID,\n \"CallFunction\": {\n \"name\": cmd,\n \"params\": params}\n }\n msg = [b\"\", json.dumps(call).encode()]\n return msg\n\n def parseReply(self, reply):\n err = reply.get(\"errorCode\", {}).get(\"value\", None)\n if err is None:\n raise RuntimeError(\"Invalid response from Igor\")\n elif err != 0:\n msg = reply.get(\"errorCode\", {}).get(\"msg\", \"\")\n raise IgorCallError(\"Call failed with message: {}\".format(msg))\n else:\n result = reply.get(\"result\", {})\n restype = result.get(\"type\", \"\")\n val = result.get(\"value\", None)\n if (restype == \"wave\") and (val is not None):\n return self.parseWave(val)\n else:\n return val\n\n def parseWave(self, jsonWave):\n dtype = self._types.get(jsonWave[\"type\"], np.float)\n shape = jsonWave[\"dimension\"][\"size\"]\n raw = np.array(jsonWave[\"data\"][\"raw\"], dtype=dtype)\n return raw.reshape(shape, order=\"F\")\n\n\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) < 3:\n path = 'root:MIES:ITCDevices:ITC1600:Device0'\n file = 'OscilloscopeData'\n else:\n path, file = sys.argv[1:3]\n\n import pyqtgraph as pg\n app = pg.mkQApp()\n plt = pg.plot(labels={'bottom': ('Time', 's')})\n igor = IgorThread()\n fut = []\n\n def update():\n global data, scaling, fut\n if not plt.isVisible():\n timer.stop()\n return\n\n if len(fut) < 10:\n fut.append(igor.getWave(path, file))\n\n if fut[0].done():\n data, scaling = fut.pop(0).result()\n #data, scaling = igor.getWave('root:MIES:ITCDevices:ITC1600:Device0:TestPulse', 'TestPulseITC')\n x = np.arange(data.shape[0]) * (scaling[0][0] * 1e-3)\n plt.clear()\n if data.ndim == 2:\n plt.plot(x, data[:,-1])\n else:\n plt.plot(x, data)\n\n\n timer = Qt.QTimer()\n timer.timeout.connect(update)\n timer.start(10)\n\n app.exec_()"
] |
[
[
"numpy.random.shuffle",
"numpy.linspace"
],
[
"numpy.arange",
"numpy.array"
]
] |
zhuxinqimac/disentanglement_lib
|
[
"6620090e90ce935758cb607c01764ee404749e87"
] |
[
"disentanglement_lib/data/ground_truth/shapes3d.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The DisentanglementLib Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Shapes3D data set.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nfrom disentanglement_lib.data.ground_truth import ground_truth_data\nfrom disentanglement_lib.data.ground_truth import util\nimport numpy as np\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\nimport h5py\nimport pickle\n\n\n# SHAPES3D_PATH = os.path.join(\n # os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\",\n # \"look-at-object-room_floor-hueXwall-hueXobj-\"\n # \"hueXobj-sizeXobj-shapeXview-azi.npz\"\n# )\n# SHAPES3D_PATH = os.path.join(\n # os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"), \"3dshapes\",\n # \"shapes3d_dict.pkl\"\n# )\nSHAPES3D_PATH = os.path.join(\n os.environ.get(\"DISENTANGLEMENT_LIB_DATA\", \".\"),\n \"3dshapes.h5\"\n)\n\n\nclass Shapes3D(ground_truth_data.GroundTruthData):\n \"\"\"Shapes3D dataset.\n\n The data set was originally introduced in \"Disentangling by Factorising\".\n\n The ground-truth factors of variation are:\n 0 - floor color (10 different values)\n 1 - wall color (10 different values)\n 2 - object color (10 different values)\n 3 - object size (8 different values)\n 4 - object type (4 different values)\n 5 - azimuth (15 different values)\n \"\"\"\n\n def __init__(self):\n # with tf.gfile.GFile(SHAPES3D_PATH, \"rb\") as f:\n # # Data was saved originally using python2, so we need to set the encoding.\n # data = np.load(f, encoding=\"latin1\")\n # images = data[\"images\"]\n # labels = data[\"labels\"]\n # n_samples = np.prod(images.shape[0:6])\n\n with h5py.File(SHAPES3D_PATH, 'r') as dataset:\n images = dataset['images'][()]\n labels = dataset['labels'][()]\n n_samples = images.shape[0]\n\n # with tf.gfile.GFile(SHAPES3D_PATH, \"rb\") as f:\n # data = pickle.load(f)\n # images = data[\"images\"]\n # labels = data[\"labels\"]\n # n_samples = images.shape[0]\n\n self.images = (\n images.reshape([n_samples, 64, 64, 3]).astype(np.float32) / 255.)\n features = labels.reshape([n_samples, 6])\n self.factor_sizes = [10, 10, 10, 8, 4, 15]\n self.latent_factor_indices = list(range(6))\n self.num_total_factors = features.shape[1]\n self.state_space = util.SplitDiscreteStateSpace(self.factor_sizes,\n self.latent_factor_indices)\n self.factor_bases = np.prod(self.factor_sizes) / np.cumprod(\n self.factor_sizes)\n\n @property\n def num_factors(self):\n return self.state_space.num_latent_factors\n\n @property\n def factors_num_values(self):\n return self.factor_sizes\n\n @property\n def observation_shape(self):\n return [64, 64, 3]\n\n\n def sample_factors(self, num, random_state):\n \"\"\"Sample a batch of factors Y.\"\"\"\n return self.state_space.sample_latent_factors(num, random_state)\n\n def sample_observations_from_factors(self, factors, random_state):\n all_factors = self.state_space.sample_all_factors(factors, random_state)\n indices 
= np.array(np.dot(all_factors, self.factor_bases), dtype=np.int64)\n return self.images[indices]\n"
] |
[
[
"numpy.dot",
"numpy.cumprod",
"numpy.prod"
]
] |
lavanaythakral/Aspect-Based-Sentiment-Analysis
|
[
"1e2d57277b33778309131e69b69ead7afbd0dd59"
] |
[
"test_file.py"
] |
[
"import aspect_based_sentiment_analysis as absa\nimport pandas as pd\nimport time\nimport re \nimport csv\nimport logging\nimport os\n\ndef find_sentence(word,test_sentences):\n\tprint(\"Finding sentences\")\n\tsentence =[]\n\tfor sen in test_sentences:\n\t\tif(word in sen.lower()):\n\t\t\tsentence.append(sen.lower())\n\tprint(\"Sentences found\\n\")\n\tprint(\"----------------------\")\n\treturn sentence\n\ndef sentim(word,test_sentences,nlp):\n\tsentences = find_sentence(word,test_sentences)\n\tprint(\"Calculating sentiments\")\n\tprint(\"----------------------\")\n\tall_sentiments = []\n\tfor sen in sentences:\n\t\tprint(sen)\n\t\tsentiment = nlp(sen, aspects=[word])\n\t\tall_sentiments.append(sentiment.subtasks[word].sentiment.value)\n\t\t# sentiment = 0\n\t\t# all_sentiments.append(0)\n\tprint(\"----------------------\")\n\tprint(\"Sentiments calculated\\n\")\n\treturn all_sentiments\n\ndef make_row(word,all_sentiments):\n\tprint(\"Making rows\")\n\tcnt_pos = 0\n\tcnt_neg = 0\n\tcnt_neu = 0\n\tfor sen in all_sentiments:\n\t\tif(sen == 0):\n\t\t\tcnt_neu = cnt_neu+1\n\t\telif(sen == 1):\n\t\t\tcnt_neg = cnt_neg+1\n\t\telif(sen == 2):\n\t\t\tcnt_pos = cnt_pos+1\n\n\trow = [word,cnt_pos,cnt_neu,cnt_neg]\n\treturn row\n\ndef main():\n\tnlp = absa.load()\n\tprint(\"Model Loaded\")\n\tdf = pd.read_csv('/home/luv/Downloads/Thesis project-20200829T070727Z-001/Thesis project/datasets/WOF_split_into_sentences.csv')\n\ttest_sentences = list(df['Sentences'])\n\tkeys = ['time','stage','minister','research','defence','slv','missiles','launch','technology','work','rocket','rameswaram','sarabhai','development','project','space','brahm']\n\twith open('/home/luv/Aspect-Based-Sentiment-Analysis/Custom/Results.csv',\"w\") as f:\n\t\twriter = csv.writer(f)\n\t\twriter.writerow(['word','Pos','Neu','Neg'])\n\t\tfor key in keys:\n\t\t\tprint(\"Using key : \",key)\n\t\t\tall_sentiments = sentim(key,test_sentences,nlp)\n\t\t\trow = make_row(key,all_sentiments)\n\t\t\twriter.writerow(row)\t\n\n\tdf = pd.read_csv('/home/luv/Aspect-Based-Sentiment-Analysis/Custom/Results.csv')\n\tprint(df)\n\nif __name__ == \"__main__\":\n\tmain()"
] |
[
[
"pandas.read_csv"
]
] |
thortom/models
|
[
"57e263422b51032b424efd50e0501001052ea083"
] |
[
"official/vision/beta/ops/mask_ops.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility functions for segmentations.\"\"\"\n\nimport math\n# Import libraries\nfrom cvx2 import latest as cv2\nimport numpy as np\n\n\ndef paste_instance_masks(masks,\n detected_boxes,\n image_height,\n image_width):\n \"\"\"Paste instance masks to generate the image segmentation results.\n\n Args:\n masks: a numpy array of shape [N, mask_height, mask_width] representing the\n instance masks w.r.t. the `detected_boxes`.\n detected_boxes: a numpy array of shape [N, 4] representing the reference\n bounding boxes.\n image_height: an integer representing the height of the image.\n image_width: an integer representing the width of the image.\n\n Returns:\n segms: a numpy array of shape [N, image_height, image_width] representing\n the instance masks *pasted* on the image canvas.\n \"\"\"\n\n def expand_boxes(boxes, scale):\n \"\"\"Expands an array of boxes by a given scale.\"\"\"\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227 # pylint: disable=line-too-long\n # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,\n # whereas `boxes` here is in [x1, y1, w, h] form\n w_half = boxes[:, 2] * .5\n h_half = boxes[:, 3] * .5\n x_c = boxes[:, 0] + w_half\n y_c = boxes[:, 1] + h_half\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = np.zeros(boxes.shape)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n\n return boxes_exp\n\n # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812 # pylint: disable=line-too-long\n # To work around an issue with cv2.resize (it seems to automatically pad\n # with repeated border values), we manually zero-pad the masks by 1 pixel\n # prior to resizing back to the original image resolution. This prevents\n # \"top hat\" artifacts. 
We therefore need to expand the reference boxes by an\n # appropriate factor.\n _, mask_height, mask_width = masks.shape\n scale = max((mask_width + 2.0) / mask_width,\n (mask_height + 2.0) / mask_height)\n\n ref_boxes = expand_boxes(detected_boxes, scale)\n ref_boxes = ref_boxes.astype(np.int32)\n padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)\n segms = []\n for mask_ind, mask in enumerate(masks):\n im_mask = np.zeros((image_height, image_width), dtype=np.uint8)\n # Process mask inside bounding boxes.\n padded_mask[1:-1, 1:-1] = mask[:, :]\n\n ref_box = ref_boxes[mask_ind, :]\n w = ref_box[2] - ref_box[0] + 1\n h = ref_box[3] - ref_box[1] + 1\n w = np.maximum(w, 1)\n h = np.maximum(h, 1)\n\n mask = cv2.resize(padded_mask, (w, h))\n mask = np.array(mask > 0.5, dtype=np.uint8)\n\n x_0 = min(max(ref_box[0], 0), image_width)\n x_1 = min(max(ref_box[2] + 1, 0), image_width)\n y_0 = min(max(ref_box[1], 0), image_height)\n y_1 = min(max(ref_box[3] + 1, 0), image_height)\n\n im_mask[y_0:y_1, x_0:x_1] = mask[\n (y_0 - ref_box[1]):(y_1 - ref_box[1]),\n (x_0 - ref_box[0]):(x_1 - ref_box[0])\n ]\n segms.append(im_mask)\n\n segms = np.array(segms)\n assert masks.shape[0] == segms.shape[0]\n return segms\n\n\ndef paste_instance_masks_v2(masks,\n detected_boxes,\n image_height,\n image_width):\n \"\"\"Paste instance masks to generate the image segmentation (v2).\n\n Args:\n masks: a numpy array of shape [N, mask_height, mask_width] representing the\n instance masks w.r.t. the `detected_boxes`.\n detected_boxes: a numpy array of shape [N, 4] representing the reference\n bounding boxes.\n image_height: an integer representing the height of the image.\n image_width: an integer representing the width of the image.\n\n Returns:\n segms: a numpy array of shape [N, image_height, image_width] representing\n the instance masks *pasted* on the image canvas.\n \"\"\"\n _, mask_height, mask_width = masks.shape\n\n segms = []\n for i, mask in enumerate(masks):\n box = detected_boxes[i, :]\n xmin = box[0]\n ymin = box[1]\n xmax = xmin + box[2]\n ymax = ymin + box[3]\n\n # Sample points of the cropped mask w.r.t. 
the image grid.\n # Note that these coordinates may fall beyond the image.\n # Pixel clipping will happen after warping.\n xmin_int = int(math.floor(xmin))\n xmax_int = int(math.ceil(xmax))\n ymin_int = int(math.floor(ymin))\n ymax_int = int(math.ceil(ymax))\n\n alpha = box[2] / (1.0 * mask_width)\n beta = box[3] / (1.0 * mask_height)\n # pylint: disable=invalid-name\n # Transformation from mask pixel indices to image coordinate.\n M_mask_to_image = np.array(\n [[alpha, 0, xmin],\n [0, beta, ymin],\n [0, 0, 1]],\n dtype=np.float32)\n # Transformation from image to cropped mask coordinate.\n M_image_to_crop = np.array(\n [[1, 0, -xmin_int],\n [0, 1, -ymin_int],\n [0, 0, 1]],\n dtype=np.float32)\n M = np.dot(M_image_to_crop, M_mask_to_image)\n # Compensate the half pixel offset that OpenCV has in the\n # warpPerspective implementation: the top-left pixel is sampled\n # at (0,0), but we want it to be at (0.5, 0.5).\n M = np.dot(\n np.dot(\n np.array([[1, 0, -0.5],\n [0, 1, -0.5],\n [0, 0, 1]], np.float32),\n M),\n np.array([[1, 0, 0.5],\n [0, 1, 0.5],\n [0, 0, 1]], np.float32))\n # pylint: enable=invalid-name\n cropped_mask = cv2.warpPerspective(\n mask.astype(np.float32), M,\n (xmax_int - xmin_int, ymax_int - ymin_int))\n cropped_mask = np.array(cropped_mask > 0.5, dtype=np.uint8)\n\n img_mask = np.zeros((image_height, image_width))\n x0 = max(min(xmin_int, image_width), 0)\n x1 = max(min(xmax_int, image_width), 0)\n y0 = max(min(ymin_int, image_height), 0)\n y1 = max(min(ymax_int, image_height), 0)\n img_mask[y0:y1, x0:x1] = cropped_mask[\n (y0 - ymin_int):(y1 - ymin_int),\n (x0 - xmin_int):(x1 - xmin_int)]\n\n segms.append(img_mask)\n\n segms = np.array(segms)\n return segms\n\n"
] |
[
[
"numpy.dot",
"numpy.maximum",
"numpy.array",
"numpy.zeros"
]
] |
yvonwin/lanenet-lane-detection-pytorch
|
[
"b1a14c3123e988069034eaa783a2dbc6bc509065"
] |
[
"model/lanenet/loss.py"
] |
[
"# coding: utf-8\n\"\"\"\nThis is the implementation of following paper:\nhttps://arxiv.org/pdf/1802.05591.pdf\n\"\"\"\nfrom torch.nn.modules.loss import _Loss\n# from torch.autograd import Variable\nimport torch\nimport torch.nn as nn\nfrom torch.functional import F\n\nDEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\nclass FocalLoss(nn.Module):\n '''\n Only consider two class now: foreground, background.\n '''\n def __init__(self,\n gamma=2,\n alpha=[0.5, 0.5],\n n_class=2,\n reduction='mean',\n device=DEVICE):\n super().__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = reduction\n self.n_class = n_class\n self.device = device\n\n def forward(self, input, target):\n pt = F.softmax(input, dim=1)\n pt = pt.clamp(min=0.000001, max=0.999999)\n target_onehot = torch.zeros(\n (target.size(0), self.n_class, target.size(1),\n target.size(2))).to(self.device)\n # print(target_onehot.size()) # torch.Size([4, 2, 276, 704])\n loss = 0\n for i in range(self.n_class):\n target_onehot[:, i, ...][target == i] = 1\n for i in range(self.n_class):\n # print(pt[:,i,...].size()) # torch.Size([4, 272, 704])\n # print(target_onehot[:,i,...].size()) # torch.Size([4, 276, 704])\n # print(torch.log(pt[:,i,...]).size()) # torch.Size([4, 272, 704])\n loss -= self.alpha[i] * (\n 1 - pt[:, i, ...]\n )**self.gamma * target_onehot[:, i, ...] * torch.log(pt[:, i, ...])\n\n if self.reduction == 'mean':\n loss = torch.mean(loss)\n elif self.reduction == 'sum':\n loss = torch.sum(loss)\n\n return loss\n\n\nclass DiscriminativeLoss(_Loss):\n def __init__(self,\n delta_var=0.5,\n delta_dist=1.5,\n norm=2,\n alpha=1.0,\n beta=1.0,\n gamma=0.001,\n usegpu=False,\n size_average=True):\n super(DiscriminativeLoss, self).__init__(reduction='mean')\n self.delta_var = delta_var\n self.delta_dist = delta_dist\n self.norm = norm\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n self.usegpu = usegpu\n assert self.norm in [1, 2]\n\n def forward(self, input, target):\n\n return self._discriminative_loss(input, target)\n\n def _discriminative_loss(self, embedding, seg_gt):\n batch_size, embed_dim, H, W = embedding.shape\n embedding = embedding.reshape(batch_size, embed_dim, H * W)\n seg_gt = seg_gt.reshape(batch_size, H * W)\n\n var_loss = torch.tensor(0,\n dtype=embedding.dtype,\n device=embedding.device)\n dist_loss = torch.tensor(0,\n dtype=embedding.dtype,\n device=embedding.device)\n reg_loss = torch.tensor(0,\n dtype=embedding.dtype,\n device=embedding.device)\n\n for b in range(batch_size):\n embedding_b = embedding[b] # (embed_dim, H*W)\n seg_gt_b = seg_gt[b] # (H*W)\n\n labels, indexs = torch.unique(seg_gt_b, return_inverse=True)\n num_lanes = len(labels)\n if num_lanes == 0:\n _nonsense = embedding.sum()\n _zero = torch.zeros_like(_nonsense)\n var_loss = var_loss + _nonsense * _zero\n dist_loss = dist_loss + _nonsense * _zero\n reg_loss = reg_loss + _nonsense * _zero\n continue\n\n centroid_mean = []\n for lane_idx in labels:\n seg_mask_i = (seg_gt_b == lane_idx)\n\n if not seg_mask_i.any():\n continue\n\n embedding_i = embedding_b * seg_mask_i\n mean_i = torch.sum(embedding_i, dim=1) / torch.sum(seg_mask_i)\n centroid_mean.append(mean_i)\n # ---------- var_loss -------------\n var_loss = var_loss + torch.sum(\n F.relu(\n torch.norm(embedding_i[:, seg_mask_i] -\n mean_i.reshape(embed_dim, 1),\n dim=0) - self.delta_var)**\n 2) / torch.sum(seg_mask_i) / num_lanes\n centroid_mean = torch.stack(centroid_mean) # (n_lane, embed_dim)\n\n if num_lanes > 1:\n centroid_mean1 = 
centroid_mean.reshape(-1, 1, embed_dim)\n                centroid_mean2 = centroid_mean.reshape(1, -1, embed_dim)\n\n                dist = torch.norm(centroid_mean1 - centroid_mean2,\n                                  dim=2)  # shape (num_lanes, num_lanes)\n                dist = dist + torch.eye(num_lanes,\n                                        dtype=dist.dtype,\n                                        device=dist.device) * self.delta_dist\n\n                # each pairwise distance is counted twice above, so divide the sum by two\n                dist_loss = dist_loss + torch.sum(\n                    F.relu(-dist + self.delta_dist)**2) / (num_lanes *\n                                                           (num_lanes - 1)) / 2\n\n                # reg_loss is not used in the original paper\n                # reg_loss = reg_loss + torch.mean(torch.norm(centroid_mean, dim=1))\n\n        var_loss = var_loss / batch_size\n        dist_loss = dist_loss / batch_size\n        reg_loss = reg_loss / batch_size\n\n        return var_loss, dist_loss, reg_loss\n"
] |
[
[
"torch.mean",
"torch.norm",
"torch.functional.F.softmax",
"torch.functional.F.relu",
"torch.sum",
"torch.zeros_like",
"torch.eye",
"torch.tensor",
"torch.unique",
"torch.log",
"torch.cuda.is_available",
"torch.stack"
]
] |
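A minimal usage sketch for the two losses above (the module name `lane_losses` is hypothetical, and all tensor shapes are illustrative):

    # Sketch only: `lane_losses` stands in for wherever the file above lives.
    import torch
    from lane_losses import FocalLoss, DiscriminativeLoss

    seg_logits = torch.randn(4, 2, 64, 128)        # (batch, n_class, H, W)
    seg_gt = torch.randint(0, 2, (4, 64, 128))     # binary segmentation labels
    embedding = torch.randn(4, 4, 64, 128)         # (batch, embed_dim, H, W)
    inst_gt = torch.randint(0, 5, (4, 64, 128))    # instance (lane) ids

    focal = FocalLoss(gamma=2, alpha=[0.5, 0.5], n_class=2, device='cpu')
    disc = DiscriminativeLoss(delta_var=0.5, delta_dist=1.5)

    seg_loss = focal(seg_logits, seg_gt)
    var_loss, dist_loss, reg_loss = disc(embedding, inst_gt)
    total_loss = seg_loss + var_loss + dist_loss   # reg_loss stays zero by default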
cjsyzwsh/South_Australia_Transport_Econ
|
[
"8c27f3015193113f8f479e7c0e0c3ff1ac42944e"
] |
[
"src/d02_intermediate/preprocess_1_socioecon.py"
] |
[
"# preprocessing the socioeconomic variables\n# inputs: five raw socio-demographcics data.\n# outputs: one combined socio-econ geopandas data frame.\n\n# import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport geopandas as gpd\n\n# system path\nimport sys\nimport os\n\n# util path\nutility_path = os.path.join(os.getcwd(),'src/d00_utils/')\nsys.path.append(utility_path)\nimport utilities as util\n\n# data path\n# sw: define the path based on the root project directory.\nraw_data_path = os.path.join(os.getcwd(),'data/01_raw/')\nintermediate_data_path = os.path.join(os.getcwd(),'data/02_intermediate/')\n\n\n# # raw data\n# mount_path = \"/Users/shenhaowang/Dropbox (MIT)/project_econ_opportunity_south_Australia\"\n\n\n#region 1. read and edit job and income data frames\njobs_all = pd.read_csv(raw_data_path + \"SA2_Jobs_All_Jobs_and_Income.csv\")\njobs_industries = pd.read_csv(raw_data_path + \"SA2_Jobs_In_Australia_Employee_Jobs_and_Income.csv\")\n\n# change column names for the two jobs data frame.\nnew_idx = []\nfor col in jobs_industries.columns:\n if col[0] == ' ':\n new_idx.append(col[1:])\n else:\n new_idx.append(col)\n\njobs_industries.columns = new_idx\n\nnew_idx = []\nfor col in jobs_all.columns:\n if col[0] == ' ':\n new_idx.append(col[1:])\n else:\n new_idx.append(col)\n\njobs_all.columns = new_idx\n\n# change types of job data frames\njobs_all['sa2_code16'] = jobs_all['sa2_code16'].astype('str')\njobs_industries['sa2_code16'] = jobs_industries['sa2_code16'].astype('str')\n# useful variables: all\n#endregion\n\n\n#region 2. read and edit age dataframe\nage_df = pd.read_csv(raw_data_path + \"data_age.csv\")\n# choose one section of age df\nage_df = age_df[[' sa2_main16', 'p_tot_75_84_yrs', ' p_tot_35_44_yrs', ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs', ' p_tot_55_64_yrs', ' p_tot_tot']]\n\nfor col in ['p_tot_75_84_yrs', ' p_tot_35_44_yrs', ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs', ' p_tot_55_64_yrs']:\n age_df[col] = age_df[col] / age_df[\" p_tot_tot\"] # compute percentage\n\navg_med_age = []\nfor idx, row in age_df.iterrows():\n avg = 0\n for col in ['p_tot_75_84_yrs', ' p_tot_35_44_yrs',\n ' p_tot_45_54_yrs', ' p_tot_25_34_yrs', ' p_tot_85ov',\n ' p_tot_65_74_yrs', ' p_tot_20_24_yrs', ' p_tot_15_19_yrs',\n ' p_tot_55_64_yrs']:\n if col != \" p_tot_85ov\":\n temp = col.split(\"_\")\n x = int(temp[2])\n y = int(temp[3])\n med = (x + y) / 2\n avg += (row[col] * med)\n else:\n avg += (row[col] * 85)\n # avg = avg/row[' p_tot_tot']\n avg_med_age.append(avg)\n\n# create average age column\nage_df[\"avg_age\"] = avg_med_age\n\n# edit sa2_main16 type\nage_df[' sa2_main16'] = age_df[' sa2_main16'].astype('str')\n\n# rename the columns\nrename_dic = {'p_tot_75_84_yrs':'percent_75_84_yrs',\n ' p_tot_35_44_yrs':'percent_35_44_yrs',\n ' p_tot_45_54_yrs':'percent_45_54_yrs',\n ' p_tot_25_34_yrs':'percent_25_34_yrs',\n ' p_tot_85ov':'percent_85ov_yrs',\n ' p_tot_65_74_yrs':'percent_65_74_yrs',\n ' p_tot_20_24_yrs':'percent_20_24_yrs',\n ' p_tot_15_19_yrs':'percent_15_19_yrs',\n ' p_tot_55_64_yrs':'percent_55_64_yrs',\n ' p_tot_tot':'total_pop',\n ' sa2_main16':'sa2_main16'}\n\n\nage_df.rename(columns=rename_dic, inplace=True)\n# note: average age = NaN or zero exists, because many zones don't have population..\n# useful variables: all.\n#endregion\n\n\n#region 3. 
read and edit gender and education dataframe\ngender_educ_df = pd.read_csv(raw_data_path + \"data_gender_educ.csv\")\nall_educ_df = pd.read_csv(raw_data_path + \"data_all_educ.csv\")\n\n#\ngender_educ_df[\" gender_tot_tot\"] = gender_educ_df[\" m_tot_tot\"] + gender_educ_df[\" f_tot_tot\"]\n\nfor col in ['m_tot_75_84_yr', ' m_adv_dip_dip_total', ' m_tot_35_44_yr',\n ' m_tot_55_64_yr', ' f_b_deg_tot', ' m_tot_85_yr_over',\n ' f_tot_55_64_yr', ' f_cer_tot_tot', ' f_tot_65_74_yr',\n ' f_tot_15_24_yr', ' m_grad_dip_cer_tot',\n ' f_tot_75_84_yr', ' f_tot_45_54_yr', ' m_tot_45_54_yr',\n ' f_adv_dip_dip_total', ' f_tot_35_44_yr', ' m_tot_25_34_yr',\n ' m_pg_deg_tot', ' m_tot_65_74_yr', ' m_tot_15_24_yr',\n ' f_pguate_deg_tot', ' m_b_deg_tot', ' m_cer_tot_tot',\n ' f_tot_25_34_yr', ' f_grad_dip_cer_tot',\n ' f_tot_85_yr_over']:\n g = col.split(\"_\")[0]\n if g in {\" m\", \"m\"}:\n gender_educ_df[col] = gender_educ_df[col] / gender_educ_df[\" m_tot_tot\"]\n else:\n gender_educ_df[col] = gender_educ_df[col] / gender_educ_df[\" f_tot_tot\"]\n\ngender_educ_df[\"male_percent\"] = gender_educ_df[\" m_tot_tot\"]/gender_educ_df[\" gender_tot_tot\"]\ngender_educ_df[\"female_percent\"] = gender_educ_df[\" f_tot_tot\"]/gender_educ_df[\" gender_tot_tot\"]\n\n# edit sa2_main var\nhelper = [str(elt) for elt in gender_educ_df[\" sa2_main16\"].values]\ngender_educ_df[\"SA2_MAIN16\"] = helper\n\n#\nall_educ_df['bachelor_degree_percent'] = all_educ_df[' p_b_deg_tot']/all_educ_df[' p_tot_tot']\nall_educ_df['master_degree_percent'] = all_educ_df[' p_grad_dip_cer_tot']/all_educ_df[' p_tot_tot']\n\n#\nall_educ_df.rename(columns={' sa2_main16':'sa2_main16'},inplace=True)\n\n#\nall_educ_df['sa2_main16']=all_educ_df['sa2_main16'].astype('str')\n\n#\ngender_educ_df = gender_educ_df[['SA2_MAIN16', 'male_percent', 'female_percent']]\nall_educ_df = all_educ_df[['sa2_main16', 'bachelor_degree_percent', 'master_degree_percent']]\n#endregion\n\n\n#region 4. read and edit indigenous social variables\nindigenous_social_df = pd.read_csv(raw_data_path + \"data_indigenous.csv\")\n\n# replace names\nrename_dic = {'perc_indig_age_0_14':'perc_indig_age_0_14',\n ' perc_indig_hsld_equiv_inc_less_than_300':'perc_indig_hsld_equiv_inc_less_than_300',\n ' perc_indig_rent_oth_dwl':'perc_indig_rent_oth_dwl',\n ' perc_indig_no_vehicle_in_hsld':'perc_indig_no_vehicle_in_hsld',\n ' perc_indig_age_65_over':'perc_indig_age_65_over',\n ' perc_indig_rent_priv_dwl':'perc_indig_rent_priv_dwl',\n ' perc_indig_rent_pub_dwl':'perc_indig_rent_pub_dwl',\n ' perc_indig_f':'perc_indig_f',\n ' perc_indig_owned_outright_dwl':'perc_indig_owned_outright_dwl',\n ' perc_indig_age_35_64':'perc_indig_age_35_64',\n ' perc_indig_age_15plus_edu_degree_diploma_certificate':'perc_indig_age_15plus_edu_degree_diploma_certificate',\n ' perc_indig_hsld_equiv_inc_1000_1500':'perc_indig_hsld_equiv_inc_1000_1500',\n ' perc_indig_1_or_more_vehicle_in_hsld':'perc_indig_1_or_more_vehicle_in_hsld',\n ' perc_indig_hsld_equiv_inc_above_1500':'perc_indig_hsld_equiv_inc_above_1500',\n ' perc_indig_hsld_equiv_inc_300_1000':'perc_indig_hsld_equiv_inc_300_1000',\n ' sa2_code16':'sa2_code16',\n ' perc_indig_m':'perc_indig_m',\n ' perc_indig_age_15_34':'perc_indig_age_15_34',\n ' perc_indig_age_15plus_edu_none':'perc_indig_age_15plus_edu_none'}\n\nindigenous_social_df.rename(columns=rename_dic,inplace=True)\n\nindigenous_social_df['sa2_code16']=indigenous_social_df['sa2_code16'].astype('str')\n\n# print(indigenous_social_df.shape)\n#endregion\n\n\n#region 5. 
Other socio economic variables\necon_df = pd.read_csv(raw_data_path+'social_econ_indicators.csv')\nunemployment_rate_df = pd.read_csv(raw_data_path+'data_unemployment_rate.csv')\n\n#\nrename_dic = {'pov_rt_exc_hc_syn':'poverty_rate_1',\n ' housestrs_syn': 'hh_finance_stress',\n ' equivinc_median_syn':'equivinc_median_syn',\n ' pov_rt_syn':'poverty_rate_2',\n ' inc_median_syn':'median_inc',\n ' gini_syn':'gini',\n ' sa2_code16': 'sa2_code16'}\necon_df.rename(columns=rename_dic,inplace=True)\necon_df['sa2_code16']=econ_df['sa2_code16'].astype('str')\n\n\n#\nrename_dic = {'unemployment_rate':'unemployment_rate',\n ' sa2_code16': 'sa2_code16'}\nunemployment_rate_df.rename(columns=rename_dic,inplace=True)\nunemployment_rate_df['sa2_code16']=unemployment_rate_df['sa2_code16'].astype('str')\n\n#endregion\n\n\n\n\n#region 6. merge all socio-economic variables\n# jobs_all, jobs_industries, age_df, gender_educ_df, all_educ_df, indigenous_social_df\n# print(jobs_all.columns) # sa2_code16\n# print(jobs_all.shape)\n# print(jobs_industries.columns) # sa2_code16\n# print(jobs_industries.shape)\n# print(age_df.columns) # sa2_main16\n# print(age_df.shape)\n# print(gender_educ_df.columns) # SA2_MAIN16\n# print(gender_educ_df.shape)\n# print(all_educ_df.columns) # sa2_main16\n# print(all_educ_df.shape)\n# print(indigenous_social_df.columns) # sa2_code16\n# print(indigenous_social_df.shape)\n\nsocio_econ_df = jobs_all.merge(jobs_industries, on='sa2_code16', suffixes=(\"\",\"_y\"))\nsocio_econ_df = socio_econ_df.merge(age_df, left_on='sa2_code16', right_on='sa2_main16')\nsocio_econ_df = socio_econ_df.merge(gender_educ_df, left_on='sa2_code16', right_on='SA2_MAIN16')\nsocio_econ_df = socio_econ_df.merge(all_educ_df, left_on='sa2_code16', right_on='sa2_main16')\nsocio_econ_df = socio_econ_df.merge(indigenous_social_df, on='sa2_code16', suffixes=(\"\",\"_z\"))\nsocio_econ_df = socio_econ_df.merge(econ_df, on='sa2_code16', suffixes=(\"\",\"_drop\"))\nsocio_econ_df = socio_econ_df.merge(unemployment_rate_df, on='sa2_code16', suffixes=(\"\",\"_drop\"))\n\nprint(socio_econ_df.shape)\nprint(socio_econ_df.columns)\n#endregion\n\n\n# save files\nsocio_econ_df.to_pickle(intermediate_data_path+'sa2_node_with_socio_econ_df.pickle') # Pycharm code\n# socio_econ_df.to_pickle('../data/socio_econ_df.pickle') # command line code.\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv"
]
] |
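The script above repeatedly works around raw CSV headers that arrive with a stray leading space. A generic cleanup sketch (the helper name is illustrative) that could replace the per-frame renaming loops:

    import pandas as pd

    def clean_headers(df: pd.DataFrame) -> pd.DataFrame:
        # Strip stray whitespace that the raw exports leave in column names.
        out = df.copy()
        out.columns = [col.strip() for col in out.columns]
        return out

    # e.g. jobs_all = clean_headers(pd.read_csv(raw_data_path + "SA2_Jobs_All_Jobs_and_Income.csv"))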
lingluodlut/BioCreativeVII_DrugProt
|
[
"b3ee015286d0168ccc30e62bdfaca5a341164401"
] |
[
"src/represent_sl.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 30 19:54:17 2021\n\n@author: luol2\n\"\"\"\n\n\n\nimport os, sys\nimport numpy as np\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom transformers import AutoTokenizer\n \n\nclass Hugface_RepresentationLayer(object):\n \n \n def __init__(self, tokenizer_name_or_path, label_file,lowercase=True):\n \n\n #load vocab\n #self.bert_vocab_dict = {}\n #self.cased=cased\n #self.load_bert_vocab(vocab_path,self.bert_vocab_dict)\n self.model_type='bert'\n #self.model_type='roberta'\n if self.model_type in {\"gpt2\", \"roberta\"}:\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True,do_lower_case=lowercase)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True,do_lower_case=lowercase)\n \n self.tokenizer.add_tokens([\"arg1s\",\"arg1e\",\"gene1s\",\"gene1e\",\"chemical1s\",\"chemical1e\"])\n\n #load label\n self.label_2_index={}\n self.index_2_label={}\n self.label_table_size=0\n self.load_label_vocab(label_file,self.label_2_index,self.index_2_label)\n self.label_table_size=len(self.label_2_index)\n self.vocab_len=len(self.tokenizer)\n \n def load_label_vocab(self,fea_file,fea_index,index_2_label):\n \n fin=open(fea_file,'r',encoding='utf-8')\n all_text=fin.read().strip().split('\\n')\n fin.close()\n for i in range(0,len(all_text)):\n fea_index[all_text[i]]=i\n index_2_label[str(i)]=all_text[i]\n \n\n \n def generate_label_list(self,bert_tokens,labels,word_index):\n label_list=['O']*len(word_index)\n label_i=0\n if len(word_index)!=len(bert_tokens):\n print('index != tokens',word_index,bert_tokens)\n sys.exit()\n last_word_index=0\n for i in range(0,len(word_index)):\n if word_index[i]==None:\n pass\n else:\n label_list[i]=labels[word_index[i]]\n \n label_list_index=[]\n bert_text_label=[]\n for i in range(0,len(bert_tokens)):\n label_list_index.append(self.label_2_index[label_list[i]])\n bert_text_label.append([bert_tokens[i],label_list[i]])\n # for label in labels:\n # temp_label=[0]*self.label_table_size\n # temp_label[self.label_2_index[label]]=1\n # label_list.append(temp_label)\n #print(bert_text_label)\n return label_list_index,bert_text_label\n \n \n def load_data_hugface(self,instances, labels, word_max_len=100, label_type='crf',training_set=False):\n \n x_index=[]\n x_seg=[]\n x_mask=[]\n y_list=[]\n bert_text_labels=[]\n max_len=0\n over_num=0\n maxT=word_max_len\n ave_len=0\n\n #print('instances:', instances)\n #print('labels:',labels)\n \n \n for sentence in instances: \n sentence_text_list=[]\n label_list=[]\n for j in range(0,len(sentence)):\n sentence_text_list.append(sentence[j][0])\n label_list.append(sentence[j][-1])\n\n token_result=self.tokenizer(\n sentence_text_list,\n max_length=word_max_len,\n truncation=True,is_split_into_words=True)\n \n bert_tokens=self.tokenizer.convert_ids_to_tokens(token_result['input_ids'])\n word_index=token_result.word_ids(batch_index=0)\n ave_len+=len(bert_tokens)\n if len(sentence_text_list)>max_len:\n max_len=len(sentence_text_list)\n if len(bert_tokens)==maxT:\n over_num+=1\n\n x_index.append(token_result['input_ids'])\n if self.model_type in {\"gpt2\", \"roberta\"}:\n x_seg.append([0]*len(token_result['input_ids']))\n else:\n x_seg.append(token_result['token_type_ids'])\n x_mask.append(token_result['attention_mask'])\n\n #print('label:',label_list)\n label_list,bert_text_label=self.generate_label_list(bert_tokens,label_list,word_index)\n #print('\\nlabel list:',label_list)\n 
#print('\\nbert_text_label:',bert_text_label)\n #sys.exit()\n y_list.append(label_list)\n #print(y_list)\n bert_text_labels.append(bert_text_label)\n\n \n x1_np = pad_sequences(x_index, word_max_len, value=0, padding='post',truncating='post') # right padding\n x2_np = pad_sequences(x_seg, word_max_len, value=0, padding='post',truncating='post')\n x3_np = pad_sequences(x_mask, word_max_len, value=0, padding='post',truncating='post')\n y_np = pad_sequences(y_list, word_max_len, value=0, padding='post',truncating='post')\n #print('x1_np:',x1_np)\n #print('\\nx2_np:',x2_np)\n #print('\\ny_np:',y_np)\n #print('\\nbert_text:',bert_text_labels)\n #print('bert max len:',max_len,',Over',maxT,':',over_num,'ave len:',ave_len/len(instances),'total:',len(instances))\n if label_type=='onehot': \n y_np = np.eye(len(labels), dtype='float32')[y_np]\n elif label_type=='softmax':\n y_np = np.expand_dims(y_np, 2)\n elif label_type=='crf':\n pass\n \n if training_set:\n #class_weight = {0: 1.0, 1: 3.597, 2: 4.106, 3: 4.004, 4: 7.312, 5: 3.956, 6: 8.046, 7: 2.254, 8: 3.612, 9: 3.715, 10: 8.170, 11: 3.239, 12: 3.066, 13: 3.654}\n class_weight = {0: 1.0, 1: 3.597, 2: 4.106, 3: 4.004, 4: 7.312, 5: 8.956, 6: 8.046, 7: 8.254, 8: 3.612, 9: 3.715, 10: 8.170, 11: 8.239, 12: 8.066, 13: 3.654}\n print('\\n.......sample weight:',class_weight)\n sample_weight=[]\n for _line in y_list:\n _tempw=[]\n for _y in _line:\n _tempw.append(class_weight[_y])\n sample_weight.append(_tempw)\n samplew_np = pad_sequences(sample_weight, word_max_len, value=0,dtype=\"float32\", padding='post',truncating='post')\n return [x1_np, x2_np,x3_np], y_np,bert_text_labels, samplew_np\n \n return [x1_np, x2_np,x3_np], y_np,bert_text_labels \n\n\n \n\nif __name__ == '__main__':\n pass\n \n \n \n"
] |
[
[
"numpy.expand_dims",
"tensorflow.keras.preprocessing.sequence.pad_sequences"
]
] |
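The core of `represent_sl.py` is aligning word-level BIO tags to subword tokens through the fast tokenizer's `word_ids`. A standalone sketch of that alignment (the checkpoint name is only an example):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

    words = ["Aspirin", "inhibits", "COX1"]
    tags = ["B-CHEMICAL", "O", "B-GENE"]

    enc = tokenizer(words, is_split_into_words=True, truncation=True, max_length=32)
    # Special tokens ([CLS]/[SEP]) map to None and get "O"; each subword inherits
    # its word's tag, mirroring generate_label_list above.
    aligned = ["O" if wi is None else tags[wi] for wi in enc.word_ids(batch_index=0)]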
juniors90/Flask-Plots
|
[
"1ab233217dc37b1cd2ea727af5d3a7433184863c"
] |
[
"tests/tests_plots/test_render_img.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This file is part of the\n# Flask-Plots Project\n# https://github.com/juniors90/Flask-Plots/\n# Copyright (c) 2021, Ferreira Juan David\n# License: MIT\n# Full Text:\n# https://github.com/juniors90/Flask-Plots/blob/master/LICENSE\n\n# =====================================================================\n# TESTS\n# =====================================================================\n\n\nfrom flask import render_template_string\n\nfrom matplotlib.figure import Figure\n\nimport pytest as pt\n\n\ndef test_render_img(app, client, plots):\n @app.route(\"/render-image\")\n def render_image():\n # Generate the figure **without using pyplot**.\n fig = Figure()\n ax = fig.subplots()\n ax.plot([-1, 4])\n ax.set_title(\"Linear Function\")\n # Embed the result in the html output.\n data_img = plots.get_data(fig)\n return render_template_string(\n \"\"\"{% from 'plots/utils.html' import render_img %}\n {{ render_img(data=data, alt_img='some_img') }}\n \"\"\",\n data=data_img,\n )\n\n response = client.get(\"/render-image\")\n data = response.get_data(as_text=True)\n assert '<img src=\"data:image/png;base64,' in data\n assert 'alt=\"some_img\"/>' in data\n\n @app.route(\"/render-image-with-class\")\n def render_image_with_class():\n # Generate the figure **without using pyplot**.\n fig = Figure()\n ax = fig.subplots()\n ax.plot([1, 4])\n ax.set_title(\"Linear Function 2\")\n # Embed the result in the html output.\n data_img = plots.get_data(fig)\n return render_template_string(\n \"\"\"{% from 'plots/utils.html' import render_img %}\n {{ render_img(data=data, alt_img='some_img', class_img='ui') }}\n \"\"\",\n data=data_img,\n )\n\n response = client.get(\"/render-image-with-class\")\n data = response.get_data(as_text=True)\n assert '<img src=\"data:image/png;base64,' in data\n assert 'alt=\"some_img\" class=\"ui\"/>' in data\n\n @app.route(\"/render-image-with-style\")\n def render_image_with_style():\n # Generate the figure **without using pyplot**.\n fig = Figure()\n ax = fig.subplots()\n ax.plot([1, -4])\n ax.set_title(\"Linear Function 3\")\n # Embed the result in the html output.\n data_img = plots.get_data(fig)\n return render_template_string(\n \"\"\"{% from 'plots/utils.html' import render_img %}\n {{ render_img(data=data, alt_img='my_img', style='float:right') }}\n \"\"\",\n data=data_img,\n )\n\n response = client.get(\"/render-image-with-style\")\n data = response.get_data(as_text=True)\n assert '<img src=\"data:image/png;base64,' in data\n assert 'alt=\"my_img\" style=\"float:right\"/>' in data\n\n with pt.raises(RuntimeError) as excinfo:\n\n @app.route(\"/render-image-with-runtimeerror\")\n def render_image_with_runtimeerror():\n # Generate the figure **without using pyplot**.\n fig = Figure()\n ax = fig.subplots()\n ax.plot([1, -4])\n ax.set_title(\"Linear Function 3\")\n # Embed the result in the html output.\n data_img = plots.get_data(fig) # noqa\n return render_template_string(\n \"\"\"{% from 'plots/utils.html' import render_img %}\n {{ render_img(data=data, alt_img='my_img', style='float:right') }}\n \"\"\" # , data=data_img\n )\n\n response = client.get(\"/render-image-with-runtimeerror\")\n data = response.get_data(as_text=True)\n assert '<img src=\"data:image/png;base64,' not in data\n assert 'alt=\"my_img\" style=\"float:right\"/>' not in data\n assert \"RuntimeError: You must send the data of the image.\" in data\n assert \"RuntimeError: You must send the data of the image.\" in str(\n excinfo.value\n )\n"
] |
[
[
"matplotlib.figure.Figure"
]
] |
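The tests above rely on `plots.get_data(fig)` returning a base64-encoded PNG for the `data:image/png;base64,` URI. A minimal pyplot-free equivalent (the helper name is illustrative, not the Flask-Plots API):

    import base64
    from io import BytesIO
    from matplotlib.figure import Figure

    def figure_to_base64(fig: Figure) -> str:
        # Render the figure to an in-memory PNG and return the base64 payload.
        buf = BytesIO()
        fig.savefig(buf, format="png")
        return base64.b64encode(buf.getvalue()).decode("ascii")

    fig = Figure()
    ax = fig.subplots()
    ax.plot([-1, 4])
    data = figure_to_base64(fig)   # embeds as <img src="data:image/png;base64,{data}">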
plazmer/stm32f0_ARM
|
[
"26a81590875134d0752cead89e713bb22592e78b"
] |
[
"labs/12_i2c_oled_display/convert.py"
] |
[
"import imageio\nimport sys\nimport os\nimport numpy as np\n\nif (len(sys.argv) != 2):\n print(\"Format: python convert.py grayscale_image_name\")\n sys.exit(1)\n\ntry:\n data = imageio.imread(sys.argv[1])\nexcept:\n print(\"Wrong image name!\")\n sys.exit(1)\n\nif (len(data.shape) != 2):\n print(\"Image must be grayscale!\")\n sys.exit(1)\n\noutput = open(os.path.splitext(sys.argv[1])[0] + \".c\", \"w\")\noutput.write(\"const unsigned char my_pic[] = {\\n\")\n\nimage = data.flatten(order='C')\n\nfimage = np.array_split(image, image.shape[0]//16)\n\nfor chunk in fimage:\n fstr = ', '.join(['0x%02x'%x for x in chunk])\n output.write(\" \" + fstr)\n output.write(\",\\n\")\n\noutput.write(\"}\")\noutput.close()\n\nprint(\"Done! The array is stored in \" +\\\n os.path.splitext(sys.argv[1])[0] + \".c\")\n"
] |
[
[
"numpy.array_split"
]
] |
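The converter's chunking idiom (flatten, then `np.array_split` into 16-byte rows) is easy to sanity-check without a file on disk; a small sketch with a synthetic image:

    import numpy as np

    data = np.arange(128, dtype=np.uint8).reshape(8, 16)   # stand-in grayscale image
    image = data.flatten(order="C")

    rows = [", ".join("0x%02x" % x for x in chunk)
            for chunk in np.array_split(image, image.shape[0] // 16)]

    # Every pixel ends up in exactly one emitted row.
    assert sum(len(r.split(", ")) for r in rows) == image.size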
abigailbishop/pyrex
|
[
"10ba2e9f4c8820f4fcf5f00bd866927dacb0b2b5",
"10ba2e9f4c8820f4fcf5f00bd866927dacb0b2b5",
"10ba2e9f4c8820f4fcf5f00bd866927dacb0b2b5"
] |
[
"pyrex/kernel.py",
"pyrex/generation.py",
"analyses/ara_coincidences/missed_events_TD_plotsaver.py"
] |
[
"\"\"\"\nModule for the simulation kernel.\n\nThe simulation kernel is responsible for running through the simulation\nchain by controlling classes and objects which will independently produce\nneutrinos, create corresponding signals, propagate the signals to antennas,\nand handle antenna processing of the signals.\n\n\"\"\"\n\nfrom collections.abc import Sequence\nimport logging\nimport numpy as np\nfrom pyrex.internal_functions import normalize\nfrom pyrex.signals import EmptySignal\nfrom pyrex.askaryan import AskaryanSignal\nfrom pyrex.ray_tracing import RayTracer\nfrom pyrex.ice_model import ice\n\nlogger = logging.getLogger(__name__)\n\n\nclass EventKernel:\n \"\"\"\n High-level kernel for controlling event simulation.\n\n The kernel is responsible for handling the classes and objects which\n control the major simulation steps: particle creation, signal production,\n signal propagation, and antenna response. The modular kernel structure\n allows for easy switching of the classes or objects which handle any of the\n simulation steps.\n\n Parameters\n ----------\n generator\n A particle generator to create neutrino events.\n antennas\n An iterable object consisting of antenna objects which can receive and\n store signals.\n ice_model : optional\n An ice model describing the ice surrounding the `antennas`.\n ray_tracer : optional\n A ray tracer capable of propagating signals from the neutrino vertex\n to the antenna positions.\n signal_model : optional\n A signal class which generates signals based on the particle.\n signal_times : array_like, optional\n The array of times over which the neutrino signal should be generated.\n event_writer : File, optional\n A file object to be used for writing data output.\n triggers : function or dict, optional\n A function or dictionary with function values representing trigger\n conditions of the detector. If a dictionary, must have a \"global\" key\n with its value representing the global detector trigger.\n offcone_max : float or None, optional\n The maximum angle away from the Cherenkov angle to be simulated.\n Antennas which view an event with an angle larger than this angle will\n skip the calculation of the Askaryan signal and assume no significant\n signal is seen. If `None`, no offcone cut is applied.\n weight_min : float or tuple or None, optional\n The minimum particle weight(s) which should be simulated. If a float,\n particles with a total weight less than this value will be skipped. If\n a tuple, particles with a survival weight less than the first element\n of the tuple or with an interaction weight less than the second element\n of the tuple will be skipped. If `None`, no minimum weight is applied.\n attenuation_interpolation : float or None, optional\n The logarithmic (base 10) interpolation step size to be used for\n interpolating attenuation along the ray path. 
If `None`, no\n interpolation of the attenuation is applied.\n\n Attributes\n ----------\n gen\n The particle generator responsible for particle creation.\n antennas\n The iterable of antennas responsible for handling applying their\n response and storing the resulting signals.\n ice\n The ice model describing the ice containing the `antennas`.\n ray_tracer\n The ray tracer responsible for signal propagation through the `ice`.\n signal_model\n The signal class to use to generate signals based on the particle.\n signal_times\n The array of times over which the neutrino signal should be generated.\n writer\n The file object to be used for writing data output.\n triggers\n The trigger condition(s) of the detector.\n offcone_max\n The maximum angle away from the Cherenkov angle to be simulated.\n weight_min\n The minimum particle weight(s) which should be simulated.\n attenuation_interpolation : float or None, optional\n The logarithmic (base 10) interpolation step size to be used for\n interpolating attenuation along the ray path.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n pyrex.ice_model.AntarcticIce : Class describing the ice at the south pole.\n pyrex.RayTracer : Class for calculating the ray-trace solutions between\n points.\n pyrex.AskaryanSignal : Class for generating Askaryan signals according to\n ARZ parameterization.\n pyrex.File : Class for reading or writing data files.\n\n Notes\n -----\n The kernel is designed to be modular so individual parts of the simulation\n chain can be exchanged. In order to interchange the pieces, their classes\n require the following at a minimum:\n\n The particle generator `generator` must have a ``create_event`` method\n which takes no arguments and returns a `Event` object consisting of\n `Particle` objects with ``vertex``, ``direction``, ``energy``, and\n ``weight`` attributes.\n\n The antenna iterable `antennas` must yield each antenna object once when\n iterating directly over `antennas`. Each antenna object must have a\n ``position`` attribute and a ``receive`` method which takes a signal object\n as its first argument, and ``ndarray`` objects as ``direction`` and\n ``polarization`` keyword arguments.\n\n The `ice_model` must have an ``index`` method returning the index of\n refraction given a (negative-valued) depth, and it must support anything\n required of it by the `ray_tracer`.\n\n The `ray_tracer` must be initialized with the particle vertex and an\n antenna position as its first two arguments, and the `ice_model` of the\n kernel as the ``ice_model`` keyword argument. The ray tracer must also have\n ``exists`` and ``solutions`` attributes, the first of which denotes whether\n any paths exist between the given points and the second of which is an\n iterable revealing each path between the points. 
These paths must have\n ``emitted_direction``, ``received_direction``, and ``path_length``\n attributes, as well as a ``propagate`` method which takes a signal object\n and applies the propagation effects of the path in-place to that object.\n\n The `signal_model` must be initialized with the `signal_times` array,\n a `Particle` object from the `Event`, the ``viewing_angle`` and\n ``viewing_distance`` according to the `ray_tracer`, and the `ice_model`.\n The object created should be a `Signal` object with ``times`` and\n ``values`` attributes representing the time-domain Askaryan signal produced\n by the `Particle`.\n\n \"\"\"\n def __init__(self, generator, antennas, ice_model=ice,\n ray_tracer=RayTracer, signal_model=AskaryanSignal,\n signal_times=np.linspace(-50e-9, 50e-9, 2000, endpoint=False),\n event_writer=None, triggers=None, offcone_max=40,\n weight_min=None, attenuation_interpolation=0.1):\n self.gen = generator\n self.antennas = antennas\n self.ice = ice_model\n self.ray_tracer = ray_tracer\n self.signal_model = signal_model\n self.signal_times = signal_times\n self.writer = event_writer\n self.triggers = triggers\n if offcone_max is None:\n self.offcone_max = np.radians(180)\n else:\n self.offcone_max = np.radians(offcone_max)\n if weight_min is None:\n self.weight_min = 0\n else:\n self.weight_min = weight_min\n self.attenuation_interpolation = attenuation_interpolation\n self._gen_count = self.gen.count\n if self.writer is not None:\n if not self.writer.is_open:\n logger.warning(\"Event writer was not open. Opening now.\")\n self.writer.open()\n if not self.writer.has_detector:\n self.writer.set_detector(antennas)\n # Add metadata about the classes used\n kernel_metadata = {\n \"detector_class\": str(type(self.antennas)),\n \"generator_class\": str(type(self.gen)),\n \"ice_model_class\": str(type(self.ice)),\n \"ray_tracer_class\": str(self.ray_tracer),\n \"signal_model_class\": str(self.signal_model),\n \"offcone_max\": np.degrees(self.offcone_max),\n \"attenuation_interpolation\": (self.attenuation_interpolation\n if self.attenuation_interpolation\n is not None else 0),\n }\n if isinstance(self.weight_min, Sequence):\n kernel_metadata[\"survival_weight_min\"] = self.weight_min[0]\n kernel_metadata[\"interaction_weight_min\"] = self.weight_min[1]\n else:\n kernel_metadata[\"weight_min\"] = self.weight_min\n try:\n kernel_metadata[\"earth_model_class\"] = str(type(\n self.gen.earth_model\n ))\n except AttributeError:\n pass\n self.writer.create_analysis_metadataset(\"sim_parameters\")\n self.writer.add_analysis_metadata(\"sim_parameters\", kernel_metadata)\n\n def event(self):\n \"\"\"\n Create a neutrino event and run it through the simulation chain.\n\n Creates a particle using the ``generator``, produces a signal from that\n event, propagates that signal through the ice according to the\n ``ice_model`` and the ``ray_tracer``, and passes it into the\n ``antennas`` for processing.\n\n Returns\n -------\n event : Event\n The neutrino event generated which is responsible for the waveforms\n on the antennas.\n triggered : bool, optional\n If the ``triggers`` parameter was specified, contains whether the\n global trigger condition of the detector was met.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n event = self.gen.create_event()\n ray_paths = []\n polarizations = []\n for i in range(len(self.antennas)):\n ray_paths.append([])\n 
polarizations.append([])\n for particle in event:\n logger.info(\"Processing event for %s\", particle)\n if isinstance(self.weight_min, Sequence):\n if ((particle.survival_weight is not None and\n particle.survival_weight<self.weight_min[0]) or\n (particle.interaction_weight is not None and\n particle.interaction_weight<self.weight_min[1])):\n logger.debug(\"Skipping particle with weight below %s\",\n self.weight_min)\n continue\n elif particle.weight<self.weight_min:\n logger.debug(\"Skipping particle with weight below %s\",\n self.weight_min)\n continue\n\n for i, ant in enumerate(self.antennas):\n rt = self.ray_tracer(particle.vertex, ant.position,\n ice_model=self.ice)\n\n # If no path(s) between the points, skip ahead\n if not rt.exists:\n logger.debug(\"Ray paths to %s do not exist\", ant)\n continue\n\n theta_c = np.arccos(1/self.ice.index(particle.vertex[2]))\n\n ray_paths[i].extend(rt.solutions)\n for path in rt.solutions:\n # nu_pol is the signal polarization at the neutrino vertex\n # It's calculated as the (negative) vector rejection of\n # path.emitted_direction onto particle.direction, making\n # epol orthogonal to path.emitted_direction in the same\n # plane as particle.direction and path.emitted_direction\n # This is equivalent to the vector triple product\n # (particle.direction x path.emitted_direction) x\n # path.emitted_direction\n # In the case when path.emitted_direction and\n # particle.direction are equal, just let nu_pol be zeros\n nu_pol = normalize(np.vdot(path.emitted_direction,\n particle.direction)\n * path.emitted_direction\n - particle.direction)\n polarizations[i].append(nu_pol)\n\n psi = np.arccos(np.vdot(particle.direction,\n path.emitted_direction))\n logger.debug(\"Angle to %s is %f degrees\", ant,\n np.degrees(psi))\n\n try:\n if np.abs(psi-theta_c)>self.offcone_max:\n raise ValueError(\"Viewing angle is larger than \"+\n \"offcone limit \"+\n str(np.degrees(self.offcone_max)))\n pulse = self.signal_model(\n times=self.signal_times,\n particle=particle,\n viewing_angle=psi,\n viewing_distance=path.path_length,\n ice_model=self.ice\n )\n except ValueError as err:\n logger.debug(\"Eliminating invalid Askaryan signal: %s\",\n err)\n ant.receive(\n EmptySignal(self.signal_times+path.tof,\n value_type=EmptySignal.Type.field)\n )\n else:\n ant_pulses, ant_pols = path.propagate(\n signal=pulse, polarization=nu_pol,\n attenuation_interpolation=self.attenuation_interpolation\n )\n ant.receive(\n ant_pulses,\n direction=path.received_direction,\n polarization=ant_pols\n )\n\n if self.triggers is None:\n triggered = None\n elif isinstance(self.triggers, dict):\n triggered = {key: trigger_func(self.antennas)\n for key, trigger_func in self.triggers.items()}\n else:\n triggered = self.triggers(self.antennas)\n\n if self.writer is not None:\n self.writer.add(event=event, triggered=triggered,\n ray_paths=ray_paths, polarizations=polarizations,\n events_thrown=self.gen.count-self._gen_count)\n\n self._gen_count = self.gen.count\n\n if triggered is None:\n return event\n elif isinstance(self.triggers, dict):\n return event, triggered['global']\n else:\n return event, triggered\n",
"\"\"\"\nModule for particle (neutrino) generators.\n\nGenerators are responsible for the input of events into the simulation.\n\n\"\"\"\n\nfrom collections.abc import Iterable\nfrom enum import Enum\nimport logging\nimport numpy as np\nfrom pyrex.internal_functions import get_from_enum\nfrom pyrex.earth_model import earth\nfrom pyrex.particle import Event, Particle, NeutrinoInteraction\nfrom pyrex.io import File\n\nlogger = logging.getLogger(__name__)\n\n\nclass Generator:\n \"\"\"\n Base class for neutrino generators.\n\n Provides methods for generating neutrino attributes except for neutrino\n vertex, which should be provided by child classes to generate neutrinos\n in specific volumes.\n\n Parameters\n ----------\n energy : float or function\n Energy (GeV) of the neutrinos. If ``float``, all neutrinos have the\n same constant energy. If ``function``, neutrinos are generated with the\n energy returned by successive function calls.\n shadow : bool, optional\n Whether Earth shadowing effects should be used to reject events. If\n ``True`` then neutrinos which don't survive transit through the Earth\n will be skipped when creating events. If ``False`` then all events are\n allowed and assigned a weight to scale their probability of occurrence.\n flavor_ratio : array_like, optional\n Flavor ratio of neutrinos to be generated. Of the form [electron, muon,\n tau] neutrino fractions.\n source : optional\n Source type of neutrinos to be generated. Used in the determination of\n per-flavor neutrino/antineutrino fractions.\n interaction_model : optional\n Class to use to describe interactions of the generated particles.\n Should inherit from (or behave like) the base ``Interaction`` class.\n\n Attributes\n ----------\n count : int\n Number of neutrinos produced by the generator, including those not\n returned due to Earth shadowing or other effects.\n get_energy : function\n Function returning energy (GeV) of the neutrinos by successive function\n calls.\n shadow : bool\n Whether Earth shadowing effects will be used to reject events.\n ratio : ndarray\n (Normalized) flavor ratio of neutrinos to be generated. Of the form\n [electron, muon, tau] neutrino fractions.\n source : Generator.SourceType\n Source type of neutrinos to be generated. 
Used in the determination of\n per-flavor neutrino/antineutrino fractions.\n interaction_model : Interaction\n Class to use to describe interactions of the generated particles.\n volume\n solid_angle\n\n See Also\n --------\n pyrex.particle.Interaction : Base class for describing neutrino interaction\n attributes.\n\n \"\"\"\n class SourceType(Enum):\n \"\"\"\n Enum containing possible sources for neutrinos.\n\n Attributes\n ----------\n pgamma, cosmogenic\n pp, astrophysical\n unknown, undefined\n\n \"\"\"\n undefined = 0\n unknown = 0\n cosmogenic = 1\n pgamma = 1\n astrophysical = 2\n pp = 2\n\n def __init__(self, energy, shadow=False, flavor_ratio=(1,1,1),\n source=\"cosmogenic\", interaction_model=NeutrinoInteraction,\n earth_model=earth):\n if not callable(energy):\n try:\n e = float(energy)\n except TypeError:\n raise ValueError(\"energy argument must be a function \"+\n \"or a number\")\n else:\n energy = lambda: e\n self.get_energy = energy\n self.shadow = shadow\n self.ratio = np.array(flavor_ratio)/np.sum(flavor_ratio)\n self.source = source\n self.interaction_model = interaction_model\n self.earth_model = earth_model\n self.count = 0\n\n @property\n def source(self):\n \"\"\"\n Value of the source type.\n\n Should always be a value from the ``Interaction.Type`` enum. Setting\n with integer or string values may work if carefully chosen.\n\n \"\"\"\n return self._source\n\n @source.setter\n def source(self, src_type):\n if src_type is None:\n self._source = self.SourceType.undefined\n else:\n self._source = get_from_enum(src_type, self.SourceType)\n\n @property\n def volume(self):\n \"\"\"\n Generation volume (m^3) in which event vertices are produced.\n\n \"\"\"\n raise NotImplementedError(\"volume property must be implemented by \"+\n \"inheriting class\")\n\n @property\n def solid_angle(self):\n \"\"\"\n Generation solid angle (sr) in which event directions are produced.\n\n \"\"\"\n logger.debug(\"Using default solid_angle from \"+\n \"pyrex.generation.Generator\")\n return 4 * np.pi\n\n def get_vertex(self):\n \"\"\"\n Get the vertex of the next particle to be generated.\n\n For the `Generator` class, this method is not implemented.\n Subclasses should override this method with their own procedure for\n generating neutrino vertices in some volume.\n\n Raises\n ------\n NotImplementedError\n Always, unless a subclass overrides the function.\n\n \"\"\"\n logger.debug(\"Using default get_vertex from \"+\n \"pyrex.generation.Generator\")\n raise NotImplementedError(\"get_vertex method must be implemented by \"\n +\"inheriting class\")\n\n def get_direction(self):\n \"\"\"\n Get the direction of the next particle to be generated.\n\n Randomly generates a cartesian unit vector uniformly distributed over\n the unit sphere.\n\n Returns\n -------\n ndarray\n (Unit) vector direction.\n\n Notes\n -----\n Generates random vector direction by pulling from uniform distributions\n for -1<cos(theta)<1 and 0<phi<2*pi.\n\n \"\"\"\n cos_theta = np.random.random_sample()*2-1\n sin_theta = np.sqrt(1 - cos_theta**2)\n phi = np.random.random_sample() * 2*np.pi\n return np.array([sin_theta * np.cos(phi),\n sin_theta * np.sin(phi),\n cos_theta])\n\n def get_particle_type(self):\n \"\"\"\n Get the particle type of the next particle to be generated.\n\n Randomly generates a neutrino flavor according to the flavor ratio of\n the generator, and chooses neutrino or antineutrino based on ratios\n derived from the source type.\n\n Returns\n -------\n Particle.Type\n Enum value for the type of the 
particle.\n\n See Also\n --------\n pyrex.Particle : Class for storing particle attributes.\n\n Notes\n -----\n The neutrino/antineutrino choice is based on Section 3 of [1]_.\n\n References\n ----------\n .. [1] A. Bhattacharya et al, \"The Glashow resonance at IceCube.\"\n JCAP **1110**, 017 (2011). :arxiv:`1108.3163`\n :doi:`10.1088/1475-7516/2011/10/017`\n\n \"\"\"\n rand_flavor = np.random.rand()\n rand_nunubar = np.random.rand()\n if self.source==self.SourceType.cosmogenic:\n nunubar_ratios = [0.78, 0.61, 0.61]\n elif self.source==self.SourceType.astrophysical:\n nunubar_ratios = [0.5, 0.5, 0.5]\n else:\n raise ValueError(\"Source type not supported\")\n\n # Electron neutrinos\n if rand_flavor<self.ratio[0]:\n if rand_nunubar<nunubar_ratios[0]:\n return Particle.Type.electron_neutrino\n else:\n return Particle.Type.electron_antineutrino\n # Muon neutrinos\n elif rand_flavor<self.ratio[0]+self.ratio[1]:\n if rand_nunubar<nunubar_ratios[1]:\n return Particle.Type.muon_neutrino\n else:\n return Particle.Type.muon_antineutrino\n # Tau neutrinos\n else:\n if rand_nunubar<nunubar_ratios[2]:\n return Particle.Type.tau_neutrino\n else:\n return Particle.Type.tau_antineutrino\n\n def get_exit_points(self, particle):\n \"\"\"\n Get the intersections of the particle path with the ice volume edges.\n\n For the `Generator` class, this method is not implemented.\n Subclasses should override this method with their own procedure for\n calculating exit points given the generation volume.\n\n Parameters\n ----------\n particle : Particle\n Particle traveling through the ice.\n\n Raises\n ------\n NotImplementedError\n Always, unless a subclass overrides the function.\n\n See Also\n --------\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n logger.debug(\"Using default get_exit_points from \"+\n \"pyrex.generation.Generator\")\n raise NotImplementedError(\"get_exit_points method must be implemented \"\n +\"by inheriting class\")\n\n def get_weights(self, particle):\n \"\"\"\n Get the weighting factors to be applied to the particle.\n\n Calculates both the survival and interaction weights of `particle`.\n The survival weight is based on the probability of interaction along\n the path through the Earth. 
The interaction weight of `particle` based\n on the probability of interaction at its given vertex in the ice\n volume.\n\n Parameters\n ----------\n particle : Particle\n Particle to be weighted.\n\n Returns\n -------\n survival_weight : float\n Survival weight of the given `particle`.\n interaction_weight : float\n Interaction weight of the given `particle`.\n\n See Also\n --------\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n t = self.earth_model.slant_depth(particle.vertex, -particle.direction)\n x = t / particle.interaction.total_interaction_length\n survival_weight = np.exp(-x)\n\n entry_point, exit_point = self.get_exit_points(particle)\n in_ice_vector = np.array(exit_point) - np.array(entry_point)\n in_ice_length = np.sqrt(np.sum(in_ice_vector**2))\n vertex_vector = particle.vertex - np.array(entry_point)\n travel_length = np.sqrt(np.sum(vertex_vector**2))\n # Convert cm water equivalent interaction length to meters in ice\n interaction_length = (particle.interaction.total_interaction_length\n / 0.92 / 100)\n interaction_weight = (in_ice_length/interaction_length *\n np.exp(-travel_length/interaction_length))\n\n return survival_weight, interaction_weight\n\n\n def create_event(self):\n \"\"\"\n Generate a neutrino event in the ice volume.\n\n Creates a neutrino with a random vertex in the volume, a random\n direction, and an energy based on ``get_energy``. Particle type is\n randomly chosen, and its interaction type is also randomly chosen based\n on the branching ratio. Weights the particles according to their\n survival probability through the Earth and their probability of\n interacting in the ice at their vertex. If Earth shadowing has been\n turned on then particles which don't survive transit through the Earth\n are skipped, and surviving particles are given a survival weight of 1.\n Currently each `Event` returned consists of only a single `Particle`.\n\n Returns\n -------\n Event\n Random neutrino event not shadowed by the Earth.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n self.count += 1\n vtx = self.get_vertex()\n u = self.get_direction()\n E = self.get_energy()\n particle_id = self.get_particle_type()\n particle = Particle(particle_id=particle_id, vertex=vtx, direction=u,\n energy=E, interaction_model=self.interaction_model)\n\n weights = self.get_weights(particle)\n if not self.shadow:\n particle.survival_weight = weights[0]\n particle.interaction_weight = weights[1]\n logger.debug(\"Successfully created %s with survival weight %d and \"\n +\"interaction weight %d\", particle, weights[0],\n weights[1])\n return Event(particle)\n elif np.random.rand() < weights[0]:\n particle.survival_weight = 1\n particle.interaction_weight = weights[1]\n logger.debug(\"Successfully created %s with survival weight %d and \"\n +\"interaction weight %d\", particle, weights[0],\n weights[1])\n return Event(particle)\n else:\n # Particle was shadowed by the earth. Try again\n logger.debug(\"Particle creation shadowed by the Earth\")\n return self.create_event()\n\n\nclass CylindricalGenerator(Generator):\n \"\"\"\n Class to generate neutrino vertices in a cylindrical ice volume.\n\n Generates neutrinos in a cylinder with given radius and height.\n\n Parameters\n ----------\n dr : float\n Radius of the ice volume. Neutrinos generated within (0, `dr`).\n dz : float\n Height of the ice volume in the z-direction. 
Neutrinos generated within\n (-`dz`, 0).\n energy : float or function\n Energy (GeV) of the neutrinos. If ``float``, all neutrinos have the\n same constant energy. If ``function``, neutrinos are generated with the\n energy returned by successive function calls.\n shadow : bool, optional\n Whether Earth shadowing effects should be used to reject events. If\n ``True`` then neutrinos which don't survive transit through the Earth\n will be skipped when creating events. If ``False`` then all events are\n allowed and assigned a weight to scale their probability of occurrence.\n flavor_ratio : array_like, optional\n Flavor ratio of neutrinos to be generated. Of the form [electron, muon,\n tau] neutrino fractions.\n source : optional\n Source type of neutrinos to be generated. Used in the determination of\n per-flavor neutrino/antineutrino fractions.\n interaction_model : optional\n Class to use to describe interactions of the generated particles.\n Should inherit from (or behave like) the base ``Interaction`` class.\n\n Attributes\n ----------\n count : int\n Number of neutrinos produced by the generator, including those not\n returned due to Earth shadowing or other effects.\n dr : float\n Radius of the ice volume. Neutrinos generated within (0, `dr`).\n dz : float\n Height of the ice volume in the z-direction. Neutrinos generated within\n (-`dz`, 0).\n get_energy : function\n Function returning energy (GeV) of the neutrinos by successive function\n calls.\n shadow : bool\n Whether Earth shadowing effects will be used to reject events.\n ratio : ndarray\n (Normalized) flavor ratio of neutrinos to be generated. Of the form\n [electron, muon, tau] neutrino fractions.\n source : Generator.SourceType\n Source type of neutrinos to be generated. Used in the determination of\n per-flavor neutrino/antineutrino fractions.\n interaction_model : Interaction\n Class to use to describe interactions of the generated particles.\n volume\n solid_angle\n\n See Also\n --------\n pyrex.particle.Interaction : Base class for describing neutrino interaction\n attributes.\n\n \"\"\"\n def __init__(self, dr, dz, energy, shadow=False, flavor_ratio=(1,1,1),\n source=\"cosmogenic\", interaction_model=NeutrinoInteraction,\n earth_model=earth):\n self.dr = dr\n self.dz = dz\n super().__init__(energy=energy, shadow=shadow,\n flavor_ratio=flavor_ratio, source=source,\n interaction_model=interaction_model,\n earth_model=earth_model)\n\n @property\n def volume(self):\n \"\"\"\n Generation volume (m^3) in which event vertices are produced.\n\n \"\"\"\n return np.pi * self.dr**2 * self.dz\n\n def get_vertex(self):\n \"\"\"\n Get the vertex of the next particle to be generated.\n\n Randomly generates a vertex uniformly distributed within the specified\n ice volume.\n\n Returns\n -------\n ndarray\n Vector vertex in the ice volume.\n\n \"\"\"\n r = self.dr * np.sqrt(np.random.random_sample())\n theta = 2*np.pi * np.random.random_sample()\n z = -self.dz * np.random.random_sample()\n return np.array([r*np.cos(theta), r*np.sin(theta), z])\n\n def get_exit_points(self, particle):\n \"\"\"\n Get the intersections of the particle path with the ice volume edges.\n\n For the given `particle`, calculates where its travel path intersects\n with the edges of the ice volume.\n\n Parameters\n ----------\n particle : Particle\n Particle traveling through the ice.\n\n Returns\n -------\n enter_point, exit_point : ndarray\n Vector points where the particle's path intersects with the edges\n of the ice volume.\n\n See Also\n --------\n 
pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n enter_point = None\n exit_point = None\n\n # Find the intersection points of the circle, assuming infinite z\n if particle.direction[0]==0:\n x0 = particle.vertex[0]\n y0 = -np.sqrt(self.dr**2 - x0**2)\n z0 = (particle.vertex[2] + (y0-particle.vertex[1])\n * particle.direction[2]/particle.direction[1])\n x1 = particle.vertex[0]\n y1 = np.sqrt(self.dr**2 - x1**2)\n z1 = (particle.vertex[2] + (y1-particle.vertex[1])\n * particle.direction[2]/particle.direction[1])\n else:\n slope = particle.direction[1]/particle.direction[0]\n a = 1 + slope**2\n b = particle.vertex[1] - slope*particle.vertex[0]\n x0 = - (slope*b + np.sqrt(-b**2 + a*self.dr**2)) / a\n y0 = (particle.vertex[1] - slope *\n (particle.vertex[0] + np.sqrt(-b**2 + a*self.dr**2))) / a\n z0 = (particle.vertex[2] + (x0-particle.vertex[0])\n * particle.direction[2]/particle.direction[0])\n x1 = (-slope*b + np.sqrt(-b**2 + a*self.dr**2)) / a\n y1 = (particle.vertex[1] + slope *\n (-particle.vertex[0] + np.sqrt(-b**2 + a*self.dr**2))) / a\n z1 = (particle.vertex[2] + (x1-particle.vertex[0])\n * particle.direction[2]/particle.direction[0])\n\n for pt in ([x0, y0, z0], [x1, y1, z1]):\n # Check for intersections at the top & bottom that supersede the\n # intersections at the sides\n z = None\n if pt[2]>0:\n z = 0\n elif pt[2]<-self.dz:\n z = -self.dz\n if z is not None:\n pt[0] = (particle.vertex[0] + (z-particle.vertex[2])\n * particle.direction[0]/particle.direction[2])\n pt[1] = (particle.vertex[1] + (z-particle.vertex[2])\n * particle.direction[1]/particle.direction[2])\n pt[2] = z\n pt = np.array(pt)\n # Sort into enter and exit points based on particle direction\n nonzero = particle.direction!=0\n direction = ((pt[nonzero]-particle.vertex[nonzero])\n /particle.direction[nonzero])\n if np.all(direction<0):\n enter_point = pt\n elif np.all(direction>0):\n exit_point = pt\n elif np.all(direction==0):\n if enter_point is None:\n enter_point = pt\n if exit_point is None:\n exit_point = pt\n\n if enter_point is not None and exit_point is not None:\n return enter_point, exit_point\n else:\n raise ValueError(\"Could not determine exit points\")\n\n\nclass RectangularGenerator(Generator):\n \"\"\"\n Class to generate neutrino vertices in a rectangular ice volume.\n\n Generates neutrinos in a box with given width, length, and height.\n\n Parameters\n ----------\n dx : float\n Width of the ice volume in the x-direction. Neutrinos generated within\n (-`dx` / 2, `dx` / 2).\n dy : float\n Length of the ice volume in the y-direction. Neutrinos generated within\n (-`dy` / 2, `dy` / 2).\n dz : float\n Height of the ice volume in the z-direction. Neutrinos generated within\n (-`dz`, 0).\n energy : float or function\n Energy (GeV) of the neutrinos. If ``float``, all neutrinos have the\n same constant energy. If ``function``, neutrinos are generated with the\n energy returned by successive function calls.\n shadow : bool, optional\n Whether Earth shadowing effects should be used to reject events. If\n ``True`` then neutrinos which don't survive transit through the Earth\n will be skipped when creating events. If ``False`` then all events are\n allowed and assigned a weight to scale their probability of occurrence.\n flavor_ratio : array_like, optional\n Flavor ratio of neutrinos to be generated. Of the form [electron, muon,\n tau] neutrino fractions.\n source : optional\n Source type of neutrinos to be generated. 
Used in the determination of\n per-flavor neutrino/antineutrino fractions.\n interaction_model : optional\n Class to use to describe interactions of the generated particles.\n Should inherit from (or behave like) the base ``Interaction`` class.\n\n Attributes\n ----------\n count : int\n Number of neutrinos produced by the generator, including those not\n returned due to Earth shadowing or other effects.\n dx : float\n Width of the ice volume in the x-direction. Neutrinos generated within\n (-`dx` / 2, `dx` / 2).\n dy : float\n Length of the ice volume in the y-direction. Neutrinos generated within\n (-`dy` / 2, `dy` / 2).\n dz : float\n Height of the ice volume in the z-direction. Neutrinos generated within\n (-`dz`, 0).\n get_energy : function\n Function returning energy (GeV) of the neutrinos by successive function\n calls.\n shadow : bool\n Whether Earth shadowing effects will be used to reject events.\n ratio : ndarray\n (Normalized) flavor ratio of neutrinos to be generated. Of the form\n [electron, muon, tau] neutrino fractions.\n source : Generator.SourceType\n Source type of neutrinos to be generated. Used in the determination of\n per-flavor neutrino/antineutrino fractions.\n interaction_model : Interaction\n Class to use to describe interactions of the generated particles.\n volume\n solid_angle\n\n See Also\n --------\n pyrex.particle.Interaction : Base class for describing neutrino interaction\n attributes.\n\n \"\"\"\n def __init__(self, dx, dy, dz, energy, shadow=False, flavor_ratio=(1,1,1),\n source=\"cosmogenic\", interaction_model=NeutrinoInteraction,\n earth_model=earth):\n self.dx = dx\n self.dy = dy\n self.dz = dz\n super().__init__(energy=energy, shadow=shadow,\n flavor_ratio=flavor_ratio, source=source,\n interaction_model=interaction_model,\n earth_model=earth_model)\n\n @property\n def volume(self):\n \"\"\"\n Generation volume (m^3) in which event vertices are produced.\n\n \"\"\"\n return self.dx * self.dy * self.dz\n\n def get_vertex(self):\n \"\"\"\n Get the vertex of the next particle to be generated.\n\n Randomly generates a vertex uniformly distributed within the specified\n ice volume.\n\n Returns\n -------\n ndarray\n Vector vertex in the ice volume.\n\n \"\"\"\n return np.random.uniform(low=(-self.dx/2, -self.dy/2, -self.dz),\n high=(self.dx/2, self.dy/2, 0))\n\n def get_exit_points(self, particle):\n \"\"\"\n Get the intersections of the particle path with the ice volume edges.\n\n For the given `particle`, calculates where its travel path intersects\n with the edges of the ice volume.\n\n Parameters\n ----------\n particle : Particle\n Particle traveling through the ice.\n\n Returns\n -------\n enter_point, exit_point : ndarray\n Vector points where the particle's path intersects with the edges\n of the ice volume.\n\n See Also\n --------\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n enter_point = None\n exit_point = None\n sides = ((-self.dx/2, self.dx/2),\n (-self.dy/2, self.dy/2),\n (-self.dz, 0))\n for count in range(6):\n coord = int(count/2)\n min_max = count%2\n if particle.direction[coord]==0:\n continue\n scale = ((sides[coord][min_max] - particle.vertex[coord]) /\n particle.direction[coord])\n intersection = particle.vertex + particle.direction * scale\n valid = True\n for i, pair in enumerate(sides):\n if i==coord:\n continue\n if intersection[i]<pair[0] or intersection[i]>pair[1]:\n valid = False\n if valid:\n sign = 1 if min_max==1 else -1\n if sign*particle.direction[coord]<0:\n enter_point = intersection\n else:\n 
exit_point = intersection\n if enter_point is not None and exit_point is not None:\n return enter_point, exit_point\n raise ValueError(\"Could not determine exit points\")\n\n\nclass ListGenerator:\n \"\"\"\n Class to generate neutrino events from a list.\n\n Generates events by simply pulling them from a list of `Event` objects. By\n default returns to the start of the list once the end is reached, but can\n optionally fail after reaching the list's end.\n\n Parameters\n ----------\n events : Event, or list of Event\n List of `Event` objects to draw from. If only a single `Event` object\n is given, creates a list of that event alone.\n loop : boolean, optional\n Whether or not to return to the start of the list after throwing the\n last `Event`. If ``False``, raises an error if trying to throw after\n the last `Event`.\n\n Attributes\n ----------\n count : int\n Number of neutrinos produced by the generator, including those not\n returned due to Earth shadowing or other effects.\n events : list of Event\n List to draw `Event` objects from, sequentially.\n loop : boolean\n Whether or not to loop through the list more than once.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n def __init__(self, events, loop=True):\n if (isinstance(events, Iterable) and\n not isinstance(events, Event)):\n self.events = events\n else:\n self.events = [events]\n for i, event in enumerate(self.events):\n if isinstance(event, Particle):\n self.events[i] = Event(event)\n self.loop = loop\n self._index = 0\n self._additional_counts = 0\n\n @property\n def count(self):\n \"\"\"\n Number of neutrinos produced by the generator.\n\n Count includes events which were not returned due to Earth shadowing\n or other effects.\n\n \"\"\"\n return self._index + self._additional_counts\n\n @count.setter\n def count(self, custom_count):\n self._additional_counts = custom_count - self._index\n\n def create_event(self):\n \"\"\"\n Generate a neutrino event.\n\n Pulls the next `Event` object from the class's list of events.\n\n Returns\n -------\n Event\n Next `Event` object in the list of events.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n Raises\n ------\n StopIteration\n If ``loop`` is ``False`` and the end of the list has been exceeded.\n\n \"\"\"\n if not self.loop and self._index>=len(self.events):\n raise StopIteration(\"No more events to be generated\")\n self._index += 1\n return self.events[(self._index-1)%len(self.events)]\n\n\nclass FileGenerator:\n \"\"\"\n Class to generate neutrino events from simulation file(s).\n\n Generates neutrinos by pulling their attributes from a (list of) simulation\n output file(s). Designed to make reproducing simulations easier.\n\n Parameters\n ----------\n files : str or list of str\n List of file names containing neutrino event information. 
If only a\n single file name is provided, creates a list with that file alone.\n slice_range : int, optional\n Number of events to load into memory at a time from the files.\n Increasing this value should result in an improvement in speed, while\n decreasing this value should result in an improvement in memory\n consumption.\n interaction_model : optional\n Class used to describe the interactions of the stored particles.\n\n Attributes\n ----------\n count : int\n Number of neutrinos produced by the generator, including those not\n returned due to Earth shadowing or other effects.\n files : list of str\n List of file names containing neutrino information.\n\n Warnings\n --------\n This generator only supports `Event` objects containing a single level of\n `Particle` objects. Any dependencies among `Particle` objects will be\n ignored and they will all appear in the root level.\n\n See Also\n --------\n pyrex.particle.Interaction : Base class for describing neutrino interaction\n attributes.\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n def __init__(self, files, slice_range=100,\n interaction_model=NeutrinoInteraction):\n if isinstance(files, str):\n self.files = [files]\n else:\n self.files = files\n self.slice_range = slice_range\n self.interaction_model = interaction_model\n self._file_index = -1\n self._file_counts = [0] * (len(self.files)+1)\n self._load_events()\n\n @property\n def count(self):\n \"\"\"\n Number of neutrinos produced by the generator.\n\n Count includes events which were not returned due to Earth shadowing\n or other effects.\n\n \"\"\"\n return sum(self._file_counts)\n\n @count.setter\n def count(self, custom_count):\n self._file_counts[0] = custom_count - sum(self._file_counts[1:])\n\n def _load_events(self):\n \"\"\"\n Pulls the next chunk of events into memory.\n\n Reads events up to the ``slice_range`` into memory from the current\n file. 
If the current file is exhausted, loads the next file.\n\n Returns\n -------\n list\n List of `Event` objects read from the current file.\n\n Raises\n ------\n StopIteration\n If the end of the last file in the file list has been reached.\n\n \"\"\"\n if self._file_index<0 or self._event_index>=len(self._file):\n self._next_file()\n start = self._event_index\n stop = self._event_index + self.slice_range\n self._event_index += self.slice_range\n if stop>len(self._file):\n stop = len(self._file)\n self._events = []\n self._event_counts = []\n for file_event in self._file[start:stop]:\n info = file_event.get_particle_info()\n particles = []\n for p in info:\n part = Particle(\n particle_id=p['particle_id'],\n vertex=(p['vertex_x'],\n p['vertex_y'],\n p['vertex_z']),\n direction=(p['direction_x'],\n p['direction_y'],\n p['direction_z']),\n energy=p['energy'],\n interaction_model=self.interaction_model,\n interaction_type=p['interaction_kind']\n )\n part.interaction.inelasticity = p['interaction_inelasticity']\n part.interaction.em_frac = p['interaction_em_frac']\n part.interaction.had_frac = p['interaction_had_frac']\n part.survival_weight = p['survival_weight']\n part.interaction_weight = p['interaction_weight']\n particles.append(part)\n self._events.append(Event(particles))\n self._event_counts.append(file_event.total_events_thrown)\n\n def _next_file(self):\n \"\"\"\n Pulls the next file into memory.\n\n Reads in the next file from the ``files`` list and stores its `Event`\n objects in memory.\n\n Raises\n ------\n StopIteration\n If the end of the last file in the file list has been reached.\n\n \"\"\"\n self._file_index += 1\n self._event_index = 0\n if self._file_index>0:\n self._file.close()\n if self._file_index>=len(self.files):\n raise StopIteration(\"No more events to be generated\")\n # Try to open the next file with the appropriate slice range,\n # otherwise just settle for opening it at all\n try:\n self._file = File(self.files[self._file_index], 'r',\n slice_range=self.slice_range)\n except TypeError:\n self._file = File(self.files[self._file_index], 'r')\n self._file.open()\n\n def create_event(self):\n \"\"\"\n Generate a neutrino.\n\n Pulls the next `Event` object from the file(s).\n\n Returns\n -------\n Event\n Next neutrino `Event` object from the file(s).\n\n Raises\n ------\n StopIteration\n If the end of the last file in the file list has been reached.\n\n See Also\n --------\n pyrex.Event : Class for storing a tree of `Particle` objects\n representing an event.\n pyrex.Particle : Class for storing particle attributes.\n\n \"\"\"\n if len(self._events)==0:\n self._load_events()\n self._file_counts[self._file_index+1] = self._event_counts.pop(0)\n return self._events.pop(0)\n",
"\"\"\"\n\n\"\"\"\n\n\nimport pyrex\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nenergies = ['1e8','1e9','1e10','1e11']\n# energies = ['1e8','3e8','1e9','3e9','1e10','3e10','1e11']\nfolders = ['01']\n# folders = ['01','02']\nindices = range(1)\n#indices = range(20)\n\nplot_dir = 'missed_events_TD_small/'\n# plot_dir = '/data/user/abishop/ara/coincidences/plots/missed_events_TD/'\n\n\ndef get_df(energy, folder, index, stations):\n \"\"\"\n Pulls data from data_dir, checks that 1 and only 1 event energy is stored\n then creates a dictionary where keys are power threshold and items are \n pandas dataframes with event information\n \"\"\"\n \n data_dir = \"/data/user/abishop/ara/coincidences/0930/compiler/\"\n file_name = (f\"{data_dir}diffTriggers-0930-v2-\"\n f\"{folder}-{energy}-{index}-{stations}Station.npy\")\n \n try: \n event_data = np.load(file_name, allow_pickle=True)[()] \n except FileNotFoundError:\n print(f\"File not found: {file_name}\",\n end='..')\n return None\n \n for p, p_dict in event_data.items():\n if len(list(p_dict.keys())) != 1:\n print(\"Data may not have saved properly in\", file_name)\n print(f\"{len(list(p_dict.keys()))} energies saved\")\n \n energy = list(event_data[list(event_data.keys())[0]].keys())[0]\n return_dict = {p: pd.DataFrame(p_dict[energy]) \n for p, p_dict in event_data.items()}\n \n return return_dict\n\n\n# Get Generation Volumes (they're the same for all energies)\ngeneration_volume = np.pi * 13000**2 * 3000\n\n\nprint(\"Prepping Classic Events\")\n\nclassic = {\n 'power':-9, 'polants':3, 'stations':1,\n 'trigger_desc':\"Coincident Threshold: 2 Stations each with\\nat least 3 antennas Triggering on -9 Power\",\n 'file_name' :\"2station_1ant_power-6\",\n 'vertex' : {e: [] for e in energies},\n 'tot' : {e: 0 for e in energies},\n 'trig' : {e: 0 for e in energies},\n 'weights' : {e: [] for e in energies},\n}\n\nfor energy in energies:\n print(energy, end=': ', flush=True) \n for folder in folders: \n for index in indices: \n \n print(index, end=\"..\", flush=True)\n \n events = get_df(energy, folder, index, 1)\n if events == None: continue\n events = events[classic['power']]\n \n triggered_vertices = [\n list(v) for v in \n events.loc[events['TD triggered']==True]['vertex']\n ]\n classic['vertex'][energy] += triggered_vertices\n classic['tot'][energy] += len(events)\n classic['trig'][energy] += len(triggered_vertices)\n classic['weights'][energy] += list(\n events.loc[events['TD triggered']==True]['survival_weight'] )\n \n print()\n \nprint()\nprint(\"Classic Events:\")\nprint(\"\\t\", classic['trig'])\nprint(\"Total Classic Events:\")\nprint(\"\\t\", classic['tot'])\nprint()\n\n\n\ndef num_triggered(events, \n vpols=None, hpols=None, polants=None, total_ants=None, \n stations=1):\n \"\"\"\n Assuming events is a pandas dataframe (one of the items in the bigger\n events objects, power already having been specified)\n \n pol_ants = either vpols or hpols\n \"\"\"\n \n vertices = [] # neutrino vertex for trig'd events\n weights = [] # neutrino survival weight for trig'd events\n n_trig_stations = [] # total number of triggered stations \n # (double counts stations for coincident events)\n \n for i, event in events.iterrows():\n \n event = events.iloc[i]\n \n # Get all stations with more than 3 antennas \n # of the same polarization triggering\n stations_with_3polants = []\n tcs = event['TD triggered components']\n if tcs == None: continue\n \n for station in tcs['Vpol'].keys():\n \n triggered = False\n \n if vpols != 
None:\n if tcs['Vpol'][station]>=vpols:\n triggered=True\n if hpols != None:\n if tcs['Hpol'][station]>=hpols:\n triggered=True\n if polants != None:\n if tcs['Vpol'][station]>=polants or tcs['Hpol'][station]>=polants:\n triggered=True\n if total_ants != None: \n if tcs['Hpol'][station]+tcs['Vpol'][station] >= total_ants:\n triggered=True\n \n if triggered:\n stations_with_3polants.append(station)\n\n # Sort data first by signals, then by atps\n if len(stations_with_3polants) >= stations: \n vertices.append(list(event['vertex']))\n weights.append(event['survival_weight'])\n n_trig_stations.append(len(stations_with_3polants))\n \n return vertices, weights, n_trig_stations\n\n\n\ndef plots(new_events, coin_events, shared_events, total_events, trigger_desc, file_name):\n \n fig, ax = plt.subplots()\n \n ax.errorbar([float(e) for e in energies], \n [n[0] for e, n in new_events.items()])\n ax.set_xscale('log')\n ax.set_ylabel(\"Number of New Events\")\n ax.set_xlabel(\"Energy (GeV)\")\n ax.set_title(trigger_desc)\n \n plt.tight_layout()\n plt.savefig(plot_dir+'numberNew_'+file_name+'.jpg', dpi=300)\n plt.savefig(plot_dir+'numberNew_'+file_name+'.pdf') \n plt.close()\n \n \n fig, ax = plt.subplots()\n\n shared_veffs = []\n classic_veffs = []\n new_veffs = []\n summed_veffs = []\n for energy in energies:\n\n shared_veff = ( sum(shared_events[energy][1]) \n / classic['tot'][energy] \n * generation_volume * 4 * np.pi)\n error = ( np.sqrt(sum(shared_events[energy][1])) \n / classic['tot'][energy] \n * generation_volume * 4 * np.pi)\n shared_veffs.append([shared_veff, error])\n\n classic_veff = ( sum(classic['weights'][energy]) \n / classic['tot'][energy] \n * generation_volume * 4 * np.pi)\n error = ( np.sqrt(sum(classic['weights'][energy])) \n / classic['tot'][energy] \n * generation_volume * 4 * np.pi)\n classic_veffs.append([classic_veff, error])\n\n new_veff = ( sum(new_events[energy][1]) \n / classic['tot'][energy]\n * generation_volume * 4 * np.pi)\n error = (np.sqrt(sum(new_events[energy][1]))\n / classic['tot'][energy]\n * generation_volume * 4 * np.pi)\n new_veffs.append([new_veff, error])\n\n summed_veff = ((sum(classic['weights'][energy])+sum(new_events[energy][1]) )\n / classic['tot'][energy]\n * generation_volume * 4 * np.pi)\n error = (np.sqrt(sum(classic['weights'][energy])+sum(new_events[energy][1]) )\n / classic['tot'][energy]\n * generation_volume * 4 * np.pi)\n summed_veffs.append([summed_veff, error])\n shared_veffs = np.array(shared_veffs)\n summed_veffs = np.array(summed_veffs)\n classic_veffs = np.array(classic_veffs)\n new_veffs = np.array(new_veffs)\n\n ax.errorbar([float(e) for e in energies], summed_veffs[:,0], \n yerr=summed_veffs[:,1], \n label='Coincident + ARA Analysis',\n c=\"#6D13C3\")\n ax.errorbar([float(e) for e in energies], classic_veffs[:,0], \n yerr=classic_veffs[:,1], \n label=r\"ARA Analysis: 1 Station, 3 Antennas on 9 $\\sigma$\",\n c=\"#FF590A\")\n ax.errorbar([float(e) for e in energies], shared_veffs[:,0], \n yerr=shared_veffs[:,1], label='Coincident Events in ARA Analysis',\n c=\"#FF590A\", linestyle='--')\n ax.errorbar([float(e) for e in energies], new_veffs[:,0], \n yerr=new_veffs[:,1], \n label=trigger_desc,\n c=\"k\")\n \n ax.set_xscale('log')\n# ax.set_yscale('log')\n ax.set_xlabel(\"Energy (GeV)\")\n ax.set_ylabel(f\"Effective Volume (km$^3$ sr)\")\n ax.set_title(trigger_desc)\n \n plt.legend()\n plt.tight_layout()\n plt.savefig(plot_dir+'veff_'+file_name+'.jpg', dpi=300)\n plt.savefig(plot_dir+'veff_'+file_name+'.pdf') \n plt.close()\n\n \n # Not 
sure how to get error of the percent difference\n fig, ax = plt.subplots()\n\n ax.errorbar([float(e) for e in energies], \n [(n-o)/o*100 for o,n in zip(classic_veffs[:,0], new_veffs[:,0])],\n # yerr=[(n-o)/o*100 for o,n in zip(old_veffs[:,1], new_veffs[:,1])]\n )\n\n ax.set_ylabel(\"Percent Change in Veff\")\n ax.set_xlabel(\"Energy (GeV)\")\n ax.set_xscale(\"log\")\n ax.set_title(trigger_desc)\n\n plt.tight_layout()\n plt.savefig(plot_dir+'veffChange_'+file_name+'.jpg', dpi=300)\n plt.savefig(plot_dir+'veffChange_'+file_name+'.pdf') \n plt.close()\n \n \n\n# These are the analyses we'll be plotting and investigating\nanalyzed_data = [\n\n {'stations':1, 'polants':3, 'power':-9, \n 'trigger_desc': r\"ARA Analysis: 1 Station, 3 Antennas on -9$\\sigma$\",\n 'file_name':\"1station_3ant_power-9\" },\n \n {'stations':1, 'polants':3, 'power':-6, \n 'trigger_desc': r\"ARA Trigger: 1 Station, 3 Antennas on -6$\\sigma$\",\n 'file_name':\"1station_3ant_power-6\" },\n \n \n {'stations':2, 'polants':1, 'power':-7, \n 'trigger_desc': r\"Coincident: 2 Stations, 1 Antenna on -7$\\sigma$\",\n 'file_name':\"2station_1ant_power-7\" },\n \n {'stations':2, 'polants':1, 'power':-8, \n 'trigger_desc': r\"Coincident: 2 Stations, 1 Antenna on -8$\\sigma$\",\n 'file_name':\"2station_1ant_power-8\" },\n \n {'stations':2, 'polants':2, 'power':-6, \n 'trigger_desc': r\"Coincident: 2 Stations, 2 Antennas on -6$\\sigma$\",\n 'file_name':\"2station_2ant_power-6\" },\n \n {'stations':2, 'polants':3, 'power':-6, \n 'trigger_desc': r\"Coincident: 2 Stations, 3 Antennas on -6$\\sigma$\",\n 'file_name':\"2station_3ant_power-6\" },\n \n \n {'stations':3, 'polants':1, 'power':-6, \n 'trigger_desc': r\"Coincident: 3 Stations, 1 Antenna on -6$\\sigma$\",\n 'file_name':\"3station_1ant_power-6\" },\n \n {'stations':3, 'polants':1, 'power':-7, \n 'trigger_desc': r\"Coincident: 3 Stations, 1 Antenna on -7$\\sigma$\",\n 'file_name':\"3station_1ant_power-7\" },\n \n {'stations':3, 'polants':1, 'power':-8, \n 'trigger_desc': r\"Coincident: 3 Stations, 1 Antenna on -8$\\sigma$\",\n 'file_name':\"3station_1ant_power-8\" },\n\n]\n\n# Add variables for standard data to each of the analysis dictionaries\nfor ad in analyzed_data: \n# ad['vertex'] = {e: [] for e in energies}\n ad['total_events'] = {e: 0 for e in energies}\n ad['new_events'] = {e: [0,[],[]] for e in energies}\n ad['coin_events'] = {e: [0,[],[]] for e in energies}\n ad['shared_events'] = {e: [0,[],[]] for e in energies}\n # for the 3 above, index 0 = number of triggered events, \n # index 1 = survival weights of triggered events\n # index 2 = number of stations triggered per event\n\n\nfor energy in energies: \n print(energy, end=': ', flush=True)\n for folder in folders: \n for index in indices: \n \n print(index, end=\"..\", flush=True)\n \n all_events = get_df(energy, folder, index, 1)\n if all_events == None: continue\n\n for d in analyzed_data:\n events = all_events[d['power']]\n \n coin_vtx, c_weights, n_stations = num_triggered(\n events, polants=d['polants'], stations=d['stations'] )\n\n new = []\n new_weights = []\n new_n_stations = []\n shared = []\n shared_weights = []\n shared_n_stations = []\n for i, event in enumerate(coin_vtx): \n if event not in classic['vertex'][energy]:\n new.append(event)\n new_weights.append(c_weights[i])\n new_n_stations.append(n_stations[i])\n else:\n shared.append(event)\n shared_weights.append(c_weights[i])\n shared_n_stations.append(n_stations[i])\n\n d['new_events'][energy][0] += len(new)\n d['coin_events'][energy][0] += 
len(coin_vtx)\n d['shared_events'][energy][0] += len(shared)\n d['total_events'][energy] += len(events)\n\n d['coin_events'][energy][1] += c_weights\n d['new_events'][energy][1] += new_weights\n d['shared_events'][energy][1] += shared_weights \n\n d['coin_events'][energy][2] += n_stations\n d['new_events'][energy][2] += new_n_stations\n d['shared_events'][energy][2] += shared_n_stations \n \n# d['vertex'][energy] += coin_vtx \n \n print()\n\n \nfor analysis in analyzed_data: \n print(analysis['trigger_desc'])\n print(\"New Events:\")\n print(\"\\t\", {e: n[0] for e, n in analysis['new_events'].items()})\n print(\"Total (at least) Single Station Events\")\n print(\"\\t\", {e: n[0] for e, n in analysis['shared_events'].items()})\n print(\"Total Coincident Events:\")\n print(\"\\t\", {e: n[0] for e, n in analysis['coin_events'].items()})\n print(\"Total Simulated Events:\")\n print(\"\\t\", d['total_events'])\n print()\n\n plots(analysis['new_events'], analysis['coin_events'], \n analysis['shared_events'], analysis['total_events'], \n analysis['trigger_desc'], analysis['file_name'])\n\n\n# Plot Single Station event rate increases with coincident events\n\nindices_to_analyze = [0,1]\nplotters = {}\nfor i in indices_to_analyze:\n if analyzed_data[i]['stations'] > 1: \n print(f\"{analyzed_data[i]['stations']} stations required in single station plot analysis\")\n print(\"You may have made a mistake in indices_to_analyze array\")\n \n label = (f\"{analyzed_data[i]['stations']} Stations, \"\n f\"{analyzed_data[i]['polants']} Antenna on \"\n f\"{abs(analyzed_data[i]['power'])}\"+r\"$\\sigma$\")\n \n plotters[label] = []\n for e in energies: \n events = analyzed_data[i]['coin_events'][e]\n plotters[label].append( ( sum(events[2]) - len(events[2]) )\n / len(events[2]) * 100 )\n \nfig, ax = plt.subplots() \nfor label, plotter in plotters.items():\n ax.plot([float(e) for e in energies], plotter, label=label) \nax.set_xscale('log')\nax.set_title(\"Percent Increase in Event Rate\\n\"\n \"when Double Counting Coincident Events\")\nax.set_xlabel(\"Energy (GeV)\")\nplt.legend(title=\"Station Trigger Setting\")\nplt.tight_layout()\nplt.savefig(plot_dir+'eventRateInc_1station.jpg', dpi=300)\nplt.savefig(plot_dir+'eventRateInc_1station.pdf') \nplt.close()\n\n\n# Plot event rate increase for new coincident events + ARA analysis events\nindices_to_analyze = [0, 2, 7]\nplotters = {}\nfor i in indices_to_analyze: \n if (analyzed_data[i]['stations'] == 1 and analyzed_data[i]['polants'] == 3\n and abs(analyzed_data[i]['power']) == 9):\n label = r\"Just ARA Analysis: 3 antennas on 9$\\sigma$\"\n else:\n label = (f\"{analyzed_data[i]['stations']} Stations, \"\n f\"{analyzed_data[i]['polants']} Antenna on \"\n f\"{abs(analyzed_data[i]['power'])}\"+r\"$\\sigma$ \"\n f\"+ ARA Analysis\")\n plotters[label] = []\n for e in energies: \n new_events = analyzed_data[i]['new_events'][e]\n shared_events = analyzed_data[i]['shared_events'][e]\n classic_events = classic['trig'][e]\n \n stations_trigd = ( sum(new_events[2]) + sum(shared_events[2]) \n + (classic_events - len(shared_events[2])) )\n total_events = len(new_events[2]) + classic_events\n \n plotters[label].append( ( stations_trigd - total_events )\n / total_events * 100 )\n \nfig, ax = plt.subplots() \nfor label, plotter in plotters.items():\n ax.plot([float(e) for e in energies], plotter, label=label) \nax.set_xscale('log')\nax.set_title(\"Percent Increase in Neutrino Event Rate\\n\"\n \"when Double Counting Coincident Events\")\nax.set_xlabel(\"Energy 
(GeV)\")\nplt.legend(title=\"Trigger Setting\")\nplt.tight_layout()\nplt.savefig(plot_dir+'eventRateInc_multipleStation.jpg', dpi=300)\nplt.savefig(plot_dir+'eventRateInc_multipleStation.pdf') \nplt.close()"
] |
[
[
"numpy.radians",
"numpy.abs",
"numpy.linspace",
"numpy.degrees",
"numpy.vdot"
],
[
"numpy.sqrt",
"numpy.random.random_sample",
"numpy.cos",
"numpy.sin",
"numpy.all",
"numpy.random.rand",
"numpy.exp",
"numpy.random.uniform",
"numpy.array",
"numpy.sum"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.array"
]
] |
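The analysis script in this row leans on one estimator throughout: the Monte Carlo effective volume, sum(survival weights) / N_thrown * V_generation * 4*pi, with a sqrt(sum(w))-style error bar. A minimal self-contained sketch of that calculation follows; the weights and event count are placeholder values, not real simulation output.

import numpy as np

# Cylindrical generation volume used in the script above (m^3).
generation_volume = np.pi * 13000**2 * 3000

def effective_volume(survival_weights, n_thrown):
    # Veff = sum(w) / N * V_gen * 4*pi; the error swaps sum(w) for sqrt(sum(w)).
    w_sum = float(np.sum(survival_weights))
    veff = w_sum / n_thrown * generation_volume * 4 * np.pi
    error = np.sqrt(w_sum) / n_thrown * generation_volume * 4 * np.pi
    return veff, error

veff, err = effective_volume([0.8, 0.5, 0.9], n_thrown=10_000)  # placeholder values
print(f"Veff = {veff:.3e} +/- {err:.3e} m^3 sr")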
vanashimko/digital-signals-modeling
|
[
"31ec88759194c332a5fffeeba4f41126f59f307d"
] |
[
"main.py"
] |
[
"from math import pi\n\nimport matplotlib.pyplot as plt\n\nimport drawer\nfrom signals import HarmonicParameters, harmonic_signal, polyharmonic_signal, polyharmonic_linear\n\n\ndef main():\n results = []\n for i, viewer in enumerate([show_phases, show_frequencies, show_amplitudes, show_polyharmonic_linear]):\n plt.subplot(2, 2, i + 1)\n results.append(viewer())\n results.append(show_polyharmonic())\n plt.show()\n\n\ndef show_phases():\n phases = [0, pi, pi/6, pi/4, pi/2]\n params = lambda x: HarmonicParameters(amplitude=10, phase=x, frequency=2)\n drawer.draw_plots(r'\\phi', harmonic_signal, params, phases)\n\n\ndef show_frequencies():\n params = lambda x: HarmonicParameters(amplitude=3, phase=pi/2, frequency=x)\n frequencies = [5, 4, 2, 6, 3]\n drawer.draw_plots(r'f', harmonic_signal, params, frequencies)\n\n\ndef show_amplitudes():\n params = lambda x: HarmonicParameters(amplitude=x, phase=pi/2, frequency=1)\n amplitudes = [2, 3, 6, 5, 1]\n drawer.draw_plots(r'a', harmonic_signal, params, amplitudes)\n\n\ndef show_polyharmonic():\n params = list(map(lambda x: HarmonicParameters(*x), [\n (1, 0, 1),\n (1, pi/4, 2),\n (1, pi/6, 3),\n (1, 2*pi, 4),\n (1, pi, 5)\n ]))\n return drawer.draw_polyharmonic(polyharmonic_signal, params)\n\n\ndef show_polyharmonic_linear():\n params = HarmonicParameters(10, 0, 10)\n drawer.draw_plot(polyharmonic_linear, params)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
] |
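main.py above imports HarmonicParameters and harmonic_signal from a signals module that is not part of this row, so their exact definitions are unknown; the following is a plausible minimal reconstruction inferred only from the call sites, and everything in it (field order, sample count, formula) is an assumption.

from math import pi, sin
from collections import namedtuple

# Field order inferred from calls like HarmonicParameters(*(1, pi/4, 2)) in main.py.
HarmonicParameters = namedtuple('HarmonicParameters', ['amplitude', 'phase', 'frequency'])

def harmonic_signal(params, n=512):
    # x[i] = A * sin(2*pi*f*i/n + phi); n samples per window (n=512 is a guess).
    return [params.amplitude * sin(2 * pi * params.frequency * i / n + params.phase)
            for i in range(n)]

signal = harmonic_signal(HarmonicParameters(amplitude=10, phase=0, frequency=2))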
anqif/a2dr
|
[
"b101b13c17448f43c5c9bb3ec6bcdf18aca73a66"
] |
[
"a2dr/tests/test_precondition.py"
] |
[
"\"\"\"\nCopyright 2019 Anqi Fu, Junzi Zhang\n\nThis file is part of A2DR.\n\nA2DR is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nA2DR is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with A2DR. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport numpy as np\nimport scipy as sp\nfrom scipy import sparse\n\nfrom a2dr import a2dr\nfrom a2dr.proximal import prox_norm1, prox_sum_squares_affine\nfrom a2dr.precondition import precondition\nfrom a2dr.tests.base_test import BaseTest\n\nclass TestPrecondition(BaseTest):\n \"\"\"Unit tests for preconditioning data before S-DRS\"\"\"\n\n def setUp(self):\n np.random.seed(1)\n self.MAX_ITERS = 1000\n\n def test_precond_l1_trend_filter(self):\n # Problem data.\n N = 2\n n0 = 2*10**4\n n = 2*n0-2\n m = n0-2\n y = np.random.randn(n)\n alpha = 0.1*np.linalg.norm(y, np.inf)\n\n # Form second difference matrix.\n D = sparse.lil_matrix(sparse.eye(n0))\n D.setdiag(-2, k = 1)\n D.setdiag(1, k = 2)\n D = D[:(n0-2),:]\n\n # Convert problem to standard form.\n # f_1(x_1) = (1/2)||y - x_1||_2^2, f_2(x_2) = \\alpha*||x_2||_1.\n # A_1 = D, A_2 = -I_{n-2}, b = 0.\n prox_list = [lambda v, t: (t*y + v)/(t + 1.0), lambda v, t: prox_norm1(v, t = alpha*t)]\n A_list = [D, -sparse.eye(n0-2)]\n b = np.zeros(n0-2)\n\n b = np.random.randn(m)\n prox_list = [prox_norm1] * N\n A = sparse.csr_matrix(sparse.hstack(A_list))\n \n p_eq_list, A_eq_list, db, e = precondition(prox_list, A_list, b)\n A_eq = sparse.csr_matrix(sparse.hstack(A_eq_list))\n \n print(r'[Sanity Check]')\n print(r'\\|A\\|_2 = {}, \\|DAE\\|_2 = {}'.format(sparse.linalg.norm(A), sparse.linalg.norm(A_eq)))\n print(r'min(|A|) = {}, max(|A|) = {}, mean(|A|) = {}'.format(np.min(np.abs(A)), \n np.max(np.abs(A)), sparse.csr_matrix.mean(np.abs(A))))\n print(r'min(|DAE|) = {}, max(|DAE|) = {}, mean(|DAE|) = {}'.format(np.min(np.abs(A_eq)), \n np.max(np.abs(A_eq)), sparse.csr_matrix.mean(np.abs(A_eq))))\n\n def test_nnls(self):\n # Solve the non-negative least squares problem\n # Minimize (1/2)*||A*x - b||_2^2 subject to x >= 0.\n m = 100\n n = 10\n N = 1 # Number of nodes (split A row-wise)\n\n # Problem data.\n mu = 100\n sigma = 10\n X = mu + sigma*np.random.randn(m,n)\n y = mu + sigma*np.random.randn(m)\n\n # Solve with SciPy.\n sp_result = sp.optimize.nnls(X, y)\n sp_beta = sp_result[0]\n sp_obj = sp_result[1] ** 2 # SciPy objective is ||y - X\\beta||_2.\n print(\"Scipy Objective:\", sp_obj)\n print(\"SciPy Solution:\", sp_beta)\n\n X_split = np.split(X, N)\n y_split = np.split(y, N)\n p_list = [lambda v, t: prox_sum_squares_affine(v, t, F=X_sub, g=y_sub, method=\"lsqr\") \\\n for X_sub, y_sub in zip(X_split, y_split)]\n p_list += [lambda u, rho: np.maximum(u, 0)] # Projection onto non-negative orthant.\n A_list = np.hsplit(np.eye(N*n), N) + [-np.vstack(N*(np.eye(n),))]\n b = np.zeros(N*n)\n\n # Solve with A2DR.\n a2dr_result = a2dr(p_list, A_list, b, anderson=True, precond=False, max_iter=self.MAX_ITERS)\n a2dr_beta = a2dr_result[\"x_vals\"][-1]\n a2dr_obj = np.sum((y - X.dot(a2dr_beta))**2)\n print(\"A2DR Objective:\", a2dr_obj)\n print(\"A2DR Solution:\", a2dr_beta)\n 
self.assertAlmostEqual(sp_obj, a2dr_obj)\n self.assertItemsAlmostEqual(sp_beta, a2dr_beta, places=3)\n\n # Solve with preconditioned A2DR.\n cond_result = a2dr(p_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITERS)\n cond_beta = cond_result[\"x_vals\"][-1]\n cond_obj = np.sum((y - X.dot(cond_beta))**2)\n print(\"Preconditioned A2DR Objective:\", cond_obj)\n print(\"Preconditioned A2DR Solution:\", cond_beta)\n self.assertAlmostEqual(sp_obj, cond_obj)\n self.assertItemsAlmostEqual(sp_beta, cond_beta, places=3)\n"
] |
[
[
"numpy.split",
"numpy.maximum",
"numpy.abs",
"numpy.random.seed",
"scipy.sparse.eye",
"numpy.eye",
"scipy.optimize.nnls",
"numpy.linalg.norm",
"scipy.sparse.linalg.norm",
"numpy.random.randn",
"scipy.sparse.hstack",
"numpy.zeros"
]
] |
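test_nnls above rewrites non-negative least squares in a2dr's consensus form: one prox for the least-squares term, one projection for the x >= 0 constraint, linked by A_1 x_1 + A_2 x_2 = b with A_1 = I, A_2 = -I, b = 0. A sketch of just that encoding, runnable without a2dr installed (only the SciPy reference solve actually executes):

import numpy as np
from scipy import optimize

np.random.seed(1)
m, n = 100, 10
X = 100 + 10 * np.random.randn(m, n)   # mu=100, sigma=10, as in the test
y = 100 + 10 * np.random.randn(m)

# Prox of the indicator of {x >= 0} is projection onto the non-negative orthant.
prox_nonneg = lambda u, rho: np.maximum(u, 0)

# Consensus constraint x_1 = x_2, written as A_1 x_1 + A_2 x_2 = b.
A_list = [np.eye(n), -np.eye(n)]
b = np.zeros(n)
# A real run would hand [prox_least_squares, prox_nonneg], A_list, b to a2dr().

beta, resid = optimize.nnls(X, y)      # SciPy reference the test compares against
print("reference NNLS objective:", resid ** 2)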
tehunter/pandas
|
[
"c57f883e24405fb4ee561ded1612acf4f4f2bdef"
] |
[
"pandas/core/reshape/reshape.py"
] |
[
"from __future__ import annotations\n\nimport itertools\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.reshape as libreshape\nfrom pandas._libs.sparse import IntIndex\nfrom pandas._typing import (\n Dtype,\n npt,\n)\nfrom pandas.errors import PerformanceWarning\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.cast import maybe_promote\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_1d_only_ea_dtype,\n is_extension_array_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.missing import notna\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import SparseArray\nfrom pandas.core.arrays.categorical import factorize_from_iterable\nfrom pandas.core.construction import ensure_wrapped_if_datetimelike\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n)\nfrom pandas.core.indexes.frozen import FrozenList\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import (\n compress_group_index,\n decons_obs_group_ids,\n get_compressed_ids,\n get_group_index,\n get_group_index_sorter,\n)\n\nif TYPE_CHECKING:\n from pandas.core.arrays import ExtensionArray\n\n\nclass _Unstacker:\n \"\"\"\n Helper class to unstack data / pivot with multi-level index\n\n Parameters\n ----------\n index : MultiIndex\n level : int or str, default last level\n Level to \"unstack\". Accepts a name for the level.\n fill_value : scalar, optional\n Default value to fill in missing values if subgroups do not have the\n same set of labels. By default, missing values will be replaced with\n the default fill value for that data type, NaN for float, NaT for\n datetimelike, etc. For integer types, by default data will converted to\n float and missing values will be set to NaN.\n constructor : object\n Pandas ``DataFrame`` or subclass used to create unstacked\n response. If None, DataFrame will be used.\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... 
('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n\n Returns\n -------\n unstacked : DataFrame\n \"\"\"\n\n def __init__(self, index: MultiIndex, level=-1, constructor=None):\n\n if constructor is None:\n constructor = DataFrame\n self.constructor = constructor\n\n self.index = index.remove_unused_levels()\n\n self.level = self.index._get_level_number(level)\n\n # when index includes `nan`, need to lift levels/strides by 1\n self.lift = 1 if -1 in self.index.codes[self.level] else 0\n\n # Note: the \"pop\" below alters these in-place.\n self.new_index_levels = list(self.index.levels)\n self.new_index_names = list(self.index.names)\n\n self.removed_name = self.new_index_names.pop(self.level)\n self.removed_level = self.new_index_levels.pop(self.level)\n self.removed_level_full = index.levels[self.level]\n\n # Bug fix GH 20601\n # If the data frame is too big, the number of unique index combination\n # will cause int32 overflow on windows environments.\n # We want to check and raise an error before this happens\n num_rows = np.max([index_level.size for index_level in self.new_index_levels])\n num_columns = self.removed_level.size\n\n # GH20601: This forces an overflow if the number of cells is too high.\n num_cells = num_rows * num_columns\n\n # GH 26314: Previous ValueError raised was too restrictive for many users.\n if num_cells > np.iinfo(np.int32).max:\n warnings.warn(\n f\"The following operation may generate {num_cells} cells \"\n f\"in the resulting pandas object.\",\n PerformanceWarning,\n )\n\n self._make_selectors()\n\n @cache_readonly\n def _indexer_and_to_sort(\n self,\n ) -> tuple[\n npt.NDArray[np.intp],\n list[np.ndarray], # each has _some_ signed integer dtype\n ]:\n v = self.level\n\n codes = list(self.index.codes)\n levs = list(self.index.levels)\n to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]\n sizes = tuple(len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]])\n\n comp_index, obs_ids = get_compressed_ids(to_sort, sizes)\n ngroups = len(obs_ids)\n\n indexer = get_group_index_sorter(comp_index, ngroups)\n return indexer, to_sort\n\n @cache_readonly\n def sorted_labels(self):\n indexer, to_sort = self._indexer_and_to_sort\n return [line.take(indexer) for line in to_sort]\n\n def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:\n indexer, _ = self._indexer_and_to_sort\n\n sorted_values = algos.take_nd(values, indexer, axis=0)\n return sorted_values\n\n def _make_selectors(self):\n new_levels = self.new_index_levels\n\n # make the mask\n remaining_labels = self.sorted_labels[:-1]\n level_sizes = tuple(len(x) for x in new_levels)\n\n comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)\n ngroups = len(obs_ids)\n\n comp_index = ensure_platform_int(comp_index)\n stride = self.index.levshape[self.level] + self.lift\n self.full_shape = ngroups, stride\n\n selector = self.sorted_labels[-1] + stride * comp_index + self.lift\n mask = np.zeros(np.prod(self.full_shape), dtype=bool)\n mask.put(selector, True)\n\n if mask.sum() < len(self.index):\n raise ValueError(\"Index contains duplicate entries, cannot reshape\")\n\n self.group_index = comp_index\n self.mask = mask\n self.unique_groups = obs_ids\n self.compressor = comp_index.searchsorted(np.arange(ngroups))\n\n @cache_readonly\n def mask_all(self) -> bool:\n return bool(self.mask.all())\n\n 
@cache_readonly\n def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]:\n # We cache this for re-use in ExtensionBlock._unstack\n dummy_arr = np.arange(len(self.index), dtype=np.intp)\n new_values, mask = self.get_new_values(dummy_arr, fill_value=-1)\n return new_values, mask.any(0)\n # TODO: in all tests we have mask.any(0).all(); can we rely on that?\n\n def get_result(self, values, value_columns, fill_value):\n\n if values.ndim == 1:\n values = values[:, np.newaxis]\n\n if value_columns is None and values.shape[1] != 1: # pragma: no cover\n raise ValueError(\"must pass column labels for multi-column data\")\n\n values, _ = self.get_new_values(values, fill_value)\n columns = self.get_new_columns(value_columns)\n index = self.new_index\n\n return self.constructor(\n values, index=index, columns=columns, dtype=values.dtype\n )\n\n def get_new_values(self, values, fill_value=None):\n\n if values.ndim == 1:\n values = values[:, np.newaxis]\n\n sorted_values = self._make_sorted_values(values)\n\n # place the values\n length, width = self.full_shape\n stride = values.shape[1]\n result_width = width * stride\n result_shape = (length, result_width)\n mask = self.mask\n mask_all = self.mask_all\n\n # we can simply reshape if we don't have a mask\n if mask_all and len(values):\n # TODO: Under what circumstances can we rely on sorted_values\n # matching values? When that holds, we can slice instead\n # of take (in particular for EAs)\n new_values = (\n sorted_values.reshape(length, width, stride)\n .swapaxes(1, 2)\n .reshape(result_shape)\n )\n new_mask = np.ones(result_shape, dtype=bool)\n return new_values, new_mask\n\n dtype = values.dtype\n\n # if our mask is all True, then we can use our existing dtype\n if mask_all:\n dtype = values.dtype\n new_values = np.empty(result_shape, dtype=dtype)\n else:\n if isinstance(dtype, ExtensionDtype):\n # GH#41875\n cls = dtype.construct_array_type()\n new_values = cls._empty(result_shape, dtype=dtype)\n new_values[:] = fill_value\n else:\n dtype, fill_value = maybe_promote(dtype, fill_value)\n new_values = np.empty(result_shape, dtype=dtype)\n new_values.fill(fill_value)\n\n name = dtype.name\n new_mask = np.zeros(result_shape, dtype=bool)\n\n # we need to convert to a basic dtype\n # and possibly coerce an input to our output dtype\n # e.g. 
ints -> floats\n if needs_i8_conversion(values.dtype):\n sorted_values = sorted_values.view(\"i8\")\n new_values = new_values.view(\"i8\")\n else:\n sorted_values = sorted_values.astype(name, copy=False)\n\n # fill in our values & mask\n libreshape.unstack(\n sorted_values,\n mask.view(\"u1\"),\n stride,\n length,\n width,\n new_values,\n new_mask.view(\"u1\"),\n )\n\n # reconstruct dtype if needed\n if needs_i8_conversion(values.dtype):\n # view as datetime64 so we can wrap in DatetimeArray and use\n # DTA's view method\n new_values = new_values.view(\"M8[ns]\")\n new_values = ensure_wrapped_if_datetimelike(new_values)\n new_values = new_values.view(values.dtype)\n\n return new_values, new_mask\n\n def get_new_columns(self, value_columns: Index | None):\n if value_columns is None:\n if self.lift == 0:\n return self.removed_level._rename(name=self.removed_name)\n\n lev = self.removed_level.insert(0, item=self.removed_level._na_value)\n return lev.rename(self.removed_name)\n\n stride = len(self.removed_level) + self.lift\n width = len(value_columns)\n propagator = np.repeat(np.arange(width), stride)\n\n new_levels: FrozenList | list[Index]\n\n if isinstance(value_columns, MultiIndex):\n new_levels = value_columns.levels + (self.removed_level_full,)\n new_names = value_columns.names + (self.removed_name,)\n\n new_codes = [lab.take(propagator) for lab in value_columns.codes]\n else:\n new_levels = [\n value_columns,\n self.removed_level_full,\n ]\n new_names = [value_columns.name, self.removed_name]\n new_codes = [propagator]\n\n repeater = self._repeater\n\n # The entire level is then just a repetition of the single chunk:\n new_codes.append(np.tile(repeater, width))\n return MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n @cache_readonly\n def _repeater(self) -> np.ndarray:\n # The two indices differ only if the unstacked level had unused items:\n if len(self.removed_level_full) != len(self.removed_level):\n # In this case, we remap the new codes to the original level:\n repeater = self.removed_level_full.get_indexer(self.removed_level)\n if self.lift:\n repeater = np.insert(repeater, 0, -1)\n else:\n # Otherwise, we just use each level item exactly once:\n stride = len(self.removed_level) + self.lift\n repeater = np.arange(stride) - self.lift\n\n return repeater\n\n @cache_readonly\n def new_index(self):\n # Does not depend on values or value_columns\n result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]\n\n # construct the new index\n if len(self.new_index_levels) == 1:\n level, level_codes = self.new_index_levels[0], result_codes[0]\n if (level_codes == -1).any():\n level = level.insert(len(level), level._na_value)\n return level.take(level_codes).rename(self.new_index_names[0])\n\n return MultiIndex(\n levels=self.new_index_levels,\n codes=result_codes,\n names=self.new_index_names,\n verify_integrity=False,\n )\n\n\ndef _unstack_multiple(data, clocs, fill_value=None):\n if len(clocs) == 0:\n return data\n\n # NOTE: This doesn't deal with hierarchical columns yet\n\n index = data.index\n\n # GH 19966 Make sure if MultiIndexed index has tuple name, they will be\n # recognised as a whole\n if clocs in index.names:\n clocs = [clocs]\n clocs = [index._get_level_number(i) for i in clocs]\n\n rlocs = [i for i in range(index.nlevels) if i not in clocs]\n\n clevels = [index.levels[i] for i in clocs]\n ccodes = [index.codes[i] for i in clocs]\n cnames = [index.names[i] for i in clocs]\n rlevels = [index.levels[i] for i in 
rlocs]\n rcodes = [index.codes[i] for i in rlocs]\n rnames = [index.names[i] for i in rlocs]\n\n shape = tuple(len(x) for x in clevels)\n group_index = get_group_index(ccodes, shape, sort=False, xnull=False)\n\n comp_ids, obs_ids = compress_group_index(group_index, sort=False)\n recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)\n\n if not rlocs:\n # Everything is in clocs, so the dummy df has a regular index\n dummy_index = Index(obs_ids, name=\"__placeholder__\")\n else:\n dummy_index = MultiIndex(\n levels=rlevels + [obs_ids],\n codes=rcodes + [comp_ids],\n names=rnames + [\"__placeholder__\"],\n verify_integrity=False,\n )\n\n if isinstance(data, Series):\n dummy = data.copy()\n dummy.index = dummy_index\n\n unstacked = dummy.unstack(\"__placeholder__\", fill_value=fill_value)\n new_levels = clevels\n new_names = cnames\n new_codes = recons_codes\n else:\n if isinstance(data.columns, MultiIndex):\n result = data\n for i in range(len(clocs)):\n val = clocs[i]\n result = result.unstack(val, fill_value=fill_value)\n clocs = [v if v < val else v - 1 for v in clocs]\n\n return result\n\n # GH#42579 deep=False to avoid consolidating\n dummy = data.copy(deep=False)\n dummy.index = dummy_index\n\n unstacked = dummy.unstack(\"__placeholder__\", fill_value=fill_value)\n if isinstance(unstacked, Series):\n unstcols = unstacked.index\n else:\n unstcols = unstacked.columns\n assert isinstance(unstcols, MultiIndex) # for mypy\n new_levels = [unstcols.levels[0]] + clevels\n new_names = [data.columns.name] + cnames\n\n new_codes = [unstcols.codes[0]]\n for rec in recons_codes:\n new_codes.append(rec.take(unstcols.codes[-1]))\n\n new_columns = MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n if isinstance(unstacked, Series):\n unstacked.index = new_columns\n else:\n unstacked.columns = new_columns\n\n return unstacked\n\n\ndef unstack(obj, level, fill_value=None):\n\n if isinstance(level, (tuple, list)):\n if len(level) != 1:\n # _unstack_multiple only handles MultiIndexes,\n # and isn't needed for a single level\n return _unstack_multiple(obj, level, fill_value=fill_value)\n else:\n level = level[0]\n\n # Prioritize integer interpretation (GH #21677):\n if not is_integer(level) and not level == \"__placeholder__\":\n level = obj.index._get_level_number(level)\n\n if isinstance(obj, DataFrame):\n if isinstance(obj.index, MultiIndex):\n return _unstack_frame(obj, level, fill_value=fill_value)\n else:\n return obj.T.stack(dropna=False)\n elif not isinstance(obj.index, MultiIndex):\n # GH 36113\n # Give nicer error messages when unstack a Series whose\n # Index is not a MultiIndex.\n raise ValueError(\n f\"index must be a MultiIndex to unstack, {type(obj.index)} was passed\"\n )\n else:\n if is_1d_only_ea_dtype(obj.dtype):\n return _unstack_extension_series(obj, level, fill_value)\n unstacker = _Unstacker(\n obj.index, level=level, constructor=obj._constructor_expanddim\n )\n return unstacker.get_result(\n obj._values, value_columns=None, fill_value=fill_value\n )\n\n\ndef _unstack_frame(obj, level, fill_value=None):\n if not obj._can_fast_transpose:\n unstacker = _Unstacker(obj.index, level=level)\n mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)\n return obj._constructor(mgr)\n else:\n unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor)\n return unstacker.get_result(\n obj._values, value_columns=obj.columns, fill_value=fill_value\n )\n\n\ndef _unstack_extension_series(series, level, 
fill_value):\n \"\"\"\n Unstack an ExtensionArray-backed Series.\n\n The ExtensionDtype is preserved.\n\n Parameters\n ----------\n series : Series\n A Series with an ExtensionArray for values\n level : Any\n The level name or number.\n fill_value : Any\n The user-level (not physical storage) fill value to use for\n missing values introduced by the reshape. Passed to\n ``series.values.take``.\n\n Returns\n -------\n DataFrame\n Each column of the DataFrame will have the same dtype as\n the input Series.\n \"\"\"\n # Defer to the logic in ExtensionBlock._unstack\n df = series.to_frame()\n result = df.unstack(level=level, fill_value=fill_value)\n\n # equiv: result.droplevel(level=0, axis=1)\n # but this avoids an extra copy\n result.columns = result.columns.droplevel(0)\n return result\n\n\ndef stack(frame, level=-1, dropna=True):\n \"\"\"\n Convert DataFrame to Series with multi-level Index. Columns become the\n second level of the resulting hierarchical index\n\n Returns\n -------\n stacked : Series\n \"\"\"\n\n def factorize(index):\n if index.is_unique:\n return index, np.arange(len(index))\n codes, categories = factorize_from_iterable(index)\n return categories, codes\n\n N, K = frame.shape\n\n # Will also convert negative level numbers and check if out of bounds.\n level_num = frame.columns._get_level_number(level)\n\n if isinstance(frame.columns, MultiIndex):\n return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)\n elif isinstance(frame.index, MultiIndex):\n new_levels = list(frame.index.levels)\n new_codes = [lab.repeat(K) for lab in frame.index.codes]\n\n clev, clab = factorize(frame.columns)\n new_levels.append(clev)\n new_codes.append(np.tile(clab, N).ravel())\n\n new_names = list(frame.index.names)\n new_names.append(frame.columns.name)\n new_index = MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n else:\n levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns)))\n codes = ilab.repeat(K), np.tile(clab, N).ravel()\n new_index = MultiIndex(\n levels=levels,\n codes=codes,\n names=[frame.index.name, frame.columns.name],\n verify_integrity=False,\n )\n\n if not frame.empty and frame._is_homogeneous_type:\n # For homogeneous EAs, frame._values will coerce to object. 
So\n # we concatenate instead.\n dtypes = list(frame.dtypes._values)\n dtype = dtypes[0]\n\n if is_extension_array_dtype(dtype):\n arr = dtype.construct_array_type()\n new_values = arr._concat_same_type(\n [col._values for _, col in frame.items()]\n )\n new_values = _reorder_for_extension_array_stack(new_values, N, K)\n else:\n # homogeneous, non-EA\n new_values = frame._values.ravel()\n\n else:\n # non-homogeneous\n new_values = frame._values.ravel()\n\n if dropna:\n mask = notna(new_values)\n new_values = new_values[mask]\n new_index = new_index[mask]\n\n return frame._constructor_sliced(new_values, index=new_index)\n\n\ndef stack_multiple(frame, level, dropna=True):\n # If all passed levels match up to column names, no\n # ambiguity about what to do\n if all(lev in frame.columns.names for lev in level):\n result = frame\n for lev in level:\n result = stack(result, lev, dropna=dropna)\n\n # Otherwise, level numbers may change as each successive level is stacked\n elif all(isinstance(lev, int) for lev in level):\n # As each stack is done, the level numbers decrease, so we need\n # to account for that when level is a sequence of ints\n result = frame\n # _get_level_number() checks level numbers are in range and converts\n # negative numbers to positive\n level = [frame.columns._get_level_number(lev) for lev in level]\n\n # Can't iterate directly through level as we might need to change\n # values as we go\n for index in range(len(level)):\n lev = level[index]\n result = stack(result, lev, dropna=dropna)\n # Decrement all level numbers greater than current, as these\n # have now shifted down by one\n updated_level = []\n for other in level:\n if other > lev:\n updated_level.append(other - 1)\n else:\n updated_level.append(other)\n level = updated_level\n\n else:\n raise ValueError(\n \"level should contain all level names or all level \"\n \"numbers, not a mixture of the two.\"\n )\n\n return result\n\n\ndef _stack_multi_column_index(columns: MultiIndex) -> MultiIndex:\n \"\"\"Creates a MultiIndex from the first N-1 levels of this MultiIndex.\"\"\"\n if len(columns.levels) <= 2:\n return columns.levels[0]._rename(name=columns.names[0])\n\n levs = [\n [lev[c] if c >= 0 else None for c in codes]\n for lev, codes in zip(columns.levels[:-1], columns.codes[:-1])\n ]\n\n # Remove duplicate tuples in the MultiIndex.\n tuples = zip(*levs)\n unique_tuples = (key for key, _ in itertools.groupby(tuples))\n new_levs = zip(*unique_tuples)\n\n # The dtype of each level must be explicitly set to avoid inferring the wrong type.\n # See GH-36991.\n return MultiIndex.from_arrays(\n [\n # Not all indices can accept None values.\n Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev\n for new_lev, lev in zip(new_levs, columns.levels)\n ],\n names=columns.names[:-1],\n )\n\n\ndef _stack_multi_columns(frame, level_num=-1, dropna=True):\n def _convert_level_number(level_num: int, columns):\n \"\"\"\n Logic for converting the level number to something we can safely pass\n to swaplevel.\n\n If `level_num` matches a column name return the name from\n position `level_num`, otherwise return `level_num`.\n \"\"\"\n if level_num in columns.names:\n return columns.names[level_num]\n\n return level_num\n\n this = frame.copy()\n\n # this makes life much simpler\n if level_num != frame.columns.nlevels - 1:\n # roll levels to put selected level at end\n roll_columns = this.columns\n for i in range(level_num, frame.columns.nlevels - 1):\n # Need to check if the ints conflict with level names\n lev1 = 
_convert_level_number(i, roll_columns)\n lev2 = _convert_level_number(i + 1, roll_columns)\n roll_columns = roll_columns.swaplevel(lev1, lev2)\n this.columns = roll_columns\n\n if not this.columns._is_lexsorted():\n # Workaround the edge case where 0 is one of the column names,\n # which interferes with trying to sort based on the first\n # level\n level_to_sort = _convert_level_number(0, this.columns)\n this = this.sort_index(level=level_to_sort, axis=1)\n\n new_columns = _stack_multi_column_index(this.columns)\n\n # time to ravel the values\n new_data = {}\n level_vals = this.columns.levels[-1]\n level_codes = sorted(set(this.columns.codes[-1]))\n level_vals_nan = level_vals.insert(len(level_vals), None)\n\n level_vals_used = np.take(level_vals_nan, level_codes)\n levsize = len(level_codes)\n drop_cols = []\n for key in new_columns:\n try:\n loc = this.columns.get_loc(key)\n except KeyError:\n drop_cols.append(key)\n continue\n\n # can make more efficient?\n # we almost always return a slice\n # but if unsorted can get a boolean\n # indexer\n if not isinstance(loc, slice):\n slice_len = len(loc)\n else:\n slice_len = loc.stop - loc.start\n\n if slice_len != levsize:\n chunk = this.loc[:, this.columns[loc]]\n chunk.columns = level_vals_nan.take(chunk.columns.codes[-1])\n value_slice = chunk.reindex(columns=level_vals_used).values\n else:\n if frame._is_homogeneous_type and is_extension_array_dtype(\n frame.dtypes.iloc[0]\n ):\n # TODO(EA2D): won't need special case, can go through .values\n # paths below (might change to ._values)\n dtype = this[this.columns[loc]].dtypes.iloc[0]\n subset = this[this.columns[loc]]\n\n value_slice = dtype.construct_array_type()._concat_same_type(\n [x._values for _, x in subset.items()]\n )\n N, K = subset.shape\n idx = np.arange(N * K).reshape(K, N).T.ravel()\n value_slice = value_slice.take(idx)\n\n elif frame._is_mixed_type:\n value_slice = this[this.columns[loc]].values\n else:\n value_slice = this.values[:, loc]\n\n if value_slice.ndim > 1:\n # i.e. not extension\n value_slice = value_slice.ravel()\n\n new_data[key] = value_slice\n\n if len(drop_cols) > 0:\n new_columns = new_columns.difference(drop_cols)\n\n N = len(this)\n\n if isinstance(this.index, MultiIndex):\n new_levels = list(this.index.levels)\n new_names = list(this.index.names)\n new_codes = [lab.repeat(levsize) for lab in this.index.codes]\n else:\n old_codes, old_levels = factorize_from_iterable(this.index)\n new_levels = [old_levels]\n new_codes = [old_codes.repeat(levsize)]\n new_names = [this.index.name] # something better?\n\n new_levels.append(level_vals)\n new_codes.append(np.tile(level_codes, N))\n new_names.append(frame.columns.names[level_num])\n\n new_index = MultiIndex(\n levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False\n )\n\n result = frame._constructor(new_data, index=new_index, columns=new_columns)\n\n # more efficient way to go about this? 
can do the whole masking biz but\n # will only save a small amount of time...\n if dropna:\n result = result.dropna(axis=0, how=\"all\")\n\n return result\n\n\ndef get_dummies(\n data,\n prefix=None,\n prefix_sep=\"_\",\n dummy_na: bool = False,\n columns=None,\n sparse: bool = False,\n drop_first: bool = False,\n dtype: Dtype | None = None,\n) -> DataFrame:\n \"\"\"\n Convert categorical variable into dummy/indicator variables.\n\n Parameters\n ----------\n data : array-like, Series, or DataFrame\n Data of which to get dummy indicators.\n prefix : str, list of str, or dict of str, default None\n String to append DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : str, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix`.\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `object` or `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy-encoded columns should be backed by\n a :class:`SparseArray` (True) or a regular NumPy array (False).\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n dtype : dtype, default np.uint8\n Data type for new columns. Only a single dtype is allowed.\n\n Returns\n -------\n DataFrame\n Dummy-coded data.\n\n See Also\n --------\n Series.str.get_dummies : Convert Series to dummy codes.\n\n Examples\n --------\n >>> s = pd.Series(list('abca'))\n\n >>> pd.get_dummies(s)\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n >>> s1 = ['a', 'b', np.nan]\n\n >>> pd.get_dummies(s1)\n a b\n 0 1 0\n 1 0 1\n 2 0 0\n\n >>> pd.get_dummies(s1, dummy_na=True)\n a b NaN\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],\n ... 
'C': [1, 2, 3]})\n\n >>> pd.get_dummies(df, prefix=['col1', 'col2'])\n C col1_a col1_b col2_a col2_b col2_c\n 0 1 1 0 0 1 0\n 1 2 0 1 1 0 0\n 2 3 1 0 0 0 1\n\n >>> pd.get_dummies(pd.Series(list('abcaa')))\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n 4 1 0 0\n\n >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)\n b c\n 0 0 0\n 1 1 0\n 2 0 1\n 3 0 0\n 4 0 0\n\n >>> pd.get_dummies(pd.Series(list('abc')), dtype=float)\n a b c\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n dtypes_to_encode = [\"object\", \"category\"]\n\n if isinstance(data, DataFrame):\n # determine columns being encoded\n if columns is None:\n data_to_encode = data.select_dtypes(include=dtypes_to_encode)\n elif not is_list_like(columns):\n raise TypeError(\"Input must be a list-like for parameter `columns`\")\n else:\n data_to_encode = data[columns]\n\n # validate prefixes and separator to avoid silently dropping cols\n def check_len(item, name):\n\n if is_list_like(item):\n if not len(item) == data_to_encode.shape[1]:\n len_msg = (\n f\"Length of '{name}' ({len(item)}) did not match the \"\n \"length of the columns being encoded \"\n f\"({data_to_encode.shape[1]}).\"\n )\n raise ValueError(len_msg)\n\n check_len(prefix, \"prefix\")\n check_len(prefix_sep, \"prefix_sep\")\n\n if isinstance(prefix, str):\n prefix = itertools.cycle([prefix])\n if isinstance(prefix, dict):\n prefix = [prefix[col] for col in data_to_encode.columns]\n\n if prefix is None:\n prefix = data_to_encode.columns\n\n # validate separators\n if isinstance(prefix_sep, str):\n prefix_sep = itertools.cycle([prefix_sep])\n elif isinstance(prefix_sep, dict):\n prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]\n\n with_dummies: list[DataFrame]\n if data_to_encode.shape == data.shape:\n # Encoding the entire df, do not prepend any dropped columns\n with_dummies = []\n elif columns is not None:\n # Encoding only cols specified in columns. Get all cols not in\n # columns to prepend to result.\n with_dummies = [data.drop(columns, axis=1)]\n else:\n # Encoding only object and category dtype columns. 
Get remaining\n # columns to prepend to result.\n with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]\n\n for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):\n # col is (column_name, column), use just column data here\n dummy = _get_dummies_1d(\n col[1],\n prefix=pre,\n prefix_sep=sep,\n dummy_na=dummy_na,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n )\n with_dummies.append(dummy)\n result = concat(with_dummies, axis=1)\n else:\n result = _get_dummies_1d(\n data,\n prefix,\n prefix_sep,\n dummy_na,\n sparse=sparse,\n drop_first=drop_first,\n dtype=dtype,\n )\n return result\n\n\ndef _get_dummies_1d(\n data,\n prefix,\n prefix_sep=\"_\",\n dummy_na: bool = False,\n sparse: bool = False,\n drop_first: bool = False,\n dtype: Dtype | None = None,\n) -> DataFrame:\n from pandas.core.reshape.concat import concat\n\n # Series avoids inconsistent NaN handling\n codes, levels = factorize_from_iterable(Series(data))\n\n if dtype is None:\n dtype = np.dtype(np.uint8)\n # error: Argument 1 to \"dtype\" has incompatible type \"Union[ExtensionDtype, str,\n # dtype[Any], Type[object]]\"; expected \"Type[Any]\"\n dtype = np.dtype(dtype) # type: ignore[arg-type]\n\n if is_object_dtype(dtype):\n raise ValueError(\"dtype=object is not a valid dtype for get_dummies\")\n\n def get_empty_frame(data) -> DataFrame:\n if isinstance(data, Series):\n index = data.index\n else:\n index = Index(range(len(data)))\n return DataFrame(index=index)\n\n # if all NaN\n if not dummy_na and len(levels) == 0:\n return get_empty_frame(data)\n\n codes = codes.copy()\n if dummy_na:\n codes[codes == -1] = len(levels)\n levels = levels.insert(len(levels), np.nan)\n\n # if dummy_na, we just fake a nan level. drop_first will drop it again\n if drop_first and len(levels) == 1:\n return get_empty_frame(data)\n\n number_of_cols = len(levels)\n\n if prefix is None:\n dummy_cols = levels\n else:\n dummy_cols = Index([f\"{prefix}{prefix_sep}{level}\" for level in levels])\n\n index: Index | None\n if isinstance(data, Series):\n index = data.index\n else:\n index = None\n\n if sparse:\n\n fill_value: bool | float | int\n if is_integer_dtype(dtype):\n fill_value = 0\n elif dtype == np.dtype(bool):\n fill_value = False\n else:\n fill_value = 0.0\n\n sparse_series = []\n N = len(data)\n sp_indices: list[list] = [[] for _ in range(len(dummy_cols))]\n mask = codes != -1\n codes = codes[mask]\n n_idx = np.arange(N)[mask]\n\n for ndx, code in zip(n_idx, codes):\n sp_indices[code].append(ndx)\n\n if drop_first:\n # remove first categorical level to avoid perfect collinearity\n # GH12042\n sp_indices = sp_indices[1:]\n dummy_cols = dummy_cols[1:]\n for col, ixs in zip(dummy_cols, sp_indices):\n sarr = SparseArray(\n np.ones(len(ixs), dtype=dtype),\n sparse_index=IntIndex(N, ixs),\n fill_value=fill_value,\n dtype=dtype,\n )\n sparse_series.append(Series(data=sarr, index=index, name=col))\n\n return concat(sparse_series, axis=1, copy=False)\n\n else:\n # take on axis=1 + transpose to ensure ndarray layout is column-major\n dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=1).T\n\n if not dummy_na:\n # reset NaN GH4446\n dummy_mat[codes == -1] = 0\n\n if drop_first:\n # remove first GH12042\n dummy_mat = dummy_mat[:, 1:]\n dummy_cols = dummy_cols[1:]\n return DataFrame(dummy_mat, index=index, columns=dummy_cols)\n\n\ndef _reorder_for_extension_array_stack(\n arr: ExtensionArray, n_rows: int, n_columns: int\n) -> ExtensionArray:\n \"\"\"\n Re-orders the values when stacking multiple 
extension-arrays.\n\n The indirect stacking method used for EAs requires a followup\n take to get the order correct.\n\n Parameters\n ----------\n arr : ExtensionArray\n n_rows, n_columns : int\n The number of rows and columns in the original DataFrame.\n\n Returns\n -------\n taken : ExtensionArray\n The original `arr` with elements re-ordered appropriately\n\n Examples\n --------\n >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])\n >>> _reorder_for_extension_array_stack(arr, 2, 3)\n array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')\n\n >>> _reorder_for_extension_array_stack(arr, 3, 2)\n array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')\n \"\"\"\n # final take to get the order correct.\n # idx is an indexer like\n # [c0r0, c1r0, c2r0, ...,\n # c0r1, c1r1, c2r1, ...]\n idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()\n return arr.take(idx)\n"
] |
[
[
"numpy.take",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas._libs.sparse.IntIndex",
"numpy.dtype",
"pandas.core.sorting.get_group_index_sorter",
"pandas.core.dtypes.missing.notna",
"numpy.max",
"numpy.iinfo",
"pandas.core.sorting.get_group_index",
"pandas.core.frame.DataFrame",
"pandas.core.arrays.categorical.factorize_from_iterable",
"pandas.core.series.Series",
"numpy.arange",
"numpy.eye",
"pandas.core.sorting.get_compressed_ids",
"numpy.insert",
"numpy.zeros",
"pandas.core.algorithms.take_nd",
"pandas.core.sorting.decons_obs_group_ids",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.sorting.compress_group_index",
"pandas.core.dtypes.common.is_1d_only_ea_dtype",
"pandas.core.indexes.api.Index",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.core.dtypes.cast.maybe_promote",
"pandas.core.reshape.concat.concat",
"pandas.core.construction.ensure_wrapped_if_datetimelike",
"pandas.core.dtypes.common.needs_i8_conversion",
"numpy.tile",
"pandas.core.dtypes.common.is_integer",
"numpy.ones",
"pandas.core.indexes.api.MultiIndex",
"pandas.core.dtypes.common.is_object_dtype",
"numpy.prod",
"numpy.empty"
]
] |
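A note on the dense branch of _get_dummies_1d above: the one-hot matrix is built by factorizing values to integer codes and taking columns of an identity matrix. A minimal standalone sketch of that trick, with stand-in data:

import numpy as np
import pandas as pd

values = pd.Series(["a", "b", "a", "c"])
codes, levels = pd.factorize(values)                   # codes: [0 1 0 2]
dummy_mat = np.eye(len(levels), dtype=np.uint8).take(codes, axis=1).T
print(pd.DataFrame(dummy_mat, columns=levels))         # one indicator row per value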
johnson7788/MAAC
|
[
"466866534fc24ebfdc0968619f6ba7cabe0aac8b"
] |
[
"main.py"
] |
[
"import argparse\nimport torch\nimport os\nimport numpy as np\nfrom gym.spaces import Box, Discrete\nfrom pathlib import Path\nfrom torch.autograd import Variable\nfrom tensorboardX import SummaryWriter\nfrom utils.make_env import make_env\nfrom utils.buffer import ReplayBuffer\nfrom utils.env_wrappers import SubprocVecEnv, DummyVecEnv\nfrom algorithms.attention_sac import AttentionSAC\n\n\ndef make_parallel_env(env_id, n_rollout_threads, seed):\n def get_env_fn(rank):\n def init_env():\n env = make_env(env_id, discrete_action=True)\n env.seed(seed + rank * 1000)\n np.random.seed(seed + rank * 1000)\n return env\n return init_env\n if n_rollout_threads == 1:\n return DummyVecEnv([get_env_fn(0)])\n else:\n return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])\n\ndef run(config):\n model_dir = Path('./models') / config.env_id / config.model_name\n # if not model_dir.exists():\n # run_num = 1\n # else:\n # exst_run_nums = [int(str(folder.name).split('run')[1]) for folder in\n # model_dir.iterdir() if\n # str(folder.name).startswith('run')]\n # if len(exst_run_nums) == 0:\n # run_num = 1\n # else:\n # run_num = max(exst_run_nums) + 1\n run_num = 1\n curr_run = 'run%i' % run_num\n run_dir = model_dir / curr_run\n log_dir = run_dir / 'logs'\n os.makedirs(log_dir,exist_ok=True)\n logger = SummaryWriter(str(log_dir))\n\n torch.manual_seed(run_num)\n np.random.seed(run_num)\n env = make_parallel_env(config.env_id, config.n_rollout_threads, run_num)\n model = AttentionSAC.init_from_env(env,\n tau=config.tau,\n pi_lr=config.pi_lr,\n q_lr=config.q_lr,\n gamma=config.gamma,\n pol_hidden_dim=config.pol_hidden_dim,\n critic_hidden_dim=config.critic_hidden_dim,\n attend_heads=config.attend_heads,\n reward_scale=config.reward_scale)\n replay_buffer = ReplayBuffer(config.buffer_length, model.nagents,\n [obsp.shape[0] for obsp in env.observation_space],\n [acsp.shape[0] if isinstance(acsp, Box) else acsp.n\n for acsp in env.action_space])\n t = 0\n for ep_i in range(0, config.n_episodes, config.n_rollout_threads):\n print(\"Episodes %i-%i of %i\" % (ep_i + 1,\n ep_i + 1 + config.n_rollout_threads,\n config.n_episodes))\n obs = env.reset()\n model.prep_rollouts(device='cpu')\n\n for et_i in range(config.episode_length):\n # rearrange observations to be per agent, and convert to torch Variable\n torch_obs = [Variable(torch.Tensor(np.vstack(obs[:, i])),\n requires_grad=False)\n for i in range(model.nagents)]\n # get actions as torch Variables\n torch_agent_actions = model.step(torch_obs, explore=True)\n # convert actions to numpy arrays\n agent_actions = [ac.data.numpy() for ac in torch_agent_actions]\n # rearrange actions to be per environment\n actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]\n next_obs, rewards, dones, infos = env.step(actions)\n replay_buffer.push(obs, agent_actions, rewards, next_obs, dones)\n obs = next_obs\n t += config.n_rollout_threads\n if (len(replay_buffer) >= config.batch_size and\n (t % config.steps_per_update) < config.n_rollout_threads):\n if config.use_gpu:\n model.prep_training(device='gpu')\n else:\n model.prep_training(device='cpu')\n for u_i in range(config.num_updates):\n sample = replay_buffer.sample(config.batch_size,\n to_gpu=config.use_gpu)\n model.update_critic(sample, logger=logger)\n model.update_policies(sample, logger=logger)\n model.update_all_targets()\n model.prep_rollouts(device='cpu')\n ep_rews = replay_buffer.get_average_rewards(\n config.episode_length * config.n_rollout_threads)\n for a_i, a_ep_rew in 
enumerate(ep_rews):\n logger.add_scalar('agent%i/mean_episode_rewards' % a_i,\n a_ep_rew * config.episode_length, ep_i)\n\n if ep_i % config.save_interval < config.n_rollout_threads:\n model.prep_rollouts(device='cpu')\n os.makedirs(run_dir / 'incremental', exist_ok=True)\n model.save(run_dir / 'incremental' / ('model_ep%i.pt' % (ep_i + 1)))\n model.save(run_dir / 'model.pt')\n\n model.save(run_dir / 'model.pt')\n env.close()\n logger.export_scalars_to_json(str(log_dir / 'summary.json'))\n logger.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--env_id\", help=\"环境的名称\",choices=('fullobs_collect_treasure','multi_speaker_listener'), default='fullobs_collect_treasure')\n parser.add_argument(\"--model_name\",help=\"存储模型/训练内容的目录名称\", default='output')\n parser.add_argument(\"--n_rollout_threads\", default=1, type=int, help=\"同时启动环境的线程数,启动多少个环境\")\n parser.add_argument(\"--buffer_length\", default=int(1e6), type=int)\n parser.add_argument(\"--n_episodes\", default=50000, type=int)\n parser.add_argument(\"--episode_length\", default=25, type=int)\n parser.add_argument(\"--steps_per_update\", default=100, type=int)\n parser.add_argument(\"--num_updates\", default=4, type=int,\n help=\"每个更新周期的更新数\")\n parser.add_argument(\"--batch_size\",\n default=1024, type=int,\n help=\"Batch size for training\")\n parser.add_argument(\"--save_interval\", default=1000, type=int)\n parser.add_argument(\"--pol_hidden_dim\", default=128, type=int, help='policy网络神经元的隐藏神经元数')\n parser.add_argument(\"--critic_hidden_dim\", default=128, type=int, help='Critic网络神经元的隐藏神经元数')\n parser.add_argument(\"--attend_heads\", default=4, type=int, help='critic的注意力的头数')\n parser.add_argument(\"--pi_lr\", default=0.001, type=float)\n parser.add_argument(\"--q_lr\", default=0.001, type=float)\n parser.add_argument(\"--tau\", default=0.001, type=float)\n parser.add_argument(\"--gamma\", default=0.99, type=float)\n parser.add_argument(\"--reward_scale\", default=100., type=float)\n parser.add_argument(\"--use_gpu\", action='store_true')\n\n config = parser.parse_args()\n #主函数\n run(config)\n"
] |
[
[
"torch.manual_seed",
"numpy.vstack",
"numpy.random.seed"
]
] |
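A minimal sketch of the per-agent observation rearrangement in the rollout loop of main.py above; the shapes are illustrative stand-ins, and torch.as_tensor stands in for the deprecated Variable wrapper used in the original:

import numpy as np
import torch

n_envs, n_agents, obs_dim = 2, 3, 4
obs = np.random.rand(n_envs, n_agents, obs_dim)        # stand-in for env.reset()
# one tensor per agent, stacking that agent's observation across all envs
torch_obs = [torch.as_tensor(np.vstack(obs[:, i]), dtype=torch.float32)
             for i in range(n_agents)]
print(torch_obs[0].shape)                              # torch.Size([2, 4])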
alliedel/anomalyframework_python
|
[
"63c56d9fb2e1dc37dfca494805e7fa179e078623"
] |
[
"anomalyframework/tests/test_full.py"
] |
[
"import numpy as np\nimport unittest\nimport tempfile\n\nfrom anomalyframework import liblinear_utils\nfrom anomalyframework import run\n\nONE_BASED = 1\n\n\nclass TestFull(unittest.TestCase):\n def easy_2d_test(self):\n prediction, ground_truth = easy_2d_test()\n self.assertListEqual(prediction.tolist(), ground_truth.tolist())\n\n\ndef generate_easy_2d_example():\n X = np.ones((100,2), dtype=float)\n X[50:55,1] = 2.0 + np.random.normal(0,0.2,5)\n y = np.arange(1, X.shape[0]+1)\n ground_truth = np.zeros(y.shape)\n ground_truth[50:55] = 1\n print(len(ground_truth))\n test_features_file = tempfile.mkstemp('.train')[1]\n\n liblinear_utils.write(X, y, test_features_file, zero_based=False)\n return test_features_file, ground_truth\n\n\ndef easy_2d_test():\n infile_features, ground_truth = generate_easy_2d_example()\n\n a, pars = run.main(infile_features=infile_features, n_shuffles=100, window_size=1,\n window_stride_multiplier=1.0, lambd=1e-2)\n anomaly_signal = a/(1-a)\n std_multiplier = 1.0\n prediction = anomaly_signal > np.mean(anomaly_signal) + std_multiplier * np.std(anomaly_signal)\n return prediction, ground_truth\n\n\nif __name__ == '__main__':\n unittest.main()\n\n"
] |
[
[
"numpy.arange",
"numpy.ones",
"numpy.random.normal",
"numpy.std",
"numpy.mean",
"numpy.zeros"
]
] |
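The detection rule in easy_2d_test above flags a point as anomalous when its signal exceeds the mean by std_multiplier standard deviations; a self-contained sketch with toy numbers:

import numpy as np

signal = np.array([0.1, 0.2, 0.15, 2.5, 0.1])
std_multiplier = 1.0
prediction = signal > np.mean(signal) + std_multiplier * np.std(signal)
print(prediction)                                      # [False False False  True False]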
bdevy/raspi-sump
|
[
"78d0101808e54ea091a43f9142cdc7d8c31804b0"
] |
[
"raspisump/todaychart.py"
] |
[
"'''Graph sump pit activity.'''\n\n# Raspi-sump, a sump pump monitoring system.\n# Al Audet\n# http://www.linuxnorth.org/raspi-sump/\n#\n# All configuration changes should be done in raspisump.conf\n# MIT License -- http://www.linuxnorth.org/raspi-sump/license.html\n\nimport time\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib import rcParams\nrcParams.update({'figure.autolayout': True})\ntry:\n import ConfigParser as configparser # Python2\nexcept ImportError:\n import configparser # Python3\n\nconfig = configparser.RawConfigParser()\nconfig.read('/home/pi/raspi-sump/raspisump.conf')\nconfigs = {'unit': config.get('pit', 'unit')}\n\nMPL_VERSION = int(mpl.__version__.split(\".\")[0]) # Matplotlib major version\n\nif MPL_VERSION > 1:\n rcParams['date.autoformatter.hour'] = '%H:%M:%S' # Matplotlib 2.0 changed time formatting\n \ndef bytesdate2str(fmt, encoding='utf-8'):\n '''Convert strpdate2num from bytes to string as required in Python3.\n\n This is a workaround as described in the following tread;\n https://github.com/matplotlib/matplotlib/issues/4126/\n\n Credit to github user cimarronm for this workaround.\n '''\n\n strconverter = mdates.strpdate2num(fmt)\n\n def bytesconverter(b):\n s = b.decode(encoding)\n return strconverter(s)\n return bytesconverter\n\n\ndef graph(csv_file, filename, bytes2str, graph_title):\n '''Create a line graph from a two column csv file.'''\n\n unit = configs['unit']\n date, value = np.loadtxt(csv_file, delimiter=',', unpack=True,\n converters={0: bytes2str}\n )\n fig = plt.figure(figsize=(10, 3.5))\n \n # axisbg is deprecated in matplotlib 2.x. Maintain 1.x compatibility\n if MPL_VERSION > 1:\n fig.add_subplot(111, facecolor='white', frameon=False)\n else:\n fig.add_subplot(111,axisbg='white', frameon=False)\n \n rcParams.update({'font.size': 9})\n plt.plot_date(x=date, y=value, ls='solid', linewidth=2, color='#FB921D',\n fmt=':'\n )\n title = \"Sump Pit {} Level {}\".format(graph_title, time.strftime('%Y-%m-%d %H:%M'))\n title_set = plt.title(title)\n title_set.set_y(1.09)\n plt.subplots_adjust(top=0.86)\n\n if graph_title == 'Water':\n if unit == 'imperial':\n plt.ylabel('inches')\n if unit == 'metric':\n plt.ylabel('centimeters')\n elif graph_title == 'Temperature':\n if unit == 'imperial':\n plt.ylabel('Fahrenheit')\n if unit == 'metric':\n plt.ylabel('Celcius')\n\n plt.xlabel('Time of Day')\n plt.xticks(rotation=30)\n plt.grid(True, color='#ECE5DE', linestyle='solid')\n plt.tick_params(axis='x', bottom='off', top='off')\n plt.tick_params(axis='y', left='off', right='off')\n plt.savefig(filename, dpi=72)\n\n"
] |
[
[
"matplotlib.pyplot.plot_date",
"matplotlib.dates.strpdate2num",
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.__version__.split",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.rcParams.update",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.tick_params",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] |
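mdates.strpdate2num used above has been deprecated and removed in newer matplotlib releases; a minimal modern equivalent of the two-column parse in graph(), reading from an in-memory stand-in CSV:

import io
from datetime import datetime
import numpy as np
import matplotlib.dates as mdates

def to_num(field):
    # numpy may hand converters bytes or str depending on its version
    s = field.decode() if isinstance(field, bytes) else field
    return mdates.date2num(datetime.strptime(s, "%H:%M:%S"))

csv_data = io.StringIO("09:00:00,42.1\n09:01:00,42.4\n")
date, value = np.loadtxt(csv_data, delimiter=",", unpack=True,
                         converters={0: to_num})
print(date, value)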
BodenmillerGroup/pycytools
|
[
"a79ed661a672fa15831830efa9c4c3bb7018b29d"
] |
[
"pycytools/plots.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter\n\nimport pycytools.library as lib\n\n\ndef plot_rgb_imc(imc_img, metals, norm_perc=99.9, sigma=1, outlierthresh=30, saturation=1):\n plt.figure()\n imgstack = [imc_img.get_img_by_metal(m) for m in metals]\n imgstack = _preproc_img_stack(imgstack, norm_perc, sigma, outlierthresh)\n pimg = np.stack(imgstack, axis=2)\n pimg = pimg * saturation\n pimg[pimg > 1] = 1\n plt.imshow(pimg, interpolation='nearest')\n plt.axis('off')\n\n\ndef plot_rgbw_imc(imc_img, metals, w_metal, white_weight=0.4, norm_perc=99.9, sigma=1, outlierthresh=30):\n plt.figure()\n imgstack = [imc_img.get_img_by_metal(m) for m in metals + [w_metal]]\n imgstack = _preproc_img_stack(imgstack, norm_perc, sigma, outlierthresh)\n pimg = np.stack(imgstack[:3], axis=2) + np.repeat(np.expand_dims(imgstack[3], 2) * white_weight, 3, 2)\n pimg[pimg > 1] = 1\n plt.imshow(pimg, interpolation='nearest')\n plt.axis('off')\n\n\ndef _preproc_img_stack(imgstack, norm_perc=99.9, sigma=1, outlierthresh=30):\n imgstack = [lib.remove_outlier_pixels(im.astype(np.uint16), threshold=outlierthresh) for im in imgstack]\n imgstack = [gaussian_filter(im, sigma=sigma) for im in imgstack]\n imgstack = [im.astype(np.float) / np.percentile(im, norm_perc) for im in imgstack]\n for im in imgstack:\n im[im > 1] = 1\n return imgstack\n\n\ndef get_7_color_img(imc_img, metals, norm_perc=99.9, alphas=None, sigma=1, outlierthresh=30, saturation=1):\n \"\"\"\n Color.red,Color.green,Color.blue,\n Color.white,Color.cyan,Color.magenta,Color.yellow\n \"\"\"\n cols = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 0, 1), (1, 1, 0)]\n imgstack = [imc_img.get_img_by_metal(m) for m in metals if m != 0]\n curcols = [c for m, c in zip(metals, cols) if m != 0]\n imgstack = _preproc_img_stack(imgstack, norm_perc, sigma, outlierthresh)\n\n if alphas is None:\n alphas = np.repeat(1 / len(imgstack), len(imgstack))\n else:\n alphas = [a for m, a in zip(metals, alphas) if m != 0]\n imgstack = [np.stack([im * c * a for c in col], axis=2) for im, col, a in zip(imgstack, curcols, alphas)]\n\n pimg = np.sum(imgstack, axis=0)\n pimg = pimg * saturation\n pimg[pimg > 1] = 1\n\n return pimg.squeeze()\n\n\ndef plot_7_color_img(imc_img, metals, norm_perc=99.9, alphas=None, sigma=1, outlierthresh=30, saturation=1):\n plt.figure()\n pimg = get_7_color_img(imc_img, metals, norm_perc, alphas, sigma, outlierthresh, saturation)\n plt.imshow(pimg.squeeze(), interpolation='nearest')\n plt.axis('off')\n\n # def ipw_marker_vs_marker(pdat, transf_dict, name_dict, col_level1='measure', col_level2='mass', cut_level='ImageNumber')\n# stat = pdat.columns.get_level_values(col_level1)\n# marker = pdat.columns.get_level_values(col_level2)\n#\n#\n# #w = ipw.interactive(ft.partial(transf_dict=transf_dict),\n# # _ipw_marker_vs_marker,\n# (pdat,stat1, m1,transform1, stat2, m2, transform2, fit, color_cut, fixed(transf_dict),)\n\n# def _ipw_marker_vs_marker(pdat,stat1, m1,transform1, stat2, m2, transform2, fit, color_cut, transf_dict, name_dict):\n# dat1 = pdat[stat1]\n# dat2 = pdat[stat2]\n# m1 = pct.library.metal_from_name(m1)\n# m2 = pct.library.metal_from_name(m2)\n# x = dat1[m1]\n# y = dat2[m2]\n#\n# nafil = np.isfinite(x) & np.isfinite(y)\n# x = x[nafil]\n# y = y[nafil]\n#\n# transfkt = transf_dict[transform]\n# x = np.array(transfkt(x))\n# y = np.array(transfkt(y))\n#\n#\n# def marker_vs_marker(x, y, fit,):\n#\n# plt.close()\n# g = sns.jointplot(x, y, alpha=0.5)\n# if color_cut:\n# r = 
np.arange(len(x))\n# np.random.shuffle(r)\n# c = pdat.index.get_level_values(level='ImageNumber')[nafil][r]\n#\n# col_pal = sns.color_palette(\"hls\",max(c)+1)\n# cols = [col_pal[int(i)] for i in c]\n# g.ax_joint.scatter(x[r],y[r], color=cols)\n#\n# g.set_axis_labels(stat1+' '+ m1, stat2+' '+m2)\n#\n# #g.title(np.corrcoef(x,y)[1,0])\n#\n# if fit:\n# lm_mod = lm.LinearRegression(fit_intercept=True)\n# mod = lm.RANSACRegressor(lm_mod)\n# mod.fit(x.reshape(-1, 1),y)\n# y_pred = mod.predict(x.reshape(-1, 1))\n# g.ax_joint.plot(x,y_pred, color='r')\n#\n# plt.title('Pearson: {:.2}\\n Spearman: {:.2}\\nSlope: {:.2}\\nIntercept: {:.2}\\n'.format(\n# np.corrcoef(x,y)[1,0],scistats.spearmanr(x, y)[0],\n# mod.estimator_.coef_[0], mod.estimator_.intercept_))\n# #else:\n# #plt.title('Corcoef: {:.2}'.format(np.corrcoef(x,y)[1,0]))\n"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.expand_dims",
"numpy.stack",
"numpy.percentile",
"scipy.ndimage.filters.gaussian_filter",
"matplotlib.pyplot.axis",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] |
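A minimal sketch of the per-channel preprocessing in _preproc_img_stack above (smooth, normalize to a high percentile, clip), on a random stand-in image; note the scipy.ndimage.filters import path and np.float used in the original are deprecated or removed in recent releases:

import numpy as np
from scipy.ndimage import gaussian_filter

img = np.random.rand(64, 64) * 100.0                   # stand-in single channel
img = gaussian_filter(img, sigma=1)
img = img / np.percentile(img, 99.9)                   # scale so the 99.9th pct -> 1
img = np.clip(img, 0.0, 1.0)
print(img.min(), img.max())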
zhang-xin/ncappzoo
|
[
"3b46ae583556378eff27d08f545726507557cd38"
] |
[
"apps/ssd-object-detector/ssd-object-detector.py"
] |
[
"#!/usr/bin/python3\n\n# ****************************************************************************\n# Copyright(c) 2017 Intel Corporation. \n# License: MIT See LICENSE file in root directory.\n# ****************************************************************************\n\n# How to run Single Shot Multibox Detectors (SSD)\n# on Intel® Movidius™ Neural Compute Stick (NCS)\n\nimport os\nimport sys\nimport numpy\nimport ntpath\nimport argparse\nimport skimage.io\nimport skimage.transform\n\nimport mvnc.mvncapi as mvnc\n\ncurr_path = os.path.dirname(os.path.realpath(__file__))\nutil_path = curr_path.replace(os.path.basename(curr_path), 'utils')\nsys.path.append(util_path)\nimport visualize_output\nimport deserialize_output\n\n# Detection threshold: Minimum confidance to tag as valid detection\nCONFIDANCE_THRESHOLD = 0.60 # 60% confidant\n\n# Variable to store commandline arguments\nARGS = None\n\n# ---- Step 1: Open the enumerated device and get a handle to it -------------\n\ndef open_ncs_device():\n\n # Look for enumerated NCS device(s); quit program if none found.\n devices = mvnc.EnumerateDevices()\n if len( devices ) == 0:\n print( \"No devices found\" )\n quit()\n\n # Get a handle to the first enumerated device and open it\n device = mvnc.Device( devices[0] )\n device.OpenDevice()\n\n return device\n\n# ---- Step 2: Load a graph file onto the NCS device -------------------------\n\ndef load_graph( device ):\n\n # Read the graph file into a buffer\n with open( ARGS.graph, mode='rb' ) as f:\n blob = f.read()\n\n # Load the graph buffer into the NCS\n graph = device.AllocateGraph( blob )\n\n return graph\n\n# ---- Step 3: Pre-process the images ----------------------------------------\n\ndef pre_process_image( img_draw ):\n\n # Resize image [Image size is defined during training]\n img = skimage.transform.resize( img_draw, ARGS.dim, preserve_range=True )\n\n # Convert RGB to BGR [skimage reads image in RGB, some networks may need BGR]\n if( ARGS.colormode == \"bgr\" ):\n img = img[:, :, ::-1]\n\n # Mean subtraction & scaling [A common technique used to center the data]\n img = img.astype( numpy.float16 )\n img = ( img - numpy.float16( ARGS.mean ) ) * ARGS.scale\n\n return img\n\n# ---- Step 4: Read & print inference results from the NCS -------------------\n\ndef infer_image( graph, img ):\n\n # Read original image, so we can perform visualization ops on it\n img_draw = skimage.io.imread( ARGS.image )\n\n # The first inference takes an additional ~20ms due to memory \n # initializations, so we make a 'dummy forward pass'.\n graph.LoadTensor( img, 'user object' )\n output, userobj = graph.GetResult()\n\n # Load the image as a half-precision floating point array\n graph.LoadTensor( img, 'user object' )\n\n # Get the results from NCS\n output, userobj = graph.GetResult()\n\n # Get execution time\n inference_time = graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN )\n\n # Deserialize the output into a python dictionary\n output_dict = deserialize_output.ssd( \n output, \n CONFIDANCE_THRESHOLD, \n img_draw.shape )\n\n # Print the results\n print( \"\\n==============================================================\" )\n print( \"I found these objects in\", ntpath.basename( ARGS.image ) )\n print( \"Execution time: \" + str( numpy.sum( inference_time ) ) + \"ms\" )\n print( \"--------------------------------------------------------------\" )\n for i in range( 0, output_dict['num_detections'] ):\n print( \"%3.1f%%\\t\" % output_dict['detection_scores_' + str(i)]\n + labels[ 
int(output_dict['detection_classes_' + str(i)]) ]\n + \": Top Left: \" + str( output_dict['detection_boxes_' + str(i)][0] )\n + \" Bottom Right: \" + str( output_dict['detection_boxes_' + str(i)][1] ) )\n\n # Draw bounding boxes around valid detections \n (y1, x1) = output_dict.get('detection_boxes_' + str(i))[0]\n (y2, x2) = output_dict.get('detection_boxes_' + str(i))[1]\n\n # Prep string to overlay on the image\n display_str = ( \n labels[output_dict.get('detection_classes_' + str(i))]\n + \": \"\n + str( output_dict.get('detection_scores_' + str(i) ) )\n + \"%\" )\n\n img_draw = visualize_output.draw_bounding_box( \n y1, x1, y2, x2, \n img_draw,\n thickness=4,\n color=(255, 255, 0),\n display_str=display_str )\n\n print( \"==============================================================\\n\" )\n\n # If a display is available, show the image on which inference was performed\n if 'DISPLAY' in os.environ:\n skimage.io.imshow( img_draw )\n skimage.io.show()\n\n# ---- Step 5: Unload the graph and close the device -------------------------\n\ndef close_ncs_device( device, graph ):\n graph.DeallocateGraph()\n device.CloseDevice()\n\n# ---- Main function (entry point for this script ) --------------------------\n\ndef main():\n\n device = open_ncs_device()\n graph = load_graph( device )\n\n img_draw = skimage.io.imread( ARGS.image )\n img = pre_process_image( img_draw )\n infer_image( graph, img )\n\n close_ncs_device( device, graph )\n\n# ---- Define 'main' function as the entry point for this script -------------\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(\n description=\"Object detection using SSD on \\\n Intel® Movidius™ Neural Compute Stick.\" )\n\n parser.add_argument( '-g', '--graph', type=str,\n default='../../caffe/SSD_MobileNet/graph',\n help=\"Absolute path to the neural network graph file.\" )\n\n parser.add_argument( '-i', '--image', type=str,\n default='../../data/images/pic_064.jpg',\n help=\"Absolute path to the image that needs to be inferred.\" )\n\n parser.add_argument( '-l', '--labels', type=str,\n default='../../caffe/SSD_MobileNet/labels.txt',\n help=\"Absolute path to labels file.\" )\n\n parser.add_argument( '-M', '--mean', type=float,\n nargs='+',\n default=[127.5, 127.5, 127.5],\n help=\"',' delimited floating point values for image mean.\" )\n\n parser.add_argument( '-S', '--scale', type=float,\n default=0.00789,\n help=\"Absolute path to labels file.\" )\n\n parser.add_argument( '-D', '--dim', type=int,\n nargs='+',\n default=[300, 300],\n help=\"Image dimensions. ex. -D 224 224\" )\n\n parser.add_argument( '-c', '--colormode', type=str,\n default=\"bgr\",\n help=\"RGB vs BGR color sequence. This is network dependent.\" )\n\n ARGS = parser.parse_args()\n\n # Load the labels file\n labels =[ line.rstrip('\\n') for line in\n open( ARGS.labels ) if line != 'classes\\n']\n\n main()\n\n# ==== End of file ===========================================================\n"
] |
[
[
"numpy.float16",
"numpy.sum"
]
] |
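A minimal sketch of the mean-subtraction-and-scaling step in pre_process_image() above, on a random stand-in image using the script's default mean and scale values:

import numpy as np

img = np.random.randint(0, 256, (300, 300, 3)).astype(np.float16)
img = img[:, :, ::-1]                                  # RGB -> BGR channel flip
img = (img - np.float16([127.5, 127.5, 127.5])) * 0.00789
print(img.dtype, float(img.min()), float(img.max()))   # values land roughly in [-1, 1]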
XuMengyaAmy/CIDACaptioning
|
[
"1b5446e9db1aaea524fe3645da947eb5c33413cf"
] |
[
"models/transformer/encoders.py"
] |
[
"from torch.nn import functional as F\nfrom models.transformer.utils import PositionWiseFeedForward\nimport torch\nfrom torch import nn\nfrom models.transformer.attention import MultiHeadAttention\nfrom models.transformer.gaussian_kernel import get_gaussian_filter\n\nclass EncoderLayer(nn.Module):\n def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=512, dropout=.1, identity_map_reordering=False,\n attention_module=None, attention_module_kwargs=None): \n super(EncoderLayer, self).__init__()\n self.identity_map_reordering = identity_map_reordering\n self.mhatt = MultiHeadAttention(d_model, d_k, d_v, h, dropout, identity_map_reordering=identity_map_reordering,\n attention_module=attention_module,\n attention_module_kwargs=attention_module_kwargs)\n self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout, identity_map_reordering=identity_map_reordering)\n\n def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):\n att = self.mhatt(queries, keys, values, attention_mask, attention_weights)\n ff = self.pwff(att)\n return ff\n\n\nclass MultiLevelEncoder(nn.Module):\n def __init__(self, N, padding_idx, d_model=512, d_k=64, d_v=64, h=8, d_ff=512, dropout=.1,\n identity_map_reordering=False, attention_module=None, attention_module_kwargs=None):\n super(MultiLevelEncoder, self).__init__()\n self.d_model = d_model\n self.dropout = dropout\n self.layers = nn.ModuleList([EncoderLayer(d_model, d_k, d_v, h, d_ff, dropout,\n identity_map_reordering=identity_map_reordering,\n attention_module=attention_module,\n attention_module_kwargs=attention_module_kwargs)\n for _ in range(N)])\n self.padding_idx = padding_idx\n\n def forward(self, input, attention_weights=None):\n attention_mask = (torch.sum(input, -1) == self.padding_idx).unsqueeze(1).unsqueeze(1) \n\n outs = []\n out = input\n for l in self.layers:\n out = l(out, out, out, attention_mask, attention_weights)\n outs.append(out.unsqueeze(1))\n\n\n outs = torch.cat(outs, 1)\n return outs, attention_mask\n\n\n\nclass MemoryAugmentedEncoder(nn.Module):\n def __init__(self, N, padding_idx, d_in=512, d_model=512, dropout=.1, **kwargs): \n super(MemoryAugmentedEncoder, self).__init__()\n self.d_model = d_model\n self.dropout = dropout\n\n self.fc = nn.Linear(d_in, self.d_model)\n self.dropout = nn.Dropout(p=self.dropout)\n self.layer_norm = nn.LayerNorm(self.d_model)\n self.MLencoder = MultiLevelEncoder(N, padding_idx)\n\n def forward(self, input, attention_weights=None): \n out = F.relu(self.fc(input)) \n out = self.dropout(out)\n out = self.layer_norm(out)\n out = self.MLencoder(out, attention_weights=attention_weights)\n return out\n\n\n'''\nPaper : Class-Incremental Domain Adaptation with Smoothing and Calibration for Surgical Report Generation\n'''\nclass MemoryAugmentedEncoder_CBS(nn.Module):\n def __init__(self, N, padding_idx, d_in=512, d_model=512, dropout=.1, std=1, **kwargs): \n super(MemoryAugmentedEncoder_CBS, self).__init__()\n self.d_model = d_model\n self.dropout = dropout\n\n self.std = std\n\n self.fc = nn.Linear(d_in, self.d_model)\n self.dropout = nn.Dropout(p=self.dropout)\n self.layer_norm = nn.LayerNorm(self.d_model)\n self.MLencoder = MultiLevelEncoder(N, padding_idx)\n \n def get_new_kernels(self, epoch_count, kernel_sizex, kernel_sizey, decay_epoch, std_factor):\n if epoch_count % decay_epoch == 0 and epoch_count is not 0:\n self.std *= std_factor\n self.kernel1 = get_gaussian_filter(\n kernel_sizex=kernel_sizex,\n kernel_sizey=kernel_sizey,\n sigma=self.std,\n channels=6,\n )\n def 
forward(self, input, attention_weights=None):\n out = self.fc(input)\n out = F.relu(self.layer_norm(self.kernel1(out))) \n out = self.dropout(out)\n out = self.layer_norm(out)\n out = self.MLencoder(out, attention_weights=attention_weights)\n return out\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.sum",
"torch.nn.LayerNorm",
"torch.nn.Linear"
]
] |
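A minimal sketch of the attention padding mask built in MultiLevelEncoder.forward above: positions whose feature sum equals padding_idx are masked out. (Separately, the `epoch_count is not 0` test in get_new_kernels compares identity against a literal; `!= 0` is the safe spelling.)

import torch

padding_idx = 0
feats = torch.tensor([[[1., 2.], [0., 0.], [3., 1.]]])    # (batch, seq, d)
mask = (torch.sum(feats, -1) == padding_idx).unsqueeze(1).unsqueeze(1)
print(mask.shape)                                         # torch.Size([1, 1, 1, 3])
print(mask)                                               # only the middle position is masked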
foreverxq/my_1conv
|
[
"a42390c3854fee7ba733b3a7d8872c0ae4112c6e"
] |
[
"layers/modules/multibox_loss.py"
] |
[
"# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import coco as cfg\n# from ..box_utils import match, log_sum_exp\nfrom layers.box_utils import match, log_sum_exp\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD损失函数\n 计算目标:\n 1)对于目标每一个真实的bounding box 计算与其相匹配的所有prior box,计算方式就是通过 jaccard计算重叠面积。\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3)利用hard negative mining 技术过滤掉过多的负例框,从而使得正例与负例保持在1: 3.\n 目标损失:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n 其中Lconf是交叉熵损失,Lloc是L1损失。\n\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n\n\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,\n use_gpu=True):\n super(MultiBoxLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = cfg['variance']\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n targets (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n loc_data, conf_data, priors = predictions\n num = loc_data.size(0)\n priors = priors[:loc_data.size(1), :]\n num_priors = (priors.size(0))\n num_classes = self.num_classes\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4)\n conf_t = torch.LongTensor(num, num_priors)\n for idx in range(num):\n truths = targets[idx][:, :-1].data #目标的四个坐标。\n labels = targets[idx][:, -1].data #目标的类别\n defaults = priors.data\n match(self.threshold, truths, defaults, self.variance, labels,\n loc_t, conf_t, idx)\n\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)#loc_t shape (batch_size, num_priors, 4)\n conf_t = Variable(conf_t, requires_grad=False)#conf_t shape: torch.size(batch_size, num_priors)\n\n pos = conf_t > 0 #pos 找出模型 对 priors分类后 不为背景 的prior\n num_pos = pos.sum(dim=1, keepdim=True) # conf_t 中不为背景的个数\n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)#loc_data shape: torch.size(batch_size,num_priors,4) expand_as 扩充的维度上面的值为复制的原先维度上面的值。\n loc_p = loc_data[pos_idx].view(-1, 4)#pos_idx 筛选出prior框中 不为背景的框\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)#计算prior框与真实框的 smooth_l1_loss\n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes) #conf_data shape: torch.size(batch_size,num_priors,num_classes)\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) # batch_conf shape(batch_size * num_priors, num_classes)\n #log_sum_exp 后shape 为(batch_size * num_priors, 1)\n # conf_t 
shape(batch_size, num_priors)\n #通过gather函数选取出模型对各个prior预测的值\n #其shape为(batch_size * num_priors, 1)\n\n # Hard Negative Mining\n loss_c[pos.squeeze(0)] = 0 # filter out pos boxes for now loss_c shape: (batch_size, num_priors) #pos shape\n loss_c = loss_c.view(num, -1)#loss_c shape(batch_size, num_priors)\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1) #获得loss_c下标所对应的损失值rank\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)#pos size(batch_size, num_priors) #num_neg shape == num_pos.shape\n neg = idx_rank < num_neg.expand_as(idx_rank) #使loss_c中损失在前num_neg位的下标为True\n\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum()\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n#TODO 其中hard negative mining 写的很巧妙,再继续看下。\n\nif __name__ == '__main__':\n from train import train_cfg\n from data.config import data_cfg\n criterion = MultiBoxLoss(data_cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, train_cfg['cuda'])\n\n"
] |
[
[
"torch.LongTensor",
"torch.Tensor",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.smooth_l1_loss",
"torch.autograd.Variable"
]
] |
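The hard negative mining above uses a double sort: sorting the descending-sort indices yields each prior's rank by loss, so the top-k losses per row can be selected without a loop. A self-contained sketch:

import torch

loss_c = torch.tensor([[0.2, 0.9, 0.1, 0.5]])
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)                         # rank of each prior by its loss
num_neg = torch.tensor([[2]])
neg = idx_rank < num_neg.expand_as(idx_rank)
print(idx_rank)                                        # tensor([[2, 0, 3, 1]])
print(neg)                                             # keeps the two largest losses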
JiaMingLin/tsn-pytorch
|
[
"d9173033900eb3fbda5aaf9478727e0673bc84b7"
] |
[
"dataset.py"
] |
[
"import torch.utils.data as data\n\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom numpy.random import randint\n\nclass VideoRecord(object):\n def __init__(self, row):\n self._data = row\n\n @property\n def path(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n frames = int(self._data[1])\n return frames-1 if frames > 1 else 1\n\n @property\n def label(self):\n return int(self._data[2])\n\n\nclass TSNDataSet(data.Dataset):\n def __init__(self, root_path, list_file,\n num_segments=3, new_length=1, modality='RGB',\n image_tmpl='frame{:06d}.jpg', transform=None,\n force_grayscale=False, random_shift=True, test_mode=False,\n **kwargs):\n\n self.root_path = root_path\n self.list_file = list_file\n self.num_segments = num_segments\n self.new_length = new_length\n self.modality = modality\n self.image_tmpl = image_tmpl\n self.transform = transform\n self.random_shift = random_shift\n self.test_mode = test_mode\n self.dataset = kwargs.get('dataset', 'ucf101')\n\n if self.modality == 'RGBDiff':\n self.new_length += 1# Diff needs one more image to calculate diff\n\n self._parse_list()\n\n def _load_image(self, directory, idx):\n \n img_folder = os.path.join(self.root_path, directory.split(' ')[0])\n if self.modality == 'RGB' or self.modality == 'RGBDiff':\n return [Image.open(os.path.join(img_folder, self.image_tmpl.format(idx))).convert('RGB')]\n elif self.modality == 'Flow' or self.modality == 'lk_flow':\n\n x_img = Image.open(os.path.join(img_folder, self.image_tmpl.format('x', idx))).convert('L')\n y_img = Image.open(os.path.join(img_folder, self.image_tmpl.format('y', idx))).convert('L')\n\n return [x_img, y_img]\n elif self.modality == 'tvl1':\n # self.root_path = ~/action_data/ucf101/tvl1_flow\n if self.dataset == 'ucf101' or self.dataset == 'ucf11':\n x_img = Image.open(os.path.join(self.root_path, 'u', directory.split(' ')[0], self.image_tmpl.format(idx))).convert('L')\n y_img = Image.open(os.path.join(self.root_path, 'v', directory.split(' ')[0], self.image_tmpl.format(idx))).convert('L')\n elif self.dataset == 'hmdb51':\n x_img = Image.open(os.path.join(self.root_path, directory.split(' ')[0], self.image_tmpl.format('x', idx))).convert('L')\n y_img = Image.open(os.path.join(self.root_path, directory.split(' ')[0], self.image_tmpl.format('y', idx))).convert('L')\n\n return [x_img, y_img]\n\n def _parse_list(self):\n self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]\n\n def _sample_indices(self, record):\n \"\"\"\n :param record: VideoRecord\n :return: list\n \"\"\"\n average_duration = (record.num_frames - self.new_length + 1) // self.num_segments #16\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)\n elif record.num_frames > self.num_segments:\n # 從 400 frames 中隨機選取 num_segment = 25 frame 出來\n offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets + 1\n\n def _get_val_indices(self, record):\n if record.num_frames > self.num_segments + self.new_length - 1:\n tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)\n offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])\n else:\n offsets = np.zeros((self.num_segments,))\n return offsets + 1\n\n def _get_test_indices(self, record):\n\n tick = (record.num_frames - self.new_length + 1) / 
float(self.num_segments)\n\n offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])\n\n return offsets + 1\n\n def __getitem__(self, index):\n record = self.video_list[index]\n\n if not self.test_mode:\n segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)\n else:\n segment_indices = self._get_test_indices(record)\n\n return self.get(record, segment_indices)\n\n def get(self, record, indices):\n\n images = list()\n for seg_ind in indices:\n p = int(seg_ind)\n for i in range(self.new_length):\n seg_imgs = self._load_image(record.path, p)\n images.extend(seg_imgs)\n if p < record.num_frames:\n p += 1\n\n process_data = self.transform(images)\n return process_data, record.label\n\n def __len__(self):\n return len(self.video_list)\n"
] |
[
[
"numpy.zeros",
"numpy.random.randint"
]
] |
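A minimal sketch of the TSN segment sampling in _sample_indices above: split the video into num_segments equal chunks and draw one random frame index from each (1-based, as in the original):

import numpy as np
from numpy.random import randint

num_frames, num_segments, new_length = 48, 3, 1
average_duration = (num_frames - new_length + 1) // num_segments   # 16
offsets = np.multiply(list(range(num_segments)), average_duration) \
          + randint(average_duration, size=num_segments)
print(offsets + 1)                                     # e.g. [ 7 23 37], one per chunk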
JelleAalbers/plunc
|
[
"e70806b17c56dfb7ffa882faf6d61f1959d1ccca"
] |
[
"tests/test_interval.py"
] |
[
"import unittest\nimport numpy as np\n\nimport logging\nimport sys\n\nfrom plunc.intervals.basic import CentralCI\nfrom plunc.statistics import NumberOfEvents\nfrom plunc.exceptions import InsufficientPrecisionError\n\n# 90% confidence level on Poisson rate if n=20 or n=120 events observed.\n# Values taken from calculator at http://statpages.org/confint.html\nPOISSON_90_CI_20 = (13.2547, 29.0620)\nPOISSON_90_CI_120 = (102.5677, 139.6439)\n\n\nclass TestIntervalChoice(unittest.TestCase):\n\n def test_ci(self):\n \"\"\"Compute a single Poisson confidence interval\"\"\"\n poisson_ci = CentralCI(statistic=NumberOfEvents(), confidence_level=0.9, precision_digits=5)\n ll, hl = poisson_ci(np.ones(20))\n\n # Note assertAlmostEqual's 'places' refers to digits past the decimal point\n self.assertAlmostEqual(ll, POISSON_90_CI_20[0], places=3)\n self.assertAlmostEqual(hl, POISSON_90_CI_20[1], places=3)\n\n def test_wrap_interpolator(self):\n poisson_ci = CentralCI(statistic=NumberOfEvents(), confidence_level=0.9, precision_digits=2,\n wrap_interpolator=True)\n\n # Precompute some values\n logging.getLogger().setLevel(logging.INFO)\n for n in np.arange(100):\n poisson_ci(np.ones(n))\n logging.getLogger().setLevel(logging.DEBUG)\n\n poisson_ci.forbid_exact_computation = True\n\n # Test the interpolator at one of the joints\n ll, hl = poisson_ci(np.ones(20))\n self.assertAlmostEqual(np.log10(ll),\n np.log10(POISSON_90_CI_20[0]), places=1)\n self.assertAlmostEqual(np.log10(hl),\n np.log10(POISSON_90_CI_20[1]), places=1)\n\n # Test the extrapolation\n ll, hl = poisson_ci(np.ones(120))\n self.assertAlmostEqual(np.log10(ll), np.log10(POISSON_90_CI_120[0]), places=1)\n self.assertAlmostEqual(np.log10(hl), np.log10(POISSON_90_CI_120[1]), places=1)\n\n # Test a ridiculously far extrapolation: should raise error\n self.assertRaises(InsufficientPrecisionError, poisson_ci, np.ones(int(1e4)))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG,\n stream=sys.stderr)\n unittest.main()\n"
] |
[
[
"numpy.arange",
"numpy.log10",
"numpy.ones"
]
] |
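The reference values such as POISSON_90_CI_20 can be cross-checked with the chi-square form of the exact central Poisson interval; this sketch uses scipy directly, not plunc's API:

from scipy import stats

n, cl = 20, 0.9
alpha = 1.0 - cl
lo = stats.chi2.ppf(alpha / 2, 2 * n) / 2              # ~13.2547
hi = stats.chi2.ppf(1 - alpha / 2, 2 * (n + 1)) / 2    # ~29.0620
print(lo, hi)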
Keyrat06/Gaussian_Processes_Sampling
|
[
"50eac1163da001bf708470580750f55ca9709a98"
] |
[
"example.py"
] |
[
"import threading\nimport matplotlib.pyplot as plt\nimport pickle\nimport time\nimport numpy as np\nimport cv2\nimport GaussianProcess\nimport util\nfrom scipy import interpolate\nplt.ion()\n\nN = util.N\nM = util.M\nK = 2\n\n# MAP = np.ones((N,M,K))/float(K)\nMAP = lambda x, y: np.ones(K)/float(K)\n# np.random.seed(0)\n\ndef Image_Classification_Thread(n = float('inf'), t=0.1):\n print(\"YAY Image Classification Has Started!\")\n GaussianProcess.setup()\n imageClassifier = pickle.load(open(\"Image_Classifier_Model.p\", \"rb\"))\n MAP = cv2.imread(\"MAP.png\")\n FEATURE_EXTRACTOR = lambda image: [image[:, :, 0].mean(), image[:, :, 1].mean(), image[:, :, 2].mean()]\n i = 0\n while True and i < n:\n sample_location = (np.random.randint(0, N), np.random.randint(0, M))\n image_sample = MAP[sample_location[0]*100:sample_location[0]*100+100,\n sample_location[1]*100:sample_location[1]*100+100]\n\n image_feature = FEATURE_EXTRACTOR(image_sample)\n time.sleep(t)\n P = imageClassifier.predict_proba(np.array([image_feature]))[0]\n GaussianProcess.new_image(P, sample_location[0], sample_location[1])\n i += 1\n\n\n\ndef Adaptive_Sampling_Thread():\n print(\"YAY Adaptive Sampling Has Started!\")\n while True:\n time.sleep(0.1)\n global MAP\n # MAP = GaussianProcess.get_image_map()\n MAP = GaussianProcess.GPRegressor()\n\n\ndef main():\n image = threading.Thread(name='image_class', target=Image_Classification_Thread)\n sampling = threading.Thread(name='adaptive_sample', target=Adaptive_Sampling_Thread)\n image.start()\n sampling.start()\n i = 0\n while True:\n plt.pause(1)\n plt.clf()\n MAP.visualize(0, file_path=\"images/yay{}.png\".format(i))\n i += 1\n\ndef experament(a_options=np.linspace(0,1,11), b_options=range(1,21), n=100):\n \"\"\"\n This function just finds optimal a and b values and plots the space\n :param a_options: list options for a\n :param b_options: list options for b\n :param n: number of samples\n :return: None\n \"\"\"\n np.random.seed(0)\n Image_Classification_Thread(n, t=0)\n data = np.zeros((len(a_options), len(b_options)))\n min_NLL = float('inf')\n optimal_params = (-1, -1)\n for i, a in enumerate(a_options):\n for j, b in enumerate(b_options):\n MAP = GaussianProcess.get_image_map(a, b)\n nll = util.get_NLL(MAP)\n data[i, j] = nll\n if nll < min_NLL:\n optimal_params = (a, b)\n min_NLL = nll\n print(\"optimal a = {}, optimal b = {}\".format(*optimal_params))\n cm = plt.imshow(data)\n plt.colorbar(cm)\n plt.xticks(range(20), b_options)\n plt.yticks(range(10), a_options)\n plt.title(\"Negative Log Loss for values of a and b\")\n plt.xlabel(\"b\")\n plt.ylabel(\"a\")\n plt.show(\"hold\")\n\n# experament()\n\nif __name__ == \"__main__\":\n main()\n\n\n\n"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.random.seed",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"numpy.random.randint",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
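A minimal sketch of the tile sampling and RGB-mean feature extractor in Image_Classification_Thread above, with a random array standing in for MAP.png:

import numpy as np

N, M = 4, 5
MAP = np.random.randint(0, 256, (N * 100, M * 100, 3), dtype=np.uint8)
r, c = np.random.randint(0, N), np.random.randint(0, M)
tile = MAP[r * 100:(r + 1) * 100, c * 100:(c + 1) * 100]
feature = [tile[:, :, k].mean() for k in range(3)]     # per-channel means
print(feature)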
Renelvon/fsic
|
[
"24878074bb465f05bd0e0b1e9cff947dd73b391b"
] |
[
"fsic/util.py"
] |
[
"\"\"\"Convenient methods for general machine learning\"\"\"\n\nimport numpy as np\n\n\nclass NumpySeedContext:\n \"\"\"\n A context manager to reset the random seed by numpy.random.seed(..).\n Set the seed back at the end of the block.\n \"\"\"\n\n def __init__(self, seed):\n self.seed = seed\n self.cur_state = None\n\n def __enter__(self):\n rstate = np.random.get_state()\n self.cur_state = rstate\n np.random.seed(self.seed)\n return self\n\n def __exit__(self, *args):\n np.random.set_state(self.cur_state)\n\n\ndef dist_matrix2(X, Y):\n \"\"\"\n Construct a pairwise squared Euclidean distance matrix of\n size X.shape[0] x Y.shape[0]\n \"\"\"\n D = X.dot(Y.T)\n np.multiply(D, -2, out=D)\n np.add(D, np.sum(X ** 2, 1, keepdims=True), out=D)\n return np.add(D, np.sum(Y ** 2, 1), out=D)\n\n\ndef dist_matrix(X, Y):\n \"\"\"\n Construct a pairwise Euclidean distance matrix of\n size X.shape[0] x Y.shape[0]\n \"\"\"\n D = dist_matrix2(X, Y)\n\n # Clamp negative numbers to 0, to avoid errors from taking sqrt.\n np.maximum(D, 0, out=D)\n\n return np.sqrt(D, out=D)\n\n\ndef subsample_ind(n, k, seed=32):\n \"\"\"\n Return a list of indices to choose k out of n without replacement\n \"\"\"\n rand_state = np.random.get_state()\n np.random.seed(seed)\n\n ind = np.random.choice(n, k, replace=False)\n np.random.set_state(rand_state)\n return ind\n\n\ndef subsample_rows(X, k, seed=29):\n \"\"\"\n Subsample k rows from the matrix X.\n \"\"\"\n n = X.shape[0]\n if k > n:\n raise ValueError(\n \"Cannot select {} rows from matrix X; it has only {}\".format(k, n)\n )\n ind = subsample_ind(n, k, seed)\n return X[ind, :]\n\n\ndef tr_te_indices(n, tr_proportion, seed=9282):\n \"\"\"Get two logical vectors for indexing train/test points.\n\n Return (tr_ind, te_ind)\n \"\"\"\n if not 0.0 <= tr_proportion <= 1.0:\n raise ValueError(\n \"tr_proportion must be in [0, 1]; found {}\".format(tr_proportion)\n )\n\n Itr = np.full(n, False)\n tr_ind = subsample_ind(n, int(tr_proportion * n), seed)\n Itr[tr_ind] = True\n return Itr, ~Itr\n\n\ndef median_distance(X):\n \"\"\"\n Compute the median of pairwise distances of points in the matrix.\n\n Useful as a heuristic for setting Gaussian kernel's width.\n\n Parameters\n ----------\n X : n x d numpy array\n\n Return\n ------\n The median distance. If it is nonpositive, return the mean. This can happen\n e.g. 
when the data are 0s and 1s and there are more 0s than 1s.\n \"\"\"\n D = dist_matrix(X, X)\n Itri = np.tril_indices(D.shape[0], -1)\n Tri = D[Itri]\n med = np.median(Tri)\n return med if med > 0 else np.mean(Tri)\n\n\ndef sampled_median_distance(X, subsample, seed=9827):\n \"\"\"\n Compute the subsampled median of pairwise distances of points in the matrix.\n\n Useful as a heuristic for setting Gaussian kernel's width.\n\n Parameters\n ----------\n X : n x d numpy array\n subsample: number of points to sample from X to determine median\n\n Return\n ------\n The subsampled median distance.\n \"\"\"\n if subsample <= 0:\n raise ValueError(\n \"subsample must be positive; found {}\".format(subsample)\n )\n\n n = X.shape[0]\n if subsample > n:\n subsample = n\n\n Xi = subsample_rows(X, subsample, seed)\n return median_distance(Xi)\n\n\ndef is_real_num(x):\n \"\"\"Return true iff x is a real number.\"\"\"\n return np.isscalar(x) and np.isfinite(x) and np.isrealobj(x)\n\n\ndef cca(X, Y, reg=1e-5):\n \"\"\"\n CCA formulation solving two eigenvalue problems.\n\n - X: n x dx data matrix\n - Y: n x dy data matrix\n\n Return (vals, Vx, Vy) where\n vals is a numpy array of decreasing eigenvalues,\n Vx is a square matrix whose columns are eigenvectors for X corresponding to vals.\n Vy is a square matrix whose columns are eigenvectors for Y corresponding to vals.\n \"\"\"\n nx, dx = X.shape\n ny, dy = Y.shape\n if nx != ny:\n raise ValueError(\"X has {} rows while Y has {} rows\".format(nx, ny))\n\n mx = np.mean(X, 0)\n my = np.mean(Y, 0)\n\n Cxy = X.T.dot(Y) # dx x dy\n np.divide(Cxy, nx, out=Cxy)\n np.subtract(Cxy, np.outer(mx, my), out=Cxy)\n\n # Cxx, Cyy have to be invertible\n Cxx = np.cov(X.T)\n Cyy = np.cov(Y.T)\n\n if dx == 1:\n CxxICxy = Cxy / Cxx\n else:\n regmat = np.identity(dx)\n np.multiply(regmat, reg, out=regmat)\n np.add(Cxx, regmat, out=Cxx)\n CxxICxy = np.linalg.solve(Cxx, Cxy)\n\n if dy == 1:\n CyyICyx = Cxy.T / Cyy\n else:\n regmat = np.identity(dy)\n np.multiply(regmat, reg, out=regmat)\n np.add(Cyy, regmat, out=Cyy)\n CyyICyx = np.linalg.solve(Cyy, Cxy.T)\n\n # Problems for a and b:\n avals, aV = np.linalg.eig(CxxICxy.dot(CyyICyx))\n bvals, bV = np.linalg.eig(CyyICyx.dot(CxxICxy))\n\n dim = min(dx, dy)\n\n # Sort in descending order and select first `dim` entries\n Ia = np.argsort(-avals)[:dim]\n avals = avals[Ia]\n aV = aV[:, Ia]\n\n Ib = np.argsort(-bvals)[:dim]\n bvals = bvals[Ib]\n bV = bV[:, Ib]\n\n return np.real(avals), np.real(aV), np.real(bV)\n\n\ndef sym_to_power(X, power, fix=0):\n \"\"\"\n Raise symmetric matrix to given power through eigenvalue decomposition.\n \"\"\"\n # Since X is symmetric, use `eigh` decomposition.\n evals, evecs = np.linalg.eigh(X)\n\n # If X is full rank, all eigenvalues are positive, but we can optionally\n # ensure the eigenvalues are non-zero. This is usefull in case `power` < 0.\n np.maximum(0, evals, out=evals)\n if fix != 0:\n np.add(evals, fix, out=evals)\n\n # Since the matrix is symemtric, the eigenvectors should be real.\n evecs = np.real(evecs)\n\n np.power(evals, power, out=evals)\n\n Y = evecs * evals\n return np.dot(Y, evecs.T, out=Y)\n\n\ndef fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):\n \"\"\"\n Fit a multivariate normal to X (n x d) and draw J points from the fit.\n\n - reg: regularizer to use with the covariance matrix\n - eig_pow: raise eigenvalues of the covariance matrix to this power to\n construct a new covariance matrix before drawing samples. 
Useful to\n shrink the spread of the variance.\n \"\"\"\n with NumpySeedContext(seed=seed):\n d = X.shape[1]\n cov_x = np.cov(X, rowvar=True) # construct the d x d covariance matrix\n if d == 1:\n # TODO: Write a unittest for this case!\n cov_x = np.array([[cov_x]])\n\n shrunk_cov = sym_to_power(cov_x, eig_pow)\n\n # Add regularizer to shrunken covariance matrix.\n regmat = np.identity(d)\n np.multiply(regmat, reg, out=regmat)\n np.add(shrunk_cov, regmat, out=shrunk_cov)\n\n return np.random.multivariate_normal(np.mean(X, 0), shrunk_cov, J)\n\n\ndef bound_by_data(Z, Data):\n \"\"\"\n Determine min/max bounds for each dimension of Data; project Z so that all\n points are within the bounds.\n\n Z: m x d\n Data: n x d\n\n Return a projected Z of size m x d.\n \"\"\"\n Low = np.min(Data, 0)\n High = np.max(Data, 0)\n return np.clip(Z, Low, High)\n\n\ndef one_of_K_code(arr):\n \"\"\"\n One-hot-encode the numpy array.\n\n For example, if arr = ([0, 1, 0, 2]), then return\n\n [[1, 0, 0],\n [0, 1, 0],\n [1, 0, 0],\n [0, 0, 1]]\n\n Code based on https://stackoverflow.com/questions/29831489/convert-array-of-indices-to-1-hot-encoded-numpy-array#comment101948320_49790223\n \"\"\"\n return np.identity(np.max(arr) + 1)[arr]\n\n\ndef standardize(X, check=False):\n \"\"\"Standardize array X\"\"\"\n mx = np.mean(X, 0)\n stdx = np.std(X, 0)\n Xs = X - mx\n np.divide(Xs, stdx, out=Xs) # Assume standard deviations are not 0\n if check:\n assert np.all(np.isfinite(Xs))\n return Xs\n"
] |
[
[
"numpy.dot",
"numpy.sqrt",
"numpy.max",
"numpy.mean",
"numpy.divide",
"numpy.clip",
"numpy.tril_indices",
"numpy.full",
"numpy.real",
"numpy.random.set_state",
"numpy.std",
"numpy.isrealobj",
"numpy.outer",
"numpy.multiply",
"numpy.random.choice",
"numpy.power",
"numpy.min",
"numpy.median",
"numpy.cov",
"numpy.linalg.eigh",
"numpy.identity",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.random.get_state",
"numpy.linalg.solve",
"numpy.maximum",
"numpy.random.seed",
"numpy.isfinite",
"numpy.isscalar",
"numpy.add"
]
] |
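A quick check of the identity behind dist_matrix2 above, ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, against a naive double loop:

import numpy as np

X = np.random.randn(5, 3)
Y = np.random.randn(4, 3)
D = -2 * X.dot(Y.T) + np.sum(X**2, 1, keepdims=True) + np.sum(Y**2, 1)
naive = np.array([[np.sum((x - y)**2) for y in Y] for x in X])
print(np.allclose(D, naive))                           # True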
pipekorsi/IsoMut2
|
[
"f45f125f79e6ebef218be349aaf5897301e121d1"
] |
[
"isomut2py/format.py"
] |
[
"try:\n from isomut2py import ploidyestimation\n\n import pandas as __pd\n import numpy as __np\n import os as __os\n import subprocess as __subprocess\nexcept ImportError:\n print('ImportError in isomut2py.format, generating bed files will not work.')\n\n\ndef get_bed_format_for_sample(chromosomes, chrom_length, output_dir, bam_filename=None, ownbed_filepath=None):\n \"\"\"\n\n Creates bed file of constant ploidies for a given sample from a file of positional ploidy data.\n If the ownbed_filepath attribute of the PloidyEstimation object is set, saves the bedfile to the path specified there. Otherwise,\n saves it to the output_dir with the \"_ploidy.bed\" suffix. Also sets the bed_dataframe attribute to the pandas.Dataframe containing the bed file.\n\n :param chromosomes: list of chromosomes (array-like)\n :param chrom_length: list of chromosome lengths in bp (array-like)\n :param output_dir: path to the directory where the PE_fullchrom_* files are located. (str)\n :param bam_filename: filename of the BAM file of the sample (default: None) (str)\n :param ownbed_filepath: path to the bed file where results should be saved (default: None) (str)\n\n :returns: (ownbed_filepath, df)\n\n - ownbed_filepath: path the the bed file where results are saved\n - df: the bed file in a pandas.DataFrame\n\n \"\"\"\n\n def get_ploidy_ranges_for_chrom(outputdir, chrom, chr_list, before_list, after_list, pl_list, loh_list,\n chrom_len_dict):\n \"\"\"\n\n Gets ranges of constant ploidies from a file of positional ploidy data.\n At breakpoints, the rounded average of the two bordering positions are taken as the breakpoint position.\n\n :param outputdir: The path to the directory, where the PE_fullchrom_[chrom].txt file is located. (str)\n :param chrom: The name of the chromosome to be analysed. 
(str)\n :param chr_list: The list of chromosomes to append current information to.\n :param before_list: The list of starting positions to append current information to.\n :param after_list: The list of ending positions to append current information to.\n :param pl_list: The list of ploidies to append current information to.\n :param loh_list: The list of LOHs to append current information to.\n :param chrom_len_dict: A dictionary containing {chromosome: chromosome length} pairs.\n\n :return: (chr_list, before_list, after_list, pl_list, loh_list)\n\n \"\"\"\n df = __pd.read_csv(outputdir + '/PE_fullchrom_' + chrom + '.txt', sep='\\t').sort_values(by='pos')\n p = __np.array(list(df['pos']))\n pl = __np.array(list(df['ploidy']))\n loh = __np.array(list(df['LOH']))\n pl_loh = __np.array([str(pl_c) + ',' + str(loh_c) for pl_c, loh_c in zip(pl, loh)])\n pl_loh_change = __np.where(pl_loh[:-1] != pl_loh[1:])[0]\n\n before_pos = 0\n after_pos = 0\n for i in range(len(pl_loh_change)):\n after_idx = pl_loh_change[i]\n after_pos = int(round(__np.mean([p[after_idx], p[after_idx + 1]])))\n chr_list.append(chrom)\n before_list.append(before_pos)\n after_list.append(after_pos)\n pl_list.append(pl[after_idx])\n loh_list.append(loh[after_idx])\n before_pos = after_pos + 1\n chr_list.append(chrom)\n before_list.append(before_pos)\n after_list.append(chrom_len_dict[chrom])\n pl_list.append(pl[-1])\n loh_list.append(loh[-1])\n\n return chr_list, before_list, after_list, pl_list, loh_list\n\n chrom_len_dict = {c: l for c, l in zip(chromosomes, chrom_length)}\n chr_list = []\n before_list = []\n after_list = []\n pl_list = []\n loh_list = []\n for c in chromosomes:\n if not __os.path.isfile(output_dir + '/PE_fullchrom_' + c + '.txt'):\n raise ValueError(\n 'File ' + output_dir + '/PE_fullchrom_' + c + '.txt is not yet created, call \"run_ploidy_estimation\" '\n 'first.')\n chr_list, before_list, after_list, pl_list, loh_list = get_ploidy_ranges_for_chrom(output_dir, c,\n chr_list, before_list,\n after_list, pl_list,\n loh_list, chrom_len_dict)\n df = __pd.DataFrame()\n df['chrom'] = chr_list\n df['chromStart'] = before_list\n df['chromEnd'] = after_list\n df['ploidy'] = pl_list\n df['LOH'] = loh_list\n\n if ownbed_filepath != None:\n df.to_csv(ownbed_filepath, index=False)\n elif bam_filename != None:\n df.to_csv(output_dir + '/' + bam_filename.split('.bam')[0] + '_ploidy.bed', index=False)\n ownbed_filepath = output_dir + '/' + bam_filename.split('.bam')[0] + '_ploidy.bed'\n else:\n df.to_csv(output_dir + '/' + 'ploidy.bed', index=False)\n ownbed_filepath = output_dir + '/' + 'ploidy.bed'\n\n return ownbed_filepath, df\n\n\ndef generate_ploidy_info_file(filename=None, sample_names=None, bed_filepaths=None, ploidy_estimation_objects=None,\n sample_groups=None, group_bed_filepaths=None):\n \"\"\"\n\n Generate ploidy info file for mutation detection in samples with nondefault ploidies. Make sure to supply one of the following arguments:\n - ploidy_estimation_objects\n - sample_names AND bed_filepaths\n - sample_groups AND group_bed_filepaths\n\n :param filename: The desired path to the generated ploidy info file. If None, ploidy information is saved to ploidy_info_file.txt in the current directory. (default: None) (str)\n :param sample_names: List of bam filenames for the samples, must be supplied together with bed_filepaths. (default: None) (list of str)\n :param bed_filepaths: Must be supplied together with sample_names. The list of filepaths to each bed file describing the given sample in sample_names. 
(default: None) (list of str)\n :param ploidy_estimation_objects: List of PloidyEstimation objects for each sample. (default: None) (list of isomut2py.PloidyEstimation)\n :param sample_groups: List of lists of str. Each list in sample_groups must contain the name of bam files in that group. Must be supplied together with group_bed_filepaths. (default: None) (list of list of str, example: [['sample1.bam', 'sample2.bam', 'sample3.bam'], ['sample4.bam', 'sample5.bam'], ['sample6.bam']])\n :param group_bed_filepaths: List of filepaths to the bed files describing each sample group in sample_groups. Must be supplied together with sample_groups. (default: None) (list of str, example: ['bedfile_of_samples123.txt', 'bedfile_of_samples45.txt' 'bedfile_of_samples6.txt'])\n\n \"\"\"\n if filename == None:\n filename = 'ploidy_info_file.txt'\n print('Argument \"filename\" not set, saving ploidy info file to ploidy_info_file.txt')\n\n if (ploidy_estimation_objects == None and\n not (sample_names != None and bed_filepaths != None)\n and not (sample_groups != None and group_bed_filepaths != None)):\n raise TypeError(\n 'Either \"ploidy_estimation_objects\" OR \"sample_names\" together with \"bed_filepaths\" OR \"sample_groups\" '\n 'together with \"group_bed_filepaths\" must be defined.')\n\n __subprocess.call('rm ' + filename, shell=True)\n\n with open(filename, \"a\") as f:\n f.write('#file_path\\tsample_names_list\\n')\n if ploidy_estimation_objects != None:\n if ploidy_estimation_objects.__class__ != list:\n raise TypeError(\n 'Argument \"ploidy_estimation_objects\" must be a list.')\n for PE in ploidy_estimation_objects:\n if not isinstance(PE, ploidyestimation.PloidyEstimator):\n raise TypeError('All elements of ploidy_estimation_objects must be PloidyEstimator objects.')\n if not hasattr(PE, \"ownbed_filepath\"):\n raise ValueError(\n 'All PloidyEstimator objects must have valid \"ownbed_filepath\" arguments. Try running '\n '\"run_ploidy_estimation()\" or set \"ownbed_filepath\" manually.')\n if not __os.path.isfile(PE.ownbed_filepath):\n raise ValueError(\n 'All PloidyEstimator objects must have valid \"ownbed_filepath\" arguments. 
Try running '\n                        '\"run_ploidy_estimation()\" or set \"ownbed_filepath\" manually.')\n                else:\n                    s = __subprocess.check_output('head -1 ' + PE.ownbed_filepath, shell=True)\n                    sep = ','\n                    if (len(s.decode('utf-8').strip().split('\\t')) > 1):\n                        sep = '\\t'\n                    elif (len(s.decode('utf-8').strip().split(';')) > 1):\n                        sep = ';'\n                    df = __pd.read_csv(PE.ownbed_filepath, sep=sep)\n                    if \"chrom\" not in list(df.columns) or \"chromStart\" not in list(\n                            df.columns) or \"chromEnd\" not in list(df.columns) or \"ploidy\" not in list(df.columns):\n                        raise ValueError(\n                            \"File \" + PE.ownbed_filepath + \" does not have one of the following columns: chrom, \"\n                                                          \"chromStart, chromEnd, ploidy.\")\n                    else:\n                        df = df[['chrom', 'chromStart', 'chromEnd', 'ploidy']]\n                        df.to_csv(PE.ownbed_filepath + '_im2format', sep='\\t', index=False)\n                        f.write(PE.ownbed_filepath + '_im2format\\t' + PE.bam_filename + '\\n')\n        elif sample_names != None and bed_filepaths != None:\n            if sample_names.__class__ != list:\n                raise TypeError(\n                    'Argument \"sample_names\" must be a list.')\n            elif bed_filepaths.__class__ != list:\n                raise TypeError(\n                    'Argument \"bed_filepaths\" must be a list.')\n            elif len(sample_names) != len(bed_filepaths):\n                raise ValueError('Arguments \"sample_names\" and \"bed_filepaths\" must have the same length.')\n            else:\n                for sn, bfp in zip(sample_names, bed_filepaths):\n                    if (not __os.path.isfile(bfp)):\n                        raise ValueError('File ' + bfp + ' does not exist.')\n                    else:\n                        s = __subprocess.check_output('head -1 ' + bfp, shell=True)\n                        sep = ','\n                        if len(s.decode('utf-8').strip().split('\\t')) > 1:\n                            sep = '\\t'\n                        elif len(s.decode('utf-8').strip().split(';')) > 1:\n                            sep = ';'\n                        df = __pd.read_csv(bfp, sep=sep)\n                        if (\"chrom\" not in list(df.columns) or \"chromStart\" not in list(\n                                df.columns) or \"chromEnd\" not in list(df.columns) or \"ploidy\" not in list(df.columns)):\n                            raise ValueError(\n                                \"File \" + bfp + \" does not have one of the following columns: chrom, chromStart, \"\n                                               \"chromEnd, ploidy.\")\n                        else:\n                            df = df[['chrom', 'chromStart', 'chromEnd', 'ploidy']]\n                            df.to_csv(bfp + '_im2format', sep='\\t', index=False)\n                            f.write(bfp + '_im2format\\t' + sn + '\\n')\n        elif sample_groups != None and group_bed_filepaths != None:\n            if sample_groups.__class__ != list:\n                raise TypeError(\n                    'Argument \"sample_groups\" must be a list of lists.')\n            for sg in sample_groups:\n                if sg.__class__ != list:\n                    raise TypeError(\n                        'Argument \"sample_groups\" must be a list of lists.')\n            if group_bed_filepaths.__class__ != list:\n                raise TypeError(\n                    'Argument \"group_bed_filepaths\" must be a list.')\n            elif len(sample_groups) != len(group_bed_filepaths):\n                raise ValueError('Arguments \"sample_groups\" and \"group_bed_filepaths\" must have the same length.')\n            else:\n                for sg, gbfp in zip(sample_groups, group_bed_filepaths):\n                    if not __os.path.isfile(gbfp):\n                        raise ValueError('File ' + gbfp + ' does not exist.')\n                    else:\n                        s = __subprocess.check_output('head -1 ' + gbfp, shell=True)\n                        sep = ','\n                        if len(s.decode('utf-8').strip().split('\\t')) > 1:\n                            sep = '\\t'\n                        elif len(s.decode('utf-8').strip().split(';')) > 1:\n                            sep = ';'\n                        df = __pd.read_csv(gbfp, sep=sep)\n                        if (\"chrom\" not in list(df.columns) or \"chromStart\" not in list(\n                                df.columns) or \"chromEnd\" not in list(df.columns) or \"ploidy\" not in list(df.columns)):\n                            raise ValueError(\n                                \"File \" + gbfp + \" does not have one of the following columns: chrom, chromStart, \"\n                                                \"chromEnd, ploidy.\")\n                        else:\n                            df = df[['chrom', 'chromStart', 'chromEnd', 'ploidy']]\n                            df.to_csv(gbfp + '_im2format', 
sep='\\t', index=False)\n f.write(gbfp + '_im2format\\t' + ', '.join(sg) + '\\n')\n"
] |
[
[
"pandas.read_csv",
"numpy.where",
"numpy.mean",
"pandas.DataFrame"
]
] |
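The isomut2py code above collapses per-position ploidy/LOH calls into BED-style ranges by finding change points with numpy.where and placing each breakpoint at the midpoint (numpy.mean) between the two flanking positions. A minimal, self-contained sketch of that change-point-to-ranges idea, on made-up arrays rather than real PE_fullchrom files:

import numpy as np

pos = np.array([100, 200, 300, 400, 500])
ploidy = np.array([2, 2, 3, 3, 2])
change = np.where(ploidy[:-1] != ploidy[1:])[0]  # indices where the label switches

start = 0
for idx in change:
    end = int(round(np.mean([pos[idx], pos[idx + 1]])))  # breakpoint at the midpoint
    print(start, end, ploidy[idx])
    start = end + 1
print(start, pos[-1], ploidy[-1])  # final range runs to the last position

This prints (0, 250, 2), (251, 450, 3) and (451, 500, 2); the real code closes the last range at the chromosome length from chrom_len_dict instead of pos[-1].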
czha5168/pytorch-semseg
|
[
"5cd845f1c911ab6ee4cbc6a3ce90f01229326fab"
] |
[
"ptsemseg/models/unet.py"
] |
[
"import torch.nn as nn\n\nfrom ptsemseg.models.utils import *\n\n\nclass unet(nn.Module):\n def __init__(\n self,\n feature_scale=4,\n n_classes=21,\n is_deconv=True,\n in_channels=3,\n is_batchnorm=True,\n ):\n super(unet, self).__init__()\n self.is_deconv = is_deconv\n self.in_channels = in_channels\n self.is_batchnorm = is_batchnorm\n self.feature_scale = feature_scale\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)\n self.maxpool1 = nn.MaxPool2d(kernel_size=2)\n\n self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)\n self.maxpool2 = nn.MaxPool2d(kernel_size=2)\n\n self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)\n self.maxpool3 = nn.MaxPool2d(kernel_size=2)\n\n self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm)\n self.maxpool4 = nn.MaxPool2d(kernel_size=2)\n\n self.center = unetConv2(filters[3], filters[4], self.is_batchnorm)\n\n # upsampling\n self.up_concat4 = unetUp(filters[4], filters[3], self.is_deconv)\n self.up_concat3 = unetUp(filters[3], filters[2], self.is_deconv)\n self.up_concat2 = unetUp(filters[2], filters[1], self.is_deconv)\n self.up_concat1 = unetUp(filters[1], filters[0], self.is_deconv)\n\n # final conv (without any concat)\n self.final = nn.Conv2d(filters[0], n_classes, 1)\n\n def forward(self, inputs):\n conv1 = self.conv1(inputs)\n maxpool1 = self.maxpool1(conv1)\n\n conv2 = self.conv2(maxpool1)\n maxpool2 = self.maxpool2(conv2)\n\n conv3 = self.conv3(maxpool2)\n maxpool3 = self.maxpool3(conv3)\n\n conv4 = self.conv4(maxpool3)\n maxpool4 = self.maxpool4(conv4)\n\n center = self.center(maxpool4)\n up4 = self.up_concat4(conv4, center)\n up3 = self.up_concat3(conv3, up4)\n up2 = self.up_concat2(conv2, up3)\n up1 = self.up_concat1(conv1, up2)\n\n final = self.final(up1)\n\n return final\n"
] |
[
[
"torch.nn.MaxPool2d",
"torch.nn.Conv2d"
]
] |
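The unet class above delegates its convolution blocks to unetConv2 and unetUp from ptsemseg.models.utils, which this record does not include. A hedged sketch of what such a double-conv block typically looks like (3x3 conv, optional batch norm, ReLU, twice); the actual ptsemseg helper may differ in detail:

import torch.nn as nn

class DoubleConv(nn.Module):
    # Two 3x3 convolutions with optional batch norm, as in typical U-Net blocks.
    def __init__(self, in_ch, out_ch, is_batchnorm=True):
        super().__init__()
        layers = []
        for ic, oc in ((in_ch, out_ch), (out_ch, out_ch)):
            layers.append(nn.Conv2d(ic, oc, kernel_size=3, padding=1))
            if is_batchnorm:
                layers.append(nn.BatchNorm2d(oc))
            layers.append(nn.ReLU(inplace=True))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)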
CompML/PRTS
|
[
"66db2ac5f83ac9182ce29dcd7afc0f678a678eed"
] |
[
"tests/test_precision.py"
] |
[
"import unittest\nimport numpy as np\n\nfrom prts.base.time_series_metrics import BaseTimeSeriesMetrics\nfrom prts.time_series_metrics.precision import TimeSeriesPrecision\nfrom prts import ts_precision\n\n\nclass TestPrecision(unittest.TestCase):\n def test_PrecisionClass_inherited_BaseTimeSeriesMetrics(self):\n \"\"\" Check if it inherits BaseTimeSeriesMetircs.\n \"\"\"\n obj = TimeSeriesPrecision()\n self.assertTrue(\n isinstance(obj, BaseTimeSeriesMetrics)\n )\n\n def test_PrecisionClass_init(self):\n \"\"\" Test of init function.\n \"\"\"\n\n test_case_1 = {'alpha': 0.0, 'cardinality': 'one', 'bias': 'flat'}\n test_case_2 = {'alpha': 0.0, 'cardinality': 'one', 'bias': None}\n test_case_3 = {'alpha': 10.0, 'cardinality': 'one', 'bias': 'flat'}\n\n # test of the normal call\n obj = TimeSeriesPrecision(**test_case_1)\n self.assertEqual(obj.alpha, test_case_1['alpha'])\n self.assertEqual(obj.cardinality, test_case_1['cardinality'])\n self.assertEqual(obj.bias, test_case_1['bias'])\n\n # test of the invalid bias\n with self.assertRaises(Exception):\n obj = TimeSeriesPrecision(**test_case_2)\n\n # test of the invalid alpha\n with self.assertRaises(Exception):\n obj = TimeSeriesPrecision(**test_case_3)\n\n def test_PrecisionClass_score(self):\n \"\"\"Test of score function.\n \"\"\"\n\n # test normal case\n real = np.array([1, 1, 0, 0, 0])\n pred = np.array([0, 1, 0, 0, 0])\n\n obj = TimeSeriesPrecision()\n\n score = obj.score(real, pred)\n self.assertEqual(score, 1.0)\n\n # test invalid inputs\n real = None\n pred = np.array([0, 1, 0, 0, 0])\n with self.assertRaises(Exception):\n score = obj.score(real, pred)\n\n real = np.array([1, 1, 0, 0, 0])\n pred = None\n with self.assertRaises(Exception):\n score = obj.score(real, pred)\n\n def test_PrecisionClass_update_precision(self):\n \"\"\"Test of _update_precision function.\n \"\"\"\n\n # test of the normal case\n real = np.array([1, 1, 0, 0, 0])\n pred = np.array([0, 1, 0, 0, 0])\n\n obj = TimeSeriesPrecision()\n real_anomalies, predicted_anomalies = obj._prepare_data(real, pred)\n\n score = obj._update_precision(real_anomalies, predicted_anomalies)\n self.assertEqual(score, 1.0)\n\n # test of the empty case\n empty_real = np.array([])\n empty_pred = np.array([])\n\n score = obj._update_precision(empty_real, empty_pred)\n self.assertEqual(score, 0.0)\n\n def test_precision_function(self):\n \"\"\"Teest of ts_precision function.\n \"\"\"\n\n # test case1\n real = np.array([1, 1, 0, 0, 0])\n pred = np.array([0, 1, 0, 0, 0])\n\n score = ts_precision(real, pred)\n self.assertEqual(score, 1.0)\n\n # test case2\n real = np.array([1, 1, 0, 0, 0])\n pred = np.array([0, 0, 1, 1, 1])\n\n score = ts_precision(real, pred)\n self.assertEqual(score, 0.0)\n\n def test_precision_function_with_list(self):\n \"\"\"Teet of ts_precision function with list type arguments.\n \"\"\"\n\n real = [1, 1, 0, 0, 0]\n pred = [0, 1, 0, 0, 0]\n\n score = ts_precision(real, pred)\n self.assertEqual(score, 1.0)\n\n def test_precision_function_with_invalid_alpha(self):\n \"\"\"Test of ts_precision function with invalid alpha\n \"\"\"\n\n real = np.array([1, 1, 0, 0, 0])\n pred = np.array([0, 1, 0, 0, 0])\n\n with self.assertRaises(Exception):\n ts_precision(real, pred, alpha=10)\n\n with self.assertRaises(Exception):\n ts_precision(real, pred, alpha=-1)\n\n def test_precision_function_with_invalid_bias(self):\n \"\"\"Test of ts_precision function with invalid bias\n \"\"\"\n\n real = np.array([1, 1, 0, 0, 0])\n pred = np.array([0, 1, 0, 0, 0])\n\n with 
self.assertRaises(Exception):\n            ts_precision(real, pred, bias=None)\n\n        with self.assertRaises(Exception):\n            ts_precision(real, pred, bias=\"Invalid\")\n\n    def test_precision_function_with_all_zeros(self):\n        \"\"\"Test of ts_precision function with all zero values\n        \"\"\"\n\n        real = np.array([0, 0, 0, 0, 0])\n        pred = np.array([0, 0, 0, 0, 0])\n\n        with self.assertRaises(Exception):\n            ts_precision(real, pred)\n\n    def test_precision_function_with_all_ones(self):\n        \"\"\"Test of ts_precision function with all one values\n        \"\"\"\n\n        real = np.array([1, 1, 1, 1, 1])\n        pred = np.array([1, 1, 1, 1, 1])\n\n        self.assertEqual(ts_precision(real, pred), 1.0)\n\n"
] |
[
[
"numpy.array"
]
] |
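The assertions above pin down concrete input/output pairs for ts_precision, so a minimal usage sketch follows directly from the test cases (expected scores as asserted in the tests):

import numpy as np
from prts import ts_precision

real = np.array([1, 1, 0, 0, 0])  # one real anomaly covering the first two points
print(ts_precision(real, np.array([0, 1, 0, 0, 0])))  # 1.0: the prediction falls inside it
print(ts_precision(real, np.array([0, 0, 1, 1, 1])))  # 0.0: the prediction misses it entirely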
jisunl23/wouldyouci
|
[
"1a896008487a979139596aa8aa8d108b740d95ef"
] |
[
"wouldyouci_back/utils/update_train_data.py"
] |
[
"import os\nimport surprise\nimport pymysql\nimport pandas as pd\nfrom decouple import config\n\n\ndef get_genre_info():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),\n password=config('PASSWORD'), db=config('DB'))\n sql = 'SELECT * FROM wouldyouci.movies_genre'\n\n result = pd.read_sql_query(sql, conn)\n\n path = os.path.join(BASE_DIR, 'genres.csv')\n result.to_csv(path, index=True)\n\n conn.close()\n\n\ndef get_genre_train_data():\n print('트레인 시작')\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),\n password=config('PASSWORD'), db=config('DB'))\n sql = 'SELECT * FROM wouldyouci.movies_movie_genres'\n\n movie_genres = pd.read_sql_query(sql, conn, index_col='movie_id')\n\n path = os.path.join(BASE_DIR, 'genres.csv')\n genres = pd.read_csv(path, index_col='id')\n\n movie_genres = movie_genres.drop('id', axis='columns')\n movie_genres['genre_id'] = movie_genres['genre_id'].apply(lambda x: genres.loc[x, 'name']+'|')\n movie_genres = movie_genres.groupby('movie_id').sum()\n\n genres_dummies = movie_genres['genre_id'].str.get_dummies(sep='|')\n\n path = os.path.join(BASE_DIR, 'genres_train.p')\n genres_dummies.to_pickle(path)\n\n conn.close()\n print('트레인 끝')\n\n\ndef get_movie_train_data():\n print('트레인 시작')\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),\n password=config('PASSWORD'), db=config('DB'))\n sql = 'SELECT * FROM wouldyouci.movies_movie_genres'\n sql2 = 'SELECT * FROM wouldyouci.movies_movie_directors'\n sql4 = 'SELECT * FROM wouldyouci.movies_people'\n\n movie_genres = pd.read_sql_query(sql, conn, index_col='movie_id')\n movie_directors = pd.read_sql_query(sql2, conn, index_col='movie_id')\n people = pd.read_sql_query(sql4, conn, index_col='id')\n\n path = os.path.join(BASE_DIR, 'genres.csv')\n genres = pd.read_csv(path, index_col='id')\n\n movie_genres = movie_genres.drop('id', axis='columns')\n movie_directors = movie_directors.drop('id', axis='columns')\n\n movie_genres['genre_id'] = movie_genres['genre_id'].apply(lambda x: genres.loc[x, 'name'] + '|')\n movie_directors['people_id'] = movie_directors['people_id'].apply(lambda x: people.loc[x, 'name'] + '|')\n\n movie_genres = movie_genres.groupby('movie_id').sum()\n movie_directors = movie_directors.groupby('movie_id').sum()\n\n genres_dummies = movie_genres['genre_id'].str.get_dummies(sep='|')\n people_dummies = movie_directors['people_id'].str.get_dummies(sep='|')\n\n train = people_dummies.merge(genres_dummies, on='movie_id')\n\n path = os.path.join(BASE_DIR, 'movie_director_train.p')\n train.to_pickle(path)\n\n conn.close()\n print('트레인 끝')\n\n\ndef recur_dictify(frame):\n if len(frame.columns) == 1:\n if frame.values.size == 1: return frame.values[0][0]\n return frame.values.squeeze()\n grouped = frame.groupby(frame.columns[0])\n d = {k: recur_dictify(g.iloc[:, 1:]) for k, g in grouped}\n return d\n\n\ndef KNN_train():\n print('트레인 시작')\n conn = pymysql.connect(host=config('HOST'), port=3306, user=config('USER'),\n password=config('PASSWORD'), db=config('DB'))\n sql = 'SELECT * FROM wouldyouci.accounts_rating'\n data = pd.read_sql_query(sql, conn)\n conn.close()\n df = data[['user_id', 'movie_id', 'score']]\n\n n1 = 5\n filter_movies = df['movie_id'].value_counts() >= n1\n filter_movies = filter_movies[filter_movies].index.tolist()\n\n n2 = 5\n filter_users = 
df['user_id'].value_counts() >= n2\n filter_users = filter_users[filter_users].index.tolist()\n\n df_new = df[df['movie_id'].isin(filter_movies) & df['user_id'].isin(filter_users)]\n\n df_to_dict = recur_dictify(df_new)\n\n user_list = []\n movie_set = set()\n\n for user in df_to_dict:\n user_list.append(user)\n\n for movie in df_to_dict[user]:\n movie_set.add(movie)\n\n movie_list = list(movie_set)\n\n rating_dic = {\n 'user_id': [],\n 'movie_id': [],\n 'score': []\n }\n\n for user in df_to_dict:\n for movie in df_to_dict[user]:\n u_index = user_list.index(user)\n m_index = movie_list.index(movie)\n score = df_to_dict[user][movie]\n\n rating_dic['user_id'].append(u_index)\n rating_dic['movie_id'].append(m_index)\n rating_dic['score'].append(score)\n\n df = pd.DataFrame(rating_dic)\n\n reader = surprise.Reader(rating_scale=(0.5, 5.0))\n\n col_list = ['user_id', 'movie_id', 'score']\n data = surprise.Dataset.load_from_df(df_new[col_list], reader)\n\n trainset = data.build_full_trainset()\n\n option = {'name': 'pearson'}\n algo = surprise.KNNBasic(sim_options=option)\n algo.fit(trainset)\n\n recommand_dic = {\n 'user_id': [],\n 'movie_id': [],\n }\n\n for user_key in df_new['user_id'].unique():\n index = user_list.index(user_key)\n result = algo.get_neighbors(index, k=5)\n recom_set = set()\n for i in result:\n max_rating = data.df[data.df['user_id'] == user_list[i]]['score'].max()\n recom_movies = data.df[(data.df['score'] == max_rating) & (data.df['user_id'] == user_list[i])][\n 'movie_id'].values\n for item in recom_movies:\n recom_set.add(item)\n\n for item in recom_set:\n recommand_dic['user_id'].append(user_key)\n recommand_dic['movie_id'].append(item)\n\n pickle = pd.DataFrame(recommand_dic)\n\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(BASE_DIR, 'KNN.p')\n\n pd.to_pickle(pickle, path)\n print('트레인 끝')\n"
] |
[
[
"pandas.to_pickle",
"pandas.read_sql_query",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
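recur_dictify above is the one non-obvious helper in that module: it recursively groups on the leftmost column, collapsing a flat ratings table into nested {user_id: {movie_id: score}} dicts. A self-contained illustration on a toy frame (the helper is copied from the record, lightly reformatted for readability):

import pandas as pd

def recur_dictify(frame):
    # Base case: a single remaining column yields a scalar or an array of values.
    if len(frame.columns) == 1:
        if frame.values.size == 1:
            return frame.values[0][0]
        return frame.values.squeeze()
    # Recursive case: group on the first column, dictify the rest.
    grouped = frame.groupby(frame.columns[0])
    return {k: recur_dictify(g.iloc[:, 1:]) for k, g in grouped}

df = pd.DataFrame({'user_id': [1, 1, 2],
                   'movie_id': [10, 20, 10],
                   'score': [4.5, 3.0, 5.0]})
print(recur_dictify(df))  # {1: {10: 4.5, 20: 3.0}, 2: {10: 5.0}}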
synwalk/synwalk-analysis
|
[
"964aa5c1e1ee5596227264ab2f804b50bc458799"
] |
[
"src/wrappers/metrics.py"
] |
[
"import networkx as nx\nimport numpy as np\nfrom clusim.clustering import Clustering\nfrom networkx import Graph\nfrom sklearn.metrics import adjusted_mutual_info_score\n\n\ndef ami_score(clu_file_pred, clu_file_true, graph_file=None):\n \"\"\"Compute adjusted mutual (AMI) information between two clusterings.\n\n This method computes the AMI using the scikit-learn method 'adjusted_mutual_info_score',\n because it is significantly faster than it's clusim counterpart.\n\n Parameters\n ----------\n clu_file_pred : str\n Path to clusim Clustering file (.json) containing the predicted clustering.\n clu_file_true : str\n Path to clusim Clustering file (.json) containing the ground truth clustering.\n graph_file : str\n Interface placeholder. Not used.\n\n Returns\n ------\n float\n Adjusted mutual information between the two input clusterings.\n \"\"\"\n clu_pred = Clustering().load(clu_file_pred)\n clu_true = Clustering().load(clu_file_true)\n labels_pred = clu_pred.to_membership_list()\n labels_true = clu_true.to_membership_list()\n return adjusted_mutual_info_score(labels_pred, labels_true, average_method='arithmetic')\n\n\ndef synwalk_error(clu_file_pred, clu_file_true, graph_file):\n \"\"\"Compute the difference in synwalk objective for two clusterings.\n\n This method computes the AMI using the scikit-learn method 'adjusted_mutual_info_score',\n because it is significantly faster than it's clusim counterpart.\n\n Parameters\n ----------\n clu_file_pred : str\n Path to clusim Clustering file (.json) containing the predicted clustering.\n clu_file_true : str\n Path to clusim Clustering file (.json) containing the ground truth clustering.\n graph_file : str\n Path to the underlying graph file in edge list format.\n\n Returns\n ------\n float\n Relative error in synwalk objective of the predicted clustering w.r.t ground truth.\n \"\"\"\n # load graph and clusterings\n clu_pred = Clustering().load(clu_file_pred)\n clu_true = Clustering().load(clu_file_true)\n graph = nx.read_edgelist(graph_file, nodetype=int)\n\n node_flows, node_transitions = compute_node_distributions(graph)\n objective_pred = synwalk_objective(graph, clu_pred, node_flows, node_transitions)\n objective_true = synwalk_objective(graph, clu_true, node_flows, node_transitions)\n return objective_pred / objective_true - 1.0\n\n\ndef plogq(p, q):\n \"\"\"Compute p * log(q) where log has base 2.\n\n Edge case: 0 * log(0) = 0\n\n Parameters\n ----------\n p : float\n First function argument.\n q : float\n Second function argument.\n\n Returns\n ------\n float\n p * log(q)\n \"\"\"\n if p < 1e-18:\n return 0.0\n\n if q < 1e-18:\n print(f'Unexpected zero operand in plogq: p={p}, q={q}\\n.')\n return -np.inf\n\n return p * np.log2(q)\n\n\ndef plogp(p):\n \"\"\"Compute p * log(p) where log has base 2.\n\n Edge case: 0 * log(0) = 0\n\n Parameters\n ----------\n p : float\n Function argument.\n\n Returns\n ------\n float\n p * log(p)\n \"\"\"\n return plogq(p, p)\n\n\ndef compute_node_distributions(graph: Graph):\n \"\"\"Compute the stationary distribution and transition probabilities over nodes for a given graph.\n\n Parameters\n ----------\n graph : Graph\n A networkx graph object.\n\n Returns\n ------\n numpy array, numpy array\n The stationary distribution over nodes and the transition probability matrix.\n \"\"\"\n # stationary distribution over nodes\n p = np.fromiter(nx.pagerank_scipy(graph, alpha=0.99).values(), dtype=float)\n # transition probability matrix\n P = nx.google_matrix(graph, alpha=0.99, nodelist=sorted(graph))\n return 
p, P\n\n\ndef synwalk_objective(graph: Graph, clu: Clustering, node_flows=None, node_transitions=None):\n \"\"\"Compute the synwalk objective for a given graph and clustering.\n\n Parameters\n ----------\n graph : Graph\n A networkx Graph object.\n clu : Clustering\n A clusim Clustering object.\n node_flows: numpy array\n The stationary distribution over nodes. Computed from graph if None.\n node_transitions: numpy array\n The transition probability matrix of the graph. Computed from graph if None.\n\n Returns\n ------\n float\n The resulting synwalk objective.\n \"\"\"\n # compute node distributions if not given\n if node_flows is None:\n # stationary distribution over nodes\n node_flows = np.fromiter(nx.pagerank_scipy(graph, alpha=0.99).values(), dtype=float)\n if node_transitions is None:\n # transition probability matrix\n node_transitions = nx.google_matrix(graph, alpha=0.99, nodelist=sorted(graph))\n\n # compute module distributions\n membership_list = clu.to_membership_list()\n module_flows = np.zeros((clu.n_clusters,)) # stationary distribution over modules\n module_stay_flows = np.zeros((clu.n_clusters,)) # joint probabilities for staying within a specific module\n for node in graph:\n module_idx = membership_list[node]\n module_flows[module_idx] += node_flows[node]\n for neighbor in graph.neighbors(node):\n if membership_list[neighbor] == membership_list[node]:\n module_stay_flows[module_idx] += node_flows[node] * node_transitions[node, neighbor]\n\n # compute synwalk objective\n objective = 0.0\n for module_flow, module_stay in zip(module_flows, module_stay_flows):\n # check corner cases\n epsilon = 1e-18 # vicinity threshold for numerical stability\n if (module_flow <= epsilon) or (module_flow + epsilon >= 1.0):\n continue\n\n module_exit = module_flow - module_stay # joint probability of leaving a specific module\n objective += plogp(module_stay) \\\n - 2.0 * plogq(module_stay, module_flow) \\\n + plogp(module_exit) \\\n - plogq(module_exit, module_flow) \\\n - plogq(module_exit, 1.0 - module_flow)\n\n return objective\n"
] |
[
[
"numpy.log2",
"numpy.zeros",
"sklearn.metrics.adjusted_mutual_info_score"
]
] |
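ami_score above ultimately reduces to scikit-learn's adjusted_mutual_info_score applied to two membership lists; a minimal check that identical partitions score 1.0 even when the labels are permuted:

from sklearn.metrics import adjusted_mutual_info_score

labels_true = [0, 0, 1, 1]
labels_pred = [1, 1, 0, 0]  # the same partition with swapped labels
print(adjusted_mutual_info_score(labels_pred, labels_true,
                                 average_method='arithmetic'))  # 1.0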
josibake/pandas
|
[
"02675e3376fea7d3cd484ace874d676c142cf003"
] |
[
"pandas/util/testing.py"
] |
[
"import bz2\nfrom collections import Counter\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom functools import wraps\nimport gzip\nimport os\nimport re\nfrom shutil import rmtree\nimport string\nimport tempfile\nfrom typing import Union, cast\nimport warnings\nimport zipfile\n\nimport numpy as np\nfrom numpy.random import rand, randn\n\nfrom pandas._config.localization import ( # noqa:F401\n can_set_locale,\n get_locales,\n set_locale,\n)\n\nimport pandas._libs.testing as _testing\nfrom pandas.compat import _get_lzma_file, _import_lzma, raise_with_traceback\n\nfrom pandas.core.dtypes.common import (\n is_bool,\n is_categorical_dtype,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_extension_array_dtype,\n is_interval_dtype,\n is_list_like,\n is_number,\n is_period_dtype,\n is_sequence,\n is_timedelta64_dtype,\n needs_i8_conversion,\n)\nfrom pandas.core.dtypes.missing import array_equivalent\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n DatetimeIndex,\n Index,\n IntervalIndex,\n MultiIndex,\n RangeIndex,\n Series,\n bdate_range,\n)\nfrom pandas.core.algorithms import take_1d\nfrom pandas.core.arrays import (\n DatetimeArray,\n ExtensionArray,\n IntervalArray,\n PeriodArray,\n TimedeltaArray,\n period_array,\n)\n\nfrom pandas.io.common import urlopen\nfrom pandas.io.formats.printing import pprint_thing\n\nlzma = _import_lzma()\n\nN = 30\nK = 4\n_RAISE_NETWORK_ERROR_DEFAULT = False\n\n# set testing_mode\n_testing_mode_warnings = (DeprecationWarning, ResourceWarning)\n\n\ndef set_testing_mode():\n # set the testing mode filters\n testing_mode = os.environ.get(\"PANDAS_TESTING_MODE\", \"None\")\n if \"deprecate\" in testing_mode:\n warnings.simplefilter(\"always\", _testing_mode_warnings)\n\n\ndef reset_testing_mode():\n # reset the testing mode filters\n testing_mode = os.environ.get(\"PANDAS_TESTING_MODE\", \"None\")\n if \"deprecate\" in testing_mode:\n warnings.simplefilter(\"ignore\", _testing_mode_warnings)\n\n\nset_testing_mode()\n\n\ndef reset_display_options():\n \"\"\"\n Reset the display options for printing and representing objects.\n \"\"\"\n\n pd.reset_option(\"^display.\", silent=True)\n\n\ndef round_trip_pickle(obj, path=None):\n \"\"\"\n Pickle an object and then read it again.\n\n Parameters\n ----------\n obj : pandas object\n The object to pickle and then re-read.\n path : str, default None\n The path where the pickled object is written and then read.\n\n Returns\n -------\n round_trip_pickled_object : pandas object\n The original object that was pickled and then re-read.\n \"\"\"\n\n if path is None:\n path = \"__{random_bytes}__.pickle\".format(random_bytes=rands(10))\n with ensure_clean(path) as path:\n pd.to_pickle(obj, path)\n return pd.read_pickle(path)\n\n\ndef round_trip_pathlib(writer, reader, path=None):\n \"\"\"\n Write an object to file specified by a pathlib.Path and read it back\n\n Parameters\n ----------\n writer : callable bound to pandas object\n IO writing function (e.g. DataFrame.to_csv )\n reader : callable\n IO reading function (e.g. 
pd.read_csv )\n path : str, default None\n The path where the object is written and then read.\n\n Returns\n -------\n round_trip_object : pandas object\n The original object that was serialized and then re-read.\n \"\"\"\n\n import pytest\n\n Path = pytest.importorskip(\"pathlib\").Path\n if path is None:\n path = \"___pathlib___\"\n with ensure_clean(path) as path:\n writer(Path(path))\n obj = reader(Path(path))\n return obj\n\n\ndef round_trip_localpath(writer, reader, path=None):\n \"\"\"\n Write an object to file specified by a py.path LocalPath and read it back\n\n Parameters\n ----------\n writer : callable bound to pandas object\n IO writing function (e.g. DataFrame.to_csv )\n reader : callable\n IO reading function (e.g. pd.read_csv )\n path : str, default None\n The path where the object is written and then read.\n\n Returns\n -------\n round_trip_object : pandas object\n The original object that was serialized and then re-read.\n \"\"\"\n import pytest\n\n LocalPath = pytest.importorskip(\"py.path\").local\n if path is None:\n path = \"___localpath___\"\n with ensure_clean(path) as path:\n writer(LocalPath(path))\n obj = reader(LocalPath(path))\n return obj\n\n\n@contextmanager\ndef decompress_file(path, compression):\n \"\"\"\n Open a compressed file and return a file object\n\n Parameters\n ----------\n path : str\n The path where the file is read from\n\n compression : {'gzip', 'bz2', 'zip', 'xz', None}\n Name of the decompression to use\n\n Returns\n -------\n f : file object\n \"\"\"\n\n if compression is None:\n f = open(path, \"rb\")\n elif compression == \"gzip\":\n f = gzip.open(path, \"rb\")\n elif compression == \"bz2\":\n f = bz2.BZ2File(path, \"rb\")\n elif compression == \"xz\":\n f = _get_lzma_file(lzma)(path, \"rb\")\n elif compression == \"zip\":\n zip_file = zipfile.ZipFile(path)\n zip_names = zip_file.namelist()\n if len(zip_names) == 1:\n f = zip_file.open(zip_names.pop())\n else:\n raise ValueError(\"ZIP file {} error. 
Only one file per ZIP.\".format(path))\n else:\n msg = \"Unrecognized compression type: {}\".format(compression)\n raise ValueError(msg)\n\n try:\n yield f\n finally:\n f.close()\n if compression == \"zip\":\n zip_file.close()\n\n\ndef write_to_compressed(compression, path, data, dest=\"test\"):\n \"\"\"\n Write data to a compressed file.\n\n Parameters\n ----------\n compression : {'gzip', 'bz2', 'zip', 'xz'}\n The compression type to use.\n path : str\n The file path to write the data.\n data : str\n The data to write.\n dest : str, default \"test\"\n The destination file (for ZIP only)\n\n Raises\n ------\n ValueError : An invalid compression value was passed in.\n \"\"\"\n\n if compression == \"zip\":\n import zipfile\n\n compress_method = zipfile.ZipFile\n elif compression == \"gzip\":\n import gzip\n\n compress_method = gzip.GzipFile\n elif compression == \"bz2\":\n import bz2\n\n compress_method = bz2.BZ2File\n elif compression == \"xz\":\n compress_method = _get_lzma_file(lzma)\n else:\n msg = \"Unrecognized compression type: {}\".format(compression)\n raise ValueError(msg)\n\n if compression == \"zip\":\n mode = \"w\"\n args = (dest, data)\n method = \"writestr\"\n else:\n mode = \"wb\"\n args = (data,)\n method = \"write\"\n\n with compress_method(path, mode=mode) as f:\n getattr(f, method)(*args)\n\n\ndef assert_almost_equal(\n left, right, check_dtype=\"equiv\", check_less_precise=False, **kwargs\n):\n \"\"\"\n Check that the left and right objects are approximately equal.\n\n By approximately equal, we refer to objects that are numbers or that\n contain numbers which may be equivalent to specific levels of precision.\n\n Parameters\n ----------\n left : object\n right : object\n check_dtype : bool / string {'equiv'}, default 'equiv'\n Check dtype if both a and b are the same type. If 'equiv' is passed in,\n then `RangeIndex` and `Int64Index` are also considered equivalent\n when doing type checking.\n check_less_precise : bool or int, default False\n Specify comparison precision. 5 digits (False) or 3 digits (True)\n after decimal points are compared. If int, then specify the number\n of digits to compare.\n\n When comparing two numbers, if the first number has magnitude less\n than 1e-5, we compare the two numbers directly and check whether\n they are equivalent within the specified precision. 
Otherwise, we\n compare the **ratio** of the second number to the first number and\n check whether it is equivalent to 1 within the specified precision.\n \"\"\"\n\n if isinstance(left, pd.Index):\n assert_index_equal(\n left,\n right,\n check_exact=False,\n exact=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs\n )\n\n elif isinstance(left, pd.Series):\n assert_series_equal(\n left,\n right,\n check_exact=False,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs\n )\n\n elif isinstance(left, pd.DataFrame):\n assert_frame_equal(\n left,\n right,\n check_exact=False,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs\n )\n\n else:\n # Other sequences.\n if check_dtype:\n if is_number(left) and is_number(right):\n # Do not compare numeric classes, like np.float64 and float.\n pass\n elif is_bool(left) and is_bool(right):\n # Do not compare bool classes, like np.bool_ and bool.\n pass\n else:\n if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):\n obj = \"numpy array\"\n else:\n obj = \"Input\"\n assert_class_equal(left, right, obj=obj)\n _testing.assert_almost_equal(\n left,\n right,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs\n )\n\n\ndef _check_isinstance(left, right, cls):\n \"\"\"\n Helper method for our assert_* methods that ensures that\n the two objects being compared have the right type before\n proceeding with the comparison.\n\n Parameters\n ----------\n left : The first object being compared.\n right : The second object being compared.\n cls : The class type to check against.\n\n Raises\n ------\n AssertionError : Either `left` or `right` is not an instance of `cls`.\n \"\"\"\n\n err_msg = \"{name} Expected type {exp_type}, found {act_type} instead\"\n cls_name = cls.__name__\n\n if not isinstance(left, cls):\n raise AssertionError(\n err_msg.format(name=cls_name, exp_type=cls, act_type=type(left))\n )\n if not isinstance(right, cls):\n raise AssertionError(\n err_msg.format(name=cls_name, exp_type=cls, act_type=type(right))\n )\n\n\ndef assert_dict_equal(left, right, compare_keys=True):\n\n _check_isinstance(left, right, dict)\n _testing.assert_dict_equal(left, right, compare_keys=compare_keys)\n\n\ndef randbool(size=(), p=0.5):\n return rand(*size) <= p\n\n\nRANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))\nRANDU_CHARS = np.array(\n list(\"\".join(map(chr, range(1488, 1488 + 26))) + string.digits),\n dtype=(np.unicode_, 1),\n)\n\n\ndef rands_array(nchars, size, dtype=\"O\"):\n \"\"\"Generate an array of byte strings.\"\"\"\n retval = (\n np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))\n .view((np.str_, nchars))\n .reshape(size)\n )\n if dtype is None:\n return retval\n else:\n return retval.astype(dtype)\n\n\ndef randu_array(nchars, size, dtype=\"O\"):\n \"\"\"Generate an array of unicode strings.\"\"\"\n retval = (\n np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))\n .view((np.unicode_, nchars))\n .reshape(size)\n )\n if dtype is None:\n return retval\n else:\n return retval.astype(dtype)\n\n\ndef rands(nchars):\n \"\"\"\n Generate one random byte string.\n\n See `rands_array` if you want to create an array of random strings.\n\n \"\"\"\n return \"\".join(np.random.choice(RANDS_CHARS, nchars))\n\n\ndef randu(nchars):\n \"\"\"\n Generate one random unicode string.\n\n See `randu_array` if you want to create an array of random unicode strings.\n\n \"\"\"\n return \"\".join(np.random.choice(RANDU_CHARS, 
nchars))\n\n\ndef close(fignum=None):\n from matplotlib.pyplot import get_fignums, close as _close\n\n if fignum is None:\n for fignum in get_fignums():\n _close(fignum)\n else:\n _close(fignum)\n\n\n# -----------------------------------------------------------------------------\n# contextmanager to ensure the file cleanup\n\n\n@contextmanager\ndef ensure_clean(filename=None, return_filelike=False):\n \"\"\"Gets a temporary path and agrees to remove on close.\n\n Parameters\n ----------\n filename : str (optional)\n if None, creates a temporary file which is then removed when out of\n scope. if passed, creates temporary file with filename as ending.\n return_filelike : bool (default False)\n if True, returns a file-like which is *always* cleaned. Necessary for\n savefig and other functions which want to append extensions.\n \"\"\"\n filename = filename or \"\"\n fd = None\n\n if return_filelike:\n f = tempfile.TemporaryFile(suffix=filename)\n try:\n yield f\n finally:\n f.close()\n else:\n # don't generate tempfile if using a path with directory specified\n if len(os.path.dirname(filename)):\n raise ValueError(\"Can't pass a qualified name to ensure_clean()\")\n\n try:\n fd, filename = tempfile.mkstemp(suffix=filename)\n except UnicodeEncodeError:\n import pytest\n\n pytest.skip(\"no unicode file names on this system\")\n\n try:\n yield filename\n finally:\n try:\n os.close(fd)\n except OSError:\n print(\n \"Couldn't close file descriptor: {fdesc} (file: {fname})\".format(\n fdesc=fd, fname=filename\n )\n )\n try:\n if os.path.exists(filename):\n os.remove(filename)\n except OSError as e:\n print(\"Exception on removing file: {error}\".format(error=e))\n\n\n@contextmanager\ndef ensure_clean_dir():\n \"\"\"\n Get a temporary directory path and agrees to remove on close.\n\n Yields\n ------\n Temporary directory path\n \"\"\"\n directory_name = tempfile.mkdtemp(suffix=\"\")\n try:\n yield directory_name\n finally:\n try:\n rmtree(directory_name)\n except OSError:\n pass\n\n\n@contextmanager\ndef ensure_safe_environment_variables():\n \"\"\"\n Get a context manager to safely set environment variables\n\n All changes will be undone on close, hence environment variables set\n within this contextmanager will neither persist nor change global state.\n \"\"\"\n saved_environ = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(saved_environ)\n\n\n# -----------------------------------------------------------------------------\n# Comparators\n\n\ndef equalContents(arr1, arr2):\n \"\"\"Checks if the set of unique elements of arr1 and arr2 are equivalent.\n \"\"\"\n return frozenset(arr1) == frozenset(arr2)\n\n\ndef assert_index_equal(\n left: Index,\n right: Index,\n exact: Union[bool, str] = \"equiv\",\n check_names: bool = True,\n check_less_precise: Union[bool, int] = False,\n check_exact: bool = True,\n check_categorical: bool = True,\n obj: str = \"Index\",\n) -> None:\n \"\"\"\n Check that left and right Index are equal.\n\n Parameters\n ----------\n left : Index\n right : Index\n exact : bool / string {'equiv'}, default 'equiv'\n Whether to check the Index class, dtype and inferred_type\n are identical. If 'equiv', then RangeIndex can be substituted for\n Int64Index as well.\n check_names : bool, default True\n Whether to check the names attribute.\n check_less_precise : bool or int, default False\n Specify comparison precision. 
Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_exact : bool, default True\n Whether to compare number exactly.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n obj : str, default 'Index'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n __tracebackhide__ = True\n\n def _check_types(l, r, obj=\"Index\"):\n if exact:\n assert_class_equal(l, r, exact=exact, obj=obj)\n\n # Skip exact dtype checking when `check_categorical` is False\n if check_categorical:\n assert_attr_equal(\"dtype\", l, r, obj=obj)\n\n # allow string-like to have different inferred_types\n if l.inferred_type in (\"string\", \"unicode\"):\n assert r.inferred_type in (\"string\", \"unicode\")\n else:\n assert_attr_equal(\"inferred_type\", l, r, obj=obj)\n\n def _get_ilevel_values(index, level):\n # accept level number only\n unique = index.levels[level]\n labels = index.codes[level]\n filled = take_1d(unique.values, labels, fill_value=unique._na_value)\n values = unique._shallow_copy(filled, name=index.names[level])\n return values\n\n # instance validation\n _check_isinstance(left, right, Index)\n\n # class / dtype comparison\n _check_types(left, right, obj=obj)\n\n # level comparison\n if left.nlevels != right.nlevels:\n msg1 = \"{obj} levels are different\".format(obj=obj)\n msg2 = \"{nlevels}, {left}\".format(nlevels=left.nlevels, left=left)\n msg3 = \"{nlevels}, {right}\".format(nlevels=right.nlevels, right=right)\n raise_assert_detail(obj, msg1, msg2, msg3)\n\n # length comparison\n if len(left) != len(right):\n msg1 = \"{obj} length are different\".format(obj=obj)\n msg2 = \"{length}, {left}\".format(length=len(left), left=left)\n msg3 = \"{length}, {right}\".format(length=len(right), right=right)\n raise_assert_detail(obj, msg1, msg2, msg3)\n\n # MultiIndex special comparison for little-friendly error messages\n if left.nlevels > 1:\n left = cast(MultiIndex, left)\n right = cast(MultiIndex, right)\n\n for level in range(left.nlevels):\n # cannot use get_level_values here because it can change dtype\n llevel = _get_ilevel_values(left, level)\n rlevel = _get_ilevel_values(right, level)\n\n lobj = \"MultiIndex level [{level}]\".format(level=level)\n assert_index_equal(\n llevel,\n rlevel,\n exact=exact,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n obj=lobj,\n )\n # get_level_values may change dtype\n _check_types(left.levels[level], right.levels[level], obj=obj)\n\n # skip exact index checking when `check_categorical` is False\n if check_exact and check_categorical:\n if not left.equals(right):\n diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)\n msg = \"{obj} values are different ({pct} %)\".format(\n obj=obj, pct=np.round(diff, 5)\n )\n raise_assert_detail(obj, msg, left, right)\n else:\n _testing.assert_almost_equal(\n left.values,\n right.values,\n check_less_precise=check_less_precise,\n check_dtype=exact,\n obj=obj,\n lobj=left,\n robj=right,\n )\n\n # metadata comparison\n if check_names:\n assert_attr_equal(\"names\", left, right, obj=obj)\n if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):\n assert_attr_equal(\"freq\", left, right, obj=obj)\n if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):\n assert_interval_array_equal(left.values, right.values)\n\n if 
check_categorical:\n if is_categorical_dtype(left) or is_categorical_dtype(right):\n assert_categorical_equal(\n left.values, right.values, obj=\"{obj} category\".format(obj=obj)\n )\n\n\ndef assert_class_equal(left, right, exact=True, obj=\"Input\"):\n \"\"\"checks classes are equal.\"\"\"\n __tracebackhide__ = True\n\n def repr_class(x):\n if isinstance(x, Index):\n # return Index as it is to include values in the error message\n return x\n\n try:\n return x.__class__.__name__\n except AttributeError:\n return repr(type(x))\n\n if exact == \"equiv\":\n if type(left) != type(right):\n # allow equivalence of Int64Index/RangeIndex\n types = {type(left).__name__, type(right).__name__}\n if len(types - {\"Int64Index\", \"RangeIndex\"}):\n msg = \"{obj} classes are not equivalent\".format(obj=obj)\n raise_assert_detail(obj, msg, repr_class(left), repr_class(right))\n elif exact:\n if type(left) != type(right):\n msg = \"{obj} classes are different\".format(obj=obj)\n raise_assert_detail(obj, msg, repr_class(left), repr_class(right))\n\n\ndef assert_attr_equal(attr, left, right, obj=\"Attributes\"):\n \"\"\"checks attributes are equal. Both objects must have attribute.\n\n Parameters\n ----------\n attr : str\n Attribute name being compared.\n left : object\n right : object\n obj : str, default 'Attributes'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n __tracebackhide__ = True\n\n left_attr = getattr(left, attr)\n right_attr = getattr(right, attr)\n\n if left_attr is right_attr:\n return True\n elif (\n is_number(left_attr)\n and np.isnan(left_attr)\n and is_number(right_attr)\n and np.isnan(right_attr)\n ):\n # np.nan\n return True\n\n try:\n result = left_attr == right_attr\n except TypeError:\n # datetimetz on rhs may raise TypeError\n result = False\n if not isinstance(result, bool):\n result = result.all()\n\n if result:\n return True\n else:\n msg = 'Attribute \"{attr}\" are different'.format(attr=attr)\n raise_assert_detail(obj, msg, left_attr, right_attr)\n\n\ndef assert_is_valid_plot_return_object(objs):\n import matplotlib.pyplot as plt\n\n if isinstance(objs, (pd.Series, np.ndarray)):\n for el in objs.ravel():\n msg = (\n \"one of 'objs' is not a matplotlib Axes instance, type \"\n \"encountered {name!r}\"\n ).format(name=el.__class__.__name__)\n assert isinstance(el, (plt.Axes, dict)), msg\n else:\n assert isinstance(objs, (plt.Artist, tuple, dict)), (\n \"objs is neither an ndarray of Artist instances nor a \"\n 'single Artist instance, tuple, or dict, \"objs\" is a {name!r}'.format(\n name=objs.__class__.__name__\n )\n )\n\n\ndef isiterable(obj):\n return hasattr(obj, \"__iter__\")\n\n\ndef assert_is_sorted(seq):\n \"\"\"Assert that the sequence is sorted.\"\"\"\n if isinstance(seq, (Index, Series)):\n seq = seq.values\n # sorting does not change precisions\n assert_numpy_array_equal(seq, np.sort(np.array(seq)))\n\n\ndef assert_categorical_equal(\n left, right, check_dtype=True, check_category_order=True, obj=\"Categorical\"\n):\n \"\"\"Test that Categoricals are equivalent.\n\n Parameters\n ----------\n left : Categorical\n right : Categorical\n check_dtype : bool, default True\n Check that integer dtype of the codes are the same\n check_category_order : bool, default True\n Whether the order of the categories should be compared, which\n implies identical integer codes. If False, only the resulting\n values are compared. 
The ordered attribute is\n checked regardless.\n obj : str, default 'Categorical'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n _check_isinstance(left, right, Categorical)\n\n if check_category_order:\n assert_index_equal(\n left.categories, right.categories, obj=\"{obj}.categories\".format(obj=obj)\n )\n assert_numpy_array_equal(\n left.codes,\n right.codes,\n check_dtype=check_dtype,\n obj=\"{obj}.codes\".format(obj=obj),\n )\n else:\n assert_index_equal(\n left.categories.sort_values(),\n right.categories.sort_values(),\n obj=\"{obj}.categories\".format(obj=obj),\n )\n assert_index_equal(\n left.categories.take(left.codes),\n right.categories.take(right.codes),\n obj=\"{obj}.values\".format(obj=obj),\n )\n\n assert_attr_equal(\"ordered\", left, right, obj=obj)\n\n\ndef assert_interval_array_equal(left, right, exact=\"equiv\", obj=\"IntervalArray\"):\n \"\"\"Test that two IntervalArrays are equivalent.\n\n Parameters\n ----------\n left, right : IntervalArray\n The IntervalArrays to compare.\n exact : bool / string {'equiv'}, default 'equiv'\n Whether to check the Index class, dtype and inferred_type\n are identical. If 'equiv', then RangeIndex can be substituted for\n Int64Index as well.\n obj : str, default 'IntervalArray'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n _check_isinstance(left, right, IntervalArray)\n\n assert_index_equal(\n left.left, right.left, exact=exact, obj=\"{obj}.left\".format(obj=obj)\n )\n assert_index_equal(\n left.right, right.right, exact=exact, obj=\"{obj}.left\".format(obj=obj)\n )\n assert_attr_equal(\"closed\", left, right, obj=obj)\n\n\ndef assert_period_array_equal(left, right, obj=\"PeriodArray\"):\n _check_isinstance(left, right, PeriodArray)\n\n assert_numpy_array_equal(\n left._data, right._data, obj=\"{obj}.values\".format(obj=obj)\n )\n assert_attr_equal(\"freq\", left, right, obj=obj)\n\n\ndef assert_datetime_array_equal(left, right, obj=\"DatetimeArray\"):\n __tracebackhide__ = True\n _check_isinstance(left, right, DatetimeArray)\n\n assert_numpy_array_equal(left._data, right._data, obj=\"{obj}._data\".format(obj=obj))\n assert_attr_equal(\"freq\", left, right, obj=obj)\n assert_attr_equal(\"tz\", left, right, obj=obj)\n\n\ndef assert_timedelta_array_equal(left, right, obj=\"TimedeltaArray\"):\n __tracebackhide__ = True\n _check_isinstance(left, right, TimedeltaArray)\n assert_numpy_array_equal(left._data, right._data, obj=\"{obj}._data\".format(obj=obj))\n assert_attr_equal(\"freq\", left, right, obj=obj)\n\n\ndef raise_assert_detail(obj, message, left, right, diff=None):\n __tracebackhide__ = True\n\n if isinstance(left, np.ndarray):\n left = pprint_thing(left)\n elif is_categorical_dtype(left):\n left = repr(left)\n\n if isinstance(right, np.ndarray):\n right = pprint_thing(right)\n elif is_categorical_dtype(right):\n right = repr(right)\n\n msg = \"\"\"{obj} are different\n\n{message}\n[left]: {left}\n[right]: {right}\"\"\".format(\n obj=obj, message=message, left=left, right=right\n )\n\n if diff is not None:\n msg += \"\\n[diff]: {diff}\".format(diff=diff)\n\n raise AssertionError(msg)\n\n\ndef assert_numpy_array_equal(\n left,\n right,\n strict_nan=False,\n check_dtype=True,\n err_msg=None,\n check_same=None,\n obj=\"numpy array\",\n):\n \"\"\" Checks that 'np.ndarray' is equivalent\n\n Parameters\n ----------\n left : np.ndarray or iterable\n right : np.ndarray or iterable\n strict_nan : bool, default False\n If True, 
consider NaN and None to be different.\n check_dtype: bool, default True\n check dtype if both a and b are np.ndarray\n err_msg : str, default None\n If provided, used as assertion message\n check_same : None|'copy'|'same', default None\n Ensure left and right refer/do not refer to the same memory area\n obj : str, default 'numpy array'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n __tracebackhide__ = True\n\n # instance validation\n # Show a detailed error message when classes are different\n assert_class_equal(left, right, obj=obj)\n # both classes must be an np.ndarray\n _check_isinstance(left, right, np.ndarray)\n\n def _get_base(obj):\n return obj.base if getattr(obj, \"base\", None) is not None else obj\n\n left_base = _get_base(left)\n right_base = _get_base(right)\n\n if check_same == \"same\":\n if left_base is not right_base:\n msg = \"{left!r} is not {right!r}\".format(left=left_base, right=right_base)\n raise AssertionError(msg)\n elif check_same == \"copy\":\n if left_base is right_base:\n msg = \"{left!r} is {right!r}\".format(left=left_base, right=right_base)\n raise AssertionError(msg)\n\n def _raise(left, right, err_msg):\n if err_msg is None:\n if left.shape != right.shape:\n raise_assert_detail(\n obj,\n \"{obj} shapes are different\".format(obj=obj),\n left.shape,\n right.shape,\n )\n\n diff = 0\n for l, r in zip(left, right):\n # count up differences\n if not array_equivalent(l, r, strict_nan=strict_nan):\n diff += 1\n\n diff = diff * 100.0 / left.size\n msg = \"{obj} values are different ({pct} %)\".format(\n obj=obj, pct=np.round(diff, 5)\n )\n raise_assert_detail(obj, msg, left, right)\n\n raise AssertionError(err_msg)\n\n # compare shape and values\n if not array_equivalent(left, right, strict_nan=strict_nan):\n _raise(left, right, err_msg)\n\n if check_dtype:\n if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):\n assert_attr_equal(\"dtype\", left, right, obj=obj)\n\n\ndef assert_extension_array_equal(\n left, right, check_dtype=True, check_less_precise=False, check_exact=False\n):\n \"\"\"Check that left and right ExtensionArrays are equal.\n\n Parameters\n ----------\n left, right : ExtensionArray\n The two arrays to compare\n check_dtype : bool, default True\n Whether to check if the ExtensionArray dtypes are identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. 
Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare.\n check_exact : bool, default False\n Whether to compare number exactly.\n\n Notes\n -----\n Missing values are checked separately from valid values.\n A mask of missing values is computed for each and checked to match.\n The remaining all-valid values are cast to object dtype and checked.\n \"\"\"\n assert isinstance(left, ExtensionArray), \"left is not an ExtensionArray\"\n assert isinstance(right, ExtensionArray), \"right is not an ExtensionArray\"\n if check_dtype:\n assert_attr_equal(\"dtype\", left, right, obj=\"ExtensionArray\")\n\n if hasattr(left, \"asi8\") and type(right) == type(left):\n # Avoid slow object-dtype comparisons\n assert_numpy_array_equal(left.asi8, right.asi8)\n return\n\n left_na = np.asarray(left.isna())\n right_na = np.asarray(right.isna())\n assert_numpy_array_equal(left_na, right_na, obj=\"ExtensionArray NA mask\")\n\n left_valid = np.asarray(left[~left_na].astype(object))\n right_valid = np.asarray(right[~right_na].astype(object))\n if check_exact:\n assert_numpy_array_equal(left_valid, right_valid, obj=\"ExtensionArray\")\n else:\n _testing.assert_almost_equal(\n left_valid,\n right_valid,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n obj=\"ExtensionArray\",\n )\n\n\n# This could be refactored to use the NDFrame.equals method\ndef assert_series_equal(\n left,\n right,\n check_dtype=True,\n check_index_type=\"equiv\",\n check_series_type=True,\n check_less_precise=False,\n check_names=True,\n check_exact=False,\n check_datetimelike_compat=False,\n check_categorical=True,\n obj=\"Series\",\n):\n \"\"\"\n Check that left and right Series are equal.\n\n Parameters\n ----------\n left : Series\n right : Series\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n check_index_type : bool / string {'equiv'}, default 'equiv'\n Whether to check the Index class, dtype and inferred_type\n are identical.\n check_series_type : bool, default True\n Whether to check the Series class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare.\n\n When comparing two numbers, if the first number has magnitude less\n than 1e-5, we compare the two numbers directly and check whether\n they are equivalent within the specified precision. Otherwise, we\n compare the **ratio** of the second number to the first number and\n check whether it is equivalent to 1 within the specified precision.\n check_names : bool, default True\n Whether to check the Series and Index names attribute.\n check_exact : bool, default False\n Whether to compare number exactly.\n check_datetimelike_compat : bool, default False\n Compare datetime-like which is comparable ignoring dtype.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n obj : str, default 'Series'\n Specify object name being compared, internally used to show appropriate\n assertion message.\n \"\"\"\n __tracebackhide__ = True\n\n # instance validation\n _check_isinstance(left, right, Series)\n\n if check_series_type:\n # ToDo: There are some tests using rhs is sparse\n # lhs is dense. 
Should use assert_class_equal in future\n assert isinstance(left, type(right))\n # assert_class_equal(left, right, obj=obj)\n\n # length comparison\n if len(left) != len(right):\n msg1 = \"{len}, {left}\".format(len=len(left), left=left.index)\n msg2 = \"{len}, {right}\".format(len=len(right), right=right.index)\n raise_assert_detail(obj, \"Series length are different\", msg1, msg2)\n\n # index comparison\n assert_index_equal(\n left.index,\n right.index,\n exact=check_index_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj=\"{obj}.index\".format(obj=obj),\n )\n\n if check_dtype:\n # We want to skip exact dtype checking when `check_categorical`\n # is False. We'll still raise if only one is a `Categorical`,\n # regardless of `check_categorical`\n if (\n is_categorical_dtype(left)\n and is_categorical_dtype(right)\n and not check_categorical\n ):\n pass\n else:\n assert_attr_equal(\"dtype\", left, right)\n\n if check_exact:\n assert_numpy_array_equal(\n left._internal_get_values(),\n right._internal_get_values(),\n check_dtype=check_dtype,\n obj=\"{obj}\".format(obj=obj),\n )\n elif check_datetimelike_compat:\n # we want to check only if we have compat dtypes\n # e.g. integer and M|m are NOT compat, but we can simply check\n # the values in that case\n if needs_i8_conversion(left) or needs_i8_conversion(right):\n\n # datetimelike may have different objects (e.g. datetime.datetime\n # vs Timestamp) but will compare equal\n if not Index(left.values).equals(Index(right.values)):\n msg = (\n \"[datetimelike_compat=True] {left} is not equal to \" \"{right}.\"\n ).format(left=left.values, right=right.values)\n raise AssertionError(msg)\n else:\n assert_numpy_array_equal(\n left._internal_get_values(),\n right._internal_get_values(),\n check_dtype=check_dtype,\n )\n elif is_interval_dtype(left) or is_interval_dtype(right):\n assert_interval_array_equal(left.array, right.array)\n elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):\n # .values is an ndarray, but ._values is the ExtensionArray.\n # TODO: Use .array\n assert is_extension_array_dtype(right.dtype)\n assert_extension_array_equal(left._values, right._values)\n elif (\n is_extension_array_dtype(left)\n and not is_categorical_dtype(left)\n and is_extension_array_dtype(right)\n and not is_categorical_dtype(right)\n ):\n assert_extension_array_equal(left.array, right.array)\n else:\n _testing.assert_almost_equal(\n left._internal_get_values(),\n right._internal_get_values(),\n check_less_precise=check_less_precise,\n check_dtype=check_dtype,\n obj=\"{obj}\".format(obj=obj),\n )\n\n # metadata comparison\n if check_names:\n assert_attr_equal(\"name\", left, right, obj=obj)\n\n if check_categorical:\n if is_categorical_dtype(left) or is_categorical_dtype(right):\n assert_categorical_equal(\n left.values, right.values, obj=\"{obj} category\".format(obj=obj)\n )\n\n\n# This could be refactored to use the NDFrame.equals method\ndef assert_frame_equal(\n left,\n right,\n check_dtype=True,\n check_index_type=\"equiv\",\n check_column_type=\"equiv\",\n check_frame_type=True,\n check_less_precise=False,\n check_names=True,\n by_blocks=False,\n check_exact=False,\n check_datetimelike_compat=False,\n check_categorical=True,\n check_like=False,\n obj=\"DataFrame\",\n):\n \"\"\"\n Check that left and right DataFrame are equal.\n\n This function is intended to compare two DataFrames and output any\n differences. 
It is mostly intended for use in unit tests.\n    Additional parameters allow varying the strictness of the\n    equality checks performed.\n\n    Parameters\n    ----------\n    left : DataFrame\n        First DataFrame to compare.\n    right : DataFrame\n        Second DataFrame to compare.\n    check_dtype : bool, default True\n        Whether to check the DataFrame dtype is identical.\n    check_index_type : bool / string {'equiv'}, default 'equiv'\n        Whether to check the Index class, dtype and inferred_type\n        are identical.\n    check_column_type : bool / string {'equiv'}, default 'equiv'\n        Whether to check the columns class, dtype and inferred_type\n        are identical. Is passed as the ``exact`` argument of\n        :func:`assert_index_equal`.\n    check_frame_type : bool, default True\n        Whether to check the DataFrame class is identical.\n    check_less_precise : bool or int, default False\n        Specify comparison precision. Only used when check_exact is False.\n        5 digits (False) or 3 digits (True) after decimal points are compared.\n        If int, then specify the digits to compare.\n\n        When comparing two numbers, if the first number has magnitude less\n        than 1e-5, we compare the two numbers directly and check whether\n        they are equivalent within the specified precision. Otherwise, we\n        compare the **ratio** of the second number to the first number and\n        check whether it is equivalent to 1 within the specified precision.\n    check_names : bool, default True\n        Whether to check that the `names` attribute for both the `index`\n        and `column` attributes of the DataFrame is identical, i.e.\n\n        * left.index.names == right.index.names\n        * left.columns.names == right.columns.names\n    by_blocks : bool, default False\n        Specify how to compare internal data. If False, compare by columns.\n        If True, compare by blocks.\n    check_exact : bool, default False\n        Whether to compare number exactly.\n    check_datetimelike_compat : bool, default False\n        Compare datetime-like which is comparable ignoring dtype.\n    check_categorical : bool, default True\n        Whether to compare internal Categorical exactly.\n    check_like : bool, default False\n        If True, ignore the order of index & columns.\n        Note: index labels must match their respective rows\n        (same as in columns) - same labels must be with the same data.\n    obj : str, default 'DataFrame'\n        Specify object name being compared, internally used to show appropriate\n        assertion message.\n\n    See Also\n    --------\n    assert_series_equal : Equivalent method for asserting Series equality.\n    DataFrame.equals : Check DataFrame equality.\n\n    Examples\n    --------\n    This example shows comparing two DataFrames that are equal\n    but with columns of differing dtypes.\n\n    >>> from pandas.util.testing import assert_frame_equal\n    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})\n    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})\n\n    df1 equals itself.\n\n    >>> assert_frame_equal(df1, df1)\n\n    df1 differs from df2 as column 'b' is of a different type.\n\n    >>> assert_frame_equal(df1, df2)\n    Traceback (most recent call last):\n    AssertionError: Attributes are different\n    ...\n    Attribute \"dtype\" are different\n    [left]:  int64\n    [right]: float64\n\n    Ignore differing dtypes in columns with check_dtype.\n\n    >>> assert_frame_equal(df1, df2, check_dtype=False)\n    \"\"\"\n    __tracebackhide__ = True\n\n    # instance validation\n    _check_isinstance(left, right, DataFrame)\n\n    if check_frame_type:\n        assert isinstance(left, type(right))\n        # assert_class_equal(left, right, obj=obj)\n\n    # shape comparison\n    if left.shape != right.shape:\n        raise_assert_detail(\n            obj,\n            \"{obj} shape 
mismatch\".format(obj=obj),\n \"{shape!r}\".format(shape=left.shape),\n \"{shape!r}\".format(shape=right.shape),\n )\n\n if check_like:\n left, right = left.reindex_like(right), right\n\n # index comparison\n assert_index_equal(\n left.index,\n right.index,\n exact=check_index_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj=\"{obj}.index\".format(obj=obj),\n )\n\n # column comparison\n assert_index_equal(\n left.columns,\n right.columns,\n exact=check_column_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj=\"{obj}.columns\".format(obj=obj),\n )\n\n # compare by blocks\n if by_blocks:\n rblocks = right._to_dict_of_blocks()\n lblocks = left._to_dict_of_blocks()\n for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):\n assert dtype in lblocks\n assert dtype in rblocks\n assert_frame_equal(\n lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj\n )\n\n # compare by columns\n else:\n for i, col in enumerate(left.columns):\n assert col in right\n lcol = left.iloc[:, i]\n rcol = right.iloc[:, i]\n assert_series_equal(\n lcol,\n rcol,\n check_dtype=check_dtype,\n check_index_type=check_index_type,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_names=check_names,\n check_datetimelike_compat=check_datetimelike_compat,\n check_categorical=check_categorical,\n obj=\"{obj}.iloc[:, {idx}]\".format(obj=obj, idx=i),\n )\n\n\ndef assert_equal(left, right, **kwargs):\n \"\"\"\n Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.\n\n Parameters\n ----------\n left : Index, Series, DataFrame, ExtensionArray, or np.ndarray\n right : Index, Series, DataFrame, ExtensionArray, or np.ndarray\n **kwargs\n \"\"\"\n __tracebackhide__ = True\n\n if isinstance(left, pd.Index):\n assert_index_equal(left, right, **kwargs)\n elif isinstance(left, pd.Series):\n assert_series_equal(left, right, **kwargs)\n elif isinstance(left, pd.DataFrame):\n assert_frame_equal(left, right, **kwargs)\n elif isinstance(left, IntervalArray):\n assert_interval_array_equal(left, right, **kwargs)\n elif isinstance(left, PeriodArray):\n assert_period_array_equal(left, right, **kwargs)\n elif isinstance(left, DatetimeArray):\n assert_datetime_array_equal(left, right, **kwargs)\n elif isinstance(left, TimedeltaArray):\n assert_timedelta_array_equal(left, right, **kwargs)\n elif isinstance(left, ExtensionArray):\n assert_extension_array_equal(left, right, **kwargs)\n elif isinstance(left, np.ndarray):\n assert_numpy_array_equal(left, right, **kwargs)\n else:\n raise NotImplementedError(type(left))\n\n\ndef box_expected(expected, box_cls, transpose=True):\n \"\"\"\n Helper function to wrap the expected output of a test in a given box_class.\n\n Parameters\n ----------\n expected : np.ndarray, Index, Series\n box_cls : {Index, Series, DataFrame}\n\n Returns\n -------\n subclass of box_cls\n \"\"\"\n if box_cls is pd.Index:\n expected = pd.Index(expected)\n elif box_cls is pd.Series:\n expected = pd.Series(expected)\n elif box_cls is pd.DataFrame:\n expected = pd.Series(expected).to_frame()\n if transpose:\n # for vector operations, we we need a DataFrame to be a single-row,\n # not a single-column, in order to operate against non-DataFrame\n # vectors of the same length.\n expected = expected.T\n elif box_cls is PeriodArray:\n # the PeriodArray constructor is not as flexible as 
period_array\n        expected = period_array(expected)\n    elif box_cls is DatetimeArray:\n        expected = DatetimeArray(expected)\n    elif box_cls is TimedeltaArray:\n        expected = TimedeltaArray(expected)\n    elif box_cls is np.ndarray:\n        expected = np.array(expected)\n    elif box_cls is to_array:\n        expected = to_array(expected)\n    else:\n        raise NotImplementedError(box_cls)\n    return expected\n\n\ndef to_array(obj):\n    # temporary implementation until we get pd.array in place\n    if is_period_dtype(obj):\n        return period_array(obj)\n    elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):\n        return DatetimeArray._from_sequence(obj)\n    elif is_timedelta64_dtype(obj):\n        return TimedeltaArray._from_sequence(obj)\n    else:\n        return np.array(obj)\n\n\n# -----------------------------------------------------------------------------\n# Sparse\n\n\ndef assert_sp_array_equal(\n    left,\n    right,\n    check_dtype=True,\n    check_kind=True,\n    check_fill_value=True,\n    consolidate_block_indices=False,\n):\n    \"\"\"Check that the left and right SparseArray are equal.\n\n    Parameters\n    ----------\n    left : SparseArray\n    right : SparseArray\n    check_dtype : bool, default True\n        Whether to check the data dtype is identical.\n    check_kind : bool, default True\n        Whether to just compare the kind of the sparse index for each column.\n    check_fill_value : bool, default True\n        Whether to check that left.fill_value matches right.fill_value\n    consolidate_block_indices : bool, default False\n        Whether to consolidate contiguous blocks for sparse arrays with\n        a BlockIndex. Some operations, e.g. concat, will end up with\n        block indices that could be consolidated. Setting this to true will\n        create a new BlockIndex for that array, with consolidated\n        block indices.\n    \"\"\"\n\n    _check_isinstance(left, right, pd.SparseArray)\n\n    assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)\n\n    # SparseIndex comparison\n    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)\n    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)\n\n    if not check_kind:\n        left_index = left.sp_index.to_block_index()\n        right_index = right.sp_index.to_block_index()\n    else:\n        left_index = left.sp_index\n        right_index = right.sp_index\n\n    if consolidate_block_indices and left.kind == \"block\":\n        # we'll probably remove this hack...\n        left_index = left_index.to_int_index().to_block_index()\n        right_index = right_index.to_int_index().to_block_index()\n\n    if not left_index.equals(right_index):\n        raise_assert_detail(\n            \"SparseArray.index\", \"index are not equal\", left_index, right_index\n        )\n    else:\n        # indices are equal, nothing more to check\n        pass\n\n    if check_fill_value:\n        assert_attr_equal(\"fill_value\", left, right)\n    if check_dtype:\n        assert_attr_equal(\"dtype\", left, right)\n    assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)\n\n\n# -----------------------------------------------------------------------------\n# Others\n\n\ndef assert_contains_all(iterable, dic):\n    for k in iterable:\n        assert k in dic, \"Did not contain item: '{key!r}'\".format(key=k)\n\n\ndef assert_copy(iter1, iter2, **eql_kwargs):\n    \"\"\"\n    iter1, iter2: iterables that produce elements\n    comparable with assert_almost_equal\n\n    Checks that the elements are equal, but not\n    the same object. 
(Does not check that items\n in sequences are also not the same object)\n \"\"\"\n for elem1, elem2 in zip(iter1, iter2):\n assert_almost_equal(elem1, elem2, **eql_kwargs)\n msg = (\n \"Expected object {obj1!r} and object {obj2!r} to be \"\n \"different objects, but they were the same object.\"\n ).format(obj1=type(elem1), obj2=type(elem2))\n assert elem1 is not elem2, msg\n\n\ndef getCols(k):\n return string.ascii_uppercase[:k]\n\n\n# make index\ndef makeStringIndex(k=10, name=None):\n return Index(rands_array(nchars=10, size=k), name=name)\n\n\ndef makeUnicodeIndex(k=10, name=None):\n return Index(randu_array(nchars=10, size=k), name=name)\n\n\ndef makeCategoricalIndex(k=10, n=3, name=None, **kwargs):\n \"\"\" make a length k index or n categories \"\"\"\n x = rands_array(nchars=4, size=n)\n return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)\n\n\ndef makeIntervalIndex(k=10, name=None, **kwargs):\n \"\"\" make a length k IntervalIndex \"\"\"\n x = np.linspace(0, 100, num=(k + 1))\n return IntervalIndex.from_breaks(x, name=name, **kwargs)\n\n\ndef makeBoolIndex(k=10, name=None):\n if k == 1:\n return Index([True], name=name)\n elif k == 2:\n return Index([False, True], name=name)\n return Index([False, True] + [False] * (k - 2), name=name)\n\n\ndef makeIntIndex(k=10, name=None):\n return Index(list(range(k)), name=name)\n\n\ndef makeUIntIndex(k=10, name=None):\n return Index([2 ** 63 + i for i in range(k)], name=name)\n\n\ndef makeRangeIndex(k=10, name=None, **kwargs):\n return RangeIndex(0, k, 1, name=name, **kwargs)\n\n\ndef makeFloatIndex(k=10, name=None):\n values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)\n return Index(values * (10 ** np.random.randint(0, 9)), name=name)\n\n\ndef makeDateIndex(k=10, freq=\"B\", name=None, **kwargs):\n dt = datetime(2000, 1, 1)\n dr = bdate_range(dt, periods=k, freq=freq, name=name)\n return DatetimeIndex(dr, name=name, **kwargs)\n\n\ndef makeTimedeltaIndex(k=10, freq=\"D\", name=None, **kwargs):\n return pd.timedelta_range(start=\"1 day\", periods=k, freq=freq, name=name, **kwargs)\n\n\ndef makePeriodIndex(k=10, name=None, **kwargs):\n dt = datetime(2000, 1, 1)\n dr = pd.period_range(start=dt, periods=k, freq=\"B\", name=name, **kwargs)\n return dr\n\n\ndef makeMultiIndex(k=10, names=None, **kwargs):\n return MultiIndex.from_product(((\"foo\", \"bar\"), (1, 2)), names=names, **kwargs)\n\n\n_names = [\n \"Alice\",\n \"Bob\",\n \"Charlie\",\n \"Dan\",\n \"Edith\",\n \"Frank\",\n \"George\",\n \"Hannah\",\n \"Ingrid\",\n \"Jerry\",\n \"Kevin\",\n \"Laura\",\n \"Michael\",\n \"Norbert\",\n \"Oliver\",\n \"Patricia\",\n \"Quinn\",\n \"Ray\",\n \"Sarah\",\n \"Tim\",\n \"Ursula\",\n \"Victor\",\n \"Wendy\",\n \"Xavier\",\n \"Yvonne\",\n \"Zelda\",\n]\n\n\ndef _make_timeseries(start=\"2000-01-01\", end=\"2000-12-31\", freq=\"1D\", seed=None):\n \"\"\"\n Make a DataFrame with a DatetimeIndex\n\n Parameters\n ----------\n start : str or Timestamp, default \"2000-01-01\"\n The start of the index. Passed to date_range with `freq`.\n end : str or Timestamp, default \"2000-12-31\"\n The end of the index. 
Passed to date_range with `freq`.\n freq : str or Freq\n The frequency to use for the DatetimeIndex\n seed : int, optional\n The random state seed.\n\n * name : object dtype with string names\n * id : int dtype with\n * x, y : float dtype\n\n Examples\n --------\n >>> _make_timeseries()\n id name x y\n timestamp\n 2000-01-01 982 Frank 0.031261 0.986727\n 2000-01-02 1025 Edith -0.086358 -0.032920\n 2000-01-03 982 Edith 0.473177 0.298654\n 2000-01-04 1009 Sarah 0.534344 -0.750377\n 2000-01-05 963 Zelda -0.271573 0.054424\n ... ... ... ... ...\n 2000-12-27 980 Ingrid -0.132333 -0.422195\n 2000-12-28 972 Frank -0.376007 -0.298687\n 2000-12-29 1009 Ursula -0.865047 -0.503133\n 2000-12-30 1000 Hannah -0.063757 -0.507336\n 2000-12-31 972 Tim -0.869120 0.531685\n \"\"\"\n index = pd.date_range(start=start, end=end, freq=freq, name=\"timestamp\")\n n = len(index)\n state = np.random.RandomState(seed)\n columns = {\n \"name\": state.choice(_names, size=n),\n \"id\": state.poisson(1000, size=n),\n \"x\": state.rand(n) * 2 - 1,\n \"y\": state.rand(n) * 2 - 1,\n }\n df = pd.DataFrame(columns, index=index, columns=sorted(columns))\n if df.index[-1] == end:\n df = df.iloc[:-1]\n return df\n\n\ndef all_index_generator(k=10):\n \"\"\"Generator which can be iterated over to get instances of all the various\n index classes.\n\n Parameters\n ----------\n k: length of each of the index instances\n \"\"\"\n all_make_index_funcs = [\n makeIntIndex,\n makeFloatIndex,\n makeStringIndex,\n makeUnicodeIndex,\n makeDateIndex,\n makePeriodIndex,\n makeTimedeltaIndex,\n makeBoolIndex,\n makeRangeIndex,\n makeIntervalIndex,\n makeCategoricalIndex,\n ]\n for make_index_func in all_make_index_funcs:\n yield make_index_func(k=k)\n\n\ndef index_subclass_makers_generator():\n make_index_funcs = [\n makeDateIndex,\n makePeriodIndex,\n makeTimedeltaIndex,\n makeRangeIndex,\n makeIntervalIndex,\n makeCategoricalIndex,\n makeMultiIndex,\n ]\n for make_index_func in make_index_funcs:\n yield make_index_func\n\n\ndef all_timeseries_index_generator(k=10):\n \"\"\"Generator which can be iterated over to get instances of all the classes\n which represent time-series.\n\n Parameters\n ----------\n k: length of each of the index instances\n \"\"\"\n make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]\n for make_index_func in make_index_funcs:\n yield make_index_func(k=k)\n\n\n# make series\ndef makeFloatSeries(name=None):\n index = makeStringIndex(N)\n return Series(randn(N), index=index, name=name)\n\n\ndef makeStringSeries(name=None):\n index = makeStringIndex(N)\n return Series(randn(N), index=index, name=name)\n\n\ndef makeObjectSeries(name=None):\n data = makeStringIndex(N)\n data = Index(data, dtype=object)\n index = makeStringIndex(N)\n return Series(data, index=index, name=name)\n\n\ndef getSeriesData():\n index = makeStringIndex(N)\n return {c: Series(randn(N), index=index) for c in getCols(K)}\n\n\ndef makeTimeSeries(nper=None, freq=\"B\", name=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)\n\n\ndef makePeriodSeries(nper=None, name=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makePeriodIndex(nper), name=name)\n\n\ndef getTimeSeriesData(nper=None, freq=\"B\"):\n return {c: makeTimeSeries(nper, freq) for c in getCols(K)}\n\n\ndef getPeriodData(nper=None):\n return {c: makePeriodSeries(nper) for c in getCols(K)}\n\n\n# make frame\ndef makeTimeDataFrame(nper=None, freq=\"B\"):\n data = getTimeSeriesData(nper, freq)\n 
return DataFrame(data)\n\n\ndef makeDataFrame():\n    data = getSeriesData()\n    return DataFrame(data)\n\n\ndef getMixedTypeDict():\n    index = Index([\"a\", \"b\", \"c\", \"d\", \"e\"])\n\n    data = {\n        \"A\": [0.0, 1.0, 2.0, 3.0, 4.0],\n        \"B\": [0.0, 1.0, 0.0, 1.0, 0.0],\n        \"C\": [\"foo1\", \"foo2\", \"foo3\", \"foo4\", \"foo5\"],\n        \"D\": bdate_range(\"1/1/2009\", periods=5),\n    }\n\n    return index, data\n\n\ndef makeMixedDataFrame():\n    return DataFrame(getMixedTypeDict()[1])\n\n\ndef makePeriodFrame(nper=None):\n    data = getPeriodData(nper)\n    return DataFrame(data)\n\n\ndef makeCustomIndex(\n    nentries, nlevels, prefix=\"#\", names=False, ndupe_l=None, idx_type=None\n):\n    \"\"\"Create an index/multiindex with given dimensions, levels, names, etc.\n\n    nentries - number of entries in index\n    nlevels - number of levels (> 1 produces multiindex)\n    prefix - a string prefix for labels\n    names - (Optional), bool or list of strings. if True will use default\n        names, if false will use no names, if a list is given, the name of\n        each level in the index will be taken from the list.\n    ndupe_l - (Optional), list of ints, the number of rows for which the\n        label will be repeated at the corresponding level, you can specify just\n        the first few, the rest will use the default ndupe_l of 1.\n        len(ndupe_l) <= nlevels.\n    idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"p\"/\"td\".\n        If idx_type is not None, `idx_nlevels` must be 1.\n        \"i\"/\"f\" creates an integer/float index,\n        \"s\"/\"u\" creates a string/unicode index\n        \"dt\" creates a datetime index.\n        \"td\" creates a timedelta index.\n\n        if unspecified, string labels will be generated.\n    \"\"\"\n\n    if ndupe_l is None:\n        ndupe_l = [1] * nlevels\n    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels\n    assert names is None or names is False or names is True or len(names) == nlevels\n    assert idx_type is None or (\n        idx_type in (\"i\", \"f\", \"s\", \"u\", \"dt\", \"p\", \"td\") and nlevels == 1\n    )\n\n    if names is True:\n        # build default names\n        names = [prefix + str(i) for i in range(nlevels)]\n    if names is False:\n        # pass None to index constructor for no name\n        names = None\n\n    # make singleton case uniform\n    if isinstance(names, str) and nlevels == 1:\n        names = [names]\n\n    # specific 1D index type requested?\n    idx_func = dict(\n        i=makeIntIndex,\n        f=makeFloatIndex,\n        s=makeStringIndex,\n        u=makeUnicodeIndex,\n        dt=makeDateIndex,\n        td=makeTimedeltaIndex,\n        p=makePeriodIndex,\n    ).get(idx_type)\n    if idx_func:\n        idx = idx_func(nentries)\n        # but we need to fill in the name\n        if names:\n            idx.name = names[0]\n        return idx\n    elif idx_type is not None:\n        raise ValueError(\n            '\"{idx_type}\" is not a legal value for `idx_type`, '\n            'use \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"p\"/\"td\".'.format(idx_type=idx_type)\n        )\n\n    if len(ndupe_l) < nlevels:\n        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))\n    assert len(ndupe_l) == nlevels\n\n    assert all(x > 0 for x in ndupe_l)\n\n    tuples = []\n    for i in range(nlevels):\n\n        def keyfunc(x):\n            import re\n\n            numeric_tuple = re.sub(r\"[^\\d_]_?\", \"\", x).split(\"_\")\n            return [int(num) for num in numeric_tuple]\n\n        # build a list of lists to create the index from\n        div_factor = nentries // ndupe_l[i] + 1\n        cnt = Counter()\n        for j in range(div_factor):\n            label = \"{prefix}_l{i}_g{j}\".format(prefix=prefix, i=i, j=j)\n            cnt[label] = ndupe_l[i]\n        # cute Counter trick\n        result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]\n        tuples.append(result)\n\n    tuples = list(zip(*tuples))\n\n    # convert tuples to index\n    if nentries == 1:\n        # we have a single level 
of tuples, i.e. a regular Index\n index = Index(tuples[0], name=names[0])\n elif nlevels == 1:\n name = None if names is None else names[0]\n index = Index((x[0] for x in tuples), name=name)\n else:\n index = MultiIndex.from_tuples(tuples, names=names)\n return index\n\n\ndef makeCustomDataframe(\n nrows,\n ncols,\n c_idx_names=True,\n r_idx_names=True,\n c_idx_nlevels=1,\n r_idx_nlevels=1,\n data_gen_f=None,\n c_ndupe_l=None,\n r_ndupe_l=None,\n dtype=None,\n c_idx_type=None,\n r_idx_type=None,\n):\n \"\"\"\n nrows, ncols - number of data rows/cols\n c_idx_names, idx_names - False/True/list of strings, yields No names ,\n default names or uses the provided names for the levels of the\n corresponding index. You can provide a single string when\n c_idx_nlevels ==1.\n c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex\n r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex\n data_gen_f - a function f(row,col) which return the data value\n at that position, the default generator used yields values of the form\n \"RxCy\" based on position.\n c_ndupe_l, r_ndupe_l - list of integers, determines the number\n of duplicates for each label at a given level of the corresponding\n index. The default `None` value produces a multiplicity of 1 across\n all levels, i.e. a unique index. Will accept a partial list of length\n N < idx_nlevels, for just the first N levels. If ndupe doesn't divide\n nrows/ncol, the last label might have lower multiplicity.\n dtype - passed to the DataFrame constructor as is, in case you wish to\n have more control in conjunction with a custom `data_gen_f`\n r_idx_type, c_idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"td\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n \"td\" create a timedelta index.\n\n if unspecified, string labels will be generated.\n\n Examples:\n\n # 5 row, 3 columns, default names on both, single index on both axis\n >> makeCustomDataframe(5,3)\n\n # make the data a random int between 1 and 100\n >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))\n\n # 2-level multiindex on rows with each label duplicated\n # twice on first level, default names on both axis, single\n # index on both axis\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])\n\n # DatetimeIndex on row, index with unicode labels on columns\n # no names on either axis\n >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,\n r_idx_type=\"dt\",c_idx_type=\"u\")\n\n # 4-level multindex on rows with names provided, 2-level multindex\n # on columns with default labels and default names.\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,\n r_idx_names=[\"FEE\",\"FI\",\"FO\",\"FAM\"],\n c_idx_nlevels=2)\n\n >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n \"\"\"\n\n assert c_idx_nlevels > 0\n assert r_idx_nlevels > 0\n assert r_idx_type is None or (\n r_idx_type in (\"i\", \"f\", \"s\", \"u\", \"dt\", \"p\", \"td\") and r_idx_nlevels == 1\n )\n assert c_idx_type is None or (\n c_idx_type in (\"i\", \"f\", \"s\", \"u\", \"dt\", \"p\", \"td\") and c_idx_nlevels == 1\n )\n\n columns = makeCustomIndex(\n ncols,\n nlevels=c_idx_nlevels,\n prefix=\"C\",\n names=c_idx_names,\n ndupe_l=c_ndupe_l,\n idx_type=c_idx_type,\n )\n index = makeCustomIndex(\n nrows,\n nlevels=r_idx_nlevels,\n prefix=\"R\",\n names=r_idx_names,\n ndupe_l=r_ndupe_l,\n idx_type=r_idx_type,\n )\n\n # by default, generate data based on 
location\n if data_gen_f is None:\n data_gen_f = lambda r, c: \"R{rows}C{cols}\".format(rows=r, cols=c)\n\n data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]\n\n return DataFrame(data, index, columns, dtype=dtype)\n\n\ndef _create_missing_idx(nrows, ncols, density, random_state=None):\n if random_state is None:\n random_state = np.random\n else:\n random_state = np.random.RandomState(random_state)\n\n # below is cribbed from scipy.sparse\n size = int(np.round((1 - density) * nrows * ncols))\n # generate a few more to ensure unique values\n min_rows = 5\n fac = 1.02\n extra_size = min(size + min_rows, fac * size)\n\n def _gen_unique_rand(rng, _extra_size):\n ind = rng.rand(int(_extra_size))\n return np.unique(np.floor(ind * nrows * ncols))[:size]\n\n ind = _gen_unique_rand(random_state, extra_size)\n while ind.size < size:\n extra_size *= 1.05\n ind = _gen_unique_rand(random_state, extra_size)\n\n j = np.floor(ind * 1.0 / nrows).astype(int)\n i = (ind - j * nrows).astype(int)\n return i.tolist(), j.tolist()\n\n\ndef makeMissingCustomDataframe(\n nrows,\n ncols,\n density=0.9,\n random_state=None,\n c_idx_names=True,\n r_idx_names=True,\n c_idx_nlevels=1,\n r_idx_nlevels=1,\n data_gen_f=None,\n c_ndupe_l=None,\n r_ndupe_l=None,\n dtype=None,\n c_idx_type=None,\n r_idx_type=None,\n):\n \"\"\"\n Parameters\n ----------\n Density : float, optional\n Float in (0, 1) that gives the percentage of non-missing numbers in\n the DataFrame.\n random_state : {np.random.RandomState, int}, optional\n Random number generator or random seed.\n\n See makeCustomDataframe for descriptions of the rest of the parameters.\n \"\"\"\n df = makeCustomDataframe(\n nrows,\n ncols,\n c_idx_names=c_idx_names,\n r_idx_names=r_idx_names,\n c_idx_nlevels=c_idx_nlevels,\n r_idx_nlevels=r_idx_nlevels,\n data_gen_f=data_gen_f,\n c_ndupe_l=c_ndupe_l,\n r_ndupe_l=r_ndupe_l,\n dtype=dtype,\n c_idx_type=c_idx_type,\n r_idx_type=r_idx_type,\n )\n\n i, j = _create_missing_idx(nrows, ncols, density, random_state)\n df.values[i, j] = np.nan\n return df\n\n\ndef makeMissingDataframe(density=0.9, random_state=None):\n df = makeDataFrame()\n i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)\n df.values[i, j] = np.nan\n return df\n\n\nclass TestSubDict(dict):\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n\ndef optional_args(decorator):\n \"\"\"allows a decorator to take optional positional and keyword arguments.\n Assumes that taking a single, callable, positional argument means that\n it is decorating a function, i.e. 
something like this::\n\n @my_decorator\n def function(): pass\n\n Calls decorator with decorator(f, *args, **kwargs)\"\"\"\n\n @wraps(decorator)\n def wrapper(*args, **kwargs):\n def dec(f):\n return decorator(f, *args, **kwargs)\n\n is_decorating = not kwargs and len(args) == 1 and callable(args[0])\n if is_decorating:\n f = args[0]\n args = []\n return dec(f)\n else:\n return dec\n\n return wrapper\n\n\n# skip tests on exceptions with this message\n_network_error_messages = (\n # 'urlopen error timed out',\n # 'timeout: timed out',\n # 'socket.timeout: timed out',\n \"timed out\",\n \"Server Hangup\",\n \"HTTP Error 503: Service Unavailable\",\n \"502: Proxy Error\",\n \"HTTP Error 502: internal error\",\n \"HTTP Error 502\",\n \"HTTP Error 503\",\n \"HTTP Error 403\",\n \"HTTP Error 400\",\n \"Temporary failure in name resolution\",\n \"Name or service not known\",\n \"Connection refused\",\n \"certificate verify\",\n)\n\n# or this e.errno/e.reason.errno\n_network_errno_vals = (\n 101, # Network is unreachable\n 111, # Connection refused\n 110, # Connection timed out\n 104, # Connection reset Error\n 54, # Connection reset by peer\n 60, # urllib.error.URLError: [Errno 60] Connection timed out\n)\n\n# Both of the above shouldn't mask real issues such as 404's\n# or refused connections (changed DNS).\n# But some tests (test_data yahoo) contact incredibly flakey\n# servers.\n\n# and conditionally raise on exception types in _get_default_network_errors\n\n\ndef _get_default_network_errors():\n # Lazy import for http.client because it imports many things from the stdlib\n import http.client\n\n return (IOError, http.client.HTTPException, TimeoutError)\n\n\ndef can_connect(url, error_classes=None):\n \"\"\"Try to connect to the given url. True if succeeds, False if IOError\n raised\n\n Parameters\n ----------\n url : basestring\n The URL to try to connect to\n\n Returns\n -------\n connectable : bool\n Return True if no IOError (unable to connect) or URLError (bad url) was\n raised\n \"\"\"\n\n if error_classes is None:\n error_classes = _get_default_network_errors()\n\n try:\n with urlopen(url):\n pass\n except error_classes:\n return False\n else:\n return True\n\n\n@optional_args\ndef network(\n t,\n url=\"http://www.google.com\",\n raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,\n check_before_test=False,\n error_classes=None,\n skip_errnos=_network_errno_vals,\n _skip_on_messages=_network_error_messages,\n):\n \"\"\"\n Label a test as requiring network connection and, if an error is\n encountered, only raise if it does not find a network connection.\n\n In comparison to ``network``, this assumes an added contract to your test:\n you must assert that, under normal conditions, your test will ONLY fail if\n it does not have network connectivity.\n\n You can call this in 3 ways: as a standard decorator, with keyword\n arguments, or with a positional argument that is the url to check.\n\n Parameters\n ----------\n t : callable\n The test requiring network connectivity.\n url : path\n The url to test via ``pandas.io.common.urlopen`` to check\n for connectivity. Defaults to 'http://www.google.com'.\n raise_on_error : bool\n If True, never catches errors.\n check_before_test : bool\n If True, checks connectivity before running the test case.\n error_classes : tuple or Exception\n error classes to ignore. If not in ``error_classes``, raises the error.\n defaults to IOError. 
Be careful about changing the error classes here.\n skip_errnos : iterable of int\n Any exception that has .errno or .reason.erno set to one\n of these values will be skipped with an appropriate\n message.\n _skip_on_messages: iterable of string\n any exception e for which one of the strings is\n a substring of str(e) will be skipped with an appropriate\n message. Intended to suppress errors where an errno isn't available.\n\n Notes\n -----\n * ``raise_on_error`` supercedes ``check_before_test``\n\n Returns\n -------\n t : callable\n The decorated test ``t``, with checks for connectivity errors.\n\n Example\n -------\n\n Tests decorated with @network will fail if it's possible to make a network\n connection to another URL (defaults to google.com)::\n\n >>> from pandas.util.testing import network\n >>> from pandas.io.common import urlopen\n >>> @network\n ... def test_network():\n ... with urlopen(\"rabbit://bonanza.com\"):\n ... pass\n Traceback\n ...\n URLError: <urlopen error unknown url type: rabit>\n\n You can specify alternative URLs::\n\n >>> @network(\"http://www.yahoo.com\")\n ... def test_something_with_yahoo():\n ... raise IOError(\"Failure Message\")\n >>> test_something_with_yahoo()\n Traceback (most recent call last):\n ...\n IOError: Failure Message\n\n If you set check_before_test, it will check the url first and not run the\n test on failure::\n\n >>> @network(\"failing://url.blaher\", check_before_test=True)\n ... def test_something():\n ... print(\"I ran!\")\n ... raise ValueError(\"Failure\")\n >>> test_something()\n Traceback (most recent call last):\n ...\n\n Errors not related to networking will always be raised.\n \"\"\"\n from pytest import skip\n\n if error_classes is None:\n error_classes = _get_default_network_errors()\n\n t.network = True\n\n @wraps(t)\n def wrapper(*args, **kwargs):\n if check_before_test and not raise_on_error:\n if not can_connect(url, error_classes):\n skip()\n try:\n return t(*args, **kwargs)\n except Exception as e:\n errno = getattr(e, \"errno\", None)\n if not errno and hasattr(errno, \"reason\"):\n errno = getattr(e.reason, \"errno\", None)\n\n if errno in skip_errnos:\n skip(\n \"Skipping test due to known errno\"\n \" and error {error}\".format(error=e)\n )\n\n e_str = str(e)\n\n if any(m.lower() in e_str.lower() for m in _skip_on_messages):\n skip(\n \"Skipping test because exception \"\n \"message is known and error {error}\".format(error=e)\n )\n\n if not isinstance(e, error_classes):\n raise\n\n if raise_on_error or can_connect(url, error_classes):\n raise\n else:\n skip(\n \"Skipping test due to lack of connectivity\"\n \" and error {error}\".format(error=e)\n )\n\n return wrapper\n\n\nwith_connectivity_check = network\n\n\ndef assert_raises_regex(_exception, _regexp, _callable=None, *args, **kwargs):\n r\"\"\"\n Check that the specified Exception is raised and that the error message\n matches a given regular expression pattern. This may be a regular\n expression object or a string containing a regular expression suitable\n for use by `re.search()`. This is a port of the `assertRaisesRegexp`\n function from unittest in Python 2.7.\n\n .. 
deprecated:: 0.24.0\n Use `pytest.raises` instead.\n\n Examples\n --------\n >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')\n >>> import re\n >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')\n\n If an exception of a different type is raised, it bubbles up.\n\n >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')\n Traceback (most recent call last):\n ...\n ValueError: invalid literal for int() with base 10: 'XYZ'\n >>> dct = dict()\n >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')\n Traceback (most recent call last):\n ...\n AssertionError: \"pear\" does not match \"'apple'\"\n\n You can also use this in a with statement.\n\n >>> with assert_raises_regex(TypeError, r'unsupported operand type\\(s\\)'):\n ... 1 + {}\n >>> with assert_raises_regex(TypeError, 'banana'):\n ... 'apple'[0] = 'b'\n Traceback (most recent call last):\n ...\n AssertionError: \"banana\" does not match \"'str' object does not support \\\nitem assignment\"\n \"\"\"\n warnings.warn(\n (\n \"assert_raises_regex has been deprecated and will \"\n \"be removed in the next release. Please use \"\n \"`pytest.raises` instead.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n\n manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)\n if _callable is not None:\n with manager:\n _callable(*args, **kwargs)\n else:\n return manager\n\n\nclass _AssertRaisesContextmanager:\n \"\"\"\n Context manager behind `assert_raises_regex`.\n \"\"\"\n\n def __init__(self, exception, regexp=None):\n \"\"\"\n Initialize an _AssertRaisesContextManager instance.\n\n Parameters\n ----------\n exception : class\n The expected Exception class.\n regexp : str, default None\n The regex to compare against the Exception message.\n \"\"\"\n\n self.exception = exception\n\n if regexp is not None and not hasattr(regexp, \"search\"):\n regexp = re.compile(regexp, re.DOTALL)\n\n self.regexp = regexp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, trace_back):\n expected = self.exception\n\n if not exc_type:\n exp_name = getattr(expected, \"__name__\", str(expected))\n raise AssertionError(\"{name} not raised.\".format(name=exp_name))\n\n return self.exception_matches(exc_type, exc_value, trace_back)\n\n def exception_matches(self, exc_type, exc_value, trace_back):\n \"\"\"\n Check that the Exception raised matches the expected Exception\n and expected error message regular expression.\n\n Parameters\n ----------\n exc_type : class\n The type of Exception raised.\n exc_value : Exception\n The instance of `exc_type` raised.\n trace_back : stack trace object\n The traceback object associated with `exc_value`.\n\n Returns\n -------\n is_matched : bool\n Whether or not the Exception raised matches the expected\n Exception class and expected error message regular expression.\n\n Raises\n ------\n AssertionError : The error message provided does not match\n the expected error message regular expression.\n \"\"\"\n\n if issubclass(exc_type, self.exception):\n if self.regexp is not None:\n val = str(exc_value)\n\n if not self.regexp.search(val):\n msg = '\"{pat}\" does not match \"{val}\"'.format(\n pat=self.regexp.pattern, val=val\n )\n e = AssertionError(msg)\n raise_with_traceback(e, trace_back)\n\n return True\n else:\n # Failed, so allow Exception to bubble up.\n return False\n\n\n@contextmanager\ndef assert_produces_warning(\n expected_warning=Warning,\n filter_level=\"always\",\n clear=None,\n check_stacklevel=True,\n 
raise_on_extra_warnings=True,\n):\n    \"\"\"\n    Context manager for running code expected to either raise a specific\n    warning, or not raise any warnings. Verifies that the code raises the\n    expected warning, and that it does not raise any other unexpected\n    warnings. It is basically a wrapper around ``warnings.catch_warnings``.\n\n    Parameters\n    ----------\n    expected_warning : {Warning, False, None}, default Warning\n        The type of Exception raised. ``exception.Warning`` is the base\n        class for all warnings. To check that no warning is returned,\n        specify ``False`` or ``None``.\n    filter_level : str or None, default \"always\"\n        Specifies whether warnings are ignored, displayed, or turned\n        into errors.\n        Valid values are:\n\n        * \"error\" - turns matching warnings into exceptions\n        * \"ignore\" - discard the warning\n        * \"always\" - always emit a warning\n        * \"default\" - print the warning the first time it is generated\n          from each location\n        * \"module\" - print the warning the first time it is generated\n          from each module\n        * \"once\" - print the warning the first time it is generated\n\n    clear : str, default None\n        If not ``None`` then remove any previously raised warnings from\n        the ``__warningsregistry__`` to ensure that no warning messages are\n        suppressed by this context manager. If ``None`` is specified,\n        the ``__warningsregistry__`` keeps track of which warnings have been\n        shown, and does not show them again.\n    check_stacklevel : bool, default True\n        If True, displays the line that called the function containing\n        the warning to show where the function is called. Otherwise, the\n        line that implements the function is displayed.\n    raise_on_extra_warnings : bool, default True\n        Whether extra warnings not of the type `expected_warning` should\n        cause the test to fail.\n\n    Examples\n    --------\n    >>> import warnings\n    >>> with assert_produces_warning():\n    ...     warnings.warn(UserWarning())\n    ...\n    >>> with assert_produces_warning(False):\n    ...     warnings.warn(RuntimeWarning())\n    ...\n    Traceback (most recent call last):\n        ...\n    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].\n    >>> with assert_produces_warning(UserWarning):\n    ...     warnings.warn(RuntimeWarning())\n    Traceback (most recent call last):\n        ...\n    AssertionError: Did not see expected warning of class 'UserWarning'.\n\n    .. warning:: This is *not* thread-safe.\n    \"\"\"\n    __tracebackhide__ = True\n\n    with warnings.catch_warnings(record=True) as w:\n\n        if clear is not None:\n            # make sure that we are clearing these warnings\n            # if they have happened before\n            # to guarantee that we will catch them\n            if not is_list_like(clear):\n                clear = [clear]\n            for m in clear:\n                try:\n                    m.__warningregistry__.clear()\n                except AttributeError:\n                    # module may not have __warningregistry__\n                    pass\n\n        saw_warning = False\n        warnings.simplefilter(filter_level)\n        yield w\n        extra_warnings = []\n\n        for actual_warning in w:\n            if expected_warning and issubclass(\n                actual_warning.category, expected_warning\n            ):\n                saw_warning = True\n\n                if check_stacklevel and issubclass(\n                    actual_warning.category, (FutureWarning, DeprecationWarning)\n                ):\n                    from inspect import getframeinfo, stack\n\n                    caller = getframeinfo(stack()[2][0])\n                    msg = (\n                        \"Warning not set with correct stacklevel. \"\n                        \"File where warning is raised: {actual} != \"\n                        \"{caller}. 
Warning message: {message}\"\n ).format(\n actual=actual_warning.filename,\n caller=caller.filename,\n message=actual_warning.message,\n )\n assert actual_warning.filename == caller.filename, msg\n else:\n extra_warnings.append(\n (\n actual_warning.category.__name__,\n actual_warning.message,\n actual_warning.filename,\n actual_warning.lineno,\n )\n )\n if expected_warning:\n msg = \"Did not see expected warning of class {name!r}.\".format(\n name=expected_warning.__name__\n )\n assert saw_warning, msg\n if raise_on_extra_warnings and extra_warnings:\n raise AssertionError(\n \"Caused unexpected warning(s): {!r}.\".format(extra_warnings)\n )\n\n\nclass RNGContext:\n \"\"\"\n Context manager to set the numpy random number generator speed. Returns\n to the original value upon exiting the context manager.\n\n Parameters\n ----------\n seed : int\n Seed for numpy.random.seed\n\n Examples\n --------\n\n with RNGContext(42):\n np.random.randn()\n \"\"\"\n\n def __init__(self, seed):\n self.seed = seed\n\n def __enter__(self):\n\n self.start_state = np.random.get_state()\n np.random.seed(self.seed)\n\n def __exit__(self, exc_type, exc_value, traceback):\n\n np.random.set_state(self.start_state)\n\n\n@contextmanager\ndef with_csv_dialect(name, **kwargs):\n \"\"\"\n Context manager to temporarily register a CSV dialect for parsing CSV.\n\n Parameters\n ----------\n name : str\n The name of the dialect.\n kwargs : mapping\n The parameters for the dialect.\n\n Raises\n ------\n ValueError : the name of the dialect conflicts with a builtin one.\n\n See Also\n --------\n csv : Python's CSV library.\n \"\"\"\n import csv\n\n _BUILTIN_DIALECTS = {\"excel\", \"excel-tab\", \"unix\"}\n\n if name in _BUILTIN_DIALECTS:\n raise ValueError(\"Cannot override builtin dialect.\")\n\n csv.register_dialect(name, **kwargs)\n yield\n csv.unregister_dialect(name)\n\n\n@contextmanager\ndef use_numexpr(use, min_elements=None):\n from pandas.core.computation import expressions as expr\n\n if min_elements is None:\n min_elements = expr._MIN_ELEMENTS\n\n olduse = expr._USE_NUMEXPR\n oldmin = expr._MIN_ELEMENTS\n expr.set_use_numexpr(use)\n expr._MIN_ELEMENTS = min_elements\n yield\n expr._MIN_ELEMENTS = oldmin\n expr.set_use_numexpr(olduse)\n\n\ndef test_parallel(num_threads=2, kwargs_list=None):\n \"\"\"Decorator to run the same function multiple times in parallel.\n\n Parameters\n ----------\n num_threads : int, optional\n The number of times the function is run in parallel.\n kwargs_list : list of dicts, optional\n The list of kwargs to update original\n function kwargs on different threads.\n Notes\n -----\n This decorator does not pass the return value of the decorated function.\n\n Original from scikit-image:\n\n https://github.com/scikit-image/scikit-image/pull/1519\n\n \"\"\"\n\n assert num_threads > 0\n has_kwargs_list = kwargs_list is not None\n if has_kwargs_list:\n assert len(kwargs_list) == num_threads\n import threading\n\n def wrapper(func):\n @wraps(func)\n def inner(*args, **kwargs):\n if has_kwargs_list:\n update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])\n else:\n update_kwargs = lambda i: kwargs\n threads = []\n for i in range(num_threads):\n updated_kwargs = update_kwargs(i)\n thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)\n threads.append(thread)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n return inner\n\n return wrapper\n\n\nclass SubclassedSeries(Series):\n _metadata = [\"testattr\", \"name\"]\n\n @property\n def 
_constructor(self):\n return SubclassedSeries\n\n @property\n def _constructor_expanddim(self):\n return SubclassedDataFrame\n\n\nclass SubclassedDataFrame(DataFrame):\n _metadata = [\"testattr\"]\n\n @property\n def _constructor(self):\n return SubclassedDataFrame\n\n @property\n def _constructor_sliced(self):\n return SubclassedSeries\n\n\nclass SubclassedCategorical(Categorical):\n @property\n def _constructor(self):\n return SubclassedCategorical\n\n\n@contextmanager\ndef set_timezone(tz):\n \"\"\"Context manager for temporarily setting a timezone.\n\n Parameters\n ----------\n tz : str\n A string representing a valid timezone.\n\n Examples\n --------\n\n >>> from datetime import datetime\n >>> from dateutil.tz import tzlocal\n >>> tzlocal().tzname(datetime.now())\n 'IST'\n\n >>> with set_timezone('US/Eastern'):\n ... tzlocal().tzname(datetime.now())\n ...\n 'EDT'\n \"\"\"\n\n import os\n import time\n\n def setTZ(tz):\n if tz is None:\n try:\n del os.environ[\"TZ\"]\n except KeyError:\n pass\n else:\n os.environ[\"TZ\"] = tz\n time.tzset()\n\n orig_tz = os.environ.get(\"TZ\")\n setTZ(tz)\n try:\n yield\n finally:\n setTZ(orig_tz)\n\n\ndef _make_skipna_wrapper(alternative, skipna_alternative=None):\n \"\"\"Create a function for calling on an array.\n\n Parameters\n ----------\n alternative : function\n The function to be called on the array with no NaNs.\n Only used when 'skipna_alternative' is None.\n skipna_alternative : function\n The function to be called on the original array\n\n Returns\n -------\n skipna_wrapper : function\n \"\"\"\n if skipna_alternative:\n\n def skipna_wrapper(x):\n return skipna_alternative(x.values)\n\n else:\n\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n return skipna_wrapper\n\n\ndef convert_rows_list_to_csv_str(rows_list):\n \"\"\"\n Convert list of CSV rows to single CSV-formatted string for current OS.\n\n This method is used for creating expected value of to_csv() method.\n\n Parameters\n ----------\n rows_list : list\n The list of string. Each element represents the row of csv.\n\n Returns\n -------\n expected : string\n Expected output of to_csv() in current OS\n \"\"\"\n sep = os.linesep\n expected = sep.join(rows_list) + sep\n return expected\n"
] |
[
[
"pandas.core.computation.expressions.set_use_numexpr",
"pandas.Series",
"numpy.linspace",
"pandas.compat._get_lzma_file",
"pandas.RangeIndex",
"pandas.core.dtypes.common.is_extension_array_dtype",
"numpy.random.random_sample",
"pandas.DataFrame",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.round",
"pandas.MultiIndex.from_tuples",
"pandas.core.arrays.TimedeltaArray",
"numpy.random.randn",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas._libs.testing.assert_dict_equal",
"pandas.reset_option",
"numpy.random.randint",
"pandas.io.common.urlopen",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.compat.raise_with_traceback",
"pandas.Index",
"pandas.DatetimeIndex",
"numpy.random.set_state",
"pandas.core.dtypes.common.is_number",
"matplotlib.pyplot.close",
"pandas.to_pickle",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.bdate_range",
"numpy.random.choice",
"numpy.isnan",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.core.dtypes.common.is_sequence",
"pandas.core.arrays.DatetimeArray",
"pandas.core.arrays.period_array",
"pandas.MultiIndex.from_product",
"numpy.random.rand",
"pandas.date_range",
"matplotlib.pyplot.get_fignums",
"numpy.floor",
"numpy.random.RandomState",
"pandas.compat._import_lzma",
"numpy.array",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_bool",
"pandas.timedelta_range",
"numpy.random.get_state",
"numpy.random.seed",
"pandas.period_range",
"pandas.core.arrays.DatetimeArray._from_sequence",
"pandas.core.algorithms.take_1d",
"pandas._libs.testing.assert_almost_equal",
"pandas.core.dtypes.missing.array_equivalent",
"pandas.core.arrays.TimedeltaArray._from_sequence",
"pandas.IntervalIndex.from_breaks",
"numpy.prod",
"pandas.read_pickle",
"pandas.io.formats.printing.pprint_thing"
]
] |
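A minimal usage sketch for the record above, which dumps a large slice of pandas' `pandas/util/testing.py` (the 0.25-era module). The DataFrames and the warning below are illustrative values, not taken from the source; only `assert_frame_equal`, its `check_dtype` flag, and `assert_produces_warning` come from the dumped code.

import warnings

import pandas as pd
import pandas.util.testing as tm

# Same values, different dtypes in column "b": this only passes
# because dtype checking is switched off.
df1 = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
df2 = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
tm.assert_frame_equal(df1, df2, check_dtype=False)

# Assert that a block of code emits the expected warning class
# (and, by default, no unexpected extras).
with tm.assert_produces_warning(UserWarning):
    warnings.warn("illustrative warning", UserWarning)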
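The same record also dumps the `make*` test-data factories. A short sketch of `makeCustomDataframe` and `RNGContext`, following the examples embedded in their own docstrings; the shape, level counts, and seed are arbitrary.

import numpy as np
import pandas.util.testing as tm

# 5x3 frame with a 2-level row MultiIndex whose first level
# repeats each label twice (mirrors the docstring example).
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, r_ndupe_l=[2])
print(df.shape)  # (5, 3)

# Temporarily pin numpy's global RNG state; restored on exit.
with tm.RNGContext(42):
    print(np.random.randn(2))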
mesielepush/Demi
|
[
"c108d52c8e44949bc8bb67c0aef733a8772015f0"
] |
[
"scripts/nn/simple_lstm/train_lstm.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport numpy as np\nimport pandas as pd\nimport joblib\nimport tensorflow as tf\nfrom sklearn.metrics import confusion_matrix as cmx\nTOP_K = 20000\n\nos.environ['PYTHONHASHSEED']='666'\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nrandom.seed(666)\nnp.random.seed(666)\ncurrent_path = os.getcwd()\npre_data_dir = os.path.join(current_path.split(\"scripts\",1)[0],'input','pre_data')\nmodel_dir = os.path.join(current_path.split(\"scripts\",1)[0],'models')\n\n\ndef lstm_model(\n num_classes,\n num_features,\n embedding_dim,\n dropout_rate,\n ):\n if num_classes == 2:\n last_act = 'sigmoid'\n units = 1\n else:\n last_act = 'softmax'\n units = num_classes\n\n \n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(num_features, embedding_dim),\n tf.keras.layers.Bidirectional(tf.compat.v2.keras.layers.LSTM(embedding_dim)),\n tf.keras.layers.Dropout(dropout_rate),\n tf.keras.layers.Dense(embedding_dim, activation='relu'),\n tf.keras.layers.Dropout(dropout_rate),\n tf.keras.layers.Dense(embedding_dim/2, activation='relu'),\n tf.keras.layers.Dense(units, activation = last_act)\n ])\n return model\n\ndef lstm(data, mod,\n num_classes,\n embedding_dim,\n dropout_rate,\n learning_rate,\n patience,\n epochs,\n batch_size,\n ):\n\n if not os.path.exists(os.path.join(model_dir,mod,'lstm')):\n os.makedirs(os.path.join(model_dir,mod,'lstm'))\n \n os.chdir(os.path.join(model_dir,mod,'lstm'))\n\n (x_train, y_train), (x_test, y_test), word_index = data\n\n num_features = min(len(word_index) + 1, TOP_K)\n\n model = lstm_model(\n num_classes,\n num_features,\n embedding_dim,\n dropout_rate, \n )\n\n if num_classes == 2:\n loss = 'binary_crossentropy'\n else:\n loss = 'sparse_categorical_crossentropy'\n\n optimizer = tf.keras.optimizers.Adam(lr=learning_rate)\n\n checkpoint_path = 'cp.ckpt'\n checkpoint_dir = os.path.dirname(checkpoint_path)\n earlyStopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n patience = patience,\n verbose = 2,\n mode = 'min',\n restore_best_weights=True)\n \n cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,\n save_best_only = True,\n monitor = 'val_loss',\n mode = 'min',\n save_weights_only= True,\n verbose=2)\n model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])\n callbacks = [earlyStopping, cp_callback]\n\n history = model.fit(\n x_train,\n y_train,\n epochs=epochs,\n callbacks=callbacks,\n validation_data=(x_test, y_test),\n verbose=2,\n batch_size = batch_size)\n history = history.history\n model_things = {\n 'embedding_dim': embedding_dim,\n 'dropout_rate': dropout_rate,\n 'input_shape': x_train.shape[1:],\n 'num_classes': num_classes,\n 'num_features': num_features\n }\n pred = model.predict_classes(x_test)\n print(cmx(y_test, pred))\n joblib.dump(model_things,os.path.join(model_dir,mod,'lstm','{0}_things.pkl'.format(mod)))\n joblib.dump(history,os.path.join(model_dir,mod,'lstm','{0}_history.pkl'.format(mod)))\n\n return max(history['val_acc']), min(history['val_loss'])"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.random.seed",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.optimizers.Adam",
"tensorflow.compat.v2.keras.layers.LSTM",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
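A hedged smoke test for the `lstm_model` factory in the `train_lstm.py` record above, using the TF 1.x/2.0-era Keras API the file itself targets (the `lr=` spelling and `binary_crossentropy` for the two-class path). The vocabulary size, sequence length, and batch below are made-up values, and the import assumes the script is on the path. Note also that `Dense(embedding_dim/2, ...)` in the source yields a float unit count under Python 3; `embedding_dim // 2` would be the unambiguous spelling.

import numpy as np
import tensorflow as tf
from train_lstm import lstm_model  # assumes the dumped script is importable

model = lstm_model(num_classes=2, num_features=20000,
                   embedding_dim=64, dropout_rate=0.2)
model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3),
              loss='binary_crossentropy', metrics=['acc'])

x = np.random.randint(0, 20000, size=(8, 50))  # 8 padded token sequences
print(model.predict(x).shape)  # (8, 1): one sigmoid output per sequence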
hsnlab/annabellaDB
|
[
"daeaf394babd07b6d980a3eaa74be6614e7124b8"
] |
[
"demo/demo_console.py"
] |
[
"import shlex\nfrom cmd import Cmd\nimport subprocess\nimport time\nimport docker\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport copy\nimport _thread\nimport os\n\nG = nx.Graph()\nSERVER_IP = {\"kvs1\": \"172.17.0.2\", \"kvs2\": \"172.17.0.3\"}\nnf_server_assignment = {}\nclient = docker.from_env(timeout=180)\n\nkvs1_exist = False\ndata_location = None\ndef execute_bash_command(command):\n bashCommand = command\n process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n output, error = process.communicate()\n # if output != \"\":\n # print(\"Command: '{}' gives:\".format(command))\n # print(output)\n if error != None:\n print(\"Command: '{}' gives:\".format(command))\n print(\"ERROR: {}\".format(error))\n\ndef refresh_graph():\n color_map = []\n node_size_map = []\n for node in G:\n if \"NF1\" in node:\n color_map.append('green')\n node_size_map.append(1000)\n elif \"NF2\" in node:\n color_map.append('yellow')\n node_size_map.append(1000)\n elif \"NF3\" in node:\n color_map.append('blue')\n node_size_map.append(1000)\n elif \"data\" in node:\n color_map.append('red')\n node_size_map.append(500)\n else:\n color_map.append('grey')\n node_size_map.append(2000)\n\n nx.draw(G, node_color=color_map, node_size=node_size_map, with_labels=True, font_weight='bold')\n plt.savefig(\"Topology.jpg\", format=\"JPG\")\n plt.clf()\n\n #execute_bash_command(\"docker cp Topology.jpg dockergrafanainfluxkit_grafana_1:/usr/share/grafana/public/img/.\")\n\ndef detect_states():\n\n global kvs1_exist\n global data_location\n global G\n while not kvs1_exist:\n time.sleep(3)\n\n prev_location = None\n while True:\n execute_bash_command(\"docker cp kvs1:/hydro/anna/log_monitoring.txt . \")\n data_location_ip = \"not_known\"\n with open(\"log_monitoring.txt\", \"r\") as file:\n for line in file:\n if \"Key\" in line and \"Master\" in line and \"Slave\" in line:\n data_location_ip = line.split()[10][1:-1]\n #print(data_location_ip)\n for k, v in SERVER_IP.items():\n if v == data_location_ip:\n data_location = k\n #print(data_location)\n if prev_location != data_location:\n try:\n G.remove_node(\"data\")\n except Exception:\n pass\n G.add_node(\"data\")\n G.add_edge(\"data\", data_location)\n refresh_graph()\n prev_location = data_location\n\n\n time.sleep(3)\n print(\"log exit\")\n\n\nclass MyPrompt(Cmd):\n\n def emptyline(self):\n if self.lastcmd:\n self.lastcmd = \"\"\n return self.onecmd('\\n')\n\n def do_exit(self, inp):\n print(\"Bye\")\n return True\n\n def do_start_cluster(self, inp):\n\n global data_location\n global G\n\n print(\"Starting InfluxDB and Grafana... 
\")\n execute_bash_command(\"docker-compose -f ../../DockerGrafanaInfluxKit/docker-compose.yml up -d\")\n\n print(\"Starting AnnabellaDB Bootstrap server...\")\n execute_bash_command(\"docker run -it -d --name kvs1 master_annabelladb_image\")\n execute_bash_command(\"docker exec --privileged kvs1 tc qdisc replace dev eth0 root netem delay 500ms\")\n print(\"\\tLoading config file...\")\n execute_bash_command(\"docker cp ../conf/test.yml kvs1:/hydro/anna/conf/anna-config.yml\")\n\n print(\"Starting AnnabellaDB Server...\")\n execute_bash_command(\"docker run -it -d --name kvs2 annabelladb_image\")\n execute_bash_command(\"docker exec --privileged kvs2 tc qdisc replace dev eth0 root netem delay 500ms\")\n print(\"\\tLoading config file...\")\n execute_bash_command(\"docker cp ../conf/test-slave.yml kvs2:/hydro/anna/conf/anna-config.yml\")\n\n print(\n \"Grafana dashboard is available on: \\n\\thttp://localhost:3000/dashboard/db/access-times-of-nfs?refresh=5s&orgId=1\")\n\n G.add_nodes_from([\"kvs1\", \"kvs2\"])\n G.add_edge(\"kvs1\", \"kvs2\")\n refresh_graph()\n\n global kvs1_exist\n kvs1_exist = True\n time.sleep(5)\n\n def do_delete_cluster(self, inp):\n global G\n print(\"Deleting InfluxDB and Grafana... \")\n execute_bash_command(\"docker rm -f dockergrafanainfluxkit_influxdb_1\")\n print(\"Deleting Grafana... \")\n execute_bash_command(\"docker rm -f dockergrafanainfluxkit_grafana_1\")\n print(\"Deleting AnnaBellaDB cluster... \")\n execute_bash_command(\"docker rm -f kvs1\")\n execute_bash_command(\"docker rm -f kvs2\")\n\n nodes = copy.deepcopy(G.nodes())\n for node in nodes:\n G.remove_node(node)\n refresh_graph()\n\n def do_start_NF(self, inp):\n try:\n params = inp.split(' ')\n nf_id = params[0]\n nf = \"NF{}\".format(nf_id)\n print(\"NF ID: {}\".format(nf))\n\n server = params[1]\n print(\"Server: {}\".format(server))\n\n print(\"Starting {} on {}...\".format(nf, server))\n container = client.containers.get(server)\n # docker_ip_add = container.attrs['NetworkSettings']['IPAddress']\n container.exec_run(\n '/KVS-CLIENT/bin/python3 /hydro/anna/client/python/demoNF.py {} 172.17.0.1 8086 a {}'.format(\n SERVER_IP[server], nf_id), detach=True)\n\n nf_server_assignment[nf] = server\n\n G.add_node(nf)\n G.add_edge(nf, server)\n refresh_graph()\n\n except IndexError:\n print(\"Invalid command. To use: start_NF <NF ID> <server container name>\")\n except docker.errors.NotFound:\n print(\"There is no '{}' container\".format(nf))\n # execute_bash_command(\"docker exec -it -d {} bash -c '/KVS-CLIENT/bin/python3 /hydro/anna/client/python/demoNF.py {} 172.17.0.1 8086 a {}'\".format(server, SERVER_IP[server], nf_id))\n # print('docker exec -it -d {} bash -c \"/KVS-CLIENT/bin/python3 /hydro/anna/client/python/demoNF.py {} 172.17.0.1 8086 a {}\"'.format(server, SERVER_IP[server], nf_id))\n\n def do_delete_NF(self, inp):\n global G\n try:\n params = inp.split(' ')\n nf_id = params[0]\n\n container = client.containers.get(nf_server_assignment[nf_id])\n output = container.exec_run(['sh', '-c', 'ps aux | grep demoNF'], stderr=True, stdout=True)\n output = output.output.decode(\"utf-8\")\n for line in output.split(\"\\n\"):\n if line.split(' ')[-1] == nf_id[2:]:\n line = line.split(' ')\n line = [i for i in line if i != '']\n pid = line[1]\n container.exec_run(['kill', '-9', pid])\n break\n\n G.remove_node(nf_id)\n refresh_graph()\n\n except KeyError:\n print(\"There is no NF such {}\".format(nf_id))\n\n\n_thread.start_new_thread(detect_states, ())\nMyPrompt().cmdloop()\nprint(\"after\")\n"
] |
[
[
"matplotlib.pyplot.clf",
"matplotlib.pyplot.savefig"
]
] |
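One note on `demo_console.py` above: it imports `shlex` but tokenizes shell commands with `str.split()`, which breaks any argument containing quoted spaces (for example an `sh -c "..."` payload). A sketch of the safer variant; the docker command below is illustrative.

import shlex
import subprocess

# str.split() would cut the quoted payload into separate tokens;
# shlex.split() keeps it as a single argv entry.
cmd = 'docker exec kvs1 sh -c "cat /hydro/anna/conf/anna-config.yml"'
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
output, _ = process.communicate()
print(output.decode())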
willemolding/JointGaussianChangeDetector
|
[
"6f67d479a08999cc6427658c64591bf639e67689"
] |
[
"jointcd/change_point_estimator.py"
] |
[
"from change_detector import ChangeDetector\nimport numpy as np\nfrom functools import partial\nfrom scipy.spatial.distance import mahalanobis\nfrom scipy.linalg import pinvh\n\n\ndef partition(sigma, k):\n \"\"\"\n Partitions a covariance matrix by zeroing out off diagonal blocks to create independence\n \"\"\"\n S = np.copy(sigma)\n S[k:,:k] = 0\n S[:k,k:] = 0\n return S\n\ndef distance(x, mu, precisions):\n \"\"\"\n Given an array of precision matrices (inverse covariance) calculate the mhalanobis distances\n \"\"\"\n distances = np.array([mahalanobis(x, mu, precisions[k]) for k in range(x.shape[0])])\n return distances\n\n\nclass ChangePointEstimator(ChangeDetector):\n\n def predict(self, X):\n \"\"\"\n Returns the most probably change point in each time series.\n Also returns the time series of mahalanobis distances\n\n Parameters:\n -----------\n X - array of time series, shape (n_series, len_series)\n \"\"\"\n D,N = X.shape\n\n sigma = self.covariance_estimator_.covariance_\n mu = self.covariance_estimator_.location_\n\n # calculate the precision matrices for all possible partitions\n precisions = [pinvh(partition(sigma, k)) for k in range(N)]\n\n # calculate the mahalanobis distance for each candidate change point in each time series\n distance_time_series = np.apply_along_axis(partial(distance, mu=mu, precisions=precisions), 1, X)\n\n # return the min distance (max likelihood change point) and array of distances\n return np.argmin(distance_time_series, axis=1), distance_time_series\n"
] |
[
[
"numpy.copy",
"scipy.spatial.distance.mahalanobis",
"numpy.argmin"
]
] |
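The estimator above scores a candidate change point k by zeroing the off-diagonal covariance blocks (making the first k samples independent of the rest) and measuring the Mahalanobis distance under the resulting precision matrix. A tiny worked check of that `partition` step, restated inline so the snippet is self-contained; the covariance matrix is made up.

import numpy as np

def partition(sigma, k):
    # Same logic as jointcd/change_point_estimator.py:
    # zero the off-diagonal blocks at split point k.
    S = np.copy(sigma)
    S[k:, :k] = 0
    S[:k, k:] = 0
    return S

sigma = np.array([[2.0, 0.5, 0.3],
                  [0.5, 1.0, 0.4],
                  [0.3, 0.4, 1.5]])
print(partition(sigma, 1))
# [[2.  0.  0. ]
#  [0.  1.  0.4]
#  [0.  0.4 1.5]]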
richpsharp/ecoshard
|
[
"4f5e15835160901b83001e82496576236b2328e5"
] |
[
"src/ecoshard/geoprocessing/geoprocessing.py"
] |
[
"# coding=UTF-8\n\"\"\"A collection of raster and vector algorithms and utilities.\"\"\"\nimport collections\nimport functools\nimport logging\nimport math\nimport os\nimport pprint\nimport queue\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport time\n\nfrom . import geoprocessing_core\nfrom .geoprocessing_core import DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS\nfrom .geoprocessing_core import DEFAULT_OSR_AXIS_MAPPING_STRATEGY\nfrom osgeo import gdal\nfrom osgeo import ogr\nfrom osgeo import osr\nimport numpy\nimport numpy.ma\nimport rtree\nimport scipy.interpolate\nimport scipy.ndimage\nimport scipy.signal\nimport scipy.signal.signaltools\nimport scipy.sparse\nimport shapely.ops\nimport shapely.prepared\nimport shapely.wkb\n\n# This is used to efficiently pass data to the raster stats worker if available\nif sys.version_info >= (3, 8):\n import multiprocessing.shared_memory\n\n\nclass ReclassificationMissingValuesError(Exception):\n \"\"\"Raised when a raster value is not a valid key to a dictionary.\n\n Attributes:\n msg (str) - error message\n missing_values (list) - a list of the missing values from the raster\n that are not keys in the dictionary\n\n \"\"\"\n\n def __init__(self, msg, missing_values):\n \"\"\"See Attributes for args docstring.\"\"\"\n self.msg = msg\n self.missing_values = missing_values\n super().__init__(msg, missing_values)\n\n\nLOGGER = logging.getLogger(__name__)\n\n# Used in joining finished TaskGraph Tasks.\n_MAX_TIMEOUT = 60.0\n\n_VALID_GDAL_TYPES = (\n set([getattr(gdal, x) for x in dir(gdal.gdalconst) if 'GDT_' in x]))\n\n_LOGGING_PERIOD = 5.0 # min 5.0 seconds per update log message for the module\n_LARGEST_ITERBLOCK = 2**16 # largest block for iterblocks to read in cells\n\n_GDAL_TYPE_TO_NUMPY_LOOKUP = {\n gdal.GDT_Byte: numpy.uint8,\n gdal.GDT_Int16: numpy.int16,\n gdal.GDT_Int32: numpy.int32,\n gdal.GDT_UInt16: numpy.uint16,\n gdal.GDT_UInt32: numpy.uint32,\n gdal.GDT_Float32: numpy.float32,\n gdal.GDT_Float64: numpy.float64,\n gdal.GDT_CFloat32: numpy.csingle,\n gdal.GDT_CFloat64: numpy.complex64,\n}\n\n\ndef raster_calculator(\n base_raster_path_band_const_list, local_op, target_raster_path,\n datatype_target, nodata_target,\n calc_raster_stats=True, use_shared_memory=False,\n largest_block=_LARGEST_ITERBLOCK, max_timeout=_MAX_TIMEOUT,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Apply local a raster operation on a stack of rasters.\n\n This function applies a user defined function across a stack of\n rasters' pixel stack. The rasters in ``base_raster_path_band_list`` must\n be spatially aligned and have the same cell sizes.\n\n Args:\n base_raster_path_band_const_list (sequence): a sequence containing:\n\n * ``(str, int)`` tuples, referring to a raster path/band index pair\n to use as an input.\n * ``numpy.ndarray`` s of up to two dimensions. These inputs must\n all be broadcastable to each other AND the size of the raster\n inputs.\n * ``(object, 'raw')`` tuples, where ``object`` will be passed\n directly into the ``local_op``.\n\n All rasters must have the same raster size. If only arrays are\n input, numpy arrays must be broadcastable to each other and the\n final raster size will be the final broadcast array shape. A value\n error is raised if only \"raw\" inputs are passed.\n local_op (function): a function that must take in as many parameters as\n there are elements in ``base_raster_path_band_const_list``. 
The\n parameters in ``local_op`` will map 1-to-1 in order with the values\n in ``base_raster_path_band_const_list``. ``raster_calculator`` will\n call ``local_op`` to generate the pixel values in ``target_raster``\n along memory block aligned processing windows. Note any\n particular call to ``local_op`` will have the arguments from\n ``raster_path_band_const_list`` sliced to overlap that window.\n If an argument from ``raster_path_band_const_list`` is a\n raster/path band tuple, it will be passed to ``local_op`` as a 2D\n numpy array of pixel values that align with the processing window\n that ``local_op`` is targeting. A 2D or 1D array will be sliced to\n match the processing window and in the case of a 1D array tiled in\n whatever dimension is flat. If an argument is a scalar it is\n passed as as scalar.\n The return value must be a 2D array of the same size as any of the\n input parameter 2D arrays and contain the desired pixel values\n for the target raster.\n target_raster_path (string): the path of the output raster. The\n projection, size, and cell size will be the same as the rasters\n in ``base_raster_path_const_band_list`` or the final broadcast\n size of the constant/ndarray values in the list.\n datatype_target (gdal datatype; int): the desired GDAL output type of\n the target raster.\n nodata_target (numerical value): the desired nodata value of the\n target raster.\n calc_raster_stats (boolean): If True, calculates and sets raster\n statistics (min, max, mean, and stdev) for target raster.\n use_shared_memory (boolean): If True, uses Python Multiprocessing\n shared memory to calculate raster stats for faster performance.\n This feature is available for Python >= 3.8 and will otherwise\n be ignored for earlier versions of Python.\n largest_block (int): Attempts to internally iterate over raster blocks\n with this many elements. Useful in cases where the blocksize is\n relatively small, memory is available, and the function call\n overhead dominates the iteration. Defaults to 2**20. A value of\n anything less than the original blocksize of the raster will\n result in blocksizes equal to the original size.\n max_timeout (float): amount of time in seconds to wait for stats\n worker thread to join. Default is _MAX_TIMEOUT.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. 
Defaults to\n geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n\n Raises:\n ValueError: invalid input provided\n\n \"\"\"\n if not base_raster_path_band_const_list:\n raise ValueError(\n \"`base_raster_path_band_const_list` is empty and \"\n \"should have at least one value.\")\n\n # It's a common error to not pass in path/band tuples, so check for that\n # and report error if so\n bad_raster_path_list = False\n if not isinstance(base_raster_path_band_const_list, (list, tuple)):\n bad_raster_path_list = True\n else:\n for value in base_raster_path_band_const_list:\n if (not _is_raster_path_band_formatted(value) and\n not isinstance(value, numpy.ndarray) and\n not (isinstance(value, tuple) and len(value) == 2 and\n value[1] == 'raw')):\n bad_raster_path_list = True\n break\n if bad_raster_path_list:\n raise ValueError(\n \"Expected a sequence of path / integer band tuples, \"\n \"ndarrays, or (value, 'raw') pairs for \"\n \"`base_raster_path_band_const_list`, instead got: \"\n \"%s\" % pprint.pformat(base_raster_path_band_const_list))\n\n # check that any rasters exist on disk and have enough bands\n not_found_paths = []\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n base_raster_path_band_list = [\n path_band for path_band in base_raster_path_band_const_list\n if _is_raster_path_band_formatted(path_band)]\n for value in base_raster_path_band_list:\n if gdal.OpenEx(value[0], gdal.OF_RASTER) is None:\n not_found_paths.append(value[0])\n gdal.PopErrorHandler()\n if not_found_paths:\n raise ValueError(\n \"The following files were expected but do not exist on the \"\n \"filesystem: \" + str(not_found_paths))\n\n # check that band index exists in raster\n invalid_band_index_list = []\n for value in base_raster_path_band_list:\n raster = gdal.OpenEx(value[0], gdal.OF_RASTER)\n if not (1 <= value[1] <= raster.RasterCount):\n invalid_band_index_list.append(value)\n raster = None\n if invalid_band_index_list:\n raise ValueError(\n \"The following rasters do not contain requested band \"\n \"indexes: %s\" % invalid_band_index_list)\n\n # check that the target raster is not also an input raster\n if target_raster_path in [x[0] for x in base_raster_path_band_list]:\n raise ValueError(\n \"%s is used as a target path, but it is also in the base input \"\n \"path list %s\" % (\n target_raster_path, str(base_raster_path_band_const_list)))\n\n # check that raster inputs are all the same dimensions\n raster_info_list = [\n get_raster_info(path_band[0])\n for path_band in base_raster_path_band_const_list\n if _is_raster_path_band_formatted(path_band)]\n geospatial_info_set = set()\n for raster_info in raster_info_list:\n geospatial_info_set.add(raster_info['raster_size'])\n if len(geospatial_info_set) > 1:\n raise ValueError(\n \"Input Rasters are not the same dimensions. 
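# --- illustrative sketch (not part of the original module) ---
# ``_is_raster_path_band_formatted`` is defined later in this file; the
# validation above only needs the (str, int) shape check, so a plausible
# minimal version could look like this:
def _is_raster_path_band_formatted(raster_path_band):
    """Return True if ``raster_path_band`` is a (path, band_index) tuple."""
    if not isinstance(raster_path_band, (list, tuple)):
        return False
    if len(raster_path_band) != 2:
        return False
    return isinstance(raster_path_band[0], str) and isinstance(
        raster_path_band[1], int)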
The \"\n \"following raster are not identical %s\" % str(\n geospatial_info_set))\n\n numpy_broadcast_list = [\n x for x in base_raster_path_band_const_list\n if isinstance(x, numpy.ndarray)]\n stats_worker_thread = None\n try:\n # numpy.broadcast can only take up to 32 arguments, this loop works\n # around that restriction:\n while len(numpy_broadcast_list) > 1:\n numpy_broadcast_list = (\n [numpy.broadcast(*numpy_broadcast_list[:32])] +\n numpy_broadcast_list[32:])\n if numpy_broadcast_list:\n numpy_broadcast_size = numpy_broadcast_list[0].shape\n except ValueError:\n # this gets raised if numpy.broadcast fails\n raise ValueError(\n \"Numpy array inputs cannot be broadcast into a single shape %s\" %\n numpy_broadcast_list)\n\n if numpy_broadcast_list and len(numpy_broadcast_list[0].shape) > 2:\n raise ValueError(\n \"Numpy array inputs must be 2 dimensions or less %s\" %\n numpy_broadcast_list)\n\n # if there are both rasters and arrays, check the numpy shape will\n # be broadcastable with raster shape\n if raster_info_list and numpy_broadcast_list:\n # geospatial lists x/y order and numpy does y/x so reverse size list\n raster_shape = tuple(reversed(raster_info_list[0]['raster_size']))\n invalid_broadcast_size = False\n if len(numpy_broadcast_size) == 1:\n # if there's only one dimension it should match the last\n # dimension first, in the raster case this is the columns\n # because of the row/column order of numpy. No problem if\n # that value is ``1`` because it will be broadcast, otherwise\n # it should be the same as the raster.\n if (numpy_broadcast_size[0] != raster_shape[1] and\n numpy_broadcast_size[0] != 1):\n invalid_broadcast_size = True\n else:\n for dim_index in range(2):\n # no problem if 1 because it'll broadcast, otherwise must\n # be the same value\n if (numpy_broadcast_size[dim_index] !=\n raster_shape[dim_index] and\n numpy_broadcast_size[dim_index] != 1):\n invalid_broadcast_size = True\n if invalid_broadcast_size:\n raise ValueError(\n \"Raster size %s cannot be broadcast to numpy shape %s\" % (\n raster_shape, numpy_broadcast_size))\n\n # create a \"canonical\" argument list that's bands, 2d numpy arrays, or\n # raw values only\n base_canonical_arg_list = []\n base_raster_list = []\n base_band_list = []\n for value in base_raster_path_band_const_list:\n # the input has been tested and value is either a raster/path band\n # tuple, 1d ndarray, 2d ndarray, or (value, 'raw') tuple.\n if _is_raster_path_band_formatted(value):\n # it's a raster/path band, keep track of open raster and band\n # for later so we can `None` them.\n base_raster_list.append(gdal.OpenEx(value[0], gdal.OF_RASTER))\n base_band_list.append(\n base_raster_list[-1].GetRasterBand(value[1]))\n base_canonical_arg_list.append(base_band_list[-1])\n elif isinstance(value, numpy.ndarray):\n if value.ndim == 1:\n # easier to process as a 2d array for writing to band\n base_canonical_arg_list.append(\n value.reshape((1, value.shape[0])))\n else: # dimensions are two because we checked earlier.\n base_canonical_arg_list.append(value)\n elif isinstance(value, tuple):\n base_canonical_arg_list.append(value)\n else:\n raise ValueError(\n \"An unexpected ``value`` occurred. This should never happen. 
\"\n \"Value: %r\" % value)\n\n # create target raster\n if raster_info_list:\n # if rasters are passed, the target is the same size as the raster\n n_cols, n_rows = raster_info_list[0]['raster_size']\n elif numpy_broadcast_list:\n # numpy arrays in args and no raster result is broadcast shape\n # expanded to two dimensions if necessary\n if len(numpy_broadcast_size) == 1:\n n_rows, n_cols = 1, numpy_broadcast_size[0]\n else:\n n_rows, n_cols = numpy_broadcast_size\n else:\n raise ValueError(\n \"Only (object, 'raw') values have been passed. Raster \"\n \"calculator requires at least a raster or numpy array as a \"\n \"parameter. This is the input list: %s\" % pprint.pformat(\n base_raster_path_band_const_list))\n\n if datatype_target not in _VALID_GDAL_TYPES:\n raise ValueError(\n 'Invalid target type, should be a gdal.GDT_* type, received '\n '\"%s\"' % datatype_target)\n\n # create target raster\n raster_driver = gdal.GetDriverByName(raster_driver_creation_tuple[0])\n try:\n os.makedirs(os.path.dirname(target_raster_path))\n except OSError:\n pass\n target_raster = raster_driver.Create(\n target_raster_path, n_cols, n_rows, 1, datatype_target,\n options=raster_driver_creation_tuple[1])\n\n target_band = target_raster.GetRasterBand(1)\n if nodata_target is not None:\n target_band.SetNoDataValue(nodata_target)\n if base_raster_list:\n # use the first raster in the list for the projection and geotransform\n target_raster.SetProjection(base_raster_list[0].GetProjection())\n target_raster.SetGeoTransform(base_raster_list[0].GetGeoTransform())\n target_band.FlushCache()\n target_raster.FlushCache()\n\n try:\n last_time = time.time()\n\n block_offset_list = list(iterblocks(\n (target_raster_path, 1), offset_only=True,\n largest_block=largest_block))\n\n if calc_raster_stats:\n # if this queue is used to send computed valid blocks of\n # the raster to an incremental statistics calculator worker\n stats_worker_queue = queue.Queue()\n exception_queue = queue.Queue()\n\n if sys.version_info >= (3, 8):\n # The stats worker keeps running variables as a float64, so\n # all input rasters are dtype float64 -- make the shared memory\n # size equivalent.\n block_size_bytes = (\n numpy.dtype(numpy.float64).itemsize *\n block_offset_list[0]['win_xsize'] *\n block_offset_list[0]['win_ysize'])\n\n shared_memory = multiprocessing.shared_memory.SharedMemory(\n create=True, size=block_size_bytes)\n\n else:\n stats_worker_queue = None\n\n if calc_raster_stats:\n # To avoid doing two passes on the raster to calculate standard\n # deviation, we implement a continuous statistics calculation\n # as the raster is computed. This computational effort is high\n # and benefits from running in parallel. This queue and worker\n # takes a valid block of a raster and incrementally calculates\n # the raster's statistics. 
When ``None`` is pushed to the queue\n # the worker will finish and return a (min, max, mean, std)\n # tuple.\n LOGGER.info('starting stats_worker')\n stats_worker_thread = threading.Thread(\n target=geoprocessing_core.stats_worker,\n args=(stats_worker_queue, len(block_offset_list)))\n stats_worker_thread.daemon = True\n stats_worker_thread.start()\n LOGGER.info('started stats_worker %s', stats_worker_thread)\n\n pixels_processed = 0\n n_pixels = n_cols * n_rows\n\n # iterate over each block and calculate local_op\n for block_offset in block_offset_list:\n # read input blocks\n offset_list = (block_offset['yoff'], block_offset['xoff'])\n blocksize = (block_offset['win_ysize'], block_offset['win_xsize'])\n data_blocks = []\n for value in base_canonical_arg_list:\n if isinstance(value, gdal.Band):\n data_blocks.append(value.ReadAsArray(**block_offset))\n # I've encountered the following error when a gdal raster\n # is corrupt, often from multiple threads writing to the\n # same file. This helps to catch the error early rather\n # than lead to confusing values of ``data_blocks`` later.\n if not isinstance(data_blocks[-1], numpy.ndarray):\n raise ValueError(\n f\"got a {data_blocks[-1]} when trying to read \"\n f\"{value.GetDataset().GetFileList()} at \"\n f\"{block_offset}, expected numpy.ndarray.\")\n elif isinstance(value, numpy.ndarray):\n # must be numpy array and all have been conditioned to be\n # 2d, so start with 0:1 slices and expand if possible\n slice_list = [slice(0, 1)] * 2\n tile_dims = list(blocksize)\n for dim_index in [0, 1]:\n if value.shape[dim_index] > 1:\n slice_list[dim_index] = slice(\n offset_list[dim_index],\n offset_list[dim_index] +\n blocksize[dim_index],)\n tile_dims[dim_index] = 1\n data_blocks.append(\n numpy.tile(value[tuple(slice_list)], tile_dims))\n else:\n # must be a raw tuple\n data_blocks.append(value[0])\n\n target_block = local_op(*data_blocks)\n\n if (not isinstance(target_block, numpy.ndarray) or\n target_block.shape != blocksize):\n raise ValueError(\n \"Expected `local_op` to return a numpy.ndarray of \"\n \"shape %s but got this instead: %s\" % (\n blocksize, target_block))\n\n target_band.WriteArray(\n target_block, yoff=block_offset['yoff'],\n xoff=block_offset['xoff'])\n\n # send result to stats calculator\n if stats_worker_queue:\n # guard against an undefined nodata target\n if nodata_target is not None:\n target_block = target_block[target_block != nodata_target]\n target_block = target_block.astype(numpy.float64).flatten()\n\n if sys.version_info >= (3, 8) and use_shared_memory:\n shared_memory_array = numpy.ndarray(\n target_block.shape, dtype=target_block.dtype,\n buffer=shared_memory.buf)\n shared_memory_array[:] = target_block[:]\n\n stats_worker_queue.put((\n shared_memory_array.shape, shared_memory_array.dtype,\n shared_memory))\n else:\n stats_worker_queue.put(target_block)\n\n pixels_processed += blocksize[0] * blocksize[1]\n last_time = _invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n f'{float(pixels_processed) / n_pixels * 100.0:.2f}% '\n f'complete on {target_raster_path}',),\n _LOGGING_PERIOD)\n\n LOGGER.info('100.0% complete')\n\n if calc_raster_stats:\n LOGGER.info(\"Waiting for raster stats worker result.\")\n stats_worker_thread.join(max_timeout)\n if stats_worker_thread.is_alive():\n LOGGER.error(\"stats_worker_thread.join() timed out\")\n raise RuntimeError(\"stats_worker_thread.join() timed out\")\n payload = stats_worker_queue.get(True, max_timeout)\n if payload is not None:\n target_min, target_max, 
target_mean, target_stddev = payload\n target_band.SetStatistics(\n float(target_min), float(target_max), float(target_mean),\n float(target_stddev))\n target_band.FlushCache()\n except Exception:\n LOGGER.exception('exception encountered in raster_calculator')\n raise\n finally:\n # This block ensures that rasters are destroyed even if there's an\n # exception raised.\n base_band_list[:] = []\n base_raster_list[:] = []\n target_band.FlushCache()\n target_band = None\n target_raster.FlushCache()\n target_raster = None\n\n if calc_raster_stats and stats_worker_thread:\n if stats_worker_thread.is_alive():\n stats_worker_queue.put(None, True, max_timeout)\n LOGGER.info(\"Waiting for raster stats worker result.\")\n stats_worker_thread.join(max_timeout)\n if stats_worker_thread.is_alive():\n LOGGER.error(\"stats_worker_thread.join() timed out\")\n raise RuntimeError(\n \"stats_worker_thread.join() timed out\")\n if sys.version_info >= (3, 8) and use_shared_memory:\n LOGGER.debug(\n f'unlink shared memory for process {os.getpid()}')\n shared_memory.close()\n shared_memory.unlink()\n LOGGER.debug(\n f'unlinked shared memory for process {os.getpid()}')\n\n # check for an exception in the workers, otherwise get result\n # and pass to writer\n try:\n exception = exception_queue.get_nowait()\n LOGGER.error(\"Exception encountered at termination.\")\n raise exception\n except queue.Empty:\n pass\n\n\ndef align_and_resize_raster_stack(\n base_raster_path_list, target_raster_path_list, resample_method_list,\n target_pixel_size, bounding_box_mode, base_vector_path_list=None,\n raster_align_index=None, base_projection_wkt_list=None,\n target_projection_wkt=None, vector_mask_options=None,\n gdal_warp_options=None,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS,\n osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):\n \"\"\"Generate rasters from a base such that they align geospatially.\n\n This function resizes base rasters that are in the same geospatial\n projection such that the result is an aligned stack of rasters that have\n the same cell size, dimensions, and bounding box. This is achieved by\n clipping or resizing the rasters to intersected, unioned, or equivocated\n bounding boxes of all the raster and vector input.\n\n Args:\n base_raster_path_list (sequence): a sequence of base raster paths that\n will be transformed and will be used to determine the target\n bounding box.\n target_raster_path_list (sequence): a sequence of raster paths that\n will be created to one-to-one map with ``base_raster_path_list``\n as aligned versions of those original rasters. If there are\n duplicate paths in this list, the function will raise a ValueError.\n resample_method_list (sequence): a sequence of resampling methods\n which one to one map each path in ``base_raster_path_list`` during\n resizing. Each element must be one of\n \"near|bilinear|cubic|cubicspline|lanczos|mode\".\n target_pixel_size (list/tuple): the target raster's x and y pixel size\n example: (30, -30).\n bounding_box_mode (string): one of \"union\", \"intersection\", or\n a sequence of floats of the form [minx, miny, maxx, maxy] in the\n target projection coordinate system. Depending\n on the value, output extents are defined as the union,\n intersection, or the explicit bounding box.\n base_vector_path_list (sequence): a sequence of base vector paths\n whose bounding boxes will be used to determine the final bounding\n box of the raster stack if mode is 'union' or 'intersection'. 
If\n mode is 'bb=[...]' then these vectors are not used in any\n calculation.\n raster_align_index (int): indicates the index of a\n raster in ``base_raster_path_list`` that the target rasters'\n bounding boxes pixels should align with. This feature allows\n rasters whose raster dimensions are the same, but bounding boxes\n slightly shifted less than a pixel size to align with a desired\n grid layout. If ``None`` then the bounding box of the target\n rasters is calculated as the precise intersection, union, or\n bounding box.\n base_projection_wkt_list (sequence): if not None, this is a sequence of\n base projections of the rasters in ``base_raster_path_list``. If a\n value is ``None`` the ``base_sr`` is assumed to be whatever is\n defined in that raster. This value is useful if there are rasters\n with no projection defined, but otherwise known.\n target_projection_wkt (string): if not None, this is the desired\n projection of all target rasters in Well Known Text format. If\n None, the base SRS will be passed to the target.\n vector_mask_options (dict): optional, if not None, this is a\n dictionary of options to use an existing vector's geometry to\n mask out pixels in the target raster that do not overlap the\n vector's geometry. Keys to this dictionary are:\n\n * ``'mask_vector_path'`` (str): path to the mask vector file.\n This vector will be automatically projected to the target\n projection if its base coordinate system does not match the\n target.\n * ``'mask_layer_name'`` (str): the layer name to use for masking.\n If this key is not in the dictionary the default is to use\n the layer at index 0.\n * ``'mask_vector_where_filter'`` (str): an SQL WHERE string.\n This will be used to filter the geometry in the mask. Ex: ``'id\n > 10'`` would use all features whose field value of 'id' is >\n 10.\n\n gdal_warp_options (sequence): if present, the contents of this list\n are passed to the ``warpOptions`` parameter of ``gdal.Warp``. See\n the `GDAL Warp documentation\n <https://gdal.org/api/gdalwarp_cpp.html#_CPPv415GDALWarpOptions>`_\n for valid options.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n osr_axis_mapping_strategy (int): OSR axis mapping strategy for\n ``SpatialReference`` objects. Defaults to\n ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. 
This parameter\n should not be changed unless you know what you are doing.\n\n Return:\n None\n\n Raises:\n ValueError\n If any combination of the raw bounding boxes, raster\n bounding boxes, vector bounding boxes, and/or vector_mask\n bounding box does not overlap to produce a valid target.\n ValueError\n If any of the input or target lists are of different\n lengths.\n ValueError\n If there are duplicate paths on the target list which would\n risk corrupted output.\n ValueError\n If some combination of base, target, and embedded source\n reference systems results in an ambiguous target coordinate\n system.\n ValueError\n If ``vector_mask_options`` is not None but the\n ``mask_vector_path`` is undefined or doesn't point to a valid\n file.\n ValueError\n If ``pixel_size`` is not a 2 element sequence of numbers.\n\n \"\"\"\n # make sure that the input lists are of the same length\n list_lengths = [\n len(base_raster_path_list), len(target_raster_path_list),\n len(resample_method_list)]\n if len(set(list_lengths)) != 1:\n raise ValueError(\n \"base_raster_path_list, target_raster_path_list, and \"\n \"resample_method_list must be the same length \"\n \" current lengths are %s\" % (str(list_lengths)))\n\n unique_targets = set(target_raster_path_list)\n if len(unique_targets) != len(target_raster_path_list):\n seen = set()\n duplicate_list = []\n for path in target_raster_path_list:\n if path not in seen:\n seen.add(path)\n else:\n duplicate_list.append(path)\n raise ValueError(\n \"There are duplicated paths on the target list. This is an \"\n \"invalid state of ``target_path_list``. Duplicates: %s\" % (\n duplicate_list))\n\n # we can accept 'union', 'intersection', or a 4 element list/tuple\n if bounding_box_mode not in [\"union\", \"intersection\"] and (\n not isinstance(bounding_box_mode, (list, tuple)) or\n len(bounding_box_mode) != 4):\n raise ValueError(\"Unknown bounding_box_mode %s\" % (\n str(bounding_box_mode)))\n\n n_rasters = len(base_raster_path_list)\n if ((raster_align_index is not None) and\n ((raster_align_index < 0) or (raster_align_index >= n_rasters))):\n raise ValueError(\n \"Alignment index is out of bounds of the datasets index: %s\"\n \" n_elements %s\" % (raster_align_index, n_rasters))\n\n _assert_is_valid_pixel_size(target_pixel_size)\n\n # used to get bounding box, projection, and possible alignment info\n raster_info_list = [\n get_raster_info(path) for path in base_raster_path_list]\n\n # get the literal or intersecting/unioned bounding box\n if isinstance(bounding_box_mode, (list, tuple)):\n # if it's a sequence or tuple, it must be a manual bounding box\n LOGGER.debug(\n \"assuming manual bounding box mode of %s\", bounding_box_mode)\n target_bounding_box = bounding_box_mode\n else:\n # either intersection or union, get list of bounding boxes, reproject\n # if necessary, and reduce to a single box\n if base_vector_path_list is not None:\n # vectors are only interesting for their bounding boxes, that's\n # this construction is inside an else.\n vector_info_list = [\n get_vector_info(path) for path in base_vector_path_list]\n else:\n vector_info_list = []\n\n raster_bounding_box_list = []\n for raster_index, raster_info in enumerate(raster_info_list):\n # this block calculates the base projection of ``raster_info`` if\n # ``target_projection_wkt`` is defined, thus implying a\n # reprojection will be necessary.\n if target_projection_wkt:\n if base_projection_wkt_list and \\\n base_projection_wkt_list[raster_index]:\n # a base is defined, use that\n 
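# --- illustrative sketch (not part of the original module) ---
# ``transform_bounding_box`` (used just below) is defined elsewhere in this
# file; here is a corner-only sketch of the idea. The real function also
# samples points along the edges to stay conservative under curved
# reprojections. The EPSG codes are stand-ins.
from osgeo import osr

def _transform_bbox_corners(bbox, base_epsg, target_epsg):
    base_sr, target_sr = osr.SpatialReference(), osr.SpatialReference()
    base_sr.ImportFromEPSG(base_epsg)
    target_sr.ImportFromEPSG(target_epsg)
    for sr in (base_sr, target_sr):
        sr.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    trans = osr.CreateCoordinateTransformation(base_sr, target_sr)
    minx, miny, maxx, maxy = bbox
    corners = [trans.TransformPoint(x, y) for x, y in [
        (minx, miny), (minx, maxy), (maxx, miny), (maxx, maxy)]]
    xs, ys = [c[0] for c in corners], [c[1] for c in corners]
    return [min(xs), min(ys), max(xs), max(ys)]

print(_transform_bbox_corners([-120.0, 30.0, -119.0, 31.0], 4326, 3857))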
base_raster_projection_wkt = \\\n base_projection_wkt_list[raster_index]\n else:\n # otherwise use the raster's projection and there must\n # be one since we're reprojecting\n base_raster_projection_wkt = raster_info['projection_wkt']\n if not base_raster_projection_wkt:\n raise ValueError(\n \"no projection for raster %s\" %\n base_raster_path_list[raster_index])\n # since the base spatial reference is potentially different\n # than the target, we need to transform the base bounding\n # box into target coordinates so later we can calculate\n # accurate bounding box overlaps in the target coordinate\n # system\n raster_bounding_box_list.append(\n transform_bounding_box(\n raster_info['bounding_box'],\n base_raster_projection_wkt, target_projection_wkt))\n else:\n raster_bounding_box_list.append(raster_info['bounding_box'])\n\n # include the vector bounding box information to make a global list\n # of target bounding boxes\n bounding_box_list = [\n vector_info['bounding_box'] if target_projection_wkt is None else\n transform_bounding_box(\n vector_info['bounding_box'],\n vector_info['projection_wkt'], target_projection_wkt)\n for vector_info in vector_info_list] + raster_bounding_box_list\n\n target_bounding_box = merge_bounding_box_list(\n bounding_box_list, bounding_box_mode)\n\n if vector_mask_options:\n # ensure the mask exists and intersects with the target bounding box\n if 'mask_vector_path' not in vector_mask_options:\n raise ValueError(\n 'vector_mask_options passed, but no value for '\n '\"mask_vector_path\": %s', vector_mask_options)\n\n mask_vector_info = get_vector_info(\n vector_mask_options['mask_vector_path'])\n\n if 'mask_vector_where_filter' in vector_mask_options:\n # the bounding box only exists for the filtered features\n mask_vector = gdal.OpenEx(\n vector_mask_options['mask_vector_path'], gdal.OF_VECTOR)\n mask_layer = mask_vector.GetLayer()\n mask_layer.SetAttributeFilter(\n vector_mask_options['mask_vector_where_filter'])\n mask_bounding_box = merge_bounding_box_list(\n [[feature.GetGeometryRef().GetEnvelope()[i]\n for i in [0, 2, 1, 3]] for feature in mask_layer],\n 'union')\n mask_layer = None\n mask_vector = None\n else:\n # if no where filter then use the raw vector bounding box\n mask_bounding_box = mask_vector_info['bounding_box']\n\n mask_vector_projection_wkt = mask_vector_info['projection_wkt']\n if mask_vector_projection_wkt is not None and \\\n target_projection_wkt is not None:\n mask_vector_bb = transform_bounding_box(\n mask_bounding_box, mask_vector_info['projection_wkt'],\n target_projection_wkt)\n else:\n mask_vector_bb = mask_vector_info['bounding_box']\n # Calling `merge_bounding_box_list` will raise an ValueError if the\n # bounding box of the mask and the target do not intersect. 
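# --- illustrative sketch (not part of the original module) ---
# ``merge_bounding_box_list`` (called above) is defined elsewhere in this
# file; a minimal sketch of the union/intersection reduction it performs
# over [minx, miny, maxx, maxy] boxes, assuming no intersection validation:
def _merge_bounding_boxes(bounding_box_list, mode):
    minxs, minys, maxxs, maxys = zip(*bounding_box_list)
    if mode == 'union':
        return [min(minxs), min(minys), max(maxxs), max(maxys)]
    # intersection: largest of the mins, smallest of the maxes
    return [max(minxs), max(minys), min(maxxs), min(maxys)]

print(_merge_bounding_boxes([[0, 0, 4, 4], [2, 1, 6, 5]], 'intersection'))
# [2, 1, 4, 4]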
The\n # result is otherwise not used.\n _ = merge_bounding_box_list(\n [target_bounding_box, mask_vector_bb], 'intersection')\n\n if raster_align_index is not None and raster_align_index >= 0:\n # bounding box needs alignment\n align_bounding_box = (\n raster_info_list[raster_align_index]['bounding_box'])\n align_pixel_size = (\n raster_info_list[raster_align_index]['pixel_size'])\n # adjust bounding box so lower left corner aligns with a pixel in\n # raster[raster_align_index]\n for index in [0, 1]:\n n_pixels = int(\n (target_bounding_box[index] - align_bounding_box[index]) /\n float(align_pixel_size[index]))\n target_bounding_box[index] = (\n n_pixels * align_pixel_size[index] +\n align_bounding_box[index])\n\n for index, (base_path, target_path, resample_method) in enumerate(zip(\n base_raster_path_list, target_raster_path_list,\n resample_method_list)):\n warp_raster(\n base_path, target_pixel_size, target_path, resample_method,\n target_bb=target_bounding_box,\n raster_driver_creation_tuple=(raster_driver_creation_tuple),\n target_projection_wkt=target_projection_wkt,\n base_projection_wkt=(\n None if not base_projection_wkt_list else\n base_projection_wkt_list[index]),\n vector_mask_options=vector_mask_options,\n gdal_warp_options=gdal_warp_options)\n LOGGER.info(\n '%d of %d aligned: %s', index+1, n_rasters,\n os.path.basename(target_path))\n\n LOGGER.info(\"aligned all %d rasters.\", n_rasters)\n\n\ndef new_raster_from_base(\n base_path, target_path, datatype, band_nodata_list,\n fill_value_list=None, n_rows=None, n_cols=None,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Create new raster by coping spatial reference/geotransform of base.\n\n A convenience function to simplify the creation of a new raster from the\n basis of an existing one. Depending on the input mode, one can create\n a new raster of the same dimensions, geotransform, and georeference as\n the base. Other options are provided to change the raster dimensions,\n number of bands, nodata values, data type, and core raster creation\n options.\n\n Args:\n base_path (string): path to existing raster.\n target_path (string): path to desired target raster.\n datatype: the pixel datatype of the output raster, for example\n gdal.GDT_Float32. See the following header file for supported\n pixel types:\n http://www.gdal.org/gdal_8h.html#22e22ce0a55036a96f652765793fb7a4\n band_nodata_list (sequence): list of nodata values, one for each band,\n to set on target raster. If value is 'None' the nodata value is\n not set for that band. The number of target bands is inferred\n from the length of this list.\n fill_value_list (sequence): list of values to fill each band with. If\n None, no filling is done.\n n_rows (int): if not None, defines the number of target raster rows.\n n_cols (int): if not None, defines the number of target raster\n columns.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n \"\"\"\n base_raster = gdal.OpenEx(base_path, gdal.OF_RASTER)\n if n_rows is None:\n n_rows = base_raster.RasterYSize\n if n_cols is None:\n n_cols = base_raster.RasterXSize\n driver = gdal.GetDriverByName(raster_driver_creation_tuple[0])\n\n local_raster_creation_options = list(raster_driver_creation_tuple[1])\n # PIXELTYPE is sometimes used to define signed vs. 
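# --- illustrative sketch (not part of the original module) ---
# Worked example of the grid-snapping arithmetic above: the minx/miny of the
# target bounding box are shifted onto the pixel grid of the alignment
# raster. All numbers are hypothetical.
align_bounding_box = [100.0, 200.0, 500.0, 600.0]
align_pixel_size = (30.0, -30.0)
target_bounding_box = [137.0, 263.0, 431.0, 551.0]

for index in [0, 1]:
    n_pixels = int(
        (target_bounding_box[index] - align_bounding_box[index]) /
        float(align_pixel_size[index]))
    target_bounding_box[index] = (
        n_pixels * align_pixel_size[index] + align_bounding_box[index])

print(target_bounding_box[:2])  # [130.0, 260.0] -- snapped to the 30-unit grid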
unsigned bytes and\n # the only place that is stored is in the IMAGE_STRUCTURE metadata\n # copy it over if it exists and it not already defined by the input\n # creation options. It's okay to get this info from the first band since\n # all bands have the same datatype\n base_band = base_raster.GetRasterBand(1)\n metadata = base_band.GetMetadata('IMAGE_STRUCTURE')\n if 'PIXELTYPE' in metadata and not any(\n ['PIXELTYPE' in option for option in\n local_raster_creation_options]):\n local_raster_creation_options.append(\n 'PIXELTYPE=' + metadata['PIXELTYPE'])\n\n block_size = base_band.GetBlockSize()\n # It's not clear how or IF we can determine if the output should be\n # striped or tiled. Here we leave it up to the default inputs or if its\n # obviously not striped we tile.\n if not any(\n ['TILED' in option for option in local_raster_creation_options]):\n # TILED not set, so lets try to set it to a reasonable value\n if block_size[0] != n_cols:\n # if x block is not the width of the raster it *must* be tiled\n # otherwise okay if it's striped or tiled, I can't construct a\n # test case to cover this, but there is nothing in the spec that\n # restricts this so I have it just in case.\n local_raster_creation_options.append('TILED=YES')\n\n if not any(\n ['BLOCK' in option for option in local_raster_creation_options]):\n # not defined, so lets copy what we know from the current raster\n local_raster_creation_options.extend([\n 'BLOCKXSIZE=%d' % block_size[0],\n 'BLOCKYSIZE=%d' % block_size[1]])\n\n # make target directory if it doesn't exist\n try:\n os.makedirs(os.path.dirname(target_path))\n except OSError:\n pass\n\n base_band = None\n n_bands = len(band_nodata_list)\n target_raster = driver.Create(\n target_path, n_cols, n_rows, n_bands, datatype,\n options=local_raster_creation_options)\n target_raster.SetProjection(base_raster.GetProjection())\n target_raster.SetGeoTransform(base_raster.GetGeoTransform())\n base_raster = None\n\n for index, nodata_value in enumerate(band_nodata_list):\n if nodata_value is None:\n continue\n target_band = target_raster.GetRasterBand(index + 1)\n try:\n target_band.SetNoDataValue(nodata_value.item())\n except AttributeError:\n target_band.SetNoDataValue(nodata_value)\n\n target_raster.FlushCache()\n last_time = time.time()\n pixels_processed = 0\n n_pixels = n_cols * n_rows\n if fill_value_list is not None:\n for index, fill_value in enumerate(fill_value_list):\n if fill_value is None:\n continue\n target_band = target_raster.GetRasterBand(index + 1)\n # some rasters are very large and a fill can appear to cause\n # computation to hang. 
This block, though possibly slightly less\n # efficient than ``band.Fill`` will give real-time feedback about\n # how the fill is progressing.\n for offsets in iterblocks((target_path, 1), offset_only=True):\n fill_array = numpy.empty(\n (offsets['win_ysize'], offsets['win_xsize']))\n pixels_processed += (\n offsets['win_ysize'] * offsets['win_xsize'])\n fill_array[:] = fill_value\n target_band.WriteArray(\n fill_array, offsets['xoff'], offsets['yoff'])\n\n last_time = _invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n f'filling new raster {target_path} with {fill_value} '\n f'-- {float(pixels_processed)/n_pixels*100.0:.2f}% '\n f'complete'),\n _LOGGING_PERIOD)\n target_band = None\n target_band = None\n target_raster = None\n\n\ndef create_raster_from_vector_extents(\n base_vector_path, target_raster_path, target_pixel_size,\n target_pixel_type, target_nodata, fill_value=None,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Create a blank raster based on a vector file extent.\n\n Args:\n base_vector_path (string): path to vector shapefile to base the\n bounding box for the target raster.\n target_raster_path (string): path to location of generated geotiff;\n the upper left hand corner of this raster will be aligned with the\n bounding box of the source vector and the extent will be exactly\n equal or contained the source vector's bounding box depending on\n whether the pixel size divides evenly into the source bounding\n box; if not coordinates will be rounded up to contain the original\n extent.\n target_pixel_size (list/tuple): the x/y pixel size as a sequence\n Example::\n\n [30.0, -30.0]\n\n target_pixel_type (int): gdal GDT pixel type of target raster\n target_nodata (numeric): target nodata value. Can be None if no nodata\n value is needed.\n fill_value (int/float): value to fill in the target raster; no fill if\n value is None\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. 
Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n \"\"\"\n if target_pixel_type not in _VALID_GDAL_TYPES:\n raise ValueError(\n f'Invalid target type, should be a gdal.GDT_* type, received '\n f'\"{target_pixel_type}\"')\n # Determine the width and height of the tiff in pixels based on the\n # maximum size of the combined envelope of all the features\n vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)\n shp_extent = None\n for layer_index in range(vector.GetLayerCount()):\n layer = vector.GetLayer(layer_index)\n for feature in layer:\n try:\n # envelope is [xmin, xmax, ymin, ymax]\n feature_extent = feature.GetGeometryRef().GetEnvelope()\n if shp_extent is None:\n shp_extent = list(feature_extent)\n else:\n # expand bounds of current bounding box to include that\n # of the newest feature\n shp_extent = [\n f(shp_extent[index], feature_extent[index])\n for index, f in enumerate([min, max, min, max])]\n except AttributeError as error:\n # For some valid OGR objects the geometry can be undefined\n # since it's valid to have a NULL entry in the attribute table\n # this is expressed as a None value in the geometry reference\n # this feature won't contribute\n LOGGER.warning(error)\n layer = None\n\n if shp_extent is None:\n raise ValueError(\n f'the vector at {base_vector_path} has no geometry, cannot '\n f'create a raster from these extents')\n\n # round up on the rows and cols so that the target raster encloses the\n # base vector\n n_cols = int(numpy.ceil(\n abs((shp_extent[1] - shp_extent[0]) / target_pixel_size[0])))\n n_cols = max(1, n_cols)\n\n n_rows = int(numpy.ceil(\n abs((shp_extent[3] - shp_extent[2]) / target_pixel_size[1])))\n n_rows = max(1, n_rows)\n\n driver = gdal.GetDriverByName(raster_driver_creation_tuple[0])\n n_bands = 1\n raster = driver.Create(\n target_raster_path, n_cols, n_rows, n_bands, target_pixel_type,\n options=raster_driver_creation_tuple[1])\n raster.GetRasterBand(1).SetNoDataValue(target_nodata)\n\n # Set the transform based on the upper left corner and given pixel\n # dimensions\n if target_pixel_size[0] < 0:\n x_source = shp_extent[1]\n else:\n x_source = shp_extent[0]\n if target_pixel_size[1] < 0:\n y_source = shp_extent[3]\n else:\n y_source = shp_extent[2]\n raster_transform = [\n x_source, target_pixel_size[0], 0.0,\n y_source, 0.0, target_pixel_size[1]]\n raster.SetGeoTransform(raster_transform)\n\n # Use the same projection on the raster as the shapefile\n raster.SetProjection(vector.GetLayer(0).GetSpatialRef().ExportToWkt())\n\n # Initialize everything to nodata\n if fill_value is not None:\n band = raster.GetRasterBand(1)\n band.Fill(fill_value)\n band = None\n vector = None\n raster = None\n\n\ndef interpolate_points(\n base_vector_path, vector_attribute_field, target_raster_path_band,\n interpolation_mode):\n \"\"\"Interpolate point values onto an existing raster.\n\n Args:\n base_vector_path (string): path to a shapefile that contains point\n vector layers.\n vector_attribute_field (field): a string in the vector referenced at\n ``base_vector_path`` that refers to a numeric value in the\n vector's attribute table. This is the value that will be\n interpolated across the raster.\n target_raster_path_band (tuple): a path/band number tuple to an\n existing raster which likely intersects or is nearby the source\n vector. 
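# --- illustrative sketch (not part of the original module) ---
# Worked example of the row/column arithmetic above: a 95-unit-wide extent
# with 30-unit pixels rounds up to 4 columns so the raster fully encloses
# the vector. Numbers are hypothetical.
import numpy

shp_extent = [100.0, 195.0, 50.0, 122.0]  # [xmin, xmax, ymin, ymax]
target_pixel_size = (30.0, -30.0)

n_cols = max(1, int(numpy.ceil(
    abs((shp_extent[1] - shp_extent[0]) / target_pixel_size[0]))))
n_rows = max(1, int(numpy.ceil(
    abs((shp_extent[3] - shp_extent[2]) / target_pixel_size[1]))))
print(n_cols, n_rows)  # 4 3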
The band in this raster will take on the interpolated\n numerical values provided at each point.\n interpolation_mode (string): the interpolation method to use for\n scipy.interpolate.griddata, one of 'linear', near', or 'cubic'.\n\n Return:\n None\n \"\"\"\n source_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)\n point_list = []\n value_list = []\n for layer_index in range(source_vector.GetLayerCount()):\n layer = source_vector.GetLayer(layer_index)\n for point_feature in layer:\n value = point_feature.GetField(vector_attribute_field)\n # Add in the numpy notation which is row, col\n # Here the point geometry is in the form x, y (col, row)\n geometry = point_feature.GetGeometryRef()\n point = geometry.GetPoint()\n point_list.append([point[1], point[0]])\n value_list.append(value)\n\n point_array = numpy.array(point_list)\n value_array = numpy.array(value_list)\n\n # getting the offsets first before the raster is opened in update mode\n offset_list = list(\n iterblocks(target_raster_path_band, offset_only=True))\n target_raster = gdal.OpenEx(\n target_raster_path_band[0], gdal.OF_RASTER | gdal.GA_Update)\n band = target_raster.GetRasterBand(target_raster_path_band[1])\n nodata = band.GetNoDataValue()\n geotransform = target_raster.GetGeoTransform()\n for offset in offset_list:\n grid_y, grid_x = numpy.mgrid[\n offset['yoff']:offset['yoff']+offset['win_ysize'],\n offset['xoff']:offset['xoff']+offset['win_xsize']]\n grid_y = grid_y * geotransform[5] + geotransform[3]\n grid_x = grid_x * geotransform[1] + geotransform[0]\n\n # this is to be consistent with GDAL 2.0's change of 'nearest' to\n # 'near' for an interpolation scheme that SciPy did not change.\n if interpolation_mode == 'near':\n interpolation_mode = 'nearest'\n raster_out_array = scipy.interpolate.griddata(\n point_array, value_array, (grid_y, grid_x), interpolation_mode,\n nodata)\n band.WriteArray(raster_out_array, offset['xoff'], offset['yoff'])\n\n\ndef zonal_statistics(\n base_raster_path_band, aggregate_vector_path,\n aggregate_layer_name=None, ignore_nodata=True,\n polygons_might_overlap=True, working_dir=None,\n clean_working_dir=True):\n \"\"\"Collect stats on pixel values which lie within polygons.\n\n This function summarizes raster statistics including min, max,\n mean, and pixel count over the regions on the raster that are\n overlapped by the polygons in the vector layer. Statistics are calculated\n in two passes, where first polygons aggregate over pixels in the raster\n whose centers intersect with the polygon. In the second pass, any polygons\n that are not aggregated use their bounding box to intersect with the\n raster for overlap statistics.\n\n Note:\n There may be some degenerate cases where the bounding box vs. actual\n geometry intersection would be incorrect, but these are so unlikely as\n to be manually constructed. If you encounter one of these please email\n the description and dataset to [email protected].\n\n Args:\n base_raster_path_band (tuple): a str/int tuple indicating the path to\n the base raster and the band index of that raster to analyze.\n aggregate_vector_path (string): a path to a polygon vector whose\n geometric features indicate the areas in\n ``base_raster_path_band`` to calculate zonal statistics.\n aggregate_layer_name (string): name of shapefile layer that will be\n used to aggregate results over. 
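# --- illustrative sketch (not part of the original module) ---
# Standalone demo of the pixel-center-to-coordinate mapping used above:
# numpy.mgrid yields row/col indexes, the geotransform converts them to
# projected y/x, and scipy.interpolate.griddata fills the window. The
# geotransform, points, and values are hypothetical.
import numpy
import scipy.interpolate

geotransform = (1000.0, 30.0, 0.0, 2000.0, 0.0, -30.0)
grid_y, grid_x = numpy.mgrid[0:2, 0:3]
grid_y = grid_y * geotransform[5] + geotransform[3]
grid_x = grid_x * geotransform[1] + geotransform[0]

point_array = numpy.array([[2000.0, 1000.0], [1970.0, 1060.0]])  # (y, x)
value_array = numpy.array([1.0, 5.0])
print(scipy.interpolate.griddata(
    point_array, value_array, (grid_y, grid_x), 'nearest', -1.0))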
If set to None, the first layer\n in the DataSource will be used as retrieved by ``.GetLayer()``.\n Note: it is normal and expected to set this field at None if the\n aggregating shapefile is a single layer as many shapefiles,\n including the common 'ESRI Shapefile', are.\n ignore_nodata: if true, then nodata pixels are not accounted for when\n calculating min, max, count, or mean. However, the value of\n ``nodata_count`` will always be the number of nodata pixels\n aggregated under the polygon.\n polygons_might_overlap (boolean): if True the function calculates\n aggregation coverage close to optimally by rasterizing sets of\n polygons that don't overlap. However, this step can be\n computationally expensive for cases where there are many polygons.\n this flag to False directs the function rasterize in one\n step.\n working_dir (string): If not None, indicates where temporary files\n should be created during this run.\n clean_working_dir (bool): If false the temporary files used to\n calculate zonal stats are not deleted.\n\n Return:\n nested dictionary indexed by aggregating feature id, and then by one\n of 'min' 'max' 'sum' 'count' and 'nodata_count'. Example::\n\n {0: {'min': 0,\n 'max': 1,\n 'sum': 1.7,\n 'count': 3,\n 'nodata_count': 1\n }\n }\n\n Raises:\n ValueError\n if ``base_raster_path_band`` is incorrectly formatted.\n RuntimeError\n if the aggregate vector or layer cannot open.\n\n \"\"\"\n if not _is_raster_path_band_formatted(base_raster_path_band):\n raise ValueError(\n \"`base_raster_path_band` not formatted as expected. Expects \"\n \"(path, band_index), received %s\" % repr(base_raster_path_band))\n aggregate_vector = gdal.OpenEx(aggregate_vector_path, gdal.OF_VECTOR)\n if aggregate_vector is None:\n raise RuntimeError(\n \"Could not open aggregate vector at %s\" % aggregate_vector_path)\n if aggregate_layer_name is not None:\n aggregate_layer = aggregate_vector.GetLayerByName(\n aggregate_layer_name)\n else:\n aggregate_layer = aggregate_vector.GetLayer()\n if aggregate_layer is None:\n raise RuntimeError(\n \"Could not open layer %s on %s\" % (\n aggregate_layer_name, aggregate_vector_path))\n\n # create a new aggregate ID field to map base vector aggregate fields to\n # local ones that are guaranteed to be integers.\n local_aggregate_field_name = 'original_fid'\n rasterize_layer_args = {\n 'options': [\n 'ALL_TOUCHED=FALSE',\n 'ATTRIBUTE=%s' % local_aggregate_field_name]\n }\n\n # clip base raster to aggregating vector intersection\n raster_info = get_raster_info(base_raster_path_band[0])\n # -1 here because bands are 1 indexed\n raster_nodata = raster_info['nodata'][base_raster_path_band[1]-1]\n temp_working_dir = tempfile.mkdtemp(dir=working_dir)\n clipped_raster_path = os.path.join(\n temp_working_dir, 'clipped_raster.tif')\n\n try:\n align_and_resize_raster_stack(\n [base_raster_path_band[0]], [clipped_raster_path], ['near'],\n raster_info['pixel_size'], 'intersection',\n base_vector_path_list=[aggregate_vector_path],\n target_projection_wkt=raster_info['projection_wkt'],\n raster_align_index=0)\n clipped_raster = gdal.OpenEx(clipped_raster_path, gdal.OF_RASTER)\n clipped_band = clipped_raster.GetRasterBand(base_raster_path_band[1])\n except ValueError as e:\n if 'Bounding boxes do not intersect' in repr(e):\n LOGGER.error(\n \"aggregate vector %s does not intersect with the raster %s\",\n aggregate_vector_path, base_raster_path_band)\n aggregate_stats = collections.defaultdict(\n lambda: {\n 'min': None, 'max': None, 'count': 0, 'nodata_count': 0,\n 'sum': 
0.0})\n for feature in aggregate_layer:\n _ = aggregate_stats[feature.GetFID()]\n return dict(aggregate_stats)\n else:\n # this would be very unexpected to get here, but if it happened\n # and we didn't raise an exception, execution could get weird.\n raise\n\n # make a shapefile that non-overlapping layers can be added to\n driver = ogr.GetDriverByName('MEMORY')\n disjoint_vector = driver.CreateDataSource('disjoint_vector')\n spat_ref = aggregate_layer.GetSpatialRef()\n\n # Initialize these dictionaries to have the shapefile fields in the\n # original datasource even if we don't pick up a value later\n LOGGER.info(\"build a lookup of aggregate field value to FID\")\n\n aggregate_layer_fid_set = set(\n [agg_feat.GetFID() for agg_feat in aggregate_layer])\n agg_feat = None\n # Loop over each polygon and aggregate\n if polygons_might_overlap:\n LOGGER.info(\"creating disjoint polygon set\")\n disjoint_fid_sets = calculate_disjoint_polygon_set(\n aggregate_vector_path, bounding_box=raster_info['bounding_box'])\n else:\n disjoint_fid_sets = [aggregate_layer_fid_set]\n\n aggregate_stats = collections.defaultdict(lambda: {\n 'min': None, 'max': None, 'count': 0, 'nodata_count': 0, 'sum': 0.0})\n last_time = time.time()\n LOGGER.info(\"processing %d disjoint polygon sets\", len(disjoint_fid_sets))\n for set_index, disjoint_fid_set in enumerate(disjoint_fid_sets):\n last_time = _invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n \"zonal stats approximately %.1f%% complete on %s\",\n 100.0 * float(set_index+1) / len(disjoint_fid_sets),\n os.path.basename(aggregate_vector_path)),\n _LOGGING_PERIOD)\n\n agg_fid_raster_path = os.path.join(\n temp_working_dir, f'agg_fid_{set_index}.tif')\n agg_fid_nodata = -1\n new_raster_from_base(\n clipped_raster_path, agg_fid_raster_path, gdal.GDT_Int32,\n [agg_fid_nodata])\n # fetch the block offsets before the raster is opened for writing\n agg_fid_offset_list = list(\n iterblocks((agg_fid_raster_path, 1), offset_only=True))\n agg_fid_raster = gdal.OpenEx(\n agg_fid_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n agg_fid_band = agg_fid_raster.GetRasterBand(1)\n\n disjoint_layer = disjoint_vector.CreateLayer(\n 'disjoint_vector', spat_ref, ogr.wkbPolygon)\n disjoint_layer.CreateField(\n ogr.FieldDefn(local_aggregate_field_name, ogr.OFTInteger))\n disjoint_layer_defn = disjoint_layer.GetLayerDefn()\n # add polygons to subset_layer\n disjoint_layer.StartTransaction()\n for index, feature_fid in enumerate(disjoint_fid_set):\n last_time = _invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n \"polygon set %d of %d approximately %.1f%% processed \"\n \"on %s\", set_index+1, len(disjoint_fid_sets),\n 100.0 * float(index+1) / len(disjoint_fid_set),\n os.path.basename(aggregate_vector_path)),\n _LOGGING_PERIOD)\n agg_feat = aggregate_layer.GetFeature(feature_fid)\n agg_geom_ref = agg_feat.GetGeometryRef()\n disjoint_feat = ogr.Feature(disjoint_layer_defn)\n disjoint_feat.SetGeometry(agg_geom_ref.Clone())\n agg_geom_ref = None\n disjoint_feat.SetField(\n local_aggregate_field_name, feature_fid)\n disjoint_layer.CreateFeature(disjoint_feat)\n agg_feat = None\n disjoint_layer.CommitTransaction()\n\n LOGGER.info(\n \"disjoint polygon set %d of %d 100.0%% processed on %s\",\n set_index+1, len(disjoint_fid_sets), os.path.basename(\n aggregate_vector_path))\n\n LOGGER.info(\n \"rasterizing disjoint polygon set %d of %d %s\", set_index+1,\n len(disjoint_fid_sets),\n os.path.basename(aggregate_vector_path))\n rasterize_callback = _make_logger_callback(\n 
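# --- illustrative sketch (not part of the original module) ---
# ``calculate_disjoint_polygon_set`` (called above) is defined elsewhere in
# this file; this is only a greedy stand-in showing the idea, with shapely
# boxes in place of OGR features: repeatedly peel off a maximal mutually
# non-intersecting subset so each subset can be rasterized without overlap.
import shapely.geometry

def greedy_disjoint_sets(geom_by_fid):
    remaining = dict(geom_by_fid)
    while remaining:
        picked, picked_geoms = set(), []
        for fid, geom in list(remaining.items()):
            if all(not geom.intersects(other) for other in picked_geoms):
                picked.add(fid)
                picked_geoms.append(geom)
                del remaining[fid]
        yield picked

geoms = {0: shapely.geometry.box(0, 0, 2, 2),
         1: shapely.geometry.box(1, 1, 3, 3),
         2: shapely.geometry.box(5, 5, 6, 6)}
print(list(greedy_disjoint_sets(geoms)))  # [{0, 2}, {1}]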
\"rasterizing polygon \" + str(set_index+1) + \" of \" +\n str(len(disjoint_fid_set)) + \" set %.1f%% complete %s\")\n gdal.RasterizeLayer(\n agg_fid_raster, [1], disjoint_layer,\n callback=rasterize_callback, **rasterize_layer_args)\n agg_fid_raster.FlushCache()\n\n # Delete the features we just added to the subset_layer\n disjoint_layer = None\n disjoint_vector.DeleteLayer(0)\n\n # create a key array\n # and parallel min, max, count, and nodata count arrays\n LOGGER.info(\n \"summarizing rasterized disjoint polygon set %d of %d %s\",\n set_index+1, len(disjoint_fid_sets),\n os.path.basename(aggregate_vector_path))\n for agg_fid_offset in agg_fid_offset_list:\n agg_fid_block = agg_fid_band.ReadAsArray(**agg_fid_offset)\n clipped_block = clipped_band.ReadAsArray(**agg_fid_offset)\n valid_mask = (agg_fid_block != agg_fid_nodata)\n valid_agg_fids = agg_fid_block[valid_mask]\n valid_clipped = clipped_block[valid_mask]\n for agg_fid in numpy.unique(valid_agg_fids):\n masked_clipped_block = valid_clipped[\n valid_agg_fids == agg_fid]\n if raster_nodata is not None:\n clipped_nodata_mask = numpy.isclose(\n masked_clipped_block, raster_nodata)\n else:\n clipped_nodata_mask = numpy.zeros(\n masked_clipped_block.shape, dtype=bool)\n aggregate_stats[agg_fid]['nodata_count'] += (\n numpy.count_nonzero(clipped_nodata_mask))\n if ignore_nodata:\n masked_clipped_block = (\n masked_clipped_block[~clipped_nodata_mask])\n if masked_clipped_block.size == 0:\n continue\n\n if aggregate_stats[agg_fid]['min'] is None:\n aggregate_stats[agg_fid]['min'] = (\n masked_clipped_block[0])\n aggregate_stats[agg_fid]['max'] = (\n masked_clipped_block[0])\n\n aggregate_stats[agg_fid]['min'] = min(\n numpy.min(masked_clipped_block),\n aggregate_stats[agg_fid]['min'])\n aggregate_stats[agg_fid]['max'] = max(\n numpy.max(masked_clipped_block),\n aggregate_stats[agg_fid]['max'])\n aggregate_stats[agg_fid]['count'] += (\n masked_clipped_block.size)\n aggregate_stats[agg_fid]['sum'] += numpy.sum(\n masked_clipped_block)\n agg_fid_band = None\n agg_fid_raster = None\n unset_fids = aggregate_layer_fid_set.difference(aggregate_stats)\n LOGGER.debug(\n \"unset_fids: %s of %s \", len(unset_fids),\n len(aggregate_layer_fid_set))\n clipped_gt = numpy.array(\n clipped_raster.GetGeoTransform(), dtype=numpy.float32)\n LOGGER.debug(\"gt %s for %s\", clipped_gt, base_raster_path_band)\n for unset_fid in unset_fids:\n unset_feat = aggregate_layer.GetFeature(unset_fid)\n unset_geom_ref = unset_feat.GetGeometryRef()\n if unset_geom_ref is None:\n LOGGER.warn(\n f'no geometry in {aggregate_vector_path} FID: {unset_fid}')\n continue\n # fetch a shapely polygon and turn it into a list of polygons in the\n # case that it is a multipolygon\n shapely_geom = shapely.wkb.loads(bytes(unset_geom_ref.ExportToWkb()))\n try:\n # a non multipolygon will raise a TypeError\n shapely_geom_list = list(shapely_geom)\n except TypeError:\n shapely_geom_list = [shapely_geom]\n unset_geom_ref = None\n for shapely_geom in shapely_geom_list:\n single_geom = ogr.CreateGeometryFromWkt(shapely_geom.wkt)\n unset_geom_envelope = list(single_geom.GetEnvelope())\n single_geom = None\n unset_feat = None\n if clipped_gt[1] < 0:\n unset_geom_envelope[0], unset_geom_envelope[1] = (\n unset_geom_envelope[1], unset_geom_envelope[0])\n if clipped_gt[5] < 0:\n unset_geom_envelope[2], unset_geom_envelope[3] = (\n unset_geom_envelope[3], unset_geom_envelope[2])\n\n xoff = int((unset_geom_envelope[0] - clipped_gt[0]) / clipped_gt[1])\n yoff = int((unset_geom_envelope[2] - 
clipped_gt[3]) / clipped_gt[5])\n win_xsize = int(numpy.ceil(\n (unset_geom_envelope[1] - clipped_gt[0]) /\n clipped_gt[1])) - xoff\n win_ysize = int(numpy.ceil(\n (unset_geom_envelope[3] - clipped_gt[3]) /\n clipped_gt[5])) - yoff\n\n # clamp offset to the side of the raster if it's negative\n if xoff < 0:\n win_xsize += xoff\n xoff = 0\n if yoff < 0:\n win_ysize += yoff\n yoff = 0\n\n # clamp the window to the side of the raster if too big\n if xoff+win_xsize > clipped_band.XSize:\n win_xsize = clipped_band.XSize-xoff\n if yoff+win_ysize > clipped_band.YSize:\n win_ysize = clipped_band.YSize-yoff\n\n if win_xsize <= 0 or win_ysize <= 0:\n continue\n\n # here we consider the pixels that intersect with the geometry's\n # bounding box as being the proxy for the intersection with the\n # polygon itself. This is not a bad approximation since the case\n # that caused the polygon to be skipped in the first phase is that it\n # is as small as a pixel. There could be some degenerate cases that\n # make this estimation very wrong, but we do not know of any that\n # would come from natural data. If you do encounter such a dataset\n # please email the description and datset to [email protected].\n unset_fid_block = clipped_band.ReadAsArray(\n xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)\n\n if raster_nodata is not None:\n unset_fid_nodata_mask = numpy.isclose(\n unset_fid_block, raster_nodata)\n else:\n unset_fid_nodata_mask = numpy.zeros(\n unset_fid_block.shape, dtype=bool)\n\n valid_unset_fid_block = unset_fid_block[~unset_fid_nodata_mask]\n if valid_unset_fid_block.size == 0:\n aggregate_stats[unset_fid]['min'] = 0.0\n aggregate_stats[unset_fid]['max'] = 0.0\n aggregate_stats[unset_fid]['sum'] = 0.0\n else:\n aggregate_stats[unset_fid]['min'] = numpy.min(\n valid_unset_fid_block)\n aggregate_stats[unset_fid]['max'] = numpy.max(\n valid_unset_fid_block)\n aggregate_stats[unset_fid]['sum'] = numpy.sum(\n valid_unset_fid_block)\n aggregate_stats[unset_fid]['count'] = valid_unset_fid_block.size\n aggregate_stats[unset_fid]['nodata_count'] = numpy.count_nonzero(\n unset_fid_nodata_mask)\n\n unset_fids = aggregate_layer_fid_set.difference(aggregate_stats)\n LOGGER.debug(\n \"remaining unset_fids: %s of %s \", len(unset_fids),\n len(aggregate_layer_fid_set))\n # fill in the missing polygon fids in the aggregate stats by invoking the\n # accessor in the defaultdict\n for fid in unset_fids:\n _ = aggregate_stats[fid]\n\n LOGGER.info(\n \"all done processing polygon sets for %s\", os.path.basename(\n aggregate_vector_path))\n\n # clean up temporary files\n spat_ref = None\n clipped_band = None\n clipped_raster = None\n disjoint_layer = None\n disjoint_vector = None\n aggregate_layer = None\n aggregate_vector = None\n\n if clean_working_dir:\n shutil.rmtree(temp_working_dir)\n return dict(aggregate_stats)\n\n\ndef get_vector_info(vector_path, layer_id=0):\n \"\"\"Get information about an GDAL vector.\n\n Args:\n vector_path (str): a path to a GDAL vector.\n layer_id (str/int): name or index of underlying layer to analyze.\n Defaults to 0.\n\n Raises:\n ValueError if ``vector_path`` does not exist on disk or cannot be\n opened as a gdal.OF_VECTOR.\n\n Return:\n raster_properties (dictionary):\n a dictionary with the following key-value pairs:\n\n * ``'projection_wkt'`` (string): projection of the vector in Well\n Known Text.\n * ``'bounding_box'`` (sequence): sequence of floats representing\n the bounding box in projected coordinates in the order\n [minx, miny, maxx, maxy].\n * 
``'file_list'`` (sequence): sequence of string paths to the files\n that make up this vector.\n * ``'feature_count'`` (int): number of features in the layer.\n\n \"\"\"\n vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)\n if not vector:\n raise ValueError(\n \"Could not open %s as a gdal.OF_VECTOR\" % vector_path)\n vector_properties = {}\n vector_properties['file_list'] = vector.GetFileList()\n layer = vector.GetLayer(iLayer=layer_id)\n # projection is same for all layers, so just use the first one\n spatial_ref = layer.GetSpatialRef()\n if spatial_ref:\n vector_projection_wkt = spatial_ref.ExportToWkt()\n else:\n vector_projection_wkt = None\n vector_properties['projection_wkt'] = vector_projection_wkt\n layer_bb = layer.GetExtent()\n # convert form [minx,maxx,miny,maxy] to [minx,miny,maxx,maxy]\n vector_properties['bounding_box'] = [layer_bb[i] for i in [0, 2, 1, 3]]\n vector_properties['feature_count'] = layer.GetFeatureCount()\n layer = None\n vector = None\n return vector_properties\n\n\ndef get_raster_info(raster_path):\n \"\"\"Get information about a GDAL raster (dataset).\n\n Args:\n raster_path (String): a path to a GDAL raster.\n\n Raises:\n ValueError\n if ``raster_path`` is not a file or cannot be opened as a\n ``gdal.OF_RASTER``.\n\n Return:\n raster_properties (dictionary):\n a dictionary with the properties stored under relevant keys.\n\n * ``'pixel_size'`` (tuple): (pixel x-size, pixel y-size)\n from geotransform.\n * ``'raster_size'`` (tuple): number of raster pixels in (x, y)\n direction.\n * ``'nodata'`` (sequence): a sequence of the nodata values in the bands\n of the raster in the same order as increasing band index.\n * ``'n_bands'`` (int): number of bands in the raster.\n * ``'geotransform'`` (tuple): a 6-tuple representing the geotransform\n of (x orign, x-increase, xy-increase, y origin, yx-increase,\n y-increase).\n * ``'datatype'`` (int): An instance of an enumerated gdal.GDT_* int\n that represents the datatype of the raster.\n * ``'projection_wkt'`` (string): projection of the raster in Well Known\n Text.\n * ``'bounding_box'`` (sequence): sequence of floats representing the\n bounding box in projected coordinates in the order\n [minx, miny, maxx, maxy]\n * ``'block_size'`` (tuple): underlying x/y raster block size for\n efficient reading.\n * ``'numpy_type'`` (numpy type): this is the equivalent numpy datatype\n for the raster bands including signed bytes.\n\n \"\"\"\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n if not raster:\n raise ValueError(\n \"Could not open %s as a gdal.OF_RASTER\" % raster_path)\n raster_properties = {}\n raster_properties['file_list'] = raster.GetFileList()\n projection_wkt = raster.GetProjection()\n if not projection_wkt:\n projection_wkt = None\n raster_properties['projection_wkt'] = projection_wkt\n geo_transform = raster.GetGeoTransform()\n raster_properties['geotransform'] = geo_transform\n raster_properties['pixel_size'] = (geo_transform[1], geo_transform[5])\n raster_properties['raster_size'] = (\n raster.GetRasterBand(1).XSize,\n raster.GetRasterBand(1).YSize)\n raster_properties['n_bands'] = raster.RasterCount\n raster_properties['nodata'] = [\n raster.GetRasterBand(index).GetNoDataValue() for index in range(\n 1, raster_properties['n_bands']+1)]\n # blocksize is the same for all bands, so we can just get the first\n raster_properties['block_size'] = raster.GetRasterBand(1).GetBlockSize()\n\n # we dont' really know how the geotransform is laid out, all we can do is\n # calculate the x and y bounds, then take the 
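# --- illustrative sketch (not part of the original module) ---
# The [0, 2, 1, 3] shuffle above converts OGR's GetExtent() ordering
# (minx, maxx, miny, maxy) into this module's (minx, miny, maxx, maxy):
layer_bb = (10.0, 40.0, -5.0, 25.0)  # hypothetical GetExtent() result
bounding_box = [layer_bb[i] for i in [0, 2, 1, 3]]
print(bounding_box)  # [10.0, -5.0, 40.0, 25.0]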
appropriate min/max\n x_bounds = [\n geo_transform[0], geo_transform[0] +\n raster_properties['raster_size'][0] * geo_transform[1] +\n raster_properties['raster_size'][1] * geo_transform[2]]\n y_bounds = [\n geo_transform[3], geo_transform[3] +\n raster_properties['raster_size'][0] * geo_transform[4] +\n raster_properties['raster_size'][1] * geo_transform[5]]\n\n raster_properties['bounding_box'] = [\n numpy.min(x_bounds), numpy.min(y_bounds),\n numpy.max(x_bounds), numpy.max(y_bounds)]\n\n # datatype is the same for the whole raster, but is associated with band\n band = raster.GetRasterBand(1)\n band_datatype = band.DataType\n raster_properties['datatype'] = band_datatype\n raster_properties['numpy_type'] = (\n _GDAL_TYPE_TO_NUMPY_LOOKUP[band_datatype])\n # this part checks to see if the byte is signed or not\n if band_datatype == gdal.GDT_Byte:\n metadata = band.GetMetadata('IMAGE_STRUCTURE')\n if 'PIXELTYPE' in metadata and metadata['PIXELTYPE'] == 'SIGNEDBYTE':\n raster_properties['numpy_type'] = numpy.int8\n band = None\n raster = None\n return raster_properties\n\n\ndef reproject_vector(\n base_vector_path, target_projection_wkt, target_path, layer_id=0,\n driver_name='ESRI Shapefile', copy_fields=True,\n geometry_type=ogr.wkbMultiPolygon,\n osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):\n \"\"\"Reproject OGR DataSource (vector).\n\n Transforms the features of the base vector to the desired output\n projection in a new ESRI Shapefile.\n\n Args:\n base_vector_path (string): Path to the base shapefile to transform.\n target_projection_wkt (string): the desired output projection in Well\n Known Text (by layer.GetSpatialRef().ExportToWkt())\n target_path (string): the filepath to the transformed shapefile\n layer_id (str/int): name or index of layer in ``base_vector_path`` to\n reproject. Defaults to 0.\n driver_name (string): String to pass to ogr.GetDriverByName, defaults\n to 'ESRI Shapefile'.\n copy_fields (bool or iterable): If True, all the fields in\n ``base_vector_path`` will be copied to ``target_path`` during the\n reprojection step. If it is an iterable, it will contain the\n field names to exclusively copy. An unmatched fieldname will be\n ignored. If ``False`` no fields are copied into the new vector.\n geometry_type (int): enumerated type of target layer, default is\n multipolygon which saves the function from having to guess\n and deal with different geometry type specifications from\n ESRI to GPKG.\n osr_axis_mapping_strategy (int): OSR axis mapping strategy for\n ``SpatialReference`` objects. Defaults to\n ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. 
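(The axis mapping strategy controls whether\n            GDAL interprets coordinates in traditional x,y order or in the\n            axis order declared by the CRS authority; this note is an\n            editorial gloss on the GDAL/OSR behavior, not part of the\n            original docstring.) 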
This parameter\n            should not be changed unless you know what you are doing.\n\n    Return:\n        None\n    \"\"\"\n    base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)\n\n    # if this file already exists, then remove it\n    if os.path.isfile(target_path):\n        LOGGER.warning(\n            \"%s already exists, removing and overwriting\", target_path)\n        os.remove(target_path)\n\n    target_sr = osr.SpatialReference(target_projection_wkt)\n\n    # create a new shapefile from the original_datasource\n    target_driver = ogr.GetDriverByName(driver_name)\n    target_vector = target_driver.CreateDataSource(target_path)\n\n    layer = base_vector.GetLayer(layer_id)\n    layer_dfn = layer.GetLayerDefn()\n\n    target_layer = target_vector.CreateLayer(\n        layer_dfn.GetName(), target_sr, geometry_type)\n\n    # this will map the target field index to the base index it came from\n    # in case we don't need to copy all the fields\n    target_to_base_field_id_map = {}\n    if copy_fields:\n        # Get the number of fields in original_layer\n        original_field_count = layer_dfn.GetFieldCount()\n        # For every field that's being copied, create a duplicate field in\n        # the new layer\n\n        for fld_index in range(original_field_count):\n            original_field = layer_dfn.GetFieldDefn(fld_index)\n            field_name = original_field.GetName()\n            if copy_fields is True or field_name in copy_fields:\n                target_field = ogr.FieldDefn(\n                    field_name, original_field.GetType())\n                target_layer.CreateField(target_field)\n                target_to_base_field_id_map[fld_index] = len(\n                    target_to_base_field_id_map)\n\n    # Get the SR of the original_layer to use in transforming\n    base_sr = layer.GetSpatialRef()\n\n    base_sr.SetAxisMappingStrategy(osr_axis_mapping_strategy)\n    target_sr.SetAxisMappingStrategy(osr_axis_mapping_strategy)\n\n    # Create a coordinate transformation\n    coord_trans = osr.CreateCoordinateTransformation(base_sr, target_sr)\n\n    # Copy all of the features in layer to the new shapefile\n    target_layer.StartTransaction()\n    error_count = 0\n    last_time = time.time()\n    LOGGER.info(\"starting reprojection\")\n    for feature_index, base_feature in enumerate(layer):\n        last_time = _invoke_timed_callback(\n            last_time, lambda: LOGGER.info(\n                \"reprojection approximately %.1f%% complete on %s\",\n                100.0 * float(feature_index+1) / (layer.GetFeatureCount()),\n                os.path.basename(target_path)),\n            _LOGGING_PERIOD)\n\n        geom = base_feature.GetGeometryRef()\n        if geom is None:\n            # we encountered this error occasionally when transforming clipped\n            # global polygons. 
Not clear what is happening but perhaps a\n # feature was retained that otherwise wouldn't have been included\n # in the clip\n error_count += 1\n continue\n\n # Transform geometry into format desired for the new projection\n error_code = geom.Transform(coord_trans)\n if error_code != 0: # error\n # this could be caused by an out of range transformation\n # whatever the case, don't put the transformed poly into the\n # output set\n error_count += 1\n continue\n\n # Copy original_datasource's feature and set as new shapes feature\n target_feature = ogr.Feature(target_layer.GetLayerDefn())\n target_feature.SetGeometry(geom)\n\n # For all the fields in the feature set the field values from the\n # source field\n for target_index, base_index in (\n target_to_base_field_id_map.items()):\n target_feature.SetField(\n target_index, base_feature.GetField(base_index))\n\n target_layer.CreateFeature(target_feature)\n target_feature = None\n base_feature = None\n target_layer.CommitTransaction()\n LOGGER.info(\n \"reprojection 100.0%% complete on %s\", os.path.basename(target_path))\n if error_count > 0:\n LOGGER.warning(\n '%d features out of %d were unable to be transformed and are'\n ' not in the output vector at %s', error_count,\n layer.GetFeatureCount(), target_path)\n layer = None\n base_vector = None\n\n\ndef reclassify_raster(\n base_raster_path_band, value_map, target_raster_path, target_datatype,\n target_nodata, values_required=True,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Reclassify pixel values in a raster.\n\n A function to reclassify values in raster to any output type. By default\n the values except for nodata must be in ``value_map``.\n\n Args:\n base_raster_path_band (tuple): a tuple including file path to a raster\n and the band index to operate over. ex: (path, band_index)\n value_map (dictionary): a dictionary of values of\n {source_value: dest_value, ...} where source_value's type is the\n same as the values in ``base_raster_path`` at band ``band_index``.\n Must contain at least one value.\n target_raster_path (string): target raster output path; overwritten if\n it exists\n target_datatype (gdal type): the numerical type for the target raster\n target_nodata (numerical type): the nodata value for the target raster\n Must be the same type as target_datatype\n values_required (bool): If True, raise a ValueError if there is a\n value in the raster that is not found in ``value_map``.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. 
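A minimal usage sketch\n            follows; the landcover path, value map, and nodata value are\n            hypothetical placeholders::\n\n                reclassify_raster(\n                    ('landcover.tif', 1), {1: 0.5, 2: 0.9, 3: 0.0},\n                    'reclassified.tif', gdal.GDT_Float32, -1.0)\n\n            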
Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n\n Raises:\n ReclassificationMissingValuesError\n if ``values_required`` is ``True``\n and a pixel value from ``base_raster_path_band`` is not a key in\n ``value_map``.\n\n \"\"\"\n if len(value_map) == 0:\n raise ValueError(\"value_map must contain at least one value\")\n if not _is_raster_path_band_formatted(base_raster_path_band):\n raise ValueError(\n \"Expected a (path, band_id) tuple, instead got '%s'\" %\n base_raster_path_band)\n raster_info = get_raster_info(base_raster_path_band[0])\n nodata = raster_info['nodata'][base_raster_path_band[1]-1]\n value_map_copy = value_map.copy()\n # possible that nodata value is not defined, so test for None first\n # otherwise if nodata not predefined, remap it into the dictionary\n if nodata is not None and nodata not in value_map_copy:\n value_map_copy[nodata] = target_nodata\n keys = sorted(numpy.array(list(value_map_copy.keys())))\n values = numpy.array([value_map_copy[x] for x in keys])\n\n def _map_dataset_to_value_op(original_values):\n \"\"\"Convert a block of original values to the lookup values.\"\"\"\n if values_required:\n unique = numpy.unique(original_values)\n has_map = numpy.in1d(unique, keys)\n if not all(has_map):\n missing_values = unique[~has_map]\n raise ReclassificationMissingValuesError(\n f'The following {missing_values.size} raster values'\n f' {missing_values} from \"{base_raster_path_band[0]}\"'\n ' do not have corresponding entries in the ``value_map``:'\n f' {value_map}.', missing_values)\n index = numpy.digitize(original_values.ravel(), keys, right=True)\n return values[index].reshape(original_values.shape)\n\n raster_calculator(\n [base_raster_path_band], _map_dataset_to_value_op,\n target_raster_path, target_datatype, target_nodata,\n raster_driver_creation_tuple=raster_driver_creation_tuple)\n\n\ndef warp_raster(\n base_raster_path, target_pixel_size, target_raster_path,\n resample_method, target_bb=None, base_projection_wkt=None,\n target_projection_wkt=None, n_threads=None, vector_mask_options=None,\n gdal_warp_options=None, working_dir=None,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS,\n osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):\n \"\"\"Resize/resample raster to desired pixel size, bbox and projection.\n\n Args:\n base_raster_path (string): path to base raster.\n target_pixel_size (list/tuple): a two element sequence indicating\n the x and y pixel size in projected units.\n target_raster_path (string): the location of the resized and\n resampled raster.\n resample_method (string): the resampling technique, one of\n ``near|bilinear|cubic|cubicspline|lanczos|average|mode|max|min|med|q1|q3``\n target_bb (sequence): if None, target bounding box is the same as the\n source bounding box. Otherwise it's a sequence of float\n describing target bounding box in target coordinate system as\n [minx, miny, maxx, maxy].\n base_projection_wkt (string): if not None, interpret the projection of\n ``base_raster_path`` as this.\n target_projection_wkt (string): if not None, desired target projection\n in Well Known Text format.\n n_threads (int): optional, if not None this sets the ``N_THREADS``\n option for ``gdal.Warp``.\n vector_mask_options (dict): optional, if not None, this is a\n dictionary of options to use an existing vector's geometry to\n mask out pixels in the target raster that do not overlap the\n vector's geometry. 
Keys to this dictionary are:\n\n * ``'mask_vector_path'``: (str) path to the mask vector file. This\n vector will be automatically projected to the target\n projection if its base coordinate system does not match\n the target.\n * ``'mask_layer_id'``: (int/str) the layer index or name to use\n for masking, if this key is not in the dictionary the default\n is to use the layer at index 0.\n * ``'mask_vector_where_filter'``: (str) an SQL WHERE string that\n can be used to filter the geometry in the mask. Ex:\n 'id > 10' would use all features whose field value of\n 'id' is > 10.\n\n gdal_warp_options (sequence): if present, the contents of this list\n are passed to the ``warpOptions`` parameter of ``gdal.Warp``. See\n the GDAL Warp documentation for valid options.\n working_dir (string): if defined uses this directory to make\n temporary working files for calculation. Otherwise uses system's\n temp directory.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n osr_axis_mapping_strategy (int): OSR axis mapping strategy for\n ``SpatialReference`` objects. Defaults to\n ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This parameter\n should not be changed unless you know what you are doing.\n\n Return:\n None\n\n Raises:\n ValueError\n if ``pixel_size`` is not a 2 element sequence of numbers.\n ValueError\n if ``vector_mask_options`` is not None but the\n ``mask_vector_path`` is undefined or doesn't point to a valid\n file.\n\n \"\"\"\n _assert_is_valid_pixel_size(target_pixel_size)\n\n base_raster_info = get_raster_info(base_raster_path)\n if target_projection_wkt is None:\n target_projection_wkt = base_raster_info['projection_wkt']\n\n if target_bb is None:\n # ensure it's a sequence so we can modify it\n working_bb = list(get_raster_info(base_raster_path)['bounding_box'])\n # transform the working_bb if target_projection_wkt is not None\n if target_projection_wkt is not None:\n LOGGER.debug(\n \"transforming bounding box from %s \", working_bb)\n working_bb = transform_bounding_box(\n base_raster_info['bounding_box'],\n base_raster_info['projection_wkt'], target_projection_wkt)\n LOGGER.debug(\n \"transforming bounding to %s \", working_bb)\n else:\n # ensure it's a sequence so we can modify it\n working_bb = list(target_bb)\n\n # determine the raster size that bounds the input bounding box and then\n # adjust the bounding box to be that size\n target_x_size = int(abs(\n float(working_bb[2] - working_bb[0]) / target_pixel_size[0]))\n target_y_size = int(abs(\n float(working_bb[3] - working_bb[1]) / target_pixel_size[1]))\n\n # sometimes bounding boxes are numerically perfect, this checks for that\n x_residual = (\n abs(target_x_size * target_pixel_size[0]) -\n (working_bb[2] - working_bb[0]))\n if not numpy.isclose(x_residual, 0.0):\n target_x_size += 1\n y_residual = (\n abs(target_y_size * target_pixel_size[1]) -\n (working_bb[3] - working_bb[1]))\n if not numpy.isclose(y_residual, 0.0):\n target_y_size += 1\n\n if target_x_size == 0:\n LOGGER.warning(\n \"bounding_box is so small that x dimension rounds to 0; \"\n \"clamping to 1.\")\n target_x_size = 1\n if target_y_size == 0:\n LOGGER.warning(\n \"bounding_box is so small that y dimension rounds to 0; \"\n \"clamping to 1.\")\n target_y_size = 1\n\n # this ensures the bounding boxes perfectly fit a multiple of the target\n 
# pixel size\n    working_bb[2] = working_bb[0] + abs(target_pixel_size[0] * target_x_size)\n    working_bb[3] = working_bb[1] + abs(target_pixel_size[1] * target_y_size)\n\n    reproject_callback = _make_logger_callback(\n        \"Warp %.1f%% complete %s\")\n\n    warp_options = []\n    if n_threads:\n        warp_options.append('NUM_THREADS=%d' % n_threads)\n    if gdal_warp_options:\n        warp_options.extend(gdal_warp_options)\n\n    mask_vector_path = None\n    mask_layer_id = 0\n    mask_vector_where_filter = None\n    if vector_mask_options:\n        # translate pygeoprocessing terminology into GDAL warp options.\n        if 'mask_vector_path' not in vector_mask_options:\n            raise ValueError(\n                'vector_mask_options passed, but no value for '\n                '\"mask_vector_path\": %s' % vector_mask_options)\n        mask_vector_path = vector_mask_options['mask_vector_path']\n        if not os.path.exists(mask_vector_path):\n            raise ValueError(\n                'The mask vector at %s was not found.' % mask_vector_path)\n        if 'mask_layer_id' in vector_mask_options:\n            mask_layer_id = vector_mask_options['mask_layer_id']\n        if 'mask_vector_where_filter' in vector_mask_options:\n            mask_vector_where_filter = (\n                vector_mask_options['mask_vector_where_filter'])\n\n    if vector_mask_options:\n        temp_working_dir = tempfile.mkdtemp(dir=working_dir)\n        warped_raster_path = os.path.join(\n            temp_working_dir, os.path.basename(target_raster_path).replace(\n                '.tif', '_nonmasked.tif'))\n    else:\n        # if there is no vector path the result is the warp\n        warped_raster_path = target_raster_path\n    base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)\n\n    raster_creation_options = list(raster_driver_creation_tuple[1])\n    if (base_raster_info['numpy_type'] == numpy.int8 and\n            'PIXELTYPE' not in ' '.join(raster_creation_options)):\n        raster_creation_options.append('PIXELTYPE=SIGNEDBYTE')\n\n    # WarpOptions.this is None when an invalid option is passed, and it's a\n    # truthy SWIG proxy object when it's given a valid resample arg.\n    if not gdal.WarpOptions(resampleAlg=resample_method)[0].this:\n        raise ValueError(\n            f'Invalid resample method: \"{resample_method}\"')\n\n    gdal.Warp(\n        warped_raster_path, base_raster,\n        format=raster_driver_creation_tuple[0],\n        outputBounds=working_bb,\n        xRes=abs(target_pixel_size[0]),\n        yRes=abs(target_pixel_size[1]),\n        resampleAlg=resample_method,\n        outputBoundsSRS=target_projection_wkt,\n        srcSRS=base_projection_wkt,\n        dstSRS=target_projection_wkt,\n        multithread=True if warp_options else False,\n        warpOptions=warp_options,\n        creationOptions=raster_creation_options,\n        callback=reproject_callback,\n        callback_data=[target_raster_path])\n\n    if vector_mask_options:\n        # Make sure the raster creation options passed to ``mask_raster``\n        # reflect any metadata updates\n        updated_raster_driver_creation_tuple = (\n            raster_driver_creation_tuple[0], tuple(raster_creation_options))\n        # there was a cutline vector, so mask it out now, otherwise target\n        # is already the result.\n        mask_raster(\n            (warped_raster_path, 1), vector_mask_options['mask_vector_path'],\n            target_raster_path,\n            mask_layer_id=mask_layer_id,\n            where_clause=mask_vector_where_filter,\n            target_mask_value=None, working_dir=temp_working_dir,\n            all_touched=False,\n            raster_driver_creation_tuple=updated_raster_driver_creation_tuple)\n        shutil.rmtree(temp_working_dir)\n\n\ndef rasterize(\n        vector_path, target_raster_path, burn_values=None, option_list=None,\n        layer_id=0, where_clause=None):\n    \"\"\"Project a vector onto an existing raster.\n\n    Burn the layer at ``layer_id`` in ``vector_path`` to an existing\n    raster at ``target_raster_path``.\n\n    
Args:\n        vector_path (string): filepath to vector to rasterize.\n        target_raster_path (string): path to an existing raster to burn vector\n            into. Can have multiple bands.\n        burn_values (list/tuple): optional sequence of values to burn into\n            each band of the raster. If used, should have the same length as\n            number of bands at the ``target_raster_path`` raster. If ``None``\n            then ``option_list`` must have a valid value.\n        option_list (list/tuple): optional, a sequence of burn options; if\n            None then a valid value for ``burn_values`` must exist. Otherwise,\n            each element is a string of the form:\n\n            * ``\"ATTRIBUTE=?\"``: Identifies an attribute field on the features\n              to be used for a burn in value. The value will be burned into all\n              output bands. If specified, ``burn_values`` will not be used and\n              can be None.\n            * ``\"CHUNKYSIZE=?\"``: The height in lines of the chunk to operate\n              on. The larger the chunk size the fewer times we need to make a\n              pass through all the shapes. If it is not set or set to zero the\n              default chunk size will be used. Default size will be estimated\n              based on the GDAL cache buffer size using formula:\n              ``cache_size_bytes/scanline_size_bytes``, so the chunk will not\n              exceed the cache.\n            * ``\"ALL_TOUCHED=TRUE/FALSE\"``: May be set to ``TRUE`` to set all\n              pixels touched by the line or polygons, not just those whose\n              center is within the polygon or that are selected by Bresenham's\n              line algorithm. Defaults to ``FALSE``.\n            * ``\"BURN_VALUE_FROM\"``: May be set to \"Z\" to use the Z values of\n              the geometries. The value from burn_values or the\n              attribute field value is added to this before burning. In\n              default case dfBurnValue is burned as it is (richpsharp:\n              note, I'm not sure what this means, but copied from formal\n              docs). This is implemented properly only for points and\n              lines for now. Polygons will be burned using the Z value\n              from the first point.\n            * ``\"MERGE_ALG=REPLACE/ADD\"``: REPLACE results in overwriting of\n              value, while ADD adds the new value to the existing\n              raster, suitable for heatmaps for instance.\n\n            Example::\n\n                [\"ATTRIBUTE=npv\", \"ALL_TOUCHED=TRUE\"]\n\n        layer_id (str/int): name or index of the layer to rasterize. Defaults\n            to 0.\n        where_clause (str): If not None, is an SQL query-like string to filter\n            which features are used to rasterize, (e.g. where=\"value=1\").\n\n    Return:\n        None\n    \"\"\"\n    gdal.PushErrorHandler('CPLQuietErrorHandler')\n    raster = gdal.OpenEx(target_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n    gdal.PopErrorHandler()\n    if raster is None:\n        raise ValueError(\n            \"%s doesn't exist, but needed to rasterize.\" % target_raster_path)\n\n    rasterize_callback = _make_logger_callback(\n        \"RasterizeLayer %.1f%% complete %s\")\n\n    if burn_values is None:\n        burn_values = []\n    if option_list is None:\n        option_list = []\n\n    if not burn_values and not option_list:\n        raise ValueError(\n            \"Neither `burn_values` nor `option_list` is set. 
At least \"\n \"one must have a value.\")\n\n if not isinstance(burn_values, (list, tuple)):\n raise ValueError(\n \"`burn_values` is not a list/tuple, the value passed is '%s'\",\n repr(burn_values))\n\n if not isinstance(option_list, (list, tuple)):\n raise ValueError(\n \"`option_list` is not a list/tuple, the value passed is '%s'\",\n repr(option_list))\n\n vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)\n layer = vector.GetLayer(layer_id)\n if where_clause:\n layer.SetAttributeFilter(where_clause)\n\n try:\n result = gdal.RasterizeLayer(\n raster, [1], layer, burn_values=burn_values,\n options=option_list, callback=rasterize_callback)\n except Exception:\n # something bad happened, but still clean up\n # this case came out of a flaky test condition where the raster\n # would still be in use by the rasterize layer function\n LOGGER.exception('bad error on rasterizelayer')\n result = -1\n\n layer = None\n vector = None\n\n if result != 0:\n # need this __swig_destroy__ because we sometimes encounter a flaky\n # test where the path to the raster cannot be cleaned up because\n # it is still in use somewhere, likely a bug in gdal.RasterizeLayer\n # note it is only invoked if there is a serious error\n gdal.Dataset.__swig_destroy__(raster)\n raise RuntimeError('Rasterize returned a nonzero exit code.')\n raster = None\n\n\ndef calculate_disjoint_polygon_set(\n vector_path, layer_id=0, bounding_box=None):\n \"\"\"Create a sequence of sets of polygons that don't overlap.\n\n Determining the minimal number of those sets is an np-complete problem so\n this is an approximation that builds up sets of maximal subsets.\n\n Args:\n vector_path (string): a path to an OGR vector.\n layer_id (str/int): name or index of underlying layer in\n ``vector_path`` to calculate disjoint set. Defaults to 0.\n bounding_box (sequence): sequence of floats representing a bounding\n box to filter any polygons by. If a feature in ``vector_path``\n does not intersect this bounding box it will not be considered\n in the disjoint calculation. 
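A minimal usage sketch with a\n            hypothetical vector path::\n\n                for fid_set in calculate_disjoint_polygon_set('parcels.gpkg'):\n                    pass  # process each set of non-overlapping features\n\n            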
Coordinates are in the order\n [minx, miny, maxx, maxy].\n\n Return:\n subset_list (sequence): sequence of sets of FIDs from vector_path\n\n \"\"\"\n vector = gdal.OpenEx(vector_path, gdal.OF_VECTOR)\n vector_layer = vector.GetLayer(layer_id)\n feature_count = vector_layer.GetFeatureCount()\n\n if feature_count == 0:\n raise RuntimeError('Vector must have geometries but does not: %s'\n % vector_path)\n\n last_time = time.time()\n LOGGER.info(\"build shapely polygon list\")\n\n if bounding_box is None:\n bounding_box = get_vector_info(vector_path)['bounding_box']\n bounding_box = shapely.prepared.prep(shapely.geometry.box(*bounding_box))\n\n # As much as I want this to be in a comprehension, a comprehension version\n # of this loop causes python 3.6 to crash on linux in GDAL 2.1.2 (which is\n # what's in the debian:stretch repos.)\n shapely_polygon_lookup = {}\n for poly_feat in vector_layer:\n poly_geom_ref = poly_feat.GetGeometryRef()\n if poly_geom_ref is None:\n LOGGER.warn(\n f'no geometry in {vector_path} FID: {poly_feat.GetFID()}, '\n 'skipping...')\n continue\n # with GDAL>=3.3.0 ExportToWkb returns a bytearray instead of bytes\n shapely_polygon_lookup[poly_feat.GetFID()] = (\n shapely.wkb.loads(bytes(poly_geom_ref.ExportToWkb())))\n poly_geom_ref = None\n poly_feat = None\n\n LOGGER.info(\"build shapely rtree index\")\n r_tree_index_stream = [\n (poly_fid, poly.bounds, None)\n for poly_fid, poly in shapely_polygon_lookup.items()\n if bounding_box.intersects(poly)]\n if r_tree_index_stream:\n poly_rtree_index = rtree.index.Index(r_tree_index_stream)\n else:\n LOGGER.warning(\"no polygons intersected the bounding box\")\n return []\n\n vector_layer = None\n vector = None\n LOGGER.info(\n 'poly feature lookup 100.0%% complete on %s',\n os.path.basename(vector_path))\n\n LOGGER.info('build poly intersection lookup')\n poly_intersect_lookup = collections.defaultdict(set)\n for poly_index, (poly_fid, poly_geom) in enumerate(\n shapely_polygon_lookup.items()):\n last_time = _invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n \"poly intersection lookup approximately %.1f%% complete \"\n \"on %s\", 100.0 * float(poly_index+1) / len(\n shapely_polygon_lookup), os.path.basename(vector_path)),\n _LOGGING_PERIOD)\n possible_intersection_set = list(poly_rtree_index.intersection(\n poly_geom.bounds))\n # no reason to prep the polygon to intersect itself\n if len(possible_intersection_set) > 1:\n polygon = shapely.prepared.prep(poly_geom)\n else:\n polygon = poly_geom\n for intersect_poly_fid in possible_intersection_set:\n if intersect_poly_fid == poly_fid or polygon.intersects(\n shapely_polygon_lookup[intersect_poly_fid]):\n poly_intersect_lookup[poly_fid].add(intersect_poly_fid)\n polygon = None\n LOGGER.info(\n 'poly intersection feature lookup 100.0%% complete on %s',\n os.path.basename(vector_path))\n\n # Build maximal subsets\n subset_list = []\n while len(poly_intersect_lookup) > 0:\n # sort polygons by increasing number of intersections\n intersections_list = [\n (len(poly_intersect_set), poly_fid, poly_intersect_set)\n for poly_fid, poly_intersect_set in\n poly_intersect_lookup.items()]\n intersections_list.sort()\n\n # build maximal subset\n maximal_set = set()\n for _, poly_fid, poly_intersect_set in intersections_list:\n last_time = _invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n \"maximal subset build approximately %.1f%% complete \"\n \"on %s\", 100.0 * float(\n feature_count - len(poly_intersect_lookup)) /\n feature_count, os.path.basename(vector_path)),\n 
_LOGGING_PERIOD)\n if not poly_intersect_set.intersection(maximal_set):\n # no intersection, add poly_fid to the maximal set and remove\n # the polygon from the lookup\n maximal_set.add(poly_fid)\n del poly_intersect_lookup[poly_fid]\n # remove all the polygons from intersections once they're computed\n for poly_fid, poly_intersect_set in poly_intersect_lookup.items():\n poly_intersect_lookup[poly_fid] = (\n poly_intersect_set.difference(maximal_set))\n subset_list.append(maximal_set)\n LOGGER.info(\n 'maximal subset build 100.0%% complete on %s',\n os.path.basename(vector_path))\n return subset_list\n\n\ndef distance_transform_edt(\n base_region_raster_path_band, target_distance_raster_path,\n sampling_distance=(1., 1.), working_dir=None,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Calculate the euclidean distance transform on base raster.\n\n Calculates the euclidean distance transform on the base raster in units of\n pixels multiplied by an optional scalar constant. The implementation is\n based off the algorithm described in: Meijster, Arnold, Jos BTM Roerdink,\n and Wim H. Hesselink. \"A general algorithm for computing distance\n transforms in linear time.\" Mathematical Morphology and its applications\n to image and signal processing. Springer, Boston, MA, 2002. 331-340.\n\n The base mask raster represents the area to distance transform from as\n any pixel that is not 0 or nodata. It is computationally convenient to\n calculate the distance transform on the entire raster irrespective of\n nodata placement and thus produces a raster that will have distance\n transform values even in pixels that are nodata in the base.\n\n Args:\n base_region_raster_path_band (tuple): a tuple including file path to a\n raster and the band index to define the base region pixels. Any\n pixel that is not 0 and nodata are considered to be part of the\n region.\n target_distance_raster_path (string): path to the target raster that\n is the exact euclidean distance transform from any pixel in the\n base raster that is not nodata and not 0. The units are in\n ``(pixel distance * sampling_distance)``.\n sampling_distance (tuple/list): an optional parameter used to scale\n the pixel distances when calculating the distance transform.\n Defaults to (1.0, 1.0). First element indicates the distance\n traveled in the x direction when changing a column index, and the\n second element in y when changing a row index. Both values must\n be > 0.\n working_dir (string): If not None, indicates where temporary files\n should be created during this run.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. 
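A minimal usage sketch,\n            computing distances from the nonzero pixels of a hypothetical\n            mask raster::\n\n                distance_transform_edt(\n                    ('stream_mask.tif', 1), 'dist_to_stream.tif')\n\n            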
Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n \"\"\"\n working_raster_paths = {}\n for raster_prefix in ['region_mask_raster', 'g_raster']:\n with tempfile.NamedTemporaryFile(\n prefix=raster_prefix, suffix='.tif', delete=False,\n dir=working_dir) as tmp_file:\n working_raster_paths[raster_prefix] = tmp_file.name\n nodata = (get_raster_info(base_region_raster_path_band[0])['nodata'])[\n base_region_raster_path_band[1]-1]\n nodata_out = 255\n\n def mask_op(base_array):\n \"\"\"Convert base_array to 1 if not 0 and nodata, 0 otherwise.\"\"\"\n if nodata is not None:\n return ~numpy.isclose(base_array, nodata) & (base_array != 0)\n else:\n return base_array != 0\n\n if not isinstance(sampling_distance, (tuple, list)):\n raise ValueError(\n \"`sampling_distance` should be a tuple/list, instead it's %s\" % (\n type(sampling_distance)))\n\n sample_d_x, sample_d_y = sampling_distance\n if sample_d_x <= 0. or sample_d_y <= 0.:\n raise ValueError(\n \"Sample distances must be > 0.0, instead got %s\",\n sampling_distance)\n\n raster_calculator(\n [base_region_raster_path_band], mask_op,\n working_raster_paths['region_mask_raster'], gdal.GDT_Byte, nodata_out,\n calc_raster_stats=False,\n raster_driver_creation_tuple=raster_driver_creation_tuple)\n geoprocessing_core._distance_transform_edt(\n working_raster_paths['region_mask_raster'],\n working_raster_paths['g_raster'], sampling_distance[0],\n sampling_distance[1], target_distance_raster_path,\n raster_driver_creation_tuple)\n\n for path in working_raster_paths.values():\n try:\n os.remove(path)\n except OSError:\n LOGGER.warning(\"couldn't remove file %s\", path)\n\n\ndef _next_regular(base):\n \"\"\"Find the next regular number greater than or equal to base.\n\n Regular numbers are composites of the prime factors 2, 3, and 5.\n Also known as 5-smooth numbers or Hamming numbers, these are the optimal\n size for inputs to FFTPACK.\n\n This source was taken directly from scipy.signaltools and saves us from\n having to access a protected member in a library that could change in\n future releases:\n\n https://github.com/scipy/scipy/blob/v0.17.1/scipy/signal/signaltools.py#L211\n\n Args:\n base (int): a positive integer to start to find the next Hamming\n number.\n\n Return:\n The next regular number greater than or equal to ``base``.\n\n \"\"\"\n if base <= 6:\n return base\n\n # Quickly check if it's already a power of 2\n if not (base & (base-1)):\n return base\n\n match = float('inf') # Anything found will be smaller\n p5 = 1\n while p5 < base:\n p35 = p5\n while p35 < base:\n # Ceiling integer division, avoiding conversion to float\n # (quotient = ceil(base / p35))\n quotient = -(-base // p35)\n\n # Quickly find next power of 2 >= quotient\n p2 = 2**((quotient - 1).bit_length())\n\n N = p2 * p35\n if N == base:\n return N\n elif N < match:\n match = N\n p35 *= 3\n if p35 == base:\n return p35\n if p35 < match:\n match = p35\n p5 *= 5\n if p5 == base:\n return p5\n if p5 < match:\n match = p5\n return match\n\n\ndef convolve_2d(\n signal_path_band, kernel_path_band, target_path,\n ignore_nodata_and_edges=False, mask_nodata=True,\n normalize_kernel=False, target_datatype=gdal.GDT_Float64,\n target_nodata=None, working_dir=None, set_tol_to_zero=1e-8,\n max_timeout=_MAX_TIMEOUT,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Convolve 2D kernel over 2D signal.\n\n Convolves the raster in ``kernel_path_band`` over ``signal_path_band``.\n Nodata values 
are treated as 0.0 during the convolution and masked to\n nodata for the output result where ``signal_path`` has nodata.\n\n Note with default values, boundary effects can be seen in the result where\n the kernel would hang off the edge of the raster or in regions with\n nodata pixels. The function would treat these areas as values with \"0.0\"\n by default thus pulling the total convolution down in these areas. This\n is similar to setting ``mode='same'`` in Numpy's ``convolve`` function:\n https://numpy.org/doc/stable/reference/generated/numpy.convolve.html\n\n This boundary effect can be avoided by setting\n ``ignore_nodata_and_edges=True`` which normalizes the target result by\n dynamically accounting for the number of valid signal pixels the kernel\n overlapped during the convolution step.\n\n Args:\n signal_path_band (tuple): a 2 tuple of the form\n (filepath to signal raster, band index).\n kernel_path_band (tuple): a 2 tuple of the form\n (filepath to kernel raster, band index), all pixel values should\n be valid -- output is not well defined if the kernel raster has\n nodata values.\n target_path (string): filepath to target raster that's the convolution\n of signal with kernel. Output will be a single band raster of\n same size and projection as ``signal_path_band``. Any nodata pixels\n that align with ``signal_path_band`` will be set to nodata.\n ignore_nodata_and_edges (boolean): If true, any pixels that are equal\n to ``signal_path_band``'s nodata value or signal pixels where the\n kernel extends beyond the edge of the raster are not included when\n averaging the convolution filter. This has the effect of\n \"spreading\" the result as though nodata and edges beyond the\n bounds of the raster are 0s. If set to false this tends to \"pull\"\n the signal away from nodata holes or raster edges. Set this value\n to ``True`` to avoid distortions signal values near edges for\n large integrating kernels.\n It can be useful to set this value to ``True`` to fill\n nodata holes through distance weighted averaging. In this case\n ``mask_nodata`` must be set to ``False`` so the result does not\n mask out these areas which are filled in. When using this\n technique be careful of cases where the kernel does not extend\n over any areas except nodata holes, in this case the resulting\n values in these areas will be nonsensical numbers, perhaps\n numerical infinity or NaNs.\n normalize_kernel (boolean): If true, the result is divided by the\n sum of the kernel.\n mask_nodata (boolean): If true, ``target_path`` raster's output is\n nodata where ``signal_path_band``'s pixels were nodata. Note that\n setting ``ignore_nodata_and_edges`` to ``True`` while setting\n ``mask_nodata`` to ``False`` can allow for a technique involving\n distance weighted averaging to define areas that would otherwise\n be nodata. Be careful in cases where the kernel does not\n extend over any valid non-nodata area since the result can be\n numerical infinity or NaNs.\n target_datatype (GDAL type): a GDAL raster type to set the output\n raster type to, as well as the type to calculate the convolution\n in. Defaults to GDT_Float64. Note signed byte is not\n supported.\n target_nodata (int/float): nodata value to set on output raster.\n If ``target_datatype`` is not gdal.GDT_Float64, this value must\n be set. Otherwise defaults to the minimum value of a float32.\n raster_creation_options (sequence): an argument list that will be\n passed to the GTiff driver for creating ``target_path``. 
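A minimal usage\n            sketch of this function, with hypothetical signal and kernel\n            rasters::\n\n                convolve_2d(\n                    ('population.tif', 1), ('kernel.tif', 1),\n                    'smoothed.tif', ignore_nodata_and_edges=True)\n\n            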
Useful\n for blocksizes, compression, and more.\n working_dir (string): If not None, indicates where temporary files\n should be created during this run.\n set_tol_to_zero (float): any value within +- this from 0.0 will get\n set to 0.0. This is to handle numerical roundoff errors that\n sometimes result in \"numerical zero\", such as -1.782e-18 that\n cannot be tolerated by users of this function. If `None` no\n adjustment will be done to output values.\n max_timeout (float): maximum amount of time to wait for worker thread\n to terminate.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n ``None``\n\n Raises:\n ValueError:\n if ``ignore_nodata_and_edges`` is ``True`` and ``mask_nodata``\n is ``False``.\n ValueError:\n if ``signal_path_band`` or ``kernel_path_band`` is a row based\n blocksize which would result in slow runtimes due to gdal\n cache thrashing.\n\n \"\"\"\n if target_datatype is not gdal.GDT_Float64 and target_nodata is None:\n raise ValueError(\n \"`target_datatype` is set, but `target_nodata` is None. \"\n \"`target_nodata` must be set if `target_datatype` is not \"\n \"`gdal.GDT_Float64`. `target_nodata` is set to None.\")\n if target_nodata is None:\n target_nodata = float(numpy.finfo(numpy.float32).min)\n\n if ignore_nodata_and_edges and not mask_nodata:\n LOGGER.debug(\n 'ignore_nodata_and_edges is True while mask_nodata is False -- '\n 'this can yield a nonsensical result in areas where the kernel '\n 'touches only nodata values.')\n\n bad_raster_path_list = []\n for raster_id, raster_path_band in [\n ('signal', signal_path_band), ('kernel', kernel_path_band)]:\n if (not _is_raster_path_band_formatted(raster_path_band)):\n bad_raster_path_list.append((raster_id, raster_path_band))\n if bad_raster_path_list:\n raise ValueError(\n \"Expected raster path band sequences for the following arguments \"\n f\"but instead got: {bad_raster_path_list}\")\n\n signal_raster_info = get_raster_info(signal_path_band[0])\n kernel_raster_info = get_raster_info(kernel_path_band[0])\n\n for info_dict in [signal_raster_info, kernel_raster_info]:\n if 1 in info_dict['block_size']:\n raise ValueError(\n f'{signal_path_band} has a row blocksize which can make this '\n f'function run very slow, create a square blocksize using '\n f'`warp_raster` or `align_and_resize_raster_stack` which '\n f'creates square blocksizes by default')\n\n # The nodata value is reset to a different value at the end of this\n # function. 
Here 0 is chosen as a default value since data are\n # incrementally added to the raster\n new_raster_from_base(\n signal_path_band[0], target_path, target_datatype, [0],\n raster_driver_creation_tuple=raster_driver_creation_tuple)\n\n\n n_cols_signal, n_rows_signal = signal_raster_info['raster_size']\n n_cols_kernel, n_rows_kernel = kernel_raster_info['raster_size']\n s_path_band = signal_path_band\n k_path_band = kernel_path_band\n s_nodata = signal_raster_info['nodata'][0]\n\n # we need the original signal raster info because we want the output to\n # be clipped and NODATA masked to it\n signal_raster = gdal.OpenEx(signal_path_band[0], gdal.OF_RASTER)\n signal_band = signal_raster.GetRasterBand(signal_path_band[1])\n # getting the offset list before it's opened for updating\n target_offset_list = list(iterblocks((target_path, 1), offset_only=True))\n target_raster = gdal.OpenEx(target_path, gdal.OF_RASTER | gdal.GA_Update)\n target_band = target_raster.GetRasterBand(1)\n\n # if we're ignoring nodata, we need to make a parallel convolved signal\n # of the nodata mask\n if ignore_nodata_and_edges:\n raster_file, mask_raster_path = tempfile.mkstemp(\n suffix='.tif', prefix='convolved_mask',\n dir=os.path.dirname(target_path))\n os.close(raster_file)\n new_raster_from_base(\n signal_path_band[0], mask_raster_path, gdal.GDT_Float64,\n [0.0], raster_driver_creation_tuple=raster_driver_creation_tuple)\n mask_raster = gdal.OpenEx(\n mask_raster_path, gdal.GA_Update | gdal.OF_RASTER)\n mask_band = mask_raster.GetRasterBand(1)\n\n LOGGER.info('starting convolve')\n last_time = time.time()\n\n # calculate the kernel sum for normalization\n kernel_nodata = kernel_raster_info['nodata'][0]\n kernel_sum = 0.0\n for _, kernel_block in iterblocks(kernel_path_band):\n if kernel_nodata is not None and ignore_nodata_and_edges:\n kernel_block[numpy.isclose(kernel_block, kernel_nodata)] = 0.0\n kernel_sum += numpy.sum(kernel_block)\n\n # limit the size of the work queue since a large kernel / signal with small\n # block size can have a large memory impact when queuing offset lists.\n work_queue = queue.Queue(10)\n signal_offset_list = list(iterblocks(s_path_band, offset_only=True))\n kernel_offset_list = list(iterblocks(k_path_band, offset_only=True))\n n_blocks = len(signal_offset_list) * len(kernel_offset_list)\n\n LOGGER.debug('start fill work queue thread')\n\n def _fill_work_queue():\n \"\"\"Asynchronously fill the work queue.\"\"\"\n LOGGER.debug('fill work queue')\n for signal_offset in signal_offset_list:\n for kernel_offset in kernel_offset_list:\n work_queue.put((signal_offset, kernel_offset))\n work_queue.put(None)\n LOGGER.debug('work queue full')\n\n fill_work_queue_worker = threading.Thread(\n target=_fill_work_queue)\n fill_work_queue_worker.daemon = True\n fill_work_queue_worker.start()\n\n # limit the size of the write queue so we don't accidentally load a whole\n # array into memory\n LOGGER.debug('start worker thread')\n write_queue = queue.Queue(10)\n worker = threading.Thread(\n target=_convolve_2d_worker,\n args=(\n signal_path_band, kernel_path_band,\n ignore_nodata_and_edges, normalize_kernel,\n set_tol_to_zero, work_queue, write_queue))\n worker.daemon = True\n worker.start()\n\n n_blocks_processed = 0\n LOGGER.info(f'{n_blocks} sent to workers, wait for worker results')\n while True:\n # the timeout guards against a worst case scenario where the\n # ``_convolve_2d_worker`` has crashed.\n write_payload = write_queue.get(timeout=_MAX_TIMEOUT)\n if write_payload:\n (index_dict, result, 
mask_result,\n             left_index_raster, right_index_raster,\n             top_index_raster, bottom_index_raster,\n             left_index_result, right_index_result,\n             top_index_result, bottom_index_result) = write_payload\n        else:\n            worker.join(max_timeout)\n            break\n\n        output_array = numpy.empty(\n            (index_dict['win_ysize'], index_dict['win_xsize']),\n            dtype=numpy.float32)\n\n        # the initial data value in target_band is 0 because that is the\n        # temporary nodata selected so that manual resetting of initial\n        # data values wasn't necessary. At the end of this function the\n        # target nodata value is set to `target_nodata`.\n        current_output = target_band.ReadAsArray(**index_dict)\n\n        # read the signal block so we know where the nodata are\n        potential_nodata_signal_array = signal_band.ReadAsArray(**index_dict)\n\n        valid_mask = numpy.ones(\n            potential_nodata_signal_array.shape, dtype=bool)\n\n        # guard against a None nodata value\n        if s_nodata is not None and mask_nodata:\n            valid_mask[:] = (\n                ~numpy.isclose(potential_nodata_signal_array, s_nodata))\n        output_array[:] = target_nodata\n        output_array[valid_mask] = (\n            (result[top_index_result:bottom_index_result,\n                    left_index_result:right_index_result])[valid_mask] +\n            current_output[valid_mask])\n        target_band.WriteArray(\n            output_array, xoff=index_dict['xoff'],\n            yoff=index_dict['yoff'])\n\n        if ignore_nodata_and_edges:\n            # we'll need to save off the mask convolution so we can divide\n            # the result by it later\n            current_mask = mask_band.ReadAsArray(**index_dict)\n\n            output_array[valid_mask] = (\n                (mask_result[\n                    top_index_result:bottom_index_result,\n                    left_index_result:right_index_result])[valid_mask] +\n                current_mask[valid_mask])\n            mask_band.WriteArray(\n                output_array, xoff=index_dict['xoff'],\n                yoff=index_dict['yoff'])\n\n        n_blocks_processed += 1\n        last_time = _invoke_timed_callback(\n            last_time, lambda: LOGGER.info(\n                \"convolution worker approximately %.1f%% complete on %s\",\n                100.0 * float(n_blocks_processed) / (n_blocks),\n                os.path.basename(target_path)),\n            _LOGGING_PERIOD)\n\n    LOGGER.info(\n        f\"convolution worker 100.0% complete on \"\n        f\"{os.path.basename(target_path)}\")\n\n    target_band.FlushCache()\n    if ignore_nodata_and_edges:\n        signal_nodata = get_raster_info(signal_path_band[0])['nodata'][\n            signal_path_band[1]-1]\n        LOGGER.info(\n            \"need to normalize result so nodata values are not included\")\n        mask_pixels_processed = 0\n        mask_band.FlushCache()\n        for target_offset_data in target_offset_list:\n            target_block = target_band.ReadAsArray(\n                **target_offset_data).astype(numpy.float64)\n            signal_block = signal_band.ReadAsArray(**target_offset_data)\n            mask_block = mask_band.ReadAsArray(**target_offset_data)\n            if mask_nodata and signal_nodata is not None:\n                valid_mask = ~numpy.isclose(signal_block, signal_nodata)\n            else:\n                valid_mask = numpy.ones(target_block.shape, dtype=bool)\n            valid_mask &= (mask_block > 0)\n            # divide the target_band by the mask_band\n            target_block[valid_mask] /= mask_block[valid_mask].astype(\n                numpy.float64)\n\n            # scale by kernel sum if necessary since mask division will\n            # automatically normalize kernel\n            if not normalize_kernel:\n                target_block[valid_mask] *= kernel_sum\n\n            target_band.WriteArray(\n                target_block, xoff=target_offset_data['xoff'],\n                yoff=target_offset_data['yoff'])\n\n            mask_pixels_processed += target_block.size\n            last_time = _invoke_timed_callback(\n                last_time, lambda: LOGGER.info(\n                    \"convolution nodata normalizer approximately %.1f%% \"\n                    \"complete on %s\", 100.0 * float(mask_pixels_processed) / (\n                        n_cols_signal * n_rows_signal),\n                    
os.path.basename(target_path)),\n _LOGGING_PERIOD)\n\n mask_raster = None\n mask_band = None\n os.remove(mask_raster_path)\n LOGGER.info(\n f\"convolution nodata normalize 100.0% complete on \"\n f\"{os.path.basename(target_path)}\")\n\n # set the nodata value from 0 to a reasonable value for the result\n target_band.SetNoDataValue(target_nodata)\n\n target_band = None\n target_raster = None\n\n\ndef iterblocks(\n raster_path_band, largest_block=_LARGEST_ITERBLOCK,\n offset_only=False):\n \"\"\"Iterate across all the memory blocks in the input raster.\n\n Result is a generator of block location information and numpy arrays.\n\n This is especially useful when a single value needs to be derived from the\n pixel values in a raster, such as the sum total of all pixel values, or\n a sequence of unique raster values. In such cases, ``raster_local_op``\n is overkill, since it writes out a raster.\n\n As a generator, this can be combined multiple times with itertools.izip()\n to iterate 'simultaneously' over multiple rasters, though the user should\n be careful to do so only with prealigned rasters.\n\n Args:\n raster_path_band (tuple): a path/band index tuple to indicate\n which raster band iterblocks should iterate over.\n largest_block (int): Attempts to iterate over raster blocks with\n this many elements. Useful in cases where the blocksize is\n relatively small, memory is available, and the function call\n overhead dominates the iteration. Defaults to 2**20. A value of\n anything less than the original blocksize of the raster will\n result in blocksizes equal to the original size.\n offset_only (boolean): defaults to False, if True ``iterblocks`` only\n returns offset dictionary and doesn't read any binary data from\n the raster. This can be useful when iterating over writing to\n an output.\n\n Yields:\n If ``offset_only`` is false, on each iteration, a tuple containing a\n dict of block data and a 2-dimensional numpy array are\n yielded. The dict of block data has these attributes:\n\n * ``data['xoff']`` - The X offset of the upper-left-hand corner of the\n block.\n * ``data['yoff']`` - The Y offset of the upper-left-hand corner of the\n block.\n * ``data['win_xsize']`` - The width of the block.\n * ``data['win_ysize']`` - The height of the block.\n\n If ``offset_only`` is True, the function returns only the block offset\n data and does not attempt to read binary data from the raster.\n\n \"\"\"\n if not _is_raster_path_band_formatted(raster_path_band):\n raise ValueError(\n \"`raster_path_band` not formatted as expected. 
Expects \"\n            \"(path, band_index), received %s\" % repr(raster_path_band))\n    raster = gdal.OpenEx(raster_path_band[0], gdal.OF_RASTER)\n    if raster is None:\n        raise ValueError(\n            \"Raster at %s could not be opened.\" % raster_path_band[0])\n    band = raster.GetRasterBand(raster_path_band[1])\n    block = band.GetBlockSize()\n    cols_per_block = block[0]\n    rows_per_block = block[1]\n\n    n_cols = raster.RasterXSize\n    n_rows = raster.RasterYSize\n\n    block_area = cols_per_block * rows_per_block\n    # try to make block wider\n    if int(largest_block / block_area) > 0:\n        width_factor = int(largest_block / block_area)\n        cols_per_block *= width_factor\n        if cols_per_block > n_cols:\n            cols_per_block = n_cols\n        block_area = cols_per_block * rows_per_block\n    # try to make block taller\n    if int(largest_block / block_area) > 0:\n        height_factor = int(largest_block / block_area)\n        rows_per_block *= height_factor\n        if rows_per_block > n_rows:\n            rows_per_block = n_rows\n\n    n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))\n    n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))\n\n    for row_block_index in range(n_row_blocks):\n        row_offset = row_block_index * rows_per_block\n        row_block_width = n_rows - row_offset\n        if row_block_width > rows_per_block:\n            row_block_width = rows_per_block\n        for col_block_index in range(n_col_blocks):\n            col_offset = col_block_index * cols_per_block\n            col_block_width = n_cols - col_offset\n            if col_block_width > cols_per_block:\n                col_block_width = cols_per_block\n\n            offset_dict = {\n                'xoff': col_offset,\n                'yoff': row_offset,\n                'win_xsize': col_block_width,\n                'win_ysize': row_block_width,\n            }\n            if offset_only:\n                yield offset_dict\n            else:\n                yield (offset_dict, band.ReadAsArray(**offset_dict))\n\n    band = None\n    raster = None\n\n\ndef transform_bounding_box(\n        bounding_box, base_projection_wkt, target_projection_wkt,\n        edge_samples=100,\n        osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY,\n        check_finite=True, allow_partial_reprojection=True):\n    \"\"\"Transform input bounding box to output projection.\n\n    This transform accounts for the fact that the reprojected square bounding\n    box might be warped in the new coordinate system. To account for this,\n    the function samples points along the original bounding box edges and\n    attempts to make the largest bounding box around any transformed point\n    on the edge, whether at corners or along warped edges.\n\n    Args:\n        bounding_box (sequence): a sequence of 4 coordinates in ``base_epsg``\n            coordinate system describing the bounds in the order\n            [xmin, ymin, xmax, ymax].\n        base_projection_wkt (string): the spatial reference of the input\n            coordinate system in Well Known Text.\n        target_projection_wkt (string): the spatial reference of the desired\n            output coordinate system in Well Known Text.\n        edge_samples (int): the number of interpolated points along each\n            bounding box edge to sample along. A value of 2 will sample just\n            the corners while a value of 3 will also sample the corners and\n            the midpoint.\n        osr_axis_mapping_strategy (int): OSR axis mapping strategy for\n            ``SpatialReference`` objects. Defaults to\n            ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This\n            parameter should not be changed unless you know what you are\n            doing.\n        check_finite (bool): If True, raises ValueError if bounding box\n            results in non-finite values.\n        allow_partial_reprojection (bool): If True, will attempt partial\n            reprojections if coordinates lie outside the area defined by\n            a projection. 
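(For example, a whole-globe bounding box\n            usually cannot be fully represented in a single UTM coordinate\n            system; this parenthetical is an editorial illustration, not\n            part of the original docstring.) 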
If False, will raise error in such cases.\n\n Return:\n A list of the form [xmin, ymin, xmax, ymax] that describes the largest\n fitting bounding box around the original warped bounding box in\n ``new_epsg`` coordinate system.\n\n Raises:\n ``ValueError`` if resulting transform yields non-finite coordinates.\n This would indicate an ill posed transform region that the user\n should address.\n\n \"\"\"\n base_ref = osr.SpatialReference()\n base_ref.ImportFromWkt(base_projection_wkt)\n\n target_ref = osr.SpatialReference()\n target_ref.ImportFromWkt(target_projection_wkt)\n\n base_ref.SetAxisMappingStrategy(osr_axis_mapping_strategy)\n target_ref.SetAxisMappingStrategy(osr_axis_mapping_strategy)\n\n transformer = osr.CreateCoordinateTransformation(base_ref, target_ref)\n\n # Create a bounding box geometry\n ring = ogr.Geometry(ogr.wkbLinearRing)\n # make a linear interpolation around the polygon for extra transform points\n for start, end in [\n ((0, 1), (2, 1)),\n ((2, 1), (2, 3)),\n ((2, 3), (0, 3)),\n ((0, 3), (0, 1))]:\n for step in range(edge_samples):\n p = step/edge_samples\n x_coord_start = bounding_box[start[0]]\n y_coord_start = bounding_box[start[1]]\n x_coord_end = bounding_box[end[0]]\n y_coord_end = bounding_box[end[1]]\n\n x_coord = (1-p)*x_coord_start+p*x_coord_end\n y_coord = (1-p)*y_coord_start+p*y_coord_end\n ring.AddPoint(x_coord, y_coord)\n # close the ring by putting a point where we start\n ring.AddPoint(bounding_box[0], bounding_box[1])\n poly = ogr.Geometry(ogr.wkbPolygon)\n poly.AddGeometry(ring)\n\n if allow_partial_reprojection:\n gdal.SetConfigOption('OGR_ENABLE_PARTIAL_REPROJECTION', 'TRUE')\n else:\n gdal.SetConfigOption('OGR_ENABLE_PARTIAL_REPROJECTION', 'FALSE')\n error_code = poly.Transform(transformer)\n if error_code != 0:\n raise ValueError(\n f'error on transforming {bounding_box} from {base_projection_wkt} '\n f'to {target_projection_wkt}. 
Error code: {error_code}')\n envelope = poly.GetEnvelope()\n # swizzle from xmin xmax ymin ymax to xmin, ymin, xmax, ymax\n transformed_bounding_box = [envelope[i] for i in [0, 2, 1, 3]]\n\n if check_finite and not all(numpy.isfinite(\n numpy.array(transformed_bounding_box))):\n raise ValueError(\n f'Could not transform bounding box from base to target projection.'\n f'Some transformed coordinates are not finite: '\n f'{transformed_bounding_box}, base bounding box may not fit into '\n f'target coordinate projection system.\\n'\n f'Original bounding box: {bounding_box}\\n'\n f'Base projection: {base_projection_wkt}\\n'\n f'Target projection: {target_projection_wkt}\\n')\n return transformed_bounding_box\n\n\ndef mask_raster(\n base_raster_path_band, mask_vector_path, target_mask_raster_path,\n mask_layer_id=0, target_mask_value=None, working_dir=None,\n all_touched=False, where_clause=None,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Mask a raster band with a given vector.\n\n Args:\n base_raster_path_band (tuple): a (path, band number) tuple indicating\n the data to mask.\n mask_vector_path (path): path to a vector that will be used to mask\n anything outside of the polygon that overlaps with\n ``base_raster_path_band`` to ``target_mask_value`` if defined or\n else ``base_raster_path_band``'s nodata value.\n target_mask_raster_path (str): path to desired target raster that\n is a copy of ``base_raster_path_band`` except any pixels that do\n not intersect with ``mask_vector_path`` are set to\n ``target_mask_value`` or ``base_raster_path_band``'s nodata value\n if ``target_mask_value`` is None.\n mask_layer_id (str/int): an index or name to identify the mask\n geometry layer in ``mask_vector_path``, default is 0.\n target_mask_value (numeric): If not None, this value is written to\n any pixel in ``base_raster_path_band`` that does not intersect\n with ``mask_vector_path``. Otherwise the nodata value of\n ``base_raster_path_band`` is used.\n working_dir (str): this is a path to a directory that can be used to\n hold temporary files required to complete this operation.\n all_touched (bool): if False, a pixel is only masked if its centroid\n intersects with the mask. If True a pixel is masked if any point\n of the pixel intersects the polygon mask.\n where_clause (str): (optional) if not None, it is an SQL compatible\n where clause that can be used to filter the features that are used\n to mask the base raster.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. 
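A minimal usage sketch with\n            hypothetical paths, setting pixels outside the vector geometry\n            to nodata::\n\n                mask_raster(\n                    ('dem.tif', 1), 'watershed.gpkg', 'dem_masked.tif')\n\n            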
Defaults to a GTiff driver tuple\n defined at geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n \"\"\"\n with tempfile.NamedTemporaryFile(\n prefix='mask_raster', delete=False, suffix='.tif',\n dir=working_dir) as mask_raster_file:\n mask_raster_path = mask_raster_file.name\n\n new_raster_from_base(\n base_raster_path_band[0], mask_raster_path, gdal.GDT_Byte, [255],\n fill_value_list=[0],\n raster_driver_creation_tuple=raster_driver_creation_tuple)\n\n base_raster_info = get_raster_info(base_raster_path_band[0])\n\n rasterize(\n mask_vector_path, mask_raster_path, burn_values=[1],\n layer_id=mask_layer_id,\n option_list=[('ALL_TOUCHED=%s' % all_touched).upper()],\n where_clause=where_clause)\n\n base_nodata = base_raster_info['nodata'][base_raster_path_band[1]-1]\n\n if target_mask_value is None:\n mask_value = base_nodata\n if mask_value is None:\n LOGGER.warning(\n \"No mask value was passed and target nodata is undefined, \"\n \"defaulting to 0 as the target mask value.\")\n mask_value = 0\n else:\n mask_value = target_mask_value\n\n def mask_op(base_array, mask_array):\n result = numpy.copy(base_array)\n result[mask_array == 0] = mask_value\n return result\n\n raster_calculator(\n [base_raster_path_band, (mask_raster_path, 1)], mask_op,\n target_mask_raster_path, base_raster_info['datatype'], base_nodata,\n raster_driver_creation_tuple=raster_driver_creation_tuple)\n\n os.remove(mask_raster_path)\n\n\ndef _invoke_timed_callback(\n reference_time, callback_lambda, callback_period):\n \"\"\"Invoke callback if a certain amount of time has passed.\n\n This is a convenience function to standardize update callbacks from the\n module.\n\n Args:\n reference_time (float): time to base ``callback_period`` length from.\n callback_lambda (lambda): function to invoke if difference between\n current time and ``reference_time`` has exceeded\n ``callback_period``.\n callback_period (float): time in seconds to pass until\n ``callback_lambda`` is invoked.\n\n Return:\n ``reference_time`` if ``callback_lambda`` not invoked, otherwise the\n time when ``callback_lambda`` was invoked.\n\n \"\"\"\n current_time = time.time()\n if current_time - reference_time > callback_period:\n callback_lambda()\n return current_time\n return reference_time\n\n\ndef _gdal_to_numpy_type(band):\n \"\"\"Calculate the equivalent numpy datatype from a GDAL raster band type.\n\n This function doesn't handle complex or unknown types. 
If they are\n passed in, this function will raise a ValueError.\n\n Args:\n band (gdal.Band): GDAL Band\n\n Return:\n numpy_datatype (numpy.dtype): equivalent of band.DataType\n\n \"\"\"\n # doesn't include GDT_Byte because that's a special case\n base_gdal_type_to_numpy = {\n gdal.GDT_Int16: numpy.int16,\n gdal.GDT_Int32: numpy.int32,\n gdal.GDT_UInt16: numpy.uint16,\n gdal.GDT_UInt32: numpy.uint32,\n gdal.GDT_Float32: numpy.float32,\n gdal.GDT_Float64: numpy.float64,\n }\n\n if band.DataType in base_gdal_type_to_numpy:\n return base_gdal_type_to_numpy[band.DataType]\n\n if band.DataType != gdal.GDT_Byte:\n raise ValueError(\"Unsupported DataType: %s\" % str(band.DataType))\n\n # band must be GDT_Byte type, check if it is signed/unsigned\n metadata = band.GetMetadata('IMAGE_STRUCTURE')\n if 'PIXELTYPE' in metadata and metadata['PIXELTYPE'] == 'SIGNEDBYTE':\n return numpy.int8\n return numpy.uint8\n\n\ndef merge_bounding_box_list(bounding_box_list, bounding_box_mode):\n \"\"\"Create a single bounding box by union or intersection of the list.\n\n Args:\n bounding_box_list (sequence): a sequence of bounding box coordinates\n in the order [minx, miny, maxx, maxy].\n mode (string): either ``'union'`` or ``'intersection'`` for the\n corresponding reduction mode.\n\n Return:\n A four tuple bounding box that is the union or intersection of the\n input bounding boxes.\n\n Raises:\n ValueError\n if the bounding boxes in ``bounding_box_list`` do not\n intersect if the ``bounding_box_mode`` is 'intersection'.\n\n \"\"\"\n def _merge_bounding_boxes(bb1, bb2, mode):\n \"\"\"Merge two bounding boxes through union or intersection.\n\n Args:\n bb1, bb2 (sequence): sequence of float representing bounding box\n in the form bb=[minx,miny,maxx,maxy]\n mode (string); one of 'union' or 'intersection'\n\n Return:\n Reduced bounding box of bb1/bb2 depending on mode.\n\n \"\"\"\n def _less_than_or_equal(x_val, y_val):\n return x_val if x_val <= y_val else y_val\n\n def _greater_than(x_val, y_val):\n return x_val if x_val > y_val else y_val\n\n if mode == \"union\":\n comparison_ops = [\n _less_than_or_equal, _less_than_or_equal,\n _greater_than, _greater_than]\n if mode == \"intersection\":\n comparison_ops = [\n _greater_than, _greater_than,\n _less_than_or_equal, _less_than_or_equal]\n\n bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]\n return bb_out\n\n result_bb = functools.reduce(\n functools.partial(_merge_bounding_boxes, mode=bounding_box_mode),\n bounding_box_list)\n if result_bb[0] > result_bb[2] or result_bb[1] > result_bb[3]:\n raise ValueError(\n \"Bounding boxes do not intersect. 
Base list: %s mode: %s \"\n \" result: %s\" % (bounding_box_list, bounding_box_mode, result_bb))\n return result_bb\n\n\ndef get_gis_type(path):\n \"\"\"Calculate the GIS type of the file located at ``path``.\n\n Args:\n path (str): path to a file on disk.\n\n\n Return:\n A bitwise OR of all GIS types that PyGeoprocessing models, currently\n this is ``pygeoprocessing.UNKNOWN_TYPE``,\n ``pygeoprocessing.RASTER_TYPE``, or ``pygeoprocessing.VECTOR_TYPE``.\n\n \"\"\"\n if not os.path.exists(path):\n raise ValueError(\"%s does not exist\", path)\n from pygeoprocessing import UNKNOWN_TYPE\n gis_type = UNKNOWN_TYPE\n gis_raster = gdal.OpenEx(path, gdal.OF_RASTER)\n if gis_raster is not None:\n from pygeoprocessing import RASTER_TYPE\n gis_type |= RASTER_TYPE\n gis_raster = None\n gis_vector = gdal.OpenEx(path, gdal.OF_VECTOR)\n if gis_vector is not None:\n from pygeoprocessing import VECTOR_TYPE\n gis_type |= VECTOR_TYPE\n return gis_type\n\n\ndef _make_logger_callback(message):\n \"\"\"Build a timed logger callback that prints ``message`` replaced.\n\n Args:\n message (string): a string that expects 2 placement %% variables,\n first for % complete from ``df_complete``, second from\n ``p_progress_arg[0]``.\n\n Return:\n Function with signature:\n logger_callback(df_complete, psz_message, p_progress_arg)\n\n \"\"\"\n def logger_callback(df_complete, _, p_progress_arg):\n \"\"\"Argument names come from the GDAL API for callbacks.\"\"\"\n try:\n current_time = time.time()\n if ((current_time - logger_callback.last_time) > 5.0 or\n (df_complete == 1.0 and\n logger_callback.total_time >= 5.0)):\n # In some multiprocess applications I was encountering a\n # ``p_progress_arg`` of None. This is unexpected and I suspect\n # was an issue for some kind of GDAL race condition. So I'm\n # guarding against it here and reporting an appropriate log\n # if it occurs.\n if p_progress_arg:\n LOGGER.info(message, df_complete * 100, p_progress_arg[0])\n else:\n LOGGER.info(message, df_complete * 100, '')\n logger_callback.last_time = current_time\n logger_callback.total_time += current_time\n except AttributeError:\n logger_callback.last_time = time.time()\n logger_callback.total_time = 0.0\n except Exception:\n LOGGER.exception(\"Unhandled error occurred while logging \"\n \"progress. 
df_complete: %s, p_progress_arg: %s\",\n df_complete, p_progress_arg)\n\n return logger_callback\n\n\ndef _is_raster_path_band_formatted(raster_path_band):\n \"\"\"Return true if raster path band is a (str, int) tuple/list.\"\"\"\n if not isinstance(raster_path_band, (list, tuple)):\n return False\n elif len(raster_path_band) != 2:\n return False\n elif not isinstance(raster_path_band[0], str):\n return False\n elif not isinstance(raster_path_band[1], int):\n return False\n else:\n return True\n\n\ndef _convolve_2d_worker(\n signal_path_band, kernel_path_band,\n ignore_nodata, normalize_kernel, set_tol_to_zero,\n work_queue, write_queue):\n \"\"\"Worker function to be used by ``convolve_2d``.\n\n Args:\n signal_path_band (tuple): a 2 tuple of the form\n (filepath to signal raster, band index).\n kernel_path_band (tuple): a 2 tuple of the form\n (filepath to kernel raster, band index).\n ignore_nodata (boolean): If true, any pixels that are equal to\n ``signal_path_band``'s nodata value are not included when\n averaging the convolution filter.\n normalize_kernel (boolean): If true, the result is divided by the\n sum of the kernel.\n set_tol_to_zero (float): Value to test close to to determine if values\n are zero, and if so, set to zero.\n work_queue (Queue): will contain (signal_offset, kernel_offset)\n tuples that can be used to read raster blocks directly using\n GDAL ReadAsArray(**offset). Indicates the block to operate on.\n write_queue (Queue): mechanism to pass result back to the writer\n contains a (index_dict, result, mask_result,\n left_index_raster, right_index_raster,\n top_index_raster, bottom_index_raster,\n left_index_result, right_index_result,\n top_index_result, bottom_index_result) tuple that's used\n for writing and masking.\n\n Return:\n None\n \"\"\"\n signal_raster = gdal.OpenEx(signal_path_band[0], gdal.OF_RASTER)\n kernel_raster = gdal.OpenEx(kernel_path_band[0], gdal.OF_RASTER)\n signal_band = signal_raster.GetRasterBand(signal_path_band[1])\n kernel_band = kernel_raster.GetRasterBand(kernel_path_band[1])\n\n signal_raster_info = get_raster_info(signal_path_band[0])\n kernel_raster_info = get_raster_info(kernel_path_band[0])\n\n n_cols_signal, n_rows_signal = signal_raster_info['raster_size']\n n_cols_kernel, n_rows_kernel = kernel_raster_info['raster_size']\n signal_nodata = signal_raster_info['nodata'][0]\n kernel_nodata = kernel_raster_info['nodata'][0]\n\n mask_result = None # in case no mask is needed, variable is still defined\n\n # calculate the kernel sum for normalization\n kernel_sum = 0.0\n for _, kernel_block in iterblocks(kernel_path_band):\n if kernel_nodata is not None and ignore_nodata:\n kernel_block[numpy.isclose(kernel_block, kernel_nodata)] = 0.0\n kernel_sum += numpy.sum(kernel_block)\n\n while True:\n payload = work_queue.get()\n if payload is None:\n break\n\n signal_offset, kernel_offset = payload\n\n # ensure signal and kernel are internally float64 precision\n # irrespective of their base type\n signal_block = signal_band.ReadAsArray(**signal_offset).astype(\n numpy.float64)\n kernel_block = kernel_band.ReadAsArray(**kernel_offset).astype(\n numpy.float64)\n\n # don't ever convolve the nodata value\n if signal_nodata is not None:\n signal_nodata_mask = numpy.isclose(signal_block, signal_nodata)\n signal_block[signal_nodata_mask] = 0.0\n if not ignore_nodata:\n signal_nodata_mask[:] = 0\n else:\n signal_nodata_mask = numpy.zeros(\n signal_block.shape, dtype=bool)\n\n left_index_raster = (\n signal_offset['xoff'] - n_cols_kernel // 2 +\n 
kernel_offset['xoff'])\n right_index_raster = (\n signal_offset['xoff'] - n_cols_kernel // 2 +\n kernel_offset['xoff'] + signal_offset['win_xsize'] +\n kernel_offset['win_xsize'] - 1)\n top_index_raster = (\n signal_offset['yoff'] - n_rows_kernel // 2 +\n kernel_offset['yoff'])\n bottom_index_raster = (\n signal_offset['yoff'] - n_rows_kernel // 2 +\n kernel_offset['yoff'] + signal_offset['win_ysize'] +\n kernel_offset['win_ysize'] - 1)\n\n # it's possible that the piece of the integrating kernel\n # doesn't affect the final result, if so we should skip\n if (right_index_raster < 0 or\n bottom_index_raster < 0 or\n left_index_raster > n_cols_signal or\n top_index_raster > n_rows_signal):\n continue\n\n if kernel_nodata is not None:\n kernel_block[numpy.isclose(kernel_block, kernel_nodata)] = 0.0\n\n if normalize_kernel:\n kernel_block /= kernel_sum\n\n # determine the output convolve shape\n shape = (\n numpy.array(signal_block.shape) +\n numpy.array(kernel_block.shape) - 1)\n\n # add zero padding so FFT is fast\n fshape = [_next_regular(int(d)) for d in shape]\n\n signal_fft = numpy.fft.rfftn(signal_block, fshape)\n kernel_fft = numpy.fft.rfftn(kernel_block, fshape)\n\n # this variable determines the output slice that doesn't include\n # the padded array region made for fast FFTs.\n fslice = tuple([slice(0, int(sz)) for sz in shape])\n # classic FFT convolution\n result = numpy.fft.irfftn(signal_fft * kernel_fft, fshape)[fslice]\n # nix any roundoff error\n if set_tol_to_zero is not None:\n result[numpy.isclose(result, set_tol_to_zero)] = 0.0\n\n # if we're ignoring nodata, we need to make a convolution of the\n # nodata mask too\n if ignore_nodata:\n mask_fft = numpy.fft.rfftn(\n numpy.where(signal_nodata_mask, 0.0, 1.0), fshape)\n mask_result = numpy.fft.irfftn(\n mask_fft * kernel_fft, fshape)[fslice]\n\n left_index_result = 0\n right_index_result = result.shape[1]\n top_index_result = 0\n bottom_index_result = result.shape[0]\n\n # we might abut the edge of the raster, clip if so\n if left_index_raster < 0:\n left_index_result = -left_index_raster\n left_index_raster = 0\n if top_index_raster < 0:\n top_index_result = -top_index_raster\n top_index_raster = 0\n if right_index_raster > n_cols_signal:\n right_index_result -= right_index_raster - n_cols_signal\n right_index_raster = n_cols_signal\n if bottom_index_raster > n_rows_signal:\n bottom_index_result -= (\n bottom_index_raster - n_rows_signal)\n bottom_index_raster = n_rows_signal\n\n # Add result to current output to account for overlapping edges\n index_dict = {\n 'xoff': left_index_raster,\n 'yoff': top_index_raster,\n 'win_xsize': right_index_raster-left_index_raster,\n 'win_ysize': bottom_index_raster-top_index_raster\n }\n\n write_queue.put(\n (index_dict, result, mask_result,\n left_index_raster, right_index_raster,\n top_index_raster, bottom_index_raster,\n left_index_result, right_index_result,\n top_index_result, bottom_index_result))\n\n # Indicates worker has terminated\n write_queue.put(None)\n\n\ndef _assert_is_valid_pixel_size(target_pixel_size):\n \"\"\"Return true if ``target_pixel_size`` is a valid 2 element sequence.\n\n Raises ValueError if not a two element list/tuple and/or the values in\n the sequence are not numerical.\n\n \"\"\"\n def _is_number(x):\n \"\"\"Return true if x is a number.\"\"\"\n try:\n if isinstance(x, str):\n return False\n float(x)\n return True\n except (ValueError, TypeError):\n return False\n\n if not isinstance(target_pixel_size, (list, tuple)):\n raise ValueError(\n 
\"target_pixel_size is not a tuple, its value was '%s'\",\n repr(target_pixel_size))\n\n if (len(target_pixel_size) != 2 or\n not all([_is_number(x) for x in target_pixel_size])):\n raise ValueError(\n \"Invalid value for `target_pixel_size`, expected two numerical \"\n \"elements, got: %s\", repr(target_pixel_size))\n return True\n\n\ndef shapely_geometry_to_vector(\n shapely_geometry_list, target_vector_path, projection_wkt,\n vector_format, fields=None, attribute_list=None,\n ogr_geom_type=ogr.wkbPolygon):\n \"\"\"Convert list of geometry to vector on disk.\n\n Args:\n shapely_geometry_list (list): a list of Shapely objects.\n target_vector_path (str): path to target vector.\n projection_wkt (str): WKT for target vector.\n vector_format (str): GDAL driver name for target vector.\n fields (dict): a python dictionary mapping string fieldname\n to OGR Fieldtypes, if None no fields are added\n attribute_list (list of dicts): a list of python dictionary mapping\n fieldname to field value for each geometry in\n `shapely_geometry_list`, if None, no attributes are created.\n ogr_geom_type (ogr geometry enumerated type): sets the target layer\n geometry type. Defaults to wkbPolygon.\n\n Return:\n None\n \"\"\"\n if fields is None:\n fields = {}\n\n if attribute_list is None:\n attribute_list = [{} for _ in range(len(shapely_geometry_list))]\n\n num_geoms = len(shapely_geometry_list)\n num_attrs = len(attribute_list)\n if num_geoms != num_attrs:\n raise ValueError(\n f\"Geometry count ({num_geoms}) and attribute count \"\n f\"({num_attrs}) do not match.\")\n\n vector_driver = ogr.GetDriverByName(vector_format)\n target_vector = vector_driver.CreateDataSource(target_vector_path)\n layer_name = os.path.basename(os.path.splitext(target_vector_path)[0])\n projection = osr.SpatialReference()\n projection.ImportFromWkt(projection_wkt)\n target_layer = target_vector.CreateLayer(\n layer_name, srs=projection, geom_type=ogr_geom_type)\n\n for field_name, field_type in fields.items():\n target_layer.CreateField(ogr.FieldDefn(field_name, field_type))\n layer_defn = target_layer.GetLayerDefn()\n\n for shapely_feature, fields in zip(shapely_geometry_list, attribute_list):\n new_feature = ogr.Feature(layer_defn)\n new_geometry = ogr.CreateGeometryFromWkb(shapely_feature.wkb)\n new_feature.SetGeometry(new_geometry)\n\n for field_name, field_value in fields.items():\n new_feature.SetField(field_name, field_value)\n target_layer.CreateFeature(new_feature)\n\n target_layer = None\n target_vector = None\n\n\ndef numpy_array_to_raster(\n base_array, target_nodata, pixel_size, origin, projection_wkt,\n target_path,\n raster_driver_creation_tuple=DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS):\n \"\"\"Create a single band raster of size ``base_array.shape``.\n\n Args:\n base_array (numpy.array): a 2d numpy array.\n target_nodata (numeric): nodata value of target array, can be None.\n pixel_size (tuple): square dimensions (in ``(x, y)``) of pixel.\n origin (tuple/list): x/y coordinate of the raster origin.\n projection_wkt (str): target projection in wkt.\n target_path (str): path to raster to create that will be of the\n same type of base_array with contents of base_array.\n raster_driver_creation_tuple (tuple): a tuple containing a GDAL driver\n name string as the first element and a GDAL creation options\n tuple/list as the second. 
Defaults to\n geoprocessing.DEFAULT_GTIFF_CREATION_TUPLE_OPTIONS.\n\n Return:\n None\n \"\"\"\n numpy_to_gdal_type = {\n numpy.dtype(bool): gdal.GDT_Byte,\n numpy.dtype(numpy.int8): gdal.GDT_Byte,\n numpy.dtype(numpy.uint8): gdal.GDT_Byte,\n numpy.dtype(numpy.int16): gdal.GDT_Int16,\n numpy.dtype(numpy.int32): gdal.GDT_Int32,\n numpy.dtype(numpy.uint16): gdal.GDT_UInt16,\n numpy.dtype(numpy.uint32): gdal.GDT_UInt32,\n numpy.dtype(numpy.float32): gdal.GDT_Float32,\n numpy.dtype(numpy.float64): gdal.GDT_Float64,\n numpy.dtype(numpy.csingle): gdal.GDT_CFloat32,\n numpy.dtype(numpy.complex64): gdal.GDT_CFloat64,\n }\n raster_driver = gdal.GetDriverByName(raster_driver_creation_tuple[0])\n ny, nx = base_array.shape\n new_raster = raster_driver.Create(\n target_path, nx, ny, 1, numpy_to_gdal_type[base_array.dtype],\n options=raster_driver_creation_tuple[1])\n if projection_wkt is not None:\n new_raster.SetProjection(projection_wkt)\n new_raster.SetGeoTransform(\n [origin[0], pixel_size[0], 0.0, origin[1], 0.0, pixel_size[1]])\n new_band = new_raster.GetRasterBand(1)\n if target_nodata is not None:\n new_band.SetNoDataValue(target_nodata)\n new_band.WriteArray(base_array)\n new_band = None\n new_raster = None\n\n\ndef raster_to_numpy_array(raster_path, band_id=1):\n \"\"\"Read the entire contents of the raster band to a numpy array.\n\n Args:\n raster_path (str): path to raster.\n band_id (int): band in the raster to read.\n\n Return:\n numpy array contents of `band_id` in raster.\n\n \"\"\"\n raster = gdal.OpenEx(raster_path, gdal.OF_RASTER)\n band = raster.GetRasterBand(band_id)\n array = band.ReadAsArray()\n band = None\n raster = None\n return array\n\n\ndef stitch_rasters(\n base_raster_path_band_list,\n resample_method_list,\n target_stitch_raster_path_band,\n overlap_algorithm='etch',\n area_weight_m2_to_wgs84=False,\n osr_axis_mapping_strategy=DEFAULT_OSR_AXIS_MAPPING_STRATEGY):\n \"\"\"Stitch the raster in the base list into the existing target.\n\n Args:\n base_raster_path_band_list (sequence): sequence of raster path/band\n tuples to stitch into target.\n resample_method_list (sequence): a sequence of resampling methods\n which one to one map each path in ``base_raster_path_band_list``\n during resizing. Each element must be one of\n \"near|bilinear|cubic|cubicspline|lanczos|mode\".\n target_stitch_raster_path_band (tuple): raster path/band tuple to an\n existing raster, values in ``base_raster_path_band_list`` will\n be stitched into this raster/band in the order they are in the\n list. The nodata value for the target band must be defined and\n will be written over with values from the base raster. Nodata\n values in the base rasters will not be written into the target.\n If the pixel size or projection are different between base and\n target the base is warped to the target's cell size and target\n with the interpolation method provided. If any part of the\n base raster lies outside of the target, that part of the base\n is ignored. A warning is logged if the entire base raster is\n outside of the target bounds.\n overlap_algorithm (str): this value indicates which algorithm to use\n when a raster is stitched on non-nodata values in the target\n stitch raster. It can be one of the following:\n 'etch': write a value to the target raster only if the target\n raster pixel is nodata. 
If the target pixel is non-nodata\n ignore any additional values to write on that pixel.\n 'replace': write a value to the target raster irrespective\n of the value of the target raster\n 'add': add the value to be written to the target raster to\n any existing value that is there. If the existing value\n is nodata, treat it as 0.0.\n area_weight_m2_to_wgs84 (bool): If ``True`` the stitched raster will\n be converted to a per-area value before reprojection to wgs84,\n then multiplied by the m^2 area per pixel in the wgs84 coordinate\n space. This is useful when the quantity being stitched is a total\n quantity per pixel rather than a per unit area density. Note\n this assumes input rasters are in a projected space of meters,\n if they are not the stitched output will be nonsensical.\n osr_axis_mapping_strategy (int): OSR axis mapping strategy for\n ``SpatialReference`` objects. Defaults to\n ``geoprocessing.DEFAULT_OSR_AXIS_MAPPING_STRATEGY``. This\n parameter should not be changed unless you know what you are\n doing.\n\n Return:\n None.\n \"\"\"\n valid_overlap_algorithms = ['etch', 'replace', 'add']\n if overlap_algorithm not in valid_overlap_algorithms:\n raise ValueError(\n f'overlap algorithm {overlap_algorithm} is not one of '\n f'{valid_overlap_algorithms}')\n\n if not _is_raster_path_band_formatted(target_stitch_raster_path_band):\n raise ValueError(\n f'Expected raster path/band tuple for '\n f'target_stitch_raster_path_band but got '\n f'\"{target_stitch_raster_path_band}\"')\n\n if len(base_raster_path_band_list) != len(resample_method_list):\n raise ValueError(\n f'Expected same number of elements in '\n f'`base_raster_path_band_list` as `resample_method_list` but '\n f'got {len(base_raster_path_band_list)} != '\n f'{len(resample_method_list)} respectively')\n\n if not os.path.exists(target_stitch_raster_path_band[0]):\n raise ValueError(\n f'Target stitch raster does not exist: '\n f'\"{target_stitch_raster_path_band[0]}\"')\n gis_type = get_gis_type(target_stitch_raster_path_band[0])\n from pygeoprocessing import RASTER_TYPE\n if gis_type != RASTER_TYPE:\n raise ValueError(\n f'Target stitch raster is not a raster. 
'\n f'Location: \"{target_stitch_raster_path_band[0]}\" '\n f'GIS type: {gis_type}')\n target_raster_info = get_raster_info(target_stitch_raster_path_band[0])\n if target_stitch_raster_path_band[1] > len(target_raster_info['nodata']):\n raise ValueError(\n f'target_stitch_raster_path_band refers to a band that exceeds '\n f'the number of bands in the raster:\\n'\n f'target_stitch_raster_path_band[1]: '\n f'{target_stitch_raster_path_band[1]} '\n f'n bands: {len(target_raster_info[\"nodata\"])}')\n\n target_nodata = target_raster_info['nodata'][\n target_stitch_raster_path_band[1]-1]\n if target_nodata is None:\n raise ValueError(\n f'target stitch raster at \"{target_stitch_raster_path_band[0]} \"'\n f'nodata value is `None`, expected non-`None` value')\n\n target_raster = gdal.OpenEx(\n target_stitch_raster_path_band[0], gdal.OF_RASTER | gdal.GA_Update)\n target_band = target_raster.GetRasterBand(\n target_stitch_raster_path_band[1])\n target_inv_gt = gdal.InvGeoTransform(target_raster_info['geotransform'])\n target_raster_x_size, target_raster_y_size = target_raster_info[\n 'raster_size']\n for (raster_path, raster_band_id), resample_method in zip(\n base_raster_path_band_list, resample_method_list):\n LOGGER.info(\n f'stitching {(raster_path, raster_band_id)} into '\n f'{target_stitch_raster_path_band}')\n raster_info = get_raster_info(raster_path)\n\n projected_raster_bounding_box = transform_bounding_box(\n raster_info['bounding_box'],\n raster_info['projection_wkt'],\n target_raster_info['projection_wkt'])\n\n try:\n # merge the bounding boxes only to see if they don't intersect\n _ = merge_bounding_box_list(\n [projected_raster_bounding_box,\n target_raster_info['bounding_box']], 'intersection')\n except ValueError:\n LOGGER.warning(\n f'the raster at \"{raster_path}\"\" does not intersect the '\n f'stitch raster at \"{target_stitch_raster_path_band[0]}\", '\n f'skipping...')\n continue\n\n # use this to determine if we need to warp and delete if we did at\n # the end\n if (raster_info['projection_wkt'] ==\n target_raster_info['projection_wkt'] and\n raster_info['pixel_size'] ==\n target_raster_info['pixel_size']):\n warped_raster = False\n base_stitch_raster_path = raster_path\n else:\n workspace_dir = tempfile.mkdtemp(\n dir=os.path.dirname(target_stitch_raster_path_band[0]),\n prefix='stitch_rasters_workspace')\n base_stitch_raster_path = os.path.join(\n workspace_dir, os.path.basename(raster_path))\n warp_raster(\n raster_path, target_raster_info['pixel_size'],\n base_stitch_raster_path, resample_method,\n target_projection_wkt=target_raster_info['projection_wkt'],\n working_dir=workspace_dir,\n osr_axis_mapping_strategy=osr_axis_mapping_strategy)\n warped_raster = True\n\n if warped_raster and area_weight_m2_to_wgs84:\n # determine base area per pixel currently and area per pixel\n # once it is projected to wgs84 pixel sizes\n base_pixel_area_m2 = abs(numpy.prod(raster_info['pixel_size']))\n base_stitch_raster_info = get_raster_info(\n base_stitch_raster_path)\n _, lat_min, _, lat_max = base_stitch_raster_info['bounding_box']\n n_rows = base_stitch_raster_info['raster_size'][1]\n # this column is a longitude invariant latitude variant pixel\n # area for scaling area dependent values\n m2_area_per_lat = _create_latitude_m2_area_column(\n lat_min, lat_max, n_rows)\n\n def _mult_op(base_array, base_nodata, scale, datatype):\n \"\"\"Scale non-nodata by scale.\"\"\"\n result = base_array.astype(datatype)\n if base_nodata is not None:\n valid_mask = ~numpy.isclose(base_array, 
base_nodata)\n else:\n valid_mask = numpy.ones(\n base_array.shape, dtype=bool)\n result[valid_mask] = result[valid_mask] * scale[valid_mask]\n return result\n\n base_stitch_nodata = base_stitch_raster_info['nodata'][0]\n scaled_raster_path = os.path.join(\n workspace_dir,\n f'scaled_{os.path.basename(base_stitch_raster_path)}')\n # multiply the pixels in the resampled raster by the ratio of\n # the pixel area in the wgs84 units divided by the area of the\n # original pixel\n raster_calculator(\n [(base_stitch_raster_path, 1), (base_stitch_nodata, 'raw'),\n m2_area_per_lat/base_pixel_area_m2,\n (_GDAL_TYPE_TO_NUMPY_LOOKUP[\n target_raster_info['datatype']], 'raw')], _mult_op,\n scaled_raster_path,\n target_raster_info['datatype'], base_stitch_nodata)\n\n # swap the result to base stitch so the rest of the function\n # operates on the area scaled raster\n os.remove(base_stitch_raster_path)\n base_stitch_raster_path = scaled_raster_path\n\n base_raster = gdal.OpenEx(base_stitch_raster_path, gdal.OF_RASTER)\n base_gt = base_raster.GetGeoTransform()\n base_band = base_raster.GetRasterBand(raster_band_id)\n base_nodata = base_band.GetNoDataValue()\n # Get the target upper left xoff/yoff w/r/t the stitch raster 0,0\n # coordinates\n target_to_base_xoff, target_to_base_yoff = [\n int(_) for _ in gdal.ApplyGeoTransform(\n target_inv_gt, *gdal.ApplyGeoTransform(base_gt, 0, 0))]\n for offset_dict in iterblocks(\n (base_stitch_raster_path, raster_band_id), offset_only=True):\n _offset_vars = {}\n overlap = True\n for (target_to_base_off, off_val,\n target_off_id, off_clip_id, win_size_id, raster_size) in [\n (target_to_base_xoff, offset_dict['xoff'],\n 'target_xoff', 'xoff_clip', 'win_xsize',\n target_raster_x_size),\n (target_to_base_yoff, offset_dict['yoff'],\n 'target_yoff', 'yoff_clip', 'win_ysize',\n target_raster_y_size)]:\n _offset_vars[target_off_id] = (target_to_base_off+off_val)\n if _offset_vars[target_off_id] >= raster_size:\n overlap = False\n break\n # how far to move right to get in the target raster\n _offset_vars[off_clip_id] = 0\n _offset_vars[win_size_id] = offset_dict[win_size_id]\n if _offset_vars[target_off_id] < 0:\n # if negative, move the offset so it's in range of the\n # stitch raster and make the window smaller\n _offset_vars[off_clip_id] = -_offset_vars[target_off_id]\n _offset_vars[win_size_id] += _offset_vars[target_off_id]\n if _offset_vars[off_clip_id] >= _offset_vars[win_size_id] or (\n _offset_vars[win_size_id] < 0):\n # its too far left/right for the whole window\n overlap = False\n break\n # make the _offset_vars[win_size_id] smaller if it shifts\n # off the target window\n if (_offset_vars[off_clip_id] + _offset_vars[target_off_id] +\n _offset_vars[win_size_id] >= raster_size):\n _offset_vars[win_size_id] -= (\n _offset_vars[off_clip_id] +\n _offset_vars[target_off_id] +\n _offset_vars[win_size_id] - raster_size)\n\n # deal with the case where the base_stitch_raster_path is\n # outside of the bounds of the\n if not overlap:\n continue\n\n target_array = target_band.ReadAsArray(\n xoff=_offset_vars['target_xoff']+_offset_vars['xoff_clip'],\n yoff=_offset_vars['target_yoff']+_offset_vars['yoff_clip'],\n win_xsize=_offset_vars['win_xsize'],\n win_ysize=_offset_vars['win_ysize'])\n target_nodata_mask = numpy.isclose(target_array, target_nodata)\n base_array = base_band.ReadAsArray(\n xoff=offset_dict['xoff']+_offset_vars['xoff_clip'],\n yoff=offset_dict['yoff']+_offset_vars['yoff_clip'],\n win_xsize=_offset_vars['win_xsize'],\n 
win_ysize=_offset_vars['win_ysize'])\n\n if base_nodata is not None:\n base_nodata_mask = numpy.isclose(base_array, base_nodata)\n else:\n base_nodata_mask = numpy.zeros(\n base_array.shape, dtype=bool)\n\n if overlap_algorithm == 'etch':\n # place values only where target is nodata\n valid_mask = ~base_nodata_mask & target_nodata_mask\n target_array[valid_mask] = base_array[valid_mask]\n elif overlap_algorithm == 'replace':\n # write valid values into the target -- disregard any\n # existing values in the target\n valid_mask = ~base_nodata_mask\n target_array[valid_mask] = base_array[valid_mask]\n elif overlap_algorithm == 'add':\n # add values to the target and treat target nodata as 0.\n valid_mask = ~base_nodata_mask\n masked_target_array = target_array[valid_mask]\n target_array_nodata_mask = numpy.isclose(\n masked_target_array, target_nodata)\n target_array[valid_mask] = (\n base_array[valid_mask] +\n numpy.where(\n target_array_nodata_mask, 0, masked_target_array))\n else:\n raise RuntimeError(\n f'overlap_algorithm {overlap_algorithm} was not defined '\n f'but also not detected earlier -- this should never '\n f'happen')\n\n target_band.WriteArray(\n target_array,\n xoff=_offset_vars['target_xoff']+_offset_vars['xoff_clip'],\n yoff=_offset_vars['target_yoff']+_offset_vars['yoff_clip'])\n\n base_raster = None\n base_band = None\n if warped_raster:\n shutil.rmtree(workspace_dir)\n\n target_raster = None\n target_band = None\n\n\ndef get_utm_zone(lng, lat):\n \"\"\"Given lng/lat coordinates return EPSG code of UTM zone.\n\n Note this only correctly calculates the main longitudinal UTM zones and\n will incorrectly calculate the UTM zones for the corner cases in\n very Northern Europe and Russia.\n\n Args:\n lng/lat (float): longitude and latitude in degrees.\n\n Returns:\n epsg code for the primary utm zone containing the point (lng/lat)\n \"\"\"\n utm_code = (math.floor((lng + 180)/6) % 60) + 1\n lat_code = 6 if lat > 0 else 7\n epsg_code = int('32%d%02d' % (lat_code, utm_code))\n return epsg_code\n\n\ndef _m2_area_of_wg84_pixel(pixel_size, center_lat):\n \"\"\"Calculate m^2 area of a square wgs84 pixel.\n\n Adapted from: https://gis.stackexchange.com/a/127327/2397\n\n Args:\n pixel_size (float): length of side of a square pixel in degrees.\n center_lat (float): latitude of the center of the pixel. Note this\n value +/- half the `pixel-size` must not exceed 90/-90 degrees\n latitude or an invalid area will be calculated.\n\n Returns:\n Area of square pixel of side length `pixel_size` centered at\n `center_lat` in m^2.\n\n \"\"\"\n a = 6378137 # meters\n b = 6356752.3142 # meters\n e = math.sqrt(1 - (b/a)**2)\n area_list = []\n for f in [center_lat+pixel_size/2, center_lat-pixel_size/2]:\n zm = 1 - e*math.sin(math.radians(f))\n zp = 1 + e*math.sin(math.radians(f))\n area_list.append(\n math.pi * b**2 * (\n math.log(zp/zm) / (2*e) +\n math.sin(math.radians(f)) / (zp*zm)))\n return abs(pixel_size / 360. * (area_list[0] - area_list[1]))\n\n\ndef _create_latitude_m2_area_column(lat_min, lat_max, n_pixels):\n \"\"\"Create a (n, 1) sized numpy array with m^2 areas in each element.\n\n Creates a per pixel m^2 area array that varies with changes in latitude.\n This array can be used to scale values by area when converting to or\n from a WGS84 projection to a projected one.\n\n Args:\n lat_max (float): maximum latitude in the bound\n lat_min (float): minimum latitude in the bound\n n_pixels (int): number of pixels to create for the column. 
The\n size of the target square pixels is (lat_max-lat_min)/n_pixels\n degrees per side.\n\n Return:\n A (n, 1) sized numpy array whose elements are the m^2 areas in each\n element estimated by the latitude value at the center of each pixel.\n \"\"\"\n pixel_size = (lat_max - lat_min) / n_pixels\n center_lat_array = numpy.linspace(\n lat_min+pixel_size/2, lat_max-pixel_size/2, n_pixels)\n area_array = numpy.array([\n _m2_area_of_wg84_pixel(pixel_size, lat)\n for lat in reversed(center_lat_array)]).reshape((n_pixels, 1))\n return area_array\n"
] |
[
[
"numpy.linspace",
"numpy.in1d",
"numpy.ndarray",
"numpy.dtype",
"numpy.max",
"numpy.broadcast",
"numpy.where",
"numpy.unique",
"numpy.finfo",
"numpy.ceil",
"numpy.copy",
"numpy.count_nonzero",
"numpy.fft.irfftn",
"numpy.zeros",
"numpy.isclose",
"numpy.min",
"numpy.array",
"numpy.fft.rfftn",
"numpy.sum",
"numpy.ones",
"numpy.prod",
"numpy.empty"
]
] |
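The `_convolve_2d_worker` in the file above performs each block's convolution via the standard zero-padded FFT identity, which is why `numpy.fft.rfftn` and `numpy.fft.irfftn` appear in this row's API list. A minimal standalone sketch of just that step, assuming toy array sizes and omitting all of the GDAL block bookkeeping:

```python
import numpy

# Zero-padded FFT convolution as done per block in _convolve_2d_worker:
# pad both arrays out to the combined output shape, multiply the spectra,
# invert, then slice the padding back off.
signal_block = numpy.random.rand(8, 8)
kernel_block = numpy.ones((3, 3)) / 9.0  # toy normalized mean filter

shape = numpy.array(signal_block.shape) + numpy.array(kernel_block.shape) - 1
fshape = [int(d) for d in shape]  # the real code rounds up to fast FFT sizes

signal_fft = numpy.fft.rfftn(signal_block, fshape)
kernel_fft = numpy.fft.rfftn(kernel_block, fshape)
# Full linear convolution; result shape is (10, 10) for these inputs.
result = numpy.fft.irfftn(signal_fft * kernel_fft, fshape)[:shape[0], :shape[1]]
```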
micheleantonazzi/gibson-dataset
|
[
"cb5fc81061bbda1a653d6fc7b625b14c8a517f3c"
] |
[
"tests/test_sample_generator.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom generic_dataset.data_pipeline import DataPipeline\nfrom generic_dataset.generic_sample import GenericSample, AnotherActivePipelineException, FieldHasIncorrectTypeException\nfrom generic_dataset.sample_generator import SampleGenerator, FieldNameAlreadyExistsException, \\\n FieldDoesNotExistException, MethodAlreadyExistsException, synchronize_on_fields\n\n\ndef test_generate_sample_class():\n # Classification problem\n generator = SampleGenerator(name='Sample', label_set={-1, 1})\n GeneratedClass = generator.generate_sample_class()\n\n assert isinstance(GeneratedClass, type)\n assert isinstance(GeneratedClass(label=1), GenericSample)\n assert GeneratedClass.GET_LABEL_SET()\n assert GeneratedClass(label=1).get_label() == 1\n with pytest.raises(FieldHasIncorrectTypeException):\n GeneratedClass(label=1.1)\n\n # Regression problem\n generator = SampleGenerator(name='Sample', label_set=set())\n GeneratedClass = generator.generate_sample_class()\n assert isinstance(GeneratedClass, type)\n assert isinstance(GeneratedClass(label=1.1), GenericSample)\n assert not GeneratedClass.GET_LABEL_SET()\n assert GeneratedClass(label=1.0).get_label() == 1.0\n with pytest.raises(FieldHasIncorrectTypeException):\n GeneratedClass(label=1)\n\n\ndef test_label_set():\n GeneratedSampleRegression = SampleGenerator(name='Sample', label_set=set()).generate_sample_class()\n\n assert GeneratedSampleRegression().get_label() == 0.0\n\n sample = GeneratedSampleRegression(label=1.11)\n assert sample.get_label() == 1.11\n\n sample.set_label(value=2.22)\n assert 2.22 == sample.get_label()\n assert not GeneratedSampleRegression.GET_LABEL_SET()\n\n GeneratedSampleClassification = SampleGenerator(name='Sample', label_set={-1, 1}).generate_sample_class()\n\n assert GeneratedSampleClassification().get_label() == -1\n sample = GeneratedSampleClassification(label=int(-1))\n assert sample.get_label() == -1\n\n sample.set_label(value=1)\n assert 1 == sample.get_label()\n\n assert GeneratedSampleClassification.GET_LABEL_SET() == {-1, 1}\n\n\ndef test_add_field():\n generator = SampleGenerator(name='Sample', label_set=set())\n generator.add_field('a', np.ndarray)\n\n with pytest.raises(FieldNameAlreadyExistsException):\n generator.add_field('a', int)\n\n with pytest.raises(FieldNameAlreadyExistsException):\n generator.add_field('label', int)\n\n\ndef test_fields_setter_getter():\n generator = SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_dataset_field('field2', str, lambda d:d, lambda d:d)\n GeneratedClass = generator.generate_sample_class()\n\n generated = GeneratedClass(label=.0)\n\n with pytest.raises(FieldHasIncorrectTypeException):\n generated.set_field('y')\n\n generated.set_field(np.array([0]))\n generated.set_field2('Hi')\n\n assert generated.get_field2() == 'Hi'\n\n with pytest.raises(FieldNameAlreadyExistsException):\n SampleGenerator('S', label_set=set()).add_field(field_name='f', field_type=str).add_field(field_name='f', field_type=int)\n\n with pytest.raises(FieldNameAlreadyExistsException):\n SampleGenerator('S', label_set=set()).add_dataset_field(field_name='f', field_type=str, save_function=lambda d:d, load_function=lambda d:d).add_field(field_name='f', field_type=int)\n\n with pytest.raises(FieldNameAlreadyExistsException):\n SampleGenerator('S', label_set=set()).add_dataset_field(field_name='f', field_type=str, save_function=lambda d:d, load_function=lambda d:d)\\\n .add_dataset_field(field_name='f', field_type=str, 
save_function=lambda d:d, load_function=lambda d:d)\n\n\ndef test_pipeline_methods():\n GeneratedClass = SampleGenerator('Sample', label_set={1}).add_dataset_field(field_name='field', field_type=np.ndarray, save_function=lambda d:d, load_function=lambda d:d)\\\n .add_field('field2', np.ndarray).add_field(\n 'field3', int).generate_sample_class()\n\n generated = GeneratedClass(label=1).set_field(np.array([2])).set_field2(np.array([1])).set_field3(1)\n with pytest.raises(AttributeError):\n generated.create_pipeline_for_field3()\n\n with pytest.raises(AttributeError):\n generated.get_pipeline_field3()\n\n pipeline_field = generated.create_pipeline_for_field()\n\n assert pipeline_field == generated.get_pipeline_field()\n assert pipeline_field != generated.create_pipeline_for_field2()\n\n with pytest.raises(AnotherActivePipelineException):\n generated.create_pipeline_for_field()\n\n res = pipeline_field.run(False).get_data()\n assert pipeline_field != generated.create_pipeline_for_field()\n\n\ndef test_custom_pipeline(use_gpu: bool = False):\n with pytest.raises(MethodAlreadyExistsException):\n SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_field('field2', np.ndarray) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field2', pipeline=DataPipeline()) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field2', pipeline=DataPipeline())\n\n with pytest.raises(FieldDoesNotExistException):\n SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_field('field2', np.ndarray) \\\n .add_custom_pipeline('m', elaborated_field='f', final_field='field2', pipeline=DataPipeline())\n\n with pytest.raises(FieldDoesNotExistException):\n SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_field('field2', np.ndarray) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field22', pipeline=DataPipeline())\n\n with pytest.raises(FieldHasIncorrectTypeException):\n SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_field('field2', int) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field2', pipeline=DataPipeline())\n\n with pytest.raises(FieldHasIncorrectTypeException):\n SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=int).add_field('field2', np.ndarray) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field2', pipeline=DataPipeline())\n\n GeneratedClass = SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_field('field2', np.ndarray) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field2', pipeline=DataPipeline().add_operation(\n operation=lambda data, engine: (engine.asarray([2]), engine))) \\\n .generate_sample_class()\n\n generated = GeneratedClass(label=.0).set_field(np.array([1, 1])).set_field2(np.array([]))\n pipeline = generated.m()\n\n with pytest.raises(AnotherActivePipelineException):\n generated.m()\n\n with pytest.raises(AnotherActivePipelineException):\n generated.get_field2()\n with pytest.raises(AnotherActivePipelineException):\n generated.get_field()\n\n with pytest.raises(AnotherActivePipelineException):\n generated.get_field2()\n\n res = pipeline.run(use_gpu).get_data()\n\n assert np.array_equal(generated.get_field(), np.array([1, 1]))\n assert np.array_equal(generated.get_field2(), np.array([2]))\n assert 
np.array_equal(res, generated.get_field2())\n\n\ndef test_custom_method():\n with pytest.raises(MethodAlreadyExistsException):\n SampleGenerator('Sample', label_set={0, 1}).add_custom_method(method_name='m', function=lambda d:d).add_custom_method(method_name='m', function=lambda m:m)\n\n @synchronize_on_fields({'field', 'field2'}, check_pipeline=True)\n def f(sample: GenericSample, i: int) -> int:\n sample.set_label(i + 1)\n return i + 1\n\n GeneratedClass = SampleGenerator('Sample', label_set={0, 1}).add_field(field_name='field', field_type=np.ndarray).add_field('field2', np.ndarray) \\\n .add_custom_method(method_name='custom_method', function=f).generate_sample_class()\n\n generated = GeneratedClass(label=0).set_field(np.array([1])).set_field2(np.array([]))\n assert generated.custom_method(0) == 1\n assert generated.get_label() == 1\n\n generated.create_pipeline_for_field()\n\n with pytest.raises(AnotherActivePipelineException):\n generated.custom_method(1)\n\n generated.get_pipeline_field().run(False).get_data()\n\n assert generated.custom_method(2) == 3\n\n generated.set_label(0)\n generated.custom_method(2)\n\n assert generated.get_label() == 3\n\n\ndef test_acquire_all_locks():\n GeneratedClass = SampleGenerator('Sample', label_set=set()).add_field(field_name='field', field_type=np.ndarray).add_field('field2', np.ndarray) \\\n .add_custom_pipeline('m', elaborated_field='field', final_field='field2', pipeline=DataPipeline().add_operation(\n operation=lambda data, engine: (engine.asarray([2]), engine))) \\\n .generate_sample_class()\n\n generated = GeneratedClass(label=1.0)\n generated.acquire_all_locks()\n generated.release_all_locks()\n\n with generated as gen_acquired_all_locks:\n gen_acquired_all_locks.create_pipeline_for_field()\n"
] |
[
[
"numpy.array"
]
] |
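The test file above doubles as the clearest documentation of the `generic_dataset` sample-generation API. A minimal usage sketch assembled only from calls the tests themselves exercise (the field name and values here are illustrative, not part of the library):

```python
import numpy as np

from generic_dataset.sample_generator import SampleGenerator

# As in test_generate_sample_class and test_fields_setter_getter: declare a
# classification label set and one ndarray field; the generator synthesizes
# typed setters/getters (set_field/get_field) for each declared field.
GeneratedSample = (
    SampleGenerator(name='Sample', label_set={-1, 1})
    .add_field(field_name='field', field_type=np.ndarray)
    .generate_sample_class())

sample = GeneratedSample(label=1).set_field(np.array([1, 2, 3]))
assert sample.get_label() == 1
assert np.array_equal(sample.get_field(), np.array([1, 2, 3]))
```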
calofmijuck/pytorch-bert-fine-tuning
|
[
"9ef6454352ec4d25e1f33ce62b26f579dc5a406d"
] |
[
"pytorch_pretrained_bert/modeling.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch BERT model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport copy\nimport json\nimport math\nimport logging\nimport tarfile\nimport tempfile\nimport shutil\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .file_utils import cached_path\n\nlogger = logging.getLogger(__name__)\n\n## Changed the directories due to proxy problems\nPRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"/home/sung.yi/.pytorch_pretrained_bert/bert-base-uncased.tar.gz\",\n 'bert-large-uncased': \"/home/sung.yi/.pytorch_pretrained_bert/bert-large-uncased.tar.gz\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz\",\n}\nCONFIG_NAME = 'bert_config.json'\nWEIGHTS_NAME = 'pytorch_model.bin'\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\n\n\nclass BertConfig(object):\n \"\"\"Configuration class to store the configuration of a `BertModel`.\n \"\"\"\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler. 
If string, \"gelu\", \"relu\" and \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The sttdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str):\n with open(vocab_size_or_config_json_file, \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n else:\n raise ValueError(\"First argument must be either a vocabulary size (int)\"\n \"or the path to a pretrained model config file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm\nexcept ImportError:\n print(\"Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.\")\n class BertLayerNorm(nn.Module):\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n 
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids=None):\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, hidden_states, attention_mask):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n return context_layer\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n 
super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(self, input_tensor, attention_mask):\n self_output = self.self(input_tensor, attention_mask)\n attention_output = self.output(self_output, input_tensor)\n return attention_output\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n self.intermediate_act_fn = ACT2FN[config.hidden_act] \\\n if isinstance(config.hidden_act, str) else config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(self, hidden_states, attention_mask):\n attention_output = self.attention(hidden_states, attention_mask)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):\n all_encoder_layers = []\n for layer_module in self.layer:\n hidden_states = layer_module(hidden_states, attention_mask)\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, 
self).__init__()\n        self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n        self.transform_act_fn = ACT2FN[config.hidden_act] \\\n            if isinstance(config.hidden_act, str) else config.hidden_act\n        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n\n    def forward(self, hidden_states):\n        hidden_states = self.dense(hidden_states)\n        hidden_states = self.transform_act_fn(hidden_states)\n        hidden_states = self.LayerNorm(hidden_states)\n        return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n    def __init__(self, config, bert_model_embedding_weights):\n        super(BertLMPredictionHead, self).__init__()\n        self.transform = BertPredictionHeadTransform(config)\n\n        # The output weights are the same as the input embeddings, but there is\n        # an output-only bias for each token.\n        self.decoder = nn.Linear(bert_model_embedding_weights.size(1),\n                                 bert_model_embedding_weights.size(0),\n                                 bias=False)\n        self.decoder.weight = bert_model_embedding_weights\n        self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))\n\n    def forward(self, hidden_states):\n        hidden_states = self.transform(hidden_states)\n        hidden_states = self.decoder(hidden_states) + self.bias\n        return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n    def __init__(self, config, bert_model_embedding_weights):\n        super(BertOnlyMLMHead, self).__init__()\n        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n\n    def forward(self, sequence_output):\n        prediction_scores = self.predictions(sequence_output)\n        return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n    def __init__(self, config):\n        super(BertOnlyNSPHead, self).__init__()\n        self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n    def forward(self, pooled_output):\n        seq_relationship_score = self.seq_relationship(pooled_output)\n        return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n    def __init__(self, config, bert_model_embedding_weights):\n        super(BertPreTrainingHeads, self).__init__()\n        self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)\n        self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n    def forward(self, sequence_output, pooled_output):\n        prediction_scores = self.predictions(sequence_output)\n        seq_relationship_score = self.seq_relationship(pooled_output)\n        return prediction_scores, seq_relationship_score\n\n\nclass PreTrainedBertModel(nn.Module):\n    \"\"\" An abstract class to handle weights initialization and\n        a simple interface for downloading and loading pretrained models.\n    \"\"\"\n    def __init__(self, config, *inputs, **kwargs):\n        super(PreTrainedBertModel, self).__init__()\n        if not isinstance(config, BertConfig):\n            raise ValueError(\n                \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
\"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):\n \"\"\"\n Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.\n Download and cache the pre-trained model file if needed.\n\n Params:\n pretrained_model_name: either:\n - a str with the name of a pre-trained model to load selected in the list of:\n . `bert-base-uncased`\n . `bert-large-uncased`\n . `bert-base-cased`\n . `bert-large-cased`\n . `bert-base-multilingual-uncased`\n . `bert-base-multilingual-cased`\n . `bert-base-chinese`\n - a path or url to a pretrained model archive containing:\n . `bert_config.json` a configuration file for the model\n . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance\n cache_dir: an optional path to a folder in which the pre-trained models will be cached.\n state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models\n *inputs, **kwargs: additional input for the specific Bert class\n (ex: num_labels for BertForSequenceClassification)\n \"\"\"\n if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:\n archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]\n else:\n archive_file = pretrained_model_name\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except FileNotFoundError:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name,\n ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),\n archive_file))\n return None\n if resolved_archive_file == archive_file:\n logger.info(\"loading archive file {}\".format(archive_file))\n else:\n logger.info(\"loading archive file {} from cache at {}\".format(\n archive_file, resolved_archive_file))\n tempdir = None\n if os.path.isdir(resolved_archive_file):\n serialization_dir = resolved_archive_file\n else:\n # Extract archive to temp dir\n tempdir = tempfile.mkdtemp()\n logger.info(\"extracting archive file {} to temp dir {}\".format(\n resolved_archive_file, tempdir))\n with tarfile.open(resolved_archive_file, 'r:gz') as archive:\n archive.extractall(tempdir)\n serialization_dir = tempdir\n # Load config\n config_file = os.path.join(serialization_dir, CONFIG_NAME)\n config = BertConfig.from_json_file(config_file)\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n if state_dict is None:\n weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)\n state_dict = torch.load(weights_path)\n\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n load(model, prefix='' if hasattr(model, 'bert') else 'bert.')\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n logger.info(\"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if tempdir:\n # Clean up temp dir\n shutil.rmtree(tempdir)\n return model\n\n\nclass BertModel(PreTrainedBertModel):\n \"\"\"BERT model (\"Bidirectional Embedding Representations from a Transformer\").\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. 
It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.\n\n Outputs: Tuple of (encoded_layers, pooled_output)\n `encoded_layers`: controled by `output_all_encoded_layers` argument:\n - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end\n of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each\n encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],\n - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding\n to the last attention block of shape [batch_size, sequence_length, hidden_size],\n `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a\n classifier pretrained on top of the hidden state associated to the first character of the\n input (`CLF`) to train on the Next-Sentence task (see BERT's paper).\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = modeling.BertModel(config=config)\n all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertModel, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(input_ids, token_type_ids)\n encoded_layers = self.encoder(embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n if not output_all_encoded_layers:\n 
encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output\n\n\nclass BertForPreTraining(PreTrainedBertModel):\n \"\"\"BERT model with pre-training heads.\n This module comprises the BERT model followed by the two pre-training heads:\n - the masked language modeling head, and\n - the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `masked_lm_labels` and `next_sentence_label` are not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `masked_lm_labels` or `next_sentence_label` is `None`:\n Outputs a tuple comprising\n - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and\n - the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForPreTraining(config)\n masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForPreTraining, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):\n sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n if masked_lm_labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, 
self.config.vocab_size), masked_lm_labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n return total_loss\n else:\n return prediction_scores, seq_relationship_score\n\n\nclass BertForMaskedLM(PreTrainedBertModel):\n \"\"\"BERT model with the masked language modeling head.\n This module comprises the BERT model followed by the masked language modeling head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]\n with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss\n is only computed for the labels set in [0, ..., vocab_size]\n\n Outputs:\n if `masked_lm_labels` is not `None`:\n Outputs the masked language modeling loss.\n if `masked_lm_labels` is `None`:\n Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForMaskedLM(config)\n masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForMaskedLM, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n prediction_scores = self.cls(sequence_output)\n\n if masked_lm_labels is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))\n return masked_lm_loss\n else:\n return prediction_scores\n\n\nclass BertForNextSentencePrediction(PreTrainedBertModel):\n \"\"\"BERT model with next sentence prediction head.\n This module comprises the BERT model followed by the next sentence classification head.\n\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape 
[batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]\n with indices selected in [0, 1].\n 0 => next sentence is the continuation, 1 => next sentence is a random sentence.\n\n Outputs:\n if `next_sentence_label` is not `None`:\n Outputs the total_loss which is the sum of the masked language modeling loss and the next\n sentence classification loss.\n if `next_sentence_label` is `None`:\n Outputs the next sentence classification logits of shape [batch_size, 2].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForNextSentencePrediction(config)\n seq_relationship_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForNextSentencePrediction, self).__init__(config)\n self.bert = BertModel(config)\n self.cls = BertOnlyNSPHead(config)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):\n _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,\n output_all_encoded_layers=False)\n seq_relationship_score = self.cls( pooled_output)\n\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n return next_sentence_loss\n else:\n return seq_relationship_score\n\n\nclass BertForSequenceClassification(PreTrainedBertModel):\n \"\"\"BERT model for classification.\n This module is composed of the BERT model with a linear layer on top of\n the pooled output.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to\n            a `sentence B` token (see BERT paper for more details).\n        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n            input sequence length in the current batch. It's the mask that we typically use for attention when\n            a batch has varying length sentences.\n        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n            with indices selected in [0, ..., num_labels].\n\n    Outputs:\n        if `labels` is not `None`:\n            Outputs the CrossEntropy classification loss of the output with the labels.\n        if `labels` is `None`:\n            Outputs the classification logits of shape [batch_size, num_labels].\n\n    Example usage:\n    ```python\n    # Already been converted into WordPiece token ids\n    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n    num_labels = 2\n\n    model = BertForSequenceClassification(config, num_labels)\n    logits = model(input_ids, token_type_ids, input_mask)\n    ```\n    \"\"\"\n    def __init__(self, config):\n        super(BertForSequenceClassification, self).__init__(config)\n        self.bert = BertModel(config)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob) ##\n\n\n    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n        ## From here\n        pooled_output = self.dropout(pooled_output)\n        return pooled_output\n\n\nclass GlueModel(BertModel):\n    def __init__(self, config, num_labels = 2):\n        super(GlueModel, self).__init__(config)\n        self.num_labels = num_labels ##\n        self.classifier = nn.Linear(config.hidden_size, num_labels) ##\n        self.apply(self.init_bert_weights) ## Create new function\n\n    def init_bert_weights(self, module):\n        if isinstance(module, (nn.Linear, nn.Embedding)):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n        elif isinstance(module, BertLayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n        if isinstance(module, nn.Linear) and module.bias is not None:\n            module.bias.data.zero_()\n\n    def forward(self, pooled_output, labels = None):\n        logits = self.classifier(pooled_output)\n\n        if labels is not None:\n            loss_fct = CrossEntropyLoss()\n            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n            return loss\n        else:\n            return logits\n\nclass BertForMultipleChoice(PreTrainedBertModel):\n    \"\"\"BERT model for multiple choice tasks.\n    This module is composed of the BERT model with a linear layer on top of\n    the pooled output.\n\n    Params:\n        `config`: a BertConfig class instance with the configuration to build a new model.\n        `num_choices`: the number of classes for the classifier. Default = 2.\n\n    Inputs:\n        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n            `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]\n            with the token types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A`\n and type 1 corresponds to a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_choices].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])\n input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])\n token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_choices = 2\n\n model = BertForMultipleChoice(config, num_choices)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_choices=2):\n super(BertForMultipleChoice, self).__init__(config)\n self.num_choices = num_choices\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n flat_input_ids = input_ids.view(-1, input_ids.size(-1))\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))\n _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, self.num_choices)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n return loss\n else:\n return reshaped_logits\n\n\nclass BertForTokenClassification(PreTrainedBertModel):\n \"\"\"BERT model for token-level classification.\n This module is composed of the BERT model with a linear layer on top of\n the full hidden state of the last layer.\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n `num_labels`: the number of classes for the classifier. Default = 2.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. 
It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n with indices selected in [0, ..., num_labels].\n\n Outputs:\n if `labels` is not `None`:\n Outputs the CrossEntropy classification loss of the output with the labels.\n if `labels` is `None`:\n Outputs the classification logits of shape [batch_size, sequence_length, num_labels].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n num_labels = 2\n\n model = BertForTokenClassification(config, num_labels)\n logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config, num_labels=2):\n super(BertForTokenClassification, self).__init__(config)\n self.num_labels = num_labels\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, num_labels)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n # final representation for all sequence tokens\n ## TODO\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n return loss\n else:\n return logits\n\n\nclass BertForQuestionAnswering(PreTrainedBertModel):\n \"\"\"BERT model for Question Answering (span extraction).\n This module is composed of the BERT model with a linear layer on top of\n the sequence output that computes start_logits and end_logits\n\n Params:\n `config`: a BertConfig class instance with the configuration to build a new model.\n\n Inputs:\n `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n a `sentence B` token (see BERT paper for more details).\n `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n input sequence length in the current batch. 
It's the mask that we typically use for attention when\n a batch has varying length sentences.\n `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].\n Positions are clamped to the length of the sequence and position outside of the sequence are not taken\n into account for computing the loss.\n\n Outputs:\n if `start_positions` and `end_positions` are not `None`:\n Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.\n if `start_positions` or `end_positions` is `None`:\n Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end\n position tokens of shape [batch_size, sequence_length].\n\n Example usage:\n ```python\n # Already been converted into WordPiece token ids\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n\n config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n\n model = BertForQuestionAnswering(config)\n start_logits, end_logits = model(input_ids, token_type_ids, input_mask)\n ```\n \"\"\"\n def __init__(self, config):\n super(BertForQuestionAnswering, self).__init__(config)\n self.bert = BertModel(config)\n # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version\n # self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.qa_outputs = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):\n sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n return total_loss\n else:\n return start_logits, end_logits\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.load",
"torch.zeros",
"torch.sqrt",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.matmul",
"torch.arange",
"torch.ones_like"
]
] |
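
The torch APIs listed above are dominated by the scaled dot-product attention in `BertSelfAttention` and the additive mask built in `BertModel.forward`. A minimal self-contained sketch of that pattern follows; the tensor sizes, the `split_heads` helper, and the example mask are illustrative assumptions, not values taken from the row itself:

```python
import math
import torch

# Illustrative sizes (assumptions, not from the dataset row)
batch, seq_len, hidden, heads = 2, 5, 16, 4
head_size = hidden // heads

x = torch.randn(batch, seq_len, hidden)
q_proj = torch.nn.Linear(hidden, hidden)
k_proj = torch.nn.Linear(hidden, hidden)
v_proj = torch.nn.Linear(hidden, hidden)

def split_heads(t):
    # [batch, seq, hidden] -> [batch, heads, seq, head_size]
    return t.view(batch, seq_len, heads, head_size).permute(0, 2, 1, 3)

q, k, v = (split_heads(p(x)) for p in (q_proj, k_proj, v_proj))

# 1/0 padding mask -> additive mask, mirroring the
# (1.0 - mask) * -10000.0 trick in BertModel.forward
attention_mask = torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 0, 0]], dtype=torch.float)
extended = (1.0 - attention_mask)[:, None, None, :] * -10000.0

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_size) + extended
probs = torch.softmax(scores, dim=-1)
context = torch.matmul(probs, v).permute(0, 2, 1, 3).reshape(batch, seq_len, hidden)
print(context.shape)  # torch.Size([2, 5, 16])
```

Adding a large negative number to masked positions before the softmax drives their probabilities to effectively zero without needing an explicit masked softmax, which is why the model can precompute one additive mask for all layers.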
lcarde/pymanip
|
[
"d6f0b90b678d6f508d86d2d3ba3b580615c9566f",
"d6f0b90b678d6f508d86d2d3ba3b580615c9566f"
] |
[
"samples/pymba_acquisition_continuous.py",
"samples/essai_andor.py"
] |
[
"from pathlib import Path\nimport time\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport png\n\nfrom pymba import *\nfrom pymba.vimbaexception import VimbaException\nfrom pymba.vimbainterface import VimbaInterface\nfrom pymanip import Session\n\n# Paramètres de l'acquisition\ndestination_dir = Path(r'C:\\Users\\Julien Salort\\Documents\\Acquis')\nacquisition_name = 'essai'\nimages_dir = destination_dir / acquisition_name\nN = 20\n\nif not destination_dir.exists():\n destination_dir.mkdir()\nif not destination_dir.is_dir():\n raise NotADirectoryError\nif not images_dir.exists():\n images_dir.mkdir()\nif not images_dir.is_dir():\n raise NotADirectoryError\n\nMI = Session(images_dir, ('timestamp',))\n\ndef print_features(cam):\n cameraFeatureNames = cam.getFeatureNames()\n for name in cameraFeatureNames:\n try:\n val = cam.__getattr__(name)\n if isinstance(val, bytes):\n val = val.decode('ascii')\n info = cam.getFeatureInfo(name)\n unit = info.unit\n if isinstance(unit, bytes):\n unit = unit.decode('ascii')\n elif unit is None:\n unit = \"\"\n print(info.displayName.decode('ascii'),\n '(' + name.decode('ascii') + ')',\n ':', val, unit)\n except VimbaException:\n print(name.decode('ascii'), ': ?')\n \nwith Vimba() as vimba:\n print(\"Vimba version:\", vimba.getVersion())\n system = vimba.getSystem()\n print(\"\"\"\nVimba System features\n=====================\"\"\")\n print_features(system)\n \n # list available cameras (after enabling discovery for GigE cameras)\n if system.GeVTLIsPresent:\n system.runFeatureCommand(\"GeVDiscoveryAllOnce\")\n time.sleep(0.2)\n \n # Ouverture de le caméra\n cameraIds = vimba.getCameraIds()\n \n for cameraId in cameraIds:\n #print('Camera ID:', cameraId)\n cam = vimba.getCamera(cameraId)\n cam.openCamera()\n #print('Camera ID String', cam.cameraIdString)\n \n \n # Acquisition\n\n print(\"\"\"\nCamera info structure\n=====================\"\"\")\n info = cam.getInfo()\n print('cameraName:', info.cameraName.decode('ascii'))\n print('interfaceIdString:', info.interfaceIdString.decode('ascii'))\n print('modelName:', info.modelName.decode('ascii'))\n print('serialString:', info.serialString.decode('ascii'))\n \n #print('Acquisition mode:', cam.AcquisitionMode)\n # Possible values: 'Continuous', 'SingleFrame', 'MultiFrame', 'Recorder'\n #cam.IIDCActivateFormat7 = True\n cam.AcquisitionMode = 'Continuous'\n cam.IIDCPhyspeed = 'S800'\n cam.PixelFormat = 'Mono16'\n \n #cam.AcquisitionFrameRate = 20.0\n cam.TriggerMode = 'On'\n \n #cam.IIDCPacketSizeAuto = 'On'\n \n print(\"\"\"\nVimba Camera features\n=====================\"\"\")\n print_features(cam)\n \n frame = cam.getFrame()\n frame.announceFrame()\n\n print(\"\"\"\nAcquisition\n===========\"\"\")\n cam.startCapture()\n cam.runFeatureCommand('AcquisitionStart')\n\n for i in range(N):\n frame.queueFrameCapture()\n frame.waitFrameCapture()\n timestamp = frame.timestamp/1e7\n img = png.from_array(frame.getImage(), mode='L')\n with open(images_dir / 'img-{:03d}.png'.format(i), 'wb') as f:\n img.save(f)\n MI.log_addline()\n \n cam.runFeatureCommand('AcquisitionStop')\n cam.endCapture()\n cam.revokeAllFrames()\n\nprint('Finished.')\n\n# Graphe des timestamps\nt = MI['timestamp'][-N:]\ntMI = MI['t'][-N:]\nMI.Stop()\nreal_fs = 1/np.mean(t[1:]-t[:-1])\nprint('Real fs =', real_fs, 'Hz')\nprint('Computer estimated=', 1/np.mean(tMI[1:]-tMI[:-1]))\n\nplt.figure()\nplt.plot(t-t[0], 'bo')\nplt.show()\nMI.Stop()\n",
"import time\nimport cv2\nimport os\nimport h5py\n\nfrom pymanip.video.andor import Andor_Camera\n\n\n# Exemple avec one-shot\n# if True:\n # with Andor_Camera() as cam:\n # for n in range(5):\n # img = cam.acquisition_oneshot()\n # print(img.metadata['timestamp'])\n # cv2.imwrite(f'img-{n:04d}.png', img)\n # print('Saved', n+1)\n # time.sleep(10.0)\nexposure_time_chosen=4.0\nframe_rate_chosen=0.1\nnb_images_chosen=2\nname_folder_chosen='plusplusplustard'\nbitdepth_chosen='Mono16'\nSimplePreAmpGainControl_chosen='16-bit (low noise & high well capacity)'\ntry:\n os.mkdir(name_folder_chosen)\nexcept Exception:\n pass\n \nwith Andor_Camera() as cam:\n cam.ExposureTime.setValue(exposure_time_chosen)#10e-3)\n cam.FrameRate.setValue(frame_rate_chosen)\n cam.PixelEncoding.setString(bitdepth_chosen)\n cam.SimplePreAmpGainControl.setString(SimplePreAmpGainControl_chosen)\n count, ts = cam.acquire_to_files(nb_images_chosen, name_folder_chosen+'/img', zerofill=4,\n dryrun=False, file_format='png',\n compression_level=9,\n delay_save=True,\n progressbar=True)\n\n#ecrire enregistrer les temps en hdf5 et parametres image\nwith h5py.File(name_folder_chosen+'timestamps_and_parameters.hdf5', 'w') as f:\n f.create_dataset('ts', data=ts)\n f.attrs['exposure_time'] = exposure_time_chosen\n f.attrs['frame_rate'] = frame_rate_chosen\n f.attrs['bitdepth'] = bitdepth_chosen\n f.attrs['SimplePreAmpGainControl'] = SimplePreAmpGainControl_chosen\n\n\n \n \nimport matplotlib.pyplot as plt\n\nplt.plot(count, ts, 'o')\nplt.show()\n\n\nimg = cv2.imread(name_folder_chosen+'/img'+'-0001.png', -1)\nplt.figure(1)\nplt.imshow(img.T, cmap='gray')\nplt.colorbar()\nplt.clim([0 ,10000])\n\nimg = cv2.imread(name_folder_chosen+'/img'+'-0002.png', -1)\nplt.figure(2)\nplt.imshow(img.T, cmap='gray')\nplt.colorbar()\n\nplt.show()\n\n\nwith h5py.File(name_folder_chosen+'timestamps_and_parameters.hdf5', 'r') as f:\n timestamps = f['ts'].value\n exposure_time = f.attrs['exposure_time']\n print(timestamps)\n fhz = f.attrs['frame_rate']\n print(fhz)"
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.mean",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
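
Both pymanip samples above end by estimating the achieved frame rate from recorded timestamps. A minimal numpy-only sketch of that estimate; the timestamps here are synthetic (an assumed ~20 Hz acquisition with jitter), and the camera and `Session` objects are not reproduced:

```python
import numpy as np

# Synthetic timestamps in seconds (assumption: ~20 Hz with slight jitter)
rng = np.random.default_rng(0)
t = np.cumsum(0.05 + 0.001 * rng.standard_normal(20))

# Same estimate as in the samples: reciprocal of the mean inter-frame interval
real_fs = 1 / np.mean(t[1:] - t[:-1])
print('Real fs =', real_fs, 'Hz')
```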
BaiYuhaoSpiceeYJ/SEGAN_denoise
|
[
"5bf65ae72b9f0a996ae338c53c68c4967e08cd59"
] |
[
"segan/models/generator.py"
] |
[
"import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.nn.utils as nnu\nimport torch.nn as nn\nimport random\nimport numpy as np\ntry:\n from core import *\n from modules import *\nexcept ImportError:\n from .core import *\n from .modules import *\n\n# BEWARE: PyTorch >= 0.4.1 REQUIRED\nfrom torch.nn.utils.spectral_norm import spectral_norm\n\nclass GSkip(nn.Module):\n\n def __init__(self, skip_type, size, skip_init, skip_dropout=0,\n merge_mode='sum', kwidth=11, bias=True):\n # skip_init only applies to alpha skips\n super().__init__()\n self.merge_mode = merge_mode\n if skip_type == 'alpha' or skip_type == 'constant':\n if skip_init == 'zero':\n alpha_ = torch.zeros(size)\n elif skip_init == 'randn':\n alpha_ = torch.randn(size)\n elif skip_init == 'one':\n alpha_ = torch.ones(size)\n else:\n raise TypeError('Unrecognized alpha init scheme: ', \n skip_init)\n #if cuda:\n # alpha_ = alpha_.cuda()\n if skip_type == 'alpha':\n self.skip_k = nn.Parameter(alpha_.view(1, -1, 1))\n else:\n # constant, not learnable\n self.skip_k = nn.Parameter(alpha_.view(1, -1, 1))\n self.skip_k.requires_grad = False\n elif skip_type == 'conv':\n if kwidth > 1:\n pad = kwidth // 2\n else:\n pad = 0\n self.skip_k = nn.Conv1d(size, size, kwidth, stride=1,\n padding=pad, bias=bias)\n else:\n raise TypeError('Unrecognized GSkip scheme: ', skip_type)\n self.skip_type = skip_type\n if skip_dropout > 0:\n self.skip_dropout = nn.Dropout(skip_dropout)\n\n def __repr__(self):\n if self.skip_type == 'alpha':\n return self._get_name() + '(Alpha(1))'\n elif self.skip_type == 'constant':\n return self._get_name() + '(Constant(1))'\n else:\n return super().__repr__()\n\n def forward(self, hj, hi):\n if self.skip_type == 'conv':\n sk_h = self.skip_k(hj)\n else:\n skip_k = self.skip_k.repeat(hj.size(0), 1, hj.size(2))\n sk_h = skip_k * hj\n if hasattr(self, 'skip_dropout'):\n sk_h = self.skip_dropout(sk_h)\n if self.merge_mode == 'sum':\n # merge with input hi on current layer\n return sk_h + hi\n elif self.merge_mode == 'concat':\n return torch.cat((hi, sk_h), dim=1)\n else:\n raise TypeError('Unrecognized skip merge mode: ', self.merge_mode)\n\nclass Generator(Model):\n\n def __init__(self, ninputs, fmaps,\n kwidth, poolings, \n dec_fmaps=None,\n dec_kwidth=None,\n dec_poolings=None,\n z_dim=None,\n no_z=False,\n skip=True,\n bias=False,\n skip_init='one',\n skip_dropout=0,\n skip_type='alpha',\n norm_type=None,\n skip_merge='sum',\n skip_kwidth=11,\n name='Generator'):\n super().__init__(name=name)\n self.skip = skip\n self.bias = bias\n self.no_z = no_z\n self.z_dim = z_dim\n self.enc_blocks = nn.ModuleList()\n assert isinstance(fmaps, list), type(fmaps)\n assert isinstance(poolings, list), type(poolings)\n if isinstance(kwidth, int): \n kwidth = [kwidth] * len(fmaps)\n assert isinstance(kwidth, list), type(kwidth)\n skips = {}\n ninp = ninputs\n for pi, (fmap, pool, kw) in enumerate(zip(fmaps, poolings, kwidth),\n start=1):\n if skip and pi < len(fmaps):\n # Make a skip connection for all but last hidden layer\n gskip = GSkip(skip_type, fmap,\n skip_init,\n skip_dropout,\n merge_mode=skip_merge,\n kwidth=skip_kwidth,\n bias=bias)\n l_i = pi - 1\n skips[l_i] = {'alpha':gskip}\n setattr(self, 'alpha_{}'.format(l_i), skips[l_i]['alpha'])\n enc_block = GConv1DBlock(\n ninp, fmap, kw, stride=pool, bias=bias,\n norm_type=norm_type\n )\n self.enc_blocks.append(enc_block)\n ninp = fmap\n\n self.skips = skips\n if not no_z and z_dim is None:\n z_dim = fmaps[-1]\n if not no_z:\n ninp += 
z_dim\n # Ensure we have fmaps, poolings and kwidth ready to decode\n if dec_fmaps is None:\n dec_fmaps = fmaps[::-1][1:] + [1]\n else:\n assert isinstance(dec_fmaps, list), type(dec_fmaps)\n if dec_poolings is None:\n dec_poolings = poolings[:]\n else:\n assert isinstance(dec_poolings, list), type(dec_poolings)\n self.dec_poolings = dec_poolings\n if dec_kwidth is None:\n dec_kwidth = kwidth[:]\n else:\n if isinstance(dec_kwidth, int): \n dec_kwidth = [dec_kwidth] * len(dec_fmaps)\n assert isinstance(dec_kwidth, list), type(dec_kwidth)\n # Build the decoder\n self.dec_blocks = nn.ModuleList()\n for pi, (fmap, pool, kw) in enumerate(zip(dec_fmaps, dec_poolings, \n dec_kwidth),\n start=1):\n if skip and pi > 1 and pool > 1:\n if skip_merge == 'concat':\n ninp *= 2\n\n if pi >= len(dec_fmaps):\n act = 'Tanh'\n else:\n act = None\n if pool > 1:\n dec_block = GDeconv1DBlock(\n ninp, fmap, kw, stride=pool,\n norm_type=norm_type, bias=bias,\n act=act\n )\n else:\n dec_block = GConv1DBlock(\n ninp, fmap, kw, stride=1, \n bias=bias,\n norm_type=norm_type\n )\n self.dec_blocks.append(dec_block)\n ninp = fmap\n\n def forward(self, x, z=None, ret_hid=False):\n hall = {}\n hi = x\n skips = self.skips\n for l_i, enc_layer in enumerate(self.enc_blocks):\n hi, linear_hi = enc_layer(hi, True)\n #print('ENC {} hi size: {}'.format(l_i, hi.size()))\n #print('Adding skip[{}]={}, alpha={}'.format(l_i,\n # hi.size(),\n # hi.size(1)))\n if self.skip and l_i < (len(self.enc_blocks) - 1):\n skips[l_i]['tensor'] = linear_hi\n if ret_hid:\n hall['enc_{}'.format(l_i)] = hi\n if not self.no_z:\n if z is None:\n # make z \n z = torch.randn(hi.size(0), self.z_dim, *hi.size()[2:])\n if hi.is_cuda:\n z = z.to('cuda')\n if len(z.size()) != len(hi.size()):\n raise ValueError('len(z.size) {} != len(hi.size) {}'\n ''.format(len(z.size()), len(hi.size())))\n if not hasattr(self, 'z'):\n self.z = z\n hi = torch.cat((z, hi), dim=1)\n if ret_hid:\n hall['enc_zc'] = hi\n else:\n z = None\n enc_layer_idx = len(self.enc_blocks) - 1\n for l_i, dec_layer in enumerate(self.dec_blocks):\n if self.skip and enc_layer_idx in self.skips and \\\n self.dec_poolings[l_i] > 1:\n skip_conn = skips[enc_layer_idx]\n #hi = self.skip_merge(skip_conn, hi)\n #print('Merging hi {} with skip {} of hj {}'.format(hi.size(),\n # l_i,\n # skip_conn['tensor'].size()))\n hi = skip_conn['alpha'](skip_conn['tensor'], hi)\n #print('DEC in size after skip and z_all: ', hi.size())\n #print('decoding layer {} with input {}'.format(l_i, hi.size()))\n hi = dec_layer(hi)\n #print('decoding layer {} output {}'.format(l_i, hi.size()))\n enc_layer_idx -= 1\n if ret_hid:\n hall['dec_{}'.format(l_i)] = hi\n if ret_hid:\n return hi, hall\n else:\n return hi\n\nclass Generator1D(Model):\n\n def __init__(self, ninputs, enc_fmaps, kwidth,\n activations, lnorm=False, dropout=0.,\n pooling=2, z_dim=256, z_all=False,\n skip=True, skip_blacklist=[],\n dec_activations=None, cuda=False,\n bias=False, aal=False, wd=0.,\n skip_init='one', skip_dropout=0.,\n no_tanh=False, aal_out=False,\n rnn_core=False, linterp=False,\n mlpconv=False, dec_kwidth=None,\n no_z=False,\n skip_type='alpha', \n num_spks=None, multilayer_out=False,\n skip_merge='sum', snorm=False,\n convblock=False, post_skip=False,\n pos_code=False, satt=False,\n dec_fmaps=None, up_poolings=None,\n post_proc=False, out_gate=False, \n linterp_mode='linear', hidden_comb=False, \n big_out_filter=False, z_std=1,\n freeze_enc=False, skip_kwidth=11,\n pad_type='constant'):\n # if num_spks is specified, do onehot coditioners in 
dec stages\n # subract_mean: from output signal, get rif of mean by windows\n # multilayer_out: add some convs in between gblocks in decoder\n super().__init__(name='Generator1D')\n self.dec_kwidth = dec_kwidth\n self.skip_kwidth = skip_kwidth\n self.skip = skip\n self.skip_init = skip_init\n self.skip_dropout = skip_dropout\n self.snorm = snorm\n self.z_dim = z_dim\n self.z_all = z_all\n self.pos_code = pos_code\n self.post_skip = post_skip\n self.big_out_filter = big_out_filter\n self.satt = satt\n self.post_proc = post_proc\n self.pad_type = pad_type\n self.onehot = num_spks is not None\n if self.onehot:\n assert num_spks > 0\n self.num_spks = num_spks\n # do not place any z\n self.no_z = no_z\n self.do_cuda = cuda\n self.wd = wd\n self.no_tanh = no_tanh\n self.skip_blacklist = skip_blacklist\n self.z_std = z_std\n self.freeze_enc = freeze_enc\n self.gen_enc = nn.ModuleList()\n if aal or aal_out:\n # Make cheby1 filter to include into pytorch conv blocks\n from scipy.signal import cheby1, dlti, dimpulse\n system = dlti(*cheby1(8, 0.05, 0.8 / pooling))\n tout, yout = dimpulse(system)\n filter_h = yout[0]\n if aal:\n self.filter_h = filter_h\n else:\n self.filter_h = None\n\n if dec_kwidth is None:\n dec_kwidth = kwidth\n\n if isinstance(activations, str):\n if activations != 'glu':\n activations = getattr(nn, activations)()\n if not isinstance(activations, list):\n activations = [activations] * len(enc_fmaps)\n if not isinstance(pooling, list) or len(pooling) == 1: \n pooling = [pooling] * len(enc_fmaps)\n skips = {}\n # Build Encoder\n for layer_idx, (fmaps, pool, act) in enumerate(zip(enc_fmaps, \n pooling,\n activations)):\n if layer_idx == 0:\n inp = ninputs\n else:\n inp = enc_fmaps[layer_idx - 1]\n if self.skip and layer_idx < (len(enc_fmaps) - 1):\n if layer_idx not in self.skip_blacklist:\n l_i = layer_idx\n gskip = GSkip(skip_type, fmaps,\n skip_init,\n skip_dropout,\n merge_mode=skip_merge,\n cuda=self.do_cuda,\n kwidth=self.skip_kwidth)\n skips[l_i] = {'alpha':gskip}\n setattr(self, 'alpha_{}'.format(l_i), skips[l_i]['alpha'])\n self.gen_enc.append(GBlock(inp, fmaps, kwidth, act,\n padding=None, lnorm=lnorm, \n dropout=dropout, pooling=pool,\n enc=True, bias=bias, \n aal_h=self.filter_h,\n snorm=snorm, convblock=convblock,\n satt=self.satt,\n pad_type=pad_type))\n self.skips = skips\n dec_inp = enc_fmaps[-1]\n if dec_fmaps is None:\n if mlpconv:\n dec_fmaps = enc_fmaps[:-1][::-1] + [16, 8, 1]\n print(dec_fmaps)\n up_poolings = [pooling] * (len(dec_fmaps) - 2) + [1] * 3\n add_activations = [nn.PReLU(16), nn.PReLU(8), nn.PReLU(1)]\n raise NotImplementedError('MLPconv is not useful and should be'\n ' deleted')\n else:\n dec_fmaps = enc_fmaps[:-1][::-1] + [1]\n up_poolings = pooling[::-1]\n #up_poolings = [pooling] * len(dec_fmaps)\n print('up_poolings: ', up_poolings)\n self.up_poolings = up_poolings\n else:\n assert up_poolings is not None\n self.up_poolings = up_poolings\n if rnn_core:\n self.z_all = False\n z_all = False\n # place a bidirectional RNN layer in the core to condition\n # everything to everything AND Z will be the init state of it\n self.rnn_core = nn.LSTM(dec_inp, dec_inp // 2, bidirectional=True,\n batch_first=True)\n else:\n if no_z:\n all_z = False\n else:\n dec_inp += z_dim\n #print(dec_fmaps)\n # Build Decoder\n self.gen_dec = nn.ModuleList()\n\n if dec_activations is None:\n # assign same activations as in Encoder\n dec_activations = [activations[0]] * len(dec_fmaps)\n else:\n if mlpconv:\n dec_activations = dec_activations[:-1]\n dec_activations += 
add_activations\n \n enc_layer_idx = len(enc_fmaps) - 1\n for layer_idx, (fmaps, act) in enumerate(zip(dec_fmaps, \n dec_activations)):\n if skip and layer_idx > 0 and enc_layer_idx not in skip_blacklist \\\n and up_poolings[layer_idx] > 1: \n if skip_merge == 'concat':\n dec_inp *= 2\n print('Added skip conn input of enc idx: {} and size:'\n ' {}'.format(enc_layer_idx, dec_inp))\n\n if z_all and layer_idx > 0:\n dec_inp += z_dim\n\n if self.onehot:\n dec_inp += self.num_spks\n\n if layer_idx >= len(dec_fmaps) - 1:\n if self.no_tanh:\n act = None\n else:\n act = nn.Tanh()\n lnorm = False\n dropout = 0\n if up_poolings[layer_idx] > 1:\n pooling = up_poolings[layer_idx]\n self.gen_dec.append(GBlock(dec_inp,\n fmaps, dec_kwidth, act, \n padding=0, \n lnorm=lnorm,\n dropout=dropout, pooling=pooling, \n enc=False,\n bias=bias,\n linterp=linterp, \n linterp_mode=linterp_mode,\n convblock=convblock, \n comb=hidden_comb,\n pad_type=pad_type))\n else:\n self.gen_dec.append(GBlock(dec_inp,\n fmaps, dec_kwidth, act, \n lnorm=lnorm,\n dropout=dropout, pooling=1,\n padding=0,#kwidth//2,\n enc=True,\n bias=bias,\n convblock=convblock,\n pad_type=pad_type))\n dec_inp = fmaps\n if aal_out:\n # make AAL filter to put in output\n self.aal_out = nn.Conv1d(1, 1, filter_h.shape[0] + 1,\n stride=1, \n padding=filter_h.shape[0] // 2,\n bias=False)\n print('filter_h shape: ', filter_h.shape)\n # apply AAL weights, reshaping impulse response to match\n # in channels and out channels\n aal_t = torch.FloatTensor(filter_h).view(1, 1, -1)\n aal_t = torch.cat((aal_t, torch.zeros(1, 1, 1)), dim=-1)\n self.aal_out.weight.data = aal_t\n print('aal_t size: ', aal_t.size())\n\n if post_proc:\n self.comb_net = PostProcessingCombNet(1, 512)\n if out_gate:\n self.out_gate = OutGate(1, 1)\n if big_out_filter:\n self.out_filter = nn.Conv1d(1, 1, 513, padding=513//2)\n\n \n\n def forward(self, x, z=None, ret_hid=False, spkid=None, \n slice_idx=0, att_weight=0):\n if self.num_spks is not None and spkid is None:\n raise ValueError('Please specify spk ID to network to '\n 'build OH identifier in decoder')\n\n hall = {}\n hi = x\n skips = self.skips\n for l_i, enc_layer in enumerate(self.gen_enc):\n hi, linear_hi = enc_layer(hi, att_weight=att_weight)\n #print('ENC {} hi size: {}'.format(l_i, hi.size()))\n #print('Adding skip[{}]={}, alpha={}'.format(l_i,\n # hi.size(),\n # hi.size(1)))\n if self.skip and l_i < (len(self.gen_enc) - 1):\n if l_i not in self.skip_blacklist:\n if self.post_skip:\n skips[l_i]['tensor'] = hi\n else:\n skips[l_i]['tensor'] = linear_hi\n if ret_hid:\n hall['enc_{}'.format(l_i)] = hi\n if hasattr(self, 'rnn_core'):\n self.z_all = False\n if z is None:\n # make z as initial RNN state forward and backward\n # (2 directions)\n if self.no_z:\n # MAKE DETERMINISTIC ZERO\n h0 = Variable(torch.zeros(2, hi.size(0), hi.size(1)//2))\n else:\n h0 = Variable(self.z_std * torch.randn(2, \n hi.size(0), \n hi.size(1)//2))\n c0 = Variable(torch.zeros(2, hi.size(0), hi.size(1)//2))\n if self.do_cuda:\n h0 = h0.cuda()\n c0 = c0.cuda()\n z = (h0, c0)\n if not hasattr(self, 'z'):\n self.z = z\n # Conv --> RNN\n hi = hi.transpose(1, 2)\n hi, state = self.rnn_core(hi, z)\n # RNN --> Conv\n hi = hi.transpose(1, 2)\n else:\n if not self.no_z:\n if z is None:\n # make z \n z = Variable(self.z_std * torch.randn(hi.size(0), self.z_dim,\n *hi.size()[2:]))\n if len(z.size()) != len(hi.size()):\n raise ValueError('len(z.size) {} != len(hi.size) {}'\n ''.format(len(z.size()), len(hi.size())))\n if self.do_cuda:\n z = z.cuda()\n if not 
hasattr(self, 'z'):\n self.z = z\n #print('Concating z {} and hi {}'.format(z.size(),\n # hi.size()))\n hi = torch.cat((z, hi), dim=1)\n if ret_hid:\n hall['enc_zc'] = hi\n else:\n z = None\n if self.pos_code:\n hi = pos_code(slice_idx, hi)\n # Cut gradient flow in Encoder?\n if self.freeze_enc:\n hi = hi.detach()\n #print('Concated hi|z size: ', hi.size())\n enc_layer_idx = len(self.gen_enc) - 1\n z_up = z\n if self.onehot:\n # make one hot identifier batch\n spk_oh = Variable(torch.zeros(spkid.size(0), \n self.num_spks))\n for bidx in range(spkid.size(0)):\n if len(spkid.size()) == 3:\n spk_id = spkid[bidx, 0].cpu().data[0]\n else:\n spk_id = spkid[bidx].cpu().data[0]\n spk_oh[bidx, spk_id] = 1\n spk_oh = spk_oh.view(spk_oh.size(0), -1, 1)\n if self.do_cuda:\n spk_oh = spk_oh.cuda()\n # Now one-hot is [B, SPKS, 1] ready to be \n # repeated to [B, SPKS, T] depending on layer\n for l_i, dec_layer in enumerate(self.gen_dec):\n if self.skip and enc_layer_idx in self.skips and \\\n self.up_poolings[l_i] > 1:\n skip_conn = skips[enc_layer_idx]\n #hi = self.skip_merge(skip_conn, hi)\n #print('Merging hi {} with skip {} of hj {}'.format(hi.size(),\n # l_i,\n # skip_conn['tensor'].size()))\n hi = skip_conn['alpha'](skip_conn['tensor'], hi)\n if l_i > 0 and self.z_all:\n # concat z in every layer\n z_up = torch.cat((z_up, z_up), dim=2)\n hi = torch.cat((hi, z_up), dim=1)\n if self.onehot:\n # repeat one-hot in time to adjust to concat\n spk_oh_r = spk_oh.repeat(1, 1, hi.size(-1))\n # concat in depth (channels)\n hi = torch.cat((hi, spk_oh_r), dim=1)\n #print('DEC in size after skip and z_all: ', hi.size())\n #print('decoding layer {} with input {}'.format(l_i, hi.size()))\n hi, _ = dec_layer(hi, att_weight=att_weight)\n #print('decoding layer {} output {}'.format(l_i, hi.size()))\n enc_layer_idx -= 1\n if ret_hid:\n hall['dec_{}'.format(l_i)] = hi\n if hasattr(self, 'aal_out'):\n hi = self.aal_out(hi)\n if hasattr(self, 'comb_net'):\n hi = F.tanh(self.comb_net(hi))\n if hasattr(self, 'out_gate'):\n hi = self.out_gate(hi)\n if hasattr(self, 'out_filter'):\n hi = self.out_filter(hi)\n # normalize G output in range within [-1, 1]\n #hi = self.batch_minmax_norm(hi)\n if ret_hid:\n return hi, hall\n else:\n return hi\n\n def batch_minmax_norm(self, x, out_min=-1, out_max=1):\n mins = torch.min(x, dim=2)[0]\n maxs = torch.max(x, dim=2)[0]\n R = (out_max - out_min) / (maxs - mins)\n R = R.unsqueeze(1)\n #print('R size: ', R.size())\n #print('x size: ', x.size())\n #print('mins size: ', mins.size())\n x = R * (x - mins.unsqueeze(1)) + out_min\n #print('norm x size: ', x.size())\n return x\n\n def skip_merge(self, skip_conn, hi):\n # TODO: DEPRECATED WITH NEW SKIP SCHEME\n raise NotImplementedError\n hj = skip_conn['tensor']\n alpha = skip_conn['alpha'].view(1, -1, 1)\n alpha = alpha.repeat(hj.size(0), 1, hj.size(2))\n #print('hi: ', hi.size())\n #print('hj: ', hj.size())\n #print('alpha: ', alpha.size())\n #print('alpha: ', alpha)\n if 'dropout' in skip_conn:\n alpha = skip_conn['dropout'](alpha)\n #print('alpha: ', alpha)\n return hi + alpha * hj\n \nif __name__ == '__main__':\n \"\"\"\n G = Generator1D(1, [64, 128, 256, 512, 1024], \n 31, \n 'ReLU',\n lnorm=False, \n pooling=4,\n z_dim=1024,\n skip_init='randn',\n skip_type='alpha',\n skip_blacklist=[],\n bias=False, cuda=False,\n rnn_core=False, linterp=False,\n dec_kwidth=31)\n \"\"\"\n G = Generator(1, [64, 128, 256, 512, 1024],\n kwidth=31,\n poolings=[4, 4, 4, 4, 4], no_z=True)\n print(G)\n print('G num params: ', G.get_n_params())\n x = 
torch.randn(1, 1, 4096)\n y, hall = G(x, ret_hid=True)\n print(y)\n print(x.size())\n print(y.size())\n #import matplotlib\n #matplotlib.use('Agg')\n #import matplotlib.pyplot as plt\n #plt.imshow(hall['att'].data[0, :, :].numpy())\n #plt.savefig('att_test.png', dpi=200)\n"
] |
[
[
"torch.nn.Dropout",
"scipy.signal.cheby1",
"torch.max",
"torch.ones",
"torch.cat",
"torch.nn.LSTM",
"torch.randn",
"torch.nn.ModuleList",
"torch.min",
"torch.zeros",
"torch.nn.PReLU",
"torch.nn.Tanh",
"scipy.signal.dimpulse",
"torch.FloatTensor",
"torch.nn.Conv1d"
]
] |
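The generator in the record above merges encoder skip connections into the decoder either by summation or by channel concatenation; with `skip_merge == 'concat'` the decoder input width doubles, which is what the `dec_inp *= 2` bookkeeping tracks. A minimal sketch of that behavior, assuming only standard PyTorch (the tensor names here are illustrative, not the module's API):

    import torch

    enc_feat = torch.randn(1, 64, 512)   # encoder activation [batch, channels, time]
    dec_feat = torch.randn(1, 64, 512)   # upsampled decoder activation, same shape

    merged_sum = dec_feat + enc_feat                      # 'sum' merge keeps 64 channels
    merged_cat = torch.cat((dec_feat, enc_feat), dim=1)   # 'concat' merge yields 128 channels
    assert merged_cat.size(1) == 2 * dec_feat.size(1)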
virajmehta/vae-training
|
[
"dc99fea6f012b7256ad65f65972183fc22e82c8e"
] |
[
"utils.py"
] |
[
"import os\nimport jax\nimport jax.numpy as jnp\nfrom jax import lax\nimport jax.nn.initializers as initializers\nimport flax\nimport numpy as np\nimport cv2\nimport json\n\nDATA_DIR = 'data/'\n\n\n\nclass Constants:\n \"\"\"\n Recommended hyperparameters (Feel free to add/remove/modify these values).\n \"\"\"\n lambd = 10\n alpha = 0.1\n # TODO: figure out if this is worth computing\n epsilon_singular_value = 1e-7\n\n\ndef sigmoid(x):\n return 1 / (1 + jnp.exp(-x))\n\n\ndef relu(x):\n return jnp.maximum(x, 0)\n\n\ndef leaky_relu(x):\n return jnp.maximum(x, x * Constants.alpha)\n\n\ndef inv_leaky_relu(x):\n return jnp.minimum(x, x / Constants.alpha)\n\n\ndef inv_dense(x, weight, bias):\n inv_weight = jnp.linalg.inv(weight)\n return jnp.dot((x - bias), inv_weight)\n\n\ndef make_output_dir(name, overwrite, args):\n dirname = get_output_dir(name)\n if os.path.exists(dirname):\n if overwrite:\n for fn in os.listdir(dirname):\n os.remove(os.path.join(dirname, fn))\n else:\n raise ValueError(f\"{dirname} already exists! Use a different name\")\n else:\n os.mkdir(dirname)\n args_name = os.path.join(dirname, 'args.json')\n args = vars(args)\n with open(args_name, 'w') as f:\n json.dump(args, f)\n return dirname\n\n\ndef get_output_dir(name):\n dirname = os.path.join(DATA_DIR, name)\n return dirname\n\n\[email protected]\[email protected]\ndef cross_entropy_loss(logits, label):\n return -logits[label]\n\n\[email protected]\ndef compute_accuracy(logits, labels):\n return jnp.mean(jnp.argmax(logits, -1) == labels)\n\n\ndef img_tile(imgs, fn, save, aspect_ratio=1.0, border=1, border_color=0):\n \"\"\"\n Visualize the WGAN result for each step\n :param imgs: Numpy array of the generated images\n :param path: Path to save visualized results for each epoch\n :param epoch: Epoch index\n :param save: Boolean value to determine whether you want to save the result or not\n \"\"\"\n\n if imgs.ndim != 3 and imgs.ndim != 4:\n raise ValueError('imgs has wrong number of dimensions.')\n n_imgs = imgs.shape[0]\n\n tile_shape = None\n # Grid shape\n img_shape = np.array(imgs.shape[1:3])\n if tile_shape is None:\n img_aspect_ratio = img_shape[1] / float(img_shape[0])\n aspect_ratio *= img_aspect_ratio\n tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))\n tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))\n grid_shape = np.array((tile_height, tile_width))\n else:\n assert len(tile_shape) == 2\n grid_shape = np.array(tile_shape)\n\n # Tile image shape\n tile_img_shape = np.array(imgs.shape[1:])\n tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border\n\n # Assemble tile image\n tile_img = np.empty(tile_img_shape)\n tile_img[:] = border_color\n for i in range(grid_shape[0]):\n for j in range(grid_shape[1]):\n img_idx = j + i * grid_shape[1]\n if img_idx >= n_imgs:\n # No more images - stop filling out the grid.\n break\n\n # -1~1 to 0~1\n img = (imgs[img_idx] + 1) / 2.0 # * 255.0\n\n yoff = (img_shape[0] + border) * i\n xoff = (img_shape[1] + border) * j\n tile_img[yoff:yoff + img_shape[0], xoff:xoff + img_shape[1], ...] 
= img\n\n ##########################################\n # Change code below if you want to save results using PIL\n ##########################################\n tile_img = cv2.resize(tile_img, (256, 256))\n # cv2.imshow(\"Results\", tile_img)\n # cv2.waitKey(1)\n if save:\n cv2.imwrite(fn, tile_img * 255)\n\n\ndef _absolute_dims(rank, dims):\n return tuple([rank + dim if dim < 0 else dim for dim in dims])\n\n\nclass InvertibleBatchNorm(flax.nn.Module):\n \"\"\"Invertible BatchNorm Module.\"\"\"\n\n def apply(self,\n x,\n batch_stats=None,\n use_running_average=False,\n axis=-1,\n momentum=0.99,\n epsilon=1e-5,\n dtype=jnp.float32,\n bias=True,\n scale=True,\n bias_init=initializers.zeros,\n scale_init=initializers.ones,\n axis_name=None,\n axis_index_groups=None):\n \"\"\"Normalizes the input using batch statistics.\n\n Args:\n x: the input to be normalized.\n batch_stats: a `flax.nn.Collection` used to store an exponential moving\n average of the batch statistics (default: None).\n use_running_average: if true, the statistics stored in batch_stats\n will be used instead of computing the batch statistics on the input.\n axis: the feature or non-batch axis of the input.\n momentum: decay rate for the exponential moving average of\n the batch statistics.\n epsilon: a small float added to variance to avoid dividing by zero.\n dtype: the dtype of the computation (default: float32).\n bias: if True, bias (beta) is added.\n scale: if True, multiply by scale (gamma).\n When the next layer is linear (also e.g. nn.relu), this can be disabled\n since the scaling will be done by the next layer.\n bias_init: initializer for bias, by default, zero.\n scale_init: initializer for scale, by default, one.\n axis_name: the axis name used to combine batch statistics from multiple\n devices. See `jax.pmap` for a description of axis names (default: None).\n axis_index_groups: groups of axis indices within that named axis\n representing subsets of devices to reduce over (default: None). For example,\n `[[0, 1], [2, 3]]` would independently batch-normalize over the examples\n on the first two and last two devices. 
See `jax.lax.psum` for more details.\n\n Returns:\n Normalized inputs (the same shape as inputs).\n \"\"\"\n x = jnp.asarray(x, jnp.float32)\n axis = axis if isinstance(axis, tuple) else (axis,)\n axis = _absolute_dims(x.ndim, axis)\n feature_shape = tuple(d if i in axis else 1 for i, d in enumerate(x.shape))\n reduced_feature_shape = tuple(d for i, d in enumerate(x.shape) if i in axis)\n reduction_axis = tuple(i for i in range(x.ndim) if i not in axis)\n if self.is_stateful() or batch_stats:\n ra_mean = self.state('mean', reduced_feature_shape,\n initializers.zeros, collection=batch_stats)\n ra_var = self.state('var', reduced_feature_shape,\n initializers.ones, collection=batch_stats)\n state_mul = self.state('recent_mul', reduced_feature_shape,\n initializers.ones, collection=batch_stats)\n state_mean = self.state('recent_mean', feature_shape,\n initializers.zeros, collection=batch_stats)\n else:\n ra_mean = None\n ra_var = None\n state_mul = None\n state_mean = None\n\n if use_running_average:\n if ra_mean is None:\n raise ValueError('when use_running_averages is True '\n 'either use a stateful context or provide batch_stats')\n mean, var = ra_mean.value, ra_var.value\n else:\n mean = jnp.mean(x, axis=reduction_axis, keepdims=False)\n mean2 = jnp.mean(lax.square(x), axis=reduction_axis, keepdims=False)\n if axis_name is not None and not self.is_initializing():\n concatenated_mean = jnp.concatenate([mean, mean2])\n mean, mean2 = jnp.split(\n lax.pmean(\n concatenated_mean,\n axis_name=axis_name,\n axis_index_groups=axis_index_groups), 2)\n var = mean2 - lax.square(mean)\n\n if ra_mean and not self.is_initializing():\n ra_mean.value = momentum * ra_mean.value + (1 - momentum) * mean\n ra_var.value = momentum * ra_var.value + (1 - momentum) * var\n\n recent_mean = mean.reshape(feature_shape)\n y = x - recent_mean\n if state_mean and not self.is_initializing():\n state_mean.value = recent_mean\n mul = lax.rsqrt(var + epsilon)\n if state_mul and not self.is_initializing():\n state_mul.value = mul\n if scale:\n mul = mul * self.param(\n 'scale', reduced_feature_shape, scale_init).reshape(feature_shape)\n y = y * mul\n if bias:\n y = y + self.param(\n 'bias', reduced_feature_shape, bias_init).reshape(feature_shape)\n return jnp.asarray(y, dtype)\n\n\ndef inv_batch_norm(y,\n params,\n collection,\n bias=True,\n scale=True,\n ):\n mul = collection['recent_mul']\n mean = collection['recent_mean']\n if bias:\n bias = params['bias']\n y = y - bias\n y = y / mul\n if scale:\n scale = params['scale']\n y = y / scale\n x = y + mean\n return x\n\n\ndef get_mask(shape, reverse, use_checkerboard=True):\n '''\n Assumes shape is (batch, height, width, channels) or (height, width, channels)\n '''\n height = shape[-3]\n width = shape[-2]\n channels = shape[-1]\n if use_checkerboard:\n checkerboard = [[((i % 2) + j) % 2 for j in range(width)] for i in range(height)]\n mask = jnp.array(checkerboard).reshape(height, width, 1)\n if reverse:\n mask = 1 - mask\n if len(shape) == 4:\n return mask[jnp.newaxis, ...]\n else:\n return mask\n else:\n half = channels // 2\n zero_mask = jnp.zeros((height, width, half))\n one_mask = jnp.ones((height, width, half))\n if reverse:\n mask = jnp.concatenate((zero_mask, one_mask), axis=-1)\n else:\n mask = jnp.concatenate((one_mask, zero_mask), axis=-1)\n if len(shape) == 4:\n return mask[jnp.newaxis, ...]\n else:\n return mask\n\n\ndef squeeze_2x2(x, reverse=False):\n # block_size = 2\n assert x.ndim == 4\n b, h, w, c = x.shape\n if reverse:\n if c % 4 != 0:\n raise 
ValueError(f\"Number of channels {c} is not divisible by 4\")\n x = x.reshape((b, h, w, c // 4, 2, 2))\n x = jnp.transpose(x, (0, 1, 4, 2, 5, 3))\n x = x.reshape((b, 2 * h, 2 * w, c // 4))\n else:\n if h % 2 != 0 or w % 2 != 0:\n raise ValueError(f\"Expected even spatial dims HxW got {h}x{w}\")\n x = x.reshape((b, h // 2, 2, w // 2, 2, c))\n x = jnp.transpose(x, (0, 1, 3, 5, 2, 4))\n x = x.reshape((b, h // 2, w // 2, c * 4))\n return x\n\n\ndef split_layer_sizes(layer_sizes):\n return [int(size) for size in layer_sizes.split('|')]\n\n\ndef sin_theta_distance(A, B):\n '''\n Assumes A and B are orthogonal matrices\n '''\n U, _, _ = jnp.linalg.svd(A)\n Uprime, _, _ = jnp.linalg.svd(B)\n # eye = jnp.eye(B.shape[0])\n distmat = (U - Uprime)\n return 0.5 * jnp.linalg.norm(distmat, ord=\"fro\")\n"
] |
[
[
"numpy.array",
"numpy.sqrt",
"numpy.empty"
]
] |
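`InvertibleBatchNorm` in the record above stashes the most recent batch mean and rsqrt-variance (`recent_mean`, `recent_mul`) precisely so that `inv_batch_norm` can undo the normalization afterwards. A hedged numpy sketch of that round trip, with plain arrays standing in for the Flax state collection and parameters:

    import numpy as np

    x = np.random.randn(8, 4)
    mean = x.mean(axis=0)
    mul = 1.0 / np.sqrt(x.var(axis=0) + 1e-5)   # stands in for the stored 'recent_mul'
    scale, bias = 1.5, 0.2                      # stands in for the 'scale'/'bias' params

    y = (x - mean) * mul * scale + bias         # forward pass of the affine batch norm
    x_rec = ((y - bias) / mul) / scale + mean   # inversion order used by inv_batch_norm
    assert np.allclose(x, x_rec)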
jairideout/astropy
|
[
"2534a2dd747da3d50644812ce4faab6d909e7f36",
"2534a2dd747da3d50644812ce4faab6d909e7f36"
] |
[
"astropy/convolution/kernels.py",
"astropy/coordinates/tests/test_funcs.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport math\n\nimport numpy as np\n\nfrom .core import Kernel1D, Kernel2D, Kernel\nfrom .utils import KernelSizeError\nfrom ..modeling import models\nfrom ..modeling.core import Fittable1DModel, Fittable2DModel\nfrom ..utils.decorators import deprecated_renamed_argument\n\n__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel',\n 'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel',\n 'Trapezoid1DKernel', 'MexicanHat1DKernel', 'MexicanHat2DKernel',\n 'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel',\n 'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel']\n\n\ndef _round_up_to_odd_integer(value):\n i = math.ceil(value)\n if i % 2 == 0:\n return i + 1\n else:\n return i\n\n\nclass Gaussian1DKernel(Kernel1D):\n \"\"\"\n 1D Gaussian filter kernel.\n\n The Gaussian filter is a filter with great smoothing properties. It is\n isotropic and does not produce artifacts.\n\n Parameters\n ----------\n stddev : number\n Standard deviation of the Gaussian kernel.\n x_size : odd int, optional\n Size of the kernel array. Default = 8 * stddev\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by linearly interpolating\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin. Very slow.\n factor : number, optional\n Factor of oversampling. Default factor = 10. If the factor\n is too large, evaluation can be very slow.\n\n\n See Also\n --------\n Box1DKernel, Trapezoid1DKernel, MexicanHat1DKernel\n\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Gaussian1DKernel\n gauss_1D_kernel = Gaussian1DKernel(10)\n plt.plot(gauss_1D_kernel, drawstyle='steps')\n plt.xlabel('x [pixels]')\n plt.ylabel('value')\n plt.show()\n \"\"\"\n _separable = True\n _is_bool = False\n\n def __init__(self, stddev, **kwargs):\n self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),\n 0, stddev)\n self._default_size = _round_up_to_odd_integer(8 * stddev)\n super().__init__(**kwargs)\n self._truncation = np.abs(1. - self._array.sum())\n\n\nclass Gaussian2DKernel(Kernel2D):\n \"\"\"\n 2D Gaussian filter kernel.\n\n The Gaussian filter is a filter with great smoothing properties. It is\n isotropic and does not produce artifacts.\n\n Parameters\n ----------\n x_stddev : float\n Standard deviation of the Gaussian in x before rotating by theta.\n y_stddev : float\n Standard deviation of the Gaussian in y before rotating by theta.\n theta : float\n Rotation angle in radians. The rotation angle increases\n counterclockwise.\n x_size : odd int, optional\n Size in x direction of the kernel array. Default = 8 * stddev.\n y_size : odd int, optional\n Size in y direction of the kernel array. 
Default = 8 * stddev.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n\n See Also\n --------\n Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,\n TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Gaussian2DKernel\n gaussian_2D_kernel = Gaussian2DKernel(10)\n plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n\n \"\"\"\n _separable = True\n _is_bool = False\n\n @deprecated_renamed_argument('stddev', 'x_stddev', '3.0')\n def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):\n if y_stddev is None:\n y_stddev = x_stddev\n self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev),\n 0, 0, x_stddev=x_stddev,\n y_stddev=y_stddev, theta=theta)\n self._default_size = _round_up_to_odd_integer(\n 8 * np.max([x_stddev, y_stddev]))\n super().__init__(**kwargs)\n self._truncation = np.abs(1. - self._array.sum())\n\n\nclass Box1DKernel(Kernel1D):\n \"\"\"\n 1D Box filter kernel.\n\n The Box filter or running mean is a smoothing filter. It is not isotropic\n and can produce artifacts, when applied repeatedly to the same data.\n\n By default the Box kernel uses the ``linear_interp`` discretization mode,\n which allows non-shifting, even-sized kernels. This is achieved by\n weighting the edge pixels with 1/2. E.g a Box kernel with an effective\n smoothing of 4 pixel would have the following array: [0.5, 1, 1, 1, 0.5].\n\n\n Parameters\n ----------\n width : number\n Width of the filter kernel.\n mode : str, optional\n One of the following discretization modes:\n * 'center'\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp' (default)\n Discretize model by linearly interpolating\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n See Also\n --------\n Gaussian1DKernel, Trapezoid1DKernel, MexicanHat1DKernel\n\n\n Examples\n --------\n Kernel response function:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Box1DKernel\n box_1D_kernel = Box1DKernel(9)\n plt.plot(box_1D_kernel, drawstyle='steps')\n plt.xlim(-1, 9)\n plt.xlabel('x [pixels]')\n plt.ylabel('value')\n plt.show()\n\n \"\"\"\n _separable = True\n _is_bool = True\n\n def __init__(self, width, **kwargs):\n self._model = models.Box1D(1. / width, 0, width)\n self._default_size = _round_up_to_odd_integer(width)\n kwargs['mode'] = 'linear_interp'\n super().__init__(**kwargs)\n self._truncation = 0\n self.normalize()\n\n\nclass Box2DKernel(Kernel2D):\n \"\"\"\n 2D Box filter kernel.\n\n The Box filter or running mean is a smoothing filter. 
It is not isotropic\n and can produce artifact, when applied repeatedly to the same data.\n\n By default the Box kernel uses the ``linear_interp`` discretization mode,\n which allows non-shifting, even-sized kernels. This is achieved by\n weighting the edge pixels with 1/2.\n\n\n Parameters\n ----------\n width : number\n Width of the filter kernel.\n mode : str, optional\n One of the following discretization modes:\n * 'center'\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp' (default)\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n\n See Also\n --------\n Gaussian2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,\n TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Box2DKernel\n box_2D_kernel = Box2DKernel(9)\n plt.imshow(box_2D_kernel, interpolation='none', origin='lower',\n vmin=0.0, vmax=0.015)\n plt.xlim(-1, 9)\n plt.ylim(-1, 9)\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n \"\"\"\n _separable = True\n _is_bool = True\n\n def __init__(self, width, **kwargs):\n self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)\n self._default_size = _round_up_to_odd_integer(width)\n kwargs['mode'] = 'linear_interp'\n super().__init__(**kwargs)\n self._truncation = 0\n self.normalize()\n\n\nclass Tophat2DKernel(Kernel2D):\n \"\"\"\n 2D Tophat filter kernel.\n\n The Tophat filter is an isotropic smoothing filter. It can produce\n artifacts when applied repeatedly on the same data.\n\n Parameters\n ----------\n radius : int\n Radius of the filter kernel.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n\n See Also\n --------\n Gaussian2DKernel, Box2DKernel, MexicanHat2DKernel, Ring2DKernel,\n TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Tophat2DKernel\n tophat_2D_kernel = Tophat2DKernel(40)\n plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n\n \"\"\"\n def __init__(self, radius, **kwargs):\n self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius)\n self._default_size = _round_up_to_odd_integer(2 * radius)\n super().__init__(**kwargs)\n self._truncation = 0\n\n\nclass Ring2DKernel(Kernel2D):\n \"\"\"\n 2D Ring filter kernel.\n\n The Ring filter kernel is the difference between two Tophat kernels of\n different width. 
This kernel is useful for, e.g., background estimation.\n\n Parameters\n ----------\n radius_in : number\n Inner radius of the ring kernel.\n width : number\n Width of the ring kernel.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n See Also\n --------\n Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,\n Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Ring2DKernel\n ring_2D_kernel = Ring2DKernel(9, 8)\n plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n \"\"\"\n def __init__(self, radius_in, width, **kwargs):\n radius_out = radius_in + width\n self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)),\n 0, 0, radius_in, width)\n self._default_size = _round_up_to_odd_integer(2 * radius_out)\n super().__init__(**kwargs)\n self._truncation = 0\n\n\nclass Trapezoid1DKernel(Kernel1D):\n \"\"\"\n 1D trapezoid kernel.\n\n Parameters\n ----------\n width : number\n Width of the filter kernel, defined as the width of the constant part,\n before it begins to slope down.\n slope : number\n Slope of the filter kernel's tails\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by linearly interpolating\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n See Also\n --------\n Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Trapezoid1DKernel\n trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)\n plt.plot(trapezoid_1D_kernel, drawstyle='steps')\n plt.xlabel('x [pixels]')\n plt.ylabel('amplitude')\n plt.xlim(-1, 28)\n plt.show()\n \"\"\"\n _is_bool = False\n\n def __init__(self, width, slope=1., **kwargs):\n self._model = models.Trapezoid1D(1, 0, width, slope)\n self._default_size = _round_up_to_odd_integer(width + 2. 
/ slope)\n super().__init__(**kwargs)\n self._truncation = 0\n self.normalize()\n\n\nclass TrapezoidDisk2DKernel(Kernel2D):\n \"\"\"\n 2D trapezoid kernel.\n\n Parameters\n ----------\n radius : number\n Width of the filter kernel, defined as the width of the constant part,\n before it begins to slope down.\n slope : number\n Slope of the filter kernel's tails\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n See Also\n --------\n Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,\n Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import TrapezoidDisk2DKernel\n trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)\n plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n\n \"\"\"\n _is_bool = False\n\n def __init__(self, radius, slope=1., **kwargs):\n self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)\n self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope)\n super().__init__(**kwargs)\n self._truncation = 0\n self.normalize()\n\n\nclass MexicanHat1DKernel(Kernel1D):\n \"\"\"\n 1D Mexican hat filter kernel.\n\n The Mexican Hat, or inverted Gaussian-Laplace filter, is a\n bandpass filter. It smooths the data and removes slowly varying\n or constant structures (e.g. Background). It is useful for peak or\n multi-scale detection.\n\n This kernel is derived from a normalized Gaussian function, by\n computing the second derivative. This results in an amplitude\n at the kernels center of 1. / (sqrt(2 * pi) * width ** 3). The\n normalization is the same as for `scipy.ndimage.gaussian_laplace`,\n except for a minus sign.\n\n Parameters\n ----------\n width : number\n Width of the filter kernel, defined as the standard deviation\n of the Gaussian function from which it is derived.\n x_size : odd int, optional\n Size in x direction of the kernel array. Default = 8 * width.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by linearly interpolating\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n\n See Also\n --------\n Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel\n\n Examples\n --------\n Kernel response:\n\n .. 
plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import MexicanHat1DKernel\n mexicanhat_1D_kernel = MexicanHat1DKernel(10)\n plt.plot(mexicanhat_1D_kernel, drawstyle='steps')\n plt.xlabel('x [pixels]')\n plt.ylabel('value')\n plt.show()\n\n \"\"\"\n _is_bool = True\n\n def __init__(self, width, **kwargs):\n amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3)\n self._model = models.MexicanHat1D(amplitude, 0, width)\n self._default_size = _round_up_to_odd_integer(8 * width)\n super().__init__(**kwargs)\n self._truncation = np.abs(self._array.sum() / self._array.size)\n\n\nclass MexicanHat2DKernel(Kernel2D):\n \"\"\"\n 2D Mexican hat filter kernel.\n\n The Mexican Hat, or inverted Gaussian-Laplace filter, is a\n bandpass filter. It smooths the data and removes slowly varying\n or constant structures (e.g. Background). It is useful for peak or\n multi-scale detection.\n\n This kernel is derived from a normalized Gaussian function, by\n computing the second derivative. This results in an amplitude\n at the kernels center of 1. / (pi * width ** 4). The normalization\n is the same as for `scipy.ndimage.gaussian_laplace`, except\n for a minus sign.\n\n Parameters\n ----------\n width : number\n Width of the filter kernel, defined as the standard deviation\n of the Gaussian function from which it is derived.\n x_size : odd int, optional\n Size in x direction of the kernel array. Default = 8 * width.\n y_size : odd int, optional\n Size in y direction of the kernel array. Default = 8 * width.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n\n See Also\n --------\n Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,\n TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import MexicanHat2DKernel\n mexicanhat_2D_kernel = MexicanHat2DKernel(10)\n plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n \"\"\"\n _is_bool = False\n\n def __init__(self, width, **kwargs):\n amplitude = 1.0 / (np.pi * width ** 4)\n self._model = models.MexicanHat2D(amplitude, 0, 0, width)\n self._default_size = _round_up_to_odd_integer(8 * width)\n super().__init__(**kwargs)\n self._truncation = np.abs(self._array.sum() / self._array.size)\n\n\nclass AiryDisk2DKernel(Kernel2D):\n \"\"\"\n 2D Airy disk kernel.\n\n This kernel models the diffraction pattern of a circular aperture. This\n kernel is normalized to a peak value of 1.\n\n Parameters\n ----------\n radius : float\n The radius of the Airy disk kernel (radius of the first zero).\n x_size : odd int, optional\n Size in x direction of the kernel array. Default = 8 * radius.\n y_size : odd int, optional\n Size in y direction of the kernel array. 
Default = 8 * radius.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n See Also\n --------\n Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,\n Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import AiryDisk2DKernel\n airydisk_2D_kernel = AiryDisk2DKernel(10)\n plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n \"\"\"\n _is_bool = False\n\n def __init__(self, radius, **kwargs):\n self._model = models.AiryDisk2D(1, 0, 0, radius)\n self._default_size = _round_up_to_odd_integer(8 * radius)\n super().__init__(**kwargs)\n self.normalize()\n self._truncation = None\n\n\nclass Moffat2DKernel(Kernel2D):\n \"\"\"\n 2D Moffat kernel.\n\n This kernel is a typical model for a seeing limited PSF.\n\n Parameters\n ----------\n gamma : float\n Core width of the Moffat model.\n alpha : float\n Power index of the Moffat model.\n x_size : odd int, optional\n Size in x direction of the kernel array. Default = 8 * radius.\n y_size : odd int, optional\n Size in y direction of the kernel array. Default = 8 * radius.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n See Also\n --------\n Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,\n Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel\n\n Examples\n --------\n Kernel response:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from astropy.convolution import Moffat2DKernel\n moffat_2D_kernel = Moffat2DKernel(3, 2)\n plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')\n plt.xlabel('x [pixels]')\n plt.ylabel('y [pixels]')\n plt.colorbar()\n plt.show()\n \"\"\"\n _is_bool = False\n\n def __init__(self, gamma, alpha, **kwargs):\n self._model = models.Moffat2D((gamma - 1.0) / (np.pi * alpha * alpha),\n 0, 0, gamma, alpha)\n fwhm = 2.0 * alpha * (2.0 ** (1.0 / gamma) - 1.0) ** 0.5\n self._default_size = _round_up_to_odd_integer(4.0 * fwhm)\n super().__init__(**kwargs)\n self.normalize()\n self._truncation = None\n\n\nclass Model1DKernel(Kernel1D):\n \"\"\"\n Create kernel from 1D model.\n\n The model has to be centered on x = 0.\n\n Parameters\n ----------\n model : `~astropy.modeling.Fittable1DModel`\n Kernel response function model\n x_size : odd int, optional\n Size in x direction of the kernel array. 
Default = 8 * width.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by linearly interpolating\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. Default factor = 10.\n\n Raises\n ------\n TypeError\n If model is not an instance of `~astropy.modeling.Fittable1DModel`\n\n See also\n --------\n Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`\n CustomKernel : Create kernel from list or array\n\n Examples\n --------\n Define a Gaussian1D model:\n\n >>> from astropy.modeling.models import Gaussian1D\n >>> from astropy.convolution.kernels import Model1DKernel\n >>> gauss = Gaussian1D(1, 0, 2)\n\n And create a custom one dimensional kernel from it:\n\n >>> gauss_kernel = Model1DKernel(gauss, x_size=9)\n\n This kernel can now be used like a usual Astropy kernel.\n \"\"\"\n _separable = False\n _is_bool = False\n\n def __init__(self, model, **kwargs):\n if isinstance(model, Fittable1DModel):\n self._model = model\n else:\n raise TypeError(\"Must be Fittable1DModel\")\n super().__init__(**kwargs)\n\n\nclass Model2DKernel(Kernel2D):\n \"\"\"\n Create kernel from 2D model.\n\n The model has to be centered on x = 0 and y = 0.\n\n Parameters\n ----------\n model : `~astropy.modeling.Fittable2DModel`\n Kernel response function model\n x_size : odd int, optional\n Size in x direction of the kernel array. Default = 8 * width.\n y_size : odd int, optional\n Size in y direction of the kernel array. Default = 8 * width.\n mode : str, optional\n One of the following discretization modes:\n * 'center' (default)\n Discretize model by taking the value\n at the center of the bin.\n * 'linear_interp'\n Discretize model by performing a bilinear interpolation\n between the values at the corners of the bin.\n * 'oversample'\n Discretize model by taking the average\n on an oversampled grid.\n * 'integrate'\n Discretize model by integrating the\n model over the bin.\n factor : number, optional\n Factor of oversampling. 
Default factor = 10.\n\n Raises\n ------\n TypeError\n If model is not an instance of `~astropy.modeling.Fittable2DModel`\n\n See also\n --------\n Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`\n CustomKernel : Create kernel from list or array\n\n Examples\n --------\n Define a Gaussian2D model:\n\n >>> from astropy.modeling.models import Gaussian2D\n >>> from astropy.convolution.kernels import Model2DKernel\n >>> gauss = Gaussian2D(1, 0, 0, 2, 2)\n\n And create a custom two dimensional kernel from it:\n\n >>> gauss_kernel = Model2DKernel(gauss, x_size=9)\n\n This kernel can now be used like a usual astropy kernel.\n\n \"\"\"\n _is_bool = False\n _separable = False\n\n def __init__(self, model, **kwargs):\n self._separable = False\n if isinstance(model, Fittable2DModel):\n self._model = model\n else:\n raise TypeError(\"Must be Fittable2DModel\")\n super().__init__(**kwargs)\n\n\nclass PSFKernel(Kernel2D):\n \"\"\"\n Initialize filter kernel from astropy PSF instance.\n \"\"\"\n _separable = False\n\n def __init__(self):\n raise NotImplementedError('Not yet implemented')\n\n\nclass CustomKernel(Kernel):\n \"\"\"\n Create filter kernel from list or array.\n\n Parameters\n ----------\n array : list or array\n Filter kernel array. Size must be odd.\n\n Raises\n ------\n TypeError\n If array is not a list or array.\n KernelSizeError\n If array size is even.\n\n See also\n --------\n Model2DKernel, Model1DKernel\n\n Examples\n --------\n Define one dimensional array:\n\n >>> from astropy.convolution.kernels import CustomKernel\n >>> import numpy as np\n >>> array = np.array([1, 2, 3, 2, 1])\n >>> kernel = CustomKernel(array)\n >>> kernel.dimension\n 1\n\n Define two dimensional array:\n\n >>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])\n >>> kernel = CustomKernel(array)\n >>> kernel.dimension\n 2\n \"\"\"\n def __init__(self, array):\n self.array = array\n super().__init__(self._array)\n\n @property\n def array(self):\n \"\"\"\n Filter kernel array.\n \"\"\"\n return self._array\n\n @array.setter\n def array(self, array):\n \"\"\"\n Filter kernel array setter\n \"\"\"\n if isinstance(array, np.ndarray):\n self._array = array.astype(np.float64)\n elif isinstance(array, list):\n self._array = np.array(array, dtype=np.float64)\n else:\n raise TypeError(\"Must be list or array.\")\n\n # Check if array is odd in all axes\n odd = all(axes_size % 2 != 0 for axes_size in self.shape)\n if not odd:\n raise KernelSizeError(\"Kernel size must be odd in all axes.\")\n\n # Check if array is bool\n ones = self._array == 1.\n zeros = self._array == 0\n self._is_bool = bool(np.all(np.logical_or(ones, zeros)))\n\n self._truncation = 0.0\n",
"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nTests for miscellaneous functionality in the `funcs` module\n\"\"\"\n\n\nimport pytest\nimport numpy as np\nfrom numpy import testing as npt\n\n\nfrom ... import units as u\nfrom ...time import Time\n\n\ndef test_sun():\n \"\"\"\n Test that `get_sun` works and it behaves roughly as it should (in GCRS)\n \"\"\"\n from ..funcs import get_sun\n\n northern_summer_solstice = Time('2010-6-21')\n northern_winter_solstice = Time('2010-12-21')\n equinox_1 = Time('2010-3-21')\n equinox_2 = Time('2010-9-21')\n\n gcrs1 = get_sun(equinox_1)\n assert np.abs(gcrs1.dec.deg) < 1\n\n gcrs2 = get_sun(Time([northern_summer_solstice, equinox_2, northern_winter_solstice]))\n assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5]*u.deg) < 1*u.deg)\n\n\ndef test_constellations():\n from .. import ICRS, FK5, SkyCoord\n from ..funcs import get_constellation\n\n inuma = ICRS(9*u.hour, 65*u.deg)\n res = get_constellation(inuma)\n res_short = get_constellation(inuma, short_name=True)\n assert res == 'Ursa Major'\n assert res_short == 'UMa'\n assert isinstance(res, str) or getattr(res, 'shape', None) == tuple()\n\n # these are taken from the ReadMe for Roman 1987\n ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]\n decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]\n shortnames = ['UMa', 'Aqr', 'Ori', 'Hya', 'Com', 'Lib', 'CrA', 'Men']\n\n testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1950')\n npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)\n\n # test on a SkyCoord, *and* test Boötes, which is special in that it has a\n # non-ASCII character\n bootest = SkyCoord(15*u.hour, 30*u.deg, frame='icrs')\n boores = get_constellation(bootest)\n assert boores == u'Boötes'\n assert isinstance(boores, str) or getattr(boores, 'shape', None) == tuple()\n\n\ndef test_concatenate():\n from .. import FK5, SkyCoord, ICRS\n from ..funcs import concatenate\n\n # Just positions\n fk5 = FK5(1*u.deg, 2*u.deg)\n sc = SkyCoord(3*u.deg, 4*u.deg, frame='fk5')\n\n res = concatenate([fk5, sc])\n np.testing.assert_allclose(res.ra, [1, 3]*u.deg)\n np.testing.assert_allclose(res.dec, [2, 4]*u.deg)\n\n with pytest.raises(TypeError):\n concatenate(fk5)\n\n with pytest.raises(TypeError):\n concatenate(1*u.deg)\n\n # positions and velocities\n fr = ICRS(ra=10*u.deg, dec=11.*u.deg,\n pm_ra_cosdec=12*u.mas/u.yr,\n pm_dec=13*u.mas/u.yr)\n sc = SkyCoord(ra=20*u.deg, dec=21.*u.deg,\n pm_ra_cosdec=22*u.mas/u.yr,\n pm_dec=23*u.mas/u.yr)\n\n res = concatenate([fr, sc])\n\n with pytest.raises(ValueError):\n concatenate([fr, fk5])\n\n fr2 = ICRS(ra=10*u.deg, dec=11.*u.deg)\n with pytest.raises(ValueError):\n concatenate([fr, fr2])\n\n\ndef test_concatenate_representations():\n from ..funcs import concatenate_representations\n from .. import representation as r\n\n reps = [r.CartesianRepresentation([1, 2, 3.]*u.kpc),\n r.SphericalRepresentation(lon=1*u.deg, lat=2.*u.deg,\n distance=10*u.pc),\n r.UnitSphericalRepresentation(lon=1*u.deg, lat=2.*u.deg),\n r.CartesianRepresentation(np.ones((3, 100)) * u.kpc),\n r.CartesianRepresentation(np.ones((3, 16, 8)) * u.kpc)]\n\n reps.append(reps[0].with_differentials(\n r.CartesianDifferential([1, 2, 3.] 
* u.km/u.s)))\n reps.append(reps[1].with_differentials(\n r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))\n reps.append(reps[2].with_differentials(\n r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))\n reps.append(reps[2].with_differentials(\n r.UnitSphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr)))\n reps.append(reps[2].with_differentials(\n {'s': r.RadialDifferential(1*u.km/u.s)}))\n reps.append(reps[3].with_differentials(\n r.CartesianDifferential(*np.ones((3, 100)) * u.km/u.s)))\n reps.append(reps[4].with_differentials(\n r.CartesianDifferential(*np.ones((3, 16, 8)) * u.km/u.s)))\n\n # Test that combining all of the above with itself succeeds\n for rep in reps:\n if not rep.shape:\n expected_shape = (2, )\n else:\n expected_shape = (2 * rep.shape[0], ) + rep.shape[1:]\n\n tmp = concatenate_representations((rep, rep))\n assert tmp.shape == expected_shape\n\n if 's' in rep.differentials:\n assert tmp.differentials['s'].shape == expected_shape\n\n # Try combining 4, just for something different\n for rep in reps:\n if not rep.shape:\n expected_shape = (4, )\n else:\n expected_shape = (4 * rep.shape[0], ) + rep.shape[1:]\n\n tmp = concatenate_representations((rep, rep, rep, rep))\n assert tmp.shape == expected_shape\n\n if 's' in rep.differentials:\n assert tmp.differentials['s'].shape == expected_shape\n\n # Test that combining pairs fails\n with pytest.raises(TypeError):\n concatenate_representations((reps[0], reps[1]))\n\n with pytest.raises(ValueError):\n concatenate_representations((reps[0], reps[5]))\n\n # Check that passing in a single object fails\n with pytest.raises(TypeError):\n concatenate_representations(reps[0])\n"
] |
[
[
"numpy.logical_or",
"numpy.max",
"numpy.array",
"numpy.sqrt"
],
[
"numpy.ones",
"numpy.abs",
"numpy.testing.assert_allclose"
]
] |
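All of the kernels in the record above size their arrays with the same rule: the default support (e.g. 8 * stddev for the Gaussians) is rounded up to the next odd integer so the kernel has a well-defined center pixel. A small worked check of that rule, restating the private helper rather than importing astropy:

    import math

    def round_up_to_odd(value):
        # mirrors _round_up_to_odd_integer in kernels.py above
        i = math.ceil(value)
        return i + 1 if i % 2 == 0 else i

    assert round_up_to_odd(8 * 1.0) == 9   # ceil gives an even 8, bumped to 9
    assert round_up_to_odd(8 * 1.1) == 9   # ceil gives an odd 9, kept as-is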
j40903272/bottom-up-attention-py3
|
[
"cceb719133023895a4b63569c5ba9a25fcd2c86b"
] |
[
"lib/datasets/vg.py"
] |
[
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import zip\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nimport os\nfrom datasets.imdb import imdb\nimport datasets.ds_utils as ds_utils\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport scipy.sparse\nimport utils.cython_bbox\nimport pickle\nimport gzip\nimport PIL\nimport json\nfrom .vg_eval import vg_eval\nfrom fast_rcnn.config import cfg\n\nclass vg(imdb):\n def __init__(self, version, image_set, ):\n imdb.__init__(self, 'vg_' + version + '_' + image_set)\n self._version = version\n self._image_set = image_set\n self._data_path = os.path.join(cfg.DATA_DIR, 'genome')\n self._img_path = os.path.join(cfg.DATA_DIR, 'vg')\n # VG specific config options\n self.config = {'cleanup' : False}\n \n # Load classes\n self._classes = ['__background__']\n self._class_to_ind = {}\n self._class_to_ind[self._classes[0]] = 0\n with open(os.path.join(self._data_path, self._version, 'objects_vocab.txt')) as f:\n count = 1\n for object in f.readlines():\n names = [n.lower().strip() for n in object.split(',')]\n self._classes.append(names[0])\n for n in names:\n self._class_to_ind[n] = count\n count += 1 \n \n # Load attributes\n self._attributes = ['__no_attribute__']\n self._attribute_to_ind = {}\n self._attribute_to_ind[self._attributes[0]] = 0\n with open(os.path.join(self._data_path, self._version, 'attributes_vocab.txt')) as f:\n count = 1\n for att in f.readlines():\n names = [n.lower().strip() for n in att.split(',')]\n self._attributes.append(names[0])\n for n in names:\n self._attribute_to_ind[n] = count\n count += 1 \n \n # Load relations\n self._relations = ['__no_relation__']\n self._relation_to_ind = {}\n self._relation_to_ind[self._relations[0]] = 0\n with open(os.path.join(self._data_path, self._version, 'relations_vocab.txt')) as f:\n count = 1\n for rel in f.readlines():\n names = [n.lower().strip() for n in rel.split(',')]\n self._relations.append(names[0])\n for n in names:\n self._relation_to_ind[n] = count\n count += 1 \n \n self._image_ext = '.jpg'\n self._image_index, self._id_to_dir = self._load_image_set_index()\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n folder = self._id_to_dir[index]\n image_path = os.path.join(self._img_path, folder,\n str(index) + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n \n def _image_split_path(self):\n if self._image_set == \"minitrain\":\n return os.path.join(self._data_path, 'train.txt')\n if self._image_set == \"minival\":\n return os.path.join(self._data_path, 'val.txt')\n else:\n return os.path.join(self._data_path, self._image_set+'.txt')\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n training_split_file = self._image_split_path()\n assert 
os.path.exists(training_split_file), \\\n 'Path does not exist: {}'.format(training_split_file)\n with open(training_split_file) as f:\n metadata = f.readlines()\n if self._image_set == \"minitrain\":\n metadata = metadata[:1000]\n elif self._image_set == \"minival\":\n metadata = metadata[:100]\n \n image_index = []\n id_to_dir = {}\n for line in metadata:\n im_file,ann_file = line.split()\n image_id = int(ann_file.split('/')[-1].split('.')[0])\n filename = self._annotation_path(image_id)\n if os.path.exists(filename):\n # Some images have no bboxes after object filtering, so there\n # is no xml annotation for these.\n tree = ET.parse(filename)\n for obj in tree.findall('object'):\n obj_name = obj.find('name').text.lower().strip()\n if obj_name in self._class_to_ind:\n # We have to actually load and check these to make sure they have\n # at least one object actually in vocab\n image_index.append(image_id)\n id_to_dir[image_id] = im_file.split('/')[0]\n break\n return image_index, id_to_dir\n \n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n fid = gzip.open(cache_file,'rb') \n roidb = pickle.load(fid)\n fid.close()\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_vg_annotation(index)\n for index in self.image_index]\n\n fid = gzip.open(cache_file,'wb') \n pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)\n fid.close()\n print('wrote gt roidb to {}'.format(cache_file))\n return gt_roidb\n \n def _get_size(self, index):\n return PIL.Image.open(self.image_path_from_index(index)).size\n \n def _annotation_path(self, index):\n return os.path.join(self._data_path, 'xml', str(index) + '.xml')\n \n def _load_vg_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n width, height = self._get_size(index)\n filename = self._annotation_path(index)\n tree = ET.parse(filename)\n objs = tree.findall('object')\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n # Max of 16 attributes are observed in the data\n gt_attributes = np.zeros((num_objs, 16), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n obj_dict = {}\n ix = 0\n for obj in objs:\n obj_name = obj.find('name').text.lower().strip()\n if obj_name in self._class_to_ind:\n bbox = obj.find('bndbox')\n x1 = max(0,float(bbox.find('xmin').text))\n y1 = max(0,float(bbox.find('ymin').text))\n x2 = min(width-1,float(bbox.find('xmax').text))\n y2 = min(height-1,float(bbox.find('ymax').text))\n # If bboxes are not positive, just give whole image coords (there are a few examples)\n if x2 < x1 or y2 < y1:\n print('Failed bbox in %s, object %s' % (filename, obj_name))\n x1 = 0\n y1 = 0\n x2 = width-1\n y2 = width-1\n cls = self._class_to_ind[obj_name]\n obj_dict[obj.find('object_id').text] = ix\n atts = obj.findall('attribute')\n n = 0\n for att in atts:\n att = att.text.lower().strip()\n if att in self._attribute_to_ind:\n gt_attributes[ix, n] = self._attribute_to_ind[att]\n n += 1\n if n >= 16:\n break\n boxes[ix, :] = [x1, y1, x2, 
y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n ix += 1\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n gt_attributes = scipy.sparse.csr_matrix(gt_attributes)\n\n rels = tree.findall('relation')\n num_rels = len(rels) \n gt_relations = set() # Avoid duplicates\n for rel in rels:\n pred = rel.find('predicate').text\n if pred: # One is empty\n pred = pred.lower().strip()\n if pred in self._relation_to_ind:\n try:\n triple = []\n triple.append(obj_dict[rel.find('subject_id').text])\n triple.append(self._relation_to_ind[pred])\n triple.append(obj_dict[rel.find('object_id').text])\n gt_relations.add(tuple(triple))\n except:\n pass # Object not in dictionary\n gt_relations = np.array(list(gt_relations), dtype=np.int32)\n\n return {'boxes' : boxes,\n 'gt_classes': gt_classes,\n 'gt_attributes' : gt_attributes,\n 'gt_relations' : gt_relations,\n 'gt_overlaps' : overlaps,\n 'width' : width,\n 'height': height,\n 'flipped' : False,\n 'seg_areas' : seg_areas}\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(self.classes, all_boxes, output_dir)\n self._do_python_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_vg_results_file_template(output_dir).format(cls)\n os.remove(filename) \n \n def evaluate_attributes(self, all_boxes, output_dir):\n self._write_voc_results_file(self.attributes, all_boxes, output_dir)\n self._do_python_eval(output_dir, eval_attributes = True)\n if self.config['cleanup']:\n for cls in self._attributes:\n if cls == '__no_attribute__':\n continue\n filename = self._get_vg_results_file_template(output_dir).format(cls)\n os.remove(filename)\n \n def _get_vg_results_file_template(self, output_dir):\n filename = 'detections_' + self._image_set + '_{:s}.txt'\n path = os.path.join(output_dir, filename)\n return path\n\n def _write_voc_results_file(self, classes, all_boxes, output_dir):\n for cls_ind, cls in enumerate(classes):\n if cls == '__background__':\n continue\n print('Writing \"{}\" vg results file'.format(cls))\n filename = self._get_vg_results_file_template(output_dir).format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(str(index), dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n \n \n def _do_python_eval(self, output_dir, pickle=True, eval_attributes = False):\n # We re-use parts of the pascal voc python code for visual genome\n aps = []\n nposs = []\n thresh = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = False\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n # Load ground truth \n gt_roidb = self.gt_roidb()\n if eval_attributes:\n classes = self._attributes\n else:\n classes = self._classes\n for i, cls in enumerate(classes):\n if cls == '__background__' or cls == '__no_attribute__':\n continue\n filename = self._get_vg_results_file_template(output_dir).format(cls)\n rec, prec, ap, scores, npos = vg_eval(\n filename, gt_roidb, self.image_index, i, ovthresh=0.5,\n use_07_metric=use_07_metric, eval_attributes=eval_attributes)\n\n # Determine per class detection thresholds that maximise f score\n if npos > 1:\n f = np.nan_to_num(old_div((prec*rec),(prec+rec)))\n thresh += [scores[np.argmax(f)]]\n else: \n thresh += [0]\n aps += [ap]\n nposs += [float(npos)]\n print('AP for {} = {:.4f} (npos={:,})'.format(cls, ap, npos))\n if pickle:\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap, \n 'scores': scores, 'npos':npos}, f)\n \n # Set thresh to mean for classes with poor results \n thresh = np.array(thresh)\n avg_thresh = np.mean(thresh[thresh!=0])\n thresh[thresh==0] = avg_thresh\n if eval_attributes:\n filename = 'attribute_thresholds_' + self._image_set + '.txt'\n else:\n filename = 'object_thresholds_' + self._image_set + '.txt'\n path = os.path.join(output_dir, filename) \n with open(path, 'wt') as f:\n for i, cls in enumerate(classes[1:]):\n f.write('{:s} {:.3f}\\n'.format(cls, thresh[i])) \n \n weights = np.array(nposs)\n weights /= weights.sum()\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('Weighted Mean AP = {:.4f}'.format(np.average(aps, weights=weights)))\n print('Mean Detection Threshold = {:.3f}'.format(avg_thresh))\n print('~~~~~~~~')\n print('Results:')\n for ap,npos in zip(aps,nposs):\n print('{:.3f}\\t{:.3f}'.format(ap,npos))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** PASCAL VOC Python eval code.')\n print('--------------------------------------------------------------') \n\n \nif __name__ == '__main__':\n d = datasets.vg('val')\n res = d.roidb\n from IPython import embed; embed()\n"
] |
[
[
"numpy.argmax",
"numpy.mean",
"numpy.average",
"numpy.array",
"numpy.zeros"
]
] |
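`_do_python_eval` in the record above picks a per-class detection threshold by maximizing prec*rec/(prec+rec) over the score-sorted precision/recall curve (this is half of F1, so the argmax is unchanged), then falls back to the mean threshold for classes where no positive is found. A sketch of that selection with made-up numbers:

    import numpy as np

    prec   = np.array([1.0, 0.9, 0.7, 0.5])     # hypothetical precision values
    rec    = np.array([0.1, 0.4, 0.6, 0.8])     # matching recall values
    scores = np.array([0.95, 0.80, 0.60, 0.40]) # detection scores at those operating points

    f = np.nan_to_num((prec * rec) / (prec + rec))  # proportional to F1, same argmax
    thresh = scores[np.argmax(f)]                   # -> 0.60 for these numbers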
jinliangwei/tensor2tensor-1.11
|
[
"bfca92e0c48956ad5be987b25b4309a59ff61bb5"
] |
[
"tensor2tensor/models/research/moe.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Mixture-of-experts code.\n\nInterfaces and algorithms are under development and subject to rapid change\nwithout notice.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport mesh_tensorflow as mtf\nimport tensorflow as tf\n\n\ndef transformer_moe_layer_v1(inputs, output_dim, hparams, train,\n master_dtype=tf.bfloat16,\n slice_dtype=tf.float32):\n \"\"\"Local mixture of experts that works well on TPU.\n\n Adapted from the paper https://arxiv.org/abs/1701.06538\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_num_experts: number of experts\n hparams.moe_hidden_size: size of hidden layer in each expert\n hparams.moe_group_size: size of each \"group\" for gating purposes\n hparams.moe_capacity_factor_train: a float\n hparams.moe_capacity_factor_eval: a float\n hparams.moe_gating: a string\n + all hyperparmeters used by _top_2_gating()\n\n The number of parameters in the gating network is:\n (input_dim.size * hparams.num_experts) +\n\n The number of parameters in the experts themselves is:\n (hparams.num_experts\n * (input_dim.size + output_dim.size)\n * hparams.moe_hidden_size)\n\n The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting\n of the representations of all positions in a batch of sequences.\n\n Each position of each sequence is sent to 0-2 experts. The expert\n choices and the combination weights are determined by a learned gating\n function.\n\n This function returns a small auxiliary loss that should be added to the\n training loss of the model. This loss helps to balance expert usage.\n Without the loss, it is very likely that a few experts will be trained and\n the rest will starve.\n\n Several hacks are necessary to get around current TPU limitations:\n\n - To ensure static shapes, we enforce (by truncation/padding)\n that each sequence send the same number of elements to each expert.\n\n It would make more sense to enforce this equality over the entire batch,\n but due to our hacked-up gather-by-matmul implementation, we need to divide\n the batch into \"groups\". For each group, the same number of elements\n are sent to each expert.\n\n TODO(noam): Factor this code better. 
We want to be able to substitute\n different code for the experts themselves.\n\n Args:\n inputs: a mtf.Tensor with shape [<batch_dims...>, length_dim, input_dim]\n output_dim: a mtf.Dimension (for Transformer, this is input_dim)\n hparams: model hyperparameters\n train: a boolean\n master_dtype: a tf.dtype\n slice_dtype: a tf.dtype\n\n Returns:\n outputs: a Tensor with shape [<batch_dims...>, length_dim, output_dim]\n loss: a mtf scalar\n\n Raises:\n ValueError: on unrecognized hparams.moe_gating\n \"\"\"\n orig_inputs = inputs\n input_dim = inputs.shape.dims[-1]\n hidden_dim = mtf.Dimension(\"expert_hidden\", hparams.moe_hidden_size)\n experts_dim = mtf.Dimension(\"experts\", hparams.moe_num_experts)\n group_size_dim = mtf.Dimension(\"group\", hparams.moe_group_size)\n batch_dim = mtf.Dimension(\n orig_inputs.shape[0].name,\n orig_inputs.shape.size // (group_size_dim.size * input_dim.size))\n inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])\n\n # Each sequence sends expert_capacity positions to each expert.\n capacity_factor = (\n hparams.moe_capacity_factor_train if train else\n hparams.moe_capacity_factor_eval)\n expert_capacity = min(\n group_size_dim.size,\n int((group_size_dim.size * capacity_factor) / experts_dim.size))\n expert_capacity_dim = mtf.Dimension(\"expert_capacity\", expert_capacity)\n\n experts_dim_unsplit = mtf.Dimension(\"expert_unsplit\", experts_dim.size)\n batch_dim_unsplit = mtf.Dimension(\"batch_unsplit\", batch_dim.size)\n\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor, combine_tensor, loss = _top_2_gating(\n inputs=inputs,\n outer_expert_dims=None,\n experts_dim=experts_dim_unsplit,\n expert_capacity_dim=expert_capacity_dim,\n hparams=hparams,\n train=train)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape(\n [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))\n\n expert_inputs = mtf.reshape(expert_inputs, mtf.Shape(\n [experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))\n\n # Now feed the expert inputs through the experts.\n h = mtf.layers.dense(\n expert_inputs, hidden_dim, expert_dims=[experts_dim],\n activation=mtf.relu, use_bias=False, master_dtype=master_dtype,\n slice_dtype=slice_dtype, name=\"x0\")\n expert_output = mtf.layers.dense(\n h, output_dim, expert_dims=[experts_dim], use_bias=False,\n master_dtype=master_dtype, slice_dtype=slice_dtype, name=\"x1\")\n\n expert_output = mtf.reshape(expert_output, mtf.Shape(\n [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))\n\n output = mtf.einsum([expert_output, combine_tensor], mtf.Shape(\n [batch_dim, group_size_dim, output_dim]))\n\n output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])\n\n return output, loss * hparams.moe_loss_coef\n\n\ndef transformer_moe_layer_v2(inputs, output_dim, hparams, train,\n master_dtype=tf.bfloat16, slice_dtype=tf.float32):\n \"\"\"2-level mixture of experts.\n\n Adapted from the paper https://arxiv.org/abs/1701.06538\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_num_experts: number of experts\n hparams.moe_hidden_size: size of hidden layer in each expert\n 
hparams.moe_group_size: size of each \"group\" for gating purposes\n hparams.moe_capacity_factor_train: a float\n hparams.moe_capacity_factor_eval: a float\n hparams.moe_capacity_factor_second_level: a float\n hparams.moe_gating: a string\n + all hyperparmeters used by _top_2_gating()\n\n One set of params for experts in first level and different of hparams\n per expert in the second level.\n The number of parameters in the gating network is:\n (input_dim.size * (hparams.num_experts) +\n (moe_hidden_size * hparams.num_experts) * hparams.num_experts\n\n\n The number of parameters in the experts themselves is:\n (hparams.num_experts\n * (input_dim.size + output_dim.size)\n * hparams.moe_hidden_size)\n\n The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting\n of the representations of all positions in a batch of sequences.\n\n Each position of each sequence is sent to 0-3 experts. The expert\n choices and the combination weights are determined by a learned gating\n function.\n\n This function returns a small auxiliary loss that should be added to the\n training loss of the model. This loss helps to balance expert usage.\n Without the loss, it is very likely that a few experts will be trained and\n the rest will starve.\n\n Several hacks are necessary to get around current TPU limitations:\n\n - To ensure static shapes, we enforce (by truncation/padding)\n that each sequence send the same number of elements to each expert.\n\n It would make more sense to enforce this equality over the entire batch,\n but due to our hacked-up gather-by-matmul implementation, we need to divide\n the batch into \"groups\". For each group, the same number of elements\n are sent to each expert.\n\n TODO(noam): Factor this code better. We want to be able to substitute\n different code for the experts themselves.\n\n Dimensions cheat sheet:\n a, b: batch size\n l: original sequence length\n m: input depth\n n: output depth\n g, h: number of groups\n s, t: group size\n x, y: number of experts\n c, d: expert capacity\n\n input: [a0, b1, l, m]\n input: [a0, g1, s, m]\n dispatch_tensor_x: [a0, g1, s, x, c]\n expert_input: [a0, g1, x, c, m]\n alltoall: [a0, g, x1, c, m]\n alltoall: [a0, g, x1, c, m]\n transpose: [x1, a0, g, c, m]\n reshape: [x1, h0, s, m]\n assignment2: [x1, h0, t, y, d]\n expert_input2: [x1, h0, y, d, m]\n alltoall: [x1, h, y0, d, m]\n ...\n reverse of that\n\n gating params 0: [m, x]\n gating params 1: [x1, m, y]\n\n expert params:\n [x1, y0, m, hidden]\n [x1, y0, hidden, n]\n\n Args:\n inputs: a mtf.Tensor with shape [a, b, l, m]\n output_dim: a mtf.Dimension (for Transformer, this is input_dim)\n hparams: model hyperparameters\n train: a boolean\n master_dtype: a tf.dtype\n slice_dtype: a tf.dtype\n\n Returns:\n outputs: a Tensor with shape [a, b, l, n]\n loss: a mtf scalar\n\n Raises:\n ValueError: on unrecognized hparams.moe_gating\n \"\"\"\n insert_outer_batch_dim = (len(inputs.shape.dims) == 3)\n if insert_outer_batch_dim:\n inputs = mtf.reshape(\n inputs, [mtf.Dimension(\"outer_batch\", 1)] + inputs.shape.dims)\n\n assert len(hparams.moe_num_experts) == 2\n a0, b1, l, m = inputs.shape.dims\n hidden_dim = mtf.Dimension(\"expert_hidden\", hparams.moe_hidden_size)\n x1 = mtf.Dimension(\"expert_x\", hparams.moe_num_experts[0])\n y0 = mtf.Dimension(\"expert_y\", hparams.moe_num_experts[1])\n x = mtf.Dimension(\"expert_x_unsplit\", hparams.moe_num_experts[0])\n y = mtf.Dimension(\"expert_y_unsplit\", hparams.moe_num_experts[1])\n n = output_dim\n\n # We \"cheat\" here and look at 
the mesh shape and layout. This is to ensure\n # that the number of groups (g.size) is a multiple of the mesh dimension\n # over which those groups are split.\n num_groups, group_size = _split_into_groups(\n b1.size * l.size, hparams.moe_group_size,\n mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1))\n g1 = mtf.Dimension(b1.name, num_groups)\n g = mtf.Dimension(b1.name + \"_unsplit\", g1.size)\n s = mtf.Dimension(\"group_size_x\", group_size)\n\n # Each sequence sends (at most?) expert_capacity positions to each expert.\n # Static expert_capacity dimension is needed for expert batch sizes\n capacity_factor = (\n hparams.moe_capacity_factor_train if train else\n hparams.moe_capacity_factor_eval)\n expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))\n expert_capacity = max(expert_capacity, 4)\n c = mtf.Dimension(\"expert_capacity_x\", expert_capacity)\n\n # We \"cheat\" here and look at the mesh shape and layout. This is to ensure\n # that the number of groups (h.size) is a multiple of the mesh dimension\n # over which those groups are split.\n num_groups, group_size = _split_into_groups(\n a0.size * g.size * c.size,\n hparams.moe_group_size,\n mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0))\n t = mtf.Dimension(\"group_size_y\", group_size)\n h0 = mtf.Dimension(a0.name, num_groups)\n h = mtf.Dimension(a0.name + \"_unsplit\", h0.size)\n\n expert_capacity = min(\n t.size,\n int((t.size * hparams.moe_capacity_factor_second_level) / y.size))\n expert_capacity = max(expert_capacity, 4)\n d = mtf.Dimension(\"expert_capacity_y\", expert_capacity)\n\n # First level of expert routing\n # Reshape the inner batch size to a multiple of group_dim g1 and\n # group_size_dim s.\n inputs = mtf.reshape(inputs, [a0, g1, s, m])\n\n # Get the assignments for the first level.\n # dispatch_tensor_x has shape [a0, g1, s, x, c]\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(\n inputs=inputs,\n outer_expert_dims=None,\n experts_dim=x,\n expert_capacity_dim=c,\n hparams=hparams,\n train=train)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # Now create expert_inputs based on the assignments.\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])\n\n # we construct an \"importance\" Tensor for the inputs to the second-level\n # gating. The importance of an input is 1.0 if it represents the\n # first-choice expert-group and 0.5 if it represents the second-choice expert\n # group. This is used by the second-level gating.\n importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])\n importance = 0.5 * (\n mtf.to_float(mtf.greater(importance, 0.5)) +\n mtf.to_float(mtf.greater(importance, 0.0)))\n\n # First level, all to all. 
Here we change the split dimension from g1 to x1.\n expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(\n [x1, a0, g, c, m]))\n importance = mtf.reshape(importance, [x1, a0, g, c])\n\n # Second level of expert routing\n # Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0\n # and group_size_dim t.\n inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])\n importance = mtf.reshape(importance, [x1, h0, t])\n\n # Get the assignments for the second level.\n # dispatch_tensor_y has shape [x1, h0, t, y, d]\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(\n inputs=inputs_y,\n outer_expert_dims=[x1],\n experts_dim=y,\n expert_capacity_dim=d,\n hparams=hparams,\n train=train,\n importance=importance)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # Now create expert_inputs based on the assignments.\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])\n\n # Second level, all to all. Here we change the split dimension from h0 to y0.\n expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(\n [y0, x1, h, d, m]))\n\n hidden_output = mtf.layers.dense(\n expert_inputs_y, hidden_dim, expert_dims=[y0, x1],\n activation=mtf.relu, use_bias=False, master_dtype=master_dtype,\n slice_dtype=slice_dtype, name=\"expert0\")\n expert_output = mtf.layers.dense(\n hidden_output, output_dim, expert_dims=[y0, x1],\n use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype,\n name=\"expert1\")\n\n # NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)\n # expert_output has shape [y0, x1, h, d, n]\n\n # alltoall\n expert_output = mtf.reshape(expert_output, mtf.Shape(\n [y, x1, h0, d, n]))\n\n # combine results from inner level\n output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])\n\n # Reshape the combined tensor from inner level to now contain outer_batch_dim\n # a0 and group_dim g\n output = mtf.reshape(output_y, [x1, a0, g, c, n])\n\n # alltoall from expert_dim x to group_dim g1\n expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))\n\n # combine results from outer level\n output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])\n\n # Reshape the combined tensor to now contain inner_batch_dim\n # b1 and the original sequence length\n output = mtf.reshape(output_x, [a0, b1, l, n])\n if insert_outer_batch_dim:\n output = mtf.reshape(output, [b1, l, n])\n return output, (loss_outer + loss_inner) * hparams.moe_loss_coef\n\n\ndef _top_2_gating(\n inputs, outer_expert_dims, experts_dim, expert_capacity_dim,\n hparams, train, importance=None):\n \"\"\"Compute gating for mixture-of-experts in TensorFlow.\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_use_second_place_loss: a boolean\n hparams.moe_second_policy_train: a string\n hparams.moe_second_policy_eval: a string\n hparams.moe_second_threshold: a float\n\n The returned forward assignment is a tensor used to map (via einsum) from the\n inputs to the expert_inputs. Likewise, the returned combine_tensor is\n used to map (via einsum) from the expert outputs to the outputs. Both the\n forward and backward assignments are mostly zeros. 
The shapes of the tensors\n are as follows.\n\n inputs: [<batch_dims>, group_size_dim, input_dim]\n importance: [<batch_dims>, group_size_dim]\n dispatch_tensor:\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n expert_inputs:\n [<batch_dims>, experts_dim, expert_capacity_dim, input_dim]\n\n expert_outputs: [<batch_dims>, experts_dim, expert_capacity_dim, output_dim]\n combine_tensor:\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n outputs: [<batch_dims>, group_size_dim, output_dim]\n\n \"importance\" is an optional tensor with one floating-point value for each\n input vector. If the importance of an input is 1.0, then we send it to\n up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most\n one expert. If importance == 0.0, then we send it to no experts.\n\n We use \"importance\" at the second-level gating function of a hierarchical\n mixture of experts. Inputs to the first-choice expert-group get importance\n 1.0. Inputs to the second-choice expert group get importance 0.5.\n Inputs that represent padding get importance 0.0.\n\n Args:\n inputs: a mtf.Tensor with shape [<batch_dims>, group_size_dim, input_dim]\n outer_expert_dims: an optional list of dimensions. This is for the case\n where we are at an inner level of a hierarchical MoE.\n experts_dim: a Dimension (the number of experts)\n expert_capacity_dim: a Dimension (number of examples per group per expert)\n hparams: model hyperparameters.\n train: a boolean\n importance: an optional tensor with shape [<batch_dims>, group_size_dim]\n\n Returns:\n dispatch_tensor: a Tensor with shape\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n combine_tensor: a Tensor with shape\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n loss: a mtf scalar\n\n Raises:\n ValueError: on illegal hyperparameters\n \"\"\"\n group_size_dim, unused_input_dim = inputs.shape.dims[-2:]\n\n raw_gates = mtf.softmax(mtf.layers.dense(\n inputs, experts_dim, use_bias=False,\n expert_dims=outer_expert_dims), experts_dim)\n\n # The internals of this function run in float32.\n # bfloat16 seems to reduce quality.\n raw_gates = mtf.to_float(raw_gates)\n\n expert_capacity_f = float(expert_capacity_dim.size)\n\n # FIND TOP 2 EXPERTS PER POSITON\n # Find the top expert for each position. 
shape=[batch, group]\n index_1, gate_1 = mtf.top_1(raw_gates, experts_dim)\n # [batch, group, experts]\n mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)\n density_1_proxy = raw_gates\n if importance is not None:\n mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))\n gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))\n density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))\n gates_without_top_1 = raw_gates * (1.0 - mask_1)\n # [batch, group]\n index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim)\n # [batch, group, experts]\n mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)\n if importance is not None:\n mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))\n\n denom = gate_1 + gate_2 + 1e-9\n gate_1 /= denom\n gate_2 /= denom\n\n # BALANCING LOSSES\n # shape = [batch, experts]\n # We want to equalize the fraction of the batch assigned to each expert\n density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)\n # Something continuous that is correlated with what we want to equalize.\n density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)\n #density_1 = mtf.Print(\n # density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])],\n # \"density_1\", summarize=1000)\n loss = (mtf.reduce_mean(density_1_proxy * density_1)\n * float(experts_dim.size * experts_dim.size))\n\n if hparams.moe_use_second_place_loss:\n # Also add a loss to encourage all experts to be used equally also as the\n # second-place expert. Experimentally, this seems to be a wash.\n # We want to equalize the fraction of the batch assigned to each expert:\n density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)\n # As a proxy for density_2, we renormalize the raw gates after the top one\n # has been removed.\n normalized = gates_without_top_1 / (\n mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)\n density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)\n loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)\n * float(experts_dim.size * experts_dim.size))\n loss += loss_2 * 0.5\n\n # Depending on the policy in the hparams, we may drop out some of the\n # second-place experts.\n policy = (\n hparams.moe_second_policy_train if train else\n hparams.moe_second_policy_eval)\n threshold = (\n hparams.moe_second_threshold_train if train else\n hparams.moe_second_threshold_eval)\n if policy == \"all\":\n # Use second-place experts for all examples.\n pass\n elif policy == \"none\":\n # Never use second-place experts for all examples.\n mask_2 = mtf.zeros_like(mask_2)\n elif policy == \"threshold\":\n # Use second-place experts if gate_2 > threshold.\n mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))\n elif policy == \"random\":\n # Use second-place experts with probablity min(1.0, gate_2 / threshold).\n mask_2 *= mtf.to_float(\n mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),\n gate_2 / max(threshold, 1e-9)))\n else:\n raise ValueError(\"Unknown policy %s\" % policy)\n #mask_2 = mtf.Print(\n # mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])],\n # \"density_2\", summarize=1000)\n\n # COMPUTE ASSIGNMENT TO EXPERTS\n # [batch, group, experts]\n # This is the position within the expert's mini-batch for this sequence\n position_in_expert_1 = mtf.cumsum(\n mask_1, group_size_dim, exclusive=True) * mask_1\n # Remove the elements that don't fit. 
[batch, group, experts]\n mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))\n # [batch, experts]\n # How many examples in this sequence go to this expert\n mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)\n # [batch, group] - mostly ones, but zeros where something didn't fit\n mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)\n # [batch, group]\n position_in_expert_1 = mtf.reduce_sum(\n position_in_expert_1, reduced_dim=experts_dim)\n # Weight assigned to first expert. [batch, group]\n gate_1 *= mask_1_flat\n\n # [batch, group, experts]\n position_in_expert_2 = (\n mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)\n position_in_expert_2 *= mask_2\n mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))\n # mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)\n mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)\n gate_2 *= mask_2_flat\n position_in_expert_2 = mtf.reduce_sum(\n position_in_expert_2, reduced_dim=experts_dim)\n\n # [batch, group, experts, expert_capacity]\n combine_tensor = (\n gate_1 * mask_1_flat\n * mtf.one_hot(index_1, experts_dim)\n * mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +\n gate_2 * mask_2_flat\n * mtf.one_hot(index_2, experts_dim)\n * mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))\n\n combine_tensor = mtf.cast(combine_tensor, inputs.dtype)\n loss = mtf.cast(loss, inputs.dtype)\n\n dispatch_tensor = mtf.cast(\n mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)\n\n return dispatch_tensor, combine_tensor, loss\n\n\ndef set_default_moe_hparams(hparams):\n \"\"\"Add necessary hyperparameters for mixture-of-experts.\"\"\"\n hparams.moe_num_experts = 16\n hparams.moe_loss_coef = 1e-2\n hparams.add_hparam(\"moe_gating\", \"top_2\")\n # Experts have fixed capacity per batch. 
We need some extra capacity\n # in case gating is not perfectly balanced.\n # moe_capacity_factor_* should be set to a value >=1.\n hparams.add_hparam(\"moe_capacity_factor_train\", 1.25)\n hparams.add_hparam(\"moe_capacity_factor_eval\", 2.0)\n hparams.add_hparam(\"moe_capacity_factor_second_level\", 1.0)\n # Each expert has a hidden layer with this size.\n hparams.add_hparam(\"moe_hidden_size\", 4096)\n # For gating, divide inputs into groups of this size before gating.\n # Each group sends the same number of inputs to each expert.\n # Ideally, the group size would be the whole batch, but this is expensive\n # due to our use of matrix multiplication for reordering.\n hparams.add_hparam(\"moe_group_size\", 1024)\n # For top_2 gating, whether to impose an additional loss in order to make\n # the experts equally used as the second-place expert.\n hparams.add_hparam(\"moe_use_second_place_loss\", 0)\n # In top_2 gating, policy for whether to use a second-place expert.\n # Legal values are:\n # \"all\": always\n # \"none\": never\n # \"threshold\": if gate value > the given threshold\n # \"random\": if gate value > threshold*random_uniform(0,1)\n hparams.add_hparam(\"moe_second_policy_train\", \"random\")\n hparams.add_hparam(\"moe_second_policy_eval\", \"random\")\n hparams.add_hparam(\"moe_second_threshold_train\", 0.2)\n hparams.add_hparam(\"moe_second_threshold_eval\", 0.2)\n\n\ndef _split_into_groups(n, max_group_size, mesh_dim_size):\n \"\"\"Helper function for figuring out how to split a dimensino into groups.\n\n We have a dimension with size n and we want to split it into\n two dimensions: n = num_groups * group_size\n\n group_size should be the largest possible value meeting the constraints:\n group_size <= max_group_size\n (num_groups = n/group_size) is a multiple of mesh_dim_size\n\n Args:\n n: an integer\n max_group_size: an integer\n mesh_dim_size: an integer\n\n Returns:\n num_groups: an integer\n group_size: an integer\n\n Raises:\n ValueError: if n is not a multiple of mesh_dim_size\n \"\"\"\n if n % mesh_dim_size != 0:\n raise ValueError(\n \"n=%d is not a multiple of mesh_dim_size=%d\" % (n, mesh_dim_size))\n num_groups = max(1, n // max_group_size)\n while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):\n num_groups += 1\n group_size = n // num_groups\n tf.logging.info(\n \"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)\"\n \" = (num_groups=%d group_size=%d)\" %\n (n, max_group_size, mesh_dim_size, num_groups, group_size))\n return num_groups, group_size\n"
] |
[
[
"tensorflow.logging.info"
]
] |
naseemap47/GestureVolumeControl-ComputerVision-python
|
[
"17f7b1aa6528d5a0701e49ce294708ccd5d574b0"
] |
[
"gesture_control.py"
] |
[
"import cv2\nimport mediapipe as mp\nimport time\nimport math\nimport numpy as np\nfrom change_vol import set_master_volume\n\ncap = cv2.VideoCapture(0)\np_time = 0\n\nmp_hand = mp.solutions.hands\nhand = mp_hand.Hands(max_num_hands=1)\nmp_draw = mp.solutions.drawing_utils\n\nvol = 0\nvol_bar = 400\n\nwhile True:\n success, img = cap.read()\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n result = hand.process(img_rgb)\n # print(result.multi_hand_landmarks)\n lm_list = []\n if result.multi_hand_landmarks:\n for hand_lm in result.multi_hand_landmarks:\n for id, lm in enumerate(hand_lm.landmark):\n height, width, channel = img.shape\n x, y = int(lm.x * width), int(lm.y * height)\n lm_list.append([id, x, y])\n # print(lm_list)\n if len(lm_list) > 8:\n # print(lm_list[4], lm_list[8])\n x1, y1 = lm_list[4][1], lm_list[4][2]\n x2, y2 = lm_list[8][1], lm_list[8][2]\n cx, cy = (x1 + x2) // 2, (y1 + y2) // 2\n cv2.circle(img, (x1, y1), 8, (255, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 8, (255, 0, 255), cv2.FILLED)\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)\n cv2.circle(img, (cx, cy), 8, (255, 0, 255), cv2.FILLED)\n\n length = math.hypot(x2 - x1, y2 - y1)\n # print(length)\n if length < 26:\n cv2.circle(img, (cx, cy), 8, (0, 255, 0), cv2.FILLED)\n if length > 200:\n cv2.circle(img, (cx, cy), 8, (0, 0, 255), cv2.FILLED)\n\n # Hand - 25 to 200\n # Vol - 0 to 100\n vol = np.interp(length, [25, 200], [0, 100])\n set_master_volume(vol)\n\n vol_bar = np.interp(length, [25, 200], [400, 150])\n\n mp_draw.draw_landmarks(img, hand_lm, mp_hand.HAND_CONNECTIONS)\n\n # Volume Bar\n cv2.rectangle(img, (50, 150), (85, 400), (0, 255, 0), 3)\n cv2.rectangle(img, (50, int(vol_bar)), (85, 400), (0, 255, 0), cv2.FILLED)\n cv2.putText(\n img, f'{int(vol)} %', (40, 450),\n cv2.FONT_HERSHEY_PLAIN, 2,\n (255, 0, 255), 2\n )\n\n c_time = time.time()\n fps = 1 / (c_time - p_time)\n p_time = c_time\n cv2.putText(\n img, f'FPS: {int(fps)}', (10, 60),\n cv2.FONT_HERSHEY_PLAIN, 2,\n (0, 255, 255), 2\n )\n\n cv2.imshow(\"Web-cam\", img)\n cv2.waitKey(1)\n"
] |
[
[
"numpy.interp"
]
] |
nickleus27/pandas
|
[
"e892d46598d303530519dde7e00b69e549a3d7ea"
] |
[
"pandas/tests/generic/test_duplicate_labels.py"
] |
[
"\"\"\"Tests dealing with the NDFrame.allows_duplicates.\"\"\"\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\nnot_implemented = pytest.mark.xfail(reason=\"Not implemented.\")\n\n# ----------------------------------------------------------------------------\n# Preservation\n\n\nclass TestPreserves:\n @pytest.mark.parametrize(\n \"cls, data\",\n [\n (pd.Series, np.array([])),\n (pd.Series, [1, 2]),\n (pd.DataFrame, {}),\n (pd.DataFrame, {\"A\": [1, 2]}),\n ],\n )\n def test_construction_ok(self, cls, data):\n result = cls(data)\n assert result.flags.allows_duplicate_labels is True\n\n result = cls(data).set_flags(allows_duplicate_labels=False)\n assert result.flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\n \"func\",\n [\n operator.itemgetter([\"a\"]),\n operator.methodcaller(\"add\", 1),\n operator.methodcaller(\"rename\", str.upper),\n operator.methodcaller(\"rename\", \"name\"),\n operator.methodcaller(\"abs\"),\n np.abs,\n ],\n )\n def test_preserved_series(self, func):\n s = pd.Series([0, 1], index=[\"a\", \"b\"]).set_flags(allows_duplicate_labels=False)\n assert func(s).flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\n \"other\", [pd.Series(0, index=[\"a\", \"b\", \"c\"]), pd.Series(0, index=[\"a\", \"b\"])]\n )\n # TODO: frame\n @not_implemented\n def test_align(self, other):\n s = pd.Series([0, 1], index=[\"a\", \"b\"]).set_flags(allows_duplicate_labels=False)\n a, b = s.align(other)\n assert a.flags.allows_duplicate_labels is False\n assert b.flags.allows_duplicate_labels is False\n\n def test_preserved_frame(self):\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n )\n assert df.loc[[\"a\"]].flags.allows_duplicate_labels is False\n assert df.loc[:, [\"A\", \"B\"]].flags.allows_duplicate_labels is False\n\n @not_implemented\n def test_to_frame(self):\n s = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False)\n assert s.to_frame().flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\"func\", [\"add\", \"sub\"])\n @pytest.mark.parametrize(\n \"frame\", [False, pytest.param(True, marks=not_implemented)]\n )\n @pytest.mark.parametrize(\"other\", [1, pd.Series([1, 2], name=\"A\")])\n def test_binops(self, func, other, frame):\n df = pd.Series([1, 2], name=\"A\", index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n )\n if frame:\n df = df.to_frame()\n if isinstance(other, pd.Series) and frame:\n other = other.to_frame()\n func = operator.methodcaller(func, other)\n assert df.flags.allows_duplicate_labels is False\n assert func(df).flags.allows_duplicate_labels is False\n\n @not_implemented\n def test_preserve_getitem(self):\n df = pd.DataFrame({\"A\": [1, 2]}).set_flags(allows_duplicate_labels=False)\n assert df[[\"A\"]].flags.allows_duplicate_labels is False\n assert df[\"A\"].flags.allows_duplicate_labels is False\n assert df.loc[0].flags.allows_duplicate_labels is False\n assert df.loc[[0]].flags.allows_duplicate_labels is False\n assert df.loc[0, [\"A\"]].flags.allows_duplicate_labels is False\n\n @pytest.mark.xfail(reason=\"Unclear behavior.\")\n def test_ndframe_getitem_caching_issue(self):\n # NDFrame.__getitem__ will cache the first df['A']. May need to\n # invalidate that cache? 
Update the cached entries?\n df = pd.DataFrame({\"A\": [0]}).set_flags(allows_duplicate_labels=False)\n assert df[\"A\"].flags.allows_duplicate_labels is False\n df.flags.allows_duplicate_labels = True\n assert df[\"A\"].flags.allows_duplicate_labels is True\n\n @pytest.mark.parametrize(\n \"objs, kwargs\",\n [\n # Series\n (\n [\n pd.Series(1, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.Series(2, index=[\"c\", \"d\"]).set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {},\n ),\n (\n [\n pd.Series(1, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.Series(2, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {\"ignore_index\": True},\n ),\n (\n [\n pd.Series(1, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.Series(2, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {\"axis\": 1},\n ),\n # Frame\n (\n [\n pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({\"A\": [1, 2]}, index=[\"c\", \"d\"]).set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {},\n ),\n (\n [\n pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {\"ignore_index\": True},\n ),\n (\n [\n pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({\"B\": [1, 2]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {\"axis\": 1},\n ),\n # Series / Frame\n (\n [\n pd.DataFrame({\"A\": [1, 2]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.Series([1, 2], index=[\"a\", \"b\"], name=\"B\",).set_flags(\n allows_duplicate_labels=False,\n ),\n ],\n {\"axis\": 1},\n ),\n ],\n )\n def test_concat(self, objs, kwargs):\n result = pd.concat(objs, **kwargs)\n assert result.flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\n \"left, right, kwargs, expected\",\n [\n # false false false\n pytest.param(\n pd.DataFrame({\"A\": [0, 1]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({\"B\": [0, 1]}, index=[\"a\", \"d\"]).set_flags(\n allows_duplicate_labels=False\n ),\n {\"left_index\": True, \"right_index\": True},\n False,\n marks=not_implemented,\n ),\n # false true false\n pytest.param(\n pd.DataFrame({\"A\": [0, 1]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n ),\n pd.DataFrame({\"B\": [0, 1]}, index=[\"a\", \"d\"]),\n {\"left_index\": True, \"right_index\": True},\n False,\n marks=not_implemented,\n ),\n # true true true\n (\n pd.DataFrame({\"A\": [0, 1]}, index=[\"a\", \"b\"]),\n pd.DataFrame({\"B\": [0, 1]}, index=[\"a\", \"d\"]),\n {\"left_index\": True, \"right_index\": True},\n True,\n ),\n ],\n )\n def test_merge(self, left, right, kwargs, expected):\n result = pd.merge(left, right, **kwargs)\n assert result.flags.allows_duplicate_labels is expected\n\n @not_implemented\n def test_groupby(self):\n # XXX: This is under tested\n # TODO:\n # - apply\n # - transform\n # - Should passing a grouper that disallows duplicates propagate?\n df = pd.DataFrame({\"A\": [1, 2, 3]}).set_flags(allows_duplicate_labels=False)\n result = df.groupby([0, 0, 1]).agg(\"count\")\n assert result.flags.allows_duplicate_labels is False\n\n @pytest.mark.parametrize(\"frame\", [True, False])\n @not_implemented\n 
def test_window(self, frame):\n df = pd.Series(\n 1,\n index=pd.date_range(\"2000\", periods=12),\n name=\"A\",\n allows_duplicate_labels=False,\n )\n if frame:\n df = df.to_frame()\n assert df.rolling(3).mean().flags.allows_duplicate_labels is False\n assert df.ewm(3).mean().flags.allows_duplicate_labels is False\n assert df.expanding(3).mean().flags.allows_duplicate_labels is False\n\n\n# ----------------------------------------------------------------------------\n# Raises\n\n\nclass TestRaises:\n @pytest.mark.parametrize(\n \"cls, axes\",\n [\n (pd.Series, {\"index\": [\"a\", \"a\"], \"dtype\": float}),\n (pd.DataFrame, {\"index\": [\"a\", \"a\"]}),\n (pd.DataFrame, {\"index\": [\"a\", \"a\"], \"columns\": [\"b\", \"b\"]}),\n (pd.DataFrame, {\"columns\": [\"b\", \"b\"]}),\n ],\n )\n def test_set_flags_with_duplicates(self, cls, axes):\n result = cls(**axes)\n assert result.flags.allows_duplicate_labels is True\n\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n cls(**axes).set_flags(allows_duplicate_labels=False)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n pd.Series(index=[0, 0], dtype=float),\n pd.DataFrame(index=[0, 0]),\n pd.DataFrame(columns=[0, 0]),\n ],\n )\n def test_setting_allows_duplicate_labels_raises(self, data):\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n data.flags.allows_duplicate_labels = False\n\n assert data.flags.allows_duplicate_labels is True\n\n def test_series_raises(self):\n a = pd.Series(0, index=[\"a\", \"b\"])\n b = pd.Series([0, 1], index=[\"a\", \"b\"]).set_flags(allows_duplicate_labels=False)\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.concat([a, b])\n\n @pytest.mark.parametrize(\n \"getter, target\",\n [\n (operator.itemgetter([\"A\", \"A\"]), None),\n # loc\n (operator.itemgetter([\"a\", \"a\"]), \"loc\"),\n pytest.param(\n operator.itemgetter((\"a\", [\"A\", \"A\"])), \"loc\", marks=not_implemented\n ),\n (operator.itemgetter(([\"a\", \"a\"], \"A\")), \"loc\"),\n # iloc\n (operator.itemgetter([0, 0]), \"iloc\"),\n pytest.param(\n operator.itemgetter((0, [0, 0])), \"iloc\", marks=not_implemented\n ),\n pytest.param(\n operator.itemgetter(([0, 0], 0)), \"iloc\", marks=not_implemented\n ),\n ],\n )\n def test_getitem_raises(self, getter, target):\n df = pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}, index=[\"a\", \"b\"]).set_flags(\n allows_duplicate_labels=False\n )\n if target:\n # df, df.loc, or df.iloc\n target = getattr(df, target)\n else:\n target = df\n\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n getter(target)\n\n @pytest.mark.parametrize(\n \"objs, kwargs\",\n [\n (\n [\n pd.Series(1, index=[0, 1], name=\"a\").set_flags(\n allows_duplicate_labels=False\n ),\n pd.Series(2, index=[0, 1], name=\"a\").set_flags(\n allows_duplicate_labels=False\n ),\n ],\n {\"axis\": 1},\n )\n ],\n )\n def test_concat_raises(self, objs, kwargs):\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.concat(objs, **kwargs)\n\n @not_implemented\n def test_merge_raises(self):\n a = pd.DataFrame({\"A\": [0, 1, 2]}, index=[\"a\", \"b\", \"c\"]).set_flags(\n allows_duplicate_labels=False\n )\n b = pd.DataFrame({\"B\": [0, 1, 2]}, index=[\"a\", \"b\", \"b\"])\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.merge(a, b, left_index=True, 
right_index=True)\n\n\[email protected](\n \"idx\",\n [\n pd.Index([1, 1]),\n pd.Index([\"a\", \"a\"]),\n pd.Index([1.1, 1.1]),\n pd.PeriodIndex([pd.Period(\"2000\", \"D\")] * 2),\n pd.DatetimeIndex([pd.Timestamp(\"2000\")] * 2),\n pd.TimedeltaIndex([pd.Timedelta(\"1D\")] * 2),\n pd.CategoricalIndex([\"a\", \"a\"]),\n pd.IntervalIndex([pd.Interval(0, 1)] * 2),\n pd.MultiIndex.from_tuples([(\"a\", 1), (\"a\", 1)]),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef test_raises_basic(idx):\n msg = \"Index has duplicates.\"\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False)\n\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.DataFrame({\"A\": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False)\n\n with pytest.raises(pd.errors.DuplicateLabelError, match=msg):\n pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False)\n\n\ndef test_format_duplicate_labels_message():\n idx = pd.Index([\"a\", \"b\", \"a\", \"b\", \"c\"])\n result = idx._format_duplicate_message()\n expected = pd.DataFrame(\n {\"positions\": [[0, 2], [1, 3]]}, index=pd.Index([\"a\", \"b\"], name=\"label\")\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_format_duplicate_labels_message_multi():\n idx = pd.MultiIndex.from_product([[\"A\"], [\"a\", \"b\", \"a\", \"b\", \"c\"]])\n result = idx._format_duplicate_message()\n expected = pd.DataFrame(\n {\"positions\": [[0, 2], [1, 3]]},\n index=pd.MultiIndex.from_product([[\"A\"], [\"a\", \"b\"]]),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dataframe_insert_raises():\n df = pd.DataFrame({\"A\": [1, 2]}).set_flags(allows_duplicate_labels=False)\n msg = \"Cannot specify\"\n with pytest.raises(ValueError, match=msg):\n df.insert(0, \"A\", [3, 4], allow_duplicates=True)\n\n\[email protected](\n \"method, frame_only\",\n [\n (operator.methodcaller(\"set_index\", \"A\", inplace=True), True),\n (operator.methodcaller(\"set_axis\", [\"A\", \"B\"], inplace=True), False),\n (operator.methodcaller(\"reset_index\", inplace=True), True),\n (operator.methodcaller(\"rename\", lambda x: x, inplace=True), False),\n ],\n)\ndef test_inplace_raises(method, frame_only):\n df = pd.DataFrame({\"A\": [0, 0], \"B\": [1, 2]}).set_flags(\n allows_duplicate_labels=False\n )\n s = df[\"A\"]\n s.flags.allows_duplicate_labels = False\n msg = \"Cannot specify\"\n\n with pytest.raises(ValueError, match=msg):\n method(df)\n if not frame_only:\n with pytest.raises(ValueError, match=msg):\n method(s)\n\n\ndef test_pickle():\n a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False)\n b = tm.round_trip_pickle(a)\n tm.assert_series_equal(a, b)\n\n a = pd.DataFrame({\"A\": []}).set_flags(allows_duplicate_labels=False)\n b = tm.round_trip_pickle(a)\n tm.assert_frame_equal(a, b)\n"
] |
[
[
"numpy.array",
"pandas.CategoricalIndex",
"pandas.concat",
"pandas.merge",
"pandas.Series",
"pandas.Timestamp",
"pandas.Index",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.Timedelta",
"pandas.MultiIndex.from_product",
"pandas._testing.round_trip_pickle",
"pandas.date_range",
"pandas.Period",
"pandas.Interval",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_frame_equal"
]
] |
wordsand/fsgan
|
[
"dabfc28afda0463100f37c48fd6dfed308e6e2b0"
] |
[
"train_blending.py"
] |
[
"import os\nimport itertools\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils as tutils\nimport torch.nn.functional as F\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nfrom fsgan.utils.obj_factory import obj_factory\nfrom fsgan.utils.tensorboard_logger import TensorBoardLogger\nfrom fsgan.utils import utils, img_utils, landmarks_utils\nfrom fsgan.datasets import img_landmarks_transforms\nfrom fsgan.models.hrnet import hrnet_wlfw\n\n\ndef transfer_mask(img1, img2, mask):\n mask = mask.unsqueeze(1).repeat(1, 3, 1, 1).float()\n out = img1 * mask + img2 * (1 - mask)\n\n return out\n\n\ndef blend_imgs_bgr(source_img, target_img, mask):\n a = np.where(mask != 0)\n if len(a[0]) == 0 or len(a[1]) == 0:\n return target_img\n if (np.max(a[0]) - np.min(a[0])) <= 10 or (np.max(a[1]) - np.min(a[1])) <= 10:\n return target_img\n\n center = (np.min(a[1]) + np.max(a[1])) // 2, (np.min(a[0]) + np.max(a[0])) // 2\n output = cv2.seamlessClone(source_img, target_img, mask*255, center, cv2.NORMAL_CLONE)\n\n return output\n\n\ndef blend_imgs(source_tensor, target_tensor, mask_tensor):\n out_tensors = []\n for b in range(source_tensor.shape[0]):\n source_img = img_utils.tensor2bgr(source_tensor[b])\n target_img = img_utils.tensor2bgr(target_tensor[b])\n mask = mask_tensor[b].squeeze().cpu().numpy()\n out_bgr = blend_imgs_bgr(source_img, target_img, mask)\n out_tensors.append(img_utils.bgr2tensor(out_bgr))\n\n return torch.cat(out_tensors, dim=0)\n\n\ndef main(\n # General arguments\n exp_dir, resume_dir=None, start_epoch=None, epochs=(90,), iterations=None, resolutions=(128, 256),\n lr_gen=(1e-4,), lr_dis=(1e-4,), gpus=None, workers=4, batch_size=(64,), seed=None, log_freq=20,\n\n # Data arguments\n train_dataset='opencv_video_seq_dataset.VideoSeqDataset', val_dataset=None, numpy_transforms=None,\n tensor_transforms=('img_landmarks_transforms.ToTensor()',\n 'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'),\n\n # Training arguments\n optimizer='optim.SGD(momentum=0.9,weight_decay=1e-4)', scheduler='lr_scheduler.StepLR(step_size=30,gamma=0.1)',\n pretrained=False, criterion_pixelwise='nn.L1Loss', criterion_id='vgg_loss.VGGLoss',\n criterion_attr='vgg_loss.VGGLoss', criterion_gan='gan_loss.GANLoss(use_lsgan=True)',\n generator='res_unet.MultiScaleResUNet(in_nc=7,out_nc=3)',\n discriminator='discriminators_pix2pix.MultiscaleDiscriminator',\n reenactment_model=None, seg_model=None, lms_model=None, pix_weight=0.1, rec_weight=1.0, gan_weight=0.001,\n background_value=-1.0\n):\n def proces_epoch(dataset_loader, train=True):\n stage = 'TRAINING' if train else 'VALIDATION'\n total_iter = len(dataset_loader) * dataset_loader.batch_size * epoch\n pbar = tqdm(dataset_loader, unit='batches')\n\n # Set networks training mode\n Gb.train(train)\n D.train(train)\n Gr.train(False)\n S.train(False)\n L.train(False)\n\n # Reset logger\n logger.reset(prefix='{} {}X{}: Epoch: {} / {}; LR: {:.0e}; '.format(\n stage, res, res, epoch + 1, res_epochs, scheduler_G.get_lr()[0]))\n\n # For each batch in the training data\n for i, (img, target) in enumerate(pbar):\n # Prepare input\n with torch.no_grad():\n # For each view images\n for j in range(len(img)):\n # For each pyramid image: push to device\n for p in range(len(img[j])):\n img[j][p] = img[j][p].to(device)\n\n # Compute context\n context = L(img[1][0].sub(context_mean).div(context_std))\n context = landmarks_utils.filter_landmarks(context)\n\n # Normalize each of the pyramid images\n for j in range(len(img)):\n for p in 
range(len(img[j])):\n img[j][p].sub_(img_mean).div_(img_std)\n\n # Target segmentation\n seg = S(img[1][0])\n if seg.shape[2:] != (res, res):\n seg = F.interpolate(seg, (res, res), mode='bicubic', align_corners=False)\n\n # Concatenate pyramid images with context to derive the final input\n input = []\n for p in range(len(img[0]) - 1, -1, -1):\n context = F.interpolate(context, size=img[0][p].shape[2:], mode='bicubic', align_corners=False)\n input.insert(0, torch.cat((img[0][p], context), dim=1))\n\n # Reenactment\n reenactment_img = Gr(input)\n reenactment_seg = S(reenactment_img)\n if reenactment_img.shape[2:] != (res, res):\n reenactment_img = F.interpolate(reenactment_img, (res, res), mode='bilinear', align_corners=False)\n reenactment_seg = F.interpolate(reenactment_seg, (res, res), mode='bilinear', align_corners=False)\n\n # Remove unnecessary pyramids\n for j in range(len(img)):\n img[j] = img[j][-ri - 1:]\n\n # Face mask as intersection of reenactment face segmentation with target face segmentation\n face_mask = (reenactment_seg.argmax(1) == 1) * (seg.argmax(1) == 1)\n\n # Prepare blending input and target\n img_transfer = transfer_mask(reenactment_img, img[1][0], face_mask)\n img_blend = blend_imgs(img_transfer, img[1][0], face_mask.byte()).to(device)\n img_transfer_input = torch.cat((img_transfer, img[1][0], face_mask.unsqueeze(1).float()), dim=1)\n img_transfer_input_pyd = img_utils.create_pyramid(img_transfer_input, len(img[0]))\n\n # Blend images\n img_blend_pred = Gb(img_transfer_input_pyd)\n\n # Fake Detection and Loss\n img_blend_pred_pyd = img_utils.create_pyramid(img_blend_pred, len(img[0]))\n pred_fake_pool = D([x.detach() for x in img_blend_pred_pyd])\n loss_D_fake = criterion_gan(pred_fake_pool, False)\n\n # Real Detection and Loss\n pred_real = D(img[1])\n loss_D_real = criterion_gan(pred_real, True)\n\n loss_D_total = (loss_D_fake + loss_D_real) * 0.5\n\n # GAN loss (Fake Passability Loss)\n pred_fake = D(img_blend_pred_pyd)\n loss_G_GAN = criterion_gan(pred_fake, True)\n\n # Reconstruction\n loss_pixelwise = criterion_pixelwise(img_blend_pred, img_blend)\n loss_id = criterion_id(img_blend_pred, img_blend)\n loss_attr = criterion_attr(img_blend_pred, img_blend)\n loss_rec = pix_weight * loss_pixelwise + 0.5 * loss_id + 0.5 * loss_attr\n\n loss_G_total = rec_weight * loss_rec + gan_weight * loss_G_GAN\n\n if train:\n # Update generator weights\n optimizer_G.zero_grad()\n loss_G_total.backward()\n optimizer_G.step()\n\n # Update discriminator weights\n optimizer_D.zero_grad()\n loss_D_total.backward()\n optimizer_D.step()\n\n logger.update('losses', pixelwise=loss_pixelwise, id=loss_id, attr=loss_attr, rec=loss_rec,\n g_gan=loss_G_GAN, d_gan=loss_D_total)\n total_iter += dataset_loader.batch_size\n\n # Batch logs\n pbar.set_description(str(logger))\n if train and i % log_freq == 0:\n logger.log_scalars_val('%dx%d/batch' % (res, res), total_iter)\n\n # Epoch logs\n logger.log_scalars_avg('%dx%d/epoch/%s' % (res, res, 'train' if train else 'val'), epoch)\n if not train:\n # Log images\n grid = img_utils.make_grid(img[0][0], reenactment_img, img_transfer, img_blend_pred, img_blend, img[1][0])\n logger.log_image('%dx%d/vis' % (res, res), grid, epoch)\n\n return logger.log_dict['losses']['rec'].avg\n\n #################\n # Main pipeline #\n #################\n\n # Validation\n resolutions = resolutions if isinstance(resolutions, (list, tuple)) else [resolutions]\n lr_gen = lr_gen if isinstance(lr_gen, (list, tuple)) else [lr_gen]\n lr_dis = lr_dis if isinstance(lr_dis, 
(list, tuple)) else [lr_dis]\n epochs = epochs if isinstance(epochs, (list, tuple)) else [epochs]\n batch_size = batch_size if isinstance(batch_size, (list, tuple)) else [batch_size]\n iterations = iterations if iterations is None or isinstance(iterations, (list, tuple)) else [iterations]\n\n lr_gen = lr_gen * len(resolutions) if len(lr_gen) == 1 else lr_gen\n lr_dis = lr_dis * len(resolutions) if len(lr_dis) == 1 else lr_dis\n epochs = epochs * len(resolutions) if len(epochs) == 1 else epochs\n batch_size = batch_size * len(resolutions) if len(batch_size) == 1 else batch_size\n if iterations is not None:\n iterations = iterations * len(resolutions) if len(iterations) == 1 else iterations\n iterations = utils.str2int(iterations)\n\n if not os.path.isdir(exp_dir):\n raise RuntimeError('Experiment directory was not found: \\'' + exp_dir + '\\'')\n assert len(lr_gen) == len(resolutions)\n assert len(lr_dis) == len(resolutions)\n assert len(epochs) == len(resolutions)\n assert len(batch_size) == len(resolutions)\n assert iterations is None or len(iterations) == len(resolutions)\n\n # Seed\n utils.set_seed(seed)\n\n # Check CUDA device availability\n device, gpus = utils.set_device(gpus)\n\n # Initialize loggers\n logger = TensorBoardLogger(log_dir=exp_dir)\n\n # Initialize datasets\n numpy_transforms = obj_factory(numpy_transforms) if numpy_transforms is not None else []\n tensor_transforms = obj_factory(tensor_transforms) if tensor_transforms is not None else []\n img_transforms = img_landmarks_transforms.Compose(numpy_transforms + tensor_transforms)\n\n train_dataset = obj_factory(train_dataset, transform=img_transforms)\n if val_dataset is not None:\n val_dataset = obj_factory(val_dataset, transform=img_transforms)\n\n # Create networks\n Gb = obj_factory(generator).to(device)\n D = obj_factory(discriminator).to(device)\n\n # Resume from a checkpoint or initialize the networks weights randomly\n checkpoint_dir = exp_dir if resume_dir is None else resume_dir\n Gb_path = os.path.join(checkpoint_dir, 'Gb_latest.pth')\n D_path = os.path.join(checkpoint_dir, 'D_latest.pth')\n best_loss = 1000000.\n curr_res = resolutions[0]\n optimizer_G_state, optimizer_D_state = None, None\n if os.path.isfile(Gb_path) and os.path.isfile(D_path):\n print(\"=> loading checkpoint from '{}'\".format(checkpoint_dir))\n # G\n checkpoint = torch.load(Gb_path)\n if 'resolution' in checkpoint:\n curr_res = checkpoint['resolution']\n start_epoch = checkpoint['epoch'] if start_epoch is None else start_epoch\n else:\n curr_res = resolutions[1] if len(resolutions) > 1 else resolutions[0]\n best_loss = checkpoint['best_loss']\n Gb.apply(utils.init_weights)\n Gb.load_state_dict(checkpoint['state_dict'], strict=False)\n optimizer_G_state = checkpoint['optimizer']\n\n # D\n D.apply(utils.init_weights)\n if os.path.isfile(D_path):\n checkpoint = torch.load(D_path)\n D.load_state_dict(checkpoint['state_dict'], strict=False)\n optimizer_D_state = checkpoint['optimizer']\n else:\n print(\"=> no checkpoint found at '{}'\".format(checkpoint_dir))\n if not pretrained:\n print(\"=> randomly initializing networks...\")\n Gb.apply(utils.init_weights)\n D.apply(utils.init_weights)\n\n # Load reenactment model\n print('=> Loading face reenactment model: \"' + os.path.basename(reenactment_model) + '\"...')\n if reenactment_model is None:\n raise RuntimeError('Reenactment model must be specified!')\n if not os.path.exists(reenactment_model):\n raise RuntimeError('Couldn\\'t find reenactment model in path: ' + reenactment_model)\n checkpoint 
= torch.load(reenactment_model)\n Gr = obj_factory(checkpoint['arch']).to(device)\n Gr.load_state_dict(checkpoint['state_dict'])\n\n # Load segmentation model\n print('=> Loading face segmentation model: \"' + os.path.basename(seg_model) + '\"...')\n if seg_model is None:\n raise RuntimeError('Segmentation model must be specified!')\n if not os.path.exists(seg_model):\n raise RuntimeError('Couldn\\'t find segmentation model in path: ' + seg_model)\n checkpoint = torch.load(seg_model)\n S = obj_factory(checkpoint['arch']).to(device)\n S.load_state_dict(checkpoint['state_dict'])\n\n # Load face landmarks model\n print('=> Loading face landmarks model: \"' + os.path.basename(lms_model) + '\"...')\n assert os.path.isfile(lms_model), 'The model path \"%s\" does not exist' % lms_model\n L = hrnet_wlfw().to(device)\n state_dict = torch.load(lms_model)\n L.load_state_dict(state_dict)\n\n # Initialize normalization tensors\n # Note: this is necessary because of the landmarks model\n img_mean = torch.as_tensor([0.5, 0.5, 0.5], device=device).view(1, 3, 1, 1)\n img_std = torch.as_tensor([0.5, 0.5, 0.5], device=device).view(1, 3, 1, 1)\n context_mean = torch.as_tensor([0.485, 0.456, 0.406], device=device).view(1, 3, 1, 1)\n context_std = torch.as_tensor([0.229, 0.224, 0.225], device=device).view(1, 3, 1, 1)\n\n # Lossess\n criterion_pixelwise = obj_factory(criterion_pixelwise).to(device)\n criterion_id = obj_factory(criterion_id).to(device)\n criterion_attr = obj_factory(criterion_attr).to(device)\n criterion_gan = obj_factory(criterion_gan).to(device)\n\n # Support multiple GPUs\n if gpus and len(gpus) > 1:\n Gb = nn.DataParallel(Gb, gpus)\n Gr = nn.DataParallel(Gr, gpus)\n D = nn.DataParallel(D, gpus)\n S = nn.DataParallel(S, gpus)\n L = nn.DataParallel(L, gpus)\n criterion_id.vgg = nn.DataParallel(criterion_id.vgg, gpus)\n criterion_attr.vgg = nn.DataParallel(criterion_attr.vgg, gpus)\n\n # For each resolution\n start_res_ind = int(np.log2(curr_res)) - int(np.log2(resolutions[0]))\n start_epoch = 0 if start_epoch is None else start_epoch\n for ri in range(start_res_ind, len(resolutions)):\n res = resolutions[ri]\n res_lr_gen = lr_gen[ri]\n res_lr_dis = lr_dis[ri]\n res_epochs = epochs[ri]\n res_iterations = iterations[ri] if iterations is not None else None\n res_batch_size = batch_size[ri]\n\n # Optimizer and scheduler\n optimizer_G = obj_factory(optimizer, Gb.parameters(), lr=res_lr_gen)\n optimizer_D = obj_factory(optimizer, D.parameters(), lr=res_lr_dis)\n scheduler_G = obj_factory(scheduler, optimizer_G)\n scheduler_D = obj_factory(scheduler, optimizer_D)\n if optimizer_G_state is not None:\n optimizer_G.load_state_dict(optimizer_G_state)\n optimizer_G_state = None\n if optimizer_D_state is not None:\n optimizer_D.load_state_dict(optimizer_D_state)\n optimizer_D_state = None\n\n # Initialize data loaders\n if res_iterations is None:\n train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, len(train_dataset))\n else:\n train_sampler = tutils.data.sampler.WeightedRandomSampler(train_dataset.weights, res_iterations)\n train_loader = tutils.data.DataLoader(train_dataset, batch_size=res_batch_size, sampler=train_sampler,\n num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)\n if val_dataset is not None:\n if res_iterations is None:\n val_sampler = tutils.data.sampler.WeightedRandomSampler(val_dataset.weights, len(val_dataset))\n else:\n val_iterations = (res_iterations * len(val_dataset.classes)) // len(train_dataset.classes)\n val_sampler = 
tutils.data.sampler.WeightedRandomSampler(val_dataset.weights, val_iterations)\n val_loader = tutils.data.DataLoader(val_dataset, batch_size=res_batch_size, sampler=val_sampler,\n num_workers=workers, pin_memory=True, drop_last=True, shuffle=False)\n else:\n val_loader = None\n\n # For each epoch\n for epoch in range(start_epoch, res_epochs):\n total_loss = proces_epoch(train_loader, train=True)\n if val_loader is not None:\n with torch.no_grad():\n total_loss = proces_epoch(val_loader, train=False)\n\n # Schedulers step (in PyTorch 1.1.0+ it must follow after the epoch training and validation steps)\n if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n scheduler_G.step(total_loss)\n scheduler_D.step(total_loss)\n else:\n scheduler_G.step()\n scheduler_D.step()\n\n # Save models checkpoints\n is_best = total_loss < best_loss\n best_loss = min(best_loss, total_loss)\n utils.save_checkpoint(exp_dir, 'Gb', {\n 'resolution': res,\n 'epoch': epoch + 1,\n 'state_dict': Gb.module.state_dict() if gpus and len(gpus) > 1 else Gb.state_dict(),\n 'optimizer': optimizer_G.state_dict(),\n 'best_loss': best_loss,\n }, is_best)\n utils.save_checkpoint(exp_dir, 'D', {\n 'resolution': res,\n 'epoch': epoch + 1,\n 'state_dict': D.module.state_dict() if gpus and len(gpus) > 1 else D.state_dict(),\n 'optimizer': optimizer_D.state_dict(),\n 'best_loss': best_loss,\n }, is_best)\n\n # Reset start epoch to 0 because it's should only effect the first training resolution\n start_epoch = 0\n\n\nif __name__ == \"__main__\":\n # Parse program arguments\n import argparse\n parser = argparse.ArgumentParser('train_blending')\n general = parser.add_argument_group('general')\n general.add_argument('exp_dir', metavar='DIR',\n help='path to experiment directory')\n general.add_argument('-re', '--resume', metavar='DIR',\n help='path to latest checkpoint (default: None)')\n general.add_argument('-se', '--start-epoch', metavar='N',\n help='manual epoch number (useful on restarts)')\n general.add_argument('-e', '--epochs', default=90, type=int, nargs='+', metavar='N',\n help='number of total epochs to run')\n general.add_argument('-i', '--iterations', nargs='+', metavar='N',\n help='number of iterations per resolution to run')\n general.add_argument('-r', '--resolutions', default=(128, 256), type=int, nargs='+', metavar='N',\n help='the training resolutions list (must be power of 2)')\n parser.add_argument('-lrg', '--lr_gen', default=(1e-4,), type=float, nargs='+',\n metavar='F', help='initial generator learning rate per resolution')\n parser.add_argument('-lrd', '--lr_dis', default=(1e-4,), type=float, nargs='+',\n metavar='F', help='initial discriminator learning rate per resolution')\n general.add_argument('--gpus', nargs='+', type=int, metavar='N',\n help='list of gpu ids to use (default: all)')\n general.add_argument('-w', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\n general.add_argument('-b', '--batch-size', default=(64,), type=int, nargs='+', metavar='N',\n help='mini-batch size (default: 64)')\n general.add_argument('--seed', type=int, metavar='N',\n help='random seed')\n general.add_argument('-lf', '--log_freq', default=20, type=int, metavar='N',\n help='number of steps between each loss plot')\n\n data = parser.add_argument_group('data')\n data.add_argument('-td', '--train_dataset', default='opencv_video_seq_dataset.VideoSeqDataset',\n help='train dataset object')\n data.add_argument('-vd', '--val_dataset',\n help='val dataset object')\n 
data.add_argument('-nt', '--numpy_transforms', nargs='+',\n help='Numpy transforms')\n data.add_argument('-tt', '--tensor_transforms', nargs='+', help='tensor transforms',\n default=('img_landmarks_transforms.ToTensor()',\n 'transforms.Normalize(mean=[0.5,0.5,0.5],std=[0.5,0.5,0.5])'))\n\n training = parser.add_argument_group('training')\n training.add_argument('-o', '--optimizer', default='optim.SGD(momentum=0.9,weight_decay=1e-4)',\n help='network\\'s optimizer object')\n training.add_argument('-s', '--scheduler', default='lr_scheduler.StepLR(step_size=30,gamma=0.1)',\n help='scheduler object')\n training.add_argument('-p', '--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\n training.add_argument('-cp', '--criterion_pixelwise', default='nn.L1Loss',\n help='pixelwise criterion object')\n training.add_argument('-ci', '--criterion_id', default='vgg_loss.VGGLoss',\n help='id criterion object')\n training.add_argument('-ca', '--criterion_attr', default='vgg_loss.VGGLoss',\n help='attributes criterion object')\n training.add_argument('-cg', '--criterion_gan', default='gan_loss.GANLoss(use_lsgan=True)',\n help='GAN criterion object')\n parser.add_argument('-g', '--generator', default='res_unet_.MultiScaleResUNet(in_nc=7,out_nc=3)',\n help='generator completion object')\n parser.add_argument('-d', '--discriminator', default='discriminators_pix2pix.MultiscaleDiscriminator',\n help='discriminator object')\n parser.add_argument('-rm', '--reenactment_model', default=None, metavar='PATH',\n help='reenactment model')\n parser.add_argument('-sm', '--seg_model', default=None, metavar='PATH',\n help='segmentation model')\n parser.add_argument('-lm', '--lms_model', default=None, metavar='PATH',\n help='landmarks model')\n parser.add_argument('-pw', '--pix_weight', default=0.1, type=float, metavar='F',\n help='pixel-wise loss weight')\n parser.add_argument('-rw', '--rec_weight', default=1.0, type=float, metavar='F',\n help='reconstruction loss weight')\n parser.add_argument('-gw', '--gan_weight', default=0.001, type=float, metavar='F',\n help='GAN loss weight')\n parser.add_argument('-bv', '--background_value', default=-1.0, type=float, metavar='F',\n help='removed background replacement value')\n\n args = parser.parse_args()\n main(\n # General arguments\n args.exp_dir, args.resume, args.start_epoch, args.epochs, args.iterations, args.resolutions,\n lr_gen=args.lr_gen, lr_dis=args.lr_dis, gpus=args.gpus, workers=args.workers, batch_size=args.batch_size,\n seed=args.seed, log_freq=args.log_freq,\n\n # Data arguments\n train_dataset=args.train_dataset, val_dataset=args.val_dataset, numpy_transforms=args.numpy_transforms,\n tensor_transforms=args.tensor_transforms,\n\n # Training arguments\n optimizer=args.optimizer, scheduler=args.scheduler, pretrained=args.pretrained,\n criterion_pixelwise=args.criterion_pixelwise, criterion_id=args.criterion_id,\n criterion_attr=args.criterion_attr, criterion_gan=args.criterion_gan, generator=args.generator,\n discriminator=args.discriminator, reenactment_model=args.reenactment_model, seg_model=args.seg_model,\n lms_model=args.lms_model, pix_weight=args.pix_weight, rec_weight=args.rec_weight, gan_weight=args.gan_weight,\n background_value=args.background_value\n )\n"
] |
[
[
"numpy.log2",
"torch.load",
"torch.cat",
"numpy.min",
"torch.utils.data.DataLoader",
"numpy.max",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.nn.DataParallel",
"torch.utils.data.sampler.WeightedRandomSampler",
"numpy.where",
"torch.as_tensor"
]
] |
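The `main` routine in the `train_blending` record above rebuilds the optimizer and scheduler for every training resolution and, as its own comment notes, steps the scheduler only after the epoch's train/val passes (required from PyTorch 1.1.0 on). A minimal sketch of that progressive-resolution pattern, with a stand-in model and fake batches rather than the repo's actual components:

```python
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Conv2d(3, 3, 3, padding=1)  # stand-in for the blending generator
resolutions, epochs_per_res, lrs = (128, 256), (2, 2), (1e-4, 5e-5)

for res, n_epochs, lr in zip(resolutions, epochs_per_res, lrs):
    # fresh optimizer/scheduler pair per resolution, as in the script
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
    for epoch in range(n_epochs):
        x = torch.randn(4, 3, res, res)  # fake batch at this resolution
        loss = model(x).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # after the epoch's passes, not inside them
```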
fameyer/pymorWin
|
[
"b449a38754fddb719d554f1aacf9280a585f1168"
] |
[
"src/pymor/algorithms/ei.py"
] |
[
"# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\n\"\"\"This module contains algorithms for the empirical interpolation of operators.\n\nThe main work for generating the necessary interpolation data is handled by\nthe :func:`ei_greedy` method. The objects returned by this method can be used\nto instantiate an |EmpiricalInterpolatedOperator|.\n\n:func:`ei_greedy` expects an iterable of operator evaluations which are to be\ninterpolated. These evaluation can be provided by an instance of\n:class:`EvaluationProvider` which, given a discretization, names of |Operators|\nand a set of parameters, provides evaluations of the |Operators| on the solution\nsnapshots for the given parameters. Caching of the evaluations is also\nhandled by :class:`EvaluationProvider`.\n\nAs a convenience, the :func:`interpolate_operators` method allows to perform\nthe empirical interpolation of the |Operators| of a given discretization with\na single function call.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom scipy.linalg import solve_triangular, cho_factor, cho_solve\n\nfrom pymor.core.logger import getLogger\nfrom pymor.core.cache import CacheableInterface, cached\nfrom pymor.la.interfaces import VectorArrayInterface\nfrom pymor.la.pod import pod\nfrom pymor.operators.ei import EmpiricalInterpolatedOperator\n\n\ndef ei_greedy(evaluations, error_norm=None, target_error=None, max_interpolation_dofs=None,\n projection='orthogonal', product=None):\n \"\"\"Generate data for empirical operator interpolation by a greedy search (EI-Greedy algorithm).\n\n Given evaluations of |Operators|, this method generates a collateral_basis and\n interpolation DOFs for empirical operator interpolation. The returned objects\n can be used to instantiate an |EmpiricalInterpolatedOperator|.\n\n The interpolation data is generated by a greedy search algorithm, adding in each\n loop the worst approximated operator evaluation to the collateral basis.\n\n Parameters\n ----------\n evaluations\n An iterable of operator evaluations. Each element must be a |VectorArray|\n of the same type and dimension, but it can hold an arbitrary number of evaluations.\n error_norm\n Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm\n is used.\n target_error\n Stop the greedy search if the largest approximation error is below this threshold.\n max_interpolation_dofs\n Stop the greedy search if the number of interpolation DOF (= dimension of the collateral\n basis) reaches this value.\n projection\n If `ei`, compute the approximation error by comparing the given evaluation by the\n evaluation of the interpolated operator. 
If `orthogonal`, compute the error by\n comparing with the orthogonal projection onto the span of the collateral basis.\n product\n If `projection == 'orthogonal'`, the product which is used to perform the projection.\n If `None`, the Euclidean product is used.\n\n Returns\n -------\n interpolation_dofs\n |NumPy array| of the DOFs at which the operators have to be evaluated.\n collateral_basis\n |VectorArray| containing the generated collateral basis.\n data\n Dict containing the following fields:\n\n :errors: sequence of maximum approximation errors during\n greedy search.\n :triangularity_errors: sequence of maximum absolute values of interoplation\n matrix coefficients in the upper triangle (should\n be near zero).\n \"\"\"\n\n assert projection in ('orthogonal', 'ei')\n assert isinstance(evaluations, VectorArrayInterface)\\\n or all(isinstance(ev, VectorArrayInterface) for ev in evaluations)\n if isinstance(evaluations, VectorArrayInterface):\n evaluations = (evaluations,)\n\n logger = getLogger('pymor.algorithms.ei.ei_greedy')\n logger.info('Generating Interpolation Data ...')\n\n interpolation_dofs = np.zeros((0,), dtype=np.int32)\n interpolation_matrix = np.zeros((0, 0))\n collateral_basis = next(iter(evaluations)).empty()\n max_errs = []\n triangularity_errs = []\n\n def interpolate(U, ind=None):\n coefficients = solve_triangular(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T,\n lower=True, unit_diagonal=True).T\n # coefficients = np.linalg.solve(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T).T\n return collateral_basis.lincomb(coefficients)\n\n # compute the maximum projection error and error vector for the current interpolation data\n def projection_error():\n max_err = -1.\n\n # precompute gramian_inverse if needed\n if projection == 'orthogonal' and len(interpolation_dofs) > 0:\n if product is None:\n gramian = collateral_basis.gramian()\n else:\n gramian = product.apply2(collateral_basis, collateral_basis, pairwise=False)\n gramian_cholesky = cho_factor(gramian, overwrite_a=True)\n\n for AU in evaluations:\n if len(interpolation_dofs) > 0:\n if projection == 'ei':\n AU_interpolated = interpolate(AU)\n ERR = AU - AU_interpolated\n else:\n if product is None:\n coefficients = cho_solve(gramian_cholesky,\n collateral_basis.dot(AU, pairwise=False)).T\n else:\n coefficients = cho_solve(gramian_cholesky,\n product.apply2(collateral_basis, AU, pairwise=False)).T\n AU_projected = collateral_basis.lincomb(coefficients)\n ERR = AU - AU_projected\n else:\n ERR = AU\n errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)\n local_max_err_ind = np.argmax(errs)\n local_max_err = errs[local_max_err_ind]\n if local_max_err > max_err:\n max_err = local_max_err\n if len(interpolation_dofs) == 0 or projection == 'ei':\n new_vec = ERR.copy(ind=local_max_err_ind)\n else:\n new_vec = AU.copy(ind=local_max_err_ind)\n new_vec -= interpolate(AU, ind=local_max_err_ind)\n\n return max_err, new_vec\n\n # main loop\n while True:\n max_err, new_vec = projection_error()\n\n logger.info('Maximum interpolation error with {} interpolation DOFs: {}'.format(len(interpolation_dofs),\n max_err))\n if target_error is not None and max_err <= target_error:\n logger.info('Target error reached! Stopping extension loop.')\n break\n\n # compute new interpolation dof and collateral basis vector\n new_dof = new_vec.amax()[0][0]\n if new_dof in interpolation_dofs:\n logger.info('DOF {} selected twice for interplation! 
Stopping extension loop.'.format(new_dof))\n break\n new_vec *= 1 / new_vec.components([new_dof])[0, 0]\n interpolation_dofs = np.hstack((interpolation_dofs, new_dof))\n collateral_basis.append(new_vec, remove_from_other=True)\n interpolation_matrix = collateral_basis.components(interpolation_dofs).T\n max_errs.append(max_err)\n\n triangularity_error = np.max(np.abs(interpolation_matrix - np.tril(interpolation_matrix)))\n triangularity_errs.append(triangularity_error)\n logger.info('Interpolation matrix is not lower triangular with maximum error of {}'\n .format(triangularity_error))\n\n if len(interpolation_dofs) >= max_interpolation_dofs:\n logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')\n max_err, _ = projection_error()\n logger.info('Final maximum interpolation error with {} interpolation DOFs: {}'.format(\n len(interpolation_dofs), max_err))\n break\n\n logger.info('')\n\n data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}\n\n return interpolation_dofs, collateral_basis, data\n\n\ndef deim(evaluations, modes=None, error_norm=None, product=None):\n \"\"\"Generate data for empirical operator interpolation using DEIM algorithm.\n\n Given evaluations of |Operators|, this method generates a collateral_basis and\n interpolation DOFs for empirical operator interpolation. The returned objects\n can be used to instantiate an |EmpiricalInterpolatedOperator|.\n\n The collateral basis is determined by the first POD modes of the operator\n evaluations.\n\n Parameters\n ----------\n evaluations\n A |VectorArray| of operator evaluations.\n modes\n Dimension of the collateral basis i.e. number of POD modes of the operator evaluations.\n error_norm\n Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm\n is used.\n product\n Product |Operator| used for POD.\n\n Returns\n -------\n interpolation_dofs\n |NumPy array| of the DOFs at which the operators have to be evaluated.\n collateral_basis\n |VectorArray| containing the generated collateral basis.\n data\n Dict containing the following fields:\n\n :errors: sequence of maximum approximation errors during greedy search.\n \"\"\"\n\n assert isinstance(evaluations, VectorArrayInterface)\n\n logger = getLogger('pymor.algorithms.ei.deim')\n logger.info('Generating Interpolation Data ...')\n\n collateral_basis = pod(evaluations, modes, product=product)[0]\n\n interpolation_dofs = np.zeros((0,), dtype=np.int32)\n interpolation_matrix = np.zeros((0, 0))\n errs = []\n\n for i in xrange(len(collateral_basis)):\n\n if len(interpolation_dofs) > 0:\n coefficients = np.linalg.solve(interpolation_matrix,\n collateral_basis.components(interpolation_dofs, ind=i).T).T\n U_interpolated = collateral_basis.lincomb(coefficients, ind=range(len(interpolation_dofs)))\n ERR = collateral_basis.copy(ind=i)\n ERR -= U_interpolated\n else:\n ERR = collateral_basis.copy(ind=i)\n\n err = ERR.l2_norm() if error_norm is None else error_norm(ERR)\n\n logger.info('Interpolation error for basis vector {}: {}'.format(i, err))\n\n # compute new interpolation dof and collateral basis vector\n new_dof = ERR.amax()[0][0]\n\n if new_dof in interpolation_dofs:\n logger.info('DOF {} selected twice for interplation! 
Stopping extension loop.'.format(new_dof))\n break\n\n interpolation_dofs = np.hstack((interpolation_dofs, new_dof))\n interpolation_matrix = collateral_basis.components(interpolation_dofs, ind=range(len(interpolation_dofs))).T\n errs.append(err)\n\n logger.info('')\n\n if len(interpolation_dofs) < len(collateral_basis):\n collateral_basis.remove(ind=range(len(interpolation_dofs), len(collateral_basis)))\n\n logger.info('Finished.'.format(new_dof))\n\n data = {'errors': errs}\n\n return interpolation_dofs, collateral_basis, data\n\n\nclass EvaluationProvider(CacheableInterface):\n \"\"\"Helper class for providing cached operator evaluations that can be fed into :func:`ei_greedy`.\n\n This class calls `solve()` on a given |Discretization| for a provided sample of |Parameters| and\n then applies |Operators| to the solutions. The results are cached.\n\n Parameters\n ----------\n discretization\n The |Discretization| whose `solve()` method will be called.\n operators\n A list of |Operators| which are evaluated on the solution snapshots.\n sample\n A list of |Parameters| for which `discretization.solve()` is called.\n cache_region\n Name of the |CacheRegion| to use.\n \"\"\"\n\n def __init__(self, discretization, operators, sample, cache_region='memory'):\n self.cache_region = cache_region\n self.discretization = discretization\n self.sample = sample\n self.operators = operators\n\n @cached\n def data(self, k):\n mu = self.sample[k]\n U = self.discretization.solve(mu)\n AU = self.operators[0].range.empty(reserve=len(self.operators))\n for op in self.operators:\n AU.append(op.apply(U, mu=mu))\n return AU\n\n def __len__(self):\n return len(self.sample)\n\n def __getitem__(self, ind):\n if not 0 <= ind < len(self.sample):\n raise IndexError\n return self.data(ind)\n\n\ndef interpolate_operators(discretization, operator_names, parameter_sample, error_norm=None,\n target_error=None, max_interpolation_dofs=None,\n projection='orthogonal', product=None, cache_region='memory'):\n \"\"\"Empirical operator interpolation using the EI-Greedy algorithm.\n\n This is a convenience method for facilitating the use of :func:`ei_greedy`. Given\n a |Discretization|, names of |Operators|, and a sample of |Parameters|, first the operators\n are evaluated on the solution snapshots of the discretization for the provided parameters.\n These evaluations are then used as input for :func:`ei_greedy`. Finally the resulting\n interpolation data is used to create |EmpiricalInterpolatedOperators| and a new\n discretization with the interpolated operators is returned.\n\n Note that this implementation creates ONE common collateral basis for all operators\n which might not be what you want.\n\n Parameters\n ----------\n discretization\n The |Discretization| whose |Operators| will be interpolated.\n operator_names\n List of keys in the `operators` dict of the discretization. 
The corresponding\n |Operators| will be interpolated.\n sample\n A list of |Parameters| for which solution snapshots are calculated.\n error_norm\n See :func:`ei_greedy`.\n target_error\n See :func:`ei_greedy`.\n max_interpolation_dofs\n See :func:`ei_greedy`.\n projection\n See :func:`ei_greedy`.\n product\n See :func:`ei_greedy`.\n cache_region\n Name of the |CacheRegion| in which the operator evaluations will be stored.\n\n Returns\n -------\n ei_discretization\n |Discretization| with |Operators| given by `operator_names` replaced by\n |EmpiricalInterpolatedOperators|.\n data\n Dict containing the following fields:\n\n :dofs: |NumPy array| of the DOFs at which the |Operators| have to be evaluated.\n :basis: |VectorArray| containing the generated collateral basis.\n :errors: sequence of maximum approximation errors during greedy search.\n \"\"\"\n\n sample = tuple(parameter_sample)\n operators = [discretization.operators[operator_name] for operator_name in operator_names]\n\n evaluations = EvaluationProvider(discretization, operators, sample, cache_region=cache_region)\n dofs, basis, data = ei_greedy(evaluations, error_norm, target_error, max_interpolation_dofs,\n projection=projection, product=product)\n\n ei_operators = {name: EmpiricalInterpolatedOperator(operator, dofs, basis, triangular=True)\n for name, operator in zip(operator_names, operators)}\n operators_dict = discretization.operators.copy()\n operators_dict.update(ei_operators)\n ei_discretization = discretization.with_(operators=operators_dict, name='{}_ei'.format(discretization.name))\n\n data.update({'dofs': dofs, 'basis': basis})\n return ei_discretization, data\n"
] |
[
[
"numpy.hstack",
"scipy.linalg.cho_factor",
"numpy.argmax",
"numpy.zeros",
"numpy.tril"
]
] |
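`ei_greedy` and `deim` above operate on pyMOR |VectorArrays|; the selection logic itself is easier to see on a plain matrix. A minimal NumPy-only sketch of classical DEIM point selection on an orthonormal snapshot basis (illustrative only, not pyMOR's API):

```python
import numpy as np

def deim_points(U):
    """Greedily pick interpolation DOFs for an orthonormal basis U (n x m)."""
    dofs = [int(np.argmax(np.abs(U[:, 0])))]
    for i in range(1, U.shape[1]):
        # interpolate the next basis vector at the DOFs chosen so far ...
        c = np.linalg.solve(U[dofs, :i], U[dofs, i])
        r = U[:, i] - U[:, :i] @ c
        # ... and take the entry where the interpolation error is largest
        dofs.append(int(np.argmax(np.abs(r))))
    return np.array(dofs)

xs, mus = np.linspace(0, 1, 100), np.linspace(0, 1, 20)
snapshots = np.exp(-(xs[:, None] - mus[None, :]) ** 2 / 0.01)  # 100 x 20
basis = np.linalg.svd(snapshots, full_matrices=False)[0]
print(deim_points(basis[:, :5]))  # 5 interpolation DOFs
```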
krishna-aditi/Big-Data-Systems-Assignment-2
|
[
"a7e02b4588e9609c6a9ff12e04169ca042300950"
] |
[
"src/losses/vggloss.py"
] |
[
"\"\"\"\nVarious loss functions for training \n\n\"\"\"\nimport os\nimport tensorflow as tf\n\n# Global vars for VGG19 loss\nglobal vggfull\nglobal vggfeats\nglobal vggfeats_l\nglobal vggfeats_seq\nglobal vggfeats_seq_l\nglobal vgginput_shape\nglobal vgginit\n\n# Download this file using keras first.\n_default_vgg_weights = f'{os.environ[\"HOME\"]}/.keras/models/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\nclass VGGLoss():\n def __init__(self,\n input_shape=(None,192,192,1),\n resize_to=None,\n layer='block5_conv4',\n vgg_weights=_default_vgg_weights,\n normalization_scale=1.0,\n normalization_shift=0.0,\n skip=1):\n if len(input_shape)==4:\n self.T,self.L,self.W,self.D = input_shape\n self.use_seq=True\n elif len(input_shape)==3:\n self.L,self.W,self.D = input_shape\n self.use_seq=False\n else:\n raise ValueError('input_shape must have length 3 or 4')\n\n assert(self.D==1 or self.D==3) # Depth channel must be gray scale or RGB\n self.skip=skip\n self.resize_to=resize_to\n if resize_to:\n self.vggshape = (resize_to[0],resize_to[1],3) \n else:\n self.vggshape = (self.L,self.W,3) \n self.vggfull = tf.keras.applications.VGG19(weights=vgg_weights,\n input_shape=self.vggshape,\n include_top=False)\n self.vggfull.trainable=False\n for l in self.vggfull.layers:\n l.trainable=False\n \n # Build a pipeline that computes the MSE between VGG features.\n # Becuase VGG featueres are big, we will perform the computation sequentually over\n # the time dimension.\n self.vggfeats = tf.keras.Model(self.vggfull.input, \n outputs=self.vggfull.get_layer(layer).output)\n \n if self.use_seq:\n # Make the sequence-ified version of self.vggfeats\n # tf.keras.layers.TimeDistributed only accepts single inputs, so \n # will assume inputs are concat-ed along channel dim\n inp = tf.keras.layers.Input(shape=(self.vggshape[0],self.vggshape[1],6)) # inputs concatted along D\n inp_norm = tf.keras.layers.Lambda(lambda inp,scale,shift: inp*scale+shift,\n arguments={'scale':np.float32(normalization_scale),\n 'shift':np.float32(normalization_shift)})(inp)\n yt = tf.keras.layers.Lambda( lambda inp: inp[:,:,:,:3] )(inp_norm)\n yp = tf.keras.layers.Lambda( lambda inp: inp[:,:,:,3:] )(inp_norm)\n f_true = self.vggfeats(yt)\n f_pred = self.vggfeats(yp)\n f_diff = tf.keras.layers.Lambda( lambda f: tf.math.square(f[0]-f[1]))([f_true,f_pred])\n out = tf.keras.layers.Lambda( lambda d: tf.reduce_mean(d,axis=[1,2,3]) )(f_diff)\n self.vggfeats_model = tf.keras.Model(inputs=inp,outputs=out,name=\"VGG_Model\")\n \n # Build TimeDistributed that applys this over a sequence\n inp_seq = tf.keras.layers.Input(shape=(self.T,self.vggshape[0],self.vggshape[1],6))\n vggfeats_td = tf.keras.layers.TimeDistributed(self.vggfeats_model,name='VGG_Sequence')(inp_seq)\n self.vggfeats_seq = tf.keras.Model(inputs=inp_seq, outputs=vggfeats_td,name='VGG_Sequence_Loss')\n\n #####\n # Pipeline to prep data prior to being passed to vgg functions\n #####\n loss_inp = tf.keras.layers.Input(shape=(self.L,self.W,1)) \n if self.resize_to:\n loss_inp_resized = tf.keras.layers.Lambda(\n lambda t,size: tf.image.resize(t,size),\n arguments={'size':(self.vggshape[0],self.vggshape[1])} )(loss_inp)\n else:\n loss_inp_resized=loss_inp\n \n if self.D==1:\n loss_inp_tile3 = tf.keras.layers.Lambda(lambda t: tf.tile(t,[1,1,1,3]))(loss_inp_resized)\n else:\n loss_inp_tile3 = loss_inp_resized\n self.prep_loss_input = tf.keras.Model(inputs=loss_inp,outputs=loss_inp_tile3,name='PrepLossInputs')\n \n if self.use_seq:\n # Create sequence-ified version of self.prep_loss_input\n 
loss_inp_seq = tf.keras.layers.Input(shape=(self.T,self.L,self.W,1))\n if self.skip>1:\n # subset the time sequence\n loss_inp_seq_skipped = tf.keras.layers.Lambda(\n lambda t,skip: t[:,::skip],\n arguments={'skip':self.skip})(loss_inp_seq)\n else:\n loss_inp_seq_skipped = loss_inp_seq\n prep_loss_input_td = tf.keras.layers.TimeDistributed(self.prep_loss_input,name='PrepLossInputsSequence')(loss_inp_seq_skipped)\n self.prep_loss_input_seq = tf.keras.Model(inputs=loss_inp_seq,outputs=prep_loss_input_td)\n\n\n\n def get_loss(self):\n def vggloss(y_true,y_pred):\n \"\"\"\n If use_seq, inputs should be [N,T,L,W,D]\n If not, inputs should be [N,L,W,D]\n \"\"\"\n if not self.use_seq:\n yt_inp = tf.keras.layers.Input(shape=(self.L,self.W,self.D)) \n yp_inp = tf.keras.layers.Input(shape=(self.L,self.W,self.D))\n yt_inp_prepped=self.prep_loss_input(yt_inp) # resizes & tiles\n yp_inp_prepped=self.prep_loss_input(yp_inp) # resizes & tiles\n f_true = self.vggfeats(yt_inp_prepped)\n f_pred = self.vggfeats(yp_inp_prepped)\n f_diff = tf.keras.layers.Lambda( lambda f: tf.math.square(f[0]-f[1]))([f_true,f_pred])\n out = tf.keras.layers.Lambda( lambda d: tf.reduce_mean(d,axis=[1,2,3]) )(f_diff)\n loss = tf.keras.Model(inputs=[yt_inp,yp_inp],outputs=out)\n d = loss([y_true,y_pred])\n return tf.reduce_mean(d)\n else:\n yt_inp = tf.keras.layers.Input(shape=(self.T,self.L,self.W,self.D)) \n yp_inp = tf.keras.layers.Input(shape=(self.T,self.L,self.W,self.D))\n yt_inp_prepped = self.prep_loss_input_seq(yt_inp) # skips,resizes & tiles\n yp_inp_prepped = self.prep_loss_input_seq(yp_inp)\n # Must pass a single input to TimeDistributed, so concat inputs here\n y_concat = tf.keras.layers.Lambda(lambda ys: tf.concat((ys[0],ys[1]) ,axis=-1))([yt_inp_prepped,yp_inp_prepped])\n vgg=self.vggfeats_seq(y_concat)\n loss = tf.keras.Model(inputs=[yt_inp,yp_inp],outputs=vgg)\n d = loss([y_true,y_pred])\n return tf.reduce_mean( d ) # mean MSE over time\n return vggloss\n\n\n\n\n\n\n\n\n\n\ndef init_vgg19(input_shape=(192,192,1),\n T=None,\n layer='block5_conv4',\n vgg_weights=None,\n normalization_scale=1.0,\n normalization_shift=0.0):\n \"\"\"\n Initializes the VGG loss pipeline.\n \n input_shape Input shape tuple (L,W,D). 
Inputs are resized to this shape before being passed to VGG \n layer str name of VGG 19 layer to use for features\n vgg_weights weights for VGG19\n T Size of T dimension\n normalization_scale Scale for normalizing data before passing to VGG (normed_data = data*scale + shift) \n normalization_shift Shift for normalizing data before passing to VGG (normed_data = data*scale + shift) \n\n \n Make sure weights are downloaded first to vgg_weights.\n \"\"\"\n global vggfull\n global vggfeats\n global vggfeats_seq\n global vgginput_shape\n global vgginit\n \n L,W,D = input_shape\n vgginput_shape=input_shape\n assert(D==1 or D==3) # Depth channel must be gray scale or RGB\n if vgg_weights is None:\n vgg_weights = _default_vgg_weights\n vggshape = (input_shape[0],input_shape[1],3) \n vggfull = tf.keras.applications.VGG19(weights=vgg_weights,input_shape=vggshape,include_top=False)\n vggfull.trainable=False\n for l in vggfull.layers:\n l.trainable=False\n \n # Build a pipeline that computes the MSE between VGG features.\n # Because VGG features are big, we will perform the computation sequentially over\n # the time dimension.\n vggfeats = tf.keras.Model(vggfull.input, outputs=vggfull.get_layer(layer).output)\n \n # tf.keras.layers.TimeDistributed only accepts single inputs, so \n # will assume inputs are concat-ed along channel dim\n inp = tf.keras.layers.Input(shape=(L,W,6)) # inputs concatted along D\n inp_norm = tf.keras.layers.Lambda(lambda inp: inp*normalization_scale+normalization_shift)(inp)\n yt = tf.keras.layers.Lambda( lambda inp: inp[:,:,:,:3] )(inp_norm)\n yp = tf.keras.layers.Lambda( lambda inp: inp[:,:,:,3:] )(inp_norm)\n f_true = vggfeats(yt)\n f_pred = vggfeats(yp)\n f_diff = tf.keras.layers.Lambda( lambda f: tf.math.square(f[0]-f[1]))([f_true,f_pred])\n out = tf.keras.layers.Lambda( lambda d: tf.reduce_mean(d,axis=[1,2,3]) )(f_diff)\n vggfeats_model = tf.keras.Model(inputs=inp,outputs=out)\n \n inp_seq = tf.keras.layers.Input(shape=(T,L,W,6))\n \n # Build TimeDistributed that applies this over a sequence\n vggfeats_td = tf.keras.layers.TimeDistributed(vggfeats_model,name='VGG_Sequence')(inp_seq)\n vggfeats_seq = tf.keras.Model(inputs=inp_seq, outputs=vggfeats_td,name='VGG_Sequence_Loss')\n vgginit=True\n\n\n \ndef vgg19_loss(y_true, y_pred):\n \"\"\"\n Input:\n y_true N,T,L,L,D target\n Y_pred N,T,L,L,D prediction\n \n Depth D must be either 3 or 1. 
If equal to 1, channel is repeated 3 times to simulate RGB.\n \n Outputs MSE between Vgg19 features computed using layer.\n \n \"\"\"\n global vggfull\n global vggfeats\n global vggfeats_seq\n global vgginit\n global vgginput_shape\n if not vgginit:\n raise ValueError('Please call init_vgg19 first')\n \n L,W,D = vgginput_shape\n #yt_inp = tf.keras.layers.Input(shape=(None,L,W,1)) \n #yp_inp = tf.keras.layers.Input(shape=(None,L,W,1))\n \n T=y_true.get_shape()[1]\n Lin = y_true.get_shape()[2]\n Win = y_true.get_shape()[3]\n yt_inp = tf.keras.layers.Input(shape=(None,None,None,1)) \n yp_inp = tf.keras.layers.Input(shape=(None,None,None,1)) \n\n yt_reshape = tf.keras.layers.Lambda(lambda t: tf.reshape(t,[-1,384,384,1]) )(yt_inp)\n yp_reshape = tf.keras.layers.Lambda(lambda t: tf.reshape(t,[-1,384,384,1]) )(yp_inp)\n\n yt_resize = tf.keras.layers.Lambda(lambda t: tf.image.resize(t,(L,W)) )(yt_reshape)\n yp_resize = tf.keras.layers.Lambda(lambda t: tf.image.resize(t,(L,W)) )(yp_reshape)\n \n yt_reshape2 = tf.keras.layers.Lambda(lambda t: tf.reshape(t,[-1,T,L,W,D]) )(yt_resize)\n yp_reshape2 = tf.keras.layers.Lambda(lambda t: tf.reshape(t,[-1,T,L,W,D]) )(yp_resize)\n\n yt_tile3 = tf.keras.layers.Lambda(lambda t: tf.tile(t,[1,1,1,1,3]))(yt_reshape2)\n yp_tile3 = tf.keras.layers.Lambda(lambda t: tf.tile(t,[1,1,1,1,3]))(yp_reshape2)\n y_concat = tf.keras.layers.Lambda(lambda ys: tf.concat((ys[0],ys[1]) ,axis=4))([yt_tile3,yp_tile3])\n y_concat = tf.keras.layers.Lambda(lambda y: y[:,::6])(y_concat) # every hour\n vgg = vggfeats_seq(y_concat)\n loss = tf.keras.Model(inputs=[yt_inp,yp_inp],outputs=vgg)\n d = loss([y_true,y_pred])\n return tf.reduce_mean( d ) # mean MSE over time\n \n \ndef recon_loss(img,decoded_img, decoded_str):\n \"\"\"\n recon loss for VAEs\n \"\"\"\n if int(tf.__version__[0])<2: # TF 1\n reconstruction_loss = -tf.reduce_sum( tf.contrib.distributions.Normal(\n decoded_img,decoded_str ).log_prob(img), axis=[1,2,3],name='reconloss' )\n else: # TF 2\n import tensorflow_probability as tfp\n reconstruction_loss = -tf.reduce_sum( tfp.distributions.Normal(\n decoded_img, decoded_str).log_prob(img), axis=[1,2,3],name='reconloss' )\n return tf.reduce_mean(reconstruction_loss,axis=0)\n \n\n \ndef kl_loss(img,decoded_img,encoder_log_var,encoder_mu):\n \"\"\"\n LK loss for VAEs\n \"\"\"\n kl_loss = -0.5 * tf.reduce_sum( (1+encoder_log_var-tf.exp(encoder_log_var)-encoder_mu**2), axis=[1,2,3],name='klloss' )\n return tf.reduce_mean(kl_loss,axis=0)\n \n \n\n"
] |
[
[
"tensorflow.concat",
"tensorflow.keras.layers.Lambda",
"tensorflow.reduce_mean",
"tensorflow.keras.layers.TimeDistributed",
"tensorflow.reshape",
"tensorflow.exp",
"tensorflow.keras.Model",
"tensorflow.keras.applications.VGG19",
"tensorflow.image.resize",
"tensorflow.contrib.distributions.Normal",
"tensorflow.math.square",
"tensorflow.tile",
"tensorflow.keras.layers.Input"
]
] |
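Stripped of the `TimeDistributed` sequence plumbing, the core of `VGGLoss` above is an MSE between frozen VGG19 features. A minimal sketch, assuming ImageNet weights can be downloaded and using the record's default layer `block5_conv4`:

```python
import tensorflow as tf

vgg = tf.keras.applications.VGG19(weights="imagenet", include_top=False,
                                  input_shape=(192, 192, 3))
vgg.trainable = False
feats = tf.keras.Model(vgg.input, vgg.get_layer("block5_conv4").output)

def vgg_loss(y_true, y_pred):
    # grayscale inputs are tiled to 3 channels before the feature comparison
    y_true3 = tf.tile(y_true, [1, 1, 1, 3])
    y_pred3 = tf.tile(y_pred, [1, 1, 1, 3])
    return tf.reduce_mean(tf.square(feats(y_true3) - feats(y_pred3)))

x = tf.random.uniform((2, 192, 192, 1))
print(float(vgg_loss(x, x)))  # 0.0 for identical inputs
```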
huangzhii/biolearns
|
[
"95d58d55690e550fff94730f34ed7c0fb96f12af"
] |
[
"build/lib/biolearns/coexpression/lmQCM.py"
] |
[
"# Copyright 2020 Zhi Huang. All rights reserved\n# Created on Mon Feb 10 17:57:08 2020\n# Author: Zhi Huang, Purdue University\n# ___ ___ ___ ___ ___ \n# /\\__\\ /\\__\\ /\\ \\ /\\ \\ /\\__\\ \n# /:/ / /::| | /::\\ \\ /::\\ \\ /::| | \n# /:/ / /:|:| | /:/\\:\\ \\ /:/\\:\\ \\ /:|:| | \n# /:/ / /:/|:|__|__ \\:\\~\\:\\ \\ /:/ \\:\\ \\ /:/|:|__|__ \n# /:/__/ /:/ |::::\\__\\ \\:\\ \\:\\__\\ /:/__/ \\:\\__\\ /:/ |::::\\__\\\n# \\:\\ \\ \\/__/~~/:/ / \\:\\/:/ / \\:\\ \\ \\/__/ \\/__/~~/:/ /\n# \\:\\ \\ /:/ / \\::/ / \\:\\ \\ /:/ / \n# \\:\\ \\ /:/ / /:/ / \\:\\ \\ /:/ / \n# \\:\\__\\ /:/ / /:/ / \\:\\__\\ /:/ / \n# \\/__/ \\/__/ \\/__/ \\/__/ \\/__/ \n#\n#\n# The original code came with the following disclaimer:\n#\n# This software is provided \"as-is\". There are no expressed or implied\n# warranties of any kind, including, but not limited to, the warranties\n# of merchantability and fitness for a given application. In no event\n# shall Zhi Huang be liable for any direct, indirect, incidental,\n# special, exemplary or consequential damages (including, but not limited\n# to, loss of use, data or profits, or business interruption) however\n# caused and on any theory of liability, whether in contract, strict\n# liability or tort (including negligence or otherwise) arising in any way\n# out of the use of this software, even if advised of the possibility of\n# such damage.\n#\n\n'''\nParameters\n----------\n \ndata_in : real-valued expression matrix with rownames indicating\n gene ID or gene symbol.\ngamma : gamma value (default = 0.55)\nt : t value (default = 1)\nlambda : lambda value (default = 1)\nbeta : beta value (default = 0.4)\nminClusterSize : minimum length of cluster to retain (default = 10)\nCCmethod : Methods for correlation coefficient calculation (default =\n \"pearson\"). Users can also pick \"spearman\".\nnormalization : Determine if normalization is needed on massive correlation\n coefficient matrix.\nReturns\n-------\nNone\nNotes\n-----\nReferences\n----------\n.. [1] Zhang J, Huang K. Normalized lmqcm: An algorithm for detecting weak quasi-cliques\n in weighted graph with applications in gene co-expression module discovery in\n cancers. Cancer informatics. 2014 Jan;13:CIN-S14021.\n.. [2] Huang Z, Han Z, Wang T, Shao W, Xiang S, Salama P, Rizkalla M, Huang K, Zhang J.\n TSUNAMI: Translational Bioinformatics Tool Suite For Network Analysis And Mining.\n bioRxiv. 2019 Jan 1:787507.\nExamples\n-------\n>>> tcga_COAD_data = 'http://gdac.broadinstitute.org/runs/stddata__2016_01_28/data/COAD/20160128/gdac.broadinstitute.org_COAD.Merge_rnaseqv2__illuminahiseq_rnaseqv2__unc_edu__Level_3__RSEM_genes_normalized__data.Level_3.2016012800.0.0.tar.gz'\n>>> data_in = pd.read_csv(tcga_COAD_data, header=0, skiprows=range(1, 2), index_col=0, sep='\\t')\n>>> lobject = lmQCM(data_in)\n>>> lobject.fit()\n>>> lobject.clusters\n>>> lobject.clusters_names\n>>> lobject.eigengene_matrix\n'''\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom scipy.stats import spearmanr\n\n\nclass lmQCM():\n def __init__(self, data_in = None, gamma = 0.55, t = 1, lambdaa = 1, beta = 0.4, \n minClusterSize = 10, CCmethod = \"pearson\", normalization = False):\n \n self.data_in = data_in\n if 'DataFrame' not in str(type(self.data_in)):\n print('Input matrix is numpy matrix. Convert it to pandas.core.frame.DataFrame...')\n self.data_in = pd.DataFrame(self.data_in)\n if np.sum(np.isnan(self.data_in.values)) > 0:\n print('Warning: %d NaN value detected. Replacing them to zero...' 
% np.sum(np.isnan(self.data_in.values)))\n self.data_in.fillna(0, inplace = True)\n self.gamma = gamma\n self.t = t\n self.lambdaa = lambdaa\n self.beta = beta\n self.minClusterSize = minClusterSize\n self.CCmethod = CCmethod\n self.normalization = normalization\n self.calculate_correlation_matrix()\n print('Initialization Done.')\n \n def localMaximumQCM(self):\n C = []\n nRow = self.corr_mat.shape[0]\n maxV = np.max(self.corr_mat, axis = 0)\n maxInd = np.argmax(self.corr_mat, axis = 1)\n lm_ind = np.where(maxV == np.max(self.corr_mat[maxInd,], axis = 1))[0]\n maxEdges = np.stack((maxInd[lm_ind], lm_ind)).T\n maxW = maxV[lm_ind]\n sortMaxV = np.sort(maxW, kind='mergesort')[::-1] # decreasing\n sortMaxInd = np.argsort(maxW, kind='mergesort')[::-1]\n sortMaxEdges = maxEdges[sortMaxInd, ]\n print(\"Number of Maximum Edges: %d\" % len(sortMaxInd))\n currentInit = 1\n noNewInit = 0\n \n pbar = tqdm(total=len(sortMaxInd))\n nodesInCluster = []\n while currentInit <= len(sortMaxInd) and noNewInit == 0:\n pbar.update(1)\n if sortMaxV[currentInit] < (self.gamma * sortMaxV[1]):\n noNewInit = 1\n else:\n if sortMaxEdges[currentInit, 0] not in nodesInCluster and sortMaxEdges[currentInit, 1] not in nodesInCluster:\n newCluster = list(sortMaxEdges[currentInit, ])\n addingMode = 1\n currentDensity = sortMaxV[currentInit]\n nCp = 2\n totalInd = np.arange(nRow)\n remainInd = np.setdiff1d(totalInd, newCluster)\n while addingMode == 1:\n neighborWeights = np.sum(self.corr_mat[newCluster,:][:,remainInd], axis = 0)\n maxNeighborWeight = max(neighborWeights)\n maxNeighborInd = np.argmax(neighborWeights)\n c_v = maxNeighborWeight/nCp\n alphaN = 1 - 1/(2 * self.lambdaa * (nCp + self.t))\n if c_v >= alphaN * currentDensity:\n newCluster = newCluster + [remainInd[maxNeighborInd]]\n nCp = nCp + 1\n currentDensity = (currentDensity * ((nCp - 1) * (nCp - 2)/2) + maxNeighborWeight)/(nCp * (nCp - 1)/2)\n remainInd = np.setdiff1d(remainInd, remainInd[maxNeighborInd])\n else:\n addingMode = 0\n nodesInCluster = nodesInCluster + newCluster\n C = C + [newCluster]\n currentInit += 1\n print(\" Calculation Finished.\")\n pbar.close()\n return(C)\n \n def merging_lmQCM(self, C):\n sizeC = [len(i) for i in C]\n sortInd = np.argsort(sizeC, kind='mergesort')[::-1]\n mergedCluster = [C[i] for i in sortInd if len(C[i]) >= self.minClusterSize]\n mergeOccur = 1\n currentInd = -1\n print(\" %d Modules before merging.\" % len(mergedCluster))\n while mergeOccur == 1:\n mergeOccur = 0\n while currentInd < len(mergedCluster):\n currentInd += 1\n if currentInd < len(mergedCluster):\n keepInd = list(np.arange(0,currentInd+1))\n for j in np.arange(currentInd+1, len(mergedCluster)):\n interCluster = np.intersect1d(mergedCluster[currentInd], mergedCluster[j])\n if len(interCluster) >= self.beta * min(len(mergedCluster[j]), len(mergedCluster[currentInd])):\n mergedCluster[currentInd] = list(np.union1d(mergedCluster[currentInd], mergedCluster[j]))\n mergeOccur = 1\n else:\n keepInd += [j]\n mergedCluster = [mergedCluster[i] for i in keepInd]\n \n sizeMergedCluster = [len(mergedCluster[i]) for i in range(len(mergedCluster))]\n sortMergedInd = np.argsort(sizeMergedCluster, kind='mergesort')[::-1]\n mergedCluster = [mergedCluster[i] for i in sortMergedInd]\n currentInd = 0\n print(\" %d Modules remain after merging.\" % len(mergedCluster))\n return mergedCluster\n \n def calculate_correlation_matrix(self):\n print(\"Calculating massive correlation coefficient ...\")\n if self.CCmethod.lower() == \"pearson\": self.corr_mat = 
np.corrcoef(self.data_in.values)\n if self.CCmethod.lower() == \"spearman\": self.corr_mat = spearmanr(self.data_in.values.T).correlation\n np.fill_diagonal(self.corr_mat, 0)\n if np.sum(np.isnan(self.corr_mat)) > 0:\n print('Warning: %d NaN value detected in correlation matrix. Replacing them to zero...' % np.sum(np.isnan(self.corr_mat)))\n self.corr_mat[np.isnan(self.corr_mat)] = 0\n if self.normalization:\n self.corr_mat = np.abs(self.corr_mat)\n D = np.sum(self.corr_mat, axis = 0)\n D_half = 1.0/np.sqrt(D)\n self.corr_mat = np.multiply(np.multiply(self.corr_mat, D_half).T, D_half)\n \n def fit(self):\n C = self.localMaximumQCM()\n clusters = self.merging_lmQCM(C)\n \n clusters_names = []\n for i in range(len(clusters)):\n mc = clusters[i]\n clusters_names.append(list(self.data_in.index.values[mc]))\n eigengene_matrix = np.zeros((len(clusters), self.data_in.shape[1]))\n for i in range(len(clusters_names)):\n geneID = clusters_names[i]\n X = self.data_in.loc[geneID, ]\n mu = np.nanmean(X, axis = 1) # rowMeans\n stddev = np.nanstd(X, axis = 1, ddof= 1) # ddof=1 provides unbiased estimation (1/(n-1))\n XNorm = (X.T-mu).T\n XNorm = (XNorm.T/stddev).T\n u, s, vh = np.linalg.svd(XNorm, full_matrices = False)\n eigengene_matrix[i, ] = vh[0,:]\n eigengene_matrix = pd.DataFrame(eigengene_matrix, columns = self.data_in.columns)\n self.clusters = clusters\n self.clusters_names = clusters_names\n self.eigengene_matrix = eigengene_matrix\n return self.clusters, self.clusters_names, self.eigengene_matrix\n \n \n \n "
] |
[
[
"numpy.sqrt",
"pandas.DataFrame",
"numpy.max",
"numpy.fill_diagonal",
"numpy.nanmean",
"scipy.stats.spearmanr",
"numpy.nanstd",
"numpy.linalg.svd",
"numpy.arange",
"numpy.stack",
"numpy.intersect1d",
"numpy.argmax",
"numpy.multiply",
"numpy.isnan",
"numpy.union1d",
"numpy.corrcoef",
"numpy.argsort",
"numpy.sum",
"numpy.abs",
"numpy.sort",
"numpy.setdiff1d"
]
] |
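The eigengene computed at the end of `lmQCM.fit()` above is the first right singular vector of the z-scored module expression. Isolated as a small self-contained sketch:

```python
import numpy as np

def eigengene(X):
    """Module summary profile: first right singular vector of z-scored X (genes x samples)."""
    mu = np.nanmean(X, axis=1, keepdims=True)
    sd = np.nanstd(X, axis=1, ddof=1, keepdims=True)  # unbiased, as in the class
    _, _, vh = np.linalg.svd((X - mu) / sd, full_matrices=False)
    return vh[0]

X = np.random.randn(12, 30) + np.linspace(0, 3, 30)  # 12 co-varying genes, 30 samples
print(eigengene(X).shape)  # (30,) -- one value per sample
```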
jackharmer/agency
|
[
"5a78dd23e14c44c4076e49ea44b83ab1697e51c8"
] |
[
"tests/short/discount_test.py"
] |
[
"import torch\nfrom agency.tools.gamma_matrix import make_gamma_matrix, discount\nfrom pytest import approx\n\n\ndef simple_discount(rewards, gamma, value, masks):\n discounts = []\n V = value\n for cc in reversed(range(len(rewards))):\n V = rewards[cc] + gamma * masks[cc] * V\n discounts.append(V)\n return list(reversed(discounts))\n\n\ndef test_simple_discount_works():\n rewards = [0.1, 0.2, 0.3, 0.4]\n masks = [1, 1, 1, 1]\n gamma = 0.99\n value = 10\n\n # Create the true values\n v3 = rewards[3] + gamma * masks[3] * value\n v2 = rewards[2] + gamma * masks[2] * v3\n v1 = rewards[1] + gamma * masks[1] * v2\n v0 = rewards[0] + gamma * masks[0] * v1\n\n d_true = [v0, v1, v2, v3]\n\n d = simple_discount(rewards, gamma, value, masks)\n\n assert d_true == approx(d, 1e-5)\n\n masks = [1, 1, 1, 0]\n\n v3 = rewards[3] + gamma * masks[3] * value\n v2 = rewards[2] + gamma * masks[2] * v3\n v1 = rewards[1] + gamma * masks[1] * v2\n v0 = rewards[0] + gamma * masks[0] * v1\n\n d_true = [v0, v1, v2, v3]\n\n d = simple_discount(rewards, gamma, value, masks)\n\n assert d_true == approx(d, 1e-5)\n\n\ndef test_gamma_matrix():\n rewards = [0.1, 0.2, 0.3, 0.4]\n masks = [1, 1, 1, 1]\n gamma = 0.99\n value = 10\n gamma_matrix = make_gamma_matrix(gamma, len(rewards))\n\n d_simple = simple_discount(rewards, gamma, value, masks)\n d_gamma = discount(\n torch.tensor(rewards).unsqueeze(0),\n torch.tensor([value * masks[-1]]).unsqueeze(0),\n gamma_matrix\n )\n\n assert d_gamma.cpu().numpy() == approx(d_simple, 1e-5)\n\n masks = [1, 1, 1, 0]\n d_simple = simple_discount(rewards, gamma, value, masks)\n d_gamma = discount(\n torch.tensor(rewards).unsqueeze(0),\n torch.tensor([value * masks[-1]]).unsqueeze(0),\n gamma_matrix\n )\n\n assert d_gamma.cpu().numpy() == approx(d_simple, 1e-5)\n"
] |
[
[
"torch.tensor"
]
] |
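`make_gamma_matrix` and `discount` from `agency.tools.gamma_matrix` are not included in the record, but the tests pin down their behavior. A plausible reconstruction consistent with `simple_discount` (hypothetical, not the library's actual code):

```python
import torch

def make_gamma_matrix(gamma, steps):
    # m[t, k] = gamma**(k - t) for k >= t, else 0; the extra last column
    # (k == steps) carries the bootstrap-value weight gamma**(steps - t).
    m = torch.zeros(steps, steps + 1)
    for t in range(steps):
        for k in range(t, steps + 1):
            m[t, k] = gamma ** (k - t)
    return m

def discount(rewards, bootstrap, gamma_matrix):
    # rewards: [N, T], bootstrap: [N, 1] -> per-step discounted returns [N, T]
    return torch.cat([rewards, bootstrap], dim=1) @ gamma_matrix.T

gm = make_gamma_matrix(0.99, 4)
print(discount(torch.tensor([[0.1, 0.2, 0.3, 0.4]]), torch.tensor([[10.0]]), gm))
# first entry: 0.1 + 0.99*(0.2 + 0.99*(0.3 + 0.99*(0.4 + 0.99*10)))
```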
SheffieldChemoinformatics/molsg
|
[
"13c2edfce84539266e344119316b29a070816afc"
] |
[
"scripts/compute_covariance_descriptor.py"
] |
[
"\"\"\"Compute example WKS compute_WKS_descriptor.\"\"\"\nfrom __future__ import print_function, absolute_import\nimport numpy as np\nfrom molsg.laplacemesh import compute_lb_fem\nfrom molsg.localgeometry import WKS_descriptor\nfrom molsg.covariance import covariance_descriptor\n\nevals = 100\n\nmol = np.load('data/ampc/npy/actives18.pqr0.50.41.2.npy')\neigs = compute_lb_fem(vertices=mol[0], faces=mol[1], k=100)\nwks = WKS_descriptor(eigs, evals=evals)\ncov = covariance_descriptor(wks)\n\nprint('number of vertices:', mol[0].shape[0])\nprint('evals:', evals)\nprint('WKS descriptor shape:', wks.shape)\nprint('covariance descriptor shape:', cov.shape)\n"
] |
[
[
"numpy.load"
]
] |
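`covariance_descriptor` from `molsg.covariance` is not shown in this record. A common way to turn per-vertex spectral features into a fixed-size signature, and a plausible sketch of what the call computes (hypothetical, not molsg's implementation):

```python
import numpy as np

def covariance_descriptor(F):
    """Summarize per-vertex features F (n_vertices x d) by the upper triangle
    of their d x d covariance matrix, giving a size-independent descriptor."""
    C = np.cov(F, rowvar=False)
    return C[np.triu_indices_from(C)]

F = np.random.rand(500, 100)           # e.g. WKS features at 100 energy samples
print(covariance_descriptor(F).shape)  # (100 * 101 // 2,) = (5050,)
```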
chlorophyll-zz/qiskit-ibmq-provider
|
[
"ee2b326b0a07dba132cca4f85fe700f030b5880f"
] |
[
"test/ibmq/test_ibmq_job.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"IBMQJob Test.\"\"\"\n\nimport time\nimport copy\nfrom concurrent import futures\nfrom datetime import datetime, timedelta\nfrom unittest import SkipTest\n\nimport numpy\nfrom scipy.stats import chi2_contingency\n\nfrom qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister\nfrom qiskit.providers import JobStatus\nfrom qiskit.providers.ibmq import least_busy\nfrom qiskit.providers.ibmq.ibmqbackend import IBMQRetiredBackend\nfrom qiskit.providers.ibmq.exceptions import IBMQBackendError\nfrom qiskit.providers.ibmq.job.ibmqjob import IBMQJob\nfrom qiskit.providers.ibmq.job.exceptions import IBMQJobInvalidStateError, JobError\nfrom qiskit.providers.ibmq.ibmqfactory import IBMQFactory\nfrom qiskit.test import slow_test\nfrom qiskit.compiler import assemble, transpile\nfrom qiskit.result import Result\n\nfrom ..jobtestcase import JobTestCase\nfrom ..decorators import (requires_provider, slow_test_on_device, requires_device,\n requires_qe_access)\n\n\nclass TestIBMQJob(JobTestCase):\n \"\"\"Test ibmqjob module.\"\"\"\n\n def setUp(self):\n super().setUp()\n self._qc = _bell_circuit()\n\n @requires_provider\n def test_run_simulator(self, provider):\n \"\"\"Test running in a simulator.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n\n qr = QuantumRegister(2, 'q')\n cr = ClassicalRegister(2, 'c')\n qc = QuantumCircuit(qr, cr, name='hadamard')\n qc.h(qr)\n qc.measure(qr, cr)\n qobj = assemble(transpile([self._qc, qc], backend=backend), backend=backend)\n shots = qobj.config.shots\n job = backend.run(qobj)\n result = job.result()\n counts_qx1 = result.get_counts(0)\n counts_qx2 = result.get_counts(1)\n counts_ex1 = {'00': shots / 2, '11': shots / 2}\n counts_ex2 = {'00': shots / 4, '11': shots / 4, '10': shots / 4, '01': shots / 4}\n states1 = counts_qx1.keys() | counts_ex1.keys()\n states2 = counts_qx2.keys() | counts_ex2.keys()\n # contingency table\n ctable1 = numpy.array([[counts_qx1.get(key, 0) for key in states1],\n [counts_ex1.get(key, 0) for key in states1]])\n ctable2 = numpy.array([[counts_qx2.get(key, 0) for key in states2],\n [counts_ex2.get(key, 0) for key in states2]])\n self.log.info('states1: %s', str(states1))\n self.log.info('states2: %s', str(states2))\n self.log.info('ctable1: %s', str(ctable1))\n self.log.info('ctable2: %s', str(ctable2))\n contingency1 = chi2_contingency(ctable1)\n contingency2 = chi2_contingency(ctable2)\n self.log.info('chi2_contingency1: %s', str(contingency1))\n self.log.info('chi2_contingency2: %s', str(contingency2))\n self.assertGreater(contingency1[1], 0.01)\n self.assertGreater(contingency2[1], 0.01)\n\n @slow_test_on_device\n def test_run_device(self, provider, backend): # pylint: disable=unused-argument\n \"\"\"Test running in a real device.\"\"\"\n qobj = assemble(transpile(self._qc, backend=backend), backend=backend)\n shots = qobj.config.shots\n job = backend.run(qobj)\n while not job.status() is JobStatus.DONE:\n time.sleep(4)\n\n result = job.result()\n counts_qx = result.get_counts(0)\n counts_ex = 
{'00': shots / 2, '11': shots / 2}\n self.assertDictAlmostEqual(counts_qx, counts_ex, shots * 0.1)\n\n # Test fetching the job properties, as this is a real backend and is\n # guaranteed to have them.\n _ = job.properties()\n\n @requires_provider\n def test_run_async_simulator(self, provider):\n \"\"\"Test running in a simulator asynchronously.\"\"\"\n IBMQJob._executor = futures.ThreadPoolExecutor(max_workers=2)\n\n backend = provider.get_backend('ibmq_qasm_simulator')\n\n self.log.info('submitting to backend %s', backend.name())\n num_qubits = 16\n qr = QuantumRegister(num_qubits, 'qr')\n cr = ClassicalRegister(num_qubits, 'cr')\n qc = QuantumCircuit(qr, cr)\n for i in range(num_qubits - 1):\n qc.cx(qr[i], qr[i + 1])\n qc.measure(qr, cr)\n qobj = assemble(transpile([qc] * 10, backend=backend), backend=backend)\n num_jobs = 5\n job_array = [backend.run(qobj) for _ in range(num_jobs)]\n found_async_jobs = False\n timeout = 30\n start_time = time.time()\n while not found_async_jobs:\n check = sum(\n [job.status() is JobStatus.RUNNING for job in job_array])\n if check >= 2:\n self.log.info('found %d simultaneous jobs', check)\n break\n if all([job.status() is JobStatus.DONE for job in job_array]):\n # done too soon? don't generate error\n self.log.warning('all jobs completed before simultaneous jobs '\n 'could be detected')\n break\n for job in job_array:\n self.log.info('%s %s %s %s', job.status(), job.status() is JobStatus.RUNNING,\n check, job.job_id())\n self.log.info('- %s', str(time.time() - start_time))\n if time.time() - start_time > timeout:\n raise TimeoutError('failed to see multiple running jobs after '\n '{0} s'.format(timeout))\n time.sleep(0.2)\n\n result_array = [job.result() for job in job_array]\n self.log.info('got back all job results')\n # Ensure all jobs have finished.\n self.assertTrue(\n all([job.status() is JobStatus.DONE for job in job_array]))\n self.assertTrue(all([result.success for result in result_array]))\n\n # Ensure job ids are unique.\n job_ids = [job.job_id() for job in job_array]\n self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))\n\n @slow_test_on_device\n def test_run_async_device(self, provider, backend): # pylint: disable=unused-argument\n \"\"\"Test running in a real device asynchronously.\"\"\"\n self.log.info('submitting to backend %s', backend.name())\n num_qubits = 5\n qr = QuantumRegister(num_qubits, 'qr')\n cr = ClassicalRegister(num_qubits, 'cr')\n qc = QuantumCircuit(qr, cr)\n for i in range(num_qubits - 1):\n qc.cx(qr[i], qr[i + 1])\n qc.measure(qr, cr)\n qobj = assemble(transpile(qc, backend=backend), backend=backend)\n num_jobs = 3\n job_array = [backend.run(qobj) for _ in range(num_jobs)]\n time.sleep(3) # give time for jobs to start (better way?)\n job_status = [job.status() for job in job_array]\n num_init = sum(\n [status is JobStatus.INITIALIZING for status in job_status])\n num_queued = sum([status is JobStatus.QUEUED for status in job_status])\n num_running = sum(\n [status is JobStatus.RUNNING for status in job_status])\n num_done = sum([status is JobStatus.DONE for status in job_status])\n num_error = sum([status is JobStatus.ERROR for status in job_status])\n self.log.info('number of currently initializing jobs: %d/%d',\n num_init, num_jobs)\n self.log.info('number of currently queued jobs: %d/%d',\n num_queued, num_jobs)\n self.log.info('number of currently running jobs: %d/%d',\n num_running, num_jobs)\n self.log.info('number of currently done jobs: %d/%d',\n num_done, num_jobs)\n self.log.info('number of errored 
jobs: %d/%d',\n num_error, num_jobs)\n self.assertTrue(num_jobs - num_error - num_done > 0)\n\n # Wait for all the results.\n result_array = [job.result(timeout=180) for job in job_array]\n\n # Ensure all jobs have finished.\n self.assertTrue(\n all([job.status() is JobStatus.DONE for job in job_array]))\n self.assertTrue(all([result.success for result in result_array]))\n\n # Ensure job ids are unique.\n job_ids = [job.job_id() for job in job_array]\n self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))\n\n @requires_provider\n def test_cancel(self, provider):\n \"\"\"Test job cancellation.\"\"\"\n # Find the most busy backend\n backend = max([b for b in provider.backends() if b.status().operational],\n key=lambda b: b.status().pending_jobs)\n\n qobj = assemble(transpile(self._qc, backend=backend), backend=backend)\n job = backend.run(qobj)\n\n for _ in range(2):\n # Try twice in case job is not in a cancellable state\n try:\n if job.cancel():\n status = job.status()\n # TODO Change the warning to assert once API is fixed\n if status is not JobStatus.CANCELLED:\n self.log.warning(\"cancel() was successful for job %s but its status is %s.\",\n job.job_id(), status)\n except JobError:\n pass\n\n @requires_provider\n def test_retrieve_jobs(self, provider):\n \"\"\"Test retrieving jobs.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n job_list = provider.backends.jobs(backend_name=backend.name(), limit=5, skip=0)\n for job in job_list:\n self.assertTrue(isinstance(job.job_id(), str))\n\n @requires_provider\n def test_retrieve_job(self, provider):\n \"\"\"Test retrieving a single job.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n\n qobj = assemble(transpile(self._qc, backend=backend), backend=backend)\n job = backend.run(qobj)\n\n retrieved_job = provider.backends.retrieve_job(job.job_id())\n self.assertEqual(job.job_id(), retrieved_job.job_id())\n self.assertEqual(job.result().get_counts(), retrieved_job.result().get_counts())\n self.assertEqual(job.qobj().to_dict(), qobj.to_dict())\n\n @requires_device\n @requires_provider\n def test_retrieve_job_uses_appropriate_backend(self, backend, provider):\n \"\"\"Test that retrieved jobs come from their appropriate backend.\"\"\"\n backend_1 = backend\n # Get a second backend.\n backend_2 = None\n for backend_2 in provider.backends():\n if backend_2.status().operational and backend_2.name() != backend_1.name():\n break\n if not backend_2:\n raise SkipTest('Skipping test that requires multiple backends')\n\n qobj_1 = assemble(\n transpile(self._qc, backend=backend_1), backend=backend_1)\n job_1 = backend_1.run(qobj_1)\n\n qobj_2 = assemble(\n transpile(self._qc, backend=backend_2), backend=backend_2)\n job_2 = backend_2.run(qobj_2)\n\n # test a retrieved job's backend is the same as the queried backend\n self.assertEqual(backend_1.retrieve_job(job_1.job_id()).backend().name(),\n backend_1.name())\n self.assertEqual(backend_2.retrieve_job(job_2.job_id()).backend().name(),\n backend_2.name())\n\n # test retrieve requests for jobs that exist on other backends throw errors\n with self.assertWarns(Warning) as context_manager:\n self.assertRaises(IBMQBackendError,\n backend_1.retrieve_job, job_2.job_id())\n self.assertIn('belongs to', str(context_manager.warning))\n with self.assertWarns(Warning) as context_manager:\n self.assertRaises(IBMQBackendError,\n backend_2.retrieve_job, job_1.job_id())\n self.assertIn('belongs to', str(context_manager.warning))\n\n # Cleanup\n for job in [job_1, job_2]:\n try:\n 
job.cancel()\n except JobError:\n pass\n\n @requires_provider\n def test_retrieve_job_error(self, provider):\n \"\"\"Test retrieving an invalid job.\"\"\"\n self.assertRaises(IBMQBackendError, provider.backends.retrieve_job, 'BAD_JOB_ID')\n\n @requires_provider\n def test_retrieve_jobs_status(self, provider):\n \"\"\"Test retrieving jobs filtered by status.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n job_list = provider.backends.jobs(backend_name=backend.name(),\n limit=5, skip=0, status=JobStatus.DONE)\n\n self.assertTrue(job_list)\n for job in job_list:\n self.assertTrue(job.status() is JobStatus.DONE)\n\n @requires_provider\n def test_retrieve_jobs_start_datetime(self, provider):\n \"\"\"Test retrieving jobs created after a specified datetime.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n past_month = datetime.now() - timedelta(days=30)\n past_month_str = past_month.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n\n job_list = provider.backends.jobs(backend_name=backend.name(),\n limit=5, skip=0, start_datetime=past_month)\n self.assertTrue(job_list)\n for i, job in enumerate(job_list):\n self.assertTrue(job.creation_date() >= past_month_str,\n '{}) job creation_date {} is not '\n 'greater than or equal to past month: {}'\n .format(i, job.creation_date(), past_month_str))\n\n @requires_qe_access\n def test_retrieve_jobs_end_datetime(self, qe_token, qe_url):\n \"\"\"Test retrieving jobs created before a specified datetime.\"\"\"\n ibmq_factory = IBMQFactory()\n provider = ibmq_factory.enable_account(qe_token, qe_url)\n backend = provider.get_backend('ibmq_qasm_simulator')\n past_month = datetime.now() - timedelta(days=30)\n past_month_str = past_month.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n\n job_list = provider.backends.jobs(backend_name=backend.name(),\n limit=5, skip=0, end_datetime=past_month)\n self.assertTrue(job_list)\n for i, job in enumerate(job_list):\n self.assertTrue(job.creation_date() <= past_month_str,\n '{}) job creation_date {} is not '\n 'less than or equal to past month: {}'\n .format(i, job.creation_date(), past_month_str))\n\n @requires_qe_access\n def test_retrieve_jobs_between_datetimes(self, qe_token, qe_url):\n \"\"\"Test retrieving jobs created between two specified datetimes.\"\"\"\n ibmq_factory = IBMQFactory()\n provider = ibmq_factory.enable_account(qe_token, qe_url)\n backend = provider.get_backend('ibmq_qasm_simulator')\n date_today = datetime.now()\n\n past_month = date_today - timedelta(30)\n past_month_str = past_month.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n past_two_month = date_today - timedelta(60)\n past_two_month_str = past_two_month.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n\n job_list = provider.backends.jobs(backend_name=backend.name(), limit=5, skip=0,\n start_datetime=past_two_month, end_datetime=past_month)\n self.assertTrue(job_list)\n for i, job in enumerate(job_list):\n self.assertTrue((past_two_month_str <= job.creation_date() <= past_month_str),\n '{}) job creation date {} is not '\n 'between past two month {} and past month {}'\n .format(i, past_two_month_str, job.creation_date(), past_month_str))\n\n @requires_qe_access\n def test_retrieve_jobs_between_datetimes_not_overriden(self, qe_token, qe_url):\n \"\"\"Test retrieving jobs created between two specified datetimes\n and ensure `db_filter` does not override datetime arguments.\"\"\"\n ibmq_factory = IBMQFactory()\n provider = ibmq_factory.enable_account(qe_token, qe_url)\n backend = provider.get_backend('ibmq_qasm_simulator')\n date_today = datetime.now()\n\n 
past_two_month = date_today - timedelta(30)\n past_two_month_str = past_two_month.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n past_three_month = date_today - timedelta(60)\n past_three_month_str = past_three_month.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n\n # Used for `db_filter`, should not override `start_datetime` and `end_datetime` arguments.\n past_ten_days = date_today - timedelta(10)\n\n job_list = provider.backends.jobs(backend_name=backend.name(), limit=5, skip=0,\n start_datetime=past_three_month,\n end_datetime=past_two_month,\n db_filter={'creationDate': {'gt': past_ten_days}})\n self.assertTrue(job_list)\n for i, job in enumerate(job_list):\n self.assertTrue((past_three_month_str <= job.creation_date() <= past_two_month_str),\n '{}) job creation date {} is not '\n 'between past three month {} and past two month {}'\n .format(i, past_three_month_str,\n job.creation_date(), past_two_month_str))\n\n @requires_provider\n def test_retrieve_jobs_db_filter(self, provider):\n \"\"\"Test retrieving jobs using db_filter.\"\"\"\n # TODO: consider generalizing backend name\n backend = provider.get_backend('ibmq_qasm_simulator')\n\n # Submit jobs with desired attributes.\n qc = QuantumCircuit(3, 3)\n qc.h(0)\n qc.measure([0, 1, 2], [0, 1, 2])\n qobj = assemble(transpile(qc, backend=backend), backend=backend)\n for _ in range(2):\n backend.run(qobj).result()\n\n my_filter = {'backend.name': backend.name(),\n 'summaryData.summary.qobj_config.n_qubits': 3,\n 'status': 'COMPLETED'}\n\n job_list = provider.backends.jobs(backend_name=backend.name(),\n limit=2, skip=0, db_filter=my_filter)\n self.assertTrue(job_list)\n\n for job in job_list:\n job.refresh()\n self.assertEqual(\n job.summary_data['summary']['qobj_config']['n_qubits'], 3,\n \"Job {} does not have correct data.\".format(job.job_id())\n )\n\n @requires_provider\n def test_retrieve_jobs_filter_date(self, provider):\n \"\"\"Test retrieving jobs filtered by date.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n date_today = datetime.now()\n date_today_str = date_today.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n\n my_filter = {'creationDate': {'lt': date_today.isoformat()}}\n job_list = provider.backends.jobs(backend_name=backend.name(),\n limit=5, db_filter=my_filter)\n\n self.assertTrue(job_list)\n self.log.info('found %s matching jobs', len(job_list))\n for i, job in enumerate(job_list):\n self.log.info('match #%d: %s', i, job.creation_date())\n self.assertTrue(job.creation_date() < date_today_str,\n '{}) job.creation_date: {}, date_today: {}'\n .format(i, job.creation_date(), date_today_str))\n\n @requires_provider\n def test_double_submit_fails(self, provider):\n \"\"\"Test submitting a job twice.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n\n qobj = assemble(transpile(self._qc, backend=backend), backend=backend)\n # backend.run() will automatically call job.submit()\n job = backend.run(qobj)\n with self.assertRaises(IBMQJobInvalidStateError):\n job.submit()\n\n @requires_provider\n def test_retrieve_failed_job_simulator_partial(self, provider):\n \"\"\"Test retrieving partial results from a simulator backend.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n\n qc_new = transpile(self._qc, backend)\n qobj = assemble([qc_new, qc_new], backend=backend)\n qobj.experiments[1].instructions[1].name = 'bad_instruction'\n\n job = backend.run(qobj)\n result = job.result(partial=True)\n\n self.assertIsInstance(result, Result)\n self.assertTrue(result.results[0].success)\n 
self.assertFalse(result.results[1].success)\n\n @slow_test\n @requires_provider\n def test_pulse_job(self, provider):\n \"\"\"Test running a pulse job.\"\"\"\n backends = provider.backends(open_pulse=True, operational=True)\n if not backends:\n raise SkipTest('Skipping pulse test since no pulse backend found.')\n\n backend = least_busy(backends)\n config = backend.configuration()\n defaults = backend.defaults()\n inst_map = defaults.circuit_instruction_map\n\n # Run 2 experiments - 1 with x pulse and 1 without\n x = inst_map.get('x', 0)\n measure = inst_map.get('measure', range(config.n_qubits)) << x.duration\n ground_sched = measure\n excited_sched = x | measure\n schedules = [ground_sched, excited_sched]\n\n qobj = assemble(schedules, backend, meas_level=1, shots=256)\n job = backend.run(qobj)\n _ = job.result()\n\n @requires_provider\n def test_retrieve_from_retired_backend(self, provider):\n \"\"\"Test retrieving a job from a retired backend.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n qobj = assemble(transpile(self._qc, backend=backend), backend=backend)\n job = backend.run(qobj)\n\n del provider._backends['ibmq_qasm_simulator']\n new_job = provider.backends.retrieve_job(job.job_id())\n self.assertTrue(isinstance(new_job.backend(), IBMQRetiredBackend))\n self.assertNotEqual(new_job.backend().name(), 'unknown')\n\n new_job2 = provider.backends.jobs(db_filter={'id': job.job_id()})[0]\n self.assertTrue(isinstance(new_job2.backend(), IBMQRetiredBackend))\n self.assertNotEqual(new_job2.backend().name(), 'unknown')\n\n @requires_provider\n def test_refresh_job_result(self, provider):\n \"\"\"Test re-retrieving job result via refresh.\"\"\"\n backend = provider.get_backend('ibmq_qasm_simulator')\n qobj = assemble(transpile(self._qc, backend=backend), backend=backend)\n job = backend.run(qobj)\n result = job.result()\n\n # Save original cached results.\n cached_result = copy.deepcopy(result)\n self.assertTrue(cached_result)\n\n # Modify cached results.\n result.results[0].header.name = 'modified_result'\n self.assertNotEqual(cached_result, result)\n self.assertEqual(result.results[0].header.name, 'modified_result')\n\n # Re-retrieve result via refresh.\n result = job.result(refresh=True)\n self.assertEqual(cached_result, result)\n self.assertNotEqual(result.results[0].header.name, 'modified_result')\n\n\ndef _bell_circuit():\n qr = QuantumRegister(2, 'q')\n cr = ClassicalRegister(2, 'c')\n qc = QuantumCircuit(qr, cr)\n qc.h(qr[0])\n qc.cx(qr[0], qr[1])\n qc.measure(qr, cr)\n return qc\n"
] |
[
[
"scipy.stats.chi2_contingency"
]
] |
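Aside: the job-retrieval tests in the row above filter and compare jobs by their `creation_date()` strings rather than by datetime objects. That is valid because the fixed-width '%Y-%m-%dT%H:%M:%S.%fZ' format sorts lexicographically in chronological order. A minimal standard-library sketch of that property (values are illustrative, no IBMQ account needed):

from datetime import datetime, timedelta

fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
now = datetime.now()
past_month = now - timedelta(days=30)

a = past_month.strftime(fmt)
b = now.strftime(fmt)

# Lexicographic order matches chronological order for this fixed-width format,
# so plain string comparison is a correct date filter.
assert a <= b
print(a, '<=', b)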
vandedok/dlcourse_ai
|
[
"b5ef97029980d3e9b5f75e3cf0b43762fc53ca2f"
] |
[
"assignments/assignment2/.ipynb_checkpoints/gradient_check-checkpoint.py"
] |
[
"import numpy as np\n\n\ndef check_gradient(f, x, delta=1e-5, tol=1e-4):\n \"\"\"\n Checks the implementation of analytical gradient by comparing\n it to numerical gradient using two-point formula\n\n Arguments:\n f: function that receives x and computes value and gradient\n x: np array, initial point where gradient is checked\n delta: step to compute numerical gradient\n tol: tolerance for comparing numerical and analytical gradient\n\n Return:\n bool indicating whether gradients match or not\n \"\"\"\n print(\"point 1\")\n assert isinstance(x, np.ndarray)\n assert x.dtype == np.float\n\n fx, analytic_grad = f(x)\n analytic_grad = analytic_grad.copy()\n \n print(\"point 2\")\n assert analytic_grad.shape == x.shape\n\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n ix = it.multi_index\n analytic_grad_at_ix = analytic_grad[ix]\n x_plus = x.copy()\n x_minus = x.copy()\n x_plus[ix] += delta\n x_minus[ix] -= delta\n\n numeric_grad_at_ix = (f(x_plus)[0] - f(x_minus)[0]) / (2 * delta) \n\n # TODO compute value of numeric gradient of f to idx\n if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):\n print(\"Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f\" % (ix, analytic_grad_at_ix, numeric_grad_at_ix))\n return False\n\n it.iternext()\n\n print(\"Gradient check passed!\")\n return True\n\n\ndef check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n \"\"\"\n Checks gradient correctness for the input and output of a layer\n\n Arguments:\n layer: neural network layer, with forward and backward functions\n x: starting point for layer input\n delta: step to compute numerical gradient\n tol: tolerance for comparing numerical and analytical gradient\n\n Returns:\n bool indicating whether gradients match or not\n \"\"\"\n output = layer.forward(x)\n output_weight = np.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n \n return check_gradient(helper_func, x, delta, tol)\n\n\ndef check_layer_param_gradient(layer, x,\n param_name,\n delta=1e-5, tol=1e-4):\n \"\"\"\n Checks gradient correctness for the parameter of the layer\n\n Arguments:\n layer: neural network layer, with forward and backward functions\n x: starting point for layer input\n param_name: name of the parameter\n delta: step to compute numerical gradient\n tol: tolerance for comparing numerical and analytical gradient\n\n Returns:\n bool indicating whether gradients match or not\n \"\"\"\n param = layer.params()[param_name]\n initial_w = param.value\n\n output = layer.forward(x)\n output_weight = np.random.randn(*output.shape)\n \n\n def helper_func(w):\n param.value = w\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n d_out = np.ones_like(output) * output_weight\n layer.backward(d_out)\n grad = param.grad\n return loss, grad\n #print(initial_w.shape)\n #print(helper_func(initial_w)[1].shape)\n\n return check_gradient(helper_func, initial_w, delta, tol)\n\n\ndef check_model_gradient(model, X, y,\n delta=1e-5, tol=1e-4):\n \"\"\"\n Checks gradient correctness for all model parameters\n\n Arguments:\n model: neural network model with compute_loss_and_gradients\n X: batch of input data\n y: batch of labels\n delta: step to compute numerical gradient\n tol: tolerance for comparing numerical and analytical gradient\n\n Returns:\n bool indicating whether gradients match or not\n \"\"\"\n 
\n\n layers = model.layers\n params = model.params()\n for param_key in params.keys():\n print(\"Checking gradient for %s parameter\" % param_key)\n param = params[param_key]\n initial_w = param.value\n\n def helper_func(w):\n param.value = w\n loss = model.compute_loss_and_gradients(X, y)\n grad = param.grad\n return loss, grad\n\n if not check_gradient(helper_func, initial_w, delta, tol):\n return False\n\n return True\n"
] |
[
[
"numpy.ones_like",
"numpy.nditer",
"numpy.random.randn",
"numpy.sum",
"numpy.isclose"
]
] |
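Aside: the gradient_check module in the row above validates analytic gradients with the two-point (central-difference) formula (f(x+d) - f(x-d)) / (2d). One caveat in the code as captured: `np.isclose(a, b, tol)` passes `tol` positionally as `rtol`, not `atol`. A self-contained sketch of the same technique on a function with a known gradient (the function and point are illustrative, not from the repo):

import numpy as np

def f(x):
    # f(x) = sum(x**2) has the exact analytic gradient 2*x.
    return np.sum(x**2), 2 * x

x = np.array([1.0, -2.0, 0.5])
delta = 1e-5
numeric = np.zeros_like(x)
for i in range(x.size):
    x_plus, x_minus = x.copy(), x.copy()
    x_plus[i] += delta
    x_minus[i] -= delta
    # Central difference of the scalar value along coordinate i.
    numeric[i] = (f(x_plus)[0] - f(x_minus)[0]) / (2 * delta)

# Compare with an explicit absolute tolerance.
assert np.allclose(numeric, f(x)[1], atol=1e-4)
print("gradient check passed:", numeric)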
pauxy-qmc/pauxy
|
[
"1da80284284769b59361c73cfa3c2d914c74a73f",
"1da80284284769b59361c73cfa3c2d914c74a73f"
] |
[
"pauxy/estimators/mixed.py",
"pauxy/estimators/tests/simple_3d.py"
] |
[
"import h5py\nimport numpy\ntry:\n from mpi4py import MPI\n mpi_sum = MPI.SUM\nexcept ImportError:\n mpi_sum = None\nimport scipy.linalg\nimport time\nfrom pauxy.estimators.utils import H5EstimatorHelper\nfrom pauxy.estimators.ci import get_hmatel\nfrom pauxy.estimators.thermal import particle_number, one_rdm_from_G\ntry:\n from pauxy.estimators.ueg import local_energy_ueg\n from pauxy.estimators.pw_fft import local_energy_pw_fft\nexcept ImportError as e:\n print(e)\nfrom pauxy.estimators.hubbard import local_energy_hubbard, local_energy_hubbard_ghf,\\\n local_energy_hubbard_holstein\nfrom pauxy.estimators.greens_function import gab_mod_ovlp, gab_mod\nfrom pauxy.estimators.generic import (\n local_energy_generic_opt,\n local_energy_generic,\n local_energy_generic_pno,\n local_energy_generic_cholesky,\n local_energy_generic_cholesky_opt,\n local_energy_generic_cholesky_opt_stochastic\n)\nfrom pauxy.utils.io import format_fixed_width_strings, format_fixed_width_floats\nfrom pauxy.utils.misc import dotdict\n\n\nclass Mixed(object):\n \"\"\"Class for computing mixed estimates.\n\n Parameters\n ----------\n mixed : dict\n Input options for mixed estimates.\n root : bool\n True if on root/master processor.\n qmc : :class:`pauxy.state.QMCOpts` object.\n Container for qmc input options.\n trial : :class:`pauxy.trial_wavefunction.X' object\n Trial wavefunction class.\n dtype : complex or float\n Output type.\n\n Attributes\n ----------\n nmeasure : int\n Max number of measurements.\n nreg : int\n Number of regular estimates (exluding iteration).\n G : :class:`numpy.ndarray`\n One-particle RDM.\n estimates : :class:`numpy.ndarray`\n Store for mixed estimates per processor.\n global_estimates : :class:`numpy.ndarray`\n Store for mixed estimates accross all processors.\n names : :class:`pauxy.estimators.EstimEnum`\n Enum for locating estimates in estimates array.\n header : int\n Output header.\n key : dict\n Explanation of output.\n output : :class:`pauxy.estimators.H5EstimatorHelper`\n Class for outputting data to HDF5 group.\n output : :class:`pauxy.estimators.H5EstimatorHelper`\n Class for outputting rdm data to HDF5 group.\n \"\"\"\n\n def __init__(self, mixed, system, root, filename, qmc, trial, dtype):\n self.average_gf = mixed.get('average_gf', False)\n self.eval_energy = mixed.get('evaluate_energy', True)\n self.calc_one_rdm = mixed.get('one_rdm', False)\n self.calc_two_rdm = mixed.get('two_rdm', None)\n self.energy_eval_freq = mixed.get('energy_eval_freq', None)\n if self.energy_eval_freq is None:\n self.energy_eval_freq = qmc.nsteps\n self.verbose = mixed.get('verbose', True)\n # number of steps per block\n self.nsteps = qmc.nsteps\n self.header = ['Iteration', 'WeightFactor', 'Weight', 'ENumer',\n 'EDenom', 'ETotal', 'E1Body', 'E2Body', 'EHybrid',\n 'Overlap']\n if qmc.beta is not None:\n self.thermal = True\n self.header.append('Nav')\n else:\n self.thermal = False\n self.header.append('Time')\n self.nreg = len(self.header[1:])\n self.dtype = dtype\n self.G = numpy.zeros((2,system.nbasis,system.nbasis), dtype)\n if self.calc_one_rdm:\n dms_size = self.G.size\n else:\n dms_size = 0\n self.eshift = numpy.array([0,0])\n # Abuse of language for the moment. 
Only accumulates S(k) for UEG.\n # TODO: Add functionality to accumulate 2RDM?\n if self.calc_two_rdm is not None:\n if self.calc_two_rdm == \"structure_factor\":\n two_rdm_shape = (2,2,len(system.qvecs),)\n self.two_rdm = numpy.zeros(two_rdm_shape,\n dtype=numpy.complex128)\n dms_size += self.two_rdm.size\n else:\n self.two_rdm = None\n self.estimates = numpy.zeros(self.nreg+dms_size, dtype=dtype)\n self.names = get_estimator_enum(self.thermal)\n self.estimates[self.names.time] = time.time()\n self.global_estimates = numpy.zeros(self.nreg+dms_size,\n dtype=dtype)\n self.key = {\n 'Iteration': \"Simulation iteration. iteration*dt = tau.\",\n 'WeightFactor': \"Rescaling Factor from population control.\",\n 'Weight': \"Total walker weight.\",\n 'E_num': \"Numerator for projected energy estimator.\",\n 'E_denom': \"Denominator for projected energy estimator.\",\n 'ETotal': \"Projected energy estimator.\",\n 'E1Body': \"Mixed one-body energy estimator.\",\n 'E2Body': \"Mixed two-body energy estimator.\",\n 'EHybrid': \"Hybrid energy.\",\n 'Overlap': \"Walker average overlap.\",\n 'Nav': \"Average number of electrons.\",\n 'Time': \"Time per processor to complete one iteration.\",\n }\n if root:\n self.setup_output(filename)\n\n def update(self, system, qmc, trial, psi, step, free_projection=False):\n \"\"\"Update mixed estimates for walkers.\n\n Parameters\n ----------\n system : system object.\n Container for model input options.\n qmc : :class:`pauxy.state.QMCOpts` object.\n Container for qmc input options.\n trial : :class:`pauxy.trial_wavefunction.X' object\n Trial wavefunction class.\n psi : :class:`pauxy.walkers.Walkers` object\n CPMC wavefunction.\n step : int\n Current simulation step\n free_projection : bool\n True if doing free projection.\n \"\"\"\n if free_projection:\n for i, w in enumerate(psi.walkers):\n # For T > 0 w.ot = 1 always.\n wfac = w.weight * w.ot * w.phase# * numpy.exp(w.log_detR-w.log_detR_shift)\n if step % self.energy_eval_freq == 0:\n w.greens_function(trial)\n if self.eval_energy:\n if self.thermal:\n E, T, V = w.local_energy(system)\n else:\n E, T, V = w.local_energy(system, rchol=trial._rchol, eri=trial._eri)\n else:\n E, T, V = 0, 0, 0\n self.estimates[self.names.enumer] += wfac * E\n self.estimates[self.names.e1b:self.names.e2b+1] += (\n wfac * numpy.array([T,V])\n )\n self.estimates[self.names.edenom] += wfac\n if self.thermal:\n nav = particle_number(one_rdm_from_G(w.G))\n self.estimates[self.names.nav] += wfac * nav\n self.estimates[self.names.uweight] += w.unscaled_weight\n self.estimates[self.names.weight] += wfac\n self.estimates[self.names.ehyb] += wfac * w.hybrid_energy\n self.estimates[self.names.ovlp] += w.weight * abs(w.ot)\n else:\n # When using importance sampling we only need to know the current\n # walkers weight as well as the local energy, the walker's overlap\n # with the trial wavefunction is not needed.\n for i, w in enumerate(psi.walkers):\n if self.thermal:\n if self.average_gf:\n E_sum = 0\n T_sum = 0\n V_sum = 0\n nav = 0\n for ts in range(w.stack_length):\n w.greens_function(trial, slice_ix=ts*w.stack_size)\n E, T, V = w.local_energy(system,\n two_rdm=self.two_rdm)\n E_sum += E\n T_sum += T\n V_sum += V\n nav += particle_number(one_rdm_from_G(w.G))\n self.estimates[self.names.nav] += w.weight * nav / w.stack_length\n self.estimates[self.names.enumer] += w.weight*E_sum.real/w.stack_length\n self.estimates[self.names.e1b:self.names.e2b+1] += (\n w.weight*numpy.array([T_sum,V_sum]).real/w.stack_length\n )\n else:\n 
w.greens_function(trial)\n E, T, V = w.local_energy(system, two_rdm=self.two_rdm)\n nav = particle_number(one_rdm_from_G(w.G))\n self.estimates[self.names.nav] += w.weight * nav\n self.estimates[self.names.enumer] += w.weight*E.real\n self.estimates[self.names.e1b:self.names.e2b+1] += (\n w.weight*numpy.array([T,V]).real\n )\n self.estimates[self.names.edenom] += w.weight\n else:\n if step % self.energy_eval_freq == 0:\n w.greens_function(trial)\n if self.eval_energy:\n E, T, V = w.local_energy(system, rchol=trial._rchol, eri=trial._eri, UVT=trial._UVT)\n else:\n E, T, V = 0, 0, 0\n self.estimates[self.names.enumer] += w.weight*w.le_oratio*E.real\n self.estimates[self.names.e1b:self.names.e2b+1] += (\n w.weight*w.le_oratio*numpy.array([T,V]).real\n )\n self.estimates[self.names.edenom] += w.weight * w.le_oratio\n self.estimates[self.names.uweight] += w.unscaled_weight\n self.estimates[self.names.weight] += w.weight\n self.estimates[self.names.ovlp] += w.weight * abs(w.ot)\n self.estimates[self.names.ehyb] += w.weight * w.hybrid_energy\n if self.calc_one_rdm:\n start = self.names.time+1\n end = self.names.time+1+w.G.size\n self.estimates[start:end] += w.weight*w.G.flatten().real\n if self.calc_two_rdm is not None:\n start = end\n end = end + self.two_rdm.size\n self.estimates[start:end] += w.weight*self.two_rdm.flatten().real\n\n def print_step(self, comm, nprocs, step, nsteps=None, free_projection=False):\n \"\"\"Print mixed estimates to file.\n\n This reduces estimates arrays over processors. On return estimates\n arrays are zerod.\n\n Parameters\n ----------\n comm :\n MPI communicator.\n nprocs : int\n Number of processors.\n step : int\n Current iteration number.\n nmeasure : int\n Number of steps between measurements.\n \"\"\"\n if step % self.nsteps != 0:\n return\n if nsteps is None:\n nsteps = self.nsteps\n es = self.estimates\n ns = self.names\n es[ns.time] = (time.time()-es[ns.time]) / nprocs\n es[ns.uweight:ns.weight+1] /= nsteps\n es[ns.ehyb:ns.time+1] /= nsteps\n comm.Reduce(es, self.global_estimates, op=mpi_sum)\n gs = self.global_estimates\n if comm.rank == 0:\n gs[ns.eproj] = gs[ns.enumer]\n gs[ns.eproj:ns.e2b+1] = gs[ns.eproj:ns.e2b+1] / gs[ns.edenom]\n gs[ns.ehyb] /= gs[ns.weight]\n gs[ns.ovlp] /= gs[ns.weight]\n eshift = numpy.array([gs[ns.ehyb],gs[ns.eproj]])\n else:\n eshift = numpy.array([0,0])\n if self.thermal and comm.rank == 0:\n gs[ns.nav] = gs[ns.nav] / gs[ns.weight]\n eshift = comm.bcast(eshift, root=0)\n self.eshift = eshift\n if comm.rank == 0:\n if self.verbose:\n print(format_fixed_width_floats([step]+list(gs[:ns.time+1].real)))\n self.output.push([step]+list(gs[:ns.time+1]), 'energies')\n if self.calc_one_rdm:\n start = self.nreg\n end = self.nreg+self.G.size\n rdm = gs[start:end].reshape(self.G.shape) / nsteps\n self.output.push(rdm/gs[ns.weight], 'one_rdm')\n if self.calc_two_rdm:\n start = self.nreg + self.G.size\n rdm = gs[start:].reshape(self.two_rdm.shape) / nsteps\n self.output.push(rdm/gs[ns.weight], 'two_rdm')\n self.output.increment()\n self.zero()\n\n def print_key(self, eol='', encode=False):\n \"\"\"Print out information about what the estimates are.\n\n Parameters\n ----------\n eol : string, optional\n String to append to output, e.g., Default : ''.\n encode : bool\n In True encode output to be utf-8.\n \"\"\"\n header = (\n eol + '# Explanation of output column headers:\\n' +\n '# -------------------------------------' + eol\n )\n if encode:\n header = header.encode('utf-8')\n print(header)\n for (k, v) in self.key.items():\n s = '# %s : 
%s' % (k, v) + eol\n if encode:\n s = s.encode('utf-8')\n print(s)\n\n def print_header(self, eol='', encode=False):\n r\"\"\"Print out header for estimators\n\n Parameters\n ----------\n eol : string, optional\n String to append to output, Default : ''.\n encode : bool\n In True encode output to be utf-8.\n\n Returns\n -------\n None\n \"\"\"\n s = format_fixed_width_strings(self.header) + eol\n if encode:\n s = s.encode('utf-8')\n print(s)\n\n def projected_energy(self):\n \"\"\"Computes projected energy from estimator array.\n\n Returns\n -------\n eproj : float\n Mixed estimate for projected energy.\n \"\"\"\n numerator = self.estimates[self.names.enumer]\n denominator = self.estimates[self.names.edenom]\n return (numerator / denominator).real\n\n def get_shift(self, hybrid=True):\n \"\"\"get hybrid shift.\n\n parameters\n ----------\n hybrid : bool\n true if using hybrid propgation\n returns\n -------\n eshift : float\n walker averaged hybrid energy.\n \"\"\"\n if hybrid:\n return self.eshift[0].real\n else:\n return self.eshift[1].real\n\n def zero(self):\n \"\"\"Zero (in the appropriate sense) various estimator arrays.\"\"\"\n self.estimates[:] = 0\n self.global_estimates[:] = 0\n self.estimates[self.names.time] = time.time()\n\n def setup_output(self, filename):\n with h5py.File(filename, 'a') as fh5:\n fh5['basic/headers'] = numpy.array(self.header).astype('S')\n self.output = H5EstimatorHelper(filename, 'basic')\n\n# Energy evaluation routines for the Hubbard-Holstein model.\ndef local_energy_hh(system, G, X, Lap, Ghalf=None):\n if system.name == \"HubbardHolstein\":\n (e1, e2, e3) = local_energy_hubbard_holstein(system, G, X, Lap, Ghalf)\n return (e1, e2, e3)\n else:\n print(\"SOMETHING IS VERY WRONG... WHY ARE YOU CALLING HUBBARD-HOSTEIN FUNCTION?\")\n exit()\n\n# Energy evaluation routines.\ndef local_energy(system, G, Ghalf=None,\n two_rdm=None,\n rchol=None, eri=None, C0=None, ecoul0=None, exxa0=None, exxb0=None, UVT=None):\n \"\"\"Helper routine to compute local energy.\n\n Parameters\n ----------\n system : system object\n system object.\n G : :class:`numpy.ndarray`\n 1RDM.\n C0 : :class:`numpy.ndarray`\n trial C.\n\n Returns\n -------\n (E,T,V) : tuple\n Total, one-body and two-body energy.\n \"\"\"\n ghf = (G.shape[-1] == 2*system.nbasis)\n # unfortunate interfacial problem for the HH model\n if system.name == \"Hubbard\" or system.name == \"HubbardHolstein\":\n if ghf:\n return local_energy_ghf(system, G)\n else:\n return local_energy_hubbard(system, G)\n elif system.name == \"PW_FFT\":\n return local_energy_pw_fft(system, G, Ghalf, two_rdm=two_rdm)\n elif system.name == \"UEG\":\n return local_energy_ueg(system, G, two_rdm=two_rdm)\n else:\n if Ghalf is not None:\n if system.stochastic_ri and system.control_variate:\n return local_energy_generic_cholesky_opt_stochastic(system, G,\n nsamples=system.nsamples,\n Ghalf=Ghalf,\n rchol=rchol, C0=C0, ecoul0=ecoul0,\n exxa0=exxa0,\n exxb0=exxb0)\n elif system.stochastic_ri and not system.control_variate:\n return local_energy_generic_cholesky_opt_stochastic(system, G,\n nsamples=system.nsamples,\n Ghalf=Ghalf,\n rchol=rchol)\n elif system.exact_eri and not system.pno:\n return local_energy_generic_opt(system, G, Ghalf=Ghalf, eri=eri)\n elif system.pno:\n assert(system.exact_eri and system.control_variate)\n return local_energy_generic_pno(system, G, Ghalf=Ghalf, eri=eri, C0=C0, ecoul0=ecoul0, exxa0=exxa0, exxb0=exxb0, UVT=UVT)\n else:\n return local_energy_generic_cholesky_opt(system, G,\n Ghalf=Ghalf,\n rchol=rchol)\n else:\n 
return local_energy_generic_cholesky(system, G)\n\ndef local_energy_multi_det(system, Gi, weights, two_rdm=None, rchol=None):\n weight = 0\n energies = 0\n denom = 0\n for w, G in zip(weights, Gi):\n # construct \"local\" green's functions for each component of A\n energies += w * numpy.array(local_energy(system, G, rchol = rchol))\n denom += w\n\n return tuple(energies/denom)\n\ndef local_energy_multi_det_hh(system, Gi, weights, X, Lapi, two_rdm=None):\n weight = 0\n energies = 0\n denom = 0\n for w, G, Lap in zip(weights, Gi, Lapi):\n # construct \"local\" green's functions for each component of A\n energies += w * numpy.array(local_energy_hubbard_holstein(system, G, X, Lap, Ghalf=None))\n denom += w\n return tuple(energies/denom)\n\ndef get_estimator_enum(thermal=False):\n keys = ['uweight', 'weight', 'enumer', 'edenom',\n 'eproj', 'e1b', 'e2b', 'ehyb', 'ovlp']\n if thermal:\n keys.append('nav')\n keys.append('time')\n enum = {}\n for v, k in enumerate(keys):\n enum[k] = v\n return dotdict(enum)\n\n\ndef eproj(estimates, enum):\n \"\"\"Real projected energy.\n\n Parameters\n ----------\n estimates : numpy.array\n Array containing estimates averaged over all processors.\n enum : :class:`pauxy.estimators.EstimatorEnum` object\n Enumerator class outlining indices of estimates array elements.\n\n Returns\n -------\n eproj : float\n Projected energy from current estimates array.\n \"\"\"\n\n numerator = estimates[enum.enumer]\n denominator = estimates[enum.edenom]\n return (numerator/denominator).real\n\ndef variational_energy(system, psi, coeffs, G=None, GH=None, rchol=None, eri=None, \n C0 = None,ecoul0 =None,exxa0 = None,exxb0 = None,UVT=None):\n if len(psi.shape) == 2:\n return variational_energy_single_det(system, psi,\n G=G, GH=GH,\n rchol=rchol, eri=eri, \n C0 = C0, ecoul0 = ecoul0, \n exxa0 = exxa0, exxb0 = exxb0,\n UVT=UVT)\n elif len(psi) == 1:\n return variational_energy_single_det(system, psi[0],\n G=G, GH=GH,\n rchol=rchol, eri=eri, \n C0 = C0, ecoul0 = ecoul0, \n exxa0 = exxa0, exxb0 = exxb0,\n UVT=UVT)\n else:\n return variational_energy_multi_det(system, psi, coeffs)\n\ndef variational_energy_multi_det(system, psi, coeffs, H=None, S=None):\n weight = 0\n energies = 0\n denom = 0\n nup = system.nup\n ndet = len(coeffs)\n if H is not None and S is not None:\n store = True\n else:\n store = False\n for i, (Bi, ci) in enumerate(zip(psi, coeffs)):\n for j, (Aj, cj) in enumerate(zip(psi, coeffs)):\n # construct \"local\" green's functions for each component of A\n Gup, GHup, inv_O_up = gab_mod_ovlp(Bi[:,:nup], Aj[:,:nup])\n Gdn, GHdn, inv_O_dn = gab_mod_ovlp(Bi[:,nup:], Aj[:,nup:])\n ovlp = 1.0 / (scipy.linalg.det(inv_O_up)*scipy.linalg.det(inv_O_dn))\n weight = (ci.conj()*cj) * ovlp\n G = numpy.array([Gup, Gdn])\n e = numpy.array(local_energy(system, G))\n if store:\n H[i,j] = ovlp*e[0]\n S[i,j] = ovlp\n energies += weight * e\n denom += weight\n return tuple(energies/denom)\n\ndef variational_energy_ortho_det(system, occs, coeffs):\n \"\"\"Compute variational energy for CI-like multi-determinant expansion.\n\n Parameters\n ----------\n system : :class:`pauxy.system` object\n System object.\n occs : list of lists\n list of determinants.\n coeffs : :class:`numpy.ndarray`\n Expansion coefficients.\n\n Returns\n -------\n energy : tuple of float / complex\n Total energies: (etot,e1b,e2b).\n \"\"\"\n evar = 0.0\n denom = 0.0\n one_body = 0.0\n two_body = 0.0\n for i, (occi, ci) in enumerate(zip(occs, coeffs)):\n denom += ci.conj()*ci\n for j in range(0,i+1):\n cj = coeffs[j]\n occj = 
occs[j]\n etot, e1b, e2b = ci.conj()*cj*get_hmatel(system, occi, occj)\n evar += etot\n one_body += e1b\n two_body += e2b\n if j < i:\n # Use Hermiticity\n evar += etot\n one_body += e1b\n two_body += e2b\n return evar/denom, one_body/denom, two_body/denom\n\ndef variational_energy_single_det(system, psi, G=None, GH=None, \n rchol=None, eri=None,\n C0=None, \n ecoul0=None,\n exxa0=None,\n exxb0=None, UVT=None):\n assert len(psi.shape) == 2\n return local_energy(system, G, Ghalf=GH, rchol=rchol, eri=eri, C0=C0, ecoul0=ecoul0, exxa0=exxa0, exxb0=exxb0, UVT=UVT)\n",
"import numpy\nimport os\nimport unittest\nimport itertools\nfrom pyscf import lib\nfrom scipy.fftpack.helper import next_fast_len\nfrom pauxy.estimators.greens_function import gab_mod\nfrom pauxy.utils.testing import get_random_wavefunction\n\ndef fill_up_range (nmesh):\n a = numpy.zeros(nmesh)\n n = nmesh//2\n a = numpy.linspace(-n, n, num=nmesh,dtype=numpy.int32)\n return a\n\ndef generate_fft_grid(mesh):\n gx = fill_up_range(mesh[0])\n gy = fill_up_range(mesh[1])\n gz = fill_up_range(mesh[2])\n\n kval = numpy.array(list(itertools.product(*[gx,gy,gz])), dtype=numpy.int32)\n spval = 0.5*numpy.array([numpy.dot(g,g) for g in kval])\n return kval, spval\n\ndef lookup(g, basis):\n for i, k in enumerate(basis):\n if numpy.dot(g-k,g-k) == 0:\n return i\n return None\n\n# Gives same results as scipy.signal.convolve\ndef convolve(f, g, mesh):\n f_ = f.reshape(*mesh)\n g_ = g.reshape(*mesh)\n shape = numpy.maximum(f_.shape, g_.shape)\n min_shape = numpy.array(f_.shape) + numpy.array(g_.shape) - 1\n\n nqtot = numpy.prod(min_shape)\n\n fshape = [next_fast_len(d) for d in min_shape]\n\n finv = numpy.fft.ifftn(f_, s=fshape)\n ginv = numpy.fft.ifftn(g_, s=fshape)\n fginv = finv * ginv\n fq = numpy.fft.fftn(fginv).copy().ravel()\n fq = fq.reshape(fshape)\n fq = fq[:min_shape[0],:min_shape[1],:min_shape[2]]\n fq = fq.reshape(nqtot)\n return fq\n\ndef test_fft_kmq(nalpha):\n # Create regular grid.\n nmax = 1\n mesh = [2*nmax+1]*3\n grid, eigs = generate_fft_grid(mesh)\n qmax = 2*nmax\n qmesh = [2*qmax+1]*3\n qgrid, qeigs = generate_fft_grid(qmesh)\n # Create wavefunction\n nbasis = len(grid)\n # numpy.random.seed(7)\n psi = get_random_wavefunction((nalpha,nalpha), nbasis)\n I = get_random_wavefunction((nalpha,nalpha), nbasis)\n\n # Select lowest energy states for trial\n trial = I[:,:nalpha].conj()\n G, Gh = gab_mod(trial,psi[:,:nalpha])\n nqgrid = numpy.prod(qmesh)\n\n # # Check by direct convolution f(q) = \\sum_G Psi[G-Q] Gh[G].\n fq_direct = numpy.zeros(nqgrid, dtype=numpy.complex128)\n for iq, q in enumerate(qgrid):\n for i, g in enumerate(grid):\n gmq = g - q\n igmq = lookup(gmq, grid)\n if igmq is not None:\n fq_direct[iq] += trial[igmq,0] * Gh[0,i]\n\n trial_grid = trial[:,0].reshape(mesh)\n Gh_grid = numpy.flip(Gh[0,:]).reshape(mesh)\n\n # Check by fft convolve\n # Compare to fq\n fq_conv = numpy.zeros(nqgrid, dtype=numpy.complex128)\n fq_conv += nqgrid*convolve(trial_grid, Gh_grid, mesh)\n fq_conv = numpy.flip(fq_conv)\n\n import scipy.signal\n fq_conv_sc = numpy.flip(scipy.signal.fftconvolve(trial_grid, Gh_grid).ravel())\n\n import matplotlib.pyplot as pl\n pl.plot(fq_conv, label='fft')\n pl.plot(fq_conv_sc, label='fft_scipy')\n pl.plot(fq_direct, label='direct')\n pl.legend()\n pl.show()\n \ndef test_fft_kpq(nalpha):\n # Create regular grid.\n nmax = 1\n mesh = [2*nmax+1]*3\n grid, eigs = generate_fft_grid(mesh)\n\n qmax = 2*nmax\n qmesh = [2*qmax+1]*3\n qgrid, qeigs = generate_fft_grid(qmesh)\n # Create wavefunction\n nbasis = len(grid)\n\n numpy.random.seed(7)\n psi = get_random_wavefunction((nalpha,nalpha), nbasis)\n I = get_random_wavefunction((nalpha,nalpha), nbasis)\n trial = I[:,:nalpha].conj()\n\n # Select lowest energy states for trial\n G, Gh = gab_mod(trial,psi[:,:nalpha])\n nqgrid = numpy.prod(qmesh)\n\n # # Check by direct convolution f(q) = \\sum_G Psi[G+Q] Gh[G].\n fq_direct = numpy.zeros(nqgrid, dtype=numpy.complex128)\n for i, q in enumerate(qgrid):\n for j, k in enumerate(grid):\n kpq = k + q\n ikpq = lookup(kpq, grid)\n if ikpq is not None:\n fq_direct[i] += trial[ikpq,0] * 
Gh[0,j]\n\n trial_grid = numpy.flip(trial[:,0]).reshape(mesh)\n Gh_grid = Gh[0,:].reshape(mesh)\n\n # Check by fft convolve\n # Compare to fq\n fq_conv = numpy.zeros(nqgrid, dtype=numpy.complex128)\n fq_conv += nqgrid*convolve(Gh_grid, trial_grid, mesh)\n fq_conv = numpy.flip(fq_conv)\n\n import scipy.signal\n fq_conv_sc = numpy.flip(scipy.signal.fftconvolve(Gh_grid,trial_grid).ravel())\n\n import matplotlib.pyplot as pl\n pl.plot(fq_conv, label='fft')\n pl.plot(fq_conv_sc, label='fft_scipy')\n pl.plot(fq_direct, label='direct')\n # pl.plot(fq, label='from_gf')\n pl.legend()\n pl.show()\n # print(Gtrace, Gtrace_direct, fq[1])\n\nif __name__ == '__main__':\n # test_fft_kpq(7)\n test_fft_kmq(7)\n"
] |
[
[
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.pyplot.legend",
"numpy.dot",
"numpy.maximum",
"numpy.linspace",
"numpy.random.seed",
"scipy.fftpack.helper.next_fast_len",
"numpy.fft.fftn",
"numpy.fft.ifftn",
"matplotlib.pyplot.plot",
"numpy.prod",
"numpy.array",
"numpy.flip",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] |
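Aside: the simple_3d.py tests in the row above check a direct lattice sum against FFT-based convolution. The identity they rely on is the convolution theorem: the inverse FFT of a product of zero-padded FFTs equals the full linear convolution. A 1-D sketch of that identity (arrays are arbitrary test data):

import numpy as np
import scipy.signal

rng = np.random.default_rng(7)
f = rng.standard_normal(5)
g = rng.standard_normal(5)

# Zero-pad both signals to the full linear-convolution length.
n = len(f) + len(g) - 1
fq = np.fft.ifft(np.fft.fft(f, n) * np.fft.fft(g, n)).real

# Matches scipy's FFT-based convolution (mode='full' by default).
assert np.allclose(fq, scipy.signal.fftconvolve(f, g))
print(fq)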
minhnd3796/facenet
|
[
"2951cf3ef8065a238876ba6c18d34f40cc389b13"
] |
[
"src/create_frt_pairs_file_new_test_classes.py"
] |
[
"import os\nimport random\nimport numpy as np\n\ndata_dir = '/ditmemay/raid/fti/vision.minhnd/datasets/frt/cropped_face'\ntest_class_indices = np.array([172, 2045, 1656, 1702, 2476, 65, 2079, 548, 2874, 558, 1753, 2270], dtype=np.int32)\nsecond_test_class_indices = np.array([172, 2045, 1656, 1702, 2476, 65, 2079, 548, 2874, 558, 1753, 2270], dtype=np.int32)\ntotal_classes = np.array(os.listdir(os.path.expanduser(data_dir)))\nclasses = total_classes[test_class_indices].tolist()\nsecond_classes = total_classes[second_test_class_indices].tolist()\nprint(classes)\nprint(second_classes)\nnum_classes = len(classes)\nnum_second_classes = len(second_classes)\nprint('type(num_classes):', type(num_classes))\nprint('num_classes:', num_classes)\nrandom.seed(3796)\nnum_test_pairs = 6000\nnum_folds = 10\nnum_same_diff_pairs = num_test_pairs // 2\nnum_same_diff_pairs_per_fold = num_same_diff_pairs // num_folds\nindex_tuple_list = []\n\nf_test_pairs = open(os.path.expanduser('/ditmemay/raid/fti/vision.minhnd/sources/facenet/data/frt_image_pairs_test.txt'), 'w')\nf_test_indices = open(os.path.expanduser('/ditmemay/raid/fti/vision.minhnd/sources/facenet/data/frt_index_pairs_test.txt'), 'w')\ncount_num_same_pairs = 0\nenough_pairs_flag = False\nfor class_index in range(num_classes):\n if enough_pairs_flag:\n break\n files = os.listdir(os.path.join(data_dir, classes[class_index]))\n num_files = len(files)\n for first_file_index in range(num_files):\n if enough_pairs_flag:\n break\n for second_file_index in range(num_files):\n if second_file_index == first_file_index:\n continue\n pair_index_tuple = (class_index, first_file_index, class_index, second_file_index)\n reversed_pair_index_tuple = (class_index, second_file_index, class_index, first_file_index)\n if pair_index_tuple in index_tuple_list or reversed_pair_index_tuple in index_tuple_list:\n continue\n index_tuple_list.append(pair_index_tuple)\n index_tuple_list.append(reversed_pair_index_tuple)\n f_test_pairs.write(classes[class_index] + '\\t' + files[first_file_index] + '\\t' + classes[class_index] + '\\t' + files[second_file_index] + '\\n')\n for idx in pair_index_tuple:\n f_test_indices.write(str(idx) + ' ')\n f_test_indices.write('\\n')\n for idx in reversed_pair_index_tuple:\n f_test_indices.write(str(idx) + ' ')\n f_test_indices.write('\\n')\n count_num_same_pairs += 1\n if count_num_same_pairs == num_same_diff_pairs:\n enough_pairs_flag = True\n break\n\nnum_diff_pairs_per_class = num_same_diff_pairs // num_classes\nfor first_class_index in range(num_classes):\n print('\\nfirst_class_index:', first_class_index)\n for diff_pair_index_per_class in range(num_diff_pairs_per_class):\n print('diff_pair_index_per_class:', diff_pair_index_per_class)\n second_class_index = random.randint(0, num_classes - 1)\n first_files = os.listdir(os.path.join(data_dir, classes[first_class_index]))\n num_first_files = len(first_files)\n while second_class_index == first_class_index:\n second_class_index = random.randint(0, num_classes - 1)\n second_files = os.listdir(os.path.join(data_dir, classes[second_class_index]))\n num_second_files = len(second_files)\n first_file_index = random.randint(0, num_first_files - 1)\n second_file_index = random.randint(0, num_second_files - 1)\n pair_index_tuple = (first_class_index, first_file_index, second_class_index, second_file_index)\n reversed_pair_index_tuple = (second_class_index, second_file_index, first_class_index, first_file_index)\n while pair_index_tuple in index_tuple_list or reversed_pair_index_tuple in 
index_tuple_list:\n first_file_index = random.randint(0, num_first_files - 1)\n second_file_index = random.randint(0, num_second_files - 1)\n pair_index_tuple = (first_class_index, first_file_index, second_class_index, second_file_index)\n reversed_pair_index_tuple = (second_class_index, second_file_index, first_class_index, first_file_index)\n index_tuple_list.append(pair_index_tuple)\n index_tuple_list.append(reversed_pair_index_tuple)\n f_test_pairs.write(classes[first_class_index] + '\\t' + first_files[first_file_index] + '\\t' + classes[second_class_index] + '\\t' + second_files[second_file_index] + '\\n')\n for idx in pair_index_tuple:\n f_test_indices.write(str(idx) + ' ')\n f_test_indices.write('\\n')\n for idx in reversed_pair_index_tuple:\n f_test_indices.write(str(idx) + ' ')\n f_test_indices.write('\\n')\n\nf_test_pairs.close()\nf_test_indices.close()\n\nf_read = open('frt_image_pairs_test.txt')\nlines = f_read.readlines()\nf_read.close()\n\nSAME_FLAG = 0\nDIFF_FLAG = 1\n\nf_write = open('frt_image_pairs_test.txt', 'w')\nfor fold_index in range(num_folds):\n flag = SAME_FLAG\n start_same_index = flag * num_same_diff_pairs + fold_index * num_same_diff_pairs_per_fold\n end_same_index = flag * num_same_diff_pairs + fold_index * num_same_diff_pairs_per_fold + num_same_diff_pairs_per_fold\n for pair_index in range(start_same_index, end_same_index):\n f_write.write(lines[pair_index])\n flag = DIFF_FLAG\n start_diff_index = flag * num_same_diff_pairs + fold_index * num_same_diff_pairs_per_fold\n end_diff_index = flag * num_same_diff_pairs + fold_index * num_same_diff_pairs_per_fold + num_same_diff_pairs_per_fold\n for pair_index in range(start_diff_index, end_diff_index):\n f_write.write(lines[pair_index])\n print(pair_index)\nf_write.close()\n"
] |
[
[
"numpy.array"
]
] |
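Aside: the script in the row above builds a face-verification benchmark: half "same" pairs drawn within a class and half "different" pairs drawn across classes, with reversed duplicates excluded. A toy sketch of the same sampling idea (class names and file names are made up):

import random

classes = {'alice': ['a0.png', 'a1.png'], 'bob': ['b0.png', 'b1.png']}
rng = random.Random(3796)

# Same-class pairs: every unordered file pair within a class.
same_pairs = [(c, f1, c, f2)
              for c, files in classes.items()
              for i, f1 in enumerate(files)
              for f2 in files[i + 1:]]

# Different-class pairs: random draws, rejecting duplicates.
names = list(classes)
diff_pairs = []
while len(diff_pairs) < len(same_pairs):
    c1, c2 = rng.sample(names, 2)  # two distinct classes
    pair = (c1, rng.choice(classes[c1]), c2, rng.choice(classes[c2]))
    if pair not in diff_pairs:
        diff_pairs.append(pair)

print(same_pairs)
print(diff_pairs)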
samsammurphy/sat-stac-sentinel
|
[
"2ffbcc8e47cec32809e9661995c6e157b034fb26"
] |
[
"satstac/sentinel/main.py"
] |
[
"import boto3\nimport gzip\nimport json\nimport logging\nimport requests\nimport sys\n\nimport numpy as np\nimport os.path as op\n\nfrom shapely.geometry import MultiPoint, Point\nfrom shapely import geometry\n\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse\nfrom pyproj import Proj, transform as reproj\nfrom satstac import Collection, Item, utils\nfrom .utils import get_matching_s3_keys, read_from_s3\n\nfrom .version import __version__\n\n\nlogger = logging.getLogger(__name__)\n\n_collection = Collection.open(op.join(op.dirname(__file__), 'sentinel-2-l1c.json'))\n\nSETTINGS = {\n 'roda_url': 'https://roda.sentinel-hub.com/sentinel-s2-l1c',\n 's3_url': 'https://sentinel-s2-l1c.s3.amazonaws.com',\n 'inv_bucket': 'sentinel-inventory',\n 'inv_key': 'sentinel-s2-l1c/sentinel-s2-l1c-inventory',\n 'path_pattern': '${sentinel:utm_zone}/${sentinel:latitude_band}/${sentinel:grid_square}',\n 'fname_pattern': '${date}/${id}'\n}\n\n\ndef add_items(catalog, records, start_date=None, end_date=None, s3meta=False, prefix=None, publish=None):\n \"\"\" Stream records to a collection with a transform function \n \n Keyword arguments:\n start_date -- Process this date and after\n end_date -- Process this date and earlier\n s3meta -- Retrieve metadata from s3 rather than Sinergise URL (roda)\n \"\"\"\n \n # use existing collection or create new one if it doesn't exist\n cols = {c.id: c for c in catalog.collections()}\n if 'sentinel-2-l1c' not in cols.keys():\n catalog.add_catalog(_collection)\n cols = {c.id: c for c in catalog.collections()}\n collection = cols['sentinel-2-l1c']\n\n client = None\n if publish:\n parts = publish.split(':')\n client = boto3.client('sns', region_name=parts[3])\n\n duration = []\n # iterate through records\n for i, record in enumerate(records):\n start = datetime.now()\n if i % 50000 == 0:\n logger.info('%s: Scanned %s records' % (start, str(i)))\n dt = record['datetime'].date()\n if prefix is not None:\n # if path doesn't match provided prefix skip to next record\n if record['path'][:len(prefix)] != prefix:\n continue\n if s3meta:\n url = op.join(SETTINGS['s3_url'], record['path'])\n else:\n url = op.join(SETTINGS['roda_url'], record['path'])\n #if i == 10:\n # break\n if (start_date is not None and dt < start_date) or (end_date is not None and dt > end_date):\n # skip to next if before start_date\n continue\n try:\n if s3meta:\n signed_url, headers = utils.get_s3_signed_url(url, requestor_pays=True)\n resp = requests.get(signed_url, headers=headers)\n metadata = json.loads(resp.text)\n else:\n metadata = read_remote(url)\n item = transform(metadata)\n except Exception as err:\n logger.error('Error creating STAC Item %s: %s' % (record['path'], err))\n continue\n try:\n collection.add_item(item, path=SETTINGS['path_pattern'], filename=SETTINGS['fname_pattern'])\n if client:\n client.publish(TopicArn=publish, Message=json.dumps(item.data))\n duration.append((datetime.now()-start).total_seconds())\n logger.info('Ingested %s in %s' % (item.filename, duration[-1]))\n except Exception as err:\n logger.error('Error adding %s: %s' % (item.id, err))\n logger.info('Read in %s records averaging %4.2f sec (%4.2f stddev)' % (i, np.mean(duration), np.std(duration)))\n\n\ndef read_inventory(filename):\n \"\"\" Create generator from inventory file \"\"\"\n with open(filename) as f:\n line = f.readline()\n if 'datetime' not in line:\n parts = line.split(',')\n yield {\n 'datetime': parse(parts[0]),\n 'path': parts[1].strip('\\n')\n }\n for line in f.readlines():\n 
parts = line.split(',')\n            yield {\n                'datetime': parse(parts[0]),\n                'path': parts[1].strip('\\n')\n            }\n\n\ndef latest_inventory():\n    \"\"\" Return generator function for list of scenes \"\"\"\n    s3 = boto3.client('s3')\n    # get latest file\n    today = datetime.now()\n    key = None\n    for dt in [today, today - timedelta(1)]:\n        prefix = op.join(SETTINGS['inv_key'], dt.strftime('%Y-%m-%d'))\n        keys = [k for k in get_matching_s3_keys(SETTINGS['inv_bucket'], prefix=prefix, suffix='manifest.json')]\n        if len(keys) == 1:\n            key = keys[0]\n            break\n    if key:\n        manifest = json.loads(read_from_s3(SETTINGS['inv_bucket'], key))\n        for f in manifest.get('files', []):\n            inv = read_from_s3(SETTINGS['inv_bucket'], f['key']).split('\\n')\n            inv = [i.replace('\"', '').split(',') for i in inv if 'tileInfo.json' in i]\n            for info in inv:\n                yield {\n                    'datetime': parse(info[3]),\n                    'path': info[1]\n                }\n\n\ndef transform(data):\n    \"\"\" Transform Sentinel metadata (from tileInfo.json) into a STAC item \"\"\"\n    dt = parse(data['timestamp'])\n    epsg = data['tileOrigin']['crs']['properties']['name'].split(':')[-1]\n\n    url = op.join(SETTINGS['s3_url'], data['path'])\n    roda_url = op.join(SETTINGS['roda_url'], data['path'])\n\n    # geo\n    coordinates = data['tileDataGeometry']['coordinates']\n    ys = [c[1] for c in coordinates[0]]\n    xs = [c[0] for c in coordinates[0]]\n    p1 = Proj(init='epsg:%s' % epsg)\n    p2 = Proj(init='epsg:4326')\n    lons, lats = reproj(p1, p2, xs, ys)\n    bbox = [min(lons), min(lats), max(lons), max(lats)]\n    coordinates = [[[lons[i], lats[i]] for i in range(0, len(lons))]]\n\n    geom = geometry.mapping(geometry.Polygon(coordinates[0]).convex_hull)\n\n    assets = _collection.data['assets']\n    assets = utils.dict_merge(assets, {\n        'thumbnail': {'href': op.join(roda_url, 'preview.jpg')},\n        'info': {'href': op.join(roda_url, 'tileInfo.json')},\n        'metadata': {'href': op.join(roda_url, 'metadata.xml')},\n        'tki': {'href': op.join(url, 'TKI.jp2')},\n        'B01': {'href': op.join(url, 'B01.jp2')},\n        'B02': {'href': op.join(url, 'B02.jp2')},\n        'B03': {'href': op.join(url, 'B03.jp2')},\n        'B04': {'href': op.join(url, 'B04.jp2')},\n        'B05': {'href': op.join(url, 'B05.jp2')},\n        'B06': {'href': op.join(url, 'B06.jp2')},\n        'B07': {'href': op.join(url, 'B07.jp2')},\n        'B08': {'href': op.join(url, 'B08.jp2')},\n        'B8A': {'href': op.join(url, 'B8A.jp2')},\n        'B09': {'href': op.join(url, 'B09.jp2')},\n        'B10': {'href': op.join(url, 'B10.jp2')},\n        'B11': {'href': op.join(url, 'B11.jp2')},\n        'B12': {'href': op.join(url, 'B12.jp2')}\n    })\n    #if dt < datetime(2016,12,6):\n    #    del assets['tki']\n\n    props = {\n        'collection': 'sentinel-2-l1c',\n        'datetime': dt.isoformat(),\n        'eo:platform': 'sentinel-2%s' % data['productName'][2].lower(),\n        'eo:cloud_cover': float(data['cloudyPixelPercentage']),\n        'sentinel:utm_zone': data['utmZone'],\n        'sentinel:latitude_band': data['latitudeBand'],\n        'sentinel:grid_square': data['gridSquare'],\n        'sentinel:sequence': data['path'].split('/')[-1],\n        'sentinel:product_id': data['productName']\n    }\n    sid = str(data['utmZone']) + data['latitudeBand'] + data['gridSquare']\n    id = '%s_%s_%s_%s' % (data['productName'][0:3], sid, dt.strftime('%Y%m%d'), props['sentinel:sequence'] )\n\n    _item = {\n        'type': 'Feature',\n        'id': id,\n        'bbox': bbox,\n        'geometry': geom,\n        'properties':props,\n        'assets': assets\n    }\n    return Item(_item)\n\n\ndef read_remote(url):\n    \"\"\" Retrieve remote JSON \"\"\"\n    # Read JSON file remotely\n    r = requests.get(url, stream=True)\n    metadata = json.loads(r.text)\n    return metadata\n"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
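Aside: transform() in the row above reprojects the tile footprint with the legacy `Proj(init=...)` / `pyproj.transform` interface. A minimal sketch of the same step using the current pyproj Transformer API; the EPSG code and corner coordinates below are made-up examples, not taken from the repo:

from pyproj import Transformer

# Hypothetical UTM zone 33N tile corners (eastings/northings in metres).
xs = [399960.0, 509760.0, 509760.0, 399960.0]
ys = [5200020.0, 5200020.0, 5090220.0, 5090220.0]

# always_xy=True keeps (lon, lat) axis order regardless of CRS conventions.
t = Transformer.from_crs("EPSG:32633", "EPSG:4326", always_xy=True)
lons, lats = t.transform(xs, ys)

bbox = [min(lons), min(lats), max(lons), max(lats)]
print(bbox)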
SamChen/DeepSpeech2_Pytorch_TransferLearning
|
[
"8c2cf0437f4facd3f5fc282f33b359873d3c5541"
] |
[
"data_utils/audio.py"
] |
[
"\"\"\"Contains the audio segment class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport io\nimport struct\nimport re\nimport soundfile\nimport resampy\nfrom scipy import signal\nimport random\nimport copy\n\n\nclass AudioSegment(object):\n \"\"\"Monaural audio segment abstraction.\n\n :param samples: Audio samples [num_samples x num_channels].\n :type samples: ndarray.float32\n :param sample_rate: Audio sample rate.\n :type sample_rate: int\n :raises TypeError: If the sample data type is not float or int.\n \"\"\"\n\n def __init__(self, samples, sample_rate):\n \"\"\"Create audio segment from samples.\n\n Samples are convert float32 internally, with int scaled to [-1, 1].\n \"\"\"\n self._samples = self._convert_samples_to_float32(samples)\n self._sample_rate = sample_rate\n if self._samples.ndim >= 2:\n self._samples = np.mean(self._samples, 1)\n\n def __eq__(self, other):\n \"\"\"Return whether two objects are equal.\"\"\"\n if type(other) is not type(self):\n return False\n if self._sample_rate != other._sample_rate:\n return False\n if self._samples.shape != other._samples.shape:\n return False\n if np.any(self.samples != other._samples):\n return False\n return True\n\n def __ne__(self, other):\n \"\"\"Return whether two objects are unequal.\"\"\"\n return not self.__eq__(other)\n\n def __str__(self):\n \"\"\"Return human-readable representation of segment.\"\"\"\n return (\"%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, \"\n \"rms=%.2fdB\" % (type(self), self.num_samples, self.sample_rate,\n self.duration, self.rms_db))\n\n @classmethod\n def from_file(cls, file):\n \"\"\"Create audio segment from audio file.\n\n :param filepath: Filepath or file object to audio file.\n :type filepath: str|file\n :return: Audio segment instance.\n :rtype: AudioSegment\n \"\"\"\n if isinstance(file, str) and re.findall(r\".seqbin_\\d+$\", file):\n return cls.from_sequence_file(file)\n else:\n samples, sample_rate = soundfile.read(file, dtype='float32')\n return cls(samples, sample_rate)\n\n @classmethod\n def slice_from_file(cls, file, start=None, end=None):\n \"\"\"Loads a small section of an audio without having to load\n the entire file into the memory which can be incredibly wasteful.\n\n :param file: Input audio filepath or file object.\n :type file: str|file\n :param start: Start time in seconds. If start is negative, it wraps\n around from the end. If not provided, this function\n reads from the very beginning.\n :type start: float\n :param end: End time in seconds. If end is negative, it wraps around\n from the end. If not provided, the default behvaior is\n to read to the end of the file.\n :type end: float\n :return: AudioSegment instance of the specified slice of the input\n audio file.\n :rtype: AudioSegment\n :raise ValueError: If start or end is incorrectly set, e.g. out of\n bounds in time.\n \"\"\"\n sndfile = soundfile.SoundFile(file)\n sample_rate = sndfile.samplerate\n duration = float(len(sndfile)) / sample_rate\n start = 0. if start is None else start\n end = 0. 
if end is None else end\n if start < 0.0:\n start += duration\n if end < 0.0:\n end += duration\n if start < 0.0:\n raise ValueError(\"The slice start position (%f s) is out of \"\n \"bounds.\" % start)\n if end < 0.0:\n raise ValueError(\"The slice end position (%f s) is out of bounds.\" %\n end)\n if start > end:\n raise ValueError(\"The slice start position (%f s) is later than \"\n \"the slice end position (%f s).\" % (start, end))\n if end > duration:\n raise ValueError(\"The slice end position (%f s) is out of bounds \"\n \"(> %f s)\" % (end, duration))\n start_frame = int(start * sample_rate)\n end_frame = int(end * sample_rate)\n sndfile.seek(start_frame)\n data = sndfile.read(frames=end_frame - start_frame, dtype='float32')\n return cls(data, sample_rate)\n\n @classmethod\n def from_sequence_file(cls, filepath):\n \"\"\"Create audio segment from sequence file. Sequence file is a binary\n file containing a collection of multiple audio files, with several\n header bytes in the head indicating the offsets of each audio byte data\n chunk.\n\n The format is:\n\n 4 bytes (int, version),\n 4 bytes (int, num of utterance),\n 4 bytes (int, bytes per header),\n [bytes_per_header*(num_utterance+1)] bytes (offsets for each audio),\n audio_bytes_data_of_1st_utterance,\n audio_bytes_data_of_2nd_utterance,\n ......\n\n Sequence file name must end with \".seqbin\". And the filename of the 5th\n utterance's audio file in sequence file \"xxx.seqbin\" must be\n \"xxx.seqbin_5\", with \"5\" indicating the utterance index within this\n sequence file (starting from 1).\n\n :param filepath: Filepath of sequence file.\n :type filepath: str\n :return: Audio segment instance.\n :rtype: AudioSegment\n \"\"\"\n # parse filepath\n matches = re.match(r\"(.+\\.seqbin)_(\\d+)\", filepath)\n if matches is None:\n raise IOError(\"File type of %s is not supported\" % filepath)\n filename = matches.group(1)\n fileno = int(matches.group(2))\n\n # read headers\n f = open(filename, 'rb')\n version = f.read(4)\n num_utterances = struct.unpack(\"i\", f.read(4))[0]\n bytes_per_header = struct.unpack(\"i\", f.read(4))[0]\n header_bytes = f.read(bytes_per_header * (num_utterances + 1))\n header = [\n struct.unpack(\"i\", header_bytes[bytes_per_header * i:\n bytes_per_header * (i + 1)])[0]\n for i in range(num_utterances + 1)\n ]\n\n # read audio bytes\n f.seek(header[fileno - 1])\n audio_bytes = f.read(header[fileno] - header[fileno - 1])\n f.close()\n\n # create audio segment\n try:\n return cls.from_bytes(audio_bytes)\n except Exception as e:\n samples = np.frombuffer(audio_bytes, dtype='int16')\n return cls(samples=samples, sample_rate=8000)\n\n @classmethod\n def from_bytes(cls, bytes):\n \"\"\"Create audio segment from a byte string containing audio samples.\n\n :param bytes: Byte string containing audio samples.\n :type bytes: str\n :return: Audio segment instance.\n :rtype: AudioSegment\n \"\"\"\n samples, sample_rate = soundfile.read(\n io.BytesIO(bytes), dtype='float32')\n return cls(samples, sample_rate)\n\n @classmethod\n def concatenate(cls, *segments):\n \"\"\"Concatenate an arbitrary number of audio segments together.\n\n :param *segments: Input audio segments to be concatenated.\n :type *segments: tuple of AudioSegment\n :return: Audio segment instance as concatenating results.\n :rtype: AudioSegment\n :raises ValueError: If the number of segments is zero, or if the\n sample_rate of any segments does not match.\n :raises TypeError: If any segment is not AudioSegment instance.\n \"\"\"\n # Perform basic 
sanity-checks.\n        if len(segments) == 0:\n            raise ValueError(\"No audio segments are given to concatenate.\")\n        sample_rate = segments[0]._sample_rate\n        for seg in segments:\n            if sample_rate != seg._sample_rate:\n                raise ValueError(\"Can't concatenate segments with \"\n                                 \"different sample rates\")\n            if type(seg) is not cls:\n                raise TypeError(\"Only audio segments of the same type \"\n                                \"can be concatenated.\")\n        samples = np.concatenate([seg.samples for seg in segments])\n        return cls(samples, sample_rate)\n\n    @classmethod\n    def make_silence(cls, duration, sample_rate):\n        \"\"\"Creates a silent audio segment of the given duration and sample rate.\n\n        :param duration: Length of silence in seconds.\n        :type duration: float\n        :param sample_rate: Sample rate.\n        :type sample_rate: float\n        :return: Silent AudioSegment instance of the given duration.\n        :rtype: AudioSegment\n        \"\"\"\n        samples = np.zeros(int(duration * sample_rate))\n        return cls(samples, sample_rate)\n\n    def to_wav_file(self, filepath, dtype='float32'):\n        \"\"\"Save audio segment to disk as wav file.\n\n        :param filepath: WAV filepath or file object to save the\n                         audio segment.\n        :type filepath: str|file\n        :param dtype: Subtype for audio file. Options: 'int16', 'int32',\n                      'float32', 'float64'. Default is 'float32'.\n        :type dtype: str\n        :raises TypeError: If dtype is not supported.\n        \"\"\"\n        samples = self._convert_samples_from_float32(self._samples, dtype)\n        subtype_map = {\n            'int16': 'PCM_16',\n            'int32': 'PCM_32',\n            'float32': 'FLOAT',\n            'float64': 'DOUBLE'\n        }\n        soundfile.write(\n            filepath,\n            samples,\n            self._sample_rate,\n            format='WAV',\n            subtype=subtype_map[dtype])\n\n    def superimpose(self, other):\n        \"\"\"Add samples from another segment to those of this segment\n        (sample-wise addition, not segment concatenation).\n\n        Note that this is an in-place transformation.\n\n        :param other: Segment containing samples to be added in.\n        :type other: AudioSegment\n        :raise TypeError: If type of two segments don't match.\n        :raise ValueError: If the sample rates of the two segments are not\n                           equal, or if the lengths of segments don't match.\n        \"\"\"\n        if not isinstance(other, type(self)):\n            raise TypeError(\"Cannot add segments of different types: %s \"\n                            \"and %s.\" % (type(self), type(other)))\n        if self._sample_rate != other._sample_rate:\n            raise ValueError(\"Sample rates must match to add segments.\")\n        if len(self._samples) != len(other._samples):\n            raise ValueError(\"Segment lengths must match to add segments.\")\n        self._samples += other._samples\n\n    def to_bytes(self, dtype='float32'):\n        \"\"\"Create a byte string containing the audio content.\n\n        :param dtype: Data type for export samples. Options: 'int16', 'int32',\n                      'float32', 'float64'. 
Default is 'float32'.\n :type dtype: str\n :return: Byte string containing audio content.\n :rtype: str\n \"\"\"\n samples = self._convert_samples_from_float32(self._samples, dtype)\n return samples.tobytes()\n\n def gain_db(self, gain):\n \"\"\"Apply gain in decibels to samples.\n\n Note that this is an in-place transformation.\n\n :param gain: Gain in decibels to apply to samples.\n :type gain: float|1darray\n \"\"\"\n self._samples *= 10.**(gain / 20.)\n\n def change_speed(self, speed_rate):\n \"\"\"Change the audio speed by linear interpolation.\n\n Note that this is an in-place transformation.\n\n :param speed_rate: Rate of speed change:\n speed_rate > 1.0, speed up the audio;\n speed_rate = 1.0, unchanged;\n speed_rate < 1.0, slow down the audio;\n speed_rate <= 0.0, not allowed, raise ValueError.\n :type speed_rate: float\n :raises ValueError: If speed_rate <= 0.0.\n \"\"\"\n if speed_rate <= 0:\n raise ValueError(\"speed_rate should be greater than zero.\")\n old_length = self._samples.shape[0]\n new_length = int(old_length / speed_rate)\n old_indices = np.arange(old_length)\n new_indices = np.linspace(start=0, stop=old_length, num=new_length)\n self._samples = np.interp(new_indices, old_indices, self._samples)\n\n def normalize(self, target_db=-20, max_gain_db=300.0):\n \"\"\"Normalize audio to be of the desired RMS value in decibels.\n\n Note that this is an in-place transformation.\n\n :param target_db: Target RMS value in decibels. This value should be\n less than 0.0 as 0.0 is full-scale audio.\n :type target_db: float\n :param max_gain_db: Max amount of gain in dB that can be applied for\n normalization. This is to prevent nans when\n attempting to normalize a signal consisting of\n all zeros.\n :type max_gain_db: float\n :raises ValueError: If the required gain to normalize the segment to\n the target_db value exceeds max_gain_db.\n \"\"\"\n gain = target_db - self.rms_db\n if gain > max_gain_db:\n raise ValueError(\n \"Unable to normalize segment to %f dB because the \"\n \"required gain exceeds max_gain_db (%f dB)\" %\n (target_db, max_gain_db))\n self.gain_db(gain)\n\n def normalize_online_bayesian(self,\n target_db,\n prior_db,\n prior_samples,\n startup_delay=0.0):\n \"\"\"Normalize audio using a production-compatible online/causal\n algorithm. This uses an exponential likelihood and gamma prior to\n make online estimates of the RMS even when there are very few samples.\n\n Note that this is an in-place transformation.\n\n :param target_db: Target RMS value in decibels.\n :type target_db: float\n :param prior_db: Prior RMS estimate in decibels.\n :type prior_db: float\n :param prior_samples: Prior strength in number of samples.\n :type prior_samples: float\n :param startup_delay: Default 0.0s. If provided, this function will\n accrue statistics for the first startup_delay\n seconds before applying online normalization.\n :type startup_delay: float\n \"\"\"\n # Estimate total RMS online.\n startup_sample_idx = min(self.num_samples - 1,\n int(self.sample_rate * startup_delay))\n prior_mean_squared = 10.**(prior_db / 10.)\n prior_sum_of_squares = prior_mean_squared * prior_samples\n cumsum_of_squares = np.cumsum(self.samples**2)\n sample_count = np.arange(self.num_samples) + 1\n if startup_sample_idx > 0:\n cumsum_of_squares[:startup_sample_idx] = \\\n cumsum_of_squares[startup_sample_idx]\n sample_count[:startup_sample_idx] = \\\n sample_count[startup_sample_idx]\n mean_squared_estimate = ((cumsum_of_squares + prior_sum_of_squares) /\n (sample_count + prior_samples))\n rms_estimate_db = 10 * np.log10(mean_squared_estimate)\n # Compute required time-varying gain.\n gain_db = target_db - rms_estimate_db\n self.gain_db(gain_db)\n\n def resample(self, target_sample_rate, filter='kaiser_best'):\n \"\"\"Resample the audio to a target sample rate.\n\n Note that this is an in-place transformation.\n\n :param target_sample_rate: Target sample rate.\n :type target_sample_rate: int\n :param filter: The resampling filter to use, one of {'kaiser_best',\n 'kaiser_fast'}.\n :type filter: str\n \"\"\"\n self._samples = resampy.resample(\n self.samples, self.sample_rate, target_sample_rate, filter=filter)\n self._sample_rate = target_sample_rate\n\n def pad_silence(self, duration, sides='both'):\n \"\"\"Pad this audio sample with a period of silence.\n\n Note that this is an in-place transformation.\n\n :param duration: Length of silence in seconds to pad.\n :type duration: float\n :param sides: Position for padding:\n 'beginning' - adds silence in the beginning;\n 'end' - adds silence in the end;\n 'both' - adds silence in both the beginning and the end.\n :type sides: str\n :raises ValueError: If sides is not supported.\n \"\"\"\n if duration == 0.0:\n return self\n cls = type(self)\n silence = self.make_silence(duration, self._sample_rate)\n if sides == \"beginning\":\n padded = cls.concatenate(silence, self)\n elif sides == \"end\":\n padded = cls.concatenate(self, silence)\n elif sides == \"both\":\n padded = cls.concatenate(silence, self, silence)\n else:\n raise ValueError(\"Unknown value for sides: %s\" % sides)\n self._samples = padded._samples\n\n def shift(self, shift_ms):\n \"\"\"Shift the audio in time. If `shift_ms` is positive, shift with time\n advance; if negative, shift with time delay. Silence is padded to\n keep the duration unchanged.\n\n Note that this is an in-place transformation.\n\n :param shift_ms: Shift time in milliseconds. If positive, shift with\n time advance; if negative, shift with time delay.\n :type shift_ms: float\n :raises ValueError: If the absolute value of shift_ms exceeds the\n audio duration.\n \"\"\"\n if abs(shift_ms) / 1000.0 > self.duration:\n raise ValueError(\"Absolute value of shift_ms should be smaller \"\n \"than audio duration.\")\n shift_samples = int(shift_ms * self._sample_rate / 1000)\n if shift_samples > 0:\n # time advance\n self._samples[:-shift_samples] = self._samples[shift_samples:]\n self._samples[-shift_samples:] = 0\n elif shift_samples < 0:\n # time delay\n self._samples[-shift_samples:] = self._samples[:shift_samples]\n self._samples[:-shift_samples] = 0\n\n def subsegment(self, start_sec=None, end_sec=None):\n \"\"\"Cut the AudioSegment between given boundaries.\n\n Note that this is an in-place transformation.\n\n :param start_sec: Beginning of subsegment in seconds.\n :type start_sec: float\n :param end_sec: End of subsegment in seconds.\n :type end_sec: float\n :raises ValueError: If start_sec or end_sec is incorrectly set, e.g. out\n of bounds in time.\n \"\"\"\n start_sec = 0.0 if start_sec is None else start_sec\n end_sec = self.duration if end_sec is None else end_sec\n if start_sec < 0.0:\n start_sec = self.duration + start_sec\n if end_sec < 0.0:\n end_sec = self.duration + end_sec\n if start_sec < 0.0:\n raise ValueError(\"The slice start position (%f s) is out of \"\n \"bounds.\" % start_sec)\n if end_sec < 0.0:\n raise ValueError(\"The slice end position (%f s) is out of bounds.\" %\n end_sec)\n if start_sec > end_sec:\n raise ValueError(\"The slice start position (%f s) is later than \"\n \"the end position (%f s).\" % (start_sec, end_sec))\n if end_sec > self.duration:\n raise ValueError(\"The slice end position (%f s) is out of bounds \"\n \"(> %f s)\" % (end_sec, self.duration))\n start_sample = int(round(start_sec * self._sample_rate))\n end_sample = int(round(end_sec * self._sample_rate))\n self._samples = self._samples[start_sample:end_sample]\n\n def random_subsegment(self, subsegment_length, rng=None):\n \"\"\"Cut the specified length of the audio segment randomly.\n\n Note that this is an in-place transformation.\n\n :param subsegment_length: Subsegment length in seconds.\n :type subsegment_length: float\n :param rng: Random number generator state.\n :type rng: random.Random\n :raises ValueError: If the length of subsegment is greater than\n the original segment.\n \"\"\"\n rng = random.Random() if rng is None else rng\n if subsegment_length > self.duration:\n raise ValueError(\"Length of subsegment must not be greater \"\n \"than original segment.\")\n start_time = rng.uniform(0.0, self.duration - subsegment_length)\n self.subsegment(start_time, start_time + subsegment_length)\n\n def convolve(self, impulse_segment, allow_resample=False):\n \"\"\"Convolve this audio segment with the given impulse segment.\n\n Note that this is an in-place transformation.\n\n :param impulse_segment: Impulse response segments.\n :type impulse_segment: AudioSegment\n :param allow_resample: Indicates whether resampling is allowed when\n the impulse_segment has a different sample\n rate from this signal.\n :type allow_resample: bool\n :raises ValueError: If the sample rates do not match between the two\n audio segments when resampling is not allowed.\n \"\"\"\n if allow_resample and self.sample_rate != impulse_segment.sample_rate:\n impulse_segment.resample(self.sample_rate)\n if self.sample_rate != impulse_segment.sample_rate:\n raise ValueError(\"Impulse segment's sample rate (%d Hz) is not \"\n \"equal to base signal sample rate (%d Hz).\" %\n (impulse_segment.sample_rate, self.sample_rate))\n samples = signal.fftconvolve(self.samples, impulse_segment.samples,\n \"full\")\n self._samples = samples\n\n def convolve_and_normalize(self, impulse_segment, allow_resample=False):\n \"\"\"Convolve and normalize the resulting audio segment so that it\n has the same average power as the input signal.\n\n Note that this is an in-place transformation.\n\n :param impulse_segment: Impulse response segments.\n :type impulse_segment: AudioSegment\n :param allow_resample: Indicates whether resampling is allowed when\n the impulse_segment has a different sample\n rate from this signal.\n :type allow_resample: bool\n \"\"\"\n target_db = self.rms_db\n self.convolve(impulse_segment, allow_resample=allow_resample)\n self.normalize(target_db)\n\n def add_noise(self,\n noise,\n snr_dB,\n allow_downsampling=False,\n max_gain_db=300.0,\n rng=None):\n \"\"\"Add the given noise segment at a specific signal-to-noise ratio.\n If the noise segment is longer than this segment, a random subsegment\n of matching length is sampled from it and used instead.\n\n Note that this is an in-place transformation.\n\n :param noise: Noise signal to add.\n :type noise: AudioSegment\n :param snr_dB: Signal-to-Noise Ratio, in decibels.\n :type snr_dB: float\n :param allow_downsampling: Whether to allow the noise signal to be\n downsampled to match the base signal sample\n rate.\n :type allow_downsampling: bool\n :param max_gain_db: Maximum amount of gain to apply to noise signal\n before adding it in. This is to prevent attempting\n to apply infinite gain to a zero signal.\n :type max_gain_db: float\n :param rng: Random number generator state.\n :type rng: None|random.Random\n :raises ValueError: If the sample rates do not match between the two\n audio segments when downsampling is not allowed, or\n if the noise segment is shorter than the base\n audio segment.\n \"\"\"\n rng = random.Random() if rng is None else rng\n if allow_downsampling and noise.sample_rate > self.sample_rate:\n noise = copy.deepcopy(noise)\n noise.resample(self.sample_rate)  # in-place; resample returns None\n if noise.sample_rate != self.sample_rate:\n raise ValueError(\"Noise sample rate (%d Hz) is not equal to base \"\n \"signal sample rate (%d Hz).\" % (noise.sample_rate,\n self.sample_rate))\n if noise.duration < self.duration:\n raise ValueError(\"Noise signal (%f sec) must be at least as long as\"\n \" base signal (%f sec).\" %\n (noise.duration, self.duration))\n noise_gain_db = min(self.rms_db - noise.rms_db - snr_dB, max_gain_db)\n noise_new = copy.deepcopy(noise)\n noise_new.random_subsegment(self.duration, rng=rng)\n noise_new.gain_db(noise_gain_db)\n self.superimpose(noise_new)\n\n @property\n def samples(self):\n \"\"\"Return audio samples.\n\n :return: Audio samples.\n :rtype: ndarray\n \"\"\"\n return self._samples.copy()\n\n @property\n def sample_rate(self):\n \"\"\"Return audio sample rate.\n\n :return: Audio sample rate.\n :rtype: int\n \"\"\"\n return self._sample_rate\n\n @property\n def num_samples(self):\n \"\"\"Return number of samples.\n\n :return: Number of samples.\n :rtype: int\n \"\"\"\n return self._samples.shape[0]\n\n @property\n def duration(self):\n \"\"\"Return audio duration.\n\n :return: Audio duration in seconds.\n :rtype: float\n \"\"\"\n return self._samples.shape[0] / float(self._sample_rate)\n\n @property\n def rms_db(self):\n \"\"\"Return root mean square energy of the audio in decibels.\n\n :return: Root mean square energy in decibels.\n :rtype: float\n \"\"\"\n # mean square (no square root), so scale log10 by 10 instead of 20\n mean_square = np.mean(self._samples**2)\n return 10 * np.log10(mean_square)\n\n def _convert_samples_to_float32(self, samples):\n \"\"\"Convert sample type to float32.\n\n Audio sample type is usually integer or floating-point.\n Integers will be scaled to [-1, 1] in float32.\n \"\"\"\n float32_samples = samples.astype('float32')\n if samples.dtype in np.sctypes['int']:\n bits = np.iinfo(samples.dtype).bits\n float32_samples *= (1. / 2**(bits - 1))\n elif samples.dtype in np.sctypes['float']:\n pass\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % samples.dtype)\n return float32_samples\n\n def _convert_samples_from_float32(self, samples, dtype):\n \"\"\"Convert sample type from float32 to dtype.\n\n Audio sample type is usually integer or floating-point. For integer\n type, float32 will be rescaled from [-1, 1] to the maximum range\n supported by the integer type.\n\n This is for writing an audio file.\n \"\"\"\n dtype = np.dtype(dtype)\n output_samples = samples.copy()\n if dtype in np.sctypes['int']:\n bits = np.iinfo(dtype).bits\n output_samples *= (2**(bits - 1) / 1.)\n min_val = np.iinfo(dtype).min\n max_val = np.iinfo(dtype).max\n output_samples[output_samples > max_val] = max_val\n output_samples[output_samples < min_val] = min_val\n elif dtype in np.sctypes['float']:\n min_val = np.finfo(dtype).min\n max_val = np.finfo(dtype).max\n output_samples[output_samples > max_val] = max_val\n output_samples[output_samples < min_val] = min_val\n else:\n raise TypeError(\"Unsupported sample type: %s.\" % dtype)\n return output_samples.astype(dtype)\n"
] |
[
[
"numpy.linspace",
"scipy.signal.fftconvolve",
"numpy.arange",
"numpy.cumsum",
"numpy.dtype",
"numpy.finfo",
"numpy.concatenate",
"numpy.frombuffer",
"numpy.log10",
"numpy.mean",
"numpy.interp",
"numpy.any",
"numpy.iinfo"
]
] |
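Editor's note: the `normalize`/`gain_db`/`rms_db` logic in the record above reduces to a few lines of numpy. A minimal, self-contained sketch (the function names and the -20 dB target here are illustrative, not part of the record):

```python
import numpy as np

def rms_db(samples):
    # 10 * log10 of the mean square == 20 * log10 of the RMS
    return 10 * np.log10(np.mean(samples ** 2))

def normalize(samples, target_db=-20.0):
    gain = target_db - rms_db(samples)      # required gain in dB
    return samples * 10.0 ** (gain / 20.0)  # dB -> linear amplitude

rng = np.random.default_rng(0)
x = 0.05 * rng.standard_normal(16000).astype("float32")
print(round(rms_db(normalize(x)), 2))  # -20.0
```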
Kguo-cs/TDOR
|
[
"c538ccb99e6b981c5d09bfc451c2a207449a3f64"
] |
[
"model/transformer.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, copy, time\n\n\ndef clones(module, N):\n \"Produce N identical layers.\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\nclass Encoder(nn.Module):\n \"Core encoder is a stack of N layers\"\n\n def __init__(self, layer, N):\n super(Encoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = torch.nn.LayerNorm(layer.size)\n\n def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn.\"\n for layer in self.layers:\n x = layer(x, mask)\n\n return self.norm(x)\n\nclass SublayerConnection(nn.Module):\n \"\"\"\n A residual connection followed by a layer norm.\n Note for code simplicity the norm is first as opposed to last.\n \"\"\"\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = torch.nn.LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer,e=None):\n \"Apply residual connection to any sublayer with the same size.\"\n\n return x + self.dropout(sublayer(self.norm(x)))\n\n\ndef attention(query, key, value, mask=None, dropout=None):\n \"Compute 'Scaled Dot Product Attention'\"\n d_k = query.size(-1)\n #scores = torch.sum(query[:,:,:,None]+key[:,:,None],dim=-1)/ math.sqrt(d_k)\n\n # torch.matmul(query, key.transpose(-2, -1)) #n,h,6,d_k *n,h,d_k,6 scores:n,h,6,6\n\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)#n,h,6,d_k *n,h,d_k,6 scores:n,h,6,6\n if mask is not None:#n,1,1,t\n scores = scores.masked_fill(mask == 0, -1e9)\n #scores = scores.masked_fill(mask == 0, 0)\n # p_attn = scores / (torch.sum(mask, dim=-1, keepdim=True) + 1e-9)\n # else:\n # p_attn = scores / (scores.shape[-1])\n p_attn = F.softmax(scores, dim = -1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n\n return torch.matmul(p_attn, value)#, p_attn#n,h,6,6 n,h,6,k\n\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1):\n \"Take in model size and number of heads.\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, mask=None):\n \"Implements Figure 2\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n\n # 1) Do all the linear projections in batch from d_model => h x d_k\n query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]#n,h,6,d_k\n\n # 2) Apply attention on all the projected vectors in batch.\n x = attention(query, key, value, mask=mask, dropout=self.dropout)#n,h,6,d_k, self.attn\n\n # 3) \"Concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\nclass PositionwiseFeedForward(nn.Module):\n \"Implements FFN equation.\"\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\nclass Decoder(nn.Module):\n \"Generic N layer decoder with masking.\"\n\n def __init__(self, layer, N):\n super(Decoder, self).__init__()\n self.layers = 
clones(layer, N)\n self.norm = torch.nn.LayerNorm(layer.size)\n\n def forward(self, x, memory, src_mask, tgt_mask):\n for layer in self.layers:\n x = layer(x, memory, src_mask, tgt_mask)\n return self.norm(x)\n\n\nclass DecoderLayer(nn.Module):\n \"Decoder is made of self-attn, src-attn, and feed forward (defined below)\"\n\n def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n super(DecoderLayer, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 3)\n\n def forward(self, x, memory, src_mask, tgt_mask):\n \"Follow Figure 1 (right) for connections.\"\n m = memory\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))\n return self.sublayer[2](x, self.feed_forward)\n\nclass EncoderLayer(nn.Module):\n \"Encoder is made up of self-attn and feed forward (defined below)\"\n def __init__(self, size, self_attn, feed_forward, dropout):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 2)\n self.size = size\n\n def forward(self, x, mask,e=None):\n\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n\n return self.sublayer[1](x, self.feed_forward)\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.nn.LayerNorm",
"torch.matmul",
"torch.nn.Linear"
]
] |
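Editor's note: a shape-level usage sketch for the `MultiHeadedAttention` defined in the record above; the import path mirrors the listed file_path and is an assumption about how the module is laid out.

```python
import torch
from model.transformer import MultiHeadedAttention  # path per the record's file_path

mha = MultiHeadedAttention(h=8, d_model=512)
x = torch.randn(2, 6, 512)     # (batch, seq_len, d_model)
mask = torch.ones(2, 1, 6)     # 1 = attend, 0 = masked; broadcast over heads
out = mha(x, x, x, mask=mask)  # self-attention: query = key = value
print(out.shape)               # torch.Size([2, 6, 512])
```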
DimitriosDelikonstantis/RoughML
|
[
"31a06db4b295a64af11c6cfa264563f61b52f772"
] |
[
"src/roughml/scripts/image_fft2d.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.color import rgb2gray\nimport cv2\nimport argparse\n\n\ndef get_image_fft(image_filename, show_original=False, show_grayscale=False, show_fft=False):\n # load image\n original_image = cv2.imread(image_filename)\n if show_original:\n # plot original image and get shape\n cv2.imshow('original image ' + str(original_image.shape), original_image)\n # get original image fft2D\n fft2d_rgb = np.fft.fft2(original_image)\n # plot original image fft2D\n plt.imshow(np.log(abs(fft2d_rgb)), cmap='gray')\n plt.title('FFT2D')\n plt.show()\n if show_grayscale:\n # convert original image to grayscale\n grayscale_image = rgb2gray(original_image)\n # plot grayscale image and get shape\n cv2.imshow('grayscale image ' + str(grayscale_image.shape), grayscale_image)\n # get grayscale image fft2D\n fft2d_gray = np.fft.fft2(grayscale_image)\n # plot grayscale image fft2D\n plt.imshow(np.log(abs(fft2d_gray)), cmap='gray')\n plt.title('FFT2D')\n plt.show()\n\n return None\n\n\nif __name__ == \"__main__\":\n # argument parser\n parser = argparse.ArgumentParser(description = 'Image fft2d plot')\n parser.add_argument('image', help = 'directory of image')\n parser.add_argument('original', help = 'Plot fft for original image', action='store_false')\n parser.add_argument('grayscale', help = 'Plot fft for grayscale image', action='store_true')\n args = parser.parse_args()\n\n fft2d = get_image_fft(args.image, args.original, args.grayscale)"
] |
[
[
"numpy.fft.fft2",
"matplotlib.pyplot.show",
"matplotlib.pyplot.title"
]
] |
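Editor's note: the core of `get_image_fft` above, detached from OpenCV and argparse; the random array stands in for an image, and the `+ 1e-9` guard against `log(0)` is an editorial addition, not in the record.

```python
import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(64, 64)                 # stand-in for a grayscale image
spectrum = np.fft.fft2(img)                  # 2-D DFT
plt.imshow(np.log(np.abs(spectrum) + 1e-9),  # log-magnitude; epsilon avoids log(0)
           cmap="gray")
plt.title("FFT2D")
plt.show()
```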
uber/pyro
|
[
"e2aad5ed70d20709bfdd7bc3f50eb7f87577081b"
] |
[
"tests/distributions/conftest.py"
] |
[
"# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\nfrom math import pi\n\nimport numpy as np\nimport pytest\nimport scipy.stats as sp\n\nimport pyro.distributions as dist\nfrom pyro.distributions.testing.naive_dirichlet import NaiveBeta, NaiveDirichlet\nfrom pyro.distributions.testing.rejection_exponential import RejectionExponential\nfrom pyro.distributions.testing.rejection_gamma import (\n ShapeAugmentedBeta,\n ShapeAugmentedDirichlet,\n ShapeAugmentedGamma,\n)\nfrom tests.distributions.dist_fixture import Fixture\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = dist.Normal.arg_constraints\n\n def __init__(self, loc, scale):\n super().__init__(dist.Normal(loc, scale))\n\n @property\n def loc(self):\n return self.base_dist.loc\n\n @property\n def scale(self):\n return self.base_dist.scale\n\n\nclass SparsePoisson(dist.Poisson):\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n def __init__(self, lower, upper, skewness, *args, **kwargs):\n base_dist = dist.Uniform(lower, upper).to_event(lower.ndim)\n super().__init__(base_dist, skewness, *args, **kwargs)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n def __init__(self, von_loc, von_conc, skewness):\n base_dist = dist.VonMises(von_loc, von_conc).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness)\n\n\ncontinuous_dists = [\n Fixture(\n pyro_dist=dist.Uniform,\n scipy_dist=sp.uniform,\n examples=[\n {\"low\": [2.0], \"high\": [2.5], \"test_data\": [2.2]},\n {\n \"low\": [2.0, 4.0],\n \"high\": [3.0, 5.0],\n \"test_data\": [[[2.5, 4.5]], [[2.5, 4.5]], [[2.5, 4.5]]],\n },\n {\n \"low\": [[2.0], [-3.0], [0.0]],\n \"high\": [[2.5], [0.0], [1.0]],\n \"test_data\": [[2.2], [-2], [0.7]],\n },\n ],\n scipy_arg_fn=lambda low, high: (\n (),\n {\"loc\": np.array(low), \"scale\": np.array(high) - np.array(low)},\n ),\n ),\n Fixture(\n pyro_dist=dist.Exponential,\n scipy_dist=sp.expon,\n examples=[\n {\"rate\": [2.4], \"test_data\": [5.5]},\n {\n \"rate\": [2.4, 5.5],\n \"test_data\": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]],\n },\n {\n \"rate\": [[2.4, 5.5]],\n \"test_data\": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]],\n },\n {\"rate\": [[2.4], [5.5]], \"test_data\": [[5.5], [3.2]]},\n ],\n scipy_arg_fn=lambda rate: ((), {\"scale\": 1.0 / np.array(rate)}),\n ),\n Fixture(\n pyro_dist=RejectionExponential,\n scipy_dist=sp.expon,\n examples=[\n {\"rate\": [2.4], \"factor\": [0.5], \"test_data\": [5.5]},\n {\n \"rate\": [2.4, 5.5],\n \"factor\": [0.5],\n \"test_data\": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]],\n },\n {\n \"rate\": [[2.4, 5.5]],\n \"factor\": [0.5],\n \"test_data\": [[[5.5, 3.2]], [[5.5, 3.2]], [[5.5, 3.2]]],\n },\n {\"rate\": [[2.4], [5.5]], \"factor\": [0.5], \"test_data\": [[5.5], [3.2]]},\n ],\n scipy_arg_fn=lambda rate, factor: ((), {\"scale\": 1.0 / np.array(rate)}),\n ),\n Fixture(\n pyro_dist=dist.Gamma,\n scipy_dist=sp.gamma,\n examples=[\n {\"concentration\": [2.4], \"rate\": [3.2], \"test_data\": [5.5]},\n {\n \"concentration\": [[2.4, 2.4], [3.2, 3.2]],\n \"rate\": [[2.4, 2.4], [3.2, 3.2]],\n \"test_data\": [[[5.5, 4.4], [5.5, 4.4]]],\n },\n {\n \"concentration\": [[2.4], [2.4]],\n \"rate\": [[3.2], [3.2]],\n \"test_data\": [[5.5], [4.4]],\n },\n ],\n scipy_arg_fn=lambda concentration, rate: (\n (np.array(concentration),),\n {\"scale\": 1.0 / np.array(rate)},\n ),\n ),\n Fixture(\n pyro_dist=ShapeAugmentedGamma,\n 
scipy_dist=sp.gamma,\n examples=[\n {\"concentration\": [2.4], \"rate\": [3.2], \"test_data\": [5.5]},\n {\n \"concentration\": [[2.4, 2.4], [3.2, 3.2]],\n \"rate\": [[2.4, 2.4], [3.2, 3.2]],\n \"test_data\": [[[5.5, 4.4], [5.5, 4.4]]],\n },\n {\n \"concentration\": [[2.4], [2.4]],\n \"rate\": [[3.2], [3.2]],\n \"test_data\": [[5.5], [4.4]],\n },\n ],\n scipy_arg_fn=lambda concentration, rate: (\n (np.array(concentration),),\n {\"scale\": 1.0 / np.array(rate)},\n ),\n ),\n Fixture(\n pyro_dist=dist.Beta,\n scipy_dist=sp.beta,\n examples=[\n {\"concentration1\": [2.4], \"concentration0\": [3.6], \"test_data\": [0.4]},\n {\n \"concentration1\": [[2.4, 2.4], [3.6, 3.6]],\n \"concentration0\": [[2.5, 2.5], [2.5, 2.5]],\n \"test_data\": [[[0.5, 0.4], [0.5, 0.4]]],\n },\n {\n \"concentration1\": [[2.4], [3.7]],\n \"concentration0\": [[3.6], [2.5]],\n \"test_data\": [[0.4], [0.6]],\n },\n ],\n scipy_arg_fn=lambda concentration1, concentration0: (\n (np.array(concentration1), np.array(concentration0)),\n {},\n ),\n ),\n Fixture(\n pyro_dist=NaiveBeta,\n scipy_dist=sp.beta,\n examples=[\n {\"concentration1\": [2.4], \"concentration0\": [3.6], \"test_data\": [0.4]},\n {\n \"concentration1\": [[2.4, 2.4], [3.6, 3.6]],\n \"concentration0\": [[2.5, 2.5], [2.5, 2.5]],\n \"test_data\": [[[0.5, 0.4], [0.5, 0.4]]],\n },\n {\n \"concentration1\": [[2.4], [3.7]],\n \"concentration0\": [[3.6], [2.5]],\n \"test_data\": [[0.4], [0.6]],\n },\n ],\n scipy_arg_fn=lambda concentration1, concentration0: (\n (np.array(concentration1), np.array(concentration0)),\n {},\n ),\n ),\n Fixture(\n pyro_dist=ShapeAugmentedBeta,\n scipy_dist=sp.beta,\n examples=[\n {\"concentration1\": [2.4], \"concentration0\": [3.6], \"test_data\": [0.4]},\n {\n \"concentration1\": [[2.4, 2.4], [3.6, 3.6]],\n \"concentration0\": [[2.5, 2.5], [2.5, 2.5]],\n \"test_data\": [[[0.5, 0.4], [0.5, 0.4]]],\n },\n {\n \"concentration1\": [[2.4], [3.7]],\n \"concentration0\": [[3.6], [2.5]],\n \"test_data\": [[0.4], [0.6]],\n },\n ],\n scipy_arg_fn=lambda concentration1, concentration0: (\n (np.array(concentration1), np.array(concentration0)),\n {},\n ),\n ),\n Fixture(\n pyro_dist=dist.LogNormal,\n scipy_dist=sp.lognorm,\n examples=[\n {\"loc\": [1.4], \"scale\": [0.4], \"test_data\": [5.5]},\n {\"loc\": [1.4], \"scale\": [0.4], \"test_data\": [[5.5]]},\n {\n \"loc\": [[1.4, 0.4], [1.4, 0.4]],\n \"scale\": [[2.6, 0.5], [2.6, 0.5]],\n \"test_data\": [[5.5, 6.4], [5.5, 6.4]],\n },\n {\n \"loc\": [[1.4], [0.4]],\n \"scale\": [[2.6], [0.5]],\n \"test_data\": [[5.5], [6.4]],\n },\n ],\n scipy_arg_fn=lambda loc, scale: (\n (np.array(scale),),\n {\"scale\": np.exp(np.array(loc))},\n ),\n ),\n Fixture(\n pyro_dist=dist.AffineBeta,\n scipy_dist=sp.beta,\n examples=[\n {\n \"concentration1\": [2.4],\n \"concentration0\": [3.6],\n \"loc\": [-1.0],\n \"scale\": [2.0],\n \"test_data\": [-0.4],\n },\n {\n \"concentration1\": [[2.4, 2.4], [3.6, 3.6]],\n \"concentration0\": [[2.5, 2.5], [2.5, 2.5]],\n \"loc\": [[-1.0, -1.0], [2.0, 2.0]],\n \"scale\": [[2.0, 2.0], [1.0, 1.0]],\n \"test_data\": [[[-0.4, 0.4], [2.5, 2.6]]],\n },\n {\n \"concentration1\": [[2.4], [3.7]],\n \"concentration0\": [[3.6], [2.5]],\n \"loc\": [[-1.0], [2.0]],\n \"scale\": [[2.0], [2.0]],\n \"test_data\": [[0.0], [3.0]],\n },\n ],\n scipy_arg_fn=lambda concentration1, concentration0, loc, scale: (\n (\n np.array(concentration1),\n np.array(concentration0),\n np.array(loc),\n np.array(scale),\n ),\n {},\n ),\n ),\n Fixture(\n pyro_dist=dist.Normal,\n scipy_dist=sp.norm,\n examples=[\n {\"loc\": 
[2.0], \"scale\": [4.0], \"test_data\": [2.0]},\n {\"loc\": [[2.0]], \"scale\": [[4.0]], \"test_data\": [[2.0]]},\n {\"loc\": [[[2.0]]], \"scale\": [[[4.0]]], \"test_data\": [[[2.0]]]},\n {\n \"loc\": [2.0, 50.0],\n \"scale\": [4.0, 100.0],\n \"test_data\": [[2.0, 50.0], [2.0, 50.0]],\n },\n ],\n scipy_arg_fn=lambda loc, scale: (\n (),\n {\"loc\": np.array(loc), \"scale\": np.array(scale)},\n ),\n prec=0.07,\n min_samples=50000,\n ),\n Fixture(\n pyro_dist=dist.MultivariateNormal,\n scipy_dist=sp.multivariate_normal,\n examples=[\n {\n \"loc\": [2.0, 1.0],\n \"covariance_matrix\": [[1.0, 0.5], [0.5, 1.0]],\n \"test_data\": [[2.0, 1.0], [9.0, 3.4]],\n },\n ],\n # This hack seems to be the best option right now, as 'scale' is not handled well by get_scipy_batch_logpdf\n scipy_arg_fn=lambda loc, covariance_matrix=None: (\n (),\n {\"mean\": np.array(loc), \"cov\": np.array([[1.0, 0.5], [0.5, 1.0]])},\n ),\n prec=0.01,\n min_samples=500000,\n ),\n Fixture(\n pyro_dist=dist.LowRankMultivariateNormal,\n scipy_dist=sp.multivariate_normal,\n examples=[\n {\n \"loc\": [2.0, 1.0],\n \"cov_diag\": [0.5, 0.5],\n \"cov_factor\": [[1.0], [0.5]],\n \"test_data\": [[2.0, 1.0], [9.0, 3.4]],\n },\n ],\n scipy_arg_fn=lambda loc, cov_diag=None, cov_factor=None: (\n (),\n {\"mean\": np.array(loc), \"cov\": np.array([[1.5, 0.5], [0.5, 0.75]])},\n ),\n prec=0.01,\n min_samples=500000,\n ),\n Fixture(\n pyro_dist=FoldedNormal,\n examples=[\n {\"loc\": [2.0], \"scale\": [4.0], \"test_data\": [2.0]},\n {\"loc\": [[2.0]], \"scale\": [[4.0]], \"test_data\": [[2.0]]},\n {\"loc\": [[[2.0]]], \"scale\": [[[4.0]]], \"test_data\": [[[2.0]]]},\n {\n \"loc\": [2.0, 50.0],\n \"scale\": [4.0, 100.0],\n \"test_data\": [[2.0, 50.0], [2.0, 50.0]],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.Dirichlet,\n scipy_dist=sp.dirichlet,\n examples=[\n {\"concentration\": [2.4, 3, 6], \"test_data\": [0.2, 0.45, 0.35]},\n {\n \"concentration\": [2.4, 3, 6],\n \"test_data\": [[0.2, 0.45, 0.35], [0.2, 0.45, 0.35]],\n },\n {\n \"concentration\": [[2.4, 3, 6], [3.2, 1.2, 0.4]],\n \"test_data\": [[0.2, 0.45, 0.35], [0.3, 0.4, 0.3]],\n },\n ],\n scipy_arg_fn=lambda concentration: ((concentration,), {}),\n ),\n Fixture(\n pyro_dist=NaiveDirichlet,\n scipy_dist=sp.dirichlet,\n examples=[\n {\"concentration\": [2.4, 3, 6], \"test_data\": [0.2, 0.45, 0.35]},\n {\n \"concentration\": [2.4, 3, 6],\n \"test_data\": [[0.2, 0.45, 0.35], [0.2, 0.45, 0.35]],\n },\n {\n \"concentration\": [[2.4, 3, 6], [3.2, 1.2, 0.4]],\n \"test_data\": [[0.2, 0.45, 0.35], [0.3, 0.4, 0.3]],\n },\n ],\n scipy_arg_fn=lambda concentration: ((concentration,), {}),\n ),\n Fixture(\n pyro_dist=ShapeAugmentedDirichlet,\n scipy_dist=sp.dirichlet,\n examples=[\n {\"concentration\": [2.4, 3, 6], \"test_data\": [0.2, 0.45, 0.35]},\n {\n \"concentration\": [2.4, 3, 6],\n \"test_data\": [[0.2, 0.45, 0.35], [0.2, 0.45, 0.35]],\n },\n {\n \"concentration\": [[2.4, 3, 6], [3.2, 1.2, 0.4]],\n \"test_data\": [[0.2, 0.45, 0.35], [0.3, 0.4, 0.3]],\n },\n ],\n scipy_arg_fn=lambda concentration: ((concentration,), {}),\n ),\n Fixture(\n pyro_dist=dist.Cauchy,\n scipy_dist=sp.cauchy,\n examples=[\n {\"loc\": [0.5], \"scale\": [1.2], \"test_data\": [1.0]},\n {\n \"loc\": [0.5, 0.5],\n \"scale\": [1.2, 1.2],\n \"test_data\": [[1.0, 1.0], [1.0, 1.0]],\n },\n {\n \"loc\": [[0.5], [0.3]],\n \"scale\": [[1.2], [1.0]],\n \"test_data\": [[0.4], [0.35]],\n },\n ],\n scipy_arg_fn=lambda loc, scale: (\n (),\n {\"loc\": np.array(loc), \"scale\": np.array(scale)},\n ),\n ),\n Fixture(\n 
pyro_dist=dist.HalfCauchy,\n scipy_dist=sp.halfcauchy,\n examples=[\n {\"scale\": [1.2], \"test_data\": [1.0]},\n {\"scale\": [1.2, 1.2], \"test_data\": [[1.0, 2.0], [1.0, 2.0]]},\n {\"scale\": [[1.2], [1.0]], \"test_data\": [[0.54], [0.35]]},\n ],\n scipy_arg_fn=lambda scale: ((), {\"scale\": np.array(scale)}),\n ),\n Fixture(\n pyro_dist=dist.VonMises,\n scipy_dist=sp.vonmises,\n examples=[\n {\"loc\": [0.5], \"concentration\": [1.2], \"test_data\": [1.0]},\n {\n \"loc\": [0.5, 3.0],\n \"concentration\": [2.0, 0.5],\n \"test_data\": [[1.0, 2.0], [1.0, 2.0]],\n },\n {\n \"loc\": [[0.5], [0.3]],\n \"concentration\": [[2.0], [0.5]],\n \"test_data\": [[1.0], [2.0]],\n },\n ],\n scipy_arg_fn=lambda loc, concentration: (\n (),\n {\"loc\": np.array(loc), \"kappa\": np.array(concentration)},\n ),\n ),\n Fixture(\n pyro_dist=dist.LKJ,\n examples=[\n {\n \"dim\": 3,\n \"concentration\": 1.0,\n \"test_data\": [\n [\n [1.0000, -0.8221, 0.7655],\n [-0.8221, 1.0000, -0.5293],\n [0.7655, -0.5293, 1.0000],\n ],\n [\n [1.0000, -0.5345, -0.5459],\n [-0.5345, 1.0000, -0.0333],\n [-0.5459, -0.0333, 1.0000],\n ],\n [\n [1.0000, -0.3758, -0.2409],\n [-0.3758, 1.0000, 0.4653],\n [-0.2409, 0.4653, 1.0000],\n ],\n [\n [1.0000, -0.8800, -0.9493],\n [-0.8800, 1.0000, 0.9088],\n [-0.9493, 0.9088, 1.0000],\n ],\n [\n [1.0000, 0.2284, -0.1283],\n [0.2284, 1.0000, 0.0146],\n [-0.1283, 0.0146, 1.0000],\n ],\n ],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.LKJCholesky,\n examples=[\n {\n \"dim\": 3,\n \"concentration\": 1.0,\n \"test_data\": [\n [\n [1.0, 0.0, 0.0],\n [-0.17332135, 0.98486533, 0.0],\n [0.43106407, -0.54767312, 0.71710384],\n ],\n [\n [1.0, 0.0, 0.0],\n [-0.31391555, 0.94945091, 0.0],\n [-0.31391296, -0.29767500, 0.90158097],\n ],\n ],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.Stable,\n examples=[\n {\"stability\": [1.5], \"skew\": 0.1, \"test_data\": [-10.0]},\n {\n \"stability\": [1.5],\n \"skew\": 0.1,\n \"scale\": 2.0,\n \"loc\": -2.0,\n \"test_data\": [10.0],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.MultivariateStudentT,\n examples=[\n {\n \"df\": 1.5,\n \"loc\": [0.2, 0.3],\n \"scale_tril\": [[0.8, 0.0], [1.3, 0.4]],\n \"test_data\": [-3.0, 2],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.ProjectedNormal,\n examples=[\n {\"concentration\": [0.0, 0.0], \"test_data\": [1.0, 0.0]},\n {\"concentration\": [0.2, 0.1], \"test_data\": [1.0, 0.0]},\n {\"concentration\": [2.0, 3.0], \"test_data\": [0.0, 1.0]},\n {\"concentration\": [0.1, 0.0, 0.0], \"test_data\": [1.0, 0.0, 0.0]},\n {\"concentration\": [0.3, 0.2, 0.1], \"test_data\": [1.0, 0.0, 0.0]},\n {\"concentration\": [-1.0, 2.0, 3.0], \"test_data\": [0.0, 0.0, 1.0]},\n {\"concentration\": [0.0, 0.0, 0.0, 0.0], \"test_data\": [1.0, 0.0, 0.0, 0.0]},\n {\"concentration\": [0.4, 0.3, 0.2, 0.1], \"test_data\": [1.0, 0.0, 0.0, 0.0]},\n {\n \"concentration\": [-1.0, 2.0, 0.5, -0.5],\n \"test_data\": [0.0, 1.0, 0.0, 0.0],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.SineBivariateVonMises,\n examples=[\n {\n \"phi_loc\": [0.0],\n \"psi_loc\": [0.0],\n \"phi_concentration\": [5.0],\n \"psi_concentration\": [6.0],\n \"correlation\": [2.0],\n \"test_data\": [[0.0, 0.0]],\n },\n {\n \"phi_loc\": [3.003],\n \"psi_loc\": [-1.343],\n \"phi_concentration\": [5.0],\n \"psi_concentration\": [6.0],\n \"correlation\": [2.0],\n \"test_data\": [[0.0, 1.0]],\n },\n {\n \"phi_loc\": [-math.pi / 3],\n \"psi_loc\": -1.0,\n \"phi_concentration\": 0.5,\n \"psi_concentration\": 10.0,\n \"correlation\": 0.9,\n \"test_data\": [[1.0, 0.555]],\n },\n {\n \"phi_loc\": [math.pi - 0.2, 
1.0],\n \"psi_loc\": [0.0, 1.0],\n \"phi_concentration\": [5.0, 5.0],\n \"psi_concentration\": [7.0, 0.5],\n \"weighted_correlation\": [0.5, 0.1],\n \"test_data\": [[[1.0, -3.0], [1.0, 59.0]]],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.SoftLaplace,\n examples=[\n {\"loc\": [2.0], \"scale\": [4.0], \"test_data\": [2.0]},\n {\"loc\": [[2.0]], \"scale\": [[4.0]], \"test_data\": [[2.0]]},\n {\"loc\": [[[2.0]]], \"scale\": [[[4.0]]], \"test_data\": [[[2.0]]]},\n {\n \"loc\": [2.0, 50.0],\n \"scale\": [4.0, 100.0],\n \"test_data\": [[2.0, 50.0], [2.0, 50.0]],\n },\n ],\n ),\n Fixture(\n pyro_dist=SineSkewedUniform,\n examples=[\n {\n \"lower\": [-pi, -pi],\n \"upper\": [pi, pi],\n \"skewness\": [-pi / 4, 0.1],\n \"test_data\": [pi / 2, -2 * pi / 3],\n }\n ],\n ),\n Fixture(\n pyro_dist=SineSkewedVonMises,\n examples=[\n {\n \"von_loc\": [0.0],\n \"von_conc\": [1.0],\n \"skewness\": [0.342355],\n \"test_data\": [0.1],\n },\n {\n \"von_loc\": [0.0, -1.234],\n \"von_conc\": [1.0, 10.0],\n \"skewness\": [[0.342355, -0.0001], [0.91, 0.09]],\n \"test_data\": [[0.1, -3.2], [-2.0, 0.0]],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.AsymmetricLaplace,\n examples=[\n {\"loc\": [1.0], \"scale\": [1.0], \"asymmetry\": [2.0], \"test_data\": [2.0]},\n {\n \"loc\": [2.0, -50.0],\n \"scale\": [2.0, 10.0],\n \"asymmetry\": [0.5, 2.5],\n \"test_data\": [[2.0, 10.0], [-1.0, -50.0]],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.SoftAsymmetricLaplace,\n examples=[\n {\"loc\": [1.0], \"scale\": [1.0], \"asymmetry\": [2.0], \"test_data\": [2.0]},\n {\n \"loc\": [2.0, -50.0],\n \"scale\": [2.0, 10.0],\n \"asymmetry\": [0.5, 2.5],\n \"softness\": [0.7, 1.4],\n \"test_data\": [[2.0, 10.0], [-1.0, -50.0]],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.SkewLogistic,\n examples=[\n {\"loc\": [1.0], \"scale\": [1.0], \"asymmetry\": [2.0], \"test_data\": [2.0]},\n {\n \"loc\": [2.0, -50.0],\n \"scale\": [2.0, 10.0],\n \"asymmetry\": [0.5, 2.5],\n \"test_data\": [[2.0, 10.0], [-1.0, -50.0]],\n },\n ],\n ),\n Fixture(\n pyro_dist=dist.Logistic,\n examples=[\n {\"loc\": [1.0], \"scale\": [1.0], \"test_data\": [2.0]},\n {\n \"loc\": [2.0, -50.0],\n \"scale\": [2.0, 10.0],\n \"test_data\": [[2.0, 10.0], [-1.0, -50.0]],\n },\n ],\n ),\n]\n\ndiscrete_dists = [\n Fixture(\n pyro_dist=dist.OrderedLogistic,\n examples=[\n {\"cutpoints\": [0.0, 1.0, 2.0], \"predictor\": [1.0], \"test_data\": [1]},\n {\n \"cutpoints\": [0.0, 1.0, 2.0],\n \"predictor\": [-0.5, 0.5, 1.5, 2.5],\n \"test_data\": [0, 1, 2, 3],\n },\n {\n \"cutpoints\": [0.0, 1.0],\n \"predictor\": [[-0.5, 0.5, 1.5], [-0.5, 0.5, 1.5]],\n \"test_data\": [[0, 1, 2], [0, 1, 2]],\n },\n ],\n prec=0.05,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.Multinomial,\n scipy_dist=sp.multinomial,\n examples=[\n {\"probs\": [0.1, 0.6, 0.3], \"test_data\": [0.0, 1.0, 0.0]},\n {\"probs\": [0.1, 0.6, 0.3], \"total_count\": 8, \"test_data\": [2.0, 4.0, 2.0]},\n {\n \"probs\": [0.1, 0.6, 0.3],\n \"total_count\": 8,\n \"test_data\": [[2.0, 4.0, 2.0], [2.0, 4.0, 2.0]],\n },\n {\n \"probs\": [[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]],\n \"total_count\": 8,\n \"test_data\": [[2.0, 4.0, 2.0], [1.0, 4.0, 3.0]],\n },\n ],\n scipy_arg_fn=lambda probs, total_count=[1]: (\n (total_count[0], np.array(probs)),\n {},\n ),\n prec=0.05,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.Bernoulli,\n scipy_dist=sp.bernoulli,\n examples=[\n {\"probs\": [0.25], \"test_data\": [1.0]},\n {\n \"probs\": [0.25, 0.25],\n \"test_data\": [[[0.0, 1.0]], [[1.0, 0.0]], [[0.0, 0.0]]],\n 
},\n {\n \"logits\": [math.log(p / (1 - p)) for p in (0.25, 0.25)],\n \"test_data\": [[[0.0, 1.0]], [[1.0, 0.0]], [[0.0, 0.0]]],\n },\n # for now, avoid tests on infinite logits\n # {'logits': [-float('inf'), 0],\n # 'test_data': [[0, 1], [0, 1], [0, 1]]},\n {\n \"logits\": [\n [math.log(p / (1 - p)) for p in (0.25, 0.25)],\n [math.log(p / (1 - p)) for p in (0.3, 0.3)],\n ],\n \"test_data\": [[1.0, 1.0], [0.0, 0.0]],\n },\n {\n \"probs\": [[0.25, 0.25], [0.3, 0.3]],\n \"test_data\": [[1.0, 1.0], [0.0, 0.0]],\n },\n ],\n # for now, avoid tests on infinite logits\n # test_data_indices=[0, 1, 2, 3],\n batch_data_indices=[-1, -2],\n scipy_arg_fn=lambda **kwargs: ((), {\"p\": kwargs[\"probs\"]}),\n prec=0.01,\n min_samples=10000,\n is_discrete=True,\n expected_support_non_vec=[[0.0], [1.0]],\n expected_support=[[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 1.0]]],\n ),\n Fixture(\n pyro_dist=dist.BetaBinomial,\n examples=[\n {\n \"concentration1\": [2.0],\n \"concentration0\": [5.0],\n \"total_count\": 8,\n \"test_data\": [4.0],\n },\n {\n \"concentration1\": [2.0],\n \"concentration0\": [5.0],\n \"total_count\": 8,\n \"test_data\": [[2.0], [4.0]],\n },\n {\n \"concentration1\": [[2.0], [2.0]],\n \"concentration0\": [[5.0], [5.0]],\n \"total_count\": 8,\n \"test_data\": [[4.0], [3.0]],\n },\n {\n \"concentration1\": [2.0, 2.0],\n \"concentration0\": [5.0, 5.0],\n \"total_count\": [0.0, 0.0],\n \"test_data\": [[0.0, 0.0], [0.0, 0.0]],\n },\n {\n \"concentration1\": [2.0, 2.0],\n \"concentration0\": [5.0, 5.0],\n \"total_count\": [[8.0, 7.0], [5.0, 9.0]],\n \"test_data\": [[6.0, 3.0], [2.0, 8.0]],\n },\n ],\n batch_data_indices=[-1, -2],\n prec=0.01,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.Binomial,\n scipy_dist=sp.binom,\n examples=[\n {\"probs\": [0.6], \"total_count\": 8, \"test_data\": [4.0]},\n {\"probs\": [0.3], \"total_count\": 8, \"test_data\": [[2.0], [4.0]]},\n {\"probs\": [[0.2], [0.4]], \"total_count\": 8, \"test_data\": [[4.0], [3.0]]},\n {\n \"probs\": [0.2, 0.4],\n \"total_count\": [0.0, 0.0],\n \"test_data\": [[0.0, 0.0], [0.0, 0.0]],\n },\n {\n \"probs\": [0.2, 0.4],\n \"total_count\": [[8.0, 7.0], [5.0, 9.0]],\n \"test_data\": [[6.0, 3.0], [2.0, 8.0]],\n },\n ],\n scipy_arg_fn=lambda probs, total_count: ((total_count, probs), {}),\n prec=0.05,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.ExtendedBetaBinomial,\n examples=[\n {\n \"concentration1\": [2.0],\n \"concentration0\": [5.0],\n \"total_count\": 8,\n \"test_data\": [4.0],\n },\n {\n \"concentration1\": [2.0],\n \"concentration0\": [5.0],\n \"total_count\": 8,\n \"test_data\": [[2.0], [4.0]],\n },\n {\n \"concentration1\": [[2.0], [2.0]],\n \"concentration0\": [[5.0], [5.0]],\n \"total_count\": 8,\n \"test_data\": [[4.0], [3.0]],\n },\n {\n \"concentration1\": [2.0, 2.0],\n \"concentration0\": [5.0, 5.0],\n \"total_count\": [0.0, 0.0],\n \"test_data\": [[0.0, 0.0], [0.0, 0.0]],\n },\n {\n \"concentration1\": [2.0, 2.0],\n \"concentration0\": [5.0, 5.0],\n \"total_count\": [[8.0, 7.0], [5.0, 9.0]],\n \"test_data\": [[6.0, 3.0], [2.0, 8.0]],\n },\n ],\n batch_data_indices=[-1, -2],\n prec=0.01,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.ExtendedBinomial,\n scipy_dist=sp.binom,\n examples=[\n {\"probs\": [0.6], \"total_count\": 8, \"test_data\": [4.0]},\n {\"probs\": [0.3], \"total_count\": 8, \"test_data\": [[2.0], [4.0]]},\n {\"probs\": [[0.2], [0.4]], \"total_count\": 8, \"test_data\": [[4.0], [3.0]]},\n {\n \"probs\": [0.2, 0.4],\n 
\"total_count\": [0.0, 0.0],\n \"test_data\": [[0.0, 0.0], [0.0, 0.0]],\n },\n {\n \"probs\": [0.2, 0.4],\n \"total_count\": [[8.0, 7.0], [5.0, 9.0]],\n \"test_data\": [[6.0, 3.0], [2.0, 8.0]],\n },\n ],\n scipy_arg_fn=lambda probs, total_count: ((total_count, probs), {}),\n prec=0.05,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.Categorical,\n scipy_dist=sp.multinomial,\n examples=[\n {\"probs\": [0.1, 0.6, 0.3], \"test_data\": [2]},\n {\"logits\": list(map(math.log, [0.1, 0.6, 0.3])), \"test_data\": [2]},\n {\n \"logits\": [\n list(map(math.log, [0.1, 0.6, 0.3])),\n list(map(math.log, [0.2, 0.4, 0.4])),\n ],\n \"test_data\": [2, 0],\n },\n {\"probs\": [[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]], \"test_data\": [2, 0]},\n ],\n test_data_indices=[0, 1, 2],\n batch_data_indices=[-1, -2],\n scipy_arg_fn=None,\n prec=0.05,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.DirichletMultinomial,\n examples=[\n {\"concentration\": [0.1, 0.6, 0.3], \"test_data\": [0.0, 1.0, 0.0]},\n {\n \"concentration\": [0.5, 1.0, 2.0],\n \"total_count\": 8,\n \"test_data\": [0.0, 2.0, 6.0],\n },\n {\n \"concentration\": [[0.5, 1.0, 2.0], [3.0, 3.0, 0.1]],\n \"total_count\": 8,\n \"test_data\": [[0.0, 2.0, 6.0], [5.0, 2.0, 1.0]],\n },\n ],\n prec=0.08,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.GammaPoisson,\n examples=[\n {\"concentration\": [1.0], \"rate\": [2.0], \"test_data\": [0.0]},\n {\"concentration\": [1.0], \"rate\": [2.0], \"test_data\": [1.0]},\n {\"concentration\": [1.0], \"rate\": [2.0], \"test_data\": [4.0]},\n {\n \"concentration\": [1.0, 1.0, 1.0],\n \"rate\": [2.0, 2.0, 3.0],\n \"test_data\": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]],\n },\n {\n \"concentration\": [[1.0], [1.0], [1.0]],\n \"rate\": [[2.0], [2.0], [3.0]],\n \"test_data\": [[0.0], [1.0], [4.0]],\n },\n ],\n prec=0.08,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.OneHotCategorical,\n scipy_dist=sp.multinomial,\n examples=[\n {\"probs\": [0.1, 0.6, 0.3], \"test_data\": [0.0, 0.0, 1.0]},\n {\n \"logits\": list(map(math.log, [0.1, 0.6, 0.3])),\n \"test_data\": [0.0, 0.0, 1.0],\n },\n {\n \"logits\": [\n list(map(math.log, [0.1, 0.6, 0.3])),\n list(map(math.log, [0.2, 0.4, 0.4])),\n ],\n \"test_data\": [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],\n },\n {\n \"probs\": [[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]],\n \"test_data\": [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]],\n },\n ],\n test_data_indices=[0, 1, 2],\n batch_data_indices=[-1, -2],\n scipy_arg_fn=lambda probs: ((1, np.array(probs)), {}),\n prec=0.05,\n min_samples=10000,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.Poisson,\n scipy_dist=sp.poisson,\n examples=[\n {\"rate\": [2.0], \"test_data\": [0.0]},\n {\"rate\": [3.0], \"test_data\": [1.0]},\n {\"rate\": [6.0], \"test_data\": [4.0]},\n {\"rate\": [2.0, 3.0, 6.0], \"test_data\": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]]},\n {\"rate\": [[2.0], [3.0], [6.0]], \"test_data\": [[0.0], [1.0], [4.0]]},\n ],\n scipy_arg_fn=lambda rate: ((np.array(rate),), {}),\n prec=0.08,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=SparsePoisson,\n scipy_dist=sp.poisson,\n examples=[\n {\"rate\": [2.0], \"test_data\": [0.0]},\n {\"rate\": [3.0], \"test_data\": [1.0]},\n {\"rate\": [6.0], \"test_data\": [4.0]},\n {\"rate\": [2.0, 3.0, 6.0], \"test_data\": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]]},\n {\"rate\": [[2.0], [3.0], [6.0]], \"test_data\": [[0.0], [1.0], [4.0]]},\n ],\n scipy_arg_fn=lambda rate: ((np.array(rate),), {}),\n prec=0.08,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.Geometric,\n 
scipy_dist=sp.geom,\n examples=[\n {\"logits\": [2.0], \"test_data\": [0.0]},\n {\"logits\": [3.0], \"test_data\": [1.0]},\n {\"logits\": [-6.0], \"test_data\": [4.0]},\n {\n \"logits\": [2.0, 3.0, -6.0],\n \"test_data\": [[0.0, 1.0, 4.0], [0.0, 1.0, 4.0]],\n },\n {\"logits\": [[2.0], [3.0], [-6.0]], \"test_data\": [[0.0], [1.0], [4.0]]},\n ],\n scipy_arg_fn=lambda probs: ((np.array(probs), -1), {}),\n prec=0.08,\n is_discrete=True,\n ),\n Fixture(\n pyro_dist=dist.LogNormalNegativeBinomial,\n examples=[\n {\n \"logits\": [0.6],\n \"total_count\": 8,\n \"multiplicative_noise_scale\": [0.1],\n \"test_data\": [4.0],\n },\n {\n \"logits\": [0.2, 0.4],\n \"multiplicative_noise_scale\": [0.1, 0.2],\n \"total_count\": [[8.0, 7.0], [5.0, 9.0]],\n \"test_data\": [[6.0, 3.0], [2.0, 8.0]],\n },\n ],\n is_discrete=True,\n ),\n]\n\n\[email protected](\n name=\"dist\",\n params=continuous_dists + discrete_dists,\n ids=lambda x: x.get_test_distribution_name(),\n)\ndef all_distributions(request):\n return request.param\n\n\[email protected](\n name=\"continuous_dist\",\n params=continuous_dists,\n ids=lambda x: x.get_test_distribution_name(),\n)\ndef continuous_distributions(request):\n return request.param\n\n\[email protected](\n name=\"discrete_dist\",\n params=discrete_dists,\n ids=lambda x: x.get_test_distribution_name(),\n)\ndef discrete_distributions(request):\n return request.param\n\n\ndef pytest_collection_modifyitems(items):\n for item in items:\n if item.nodeid.startswith(\"tests/distributions\"):\n if \"stage\" not in item.keywords:\n item.add_marker(pytest.mark.stage(\"unit\"))\n if \"init\" not in item.keywords:\n item.add_marker(pytest.mark.init(rng_seed=123))\n"
] |
[
[
"numpy.array"
]
] |
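Editor's note: the fixture-parametrization pattern the conftest above relies on, reduced to a pyro-free sketch; `Case` is a stand-in for `tests.distributions.dist_fixture.Fixture`.

```python
import pytest

class Case:  # stand-in for the Fixture objects in the record above
    def __init__(self, name):
        self.name = name
    def get_test_distribution_name(self):
        return self.name

CASES = [Case("uniform"), Case("gamma")]

@pytest.fixture(name="case", params=CASES,
                ids=lambda c: c.get_test_distribution_name())
def case_fixture(request):
    # each test using `case` runs once per entry in CASES,
    # with readable test ids taken from the lambda above
    return request.param

def test_case_named(case):
    assert case.get_test_distribution_name() in {"uniform", "gamma"}
```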
dspftw/dspftw
|
[
"14de953f95e3a0710f10ee76d012c52ab12d6467"
] |
[
"dspftw/root_raised_cosine_filter_generator.py"
] |
[
"# vim: expandtab tabstop=4 shiftwidth=4\n\nfrom numpy import pi, sin, ceil, append, arange, zeros\nfrom numpy import array as nparray\n\nfrom .root_raised_cosine import root_raised_cosine\n\ndef root_raised_cosine_filter_generator(symbol_width: int, symbol_rate: float, sample_rate: float, beta: float=0.25) -> nparray:\n '''\n Compute the taps of a root raised cosine (RRC) filter.\n\n Parameters\n ----------\n symbol_width: int\n Number of bauds wide for the output filter. An odd integer will ensure the initial and final taps are at baud boundaries.\n symbol_rate: float\n Symbol rate in Hz.\n sample_rate: float\n Sample rate in Hz.\n beta: float\n Roll off factor. In the interval (0,1]\n Default is 0.25.\n\n Returns a numpy float array.\n '''\n\n # Set the symbol period\n Ts = 1.0/symbol_rate\n\n # Create the input time values\n time_array = arange(-ceil((symbol_width/2.0)*Ts*sample_rate), ceil((symbol_width/2.0)*Ts*sample_rate+1), dtype=float)/sample_rate\n\n # Create filter values\n if beta == 0:\n filter_val = zeros(time_array.shape)\n tlen = len(time_array)\n\n # Define filter everywhere except center tap\n idx = append(arange((tlen-1)//2, dtype=int), arange((tlen-1)//2+1, tlen, dtype=int))\n filter_val[idx] = sin(pi*time_array[idx]/Ts) / (pi*time_array[idx]/Ts)\n\n # Define filter at center tap\n filter_val[(tlen-1)//2] = 1\n\n else:\n filter_val = root_raised_cosine(time_array,Ts,beta)\n\n return filter_val\n\ndef rrcfiltgen(*args, **kwargs) -> nparray:\n '''\n Alias for root_raised_cosine_filter_generator.\n '''\n return root_raised_cosine_filter_generator(*args, **kwargs)\n"
] |
[
[
"numpy.ceil",
"numpy.arange",
"numpy.zeros",
"numpy.sin"
]
] |
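Editor's note: an illustrative call of the generator above. The import path follows the listed file_path; whether the package root re-exports the alias is not shown in the record, so the fully qualified module is used here.

```python
from dspftw.root_raised_cosine_filter_generator import rrcfiltgen

# 9 bauds wide, 8 samples per symbol (8 kHz / 1 kHz), beta = 0.25
taps = rrcfiltgen(symbol_width=9, symbol_rate=1e3, sample_rate=8e3, beta=0.25)
print(len(taps))  # arange(-36, 37) -> 73 taps
```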
gfngoncalves/foam_graph
|
[
"b1b735aef3eeaa6e9b8430c5520abff871ce3ffb"
] |
[
"test/test_plot.py"
] |
[
"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch_geometric.data import Data\nfrom foam_graph.visualization.graph_plotting import plot_graph, plot_graph_contour\nfrom foam_graph.visualization.graph_3d_plotting import plot_3d_graph\n\n\ndef test_plot_2d():\n edge_index = torch.tensor([[0, 1], [1, 0]], dtype=torch.long)\n x = torch.tensor([[-1], [0]], dtype=torch.float)\n pos = torch.tensor([[-1, 0], [0, 0]], dtype=torch.float)\n edge_attr = torch.tensor([[0]], dtype=torch.float)\n\n data = Data(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr)\n\n fig, ax = plt.subplots()\n plot_graph(data, \"x\", 0, ax=ax)\n\n lines = ax.collections[1].get_segments()\n np.testing.assert_equal(lines, [[[-1.0, 0.0], [0.0, 0.0]]])\n\n points = ax.collections[0]\n np.testing.assert_equal(points.get_offsets().data, [[-1.0, 0.0], [0.0, 0.0]])\n\n\ndef test_plot_contour_2d():\n edge_index = torch.tensor([], dtype=torch.long)\n x = torch.tensor([[1], [1], [1], [1], [1]], dtype=torch.float)\n pos = torch.tensor([[-1, 0], [0, 0], [-1, 1], [1, 1], [0, 1]], dtype=torch.float)\n internal = torch.tensor([1, 0, 0, 0, 0], dtype=torch.float)\n\n data = Data(x=x, pos=pos, edge_index=edge_index)\n\n fig, ax = plt.subplots()\n plot_graph_contour(data, \"x\", 0, ax=ax, internal_nodes_mask=internal)\n paths = ax.collections[1].get_paths()\n np.testing.assert_equal(paths[0].vertices, [[0.0, 0.0], [-1.0, 1.0], [-1.0, 0.0]])\n\n\ndef test_plot_3d():\n edge_index = torch.tensor([[0, 1], [1, 0]], dtype=torch.long)\n x = torch.tensor([[-1], [0]], dtype=torch.float)\n pos = torch.tensor([[-1, 0, 0], [0, 0, 1]], dtype=torch.float)\n edge_attr = torch.tensor([[0]], dtype=torch.float)\n\n data = Data(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr)\n\n fig = plot_3d_graph(data, \"x\", 0)\n\n lines = fig.data[0]\n\n assert lines.x == (-1.0, 0.0, None, 0.0, -1.0, None)\n assert lines.y == (0.0, 0.0, None, 0.0, 0.0, None)\n assert lines.z == (0.0, 1.0, None, 1.0, 0.0, None)\n\n points = fig.data[1]\n np.testing.assert_equal(points.x, [-1.0, 0.0])\n np.testing.assert_equal(points.y, [0.0, 0.0])\n np.testing.assert_equal(points.z, [0.0, 1.0])\n\n"
] |
[
[
"numpy.testing.assert_equal",
"matplotlib.pyplot.subplots",
"torch.tensor"
]
] |
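Editor's note: the minimal graph the tests above construct, shown standalone; only `torch` and `torch_geometric` are assumed installed.

```python
import torch
from torch_geometric.data import Data

edge_index = torch.tensor([[0, 1], [1, 0]], dtype=torch.long)  # edges 0->1 and 1->0
pos = torch.tensor([[-1.0, 0.0], [0.0, 0.0]])                  # node coordinates
x = torch.tensor([[-1.0], [0.0]])                              # one feature per node
graph = Data(x=x, pos=pos, edge_index=edge_index)
print(graph.num_nodes, graph.num_edges)                        # 2 2
```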
JiajunSong629/download-project
|
[
"83e31b3db18b83add4c7293a3375adb61b0c59c3"
] |
[
"src/data/load_weight_sensor.py"
] |
[
"\"\"\"\nload_weight_sensor.py\nget the raw seat and foot weight data, and get\nthe clean total weight\n\"\"\"\n\nimport warnings\nimport sqlite3\nimport os\nimport pandas as pd\nimport numpy as np\nfrom typing import Optional, List\nfrom src.data import load_water_distance\nimport config\n\n\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\n\ndef get_seat_weight_raw(\n user_id: int,\n database_path: Optional[str] = config.DATABASE_PATH\n) -> pd.Series:\n \"\"\"\n Get the raw seat weight data from the database.\n\n :param user_id: an integer of user id\n :param database_path: a string of location of database\n :return: 1D pd Series mapping from timestamps to mmeasurements\n \"\"\"\n sql_s = f\"SELECT timestamp_ms, value FROM data WHERE data_capture_id={user_id} AND sensor_id=2\"\n conn = sqlite3.connect(database_path)\n cursor = conn.execute(sql_s)\n time_measurements = []\n weight_measurements = []\n for row in cursor:\n time_measurements.append(row[0])\n weight_measurements.append(row[1])\n data_t = pd.Series(weight_measurements, index=time_measurements)\n\n return data_t\n\n\ndef get_foot_weight_raw(\n user_id: int,\n database_path: Optional[str] = config.DATABASE_PATH\n) -> pd.Series:\n \"\"\"\n Get the raw foot weight data from the database.\n\n :param user_id: an integer of user id\n :param database_path: a string of location of database\n :return: 1D pd Series mapping from timestamps to mmeasurements\n \"\"\"\n sql_s = f\"SELECT timestamp_ms, value FROM data WHERE data_capture_id={user_id} AND sensor_id=3\"\n conn = sqlite3.connect(database_path)\n cursor = conn.execute(sql_s)\n time_measurements = []\n weight_measurements = []\n for row in cursor:\n time_measurements.append(row[0])\n weight_measurements.append(row[1])\n data_t = pd.Series(weight_measurements, index=time_measurements)\n\n return data_t\n\n\ndef get_seat_and_foot_weight_raw(\n user_id: int,\n database_path: Optional[str] = config.DATABASE_PATH\n) -> List[pd.Series]:\n \"\"\"\n Get the clean seat weight and foot weight data from the database.\n\n :param user_id: an integer of user id\n :param database_path: a string of location of database\n :return: a list that contains two 1D pd Series, seat and foot weight\n after being cleaned, mapping from timestamps to measurements\n \"\"\"\n seat_raw = get_seat_weight_raw(user_id, database_path)\n foot_raw = get_foot_weight_raw(user_id, database_path)\n min_t = min(min(seat_raw.index), min(foot_raw.index))\n max_t = max(max(seat_raw.index), max(foot_raw.index))\n\n step_t = 500\n min_floor_t = int(np.floor(min_t/step_t)*step_t)\n max_ceil_t = int(np.ceil(max_t/step_t)*step_t)\n step1_d = {}\n step2_d = {}\n for i in range(min_floor_t, max_ceil_t+step_t, step_t):\n step1_d[i] = []\n step2_d[i] = []\n\n for i in range(len(seat_raw)):\n interval_t = int(np.floor(seat_raw.index[i]/step_t)*step_t)\n step1_d[interval_t].append(seat_raw.values[i])\n for i in range(len(foot_raw)):\n interval_t = int(np.floor(foot_raw.index[i]/step_t)*step_t)\n step2_d[interval_t].append(foot_raw.values[i])\n\n clean1_d = {}\n for i in step1_d.keys():\n clean1_d[i] = np.mean(step1_d[i])\n clean1_sz = pd.Series(clean1_d)\n\n clean2_d = {}\n for i in step2_d.keys():\n clean2_d[i] = np.mean(step2_d[i])\n clean2_sz = pd.Series(clean2_d)\n\n return clean1_sz, clean2_sz\n\n\ndef get_seat_and_foot_weight_clean(\n user_id: int,\n database_path: Optional[str] = config.DATABASE_PATH\n) -> List[pd.Series]:\n \"\"\"\n Get the clean seat and foot weight from the database.\n\n :param user_id: an 
integer of user id\n :param database_path: a string of location of database\n :return: two 1D pd Series mapping from\n timestamps (adjusted relative to the initial timestamp) to\n measurements of the clean seat weight and foot weight data\n (scale in kg)\n \"\"\"\n water_distance_raw = load_water_distance.get_water_distance_raw(\n user_id, database_path)\n t0 = water_distance_raw.index[0]\n seat_raw, foot_raw = get_seat_and_foot_weight_raw(user_id, database_path)\n seat_clean = pd.Series(\n seat_raw.values / 1000,\n index=(seat_raw.index - t0) / 1000\n )\n\n foot_clean = pd.Series(\n foot_raw.values / 1000,\n index=(foot_raw.index - t0) / 1000\n )\n\n return seat_clean, foot_clean\n\n\ndef get_total_weight_clean(\n user_id: int,\n database_path: Optional[str] = config.DATABASE_PATH\n) -> pd.Series:\n \"\"\"\n Get the clean total weight data from the database.\n\n :param user_id: an integer of user id\n :param database_path: a string of location of database\n :return: 1D pd Series mapping from\n timestamps (adjusted relative to the initial timestamp) to\n measurements of the clean total weight data (scale in kg)\n \"\"\"\n seat_clean, foot_clean = get_seat_and_foot_weight_clean(\n user_id, database_path)\n total_weight = seat_clean + foot_clean\n return total_weight\n"
] |
[
[
"numpy.ceil",
"numpy.mean",
"pandas.Series",
"numpy.floor"
]
] |
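Editor's note: the 500 ms bin-averaging inside `get_seat_and_foot_weight_raw` above, condensed to an equivalent pandas groupby; the sample timestamps and values are made up.

```python
import numpy as np
import pandas as pd

raw = pd.Series([70.1, 70.3, 70.2, 69.9],
                index=[0, 120, 480, 730])  # timestamp_ms -> measurement
step_t = 500
bins = (np.floor(raw.index / step_t) * step_t).astype(int)  # 500 ms bin labels
clean = raw.groupby(bins).mean()
print(clean)  # bin 0 -> ~70.2 (mean of first three), bin 500 -> 69.9
```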
vicFigure/tf-ssd-mobilenetv1
|
[
"9adf4f821f5ddeb45c973dd52a440cfabc09c9fd"
] |
[
"train_ssd_network.py"
] |
[
"# Copyright 2016 Paul Balanca. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generic training script that trains a SSD model using a given dataset.\"\"\"\nimport tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\n\nfrom datasets import dataset_factory\nfrom deployment import model_deploy\nfrom nets import nets_factory\nfrom preprocessing import preprocessing_factory\nimport tf_utils\n\nslim = tf.contrib.slim\n\nDATA_FORMAT = 'NHWC'\n\n# =========================================================================== #\n# SSD Network flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_float(\n 'loss_alpha', 1., 'Alpha parameter in the loss function.')\ntf.app.flags.DEFINE_float(\n 'negative_ratio', 3., 'Negative ratio in the loss function.')\ntf.app.flags.DEFINE_float(\n 'match_threshold', 0.5, 'Matching threshold in the loss function.')\n\n# =========================================================================== #\n# General Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n 'train_dir', '/tmp/tfmodel/',\n 'Directory where checkpoints and event logs are written to.')\ntf.app.flags.DEFINE_integer('num_clones', 1,\n 'Number of model clones to deploy.')\ntf.app.flags.DEFINE_boolean('clone_on_cpu', False,\n 'Use CPUs to deploy clones.')\ntf.app.flags.DEFINE_integer(\n 'num_readers', 4,\n 'The number of parallel readers that read data from the dataset.')\ntf.app.flags.DEFINE_integer(\n 'num_preprocessing_threads', 4,\n 'The number of threads used to create the batches.')\n\ntf.app.flags.DEFINE_integer(\n 'log_every_n_steps', 10,\n 'The frequency with which logs are print.')\ntf.app.flags.DEFINE_integer(\n 'save_summaries_secs', 600,\n 'The frequency with which summaries are saved, in seconds.')\ntf.app.flags.DEFINE_integer(\n 'save_interval_secs', 600,\n 'The frequency with which the model is saved, in seconds.')\ntf.app.flags.DEFINE_float(\n 'gpu_memory_fraction', 1, 'GPU memory fraction to use.')\n\n# =========================================================================== #\n# Optimization Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_float(\n 'weight_decay', 0.00004, 'The weight decay on the model weights.')\ntf.app.flags.DEFINE_string(\n 'optimizer', 'rmsprop',\n 'The name of the optimizer, one of \"adadelta\", \"adagrad\", \"adam\",'\n '\"ftrl\", \"momentum\", \"sgd\" or \"rmsprop\".')\ntf.app.flags.DEFINE_float(\n 'adadelta_rho', 0.95,\n 'The decay rate for adadelta.')\ntf.app.flags.DEFINE_float(\n 'adagrad_initial_accumulator_value', 0.1,\n 'Starting value for the AdaGrad accumulators.')\ntf.app.flags.DEFINE_float(\n 'adam_beta1', 0.9,\n 'The exponential decay rate for the 1st moment estimates.')\ntf.app.flags.DEFINE_float(\n 'adam_beta2', 0.999,\n 'The exponential decay 
rate for the 2nd moment estimates.')\ntf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')\ntf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,\n 'The learning rate power.')\ntf.app.flags.DEFINE_float(\n 'ftrl_initial_accumulator_value', 0.1,\n 'Starting value for the FTRL accumulators.')\ntf.app.flags.DEFINE_float(\n 'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')\ntf.app.flags.DEFINE_float(\n 'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')\ntf.app.flags.DEFINE_float(\n 'momentum', 0.9,\n 'The momentum for the MomentumOptimizer and RMSPropOptimizer.')\ntf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')\ntf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')\n\n# =========================================================================== #\n# Learning Rate Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n 'learning_rate_decay_type',\n 'exponential',\n 'Specifies how the learning rate is decayed. One of \"fixed\", \"exponential\",'\n ' or \"polynomial\"')\ntf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\ntf.app.flags.DEFINE_float(\n 'end_learning_rate', 0.0001,\n 'The minimal end learning rate used by a polynomial decay learning rate.')\ntf.app.flags.DEFINE_float(\n 'label_smoothing', 0.0, 'The amount of label smoothing.')\ntf.app.flags.DEFINE_float(\n 'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')\ntf.app.flags.DEFINE_float(\n 'num_epochs_per_decay', 2.0,\n 'Number of epochs after which learning rate decays.')\ntf.app.flags.DEFINE_float(\n 'moving_average_decay', None,\n 'The decay to use for the moving average.'\n 'If left as None, then moving averages are not used.')\n\n# =========================================================================== #\n# Dataset Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n 'dataset_name', 'imagenet', 'The name of the dataset to load.')\ntf.app.flags.DEFINE_integer(\n 'num_classes', 21, 'Number of classes to use in the dataset.')\ntf.app.flags.DEFINE_string(\n 'dataset_split_name', 'train', 'The name of the train/test split.')\ntf.app.flags.DEFINE_string(\n 'dataset_dir', None, 'The directory where the dataset files are stored.')\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.')\ntf.app.flags.DEFINE_string(\n 'model_name', 'ssd_300_vgg', 'The name of the architecture to train.')\ntf.app.flags.DEFINE_string(\n 'preprocessing_name', None, 'The name of the preprocessing to use. If left '\n 'as `None`, then the model_name flag is used.')\ntf.app.flags.DEFINE_integer(\n 'batch_size', 32, 'The number of samples in each batch.')\ntf.app.flags.DEFINE_integer(\n 'train_image_size', None, 'Train image size')\ntf.app.flags.DEFINE_integer('max_number_of_steps', None,\n 'The maximum number of training steps.')\n\n# =========================================================================== #\n# Fine-Tuning Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', None,\n 'The path to a checkpoint from which to fine-tune.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_model_scope', None,\n 'Model scope in the checkpoint. 
None if the same as the trained model.')\ntf.app.flags.DEFINE_string(\n 'checkpoint_exclude_scopes', None,\n 'Comma-separated list of scopes of variables to exclude when restoring '\n 'from a checkpoint.')\ntf.app.flags.DEFINE_string(\n 'trainable_scopes', None,\n 'Comma-separated list of scopes to filter the set of variables to train.'\n 'By default, None would train all the variables.')\ntf.app.flags.DEFINE_boolean(\n 'ignore_missing_vars', False,\n 'When restoring a checkpoint would ignore missing variables.')\n\nFLAGS = tf.app.flags.FLAGS\n\n\n# =========================================================================== #\n# Main training routine.\n# =========================================================================== #\ndef main(_):\n if not FLAGS.dataset_dir:\n raise ValueError('You must supply the dataset directory with --dataset_dir')\n\n tf.logging.set_verbosity(tf.logging.DEBUG)\n with tf.Graph().as_default():\n # Config model_deploy. Keep TF Slim Models structure.\n # Useful if want to need multiple GPUs and/or servers in the future.\n deploy_config = model_deploy.DeploymentConfig(\n num_clones=FLAGS.num_clones,\n clone_on_cpu=FLAGS.clone_on_cpu,\n replica_id=0,\n num_replicas=1,\n num_ps_tasks=0)\n # Create global_step.\n with tf.device(deploy_config.variables_device()):\n global_step = slim.create_global_step()\n\n # Select the dataset.\n dataset = dataset_factory.get_dataset(\n FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)\n\n # Get the SSD network and its anchors.\n ssd_class = nets_factory.get_network(FLAGS.model_name)\n ssd_params = ssd_class.default_params._replace(num_classes=FLAGS.num_classes)\n ssd_net = ssd_class(ssd_params)\n ssd_shape = ssd_net.params.img_shape\n ssd_anchors = ssd_net.anchors(ssd_shape)\n\n # Select the preprocessing function.\n preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name\n image_preprocessing_fn = preprocessing_factory.get_preprocessing(\n preprocessing_name, is_training=True)\n\n tf_utils.print_configuration(FLAGS.__flags, ssd_params,\n dataset.data_sources, FLAGS.train_dir)\n # =================================================================== #\n # Create a dataset provider and batches.\n # =================================================================== #\n with tf.device(deploy_config.inputs_device()):\n with tf.name_scope(FLAGS.dataset_name + '_data_provider'):\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers=FLAGS.num_readers,\n common_queue_capacity=20 * FLAGS.batch_size,\n common_queue_min=10 * FLAGS.batch_size,\n shuffle=True)\n # Get for SSD network: image, labels, bboxes.\n [image, shape, glabels, gbboxes] = provider.get(['image', 'shape',\n 'object/label',\n 'object/bbox'])\n # Pre-processing image, labels and bboxes.\n image, glabels, gbboxes = \\\n image_preprocessing_fn(image, glabels, gbboxes,\n out_shape=ssd_shape,\n data_format=DATA_FORMAT)\n\n # Encode groundtruth labels and bboxes.\n gclasses, glocalisations, gscores = \\\n ssd_net.bboxes_encode(glabels, gbboxes, ssd_anchors)\n batch_shape = [1] + [len(ssd_anchors)] * 3\n\n\n # Training batches and queue.\n r = tf.train.batch(\n tf_utils.reshape_list([image, gclasses, glocalisations, gscores]),\n batch_size=FLAGS.batch_size,\n num_threads=FLAGS.num_preprocessing_threads,\n capacity=5 * FLAGS.batch_size)\n b_image, b_gclasses, b_glocalisations, b_gscores = \\\n tf_utils.reshape_list(r, batch_shape)\n\n # Intermediate queueing: unique batch computation pipeline for all\n # GPUs running the 
training.\n batch_queue = slim.prefetch_queue.prefetch_queue(\n tf_utils.reshape_list([b_image, b_gclasses, b_glocalisations, b_gscores]),\n capacity=2 * deploy_config.num_clones)\n\n # =================================================================== #\n # Define the model running on every GPU.\n # =================================================================== #\n def clone_fn(batch_queue):\n \"\"\"Allows data parallelism by creating multiple\n clones of network_fn.\"\"\"\n # Dequeue batch.\n b_image, b_gclasses, b_glocalisations, b_gscores = \\\n tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)\n\n # Construct SSD network.\n arg_scope = ssd_net.arg_scope(weight_decay=FLAGS.weight_decay,\n data_format=DATA_FORMAT)\n with slim.arg_scope(arg_scope):\n predictions, localisations, logits, end_points = \\\n ssd_net.net(b_image, is_training=True)\n # Add loss function.\n ssd_net.losses(logits, localisations,\n b_gclasses, b_glocalisations, b_gscores,\n match_threshold=FLAGS.match_threshold,\n negative_ratio=FLAGS.negative_ratio,\n alpha=FLAGS.loss_alpha,\n label_smoothing=FLAGS.label_smoothing)\n return end_points\n\n # Gather initial summaries.\n summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))\n\n # =================================================================== #\n # Add summaries from first clone.\n # =================================================================== #\n clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])\n first_clone_scope = deploy_config.clone_scope(0)\n # Gather update_ops from the first clone. These contain, for example,\n # the updates for the batch_norm variables created by network_fn.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)\n\n # Add summaries for end_points.\n end_points = clones[0].outputs\n for end_point in end_points:\n x = end_points[end_point]\n summaries.add(tf.summary.histogram('activations/' + end_point, x))\n summaries.add(tf.summary.scalar('sparsity/' + end_point,\n tf.nn.zero_fraction(x)))\n # Add summaries for losses and extra losses.\n for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):\n summaries.add(tf.summary.scalar(loss.op.name, loss))\n for loss in tf.get_collection('EXTRA_LOSSES', first_clone_scope):\n summaries.add(tf.summary.scalar(loss.op.name, loss))\n\n # Add summaries for variables.\n for variable in slim.get_model_variables():\n summaries.add(tf.summary.histogram(variable.op.name, variable))\n\n # =================================================================== #\n # Configure the moving averages.\n # =================================================================== #\n if FLAGS.moving_average_decay:\n moving_average_variables = slim.get_model_variables()\n variable_averages = tf.train.ExponentialMovingAverage(\n FLAGS.moving_average_decay, global_step)\n else:\n moving_average_variables, variable_averages = None, None\n\n # =================================================================== #\n # Configure the optimization procedure.\n # =================================================================== #\n with tf.device(deploy_config.optimizer_device()):\n learning_rate = tf_utils.configure_learning_rate(FLAGS,\n dataset.num_samples,\n global_step)\n optimizer = tf_utils.configure_optimizer(FLAGS, learning_rate)\n summaries.add(tf.summary.scalar('learning_rate', learning_rate))\n\n if FLAGS.moving_average_decay:\n # Update ops executed locally by trainer.\n 
update_ops.append(variable_averages.apply(moving_average_variables))\n\n # Variables to train.\n variables_to_train = tf_utils.get_variables_to_train(FLAGS)\n\n # and returns a train_tensor and summary_op\n total_loss, clones_gradients = model_deploy.optimize_clones(\n clones,\n optimizer,\n var_list=variables_to_train)\n # Add total_loss to summary.\n summaries.add(tf.summary.scalar('total_loss', total_loss))\n\n # Create gradient updates.\n grad_updates = optimizer.apply_gradients(clones_gradients,\n global_step=global_step)\n update_ops.append(grad_updates)\n update_op = tf.group(*update_ops)\n train_tensor = control_flow_ops.with_dependencies([update_op], total_loss,\n name='train_op')\n\n # Add the summaries from the first clone. These contain the summaries\n summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,\n first_clone_scope))\n # Merge all summaries together.\n summary_op = tf.summary.merge(list(summaries), name='summary_op')\n\n # =================================================================== #\n # Kicks off the training.\n # =================================================================== #\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n config = tf.ConfigProto(log_device_placement=False,\n gpu_options=gpu_options)\n saver = tf.train.Saver(max_to_keep=5,\n keep_checkpoint_every_n_hours=1.0,\n write_version=2,\n pad_step_number=False)\n\n slim.learning.train(\n train_tensor,\n logdir=FLAGS.train_dir,\n master='',\n is_chief=True,\n init_fn=tf_utils.get_init_fn(FLAGS),\n summary_op=summary_op,\n number_of_steps=FLAGS.max_number_of_steps,\n log_every_n_steps=FLAGS.log_every_n_steps,\n save_summaries_secs=FLAGS.save_summaries_secs,\n saver=saver,\n save_interval_secs=FLAGS.save_interval_secs,\n session_config=config,\n sync_optimizer=None)\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.summary.histogram",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.get_collection",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.ConfigProto",
"tensorflow.nn.zero_fraction",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.GPUOptions",
"tensorflow.train.Saver",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.name_scope",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.app.run"
]
] |
zapatacomputing/z-quantum-core
|
[
"456f5d83ba641982e710e2abe19afa4e266ab4a8"
] |
[
"src/python/zquantum/core/openfermion/_utils.py"
] |
[
"import itertools\nimport random\nfrom typing import Iterable, List, Optional\n\nimport numpy as np\nfrom openfermion import (\n FermionOperator,\n InteractionOperator,\n InteractionRDM,\n PolynomialTensor,\n QubitOperator,\n count_qubits,\n)\nfrom openfermion import expectation as openfermion_expectation\nfrom openfermion import (\n get_interaction_operator,\n get_sparse_operator,\n normal_ordered,\n number_operator,\n)\nfrom openfermion.linalg import jw_get_ground_state_at_particle_number\nfrom openfermion.transforms import freeze_orbitals, get_fermion_operator\n\nfrom ..circuits import Circuit, X, Y, Z\nfrom ..measurement import ExpectationValues\nfrom ..utils import ValueEstimate, bin2dec, dec2bin\nfrom ..wavefunction import Wavefunction\n\n\ndef get_qubitop_from_matrix(operator: List[List]) -> QubitOperator:\n r\"\"\"Expands a 2^n by 2^n matrix into n-qubit Pauli basis. The runtime of\n this function is O(2^2n).\n\n Args:\n operator: a list of lists (rows) representing a 2^n by 2^n\n matrix.\n\n Returns:\n A QubitOperator instance corresponding to the expansion of\n the input operator as a sum of Pauli strings:\n\n O = 2^-n \\sum_P tr(O*P) P\n \"\"\"\n\n nrows = len(operator)\n ncols = len(operator[0])\n\n # Check if the input operator is square\n if nrows != ncols:\n raise Exception(\"The input operator is not square\")\n\n # Check if the dimensions are powers of 2\n if not (((nrows & (nrows - 1)) == 0) and nrows > 0):\n raise Exception(\"The number of rows is not a power of 2\")\n if not (((ncols & (ncols - 1)) == 0) and ncols > 0):\n raise Exception(\"The number of cols is not a power of 2\")\n\n n = int(np.log2(nrows)) # number of qubits\n\n def decode(bit_string): # Helper function for converting any 2n-bit\n # string to a label vector representing a Pauli\n # string of length n\n\n if len(bit_string) != 2 * n:\n raise Exception(\"LH_expand:decode: input bit string length not 2n\")\n\n output_label = list(np.zeros(n))\n for i in range(0, n):\n output_label[i] = bin2dec(bit_string[2 * i : 2 * i + 2])\n\n return output_label\n\n def trace_product(label_vec): # Helper function for computing tr(OP)\n # where O is the input operator and P is a\n # Pauli string operator\n\n def f(j): # Function which computes the index of the nonzero\n # element in P for a given column j\n\n j_str = dec2bin(j, n)\n for index in range(0, n):\n if label_vec[index] in [1, 2]: # flip if X or Y\n j_str[index] = int(not j_str[index])\n return bin2dec(j_str)\n\n def nz(j): # Function which computes the value of the nonzero\n # element in P on the column j\n\n val_nz = 1.0\n j_str = dec2bin(j, n)\n for index in range(0, n):\n if label_vec[index] == 2:\n if j_str[index] == 0:\n val_nz = val_nz * (1j)\n if j_str[index] == 1:\n val_nz = val_nz * (-1j)\n if label_vec[index] == 3:\n if j_str[index] == 1:\n val_nz = val_nz * (-1)\n return val_nz\n\n # Compute the trace\n tr = 0.0\n for j in range(0, 2 ** n): # loop over the columns\n tr = tr + operator[j][f(j)] * nz(j)\n\n return tr / 2 ** n\n\n # Expand the operator in Pauli basis\n coeffs = list(np.zeros(4 ** n))\n labels = list(np.zeros(4 ** n))\n for i in range(0, 4 ** n): # loop over all 2n-bit strings\n current_string = dec2bin(i, 2 * n) # see util.py\n current_label = decode(current_string)\n coeffs[i] = trace_product(current_label)\n labels[i] = current_label\n\n return get_qubitop_from_coeffs_and_labels(coeffs, labels)\n\n\ndef get_qubitop_from_coeffs_and_labels(\n coeffs: List[float], labels: List[List[int]]\n) -> QubitOperator:\n \"\"\"Generates a 
QubitOperator based on a coefficient vector and\n a label matrix.\n\n Args:\n coeffs: a list of floats representing the coefficients\n for the terms in the Hamiltonian\n labels: a list of lists (a matrix) where each list\n is a vector of integers representing the Pauli\n string. See pauliutil.py for details.\n\n Example:\n\n The Hamiltonian H = 0.1 X1 X2 - 0.4 Y1 Y2 Z3 Z4 can be\n initiated by calling\n\n H = QubitOperator([0.1, -0.4], # coefficients\n [[1 1 0 0], # label matrix\n [2 2 3 3]])\n \"\"\"\n\n output = QubitOperator()\n for i in range(0, len(labels)):\n string_term = \"\"\n for ind, elem in enumerate(labels[i]):\n pauli_symbol = \"\"\n if elem == 1:\n pauli_symbol = \"X\" + str(ind) + \" \"\n if elem == 2:\n pauli_symbol = \"Y\" + str(ind) + \" \"\n if elem == 3:\n pauli_symbol = \"Z\" + str(ind) + \" \"\n string_term += pauli_symbol\n\n output += coeffs[i] * QubitOperator(string_term)\n\n return output\n\n\ndef generate_random_qubitop(\n nqubits: int,\n nterms: int,\n nlocality: int,\n max_coeff: float,\n fixed_coeff: bool = False,\n) -> QubitOperator:\n \"\"\"Generates a Hamiltonian with term coefficients uniformly distributed\n in [-max_coeff, max_coeff].\n\n Args:\n nqubits - number of qubits\n nterms - number of terms in the Hamiltonian\n nlocality - locality of the Hamiltonian\n max_coeff - bound for generating the term coefficients\n fixed_coeff (bool) - If true, all the terms are assign the\n max_coeff as coefficient.\n\n Returns:\n A QubitOperator with the appropriate coefficient vector\n and label matrix.\n \"\"\"\n # generate random coefficient vector\n if fixed_coeff:\n coeffs = [max_coeff] * nterms\n else:\n coeffs = list(np.zeros(nterms))\n for j in range(0, nterms):\n coeffs[j] = random.uniform(-max_coeff, max_coeff)\n\n # generate random label vector\n labels = list(np.zeros(nterms, dtype=int))\n label_set = set()\n j = 0\n while j < nterms:\n inds_nontrivial = sorted(random.sample(range(0, nqubits), nlocality))\n label = list(np.zeros(nqubits, dtype=int))\n for ind in inds_nontrivial:\n label[ind] = random.randint(1, 3)\n if str(label) not in label_set:\n labels[j] = label\n j += 1\n label_set.add(str(label))\n return get_qubitop_from_coeffs_and_labels(coeffs, labels)\n\n\ndef evaluate_qubit_operator(\n qubit_operator: QubitOperator, expectation_values: ExpectationValues\n) -> ValueEstimate:\n \"\"\"Evaluate the expectation value of a qubit operator using expectation values for\n the terms.\n\n Args:\n qubit_operator: the operator\n expectation_values: the expectation values\n\n Returns:\n value_estimate: stores the value of the expectation and its precision\n \"\"\"\n\n # Sum the contributions from all terms\n total = 0\n\n # Add all non-trivial terms\n term_index = 0\n for term in qubit_operator.terms:\n total += np.real(\n qubit_operator.terms[term] * expectation_values.values[term_index]\n )\n term_index += 1\n\n value_estimate = ValueEstimate(total)\n return value_estimate\n\n\ndef evaluate_qubit_operator_list(\n qubit_operator_list: List[QubitOperator], expectation_values: ExpectationValues\n) -> ValueEstimate:\n \"\"\"Evaluate the expectation value of a qubit operator list using expectation values\n for the terms. 
The expectation values should be in the order given by the qubit\n operator list, and the value returned is the sum of all terms in the qubit operator\n list.\n\n Args:\n qubit_operator_list: the operator list\n expectation_values: the expectation values\n\n Returns:\n value_estimate: stores the value of the expectation and its precision\n \"\"\"\n\n # Sum the contributions from all terms\n total = 0\n\n # Add all non-trivial terms\n term_index = 0\n for qubit_operator in qubit_operator_list:\n for term in qubit_operator.terms:\n total += np.real(\n qubit_operator.terms[term] * expectation_values.values[term_index]\n )\n term_index += 1\n\n value_estimate = ValueEstimate(total)\n return value_estimate\n\n\ndef reverse_qubit_order(qubit_operator: QubitOperator, n_qubits: Optional[int] = None):\n \"\"\"Reverse the order of qubit indices in a qubit operator.\n\n Args:\n qubit_operator (openfermion.QubitOperator): the operator\n n_qubits (int): total number of qubits. Needs to be provided when\n the size of the system of interest is greater than the size of qubit\n operator (optional)\n\n Returns:\n reversed_op (openfermion.ops.QubitOperator)\n \"\"\"\n\n reversed_op = QubitOperator()\n\n if n_qubits is None:\n n_qubits = count_qubits(qubit_operator)\n if n_qubits < count_qubits(qubit_operator):\n raise ValueError(\"Invalid number of qubits specified.\")\n\n for term in qubit_operator.terms:\n new_term = []\n for factor in term:\n new_factor = list(factor)\n new_factor[0] = n_qubits - 1 - new_factor[0]\n new_term.append(tuple(new_factor))\n reversed_op += QubitOperator(tuple(new_term), qubit_operator.terms[term])\n return reversed_op\n\n\ndef get_expectation_value(\n qubit_op: QubitOperator, wavefunction: Wavefunction, reverse_operator: bool = False\n) -> complex:\n \"\"\"Get the expectation value of a qubit operator with respect to a wavefunction.\n Args:\n qubit_op: the operator\n wavefunction: the wavefunction\n reverse_operator: whether to reverse order of qubit operator\n before computing expectation value. This should be True if the convention\n of the basis states used for the wavefunction is the opposite of the one in\n the qubit operator. This is the case when the wavefunction uses\n Rigetti convention (https://arxiv.org/abs/1711.02086) of ordering qubits.\n Returns:\n the expectation value\n \"\"\"\n n_qubits = wavefunction.amplitudes.shape[0].bit_length() - 1\n\n # Convert the qubit operator to a sparse matrix. 
Note that the qubit indices\n    # must be reversed because OpenFermion and our Wavefunction use\n    # different conventions for how to order the computational basis states!\n    if reverse_operator:\n        qubit_op = reverse_qubit_order(qubit_op, n_qubits=n_qubits)\n    sparse_op = get_sparse_operator(qubit_op, n_qubits=n_qubits)\n\n    # Compute the expectation value\n    exp_val = openfermion_expectation(sparse_op, wavefunction.amplitudes)\n    return exp_val\n\n\ndef change_operator_type(operator, operatorType):\n    \"\"\"Take an operator and attempt to cast it to an operator of a different type\n\n    Args:\n        operator: The operator\n        operatorType: The type of the operator that the original operator is\n            cast to\n    Returns:\n        An operator with type operatorType\n    \"\"\"\n    new_operator = operatorType()\n    for op in operator.terms:\n        new_operator += operatorType(tuple(op), operator.terms[op])\n\n    return new_operator\n\n\ndef get_fermion_number_operator(n_qubits, n_particles=None):\n    \"\"\"Return a FermionOperator representing the number operator for n qubits.\n    If `n_particles` is specified, it can be used for creating a constraint on the number\n    of particles.\n\n    Args:\n        n_qubits (int): number of qubits in the system\n        n_particles (int): number of particles in the system.\n            If specified, it is subtracted from the number\n            operator such that its expectation value is zero.\n    Returns:\n        (openfermion.ops.FermionOperator): the number operator\n    \"\"\"\n    operator = number_operator(n_qubits)\n    if n_particles is not None:\n        operator += FermionOperator(\"\", -1.0 * float(n_particles))\n    return get_interaction_operator(operator)\n\n\ndef get_diagonal_component(operator):\n    if isinstance(operator, InteractionOperator):\n        return _get_diagonal_component_interaction_operator(operator)\n    elif isinstance(operator, PolynomialTensor):\n        return _get_diagonal_component_polynomial_tensor(operator)\n    else:\n        raise TypeError(\n            f\"Getting diagonal component not supported for {type(operator)}\"\n        )\n\n\ndef _get_diagonal_component_polynomial_tensor(polynomial_tensor):\n    \"\"\"Get the component of an interaction operator that is\n    diagonal in the computational basis under Jordan-Wigner\n    transformation (i.e., the terms that can be expressed\n    as products of number operators).\n    Args:\n        polynomial_tensor (openfermion.ops.PolynomialTensor): the tensor\n\n    Returns:\n        tuple: two openfermion.ops.PolynomialTensor objects. 
The first is the\n diagonal component, and the second is the remainder.\n \"\"\"\n n_modes = count_qubits(polynomial_tensor)\n remainder_tensors = {}\n diagonal_tensors = {}\n\n diagonal_tensors[()] = polynomial_tensor.constant\n for key in polynomial_tensor.n_body_tensors:\n if key == ():\n continue\n remainder_tensors[key] = np.zeros((n_modes,) * len(key), complex)\n diagonal_tensors[key] = np.zeros((n_modes,) * len(key), complex)\n\n for indices in itertools.product(range(n_modes), repeat=len(key)):\n creation_counts = {}\n annihilation_counts = {}\n\n for meta_index, index in enumerate(indices):\n if key[meta_index] == 0:\n if annihilation_counts.get(index) is None:\n annihilation_counts[index] = 1\n else:\n annihilation_counts[index] += 1\n elif key[meta_index] == 1:\n if creation_counts.get(index) is None:\n creation_counts[index] = 1\n else:\n creation_counts[index] += 1\n\n term_is_diagonal = True\n for index in creation_counts:\n if creation_counts[index] != annihilation_counts.get(index):\n term_is_diagonal = False\n break\n if term_is_diagonal:\n for index in annihilation_counts:\n if annihilation_counts[index] != creation_counts.get(index):\n term_is_diagonal = False\n break\n if term_is_diagonal:\n diagonal_tensors[key][indices] = polynomial_tensor.n_body_tensors[key][\n indices\n ]\n else:\n remainder_tensors[key][indices] = polynomial_tensor.n_body_tensors[key][\n indices\n ]\n\n return PolynomialTensor(diagonal_tensors), PolynomialTensor(remainder_tensors)\n\n\ndef _get_diagonal_component_interaction_operator(interaction_operator):\n \"\"\"Get the component of an interaction operator that is\n diagonal in the computational basis under Jordan-Wigner\n transformation (i.e., the terms that can be expressed\n as products of number operators).\n Args:\n interaction_operator (openfermion.ops.InteractionOperator): the operator\n\n Returns:\n tuple: two openfermion.ops.InteractionOperator objects. The first is the\n diagonal component, and the second is the remainder.\n \"\"\"\n\n one_body_tensor = np.zeros(\n interaction_operator.one_body_tensor.shape, dtype=complex\n )\n two_body_tensor = np.zeros(\n interaction_operator.two_body_tensor.shape, dtype=complex\n )\n diagonal_op = InteractionOperator(\n interaction_operator.constant, one_body_tensor, two_body_tensor\n )\n\n one_body_tensor = np.copy(interaction_operator.one_body_tensor).astype(complex)\n two_body_tensor = np.copy(interaction_operator.two_body_tensor).astype(complex)\n remainder_op = InteractionOperator(0.0, one_body_tensor, two_body_tensor)\n\n n_spin_orbitals = interaction_operator.two_body_tensor.shape[0]\n\n for p in range(n_spin_orbitals):\n for q in range(n_spin_orbitals):\n diagonal_op.two_body_tensor[\n p, q, p, q\n ] = interaction_operator.two_body_tensor[p, q, p, q]\n diagonal_op.two_body_tensor[\n p, q, q, p\n ] = interaction_operator.two_body_tensor[p, q, q, p]\n remainder_op.two_body_tensor[p, q, p, q] = 0.0\n remainder_op.two_body_tensor[p, q, q, p] = 0.0\n\n for p in range(n_spin_orbitals):\n diagonal_op.one_body_tensor[p, p] = interaction_operator.one_body_tensor[p, p]\n remainder_op.one_body_tensor[p, p] = 0.0\n\n return diagonal_op, remainder_op\n\n\ndef get_polynomial_tensor(fermion_operator, n_qubits=None):\n r\"\"\"Convert a fermionic operator to a Polynomial Tensor.\n\n Args:\n fermion_operator (openferion.ops.FermionOperator): The operator.\n n_qubits (int): The number of qubits to be included in the\n PolynomialTensor. 
Must be at least equal to the number of qubits\n that are acted on by fermion_operator. If None, then the number of\n qubits is inferred from fermion_operator.\n\n Returns:\n openfermion.ops.PolynomialTensor: The tensor representation of the\n operator.\n \"\"\"\n if not isinstance(fermion_operator, FermionOperator):\n raise TypeError(\"Input must be a FermionOperator.\")\n\n if n_qubits is None:\n n_qubits = count_qubits(fermion_operator)\n if n_qubits < count_qubits(fermion_operator):\n raise ValueError(\"Invalid number of qubits specified.\")\n\n # Normal order the terms and initialize.\n fermion_operator = normal_ordered(fermion_operator)\n tensor_dict = {}\n\n # Loop through terms and assign to matrix.\n for term in fermion_operator.terms:\n coefficient = fermion_operator.terms[term]\n\n # Handle constant shift.\n if len(term) == 0:\n tensor_dict[()] = coefficient\n\n else:\n key = tuple([operator[1] for operator in term])\n if tensor_dict.get(key) is None:\n tensor_dict[key] = np.zeros((n_qubits,) * len(key), complex)\n\n indices = tuple([operator[0] for operator in term])\n tensor_dict[key][indices] = coefficient\n\n return PolynomialTensor(tensor_dict)\n\n\ndef create_circuits_from_qubit_operator(qubit_operator: QubitOperator) -> List[Circuit]:\n \"\"\"Creates a list of circuit objects from the Pauli terms of a QubitOperator\n Args:\n qubit_operator: operator for which the Pauli terms are converted into circuits\n\n Return:\n circuit_set: a list of Pauli string gate circuits\n \"\"\"\n\n # Get the Pauli terms, ignoring coefficients\n pauli_terms = list(qubit_operator.terms.keys())\n term_gate_map = {\"X\": X, \"Y\": Y, \"Z\": Z}\n circuit_set = []\n\n # Loop over Pauli terms and populate circuit set list\n for term in pauli_terms:\n\n circuit = Circuit()\n\n # Loop over Pauli factors in Pauli term and construct Pauli term circuit\n for pauli in term: # loop over pauli operators in an n qubit pauli term\n pauli_index = pauli[0]\n pauli_factor = pauli[1]\n circuit += term_gate_map[pauli_factor](pauli_index)\n\n circuit_set += [circuit]\n\n return circuit_set\n\n\ndef get_ground_state_rdm_from_qubit_op(\n qubit_operator: QubitOperator, n_particles: int\n) -> InteractionRDM:\n \"\"\"Diagonalize operator and compute the ground state 1- and 2-RDM\n\n Args:\n qubit_operator: The openfermion operator to diagonalize\n n_particles: number of particles in the target ground state\n\n Returns:\n rdm: interaction RDM of the ground state with the particle number n_particles\n \"\"\"\n\n sparse_operator = get_sparse_operator(qubit_operator)\n e, ground_state_wf = jw_get_ground_state_at_particle_number(\n sparse_operator, n_particles\n ) # float/np.array pair\n n_qubits = count_qubits(qubit_operator)\n\n one_body_tensor_list = []\n for i in range(n_qubits):\n for j in range(n_qubits):\n idag_j = get_sparse_operator(\n FermionOperator(f\"{i}^ {j}\"), n_qubits=n_qubits\n )\n idag_j = idag_j.toarray()\n one_body_tensor_list.append(\n np.conjugate(ground_state_wf) @ idag_j @ ground_state_wf\n )\n\n one_body_tensor = np.array(one_body_tensor_list)\n one_body_tensor = one_body_tensor.reshape(n_qubits, n_qubits)\n\n two_body_tensor = np.zeros((n_qubits,) * 4, dtype=complex)\n for p in range(n_qubits):\n for q in range(0, p + 1):\n for r in range(n_qubits):\n for s in range(0, r + 1):\n pdag_qdag_r_s = get_sparse_operator(\n FermionOperator(f\"{p}^ {q}^ {r} {s}\"), n_qubits=n_qubits\n )\n pdag_qdag_r_s = pdag_qdag_r_s.toarray()\n rdm_element = (\n np.conjugate(ground_state_wf) @ pdag_qdag_r_s @ 
ground_state_wf\n )\n two_body_tensor[p, q, r, s] = rdm_element\n two_body_tensor[q, p, r, s] = -rdm_element\n two_body_tensor[q, p, s, r] = rdm_element\n two_body_tensor[p, q, s, r] = -rdm_element\n\n return InteractionRDM(one_body_tensor, two_body_tensor)\n\n\ndef remove_inactive_orbitals(\n interaction_op: InteractionOperator, n_active: int = None, n_core: int = 0\n) -> InteractionOperator:\n \"\"\"Remove orbitals not in the active space from an interaction operator.\n\n Args:\n interaction_op: the operator, assumed to be ordered with alternating spin-up and\n spin-down spin orbitals.\n n_active: the number of active molecular orbitals. If None, include all orbitals\n beyond n_core. Note that the number of active spin orbitals will be twice\n the number of active molecular orbitals.\n n_core: the number of core molecular orbitals to be frozen.\n\n Returns:\n The interaction operator with inactive orbitals removed, and the Hartree-Fock\n energy of the core orbitals added to the constant.\n \"\"\"\n\n # This implementation is probably not very efficient, because it converts the\n # interaction operator into a fermion operator and then back to an interaction\n # operator.\n\n # Convert the InteractionOperator to a FermionOperator\n fermion_op = get_fermion_operator(interaction_op)\n\n # Determine which occupied spin-orbitals are to be frozen\n occupied = range(2 * n_core)\n\n unoccupied: Iterable\n # Determine which unoccupied spin-orbitals are to be frozen\n if n_active is not None:\n unoccupied = range(\n 2 * n_core + 2 * n_active, interaction_op.one_body_tensor.shape[0]\n )\n else:\n unoccupied = []\n\n # Freeze the spin-orbitals\n frozen_fermion_op = freeze_orbitals(fermion_op, occupied, unoccupied)\n\n # Convert back to an interaction operator\n frozen_interaction_op = get_interaction_operator(frozen_fermion_op)\n\n return frozen_interaction_op\n"
] |
[
[
"numpy.log2",
"numpy.copy",
"numpy.real",
"numpy.array",
"numpy.zeros",
"numpy.conjugate"
]
] |
lsc25846/R
|
[
"9712203f264f8a487c56135cdc4359d4e022f991"
] |
[
"train_refinedet.py"
] |
[
"import os\nimport time\nimport argparse\nimport torch\nimport _init_paths\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.utils.data as data\nfrom libs.utils.augmentations import SSDAugmentation\nfrom libs.networks.vgg_refinedet import VGGRefineDet\nfrom libs.networks.resnet_refinedet import ResNetRefineDet\nfrom libs.utils.config import voc320, voc512, coco320, coco512, MEANS\nfrom libs.data_layers.transform import detection_collate\nfrom libs.data_layers.roidb import combined_roidb\nfrom libs.data_layers.blob_dataset import BlobDataset\nfrom libs.utils.path_config import cfg as path_cfg\n\nimport numpy as np\nimport random\nimport pdb\n\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\nsetup_seed(0)\n\n\ndef str2bool(v):\n return v.lower() in ('yes', 'true', 't', '1')\n\n\nparser = argparse.ArgumentParser(\n description='RefineDet Training With Pytorch')\nparser.add_argument('--dataset', default='voc',\n choices=['voc', 'coco'],\n type=str, help='voc or coco')\nparser.add_argument('--network', default='vgg16',\n help='backbone network')\nparser.add_argument('--base_model', default='vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--input_size', default=320, type=int,\n help='Input size for training')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume_checkpoint', default=None, type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=8, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--output_folder', default='output',\n help='Directory for saving checkpoint models')\nparser.add_argument('--pretrained_folder', default='pretrained_model',\n help='Directory for saving checkpoint models')\n\nargs = parser.parse_args()\n\nnum_gpus = 0\n\nif torch.cuda.is_available():\n print('CUDA devices: ', torch.cuda.device)\n print('GPU numbers: ', torch.cuda.device_count())\n num_gpus = torch.cuda.device_count()\n\n\n# num_gpus = 0\nnum_gpus = 1\n\n# if torch.cuda.is_available():\n# if args.cuda:\n# torch.set_default_tensor_type('torch.cuda.FloatTensor')\n# if not args.cuda:\n# print('WARNING: It looks like you have a CUDA device, but are not' +\n# 'using CUDA.\\nRun with --cuda for optimal training speed.')\n# torch.set_default_tensor_type('torch.FloatTensor')\n# else:\n# torch.set_default_tensor_type('torch.FloatTensor')\n\n\ndef train():\n # Assign imdb_name and imdbval_name according to args.dataset.\n if args.dataset == \"voc\":\n # args.imdb_name = \"voc_2007_trainval\"\n args.imdb_name = \"voc_2007_trainval\"\n args.imdbval_name = \"voc_2007_test\"\n elif args.dataset == \"coco\":\n args.imdb_name = \"coco_2014_train+coco_2014_valminusminival\"\n 
args.imdbval_name = \"coco_2014_minival\"\n # Import config\n if args.dataset == 'coco':\n cfg = (coco320, coco512)[args.input_size == 512]\n elif args.dataset == 'voc':\n cfg = (voc320, voc512)[args.input_size == 512]\n # Create imdb, roidb and blob_dataset\n print('Create or load an imdb.')\n imdb, roidb = combined_roidb(args.imdb_name)\n blob_dataset = BlobDataset(\n imdb, roidb, transform=SSDAugmentation(cfg['min_dim'], MEANS),\n target_normalization=True)\n \n # Construct networks.\n print('Construct {}_refinedet network.'.format(args.network))\n if args.network == 'vgg16':\n refinedet = VGGRefineDet(cfg['num_classes'], cfg)\n elif args.network == 'resnet101':\n refinedet = ResNetRefineDet(cfg['num_classes'], cfg)\n \n pretrained_model = os.path.join(path_cfg.DATA_DIR, args.pretrained_folder, args.base_model)\n refinedet.create_architecture(pretrained_model, pretrained=True, fine_tuning=True)\n # For CPU\n net = refinedet\n # For GPU/GPUs\n if args.cuda:\n net = refinedet.cuda()\n if num_gpus > 1:\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n # Resume\n if args.resume_checkpoint:\n print('Resuming training, loading {}...'.format(args.resume_checkpoint))\n net.load_weights(args.resume_checkpoint)\n \n # pdb.set_trace()\n # params = net.state_dict()\n # for k, v in params.items():\n # print(k)\n # print(v.shape)\n optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),\n lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n net.train()\n print('Training RefineDet on:', args.imdb_name)\n print('Using the specified args:')\n print(args)\n \n output_folder = os.path.join(path_cfg.OUTPUT_DIR, args.output_folder)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n \n str_input_size = str(cfg['min_dim'])\n model_info = 'refinedet{}_{}'.format(str_input_size, args.dataset)\n model_output_folder = os.path.join(output_folder, '{}'.format(args.network), model_info)\n if not os.path.exists(model_output_folder):\n os.makedirs(model_output_folder)\n \n data_loader = data.DataLoader(blob_dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n # Create batch iterator\n # Number of iterations in each epoch\n base_batch_size = 32\n batch_multiplier = float(args.batch_size) / base_batch_size\n # number of epoch, in case of resuming from args.start_iter\n # fixed number of epoches, no matter what args.batch_size is\n num_epoch = (cfg['max_iter'] - args.start_iter) // (len(blob_dataset) // base_batch_size)\n base_iteration = args.start_iter\n lr_steps = [int(x / batch_multiplier) for x in cfg['lr_steps']]\n actual_iteration = 0\n decay_step = 0\n # print('num_epoch: {}, batch_multiplier: {}, maximum base_iteration {} actual_iteration: {}')\n for epoch in range(0, num_epoch):\n # pdb.set_trace()\n t0 = time.time()\n for i_batch, (images, targets) in enumerate(data_loader):\n if actual_iteration in lr_steps:\n decay_step += 1\n adjust_learning_rate(optimizer, args.gamma, decay_step)\n \n if args.cuda:\n images = Variable(images.cuda())\n targets = Variable(targets.cuda())\n else:\n images = Variable(images)\n targets = Variable(targets)\n t1_data = time.time()\n # forward and backprop\n optimizer.zero_grad()\n bi_loss_loc, bi_loss_conf, multi_loss_loc, multi_loss_conf = \\\n net(images, targets)\n loss = bi_loss_loc.mean() + bi_loss_conf.mean() + \\\n multi_loss_loc.mean() + multi_loss_conf.mean()\n # loss = bi_loss_loc.mean() + bi_loss_conf.mean()\n loss.backward()\n 
optimizer.step()\n t1 = time.time()\n if num_gpus > 1:\n arm_loss_loc = bi_loss_loc.mean().item()\n arm_loss_conf = bi_loss_conf.mean().item()\n odm_loss_loc = multi_loss_loc.mean().item()\n odm_loss_conf = multi_loss_conf.mean().item()\n else:\n arm_loss_loc = bi_loss_loc.item()\n arm_loss_conf = bi_loss_conf.item()\n odm_loss_loc = multi_loss_loc.item()\n odm_loss_conf = multi_loss_conf.item()\n \n if actual_iteration % 10 == 0:\n print('timer: %.4f sec, data loading timer: %.4f sec' % (t1 - t0, t1_data - t0))\n print('iter ' + repr(actual_iteration) +\n (' || ARM Loss Loc: %.4f || ARM Loss Conf: %.4f' +\n ' || ODM Loss Loc: %.4f || ODM Loss Conf: %.4f' +\n ' || Loss: %.4f ||') % (\n arm_loss_loc, arm_loss_conf,\n odm_loss_loc, odm_loss_conf,\n loss.item()) + ' ')\n # save checkpoint.\n if actual_iteration != 0 and actual_iteration % (int(10000 / batch_multiplier)) == 0:\n print('Saving state, iter:', base_iteration)\n torch.save(refinedet.state_dict(),\n os.path.join(model_output_folder,\n '_'.join([args.network, model_info, repr(base_iteration) + '.pth'])))\n # update counts.\n actual_iteration += 1\n base_iteration = int(actual_iteration * batch_multiplier)\n t0 = time.time()\n \n torch.save(refinedet.state_dict(),\n os.path.join(model_output_folder, '_'.join([args.network, model_info + '.pth'])))\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count",
"torch.autograd.Variable"
]
] |
junjihashimoto/vision
|
[
"791c172a337d98012018f98ffde93b1020ba3ed5"
] |
[
"test/test_models.py"
] |
[
"from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state\nfrom collections import OrderedDict\nfrom itertools import product\nimport torch\nimport numpy as np\nfrom torchvision import models\nimport unittest\nimport traceback\nimport random\n\n\ndef set_rng_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef get_available_classification_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\ndef get_available_segmentation_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\ndef get_available_detection_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\ndef get_available_video_models():\n # TODO add a registration mechanism to torchvision.models\n return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != \"_\"]\n\n\n# models that are in torch hub, as well as r3d_18. we tried testing all models\n# but the test was too slow. not included are detection models, because\n# they are not yet supported in JIT.\n# If 'unwrapper' is provided it will be called with the script model outputs\n# before they are compared to the eager model outputs. This is useful if the\n# model outputs are different between TorchScript / Eager mode\nscript_test_models = {\n 'deeplabv3_resnet101': {},\n 'mobilenet_v2': {},\n 'resnext50_32x4d': {},\n 'fcn_resnet101': {},\n 'googlenet': {\n 'unwrapper': lambda x: x.logits\n },\n 'densenet121': {},\n 'resnet18': {},\n 'alexnet': {},\n 'shufflenet_v2_x1_0': {},\n 'squeezenet1_0': {},\n 'vgg11': {},\n 'inception_v3': {\n 'unwrapper': lambda x: x.logits\n },\n 'r3d_18': {},\n \"fasterrcnn_resnet50_fpn\": {\n 'unwrapper': lambda x: x[1]\n },\n \"maskrcnn_resnet50_fpn\": {\n 'unwrapper': lambda x: x[1]\n },\n \"keypointrcnn_resnet50_fpn\": {\n 'unwrapper': lambda x: x[1]\n },\n}\n\n\nclass ModelTester(TestCase):\n def checkModule(self, model, name, args):\n if name not in script_test_models:\n return\n unwrapper = script_test_models[name].get('unwrapper', None)\n return super(ModelTester, self).checkModule(model, args, unwrapper=unwrapper, skip=False)\n\n def _test_classification_model(self, name, input_shape):\n set_rng_seed(0)\n # passing num_class equal to a number other than 1000 helps in making the test\n # more enforcing in nature\n model = models.__dict__[name](num_classes=50)\n model.eval()\n x = torch.rand(input_shape)\n out = model(x)\n self.assertExpected(out, prec=0.1)\n self.assertEqual(out.shape[-1], 50)\n self.checkModule(model, name, (x,))\n\n def _test_segmentation_model(self, name):\n # passing num_class equal to a number other than 1000 helps in making the test\n # more enforcing in nature\n model = models.segmentation.__dict__[name](num_classes=50, pretrained_backbone=False)\n model.eval()\n input_shape = (1, 3, 300, 300)\n x = torch.rand(input_shape)\n out = model(x)\n self.assertEqual(tuple(out[\"out\"].shape), (1, 50, 300, 300))\n self.checkModule(model, name, (x,))\n\n def _test_detection_model(self, name):\n set_rng_seed(0)\n model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False)\n model.eval()\n input_shape = (3, 300, 
300)\n x = torch.rand(input_shape)\n model_input = [x]\n out = model(model_input)\n self.assertIs(model_input[0], x)\n self.assertEqual(len(out), 1)\n\n def subsample_tensor(tensor):\n num_elems = tensor.numel()\n num_samples = 20\n if num_elems <= num_samples:\n return tensor\n\n flat_tensor = tensor.flatten()\n ith_index = num_elems // num_samples\n return flat_tensor[ith_index - 1::ith_index]\n\n def compute_mean_std(tensor):\n # can't compute mean of integral tensor\n tensor = tensor.to(torch.double)\n mean = torch.mean(tensor)\n std = torch.std(tensor)\n return {\"mean\": mean, \"std\": std}\n\n # maskrcnn_resnet_50_fpn numerically unstable across platforms, so for now\n # compare results with mean and std\n if name == \"maskrcnn_resnet50_fpn\":\n test_value = map_nested_tensor_object(out, tensor_map_fn=compute_mean_std)\n # mean values are small, use large prec\n self.assertExpected(test_value, prec=.01)\n else:\n self.assertExpected(map_nested_tensor_object(out, tensor_map_fn=subsample_tensor), prec=0.01)\n\n scripted_model = torch.jit.script(model)\n scripted_model.eval()\n scripted_out = scripted_model(model_input)[1]\n self.assertEqual(scripted_out[0][\"boxes\"], out[0][\"boxes\"])\n self.assertEqual(scripted_out[0][\"scores\"], out[0][\"scores\"])\n # labels currently float in script: need to investigate (though same result)\n self.assertEqual(scripted_out[0][\"labels\"].to(dtype=torch.long), out[0][\"labels\"])\n self.assertTrue(\"boxes\" in out[0])\n self.assertTrue(\"scores\" in out[0])\n self.assertTrue(\"labels\" in out[0])\n # don't check script because we are compiling it here:\n # TODO: refactor tests\n # self.check_script(model, name)\n self.checkModule(model, name, ([x],))\n\n def _test_video_model(self, name):\n # the default input shape is\n # bs * num_channels * clip_len * h *w\n input_shape = (1, 3, 4, 112, 112)\n # test both basicblock and Bottleneck\n model = models.video.__dict__[name](num_classes=50)\n model.eval()\n x = torch.rand(input_shape)\n out = model(x)\n self.checkModule(model, name, (x,))\n self.assertEqual(out.shape[-1], 50)\n\n def _make_sliced_model(self, model, stop_layer):\n layers = OrderedDict()\n for name, layer in model.named_children():\n layers[name] = layer\n if name == stop_layer:\n break\n new_model = torch.nn.Sequential(layers)\n return new_model\n\n def test_memory_efficient_densenet(self):\n input_shape = (1, 3, 300, 300)\n x = torch.rand(input_shape)\n\n for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']:\n model1 = models.__dict__[name](num_classes=50, memory_efficient=True)\n params = model1.state_dict()\n model1.eval()\n out1 = model1(x)\n out1.sum().backward()\n\n model2 = models.__dict__[name](num_classes=50, memory_efficient=False)\n model2.load_state_dict(params)\n model2.eval()\n out2 = model2(x)\n\n max_diff = (out1 - out2).abs().max()\n\n self.assertTrue(max_diff < 1e-5)\n\n def test_resnet_dilation(self):\n # TODO improve tests to also check that each layer has the right dimensionality\n for i in product([False, True], [False, True], [False, True]):\n model = models.__dict__[\"resnet50\"](replace_stride_with_dilation=i)\n model = self._make_sliced_model(model, stop_layer=\"layer4\")\n model.eval()\n x = torch.rand(1, 3, 224, 224)\n out = model(x)\n f = 2 ** sum(i)\n self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f))\n\n def test_mobilenetv2_residual_setting(self):\n model = models.__dict__[\"mobilenet_v2\"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]])\n model.eval()\n x = torch.rand(1, 
3, 224, 224)\n out = model(x)\n self.assertEqual(out.shape[-1], 1000)\n\n def test_fasterrcnn_double(self):\n model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)\n model.double()\n model.eval()\n input_shape = (3, 300, 300)\n x = torch.rand(input_shape, dtype=torch.float64)\n model_input = [x]\n out = model(model_input)\n self.assertIs(model_input[0], x)\n self.assertEqual(len(out), 1)\n self.assertTrue(\"boxes\" in out[0])\n self.assertTrue(\"scores\" in out[0])\n self.assertTrue(\"labels\" in out[0])\n\n @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU')\n def test_fasterrcnn_switch_devices(self):\n model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False)\n model.cuda()\n model.eval()\n input_shape = (3, 300, 300)\n x = torch.rand(input_shape, device='cuda')\n model_input = [x]\n out = model(model_input)\n self.assertIs(model_input[0], x)\n self.assertEqual(len(out), 1)\n self.assertTrue(\"boxes\" in out[0])\n self.assertTrue(\"scores\" in out[0])\n self.assertTrue(\"labels\" in out[0])\n # now switch to cpu and make sure it works\n model.cpu()\n x = x.cpu()\n out_cpu = model([x])\n self.assertTrue(\"boxes\" in out_cpu[0])\n self.assertTrue(\"scores\" in out_cpu[0])\n self.assertTrue(\"labels\" in out_cpu[0])\n\n\nfor model_name in get_available_classification_models():\n # for-loop bodies don't define scopes, so we have to save the variables\n # we want to close over in some way\n def do_test(self, model_name=model_name):\n input_shape = (1, 3, 224, 224)\n if model_name in ['inception_v3']:\n input_shape = (1, 3, 299, 299)\n self._test_classification_model(model_name, input_shape)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\n\nfor model_name in get_available_segmentation_models():\n # for-loop bodies don't define scopes, so we have to save the variables\n # we want to close over in some way\n def do_test(self, model_name=model_name):\n self._test_segmentation_model(model_name)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\n\nfor model_name in get_available_detection_models():\n # for-loop bodies don't define scopes, so we have to save the variables\n # we want to close over in some way\n def do_test(self, model_name=model_name):\n self._test_detection_model(model_name)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\n\nfor model_name in get_available_video_models():\n\n def do_test(self, model_name=model_name):\n self._test_video_model(model_name)\n\n setattr(ModelTester, \"test_\" + model_name, do_test)\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"torch.jit.script",
"torch.nn.Sequential",
"torch.mean",
"numpy.random.seed",
"torch.manual_seed",
"torch.std",
"torch.rand",
"torch.cuda.is_available"
]
] |
wzhlifelover/JengaBot
|
[
"eb161c7e0222c4f09f0d4c95b7650e4c2574e39e"
] |
[
"arm control/locate_jenga.py"
] |
[
"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport arm_camera_calibrate\nimport Calibration\n\ndef saveImage(labeled_img1, fname1, labeled_img2, fname2):\n # show image for down camera\n plt.imshow(cv2.cvtColor(labeled_img1, cv2.COLOR_BGR2RGB))\n plt.axis(\"off\")\n plt.title(\"Down Image\")\n plt.savefig(\"calibration_thresholding/\" + fname1)\n \n # show image for up camera\n plt.imshow(cv2.cvtColor(labeled_img2, cv2.COLOR_BGR2RGB))\n plt.axis('off')\n plt.title(\"Up Image\")\n plt.savefig(\"calibration_thresholding/\" + fname2)\n \ndef save1Image(labeled_img1, fname1):\n # show image for down camera\n plt.imshow(cv2.cvtColor(labeled_img1, cv2.COLOR_BGR2RGB))\n plt.axis(\"off\")\n plt.title(\"Sample Image\")\n plt.savefig(\"calibration_thresholding/\" + fname1)\n \n\ndef getCupLocation2D_RED(fname):\n low_H1 = 0.992 * 180\n low_H2 = 0 * 180\n low_S = 0.585 * 255\n low_V = 0.572 * 255\n high_H1 = 1 * 180\n high_H2 = 0.057 * 180\n high_S = 0.746 * 255\n high_V = 0.859 * 255\n \n dir_name = \"calibration_picture/\"\n \n img = cv2.imread(dir_name + fname)\n\n img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n img_threshold1 = cv2.inRange(img_HSV, (low_H1, low_S, low_V), (high_H1, high_S, high_V))\n img_threshold2 = cv2.inRange(img_HSV, (low_H2, low_S, low_V), (high_H2, high_S, high_V))\n img_threshold = img_threshold1 + img_threshold2\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img_threshold, 4, cv2.CV_32S)\n\n # Map component labels to hue val, 0-179 is the hue range in OpenCV\n label_hue = np.uint8(179*labels/np.max(labels))\n blank_ch = 255*np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n\n # Converting cvt to BGR\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n\n # set bg label to black\n labeled_img[label_hue==0] = 0\n \n #e1Image(labeled_img, fname)\n \n # find component with largest area (excluding first component)\n cup_label = np.argmax(stats[1:,4]) + 1 \n\n return labeled_img, centroids[cup_label]\n\n\n\ndef getCupLocation2D_GREEN(fname):\n low_H = 0.282 * 180\n low_S = 0.358 * 255\n low_V = 0.424 * 255\n high_H = 0.390 * 180\n high_S = 0.579 * 255\n high_V = 0.655 * 255\n \n dir_name = \"calibration_picture/\"\n \n img = cv2.imread(dir_name + fname)\n\n img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n img_threshold = cv2.inRange(img_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V))\n num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img_threshold, 4, cv2.CV_32S)\n\n # Map component labels to hue val, 0-179 is the hue range in OpenCV\n label_hue = np.uint8(179*labels/np.max(labels))\n blank_ch = 255*np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n\n # Converting cvt to BGR\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n\n # set bg label to black\n labeled_img[label_hue==0] = 0\n \n #ave1Image(labeled_img, fname)\n \n # find component with largest area (excluding first component)\n cup_label = np.argmax(stats[1:,4]) + 1 \n\n return labeled_img, centroids[cup_label]\n\n\n\n# Stereo Calibration with OpenCV\nM1 = np.array([[1059.75, 0, 1118.19], \n [0, 1058.99, 615.079], \n [0, 0, 1]])\n\n\nM2 = np.array([[1059.55, 0, 1147.19], \n [0, 1058.64, 634.958], \n [0, 0, 1]])\n\ndist1 = np.array([[-0.0435878, 0.0129137, -0.000229128, 0.000790854, -0.00592556]])\ndist2 = np.array([[-0.0398599, 0.00700217, -0.000241005, 0.000243961, -0.00359243]])\n\nR, jacob = 
cv2.Rodrigues(np.array([0.0027161, 0, 0.000430528]))\nT = np.array([[119.837, 0.0441432, -0.0386536]]).T\n\nimage_size = (1920, 1080)\n\nR1, R2, P1, P2, Q, ROI1, ROI2 = cv2.stereoRectify(M1, dist1, M2, dist2, image_size, R, T)\n\n\n \n \n# triangulation of points\ndef triangulate_RED(fname1, fname2, save=True, debug=True):\n labeled_img1, x1 = getCupLocation2D_RED(fname1)\n labeled_img2, x2 = getCupLocation2D_RED(fname2)\n\n x1 = cv2.undistortPoints(x1, M1, dist1, R=R1, P=P1)\n x2 = cv2.undistortPoints(x2, M2, dist2, R=R2, P=P2)\n\n X = cv2.triangulatePoints(P1, P2, x1, x2)\n X = X/X[3]\n X = X[0:3]\n \n if save:\n saveImage(labeled_img1, fname1, labeled_img2, fname2)\n \n if debug:\n print(\"threshold result for %s: \" % fname1, end=\"\")\n print(x1)\n print(\"threshold result for %s: \" % fname2, end=\"\")\n print(x2)\n \n return X\n\ndef triangulate_GREEN(fname1, fname2, save=True, debug=True):\n labeled_img1, x1 = getCupLocation2D_GREEN(fname1)\n labeled_img2, x2 = getCupLocation2D_GREEN(fname2)\n\n x1 = cv2.undistortPoints(x1, M1, dist1, R=R1, P=P1)\n x2 = cv2.undistortPoints(x2, M2, dist2, R=R2, P=P2)\n\n X = cv2.triangulatePoints(P1, P2, x1, x2)\n X = X/X[3]\n X = X[0:3]\n \n if save:\n saveImage(labeled_img1, fname1, labeled_img2, fname2)\n \n if debug:\n print(\"threshold result for %s: \" % fname1, end=\"\")\n print(x1)\n print(\"threshold result for %s: \" % fname2, end=\"\")\n print(x2)\n \n return X\n\ndef get_jenga_location(cam2arm):\n\n# labeled_img1, red_up = getCupLocation2D_RED('calibration_picture/up1.jpg')\n# labeled_img2, green_up = getCupLocation2D_GREEN('calibration_picture/up1.jpg')\n\n# labeled_img1, red_down = getCupLocation2D_RED('calibration_picture/down1.jpg')\n# labeled_img2, green_down = getCupLocation2D_GREEN('calibration_picture/down1.jpg')\n# plt.imshow(cv2.cvtColor(labeled_img1, cv2.COLOR_BGR2RGB))\n# plt.axis(\"off\")\n# plt.title(\"Left Image\")\n# plt.show()\n \n# plt.imshow(cv2.cvtColor(labeled_img2, cv2.COLOR_BGR2RGB))\n# plt.axis(\"off\")\n# plt.title(\"right Image\")\n# plt.show()\n if_locate = input(\"Do you want to locate Jenga(Y/N):\")\n if(if_locate == 'N' or if_locate == 'n'):\n return \"Not Locating Jenga\",1,1, if_locate\n \n Calibration.take_picture(5)\n \n center_red = triangulate_RED('down5.jpg', 'up5.jpg', True, True)\n \n center_green = triangulate_GREEN('down5.jpg', 'up5.jpg', False, False)\n #print(\"after triang, before cam2arm\", center_red, center_green)\n \n cord_pred_red = np.matmul(cam2arm, np.append(center_red, 1))\n cord_pred_red = cord_pred_red[0:3]/cord_pred_red[3]\n #print(\"red:\", cord_pred_red)\n \n cord_pred_green = np.matmul(cam2arm, np.append(center_green, 1))\n cord_pred_green = cord_pred_green[0:3]/cord_pred_green[3]\n #print(\"green:\", cord_pred_green)\n \n center_jenga = (cord_pred_red + cord_pred_green)/2\n \n return center_jenga, cord_pred_red, cord_pred_green, if_locate\n\nprint(get_jenga_location)\n\ndef get_jenga_orientation(cord_pred_red, cord_pred_green):\n vec_jenga = cord_pred_green[0:2] - cord_pred_red[0:2]\n x1 = vec_jenga[0]\n y1 = vec_jenga[1]\n x2 = 1\n y2 = 0\n angle = np.arccos((x1*x2 + y1*y2)/np.linalg.norm(vec_jenga))\n return angle"
] |
[
[
"numpy.ones_like",
"matplotlib.pyplot.title",
"numpy.linalg.norm",
"matplotlib.pyplot.savefig",
"numpy.max",
"numpy.append",
"numpy.argmax",
"matplotlib.pyplot.axis",
"numpy.array"
]
] |
msk-access/sequence_qc
|
[
"a90ecd70c402472078b2a0a2ed20fc9727551ddd"
] |
[
"sequence_qc/plot_noise_by_tlen.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nimport plotly.figure_factory as ff\n\n\nSUBSTITUTION_TYPES_COMBINED = [\n [[\"G>T\", \"C>A\"], \"C>A\"],\n [[\"C>G\", \"G>C\"], \"C>G\"],\n [[\"G>A\", \"C>T\"], \"C>T\"],\n [[\"T>A\", \"A>T\"], \"T>A\"],\n [[\"A>G\", \"T>C\"], \"T>C\"],\n [[\"T>G\", \"A>C\"], \"T>G\"],\n]\n\nCOLORS = [\"#556278\", \"#C1292E\", \"#F2A535\"]\n\n\ndef write_summary(df, outfile):\n summary = (\n df.groupby([\"Sample\", \"Var\"], as_index=False)[[\"Type\"]]\n .count()\n .rename(columns={\"Type\": \"Count\"})\n )\n all_summary = (\n df.groupby([\"Var\"], as_index=False)[[\"Type\"]]\n .count()\n .rename(columns={\"Type\": \"Count\"})\n )\n all_summary[\"Sample\"] = \"Total\"\n summary = pd.concat([all_summary, summary], ignore_index=True)[\n [\"Sample\", \"Var\", \"Count\"]\n ]\n summary.to_csv(outfile.replace(\".pdf\", \".txt\"), sep=\"\\t\", index=False)\n\n\ndef plot_data(frag_size_select_df):\n \"\"\"\n Select data for creating plot\n\n :param: frag_size_select_df - pd.DataFrame\n :param:\n \"\"\"\n frag_size_select_df[\"Size\"] = pd.to_numeric(\n frag_size_select_df[\"Size\"], downcast=\"integer\"\n )\n frag = frag_size_select_df[(frag_size_select_df[\"Size\"] > 0)]\n # write_summary(frag, outfile)\n fig = plot_data_type(frag)\n return fig\n\n\ndef plot_data_type(frag):\n \"\"\"\n Create the plot\n\n :param: frag pd.DataFrame -\n :param:\n \"\"\"\n data = []\n labels = []\n for st_pair, st in SUBSTITUTION_TYPES_COMBINED:\n st_sizes = frag[frag['Var'].isin(st_pair)]['Size']\n if len(st_sizes) > 1:\n data.append(st_sizes)\n labels.append(st)\n geno_series = frag[frag['Var'] == 'GENOTYPE']['Size']\n if len(geno_series) > 1:\n data.append(geno_series)\n labels.append('Genotype')\n n_series = frag[frag['Var'] == 'N']['Size']\n if len(n_series) > 1:\n data.append(n_series)\n labels.append('N')\n\n try:\n fig = ff.create_distplot(\n data,\n labels,\n # colors=COLORS,\n # bin_size=.2,\n show_rug=False,\n )\n fig.update_layout(\n title='Insert size distribution for noisy positions',\n xaxis_title=\"Insert Size\",\n )\n except np.linalg.LinAlgError:\n # Not enough data for plot\n return None\n return fig\n\n\ndef create_noisy_tlen_plot(noisy_tlen_df):\n \"\"\"\n Interface to this module\n\n :return:\n \"\"\"\n frag_size_select_df = noisy_tlen_df[['Var', 'Size', 'Chr', 'Pos']]\n fig = plot_data(frag_size_select_df)\n return fig\n"
] |
[
[
"pandas.concat",
"pandas.to_numeric"
]
] |
dk-wei/GCP-code-snippets
|
[
"c23ca7f2981b5df946448b20a54f823ce52bba05"
] |
[
"ml/kubeflow-argo/components/kubeflow/taxi_model/trainer/taxi.py"
] |
[
"# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility and schema methods for the chicago_taxi sample.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_transform import coders as tft_coders\nfrom tensorflow_transform.tf_metadata import dataset_schema\n\n# Categorical features are assumed to each have a maximum value in the dataset.\nMAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]\nCATEGORICAL_FEATURE_KEYS = [\n 'trip_start_hour',\n 'trip_start_day',\n 'trip_start_month'\n]\n\nDENSE_FLOAT_FEATURE_KEYS = [\n 'trip_miles',\n 'fare',\n 'trip_seconds'\n]\n\n# Number of buckets used by tf.transform for encoding each feature.\nFEATURE_BUCKET_COUNT = 10\n\nBUCKET_FEATURE_KEYS = [\n 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',\n 'dropoff_longitude'\n]\n\n# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform\nVOCAB_SIZE = 1000\n\n# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.\nOOV_SIZE = 10\n\nVOCAB_FEATURE_KEYS = [\n 'pickup_census_tract',\n 'dropoff_census_tract',\n 'payment_type',\n 'company',\n 'pickup_community_area',\n 'dropoff_community_area'\n]\nLABEL_KEY = 'tips'\nFARE_KEY = 'fare'\n\n\n# Tf.Transform considers these features as \"raw\"\ndef get_raw_feature_spec():\n return {\n 'fare':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'trip_start_timestamp':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'trip_start_hour':\n tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0),\n 'trip_start_day':\n tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0),\n 'trip_start_month':\n tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0),\n 'pickup_latitude':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'pickup_longitude':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'dropoff_latitude':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'dropoff_longitude':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'tips':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'trip_miles':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'pickup_census_tract':\n tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=''),\n 'dropoff_census_tract':\n tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=''),\n 'pickup_community_area':\n tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=''),\n 'payment_type':\n tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=''),\n 'company':\n tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=''),\n 'trip_seconds':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 'dropoff_community_area':\n tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=''),\n }\n\n\ndef make_csv_coder():\n \"\"\"Return a coder 
for tf.transform to read csv files.\"\"\"\n column_names = [\n 'pickup_community_area', 'fare', 'trip_start_month', 'trip_start_hour',\n 'trip_start_day', 'trip_start_timestamp', 'pickup_latitude',\n 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude', 'trip_miles',\n 'pickup_census_tract', 'dropoff_census_tract', 'payment_type', 'company',\n 'dropoff_community_area', 'tips', 'trip_seconds'\n ]\n parsing_feature_spec = get_raw_feature_spec()\n parsing_schema = dataset_schema.from_feature_spec(parsing_feature_spec)\n return tft_coders.CsvCoder(column_names, parsing_schema)\n\n\ndef clean_raw_data_dict(input_dict):\n output_dict = {}\n\n raw_feature_spec = get_raw_feature_spec()\n for key in get_raw_feature_spec():\n if key not in input_dict or not input_dict[key]:\n output_dict[key] = raw_feature_spec[key].default_value\n else:\n output_dict[key] = input_dict[key]\n return output_dict\n\n\ndef make_sql(table_name, max_rows=None, for_eval=False):\n \"\"\"Creates the sql command for pulling data from BigQuery.\n\n Args:\n table_name: BigQuery table name\n max_rows: if set, limits the number of rows pulled from BigQuery\n for_eval: True if this is for evaluation, false otherwise\n\n Returns:\n sql command as string\n \"\"\"\n if for_eval:\n # 1/3 of the dataset used for eval\n where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) = 0'\n else:\n # 2/3 of the dataset used for training\n where_clause = 'WHERE MOD(FARM_FINGERPRINT(unique_key), 3) > 0'\n\n limit_clause = ''\n if max_rows:\n limit_clause = 'LIMIT {max_rows}'.format(max_rows=max_rows)\n return \"\"\"\n SELECT\n CAST(pickup_community_area AS string) AS pickup_community_area,\n CAST(dropoff_community_area AS string) AS dropoff_community_area,\n CAST(pickup_census_tract AS string) AS pickup_census_tract,\n CAST(dropoff_census_tract AS string) AS dropoff_census_tract,\n fare,\n EXTRACT(MONTH FROM trip_start_timestamp) AS trip_start_month,\n EXTRACT(HOUR FROM trip_start_timestamp) AS trip_start_hour,\n EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS trip_start_day,\n UNIX_SECONDS(trip_start_timestamp) AS trip_start_timestamp,\n pickup_latitude,\n pickup_longitude,\n dropoff_latitude,\n dropoff_longitude,\n trip_miles,\n payment_type,\n company,\n trip_seconds,\n tips\n FROM `{table_name}`\n {where_clause}\n {limit_clause}\n\"\"\".format(table_name=table_name,\n where_clause=where_clause,\n limit_clause=limit_clause)\n"
] |
[
[
"tensorflow.FixedLenFeature"
]
] |
Collebt/LoFTR_implement
|
[
"f63c3fe08a1868380ed9a20dcddf4e8949a816e4"
] |
[
"src/datasets/megadepth.py"
] |
[
"import os.path as osp\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\nfrom loguru import logger\n\nfrom src.utils.dataset import read_megadepth_gray, read_megadepth_depth\n\n\nclass MegaDepthDataset(Dataset):\n def __init__(self,\n root_dir,\n npz_path,\n mode='train',\n min_overlap_score=0.4,\n img_resize=None,\n df=None,\n img_padding=False,\n depth_padding=False,\n augment_fn=None,\n **kwargs):\n \"\"\"\n Manage one scene(npz_path) of MegaDepth dataset. \n Args:\n root_dir (str): megadepth root directory that has `phoenix`.\n npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.\n mode (str): options are ['train', 'val', 'test']\n min_overlap_score (float): how much a pair should have in common. In range of [0, 1]. Set to 0 when testing.\n img_resize (int, optional): the longer edge of resized images. None for no resize. 640 is recommended.\n This is useful during training with batches and testing with memory intensive algorithms.\n df (int, optional): image size division factor. NOTE: this will change the final image size after img_resize.\n img_padding (bool): If set to 'True', zero-pad the image to squared size. This is useful during training.\n depth_padding (bool): If set to 'True', zero-pad depthmap to (2000, 2000). This is useful during training.\n augment_fn (callable, optional): augments images with pre-defined visual effects.\n \"\"\"\n super().__init__()\n self.root_dir = root_dir\n self.mode = mode\n self.scene_id = npz_path.split('.')[0]\n\n # prepare scene_info and pair_info\n if mode == 'test' and min_overlap_score != 0:\n logger.warning(\"You are using `min_overlap_score`!=0 in test mode. Set to 0.\")\n min_overlap_score = 0\n self.scene_info = np.load(npz_path, allow_pickle=True)\n self.pair_infos = self.scene_info['pair_infos'].copy()\n del self.scene_info['pair_infos']\n self.pair_infos = [pair_info for pair_info in self.pair_infos if pair_info[1] > min_overlap_score]\n\n # parameters for image resizing, padding and depthmap padding\n if mode == 'train':\n assert img_resize is not None and img_padding and depth_padding\n self.img_resize = img_resize\n self.df = df\n self.img_padding = img_padding\n self.depth_max_size = 2000 if depth_padding else None # the upperbound of depthmaps size in megadepth.\n\n # for training LoFTR\n self.augment_fn = augment_fn if mode == 'train' else None\n self.coarse_scale = getattr(kwargs, 'coarse_scale', 0.125)\n\n def __len__(self):\n return len(self.pair_infos)\n\n def __getitem__(self, idx):\n (idx0, idx1), overlap_score, central_matches = self.pair_infos[idx]\n\n # read grayscale image and mask. (1, h, w) and (h, w)\n img_name0 = osp.join(self.root_dir, self.scene_info['image_paths'][idx0])\n img_name1 = osp.join(self.root_dir, self.scene_info['image_paths'][idx1])\n image0, mask0, scale0 = read_megadepth_gray(\n img_name0, self.img_resize, self.df, self.img_padding,\n np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))\n image1, mask1, scale1 = read_megadepth_gray(\n img_name1, self.img_resize, self.df, self.img_padding,\n np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))\n\n # read depth. 
shape: (h, w)\n if self.mode in ['train', 'val']:\n depth0 = read_megadepth_depth(\n osp.join(self.root_dir, self.scene_info['depth_paths'][idx0]), pad_to=self.depth_max_size)\n depth1 = read_megadepth_depth(\n osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]), pad_to=self.depth_max_size)\n else:\n depth0 = depth1 = torch.tensor([])\n\n # read intrinsics of original size\n K_0 = torch.tensor(self.scene_info['intrinsics'][idx0].copy(), dtype=torch.float).reshape(3, 3)\n K_1 = torch.tensor(self.scene_info['intrinsics'][idx1].copy(), dtype=torch.float).reshape(3, 3)\n\n # read and compute relative poses\n T0 = self.scene_info['poses'][idx0]\n T1 = self.scene_info['poses'][idx1]\n T_0to1 = torch.tensor(np.matmul(T1, np.linalg.inv(T0)), dtype=torch.float)[:4, :4] # (4, 4)\n T_1to0 = T_0to1.inverse()\n\n data = {\n 'image0': image0, # (1, h, w)\n 'depth0': depth0, # (h, w)\n 'image1': image1,\n 'depth1': depth1,\n 'T_0to1': T_0to1, # (4, 4)\n 'T_1to0': T_1to0,\n 'K0': K_0, # (3, 3)\n 'K1': K_1,\n 'scale0': scale0, # [scale_w, scale_h]\n 'scale1': scale1,\n 'dataset_name': 'MegaDepth',\n 'scene_id': self.scene_id,\n 'pair_id': idx,\n 'pair_names': (self.scene_info['image_paths'][idx0], self.scene_info['image_paths'][idx1]),\n }\n\n # for LoFTR training\n if mask0 is not None: # img_padding is True\n if self.coarse_scale:\n [ts_mask_0, ts_mask_1] = F.interpolate(torch.stack([mask0, mask1], dim=0)[None].float(),\n scale_factor=self.coarse_scale,\n mode='nearest',\n recompute_scale_factor=False)[0].bool()\n data.update({'mask0': ts_mask_0, 'mask1': ts_mask_1})\n\n return data\n"
] |
[
[
"numpy.random.choice",
"numpy.linalg.inv",
"torch.tensor",
"torch.stack",
"numpy.load"
]
] |
punch872/EyeWarnYou
|
[
"71ea21a8b3f1ae213478d735a10a240524b89702"
] |
[
"allinone/geo8_choropleth.py"
] |
[
"#1.0 Import Library\nimport json\nimport folium\nimport pandas as pd\nfrom folium.plugins import MarkerCluster\n\n#Load Data\ngeo_data = json.load(open(\"thailand.json\"))\nemp_data = pd.read_csv(\"us-unemployment.csv\")\n\ndata = pd.read_csv(\"data/output-2015.csv\")\nlat = data['latitude']\nlon = data['longitude']\nelevation = data['input_string']\n\n\n#Function to change colors\ndef color_change(elev):\n if(True):\n return('green')\n\n\n\n# elif(1000 <= elev <3000):\n# return('orange')\n# else:\n# return('black')\n\n\n\n#Create base map\nmap = folium.Map(location=[13.73,100.59], zoom_start = 11)\n#map = folium.Map(location=[37.296933,-121.9574983], zoom_start = 5)\n\nmarker_cluster = MarkerCluster().add_to(map)\n\n#Plot Markers\nfor lat, lon,elevation in zip(lat, lon,elevation):\n folium.CircleMarker(location=[lat, lon], radius = 9, popup=str(elevation)+\" m\", fill_color=color_change(elevation), color=\"gray\", fill_opacity = 0.9).add_to(marker_cluster)\n\n\n#Method to create Choropleth map, All parameters are mandatory\nfolium.Choropleth(\n geo_data=geo_data, data=emp_data,\n name = 'Unemployment Rate',\n columns=['State', 'Unemployment'],\n key_on='feature.id',\n fill_color='YlGn', fill_opacity=0.2, line_opacity=0.2,\n legend_name='Unemployment Rate (%)'\n).add_to(map)\n\n\n#Save the map\nmap.save(\"map1.html\")\n"
] |
[
[
"pandas.read_csv"
]
] |
chokobole/felicia-examples
|
[
"83303cf0f4bf688e9794ee574394d98619a16007"
] |
[
"examples/deep_learning/object_detection/object_detection_from_camera.py"
] |
[
"# Copyright (c) 2019 The Felicia Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport sys\n\nimport numpy as np\n\nimport import_order_resolver # Should be before import felicia_py\nimport felicia_py as fel\nimport felicia_py.command_line_interface as cli\nfrom felicia.core.protobuf.bounding_box_pb2 import ImageWithBoundingBoxesMessage\nfrom felicia.core.protobuf.master_data_pb2 import NodeInfo\nfrom felicia.core.protobuf.channel_pb2 import ChannelDef\nfrom felicia.core.protobuf.ui_pb2 import PIXEL_FORMAT_RGB\nfrom felicia.drivers.camera.camera_frame_message_pb2 import CameraFrameMessage\nfrom tf_object_detection import ObjectDetection\n\n\nclass ObjectDetectionNode(fel.NodeLifecycle):\n def __init__(self, topic, camera_descriptor):\n super().__init__()\n self.topic = topic\n self.camera_descriptor = camera_descriptor\n self.draw_on_image = False\n if self.draw_on_image:\n self.publisher = fel.communication.Publisher(CameraFrameMessage)\n else:\n self.publisher = fel.communication.Publisher(\n ImageWithBoundingBoxesMessage)\n\n def on_init(self):\n self.camera = fel.drivers.CameraFactory.new_camera(\n self.camera_descriptor)\n s = self.camera.init()\n if not s.ok():\n fel.log(fel.ERROR, s.error_message())\n sys.exit(1)\n self.object_detection = ObjectDetection()\n\n def on_did_create(self, node_info):\n self.node_info = node_info\n self.request_publish()\n\n def on_request_publish(self, status):\n if status.ok():\n fel.main_thread.post_task(self.start_camera)\n else:\n fel.log(fel.ERROR, status.error_message())\n\n def request_publish(self):\n settings = fel.communication.Settings()\n settings.queue_size = 1\n settings.is_dynamic_buffer = True\n\n self.publisher.request_publish(self.node_info, self.topic,\n ChannelDef.CHANNEL_TYPE_TCP | ChannelDef.CHANNEL_TYPE_WS,\n settings, self.on_request_publish)\n\n def start_camera(self):\n # You should set the camera format if you have any you want to run with.\n s = self.camera.start(fel.drivers.CameraFormat(640, 480, PIXEL_FORMAT_RGB, 25),\n self.on_camera_frame, self.on_camera_error)\n if not s.ok():\n fel.log(fel.ERROR, s.error_message())\n sys.exit(1)\n\n def on_camera_frame(self, camera_frame):\n if self.publisher.is_unregistered():\n return\n\n image_np = np.array(camera_frame, copy=False)\n\n if self.draw_on_image:\n detected_image = self.object_detection.run(\n image_np, self.draw_on_image)\n\n detected_camera_frame = fel.drivers.CameraFrame(\n detected_image, camera_frame.camera_format, camera_frame.timestamp)\n\n self.publisher.publish(\n detected_camera_frame.to_camera_frame_message(False))\n else:\n image_with_bounding_boxes = self.object_detection.run(\n image_np, self.draw_on_image)\n\n self.publisher.publish(image_with_bounding_boxes)\n\n def on_camera_error(self, status):\n fel.log_if(fel.ERROR, not status.ok(), status.error_message())\n\n\ndef main():\n fel.felicia_init()\n\n camera_descriptors = []\n s = fel.drivers.CameraFactory.get_camera_descriptors(camera_descriptors)\n if not s.ok():\n print(\"{} {}.\".format(cli.RED_ERROR, s), file=sys.stderr)\n sys.exit(1)\n\n if len(camera_descriptors) == 0:\n print(\"{} {}.\".format(cli.RED_ERROR, \"No camera device\"), file=sys.stderr)\n sys.exit(1)\n\n master_proxy = fel.master_proxy\n s = master_proxy.start()\n if not s.ok():\n print(\"{} {}.\".format(cli.RED_ERROR, s), file=sys.stderr)\n sys.exit(1)\n\n node_info = NodeInfo()\n\n # Use the first camera\n master_proxy.request_register_node(\n 
ObjectDetectionNode, node_info, \"message\", camera_descriptors[0])\n\n fel.main_thread.run()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array"
]
] |
PolasekT/ICTree
|
[
"d13ad603101805bcc288411504ecffd6f2e1f365"
] |
[
"PerceptualMetric/psrc/perceptree/test/data/dummy_tree_image.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\nWrapper for testing tree file.\n\"\"\"\n\n\nfrom typing import Tuple\n\nimport base64\nimport numpy as np\nimport struct\n\n\ndef dummy_tree_image_dict(shape: Tuple[int, int, int]) -> dict:\n data = np.random.random_sample(size=shape).flatten()\n pack_format = f\"{len(data)}f\"\n packed = struct.pack(pack_format, *data)\n encoded = base64.b64encode(packed)\n\n return {\n \"image\": {\n \"data\": encoded,\n \"width\": shape[0],\n \"height\": shape[1],\n \"channels\": shape[2],\n \"valueType\": \"Float\"\n }\n }\n\n"
] |
[
[
"numpy.random.random_sample"
]
] |
vanthaiunghoa/ourBlock
|
[
"c106ddc030707c91af1e4ceff6b3a6086f727d8d"
] |
[
"classification/tflearn-stuff/run-prediction.py"
] |
[
"# restore all of our data structures\nimport pickle\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\n# things we need for Tensorflow\nimport numpy as np\nimport tflearn\nimport tensorflow as tf\nimport random\n\ndata = pickle.load( open( \"training_data\", \"rb\" ) )\nwords = data['words']\nclasses = data['classes']\ntrain_x = data['train_x']\ntrain_y = data['train_y']\n\n# import our chat-bot intents file\nimport json\nwith open('intents.json', 'r', encoding = 'utf-8') as json_data:\n intents = json.load(json_data)\n\nnet = tflearn.input_data(shape=[None, len(train_x[0])])\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')\nnet = tflearn.regression(net)\n\nmodel = tflearn.DNN(net, tensorboard_dir='tflearn_logs')\n# load our saved model\nmodel.load('./model.tflearn')\n\ndef clean_up_sentence(sentence):\n # tokenize the pattern\n sentence_words = nltk.word_tokenize(sentence)\n # stem each word\n sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]\n return sentence_words\n\n# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\ndef bow(sentence, words, show_details=False):\n # tokenize the pattern\n sentence_words = clean_up_sentence(sentence)\n # bag of words\n bag = [0]*len(words) \n for s in sentence_words:\n for i,w in enumerate(words):\n if w == s: \n bag[i] = 1\n if show_details:\n print (\"found in bag: %s\" % w)\n\n return(np.array(bag))\n\nERROR_THRESHOLD = 0.25\ndef classify(sentence):\n # generate probabilities from the model\n results = model.predict([bow(sentence, words)])[0]\n # filter out predictions below a threshold\n results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD]\n # sort by strength of probability\n results.sort(key=lambda x: x[1], reverse=True)\n return_list = []\n for r in results:\n return_list.append((classes[r[0]], r[1]))\n # return tuple of intent and probability\n return return_list\n\ndef response(sentence, userID='123', show_details=False):\n results = classify(sentence)\n # if we have a classification then find the matching intent tag\n if results:\n # loop as long as there are matches to process\n while results:\n for i in intents['intents']:\n # find a tag matching the first result\n if i['tag'] == results[0][0]:\n # a random response from the intent\n return print(random.choice(i['responses']))\n\n results.pop(0)\n \n \nprint(classify(\"i'm gay\"))"
] |
[
[
"numpy.array"
]
] |
ArashAhmadian/AirTrafficSTGCN
|
[
"ae705d14f6aace31633bd26cfd69b227c6596fe0"
] |
[
"STGCN/models/base_model.py"
] |
[
"# @Time : Jan. 12, 2019 19:01\n# @Author : Veritas YIN\n# @FileName : base_model.py\n# @Version : 1.0\n# @IDE : PyCharm\n# @Github : https://github.com/VeritasYin/Project_Orion\n\nfrom models.layers import *\nfrom os.path import join as pjoin\nimport tensorflow as tf\nimport sys\n\n\ndef build_model(inputs, n_his, Ks, Kt, blocks, keep_prob):\n '''\n Build the base model.\n :param inputs: placeholder.\n :param n_his: int, size of historical records for training.\n :param Ks: int, kernel size of spatial convolution.\n :param Kt: int, kernel size of temporal convolution.\n :param blocks: list, channel configs of st_conv blocks.\n :param keep_prob: placeholder.\n '''\n x = inputs[:, 0:n_his, :, :]\n print(x.get_shape())\n\n # Ko>0: kernel size of temporal convolution in the output layer.\n Ko = n_his\n # ST-Block\n #will run 3 times, n_his - (Ks-1) -> 14 - 3*(2) = 8\n for i, channels in enumerate(blocks):\n x = st_conv_block(x, Ks, Kt, channels, i, keep_prob, act_func='GLU')\n Ko -= 2 * (Ks - 1)\n\n # Output Layer\n if Ko > 1:\n print((f\"Ko{Ko}\"))\n y = output_layer(x, Ko, 'output_layer')\n else:\n raise ValueError(f'ERROR: kernel size Ko must be greater than 1, but received \"{Ko}\".')\n\n tf.add_to_collection(name='copy_loss',\n value=tf.nn.l2_loss(inputs[:, n_his - 1:n_his, :, :] - inputs[:, n_his:n_his + 1, :, :]))\n \n \n train_loss = []\n valid_loss = []\n\n train_loss.append(tf.sqrt(tf.losses.mean_squared_error(y, inputs[:, n_his:n_his + 1, :, :])))\n train_loss.append(tf.nn.l2_loss(y - inputs[:, n_his:n_his + 1, :, :]))\n\n valid_loss.append(tf.sqrt(tf.losses.mean_squared_error(y, inputs[:, n_his:n_his + 1, :, :])))\n valid_loss.append(tf.nn.l2_loss(y - inputs[:, n_his:n_his + 1, :, :]))\n\n #valid_loss = tf.sqrt(tf.losses.mean_squared_error(y, inputs[:, n_his:n_his + 1, :, :]))\n #valid_loss = \n single_pred = y[:, 0, :, :]\n tf.add_to_collection(name='y_pred', value=single_pred)\n return train_loss,valid_loss, single_pred\n\n\ndef model_save(sess, global_steps, model_name, save_path='./output/models/'):\n '''\n Save the checkpoint of trained model.\n :param sess: tf.Session().\n :param global_steps: tensor, record the global step of training in epochs.\n :param model_name: str, the name of saved model.\n :param save_path: str, the path of saved model.\n :return:\n '''\n saver = tf.train.Saver(max_to_keep=3)\n prefix_path = saver.save(sess, pjoin(save_path, model_name), global_step=global_steps)\n print(f'<< Saving model to {prefix_path} ...')\n"
] |
[
[
"tensorflow.train.Saver",
"tensorflow.nn.l2_loss",
"tensorflow.add_to_collection",
"tensorflow.losses.mean_squared_error"
]
] |